/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/check.h"
#include "hf/mpool.h"
#include "hf/vm.h"
}

#include <list>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;

using struct_vm = struct vm;
using struct_vm_locked = struct vm_locked;

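/*
 * Backing memory for the fixture's memory pool: 32 pages, handed to the
 * mpool in SetUp() to back the page tables and VM structures allocated by
 * the tests below.
 */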
constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 32;
const int TOP_LEVEL = arch_mm_stage2_max_level();

class vm : public ::testing::Test
{
        void SetUp() override
        {
                /*
                 * TODO: replace with direct use of stdlib allocator so
                 * sanitizers are more effective.
                 */
                test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
                mpool_init(&ppool, sizeof(struct mm_page_table));
                mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
        }

        std::unique_ptr<uint8_t[]> test_heap;

       protected:
        struct mpool ppool;

       public:
        static bool BootOrderSmallerThan(struct_vm *vm1, struct_vm *vm2)
        {
                return vm1->boot_order < vm2->boot_order;
        }
};

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(vm, vm_unmap_hypervisor_not_mapped)
{
        struct_vm *vm;
        struct vm_locked vm_locked;

        EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false));
        vm_locked = vm_lock(vm);
        ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
        EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
        EXPECT_THAT(
                mm_test::get_ptable(vm->ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&vm->ptable, &ppool);
        vm_unlock(&vm_locked);
}

/**
 * Validate that the "boot_list" is created properly, according to the VMs'
 * "boot_order" field.
 */
TEST_F(vm, vm_boot_order)
{
        struct_vm *vm_cur;
        std::list<struct_vm *> expected_final_order;

        EXPECT_FALSE(vm_get_first_boot());

        /*
         * Insertion when no call to "vm_update_boot" has been made yet.
         * The "boot_list" is expected to be empty.
         */
        EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
        vm_cur->boot_order = 3;
        vm_update_boot(vm_cur);
        expected_final_order.push_back(vm_cur);

        EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);

        /* Insertion at the head of the boot list. */
        EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
        vm_cur->boot_order = 1;
        vm_update_boot(vm_cur);
        expected_final_order.push_back(vm_cur);

        EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);

        /* Insertion of two VMs in the middle of the boot list. */
        for (int i = 0; i < 2; i++) {
                EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
                vm_cur->boot_order = 2;
                vm_update_boot(vm_cur);
                expected_final_order.push_back(vm_cur);
        }

        /*
         * Insertion at the end of the list.
         * This test shares data with "vm_unmap_hypervisor_not_mapped". As
         * such, a VM with ID 1 and boot_order 0 is expected to have been
         * initialized before this test.
         */
        vm_cur = vm_find(1);
        EXPECT_FALSE(vm_cur == NULL);
        vm_update_boot(vm_cur);
        expected_final_order.push_back(vm_cur);

        /*
         * Before the final verification, the number of initialized VMs should
         * match the size of "expected_final_order".
         */
        EXPECT_EQ(expected_final_order.size(), vm_get_count())
                << "Something went wrong with the test itself...\n";

        /* Sort VMs from lowest to highest "boot_order" field. */
        expected_final_order.sort(vm::BootOrderSmallerThan);

        std::list<struct_vm *>::iterator it;
        for (it = expected_final_order.begin(), vm_cur = vm_get_first_boot();
             it != expected_final_order.end() && vm_cur != NULL;
             it++, vm_cur = vm_cur->next_boot) {
                EXPECT_EQ((*it)->id, vm_cur->id);
        }
}

/**
 * Validates the update and check functions for binding notifications to
 * endpoints.
 */
TEST_F(vm, vm_notifications_bind_diff_senders)
{
        struct_vm *current_vm = nullptr;
        struct vm_locked current_vm_locked;
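        /*
         * bitmaps[0] and bitmaps[1] are disjoint and are each bound to a
         * different sender below; bitmaps[2] overlaps both and is used to
         * check that a bitmap that is not fully bound to a sender does not
         * validate.
         */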
        std::vector<struct_vm *> dummy_senders;
        ffa_notifications_bitmap_t bitmaps[] = {
                0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
        bool is_from_vm = true;

        /* For the subsequent tests three VMs are used. */
        CHECK(vm_get_count() >= 3);

        current_vm = vm_find_index(0);

        dummy_senders.push_back(vm_find_index(1));
        dummy_senders.push_back(vm_find_index(2));

        current_vm_locked = vm_lock(current_vm);

        for (unsigned int i = 0; i < 2; i++) {
                /* Validate the bindings' state after initialization. */
                EXPECT_TRUE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, HF_INVALID_VM_ID,
                        bitmaps[i], false));

                /*
                 * Validate bind-related operations. This test only considers
                 * global notifications.
                 */
                vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                        dummy_senders[i]->id, bitmaps[i], false);

                EXPECT_TRUE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, dummy_senders[i]->id,
                        bitmaps[i], false));

                EXPECT_FALSE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
                        bitmaps[i], false));

                EXPECT_FALSE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, dummy_senders[i]->id,
                        bitmaps[1 - i], false));

                EXPECT_FALSE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, dummy_senders[i]->id,
                        bitmaps[2], false));
        }

        /* Clean up the bindings for the other tests. */
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
                bitmaps[0], false);
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
                bitmaps[1], false);

        vm_unlock(&current_vm_locked);
}

/**
 * Validates the update and check functions for binding notifications, namely
 * the configuration of bindings of global and per-vCPU notifications.
 */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
        struct_vm *current_vm;
        struct vm_locked current_vm_locked;
        struct_vm *dummy_sender;
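        /*
         * Global notifications use the lower half of the bitmap and per-vCPU
         * notifications the complementary upper half, so the two bindings
         * never overlap.
         */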
        ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
        ffa_notifications_bitmap_t per_vcpu = ~global;
        bool is_from_vm = true;

        CHECK(vm_get_count() >= 2);

        current_vm = vm_find_index(0);

        dummy_sender = vm_find_index(1);

        current_vm_locked = vm_lock(current_vm);

        vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                dummy_sender->id, global, false);
        vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                dummy_sender->id, per_vcpu, true);

        /* Check validation of global notifications bindings. */
        EXPECT_TRUE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id, global,
                false));

        /* Check validation of per-vCPU notifications bindings. */
        EXPECT_TRUE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
                true));

        /*
         * Check that global notifications are not validated as per-vCPU, and
         * vice-versa.
         */
        EXPECT_FALSE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id, global, true));
        EXPECT_FALSE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
                false));
        EXPECT_FALSE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id,
                global | per_vcpu, true));
        EXPECT_FALSE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id,
                global | per_vcpu, false));

        /* Undo the bindings. */
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
                global, false);
        EXPECT_TRUE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, 0, global, false));

        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
                per_vcpu, false);
        EXPECT_TRUE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, 0, per_vcpu, false));

        vm_unlock(&current_vm_locked);
}

/**
 * Validates accesses to notifications bitmaps.
 */
TEST_F(vm, vm_notifications_set_and_get)
{
        struct_vm *current_vm;
        struct vm_locked current_vm_locked;
        struct_vm *dummy_sender;
        ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
        ffa_notifications_bitmap_t per_vcpu = ~global;
        ffa_notifications_bitmap_t ret;
        const unsigned int vcpu_idx = 1;
        struct notifications *notifications;
        const bool is_from_vm = true;

        CHECK(vm_get_count() >= 2);

        current_vm = vm_find_index(0);
        dummy_sender = vm_find_index(1);

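        /* Alias the VM's internal "from_vm" state to verify it is cleared. */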
        notifications = &current_vm->notifications.from_vm;
        current_vm_locked = vm_lock(current_vm);

        vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                dummy_sender->id, global, false);
        vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                dummy_sender->id, per_vcpu, true);

        /* Validate getting the pending bitmap for global notifications. */
        vm_notifications_set(current_vm_locked, is_from_vm, global, 0ull,
                false);

        ret = vm_notifications_get_pending_and_clear(current_vm_locked,
                is_from_vm, 0ull);
        EXPECT_EQ(ret, global);
        EXPECT_EQ(notifications->global.pending, 0ull);

        /* Validate getting the pending bitmap for per-vCPU notifications. */
        vm_notifications_set(current_vm_locked, is_from_vm, per_vcpu, vcpu_idx,
                true);

        ret = vm_notifications_get_pending_and_clear(current_vm_locked,
                is_from_vm, vcpu_idx);
        EXPECT_EQ(ret, per_vcpu);
        EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);

        /*
         * Validate that getting notifications for a specific vCPU also returns
         * the global notifications.
         */
        vm_notifications_set(current_vm_locked, is_from_vm, per_vcpu, vcpu_idx,
                true);
        vm_notifications_set(current_vm_locked, is_from_vm, global, 0ull,
                false);

        ret = vm_notifications_get_pending_and_clear(current_vm_locked,
                is_from_vm, vcpu_idx);
        EXPECT_EQ(ret, per_vcpu | global);
        EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
        EXPECT_EQ(notifications->global.pending, 0ull);

        /* Undo the bindings. */
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
                global, false);
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
                per_vcpu, true);
        vm_unlock(&current_vm_locked);
}

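/*
 * Note on the layout used by 'vm_notifications_info_get_pending' in the tests
 * below: 'ids' holds a VM ID followed by the vCPU IDs with pending per-vCPU
 * notifications, repeated per list; 'lists_count' is the number of such lists
 * and 'lists_sizes[i]' the number of vCPU IDs in list i. Pending global
 * notifications only contribute the VM ID, i.e. a list of size 0.
 */
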
/**
 * Validates simple getting of notifications info for global notifications.
 */
TEST_F(vm, vm_notifications_info_get_global)
{
        ffa_notifications_bitmap_t to_set = 0xFU;
        ffa_notifications_bitmap_t got;

        /*
         * The following variables are also expected to be used when handling
         * FFA_NOTIFICATION_INFO_GET.
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_count = 0;
        enum notifications_info_get_state current_state = INIT;

        CHECK(vm_get_count() >= 2);

        for (unsigned int i = 0; i < 2; i++) {
                struct_vm *current_vm = vm_find_index(0);
                struct vm_locked current_vm_locked = vm_lock(current_vm);
                struct notifications *notifications =
                        &current_vm->notifications.from_sp;
                const bool is_from_vm = false;

                vm_notifications_set(current_vm_locked, is_from_vm, to_set, 0,
                        false);

                vm_notifications_info_get_pending(
                        current_vm_locked, is_from_vm, ids, &ids_count,
                        lists_sizes, &lists_count,
                        FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

                /*
                 * The ID count and the list count should be the same here:
                 * global notifications only add the VM ID to the list.
                 */
                EXPECT_EQ(ids_count, i + 1);
                EXPECT_EQ(lists_count, i + 1);
                EXPECT_EQ(lists_sizes[i], 0);
                EXPECT_EQ(to_set, notifications->global.info_get_retrieved);

                /* The state must be reset to INIT for each VM. */
                current_state = INIT;

                /*
                 * Check that getting pending notifications gives the expected
                 * return and clears the 'pending' and 'info_get_retrieved'
                 * bitmaps.
                 */
                got = vm_notifications_get_pending_and_clear(current_vm_locked,
                        is_from_vm, 0);
                EXPECT_EQ(got, to_set);

                EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
                EXPECT_EQ(notifications->global.pending, 0U);

                vm_unlock(&current_vm_locked);
        }
}

/**
 * Validates simple getting of notifications info for per-vCPU notifications.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu)
{
        const ffa_notifications_bitmap_t per_vcpu = 0xFU;
        ffa_notifications_bitmap_t got;

        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 0;
        enum notifications_info_get_state current_state = INIT;

        CHECK(vm_get_count() >= 2);

        for (unsigned int i = 0; i < 2; i++) {
                struct_vm *current_vm = vm_find_index(0);
                struct vm_locked current_vm_locked = vm_lock(current_vm);
                struct notifications *notifications =
                        &current_vm->notifications.from_sp;
                const bool is_from_vm = false;

                vm_notifications_set(current_vm_locked, is_from_vm, per_vcpu,
                        0, true);

                vm_notifications_info_get_pending(
                        current_vm_locked, is_from_vm, ids, &ids_count,
                        lists_sizes, &lists_count,
                        FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

                /*
                 * For per-vCPU notifications each list holds the VM ID plus
                 * one vCPU ID, so the ID count is expected to be twice the
                 * list count.
                 */
                EXPECT_EQ(ids_count, (i + 1) * 2);
                EXPECT_EQ(lists_count, i + 1);
                EXPECT_EQ(lists_sizes[i], 1);
                EXPECT_EQ(per_vcpu,
                        notifications->per_vcpu[0].info_get_retrieved);

                /* The state must be reset to INIT for each VM. */
                current_state = INIT;

                /*
                 * Check that getting pending notifications gives the expected
                 * return and clears the 'pending' and 'info_get_retrieved'
                 * bitmaps.
                 */
                got = vm_notifications_get_pending_and_clear(current_vm_locked,
                        is_from_vm, 0);
                EXPECT_EQ(got, per_vcpu);

                EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
                EXPECT_EQ(notifications->per_vcpu[0].pending, 0U);

                vm_unlock(&current_vm_locked);
        }
}

/**
 * Validate getting of notifications information when all vCPUs have
 * notifications pending.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus)
{
        struct_vm *current_vm = nullptr;
        struct vm_locked current_vm_locked;
        const ffa_vcpu_count_t vcpu_count = MAX_CPUS;
        ffa_notifications_bitmap_t got;
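        /*
         * A global bitmap chosen so that it does not overlap the per-vCPU
         * masks set for each vCPU below.
         */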
        const ffa_notifications_bitmap_t global = 0xF0000;

        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         */
        struct notifications *notifications;
        const bool is_from_sp = false;
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 0;
        enum notifications_info_get_state current_state = INIT;

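        /*
         * Initialize a new VM with MAX_CPUS vCPUs so that every vCPU can have
         * a pending notification.
         */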
        EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false));
        current_vm_locked = vm_lock(current_vm);
        notifications = &current_vm->notifications.from_sp;

        for (unsigned int i = 0; i < vcpu_count; i++) {
                vm_notifications_set(current_vm_locked, is_from_sp,
                        FFA_NOTIFICATION_MASK(i), i, true);
        }

        /*
         * Adding a global notification should not change the list of IDs,
         * because global notifications only require the VM ID to be included
         * in the list, at least once.
         */
        vm_notifications_set(current_vm_locked, is_from_sp, global, 0, false);

        vm_notifications_info_get_pending(current_vm_locked, is_from_sp, ids,
                &ids_count, lists_sizes, &lists_count,
                FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

        /*
         * This test was conceived for the expected MAX_CPUS of 4.
         * All vCPUs of the same VM have pending notifications, which are
         * broken down into 2 lists with 3 vCPU IDs and 1 vCPU ID,
         * respectively. The list of IDs should look like:
         * {<vm_id>, 0, 1, 2, <vm_id>, 3}.
         */
        CHECK(MAX_CPUS == 4);
        EXPECT_EQ(ids_count, 6U);
        EXPECT_EQ(lists_count, 2U);
        EXPECT_EQ(lists_sizes[0], 3);
        EXPECT_EQ(lists_sizes[1], 1);

        for (unsigned int i = 0; i < vcpu_count; i++) {
                got = vm_notifications_get_pending_and_clear(current_vm_locked,
                        is_from_sp, i);

                /*
                 * The first call to vm_notifications_get_pending_and_clear
                 * should also include the global notifications in its return
                 * value.
                 */
                ffa_notifications_bitmap_t to_check =
                        (i != 0) ? FFA_NOTIFICATION_MASK(i)
                                 : FFA_NOTIFICATION_MASK(i) | global;

                EXPECT_EQ(got, to_check);

                EXPECT_EQ(notifications->per_vcpu[i].pending, 0);
                EXPECT_EQ(notifications->per_vcpu[i].info_get_retrieved, 0);
        }

        vm_unlock(&current_vm_locked);
}

/**
 * Validate the state change from 'vm_notifications_info_get_pending' when the
 * list of IDs is full.
 */
TEST_F(vm, vm_notifications_info_get_full_per_vcpu)
{
        struct_vm *current_vm = vm_find_index(0);
        struct vm_locked current_vm_locked = vm_lock(current_vm);
        struct notifications *notifications =
                &current_vm->notifications.from_sp;
        const bool is_from_vm = false;
        ffa_notifications_bitmap_t got = 0;

        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         * 'ids_count' is initialized such that there is no space left in the
         * list for a per-vCPU notification, which needs both a VM ID and a
         * vCPU ID.
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 10;
        enum notifications_info_get_state current_state = INIT;
        CHECK(vm_get_count() >= 2);

        vm_notifications_set(current_vm_locked, is_from_vm,
                FFA_NOTIFICATION_MASK(1), 0, true);

        /* Get the notifications info with only a per-vCPU notification set. */
        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                &ids_count, lists_sizes, &lists_count,
                FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

        /*
         * Verify that as soon as there is no space for the required insertion
         * in the list, 'vm_notifications_info_get_pending' returns and
         * changes the list state to FULL.
         * In this case it returns because it would need to add two IDs (the
         * VM ID and the vCPU ID).
         */
        EXPECT_EQ(current_state, FULL);
        EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1);
        EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);

        /*
         * At this point there is still room for the information of a global
         * notification (only the VM ID needs to be added). Reset
         * 'current_state' so the insertion happens at the last position of
         * the array.
         */
        current_state = INIT;

        /* Set a global notification. */
        vm_notifications_set(current_vm_locked, is_from_vm,
                FFA_NOTIFICATION_MASK(2), 0, false);

        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                &ids_count, lists_sizes, &lists_count,
                FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

        /*
         * Now the list must be full, the set global notification must be part
         * of 'info_get_retrieved', and 'current_state' should be set to FULL
         * due to the pending per-vCPU notification on vCPU 0.
         */
        EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
        EXPECT_EQ(current_state, FULL);
        EXPECT_EQ(notifications->global.info_get_retrieved,
                FFA_NOTIFICATION_MASK(2));

        got = vm_notifications_get_pending_and_clear(current_vm_locked,
                is_from_vm, 0);
        EXPECT_EQ(got, FFA_NOTIFICATION_MASK(1) | FFA_NOTIFICATION_MASK(2));

        vm_unlock(&current_vm_locked);
}

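/**
 * Validate the state change from 'vm_notifications_info_get_pending' when the
 * list of IDs is already full and only a global notification is pending.
 */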
TEST_F(vm, vm_notifications_info_get_full_global)
{
        struct_vm *current_vm = vm_find_index(0);
        struct vm_locked current_vm_locked = vm_lock(current_vm);
        ffa_notifications_bitmap_t got;
        struct notifications *notifications;
        const bool is_from_vm = false;
        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         * 'ids_count' is initialized such that there is no space left in the
         * list, not even for a global notification (VM ID only).
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 10;
        enum notifications_info_get_state current_state = INIT;

        CHECK(vm_get_count() >= 1);

        current_vm = vm_find_index(0);

        notifications = &current_vm->notifications.from_sp;

        /* Set a global notification. */
        vm_notifications_set(current_vm_locked, is_from_vm,
                FFA_NOTIFICATION_MASK(10), 0, false);

        /* Get notifications info for the given notifications. */
        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                &ids_count, lists_sizes, &lists_count,
                FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

        /*
         * Expect the 'info_get_retrieved' bitmap to still be 0: the list was
         * already full, so the notification remains pending.
         */
        EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
        EXPECT_EQ(notifications->global.pending, FFA_NOTIFICATION_MASK(10));
        EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
        EXPECT_EQ(current_state, FULL);

        got = vm_notifications_get_pending_and_clear(current_vm_locked,
                is_from_vm, 0);
        EXPECT_EQ(got, FFA_NOTIFICATION_MASK(10));

        vm_unlock(&current_vm_locked);
}

} /* namespace */