blob: cfe468618773ea801d22a580754743eb51dd004b [file] [log] [blame]
Andrew Scull3c257452019-11-26 13:32:50 +00001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Scull3c257452019-11-26 13:32:50 +00007 */
8
9#include <gmock/gmock.h>
10
11extern "C" {
Daniel Boulby84350712021-11-26 11:13:20 +000012#include "hf/check.h"
J-Alves67f5ba32024-09-27 18:07:11 +010013#include "hf/list.h"
Andrew Scull3c257452019-11-26 13:32:50 +000014#include "hf/mpool.h"
Madhukar Pappireddya067dc12024-10-16 22:20:44 -050015#include "hf/timer_mgmt.h"
Andrew Scull3c257452019-11-26 13:32:50 +000016#include "hf/vm.h"
17}
18
J-Alvesb37fd082020-10-22 12:29:21 +010019#include <list>
Andrew Scull3c257452019-11-26 13:32:50 +000020#include <memory>
21#include <span>
22#include <vector>
23
24#include "mm_test.hh"
25
26namespace
27{
28using namespace ::std::placeholders;
29
30using ::testing::AllOf;
31using ::testing::Each;
32using ::testing::SizeIs;
33
34using struct_vm = struct vm;
Olivier Deprez181074b2023-02-02 14:53:23 +010035using struct_vcpu = struct vcpu;
J-Alves96f6e292021-06-08 17:32:40 +010036using struct_vm_locked = struct vm_locked;
Andrew Scull3c257452019-11-26 13:32:50 +000037
Olivier Deprezd5a54892023-02-02 16:45:59 +010038constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
Andrew Scull3c257452019-11-26 13:32:50 +000039const int TOP_LEVEL = arch_mm_stage2_max_level();
40
/*
 * Test fixture shared by every "vm" test case in this file.
 *
 * The heap backing the page-table memory pool is allocated once (test_heap is
 * static), so state created by earlier test cases — VMs registered via
 * vm_init_next() — is visible to later ones; the tests are order-dependent by
 * design (see the comments inside vm_boot_order).
 */
class vm : public ::testing::Test
{
       protected:
	/* Backing storage for ppool; allocated by the first SetUp() only. */
	static std::unique_ptr<uint8_t[]> test_heap;

	/* Page-table allocator handed to vm_init_next()/mm_vm_init(). */
	struct mpool ppool;

	void SetUp() override
	{
		/*
		 * NOTE(review): ppool is a per-fixture member, yet it is only
		 * initialized on the first SetUp() call (while the static
		 * test_heap is still null). Later fixture instances appear to
		 * rely on never allocating from their own ppool — confirm this
		 * is intended.
		 */
		if (!test_heap) {
			/*
			 * TODO: replace with direct use of stdlib allocator so
			 * sanitizers are more effective.
			 */
			test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
			mpool_init(&ppool, sizeof(struct mm_page_table));
			mpool_add_chunk(&ppool, test_heap.get(),
					TEST_HEAP_SIZE);
		}
	}

       public:
	/* Comparator for std::list::sort: ascending "boot_order". */
	static bool BootOrderSmallerThan(struct_vm *vm1, struct_vm *vm2)
	{
		return vm1->boot_order < vm2->boot_order;
	}
};
68
/* Definition of the fixture's lazily-allocated, test-suite-wide heap. */
std::unique_ptr<uint8_t[]> vm::test_heap;
70
Andrew Scull3c257452019-11-26 13:32:50 +000071/**
72 * If nothing is mapped, unmapping the hypervisor has no effect.
73 */
74TEST_F(vm, vm_unmap_hypervisor_not_mapped)
75{
76 struct_vm *vm;
77 struct vm_locked vm_locked;
78
Olivier Deprez878bd5b2021-04-15 19:05:10 +020079 /* TODO: check ptable usage (security state?) */
Madhukar Pappireddy070f49e2024-01-12 13:02:27 -060080 EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false, 0));
Andrew Scull3c257452019-11-26 13:32:50 +000081 vm_locked = vm_lock(vm);
Raghu Krishnamurthy0132b512021-02-03 14:13:26 -080082 ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
Andrew Scull3c257452019-11-26 13:32:50 +000083 EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
84 EXPECT_THAT(
85 mm_test::get_ptable(vm->ptable),
86 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
87 mm_vm_fini(&vm->ptable, &ppool);
88 vm_unlock(&vm_locked);
89}
90
/**
 * Validate the "boot_list" is created properly, according to vm's "boot_order"
 * field.
 */
TEST_F(vm, vm_boot_order)
{
	struct_vm *vm_cur;
	struct_vm *vm;
	std::list<struct_vm *> expected_final_order;

	/*
	 * Insertion when no call to "vcpu_update_boot" has been made yet.
	 * The "boot_list" is expected to be empty.
	 */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
	vm_cur->boot_order = 3;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	/* The sole inserted VM is necessarily the boot VM. */
	EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);

	/*
	 * Insertion at the head of the boot list: boot_order 1 precedes the
	 * previously inserted boot_order 3.
	 */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
	vm_cur->boot_order = 1;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);

	/* Insertion of two in the middle of the boot list. */
	for (uint32_t i = 0; i < 2; i++) {
		EXPECT_TRUE(vm_init_next(MAX_CPUS, &ppool, &vm_cur, false, 0));
		vm_cur->boot_order = 2;
		vm_update_boot(vm_cur);
		expected_final_order.push_back(vm_cur);
	}

	/*
	 * Insertion at the end of the list.
	 * This test shares data with "vm_unmap_hypervisor_not_mapped".
	 * As such, a VM is expected to have been initialized before this
	 * test, with ID 1 and boot_order 0.
	 */
	vm_cur = vm_find(1);
	EXPECT_FALSE(vm_cur == NULL);
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	/*
	 * The number of VMs initialized should be the same as in
	 * "expected_final_order", before the final verification.
	 */
	EXPECT_EQ(expected_final_order.size(), vm_get_count())
		<< "Something went wrong with the test itself...\n";

	/* Sort VMs from lower to higher "boot_order" field. */
	expected_final_order.sort(vm::BootOrderSmallerThan);

	/* Walk the hypervisor's boot list and compare it, VM by VM. */
	std::list<struct_vm *>::iterator it;
	vm = vm_get_boot_vm();
	for (it = expected_final_order.begin();
	     it != expected_final_order.end(); it++) {
		EXPECT_TRUE(vm != NULL);
		EXPECT_EQ((*it)->id, vm->id);
		vm = vm_get_next_boot(vm);
	}
}
J-Alves60eaff92021-05-27 14:54:41 +0100158
/**
 * Exercise the per-CPU arch-timer tracking lists: insertion, nearest-deadline
 * lookup, removal (timer disabled), and migration of a vCPU between CPUs.
 */
TEST_F(vm, vcpu_arch_timer)
{
	const cpu_id_t cpu_ids[2] = {0, 1};
	struct_vcpu *vm0_vcpu;
	struct_vcpu *vm1_vcpu;
	struct_vcpu *deadline_vcpu;
	struct_vcpu *target_vcpu;
	struct vcpu_locked vcpu_locked;
	struct cpu *cpu0;
	struct cpu *cpu1;

	/* Initialize CPU module with two physical CPUs. */
	cpu_module_init(cpu_ids, 2);
	cpu0 = cpu_find_index(0);
	cpu1 = cpu_find_index(1);

	/* Two UP endpoints are deployed for this test. */
	CHECK(vm_get_count() >= 2);
	vm0_vcpu = vm_get_vcpu(vm_find_index(0), 0);
	vm1_vcpu = vm_get_vcpu(vm_find_index(1), 0);

	/* The execution context of each VM is scheduled on CPU0. */
	vm0_vcpu->cpu = cpu0;
	vm1_vcpu->cpu = cpu0;

	/*
	 * Enable the timer peripheral for each vCPU and set up an arbitrary
	 * countdown value. VM0's deadline (cval) is the nearer of the two.
	 */
	vm0_vcpu->regs.arch_timer.cval = 555555;
	vm1_vcpu->regs.arch_timer.cval = 999999;
	vm0_vcpu->regs.arch_timer.ctl = 1;
	vm1_vcpu->regs.arch_timer.ctl = 1;

	/* No vCPU is being tracked through either timer list yet. */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_TRUE(deadline_vcpu == NULL);
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu1);
	EXPECT_TRUE(deadline_vcpu == NULL);

	/* vCPUs of VM0 and VM1 are added to the CPU0 list. */
	timer_vcpu_manage(vm0_vcpu);
	timer_vcpu_manage(vm1_vcpu);

	/* VM0's vCPU has the smaller cval, hence the nearest deadline. */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_EQ(deadline_vcpu, vm0_vcpu);

	/* Disabling the timer removes VM0's vCPU from the CPU0 list. */
	vm0_vcpu->regs.arch_timer.cval = 0;
	vm0_vcpu->regs.arch_timer.ctl = 0;
	timer_vcpu_manage(vm0_vcpu);

	/* This leaves one vCPU entry on the CPU0 list. */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_EQ(deadline_vcpu, vm1_vcpu);

	/* Attempt to migrate VM1's vCPU from CPU0 to CPU1. */
	vcpu_locked = vcpu_lock(vm1_vcpu);
	timer_migrate_to_other_cpu(cpu1, vcpu_locked);
	vcpu_unlock(&vcpu_locked);

	/*
	 * After migration, ensure the list is empty on CPU0 but non-empty on
	 * CPU1.
	 */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_TRUE(deadline_vcpu == NULL);

	/*
	 * vCPU of VM1 is now running on CPU1. It must be the target vCPU when
	 * the timer has expired.
	 */
	target_vcpu = timer_find_target_vcpu(vm1_vcpu);
	EXPECT_EQ(target_vcpu, vm1_vcpu);
}
234
J-Alves60eaff92021-05-27 14:54:41 +0100235/**
236 * Validates updates and check functions for binding notifications to endpoints.
237 */
238TEST_F(vm, vm_notifications_bind_diff_senders)
239{
J-Alvesd3e81622021-10-05 14:55:57 +0100240 struct_vm *current_vm = nullptr;
241 struct vm_locked current_vm_locked;
J-Alves60eaff92021-05-27 14:54:41 +0100242 std::vector<struct_vm *> dummy_senders;
243 ffa_notifications_bitmap_t bitmaps[] = {
244 0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
245 bool is_from_vm = true;
246
247 /* For the subsequent tests three VMs are used. */
248 CHECK(vm_get_count() >= 3);
249
J-Alvesd3e81622021-10-05 14:55:57 +0100250 current_vm = vm_find_index(0);
J-Alves60eaff92021-05-27 14:54:41 +0100251
252 dummy_senders.push_back(vm_find_index(1));
253 dummy_senders.push_back(vm_find_index(2));
254
J-Alvesd3e81622021-10-05 14:55:57 +0100255 current_vm_locked = vm_lock(current_vm);
J-Alves60eaff92021-05-27 14:54:41 +0100256
257 for (unsigned int i = 0; i < 2; i++) {
258 /* Validate bindings condition after initialization. */
259 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100260 current_vm_locked, is_from_vm, HF_INVALID_VM_ID,
261 bitmaps[i], false));
J-Alves60eaff92021-05-27 14:54:41 +0100262
263 /*
264 * Validate bind related operations. For this test considering
265 * only global notifications.
266 */
J-Alvesd3e81622021-10-05 14:55:57 +0100267 vm_notifications_update_bindings(current_vm_locked, is_from_vm,
J-Alves60eaff92021-05-27 14:54:41 +0100268 dummy_senders[i]->id,
269 bitmaps[i], false);
270
271 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100272 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100273 bitmaps[i], false));
274
275 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100276 current_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100277 bitmaps[i], false));
278
279 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100280 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100281 bitmaps[1 - i], false));
282
283 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100284 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100285 bitmaps[2], false));
286 }
287
288 /** Clean up bind for other tests. */
J-Alvesd3e81622021-10-05 14:55:57 +0100289 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
J-Alves60eaff92021-05-27 14:54:41 +0100290 bitmaps[0], false);
J-Alvesd3e81622021-10-05 14:55:57 +0100291 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
J-Alves60eaff92021-05-27 14:54:41 +0100292 bitmaps[1], false);
293
J-Alvesd3e81622021-10-05 14:55:57 +0100294 vm_unlock(&current_vm_locked);
J-Alves60eaff92021-05-27 14:54:41 +0100295}
296
/**
 * Validates updates and check functions for binding notifications, namely the
 * configuration of bindings of global and per-vCPU notifications.
 */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	/* Lower 32 bits bound as global, upper 32 bits bound as per-vCPU. */
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);

	dummy_sender = vm_find_index(1);

	current_vm_locked = vm_lock(current_vm);

	/* Bind one half as global and the other half as per-vCPU. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/* Check validation of global notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global,
		false));

	/* Check validation of per-vCPU notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		true));

	/*
	 * Check that global notifications are not validated as per-vCPU, and
	 * vice-versa; mixing the two bitmaps must fail in either mode.
	 */
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		false));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, false));

	/* Undo the bindings so later tests start from a clean state. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 global, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, global, false));

	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 per_vcpu, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, per_vcpu, false));

	vm_unlock(&current_vm_locked);
}
362
/**
 * Validates accesses to notifications bitmaps: setting/getting pending
 * notifications (global and per-vCPU) and the global pending counter.
 */
TEST_F(vm, vm_notifications_set_and_get)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	/* Lower 32 bits bound as global, upper 32 bits bound as per-vCPU. */
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	ffa_notifications_bitmap_t ret;
	const unsigned int vcpu_idx = 0;
	struct notifications *notifications;
	const bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);
	dummy_sender = vm_find_index(1);

	/* Inspect the receiver's "from VM" notification state directly. */
	notifications = &current_vm->notifications.from_vm;
	current_vm_locked = vm_lock(current_vm);

	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/*
	 * Validate get notifications bitmap for global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);

	EXPECT_EQ(notifications->global.pending, global);

	/* Counter should track pending notifications. */
	EXPECT_FALSE(vm_is_notifications_pending_count_zero());

	/* Getting the notifications returns and clears the pending bits. */
	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0ull);
	EXPECT_EQ(ret, global);
	EXPECT_EQ(notifications->global.pending, 0ull);

	/*
	 * After getting the pending notifications, the pending count should
	 * be zeroed.
	 */
	EXPECT_TRUE(vm_is_notifications_pending_count_zero());

	/*
	 * Validate get notifications bitmap for per-vCPU notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	/*
	 * Duplicate call to check that the state of the counters doesn't alter
	 * because of it.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	EXPECT_FALSE(vm_is_notifications_pending_count_zero());

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
	EXPECT_TRUE(vm_is_notifications_pending_count_zero());

	/*
	 * Validate that getting notifications for a specific vCPU also returns
	 * global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);
	EXPECT_FALSE(vm_is_notifications_pending_count_zero());

	/* Both the per-vCPU and the global bits come back in one get. */
	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu | global);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
	EXPECT_EQ(notifications->global.pending, 0ull);
	EXPECT_TRUE(vm_is_notifications_pending_count_zero());

	/* Undo the bindings so later tests start from a clean state. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 per_vcpu, true);
	vm_unlock(&current_vm_locked);
}
459
/**
 * Validates simple getting of notifications info for global notifications.
 */
TEST_F(vm, vm_notifications_info_get_global)
{
	ffa_notifications_bitmap_t to_set = 0xFU;
	ffa_notifications_bitmap_t got;

	/*
	 * Following set of variables are also expected to be used when
	 * handling FFA_NOTIFICATION_INFO_GET.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 2);

	for (unsigned int i = 0; i < 2; i++) {
		/*
		 * NOTE(review): both iterations operate on vm_find_index(0);
		 * the "for each VM" comments below suggest vm_find_index(i)
		 * may have been intended — confirm. The counts still hold
		 * either way, since each iteration appends a new list entry.
		 */
		struct_vm *current_vm = vm_find_index(0);
		struct vm_locked current_vm_locked = vm_lock(current_vm);
		struct notifications *notifications =
			&current_vm->notifications.from_sp;
		const bool is_from_vm = false;

		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_vm, to_set, 0, false);

		vm_notifications_info_get_pending(
			current_vm_locked, is_from_vm, ids, &ids_count,
			lists_sizes, &lists_count,
			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

		/*
		 * Here the number of IDs and list count should be the same.
		 * As we are testing with Global notifications, this is
		 * expected.
		 */
		EXPECT_EQ(ids_count, i + 1);
		EXPECT_EQ(lists_count, i + 1);
		EXPECT_EQ(lists_sizes[i], 0);
		EXPECT_EQ(to_set, notifications->global.info_get_retrieved);

		/* Action must be reset to initial state for each VM. */
		current_state = INIT;

		/*
		 * Check that getting pending notifications gives the expected
		 * return and cleans the 'pending' and 'info_get_retrieved'
		 * bitmaps.
		 */
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_vm, 0);
		EXPECT_EQ(got, to_set);

		EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
		EXPECT_EQ(notifications->global.pending, 0U);

		vm_unlock(&current_vm_locked);
	}
}
523
/**
 * Validates simple getting of notifications info for per-vCPU notifications.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu)
{
	const ffa_notifications_bitmap_t per_vcpu = 0xFU;
	ffa_notifications_bitmap_t got;

	/*
	 * Following set of variables are also expected to be used when
	 * handling ffa_notification_info_get.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 2);

	for (unsigned int i = 0; i < 2; i++) {
		/*
		 * NOTE(review): both iterations use vm_find_index(0), as in
		 * vm_notifications_info_get_global; presumably the intent was
		 * one VM per iteration — confirm.
		 */
		struct_vm *current_vm = vm_find_index(0);
		struct vm_locked current_vm_locked = vm_lock(current_vm);
		struct notifications *notifications =
			&current_vm->notifications.from_sp;
		const bool is_from_vm = false;

		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_vm, per_vcpu, 0, true);

		vm_notifications_info_get_pending(
			current_vm_locked, is_from_vm, ids, &ids_count,
			lists_sizes, &lists_count,
			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

		/*
		 * A per-vCPU notification contributes two IDs (VM ID and vCPU
		 * ID) per iteration, but only one list per iteration.
		 */
		EXPECT_EQ(ids_count, (i + 1) * 2);
		EXPECT_EQ(lists_count, i + 1);
		EXPECT_EQ(lists_sizes[i], 1);
		EXPECT_EQ(per_vcpu,
			  notifications->per_vcpu[0].info_get_retrieved);

		/* Action must be reset to initial state for each VM. */
		current_state = INIT;

		/*
		 * Check that getting pending notifications gives the expected
		 * return and cleans the 'pending' and 'info_get_retrieved'
		 * bitmaps.
		 */
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_vm, 0);
		EXPECT_EQ(got, per_vcpu);

		EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
		EXPECT_EQ(notifications->per_vcpu[0].pending, 0U);

		vm_unlock(&current_vm_locked);
	}
}
588
/**
 * Validate getting of notifications information if all VCPUs have notifications
 * pending.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus)
{
	struct_vm *current_vm = nullptr;
	struct vm_locked current_vm_locked;
	const ffa_vcpu_count_t vcpu_count = MAX_CPUS;
	ffa_notifications_bitmap_t got;
	const ffa_notifications_bitmap_t global = 0xF0000;

	/*
	 * Following set of variables are also expected to be used when
	 * handling ffa_notification_info_get.
	 */
	struct notifications *notifications;
	const bool is_from_sp = false;
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	/* Create a fresh VM with one vCPU per physical CPU. */
	EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false, 0));
	current_vm_locked = vm_lock(current_vm);
	notifications = &current_vm->notifications.from_sp;

	/* Pend a distinct per-vCPU notification on every vCPU. */
	for (unsigned int i = 0; i < vcpu_count; i++) {
		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_sp, FFA_NOTIFICATION_MASK(i),
			i, true);
	}

	/*
	 * Adding a global notification should not change the list of IDs,
	 * because global notifications only require the VM ID to be included in
	 * the list, at least once.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_sp,
					       global, 0, false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_sp, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * This test has been conceived for the expected MAX_CPUS 4.
	 * All VCPUs have notifications of the same VM, to be broken down in 2
	 * lists with 3 VCPU IDs, and 1 VCPU ID respectively.
	 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
	 */
	CHECK(MAX_CPUS == 4);
	EXPECT_EQ(ids_count, 6U);
	EXPECT_EQ(lists_count, 2U);
	EXPECT_EQ(lists_sizes[0], 3);
	EXPECT_EQ(lists_sizes[1], 1);

	for (unsigned int i = 0; i < vcpu_count; i++) {
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_sp, i);

		/*
		 * The first call to
		 * vm_notifications_partition_get_pending should also
		 * include the global notifications on the return.
		 */
		ffa_notifications_bitmap_t to_check =
			(i != 0) ? FFA_NOTIFICATION_MASK(i)
				 : FFA_NOTIFICATION_MASK(i) | global;

		EXPECT_EQ(got, to_check);

		EXPECT_EQ(notifications->per_vcpu[i].pending, 0);
		EXPECT_EQ(notifications->per_vcpu[i].info_get_retrieved, 0);
	}

	vm_unlock(&current_vm_locked);
}
669
/**
 * Validate change of state from 'vm_notifications_info_get_pending', when the
 * list of IDs is full.
 */
TEST_F(vm, vm_notifications_info_get_full_per_vcpu)
{
	struct_vm *current_vm = vm_find_index(0);
	struct vm_locked current_vm_locked = vm_lock(current_vm);
	struct notifications *notifications =
		&current_vm->notifications.from_sp;
	const bool is_from_vm = false;
	ffa_notifications_bitmap_t got = 0;

	/*
	 * Following set of variables are also expected to be used when
	 * handling ffa_notification_info_get.
	 * 'ids_count' has been initialized such that it indicates there is no
	 * space in the list for a per-vCPU notification (which needs two
	 * entries: VM ID and VCPU ID), but still room for one more entry.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 10;
	enum notifications_info_get_state current_state = INIT;
	CHECK(vm_get_count() >= 2);

	/* Pend a per-vCPU notification on vCPU 0. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(1), 0,
					       true);

	/* Call function to get notifications info, with only per-vCPU set. */
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Verify that as soon as there isn't space to do the required
	 * insertion in the list, the
	 * 'vm_notifications_partition_get_pending' returns and changes
	 * list state to FULL. In this case returning, because it would need to
	 * add two IDs (VM ID and VCPU ID).
	 */
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1);
	EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);

	/*
	 * At this point there is still room for the information of a global
	 * notification (only VM ID to be added). Reset 'current_state'
	 * for the insertion to happen at the last position of the array.
	 */
	current_state = INIT;

	/* Setting global notification. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(2), 0,
					       false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Now List must be full, the set global notification must be part of
	 * 'info_get_retrieved', and the 'current_state' should be set to FULL
	 * due to the pending per-vCPU notification in VCPU 0.
	 */
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(notifications->global.info_get_retrieved,
		  FFA_NOTIFICATION_MASK(2));

	/* Both the per-vCPU and the global notification are still pending. */
	got = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0);
	EXPECT_EQ(got, FFA_NOTIFICATION_MASK(1) | FFA_NOTIFICATION_MASK(2));

	vm_unlock(&current_vm_locked);
}
751
752TEST_F(vm, vm_notifications_info_get_full_global)
753{
754 struct_vm *current_vm = vm_find_index(0);
755 struct vm_locked current_vm_locked = vm_lock(current_vm);
756 ffa_notifications_bitmap_t got;
757 struct notifications *notifications;
758 const bool is_from_vm = false;
759 /*
760 * Following set of variables that are also expected to be used when
761 * handling ffa_notification_info_get.
762 * For this 'ids_count' has been initialized such that it indicates
763 * there is no space in the list for a global notification (VM ID only).
764 */
765 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
766 uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
767 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
768 uint32_t lists_count = 10;
769 enum notifications_info_get_state current_state = INIT;
770
771 CHECK(vm_get_count() >= 1);
772
773 current_vm = vm_find_index(0);
774
775 notifications = &current_vm->notifications.from_sp;
776
777 /* Set global notification. */
J-Alves5a16c962022-03-25 12:32:51 +0000778 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
779 FFA_NOTIFICATION_MASK(10), 0,
780 false);
J-Alves96f6e292021-06-08 17:32:40 +0100781
782 /* Get notifications info for the given notifications. */
783 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
784 &ids_count, lists_sizes, &lists_count,
785 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
786 &current_state);
787
788 /* Expect 'info_get_retrieved' bitmap to be 0. */
789 EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
790 EXPECT_EQ(notifications->global.pending, FFA_NOTIFICATION_MASK(10));
791 EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
792 EXPECT_EQ(current_state, FULL);
793
J-Alves5136dda2022-03-25 12:26:38 +0000794 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100795 is_from_vm, 0);
J-Alves9f74b932021-10-11 14:20:05 +0100796 EXPECT_EQ(got, FFA_NOTIFICATION_MASK(10));
797
J-Alves96f6e292021-06-08 17:32:40 +0100798 vm_unlock(&current_vm_locked);
799}
800
J-Alvesf31940e2022-03-25 17:24:00 +0000801TEST_F(vm, vm_notifications_info_get_from_framework)
802{
803 struct vm_locked vm_locked = vm_lock(vm_find_index(0));
804 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
805 uint32_t ids_count = 0;
806 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
807 uint32_t lists_count = 0;
808
809 vm_notifications_framework_set_pending(vm_locked, 0x1U);
810
811 /* Get notifications info for the given notifications. */
812 vm_notifications_info_get(vm_locked, ids, &ids_count, lists_sizes,
813 &lists_count,
814 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
815
816 EXPECT_EQ(ids[0], vm_locked.vm->id);
817 EXPECT_EQ(ids_count, 1);
818 EXPECT_EQ(lists_sizes[0], 0);
819 EXPECT_EQ(lists_count, 1);
820
821 EXPECT_EQ(vm_notifications_framework_get_pending(vm_locked), 0x1U);
822
823 vm_unlock(&vm_locked);
824}
825
Daniel Boulby8be26512024-09-03 19:41:11 +0100826/**
827 * Validates simple getting of notifications info for pending IPI.
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000828 * Also checks that vCPUs with pending IPIs are only reported if the
829 * vCPU is in the waiting state.
Daniel Boulby8be26512024-09-03 19:41:11 +0100830 */
831TEST_F(vm, vm_notifications_info_get_ipi)
832{
833 /*
834 * Following set of variables that are also expected to be used when
835 * handling ffa_notification_info_get.
836 */
837 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
838 uint32_t ids_count = 0;
839 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
840 uint32_t lists_count = 0;
841 enum notifications_info_get_state current_state = INIT;
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000842 struct_vm *current_vm = vm_find_index(4);
Daniel Boulby8be26512024-09-03 19:41:11 +0100843 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000844 struct vcpu_locked vcpu_locked;
Daniel Boulby8be26512024-09-03 19:41:11 +0100845 const bool is_from_vm = false;
846 struct vm_locked current_vm_locked = vm_lock(current_vm);
847
848 EXPECT_TRUE(current_vm->vcpu_count >= 2);
849
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000850 vcpu_locked = vcpu_lock(target_vcpu);
851 vcpu_virt_interrupt_inject(vcpu_locked, HF_IPI_INTID);
852 vcpu_unlock(&vcpu_locked);
Daniel Boulby8be26512024-09-03 19:41:11 +0100853
854 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
855 &ids_count, lists_sizes, &lists_count,
856 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
857 &current_state);
858
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000859 EXPECT_EQ(ids_count, 0);
860 EXPECT_EQ(lists_count, 0);
861
862 target_vcpu->state = VCPU_STATE_WAITING;
863
864 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
865 &ids_count, lists_sizes, &lists_count,
866 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
867 &current_state);
868
Daniel Boulby8be26512024-09-03 19:41:11 +0100869 EXPECT_EQ(ids_count, 2);
870 EXPECT_EQ(lists_count, 1);
871 EXPECT_EQ(lists_sizes[0], 1);
872 EXPECT_EQ(ids[0], current_vm->id);
873 EXPECT_EQ(ids[1], 1);
874 EXPECT_EQ(target_vcpu->ipi_info_get_retrieved, true);
875
876 /* Check it is not retrieved multiple times. */
877 current_state = INIT;
878 ids[0] = 0;
879 ids[1] = 0;
880 ids_count = 0;
881 lists_sizes[0] = 0;
882 lists_count = 0;
883
884 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
885 &ids_count, lists_sizes, &lists_count,
886 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
887 &current_state);
888 EXPECT_EQ(ids_count, 0);
889 EXPECT_EQ(lists_count, 0);
890 EXPECT_EQ(lists_sizes[0], 0);
891
892 vm_unlock(&current_vm_locked);
893}
894
895/**
896 * Validates simple getting of notifications info for pending with IPI when
897 * notification for the same vcpu is also pending.
898 */
899TEST_F(vm, vm_notifications_info_get_ipi_with_per_vcpu)
900{
901 /*
902 * Following set of variables that are also expected to be used when
903 * handling ffa_notification_info_get.
904 */
905 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
906 uint32_t ids_count = 0;
907 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
908 uint32_t lists_count = 0;
909 enum notifications_info_get_state current_state = INIT;
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000910 struct_vm *current_vm = vm_find_index(4);
Daniel Boulby8be26512024-09-03 19:41:11 +0100911 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000912 struct vcpu_locked vcpu_locked;
Daniel Boulby8be26512024-09-03 19:41:11 +0100913 const bool is_from_vm = false;
914 struct vm_locked current_vm_locked = vm_lock(current_vm);
915
916 EXPECT_TRUE(current_vm->vcpu_count >= 2);
917
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000918 vcpu_locked = vcpu_lock(target_vcpu);
919 vcpu_virt_interrupt_inject(vcpu_locked, HF_IPI_INTID);
920 vcpu_unlock(&vcpu_locked);
Daniel Boulby8be26512024-09-03 19:41:11 +0100921
922 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
923 true, 1, true);
924 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
925 &ids_count, lists_sizes, &lists_count,
926 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
927 &current_state);
928
929 EXPECT_EQ(ids_count, 2);
930 EXPECT_EQ(lists_count, 1);
931 EXPECT_EQ(lists_sizes[0], 1);
932 EXPECT_EQ(ids[0], current_vm->id);
933 EXPECT_EQ(ids[1], 1);
934 EXPECT_EQ(target_vcpu->ipi_info_get_retrieved, true);
935
936 /* Reset the state and values. */
937 current_state = INIT;
938 ids[0] = 0;
939 ids[1] = 0;
940 ids_count = 0;
941 lists_sizes[0] = 0;
942 lists_count = 0;
943
944 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
945 &ids_count, lists_sizes, &lists_count,
946 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
947 &current_state);
948 EXPECT_EQ(ids_count, 0);
949 EXPECT_EQ(lists_count, 0);
950 EXPECT_EQ(lists_sizes[0], 0);
951
952 vm_unlock(&current_vm_locked);
953}
954
955/**
956 * Validate that a mix of a pending IPI and notifcations are correctly
957 * reported across vcpus.
958 */
959TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus_and_ipi)
960{
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000961 struct_vm *current_vm = vm_find_index(4);
Daniel Boulby8be26512024-09-03 19:41:11 +0100962 ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;
963 CHECK(vcpu_count > 1);
964
965 struct vm_locked current_vm_locked = vm_lock(current_vm);
966
967 /*
968 * Following set of variables that are also expected to be used when
969 * handling ffa_notification_info_get.
970 */
971 const bool is_from_vm = false;
972 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
973 uint32_t ids_count = 0;
974 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
975 uint32_t lists_count = 0;
976 enum notifications_info_get_state current_state = INIT;
977 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 0);
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000978 struct vcpu_locked vcpu_locked;
Daniel Boulby8be26512024-09-03 19:41:11 +0100979
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000980 target_vcpu->state = VCPU_STATE_WAITING;
981
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000982 vcpu_locked = vcpu_lock(target_vcpu);
983 vcpu_virt_interrupt_inject(vcpu_locked, HF_IPI_INTID);
984 vcpu_unlock(&vcpu_locked);
Daniel Boulby8be26512024-09-03 19:41:11 +0100985
986 for (unsigned int i = 1; i < vcpu_count; i++) {
987 vm_notifications_partition_set_pending(
988 current_vm_locked, is_from_vm, FFA_NOTIFICATION_MASK(i),
989 i, true);
990 }
991
992 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
993 &ids_count, lists_sizes, &lists_count,
994 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
995 &current_state);
996
997 /*
998 * This test has been conceived for the expected MAX_CPUS 4.
999 * All VCPUs have notifications of the same VM, to be broken down in 2
1000 * lists with 3 VCPU IDs, and 1 VCPU ID respectively.
1001 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
1002 */
1003 EXPECT_EQ(ids_count, 6U);
1004 EXPECT_EQ(lists_count, 2U);
1005 EXPECT_EQ(lists_sizes[0], 3);
1006 EXPECT_EQ(lists_sizes[1], 1);
1007 EXPECT_EQ(ids[0], current_vm->id);
1008 EXPECT_EQ(ids[1], 0);
1009 EXPECT_EQ(ids[2], 1);
1010 EXPECT_EQ(ids[3], 2);
1011 EXPECT_EQ(ids[4], current_vm->id);
1012 EXPECT_EQ(ids[5], 3);
1013
1014 vm_unlock(&current_vm_locked);
1015}
Andrew Scull3c257452019-11-26 13:32:50 +00001016} /* namespace */