/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/check.h"
#include "hf/list.h"
#include "hf/mpool.h"
#include "hf/timer_mgmt.h"
#include "hf/vm.h"
}

#include <list>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;

using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
const int TOP_LEVEL = arch_mm_stage2_max_level();

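/*
 * Test fixture for the VM module unit tests. The heap backing the page-table
 * memory pool is allocated once, on the first SetUp() call, and shared by the
 * whole suite.
 */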
class vm : public ::testing::Test
{
       protected:
        static std::unique_ptr<uint8_t[]> test_heap;

        struct mpool ppool;

        void SetUp() override
        {
                if (!test_heap) {
                        /*
                         * TODO: replace with direct use of stdlib allocator so
                         * sanitizers are more effective.
                         */
                        test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
                        mpool_init(&ppool, sizeof(struct mm_page_table));
                        mpool_add_chunk(&ppool, test_heap.get(),
                                        TEST_HEAP_SIZE);
                }
        }

       public:
        static bool BootOrderSmallerThan(struct_vm *vm1, struct_vm *vm2)
        {
                return vm1->boot_order < vm2->boot_order;
        }
};

std::unique_ptr<uint8_t[]> vm::test_heap;

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(vm, vm_unmap_hypervisor_not_mapped)
{
        struct_vm *vm;
        struct vm_locked vm_locked;

        /* TODO: check ptable usage (security state?) */
        EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false, 0));
        vm_locked = vm_lock(vm);
        ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
        EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
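        /* The 4 root tables should contain only absent entries afterwards. */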
        EXPECT_THAT(
                mm_test::get_ptable(vm->ptable),
                AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
        mm_vm_fini(&vm->ptable, &ppool);
        vm_unlock(&vm_locked);
}

/**
 * Validate that the "boot_list" is created properly, according to the VM's
 * "boot_order" field.
 */
TEST_F(vm, vm_boot_order)
{
        struct_vm *vm_cur;
        struct_vm *vm;
        std::list<struct_vm *> expected_final_order;

        /*
         * Insertion when no call to "vm_update_boot" has been made yet.
         * The "boot_list" is expected to be empty.
         */
        EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
        vm_cur->boot_order = 3;
        vm_update_boot(vm_cur);
        expected_final_order.push_back(vm_cur);

        EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);

        /* Insertion at the head of the boot list. */
        EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
        vm_cur->boot_order = 1;
        vm_update_boot(vm_cur);
        expected_final_order.push_back(vm_cur);

        EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);

        /* Insertion of two in the middle of the boot list. */
        for (uint32_t i = 0; i < 2; i++) {
                EXPECT_TRUE(vm_init_next(MAX_CPUS, &ppool, &vm_cur, false, 0));
                vm_cur->boot_order = 2;
                vm_update_boot(vm_cur);
                expected_final_order.push_back(vm_cur);
        }

        /*
         * Insertion at the end of the list.
         * This test shares data with "vm_unmap_hypervisor_not_mapped".
         * As such, a VM is expected to have been initialized before this
         * test, with ID 1 and boot_order 0.
         */
        vm_cur = vm_find(1);
        EXPECT_FALSE(vm_cur == NULL);
        vm_update_boot(vm_cur);
        expected_final_order.push_back(vm_cur);

        /*
         * The number of VMs initialized should be the same as in
         * "expected_final_order", before the final verification.
         */
        EXPECT_EQ(expected_final_order.size(), vm_get_count())
                << "Something went wrong with the test itself...\n";

        /* Sort VMs from lower to higher "boot_order" field. */
        expected_final_order.sort(vm::BootOrderSmallerThan);

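        /*
         * Traverse the boot list from its head and check that it matches the
         * expected order, sorted by ascending "boot_order".
         */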
        std::list<struct_vm *>::iterator it;
        vm = vm_get_boot_vm();
        for (it = expected_final_order.begin();
             it != expected_final_order.end(); it++) {
                EXPECT_TRUE(vm != NULL);
                EXPECT_EQ((*it)->id, vm->id);
                vm = vm_get_next_boot(vm);
        }
}

TEST_F(vm, vcpu_arch_timer)
{
        const cpu_id_t cpu_ids[2] = {0, 1};
        struct_vcpu *vm0_vcpu;
        struct_vcpu *vm1_vcpu;
        struct_vcpu *deadline_vcpu;
        struct_vcpu *target_vcpu;
        struct vcpu_locked vcpu_locked;
        struct cpu *cpu0;
        struct cpu *cpu1;

        /* Initialize the CPU module with two physical CPUs. */
        cpu_module_init(cpu_ids, 2);
        cpu0 = cpu_find_index(0);
        cpu1 = cpu_find_index(1);

        /* Two UP endpoints are deployed for this test. */
        CHECK(vm_get_count() >= 2);
        vm0_vcpu = vm_get_vcpu(vm_find_index(0), 0);
        vm1_vcpu = vm_get_vcpu(vm_find_index(1), 0);

        /* The execution context of each VM is scheduled on CPU0. */
        vm0_vcpu->cpu = cpu0;
        vm1_vcpu->cpu = cpu0;

        /*
         * Enable the timer peripheral for each vCPU and set up an arbitrary
         * countdown value.
         */
        vm0_vcpu->regs.arch_timer.cval = 555555;
        vm1_vcpu->regs.arch_timer.cval = 999999;
        vm0_vcpu->regs.arch_timer.ctl = 1;
        vm1_vcpu->regs.arch_timer.ctl = 1;
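        /* Bit 0 of CTL is the Arm generic timer ENABLE bit. */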

        /* No vCPU is being tracked through either timer list. */
        deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
        EXPECT_TRUE(deadline_vcpu == NULL);
        deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu1);
        EXPECT_TRUE(deadline_vcpu == NULL);

        /* The vCPUs of VM0 and VM1 are added to the CPU0 list. */
        timer_vcpu_manage(vm0_vcpu);
        timer_vcpu_manage(vm1_vcpu);

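        /* VM0's vCPU has the smaller cval and hence the nearest deadline. */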
        deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
        EXPECT_EQ(deadline_vcpu, vm0_vcpu);

        /* Remove one of the vCPUs from the CPU0 list. */
        vm0_vcpu->regs.arch_timer.cval = 0;
        vm0_vcpu->regs.arch_timer.ctl = 0;
        timer_vcpu_manage(vm0_vcpu);

        /* This leaves one vCPU entry on the CPU0 list. */
        deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
        EXPECT_EQ(deadline_vcpu, vm1_vcpu);

        /* Attempt to migrate VM1's vCPU from CPU0 to CPU1. */
        vcpu_locked = vcpu_lock(vm1_vcpu);
        timer_migrate_to_other_cpu(cpu1, vcpu_locked);
        vcpu_unlock(&vcpu_locked);

        /*
         * After migration, ensure the list is empty on CPU0 but non-empty on
         * CPU1.
         */
        deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
        EXPECT_TRUE(deadline_vcpu == NULL);

        /*
         * VM1's vCPU is now running on CPU1. It must be the target vCPU when
         * the timer expires.
         */
        target_vcpu = timer_find_target_vcpu(vm1_vcpu);
        EXPECT_EQ(target_vcpu, vm1_vcpu);
}

/**
 * Validates the update and check functions for binding notifications to
 * endpoints.
 */
TEST_F(vm, vm_notifications_bind_diff_senders)
{
        struct_vm *current_vm = nullptr;
        struct vm_locked current_vm_locked;
        std::vector<struct_vm *> dummy_senders;
        ffa_notifications_bitmap_t bitmaps[] = {
                0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
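        /*
         * Note: bitmaps[2] overlaps the other two, so it must never validate
         * against a binding made with either of them.
         */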
        bool is_from_vm = true;

        /* For the subsequent tests three VMs are used. */
        CHECK(vm_get_count() >= 3);

        current_vm = vm_find_index(0);

        dummy_senders.push_back(vm_find_index(1));
        dummy_senders.push_back(vm_find_index(2));

        current_vm_locked = vm_lock(current_vm);

        for (unsigned int i = 0; i < 2; i++) {
                /* Validate the binding conditions after initialization. */
                EXPECT_TRUE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, HF_INVALID_VM_ID,
                        bitmaps[i], false));

                /*
                 * Validate bind-related operations. This test considers only
                 * global notifications.
                 */
                vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                                                 dummy_senders[i]->id,
                                                 bitmaps[i], false);

                EXPECT_TRUE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, dummy_senders[i]->id,
                        bitmaps[i], false));

                EXPECT_FALSE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
                        bitmaps[i], false));

                EXPECT_FALSE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, dummy_senders[i]->id,
                        bitmaps[1 - i], false));

                EXPECT_FALSE(vm_notifications_validate_binding(
                        current_vm_locked, is_from_vm, dummy_senders[i]->id,
                        bitmaps[2], false));
        }

        /* Clean up the bindings for other tests. */
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
                                         bitmaps[0], false);
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
                                         bitmaps[1], false);

        vm_unlock(&current_vm_locked);
}

/**
 * Validates the update and check functions for binding notifications, namely
 * the configuration of bindings of global and per-vCPU notifications.
 */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
        struct_vm *current_vm;
        struct vm_locked current_vm_locked;
        struct_vm *dummy_sender;
        ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
        ffa_notifications_bitmap_t per_vcpu = ~global;
        bool is_from_vm = true;

        CHECK(vm_get_count() >= 2);

        current_vm = vm_find_index(0);

        dummy_sender = vm_find_index(1);

        current_vm_locked = vm_lock(current_vm);

        vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                                         dummy_sender->id, global, false);
        vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                                         dummy_sender->id, per_vcpu, true);

        /* Check validation of global notifications bindings. */
        EXPECT_TRUE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id, global,
                false));

        /* Check validation of per-vCPU notifications bindings. */
        EXPECT_TRUE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
                true));

        /*
         * Check that global notifications are not validated as per-vCPU, and
         * vice-versa.
         */
        EXPECT_FALSE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id, global, true));
        EXPECT_FALSE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
                false));
        EXPECT_FALSE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id,
                global | per_vcpu, true));
        EXPECT_FALSE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, dummy_sender->id,
                global | per_vcpu, false));

        /* Undo the bindings. */
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
                                         global, false);
        EXPECT_TRUE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, 0, global, false));

        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
                                         per_vcpu, false);
        EXPECT_TRUE(vm_notifications_validate_binding(
                current_vm_locked, is_from_vm, 0, per_vcpu, false));

        vm_unlock(&current_vm_locked);
}

/**
 * Validates accesses to the notifications bitmaps.
 */
TEST_F(vm, vm_notifications_set_and_get)
{
        struct_vm *current_vm;
        struct vm_locked current_vm_locked;
        struct_vm *dummy_sender;
        ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
        ffa_notifications_bitmap_t per_vcpu = ~global;
        ffa_notifications_bitmap_t ret;
        const unsigned int vcpu_idx = 0;
        struct notifications *notifications;
        const bool is_from_vm = true;

        CHECK(vm_get_count() >= 2);

        current_vm = vm_find_index(0);
        dummy_sender = vm_find_index(1);

        notifications = &current_vm->notifications.from_vm;
        current_vm_locked = vm_lock(current_vm);

        vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                                         dummy_sender->id, global, false);
        vm_notifications_update_bindings(current_vm_locked, is_from_vm,
                                         dummy_sender->id, per_vcpu, true);

        /*
         * Validate getting the notifications bitmap for global notifications.
         */
        vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
                                               global, 0ull, false);

        EXPECT_EQ(notifications->global.pending, global);

        /* The counter should track pending notifications. */
        EXPECT_FALSE(vm_is_notifications_pending_count_zero());

        ret = vm_notifications_partition_get_pending(current_vm_locked,
                                                     is_from_vm, 0ull);
        EXPECT_EQ(ret, global);
        EXPECT_EQ(notifications->global.pending, 0ull);

        /*
         * After getting the pending notifications, the pending count should
         * be zeroed.
         */
        EXPECT_TRUE(vm_is_notifications_pending_count_zero());

        /*
         * Validate getting the notifications bitmap for per-vCPU
         * notifications.
         */
        vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
                                               per_vcpu, vcpu_idx, true);
        EXPECT_FALSE(vm_is_notifications_pending_count_zero());

        ret = vm_notifications_partition_get_pending(current_vm_locked,
                                                     is_from_vm, vcpu_idx);
        EXPECT_EQ(ret, per_vcpu);
        EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
        EXPECT_TRUE(vm_is_notifications_pending_count_zero());

        /*
         * Validate that getting notifications for a specific vCPU also returns
         * global notifications.
         */
        vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
                                               per_vcpu, vcpu_idx, true);

        vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
                                               global, 0ull, false);
        EXPECT_FALSE(vm_is_notifications_pending_count_zero());

        ret = vm_notifications_partition_get_pending(current_vm_locked,
                                                     is_from_vm, vcpu_idx);
        EXPECT_EQ(ret, per_vcpu | global);
        EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
        EXPECT_EQ(notifications->global.pending, 0ull);
        EXPECT_TRUE(vm_is_notifications_pending_count_zero());

        /* Undo the bindings. */
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
                                         global, false);
        vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
                                         per_vcpu, true);
        vm_unlock(&current_vm_locked);
}

/**
 * Validates simple getting of notifications info for global notifications.
 */
TEST_F(vm, vm_notifications_info_get_global)
{
        ffa_notifications_bitmap_t to_set = 0xFU;
        ffa_notifications_bitmap_t got;

        /*
         * The following variables are also expected to be used when handling
         * FFA_NOTIFICATION_INFO_GET.
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_count = 0;
        enum notifications_info_get_state current_state = INIT;

        CHECK(vm_get_count() >= 2);

        for (unsigned int i = 0; i < 2; i++) {
                struct_vm *current_vm = vm_find_index(0);
                struct vm_locked current_vm_locked = vm_lock(current_vm);
                struct notifications *notifications =
                        &current_vm->notifications.from_sp;
                const bool is_from_vm = false;

                vm_notifications_partition_set_pending(
                        current_vm_locked, is_from_vm, to_set, 0, false);

                vm_notifications_info_get_pending(
                        current_vm_locked, is_from_vm, ids, &ids_count,
                        lists_sizes, &lists_count,
                        FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

                /*
                 * The number of IDs and the list count should be the same,
                 * as expected for global notifications.
                 */
                EXPECT_EQ(ids_count, i + 1);
                EXPECT_EQ(lists_count, i + 1);
                EXPECT_EQ(lists_sizes[i], 0);
                EXPECT_EQ(to_set, notifications->global.info_get_retrieved);

                /* The state must be reset to INIT for each VM. */
                current_state = INIT;

                /*
                 * Check that getting pending notifications gives the expected
                 * return and cleans the 'pending' and 'info_get_retrieved'
                 * bitmaps.
                 */
                got = vm_notifications_partition_get_pending(current_vm_locked,
                                                             is_from_vm, 0);
                EXPECT_EQ(got, to_set);

                EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
                EXPECT_EQ(notifications->global.pending, 0U);

                vm_unlock(&current_vm_locked);
        }
}

/**
 * Validates simple getting of notifications info for per-vCPU notifications.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu)
{
        const ffa_notifications_bitmap_t per_vcpu = 0xFU;
        ffa_notifications_bitmap_t got;

        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 0;
        enum notifications_info_get_state current_state = INIT;

        CHECK(vm_get_count() >= 2);

        for (unsigned int i = 0; i < 2; i++) {
                struct_vm *current_vm = vm_find_index(0);
                struct vm_locked current_vm_locked = vm_lock(current_vm);
                struct notifications *notifications =
                        &current_vm->notifications.from_sp;
                const bool is_from_vm = false;

                vm_notifications_partition_set_pending(
                        current_vm_locked, is_from_vm, per_vcpu, 0, true);

                vm_notifications_info_get_pending(
                        current_vm_locked, is_from_vm, ids, &ids_count,
                        lists_sizes, &lists_count,
                        FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

                /*
                 * Each per-vCPU notification adds both the VM ID and the vCPU
                 * ID, so the number of IDs grows twice as fast as the list
                 * count.
                 */
                EXPECT_EQ(ids_count, (i + 1) * 2);
                EXPECT_EQ(lists_count, i + 1);
                EXPECT_EQ(lists_sizes[i], 1);
                EXPECT_EQ(per_vcpu,
                          notifications->per_vcpu[0].info_get_retrieved);

                /* The state must be reset to INIT for each VM. */
                current_state = INIT;

                /*
                 * Check that getting pending notifications gives the expected
                 * return and cleans the 'pending' and 'info_get_retrieved'
                 * bitmaps.
                 */
                got = vm_notifications_partition_get_pending(current_vm_locked,
                                                             is_from_vm, 0);
                EXPECT_EQ(got, per_vcpu);

                EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
                EXPECT_EQ(notifications->per_vcpu[0].pending, 0U);

                vm_unlock(&current_vm_locked);
        }
}

/**
 * Validates getting of notifications information when all vCPUs have
 * notifications pending.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus)
{
        struct_vm *current_vm = nullptr;
        struct vm_locked current_vm_locked;
        const ffa_vcpu_count_t vcpu_count = MAX_CPUS;
        ffa_notifications_bitmap_t got;
        const ffa_notifications_bitmap_t global = 0xF0000;

        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         */
        struct notifications *notifications;
        const bool is_from_sp = false;
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 0;
        enum notifications_info_get_state current_state = INIT;

        EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false, 0));
        current_vm_locked = vm_lock(current_vm);
        notifications = &current_vm->notifications.from_sp;

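        /* Pend a distinct per-vCPU notification on each of the VM's vCPUs. */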
        for (unsigned int i = 0; i < vcpu_count; i++) {
                vm_notifications_partition_set_pending(
                        current_vm_locked, is_from_sp, FFA_NOTIFICATION_MASK(i),
                        i, true);
        }

        /*
         * Adding a global notification should not change the list of IDs,
         * because global notifications only require the VM ID to appear in
         * the list at least once.
         */
        vm_notifications_partition_set_pending(current_vm_locked, is_from_sp,
                                               global, 0, false);

        vm_notifications_info_get_pending(current_vm_locked, is_from_sp, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);

        /*
         * This test assumes MAX_CPUS is 4.
         * All vCPUs have notifications of the same VM, broken down into 2
         * lists with 3 vCPU IDs and 1 vCPU ID respectively.
         * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
         */
        CHECK(MAX_CPUS == 4);
        EXPECT_EQ(ids_count, 6U);
        EXPECT_EQ(lists_count, 2U);
        EXPECT_EQ(lists_sizes[0], 3);
        EXPECT_EQ(lists_sizes[1], 1);

        for (unsigned int i = 0; i < vcpu_count; i++) {
                got = vm_notifications_partition_get_pending(current_vm_locked,
                                                             is_from_sp, i);

                /*
                 * The first call to vm_notifications_partition_get_pending
                 * should also include the global notifications in its return
                 * value.
                 */
                ffa_notifications_bitmap_t to_check =
                        (i != 0) ? FFA_NOTIFICATION_MASK(i)
                                 : FFA_NOTIFICATION_MASK(i) | global;

                EXPECT_EQ(got, to_check);

                EXPECT_EQ(notifications->per_vcpu[i].pending, 0);
                EXPECT_EQ(notifications->per_vcpu[i].info_get_retrieved, 0);
        }

        vm_unlock(&current_vm_locked);
}

/**
 * Validates the change of state from 'vm_notifications_info_get_pending' when
 * the list of IDs is full.
 */
TEST_F(vm, vm_notifications_info_get_full_per_vcpu)
{
        struct_vm *current_vm = vm_find_index(0);
        struct vm_locked current_vm_locked = vm_lock(current_vm);
        struct notifications *notifications =
                &current_vm->notifications.from_sp;
        const bool is_from_vm = false;
        ffa_notifications_bitmap_t got = 0;

        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         * Here 'ids_count' is initialized such that the list has no room left
         * for a per-vCPU notification entry (which needs both the VM ID and
         * the vCPU ID).
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 10;
        enum notifications_info_get_state current_state = INIT;
        CHECK(vm_get_count() >= 2);

        vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
                                               FFA_NOTIFICATION_MASK(1), 0,
                                               true);

        /* Call function to get notifications info, with only per-vCPU set. */
        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);

        /*
         * Verify that as soon as there is no space for the required insertion
         * in the list, 'vm_notifications_info_get_pending' returns and changes
         * the list state to FULL. In this case it returns because it would
         * need to add two IDs (the VM ID and the vCPU ID).
         */
        EXPECT_EQ(current_state, FULL);
        EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1);
        EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);

        /*
         * At this point there is still room for the information of a global
         * notification (only the VM ID needs to be added). Reset
         * 'current_state' so the insertion happens at the last position of
         * the array.
         */
        current_state = INIT;

        /* Set a global notification. */
        vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
                                               FFA_NOTIFICATION_MASK(2), 0,
                                               false);

        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);

        /*
         * Now the list must be full, the global notification that was set
         * must be part of 'info_get_retrieved', and 'current_state' should be
         * FULL due to the per-vCPU notification still pending on vCPU 0.
         */
        EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
        EXPECT_EQ(current_state, FULL);
        EXPECT_EQ(notifications->global.info_get_retrieved,
                  FFA_NOTIFICATION_MASK(2));

        got = vm_notifications_partition_get_pending(current_vm_locked,
                                                     is_from_vm, 0);
        EXPECT_EQ(got, FFA_NOTIFICATION_MASK(1) | FFA_NOTIFICATION_MASK(2));

        vm_unlock(&current_vm_locked);
}

TEST_F(vm, vm_notifications_info_get_full_global)
{
        struct_vm *current_vm = vm_find_index(0);
        struct vm_locked current_vm_locked = vm_lock(current_vm);
        ffa_notifications_bitmap_t got;
        struct notifications *notifications;
        const bool is_from_vm = false;
        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         * Here 'ids_count' is initialized such that the list has no room left
         * for a global notification (which needs the VM ID only).
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 10;
        enum notifications_info_get_state current_state = INIT;

        CHECK(vm_get_count() >= 1);

        current_vm = vm_find_index(0);

        notifications = &current_vm->notifications.from_sp;

        /* Set a global notification. */
        vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
                                               FFA_NOTIFICATION_MASK(10), 0,
                                               false);

        /* Get notifications info for the given notifications. */
        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);

        /* Expect the 'info_get_retrieved' bitmap to be 0. */
        EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
        EXPECT_EQ(notifications->global.pending, FFA_NOTIFICATION_MASK(10));
        EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
        EXPECT_EQ(current_state, FULL);

        got = vm_notifications_partition_get_pending(current_vm_locked,
                                                     is_from_vm, 0);
        EXPECT_EQ(got, FFA_NOTIFICATION_MASK(10));

        vm_unlock(&current_vm_locked);
}

TEST_F(vm, vm_notifications_info_get_from_framework)
{
        struct vm_locked vm_locked = vm_lock(vm_find_index(0));
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 0;

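        /* Pend framework notification bit 0 for the VM. */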
        vm_notifications_framework_set_pending(vm_locked, 0x1U);

        /* Get notifications info for the given notifications. */
        vm_notifications_info_get(vm_locked, ids, &ids_count, lists_sizes,
                                  &lists_count,
                                  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);

        EXPECT_EQ(ids[0], vm_locked.vm->id);
        EXPECT_EQ(ids_count, 1);
        EXPECT_EQ(lists_sizes[0], 0);
        EXPECT_EQ(lists_count, 1);

        EXPECT_EQ(vm_notifications_framework_get_pending(vm_locked), 0x1U);

        vm_unlock(&vm_locked);
}

/**
 * Validates simple getting of notifications info for a pending IPI.
 * Also checks that vCPUs with pending IPIs are only reported if the
 * vCPU is in the waiting state.
 */
TEST_F(vm, vm_notifications_info_get_ipi)
{
        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 0;
        enum notifications_info_get_state current_state = INIT;
        struct_vm *current_vm = vm_find_index(4);
        struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
        struct interrupts *interrupts = &target_vcpu->interrupts;
        const bool is_from_vm = false;
        struct vm_locked current_vm_locked = vm_lock(current_vm);

        EXPECT_TRUE(current_vm->vcpu_count >= 2);

        vcpu_virt_interrupt_set_pending(interrupts, HF_IPI_INTID);

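        /*
         * The target vCPU is not in the waiting state yet, so the pending
         * IPI must not be reported.
         */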
        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);

        EXPECT_EQ(ids_count, 0);
        EXPECT_EQ(lists_count, 0);

        target_vcpu->state = VCPU_STATE_WAITING;

        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);

        EXPECT_EQ(ids_count, 2);
        EXPECT_EQ(lists_count, 1);
        EXPECT_EQ(lists_sizes[0], 1);
        EXPECT_EQ(ids[0], current_vm->id);
        EXPECT_EQ(ids[1], 1);
        EXPECT_EQ(target_vcpu->ipi_info_get_retrieved, true);

        /* Check it is not retrieved multiple times. */
        current_state = INIT;
        ids[0] = 0;
        ids[1] = 0;
        ids_count = 0;
        lists_sizes[0] = 0;
        lists_count = 0;

        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);
        EXPECT_EQ(ids_count, 0);
        EXPECT_EQ(lists_count, 0);
        EXPECT_EQ(lists_sizes[0], 0);

        vm_unlock(&current_vm_locked);
}

/**
 * Validates getting of notifications info for a pending IPI when a per-vCPU
 * notification for the same vCPU is also pending.
 */
TEST_F(vm, vm_notifications_info_get_ipi_with_per_vcpu)
{
        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         */
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 0;
        enum notifications_info_get_state current_state = INIT;
        struct_vm *current_vm = vm_find_index(4);
        struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
        struct interrupts *interrupts = &target_vcpu->interrupts;
        const bool is_from_vm = false;
        struct vm_locked current_vm_locked = vm_lock(current_vm);

        EXPECT_TRUE(current_vm->vcpu_count >= 2);

        vcpu_virt_interrupt_set_pending(interrupts, HF_IPI_INTID);

        vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
                                               true, 1, true);
        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);

        EXPECT_EQ(ids_count, 2);
        EXPECT_EQ(lists_count, 1);
        EXPECT_EQ(lists_sizes[0], 1);
        EXPECT_EQ(ids[0], current_vm->id);
        EXPECT_EQ(ids[1], 1);
        EXPECT_EQ(target_vcpu->ipi_info_get_retrieved, true);

        /* Reset the state and values. */
        current_state = INIT;
        ids[0] = 0;
        ids[1] = 0;
        ids_count = 0;
        lists_sizes[0] = 0;
        lists_count = 0;

        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);
        EXPECT_EQ(ids_count, 0);
        EXPECT_EQ(lists_count, 0);
        EXPECT_EQ(lists_sizes[0], 0);

        vm_unlock(&current_vm_locked);
}

/**
 * Validate that a mix of a pending IPI and notifications is correctly
 * reported across vCPUs.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus_and_ipi)
{
        struct_vm *current_vm = vm_find_index(4);
        ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;
        CHECK(vcpu_count > 1);

        struct vm_locked current_vm_locked = vm_lock(current_vm);

        /*
         * The following variables are also expected to be used when handling
         * ffa_notification_info_get.
         */
        const bool is_from_vm = false;
        uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t ids_count = 0;
        uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
        uint32_t lists_count = 0;
        enum notifications_info_get_state current_state = INIT;
        struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 0);
        struct interrupts *interrupts = &target_vcpu->interrupts;

        target_vcpu->state = VCPU_STATE_WAITING;

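        /*
         * vCPU 0 is waiting with a pending IPI; the remaining vCPUs get
         * per-vCPU notifications below.
         */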
        vcpu_virt_interrupt_set_pending(interrupts, HF_IPI_INTID);

        for (unsigned int i = 1; i < vcpu_count; i++) {
                vm_notifications_partition_set_pending(
                        current_vm_locked, is_from_vm, FFA_NOTIFICATION_MASK(i),
                        i, true);
        }

        vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
                                          &ids_count, lists_sizes, &lists_count,
                                          FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                                          &current_state);

        /*
         * This test assumes MAX_CPUS is 4.
         * vCPU 0 has a pending IPI and the other vCPUs have per-vCPU
         * notifications, all for the same VM, broken down into 2 lists with 3
         * vCPU IDs and 1 vCPU ID respectively.
         * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
         */
        EXPECT_EQ(ids_count, 6U);
        EXPECT_EQ(lists_count, 2U);
        EXPECT_EQ(lists_sizes[0], 3);
        EXPECT_EQ(lists_sizes[1], 1);
        EXPECT_EQ(ids[0], current_vm->id);
        EXPECT_EQ(ids[1], 0);
        EXPECT_EQ(ids[2], 1);
        EXPECT_EQ(ids[3], 2);
        EXPECT_EQ(ids[4], current_vm->id);
        EXPECT_EQ(ids[5], 3);

        vm_unlock(&current_vm_locked);
}
} /* namespace */