blob: aed2422cec712b4ba2add48fc1dcc14bfc5988ac [file] [log] [blame]
Andrew Scull3c257452019-11-26 13:32:50 +00001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Scull3c257452019-11-26 13:32:50 +00007 */
8
9#include <gmock/gmock.h>
10
11extern "C" {
Daniel Boulby84350712021-11-26 11:13:20 +000012#include "hf/check.h"
J-Alves67f5ba32024-09-27 18:07:11 +010013#include "hf/list.h"
Andrew Scull3c257452019-11-26 13:32:50 +000014#include "hf/mpool.h"
Madhukar Pappireddya067dc12024-10-16 22:20:44 -050015#include "hf/timer_mgmt.h"
Andrew Scull3c257452019-11-26 13:32:50 +000016#include "hf/vm.h"
17}
18
J-Alvesb37fd082020-10-22 12:29:21 +010019#include <list>
Andrew Scull3c257452019-11-26 13:32:50 +000020#include <memory>
21#include <span>
22#include <vector>
23
24#include "mm_test.hh"
25
26namespace
27{
28using namespace ::std::placeholders;
29
30using ::testing::AllOf;
31using ::testing::Each;
32using ::testing::SizeIs;
33
34using struct_vm = struct vm;
Olivier Deprez181074b2023-02-02 14:53:23 +010035using struct_vcpu = struct vcpu;
J-Alves96f6e292021-06-08 17:32:40 +010036using struct_vm_locked = struct vm_locked;
Andrew Scull3c257452019-11-26 13:32:50 +000037
Olivier Deprezd5a54892023-02-02 16:45:59 +010038constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
Andrew Scull3c257452019-11-26 13:32:50 +000039const int TOP_LEVEL = arch_mm_stage2_max_level();
40
41class vm : public ::testing::Test
42{
Olivier Deprezd5a54892023-02-02 16:45:59 +010043 protected:
44 static std::unique_ptr<uint8_t[]> test_heap;
45
46 struct mpool ppool;
47
Andrew Scull3c257452019-11-26 13:32:50 +000048 void SetUp() override
49 {
Olivier Deprezd5a54892023-02-02 16:45:59 +010050 if (!test_heap) {
51 /*
52 * TODO: replace with direct use of stdlib allocator so
53 * sanitizers are more effective.
54 */
55 test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
56 mpool_init(&ppool, sizeof(struct mm_page_table));
57 mpool_add_chunk(&ppool, test_heap.get(),
58 TEST_HEAP_SIZE);
59 }
Andrew Scull3c257452019-11-26 13:32:50 +000060 }
61
J-Alvesb37fd082020-10-22 12:29:21 +010062 public:
J-Alvesbeeb6dc2021-12-08 18:21:32 +000063 static bool BootOrderSmallerThan(struct_vm *vm1, struct_vm *vm2)
J-Alvesb37fd082020-10-22 12:29:21 +010064 {
J-Alvesbeeb6dc2021-12-08 18:21:32 +000065 return vm1->boot_order < vm2->boot_order;
J-Alvesb37fd082020-10-22 12:29:21 +010066 }
Andrew Scull3c257452019-11-26 13:32:50 +000067};
68
Olivier Deprezd5a54892023-02-02 16:45:59 +010069std::unique_ptr<uint8_t[]> vm::test_heap;
70
Andrew Scull3c257452019-11-26 13:32:50 +000071/**
72 * If nothing is mapped, unmapping the hypervisor has no effect.
73 */
74TEST_F(vm, vm_unmap_hypervisor_not_mapped)
75{
76 struct_vm *vm;
77 struct vm_locked vm_locked;
78
Olivier Deprez878bd5b2021-04-15 19:05:10 +020079 /* TODO: check ptable usage (security state?) */
Madhukar Pappireddy070f49e2024-01-12 13:02:27 -060080 EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false, 0));
Andrew Scull3c257452019-11-26 13:32:50 +000081 vm_locked = vm_lock(vm);
Raghu Krishnamurthy0132b512021-02-03 14:13:26 -080082 ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
Andrew Scull3c257452019-11-26 13:32:50 +000083 EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
84 EXPECT_THAT(
85 mm_test::get_ptable(vm->ptable),
86 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
87 mm_vm_fini(&vm->ptable, &ppool);
88 vm_unlock(&vm_locked);
89}
90
J-Alvesb37fd082020-10-22 12:29:21 +010091/**
92 * Validate the "boot_list" is created properly, according to vm's "boot_order"
93 * field.
94 */
95TEST_F(vm, vm_boot_order)
96{
97 struct_vm *vm_cur;
Madhukar Pappireddya49ba162024-11-25 09:40:45 -060098 struct_vm *vm;
J-Alvesb37fd082020-10-22 12:29:21 +010099 std::list<struct_vm *> expected_final_order;
100
J-Alvesb37fd082020-10-22 12:29:21 +0100101 /*
Olivier Deprez181074b2023-02-02 14:53:23 +0100102 * Insertion when no call to "vcpu_update_boot" has been made yet.
J-Alvesb37fd082020-10-22 12:29:21 +0100103 * The "boot_list" is expected to be empty.
104 */
Madhukar Pappireddy070f49e2024-01-12 13:02:27 -0600105 EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
J-Alvesbeeb6dc2021-12-08 18:21:32 +0000106 vm_cur->boot_order = 3;
Madhukar Pappireddya49ba162024-11-25 09:40:45 -0600107 vm_update_boot(vm_cur);
J-Alvesb37fd082020-10-22 12:29:21 +0100108 expected_final_order.push_back(vm_cur);
109
Madhukar Pappireddya49ba162024-11-25 09:40:45 -0600110 EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);
J-Alvesb37fd082020-10-22 12:29:21 +0100111
112 /* Insertion at the head of the boot list */
Madhukar Pappireddy070f49e2024-01-12 13:02:27 -0600113 EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
J-Alvesbeeb6dc2021-12-08 18:21:32 +0000114 vm_cur->boot_order = 1;
Madhukar Pappireddya49ba162024-11-25 09:40:45 -0600115 vm_update_boot(vm_cur);
J-Alvesb37fd082020-10-22 12:29:21 +0100116 expected_final_order.push_back(vm_cur);
117
Madhukar Pappireddya49ba162024-11-25 09:40:45 -0600118 EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);
J-Alvesb37fd082020-10-22 12:29:21 +0100119
120 /* Insertion of two in the middle of the boot list */
Olivier Deprez181074b2023-02-02 14:53:23 +0100121 for (uint32_t i = 0; i < 2; i++) {
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000122 EXPECT_TRUE(vm_init_next(MAX_CPUS, &ppool, &vm_cur, false, 0));
J-Alvesb37fd082020-10-22 12:29:21 +0100123 vm_cur->boot_order = 2;
Madhukar Pappireddya49ba162024-11-25 09:40:45 -0600124 vm_update_boot(vm_cur);
J-Alvesb37fd082020-10-22 12:29:21 +0100125 expected_final_order.push_back(vm_cur);
126 }
127
128 /*
129 * Insertion in the end of the list.
130 * This tests shares the data with "vm_unmap_hypervisor_not_mapped".
131 * As such, a VM is expected to have been initialized before this
132 * test, with ID 1 and boot_order 0.
133 */
134 vm_cur = vm_find(1);
135 EXPECT_FALSE(vm_cur == NULL);
Madhukar Pappireddya49ba162024-11-25 09:40:45 -0600136 vm_update_boot(vm_cur);
J-Alvesb37fd082020-10-22 12:29:21 +0100137 expected_final_order.push_back(vm_cur);
138
139 /*
140 * Number of VMs initialized should be the same as in the
141 * "expected_final_order", before the final verification.
142 */
143 EXPECT_EQ(expected_final_order.size(), vm_get_count())
144 << "Something went wrong with the test itself...\n";
145
J-Alvesbeeb6dc2021-12-08 18:21:32 +0000146 /* Sort VMs from lower to higher "boot_order" field.*/
147 expected_final_order.sort(vm::BootOrderSmallerThan);
J-Alvesb37fd082020-10-22 12:29:21 +0100148
149 std::list<struct_vm *>::iterator it;
Madhukar Pappireddya49ba162024-11-25 09:40:45 -0600150 vm = vm_get_boot_vm();
Olivier Deprez181074b2023-02-02 14:53:23 +0100151 for (it = expected_final_order.begin();
152 it != expected_final_order.end(); it++) {
Madhukar Pappireddya49ba162024-11-25 09:40:45 -0600153 EXPECT_TRUE(vm != NULL);
154 EXPECT_EQ((*it)->id, vm->id);
155 vm = vm_get_next_boot(vm);
J-Alvesb37fd082020-10-22 12:29:21 +0100156 }
157}
J-Alves60eaff92021-05-27 14:54:41 +0100158
Madhukar Pappireddya067dc12024-10-16 22:20:44 -0500159TEST_F(vm, vcpu_arch_timer)
160{
161 const cpu_id_t cpu_ids[2] = {0, 1};
162 struct_vcpu *vm0_vcpu;
163 struct_vcpu *vm1_vcpu;
164 struct_vcpu *deadline_vcpu;
165 struct_vcpu *target_vcpu;
166 struct vcpu_locked vcpu_locked;
167 struct cpu *cpu0;
168 struct cpu *cpu1;
169
170 /* Initialie CPU module with two physical CPUs. */
171 cpu_module_init(cpu_ids, 2);
172 cpu0 = cpu_find_index(0);
173 cpu1 = cpu_find_index(1);
174
175 /* Two UP endpoints are deployed for this test. */
176 CHECK(vm_get_count() >= 2);
177 vm0_vcpu = vm_get_vcpu(vm_find_index(0), 0);
178 vm1_vcpu = vm_get_vcpu(vm_find_index(1), 0);
179
180 /* The execution context of each VM is scheduled on CPU0. */
181 vm0_vcpu->cpu = cpu0;
182 vm1_vcpu->cpu = cpu0;
183
184 /*
185 * Enable the timer peripheral for each vCPU and setup an arbitraty
186 * countdown value.
187 */
188 vm0_vcpu->regs.arch_timer.cval = 555555;
189 vm1_vcpu->regs.arch_timer.cval = 999999;
190 vm0_vcpu->regs.arch_timer.ctl = 1;
191 vm1_vcpu->regs.arch_timer.ctl = 1;
192
193 /* No vCPU is being tracked through either timer list. */
194 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
195 EXPECT_TRUE(deadline_vcpu == NULL);
196 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu1);
197 EXPECT_TRUE(deadline_vcpu == NULL);
198
199 /* vCPU of VM0 and VM1 are being added to the list. */
200 timer_vcpu_manage(vm0_vcpu);
201 timer_vcpu_manage(vm1_vcpu);
202
203 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
204 EXPECT_EQ(deadline_vcpu, vm0_vcpu);
205
206 /* Remove one of the vCPUs from the CPU0 list. */
207 vm0_vcpu->regs.arch_timer.cval = 0;
208 vm0_vcpu->regs.arch_timer.ctl = 0;
209 timer_vcpu_manage(vm0_vcpu);
210
211 /* This leaves one vCPU entry on CPU0 list. */
212 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
213 EXPECT_EQ(deadline_vcpu, vm1_vcpu);
214
215 /* Attempt to migrate VM1 vCPU from CPU0 to CPU1. */
216 vcpu_locked = vcpu_lock(vm1_vcpu);
217 timer_migrate_to_other_cpu(cpu1, vcpu_locked);
218 vcpu_unlock(&vcpu_locked);
219
220 /*
221 * After migration, ensure the list is empty on CPU0 but non-empty on
222 * CPU1.
223 */
224 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
225 EXPECT_TRUE(deadline_vcpu == NULL);
226
227 /*
228 * vCPU of VM1 is now running on CPU1. It must be the target vCPU when
229 * the timer has expired.
230 */
231 target_vcpu = timer_find_target_vcpu(vm1_vcpu);
232 EXPECT_EQ(target_vcpu, vm1_vcpu);
233}
234
J-Alves60eaff92021-05-27 14:54:41 +0100235/**
236 * Validates updates and check functions for binding notifications to endpoints.
237 */
238TEST_F(vm, vm_notifications_bind_diff_senders)
239{
J-Alvesd3e81622021-10-05 14:55:57 +0100240 struct_vm *current_vm = nullptr;
241 struct vm_locked current_vm_locked;
J-Alves60eaff92021-05-27 14:54:41 +0100242 std::vector<struct_vm *> dummy_senders;
243 ffa_notifications_bitmap_t bitmaps[] = {
244 0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
245 bool is_from_vm = true;
246
247 /* For the subsequent tests three VMs are used. */
248 CHECK(vm_get_count() >= 3);
249
J-Alvesd3e81622021-10-05 14:55:57 +0100250 current_vm = vm_find_index(0);
J-Alves60eaff92021-05-27 14:54:41 +0100251
252 dummy_senders.push_back(vm_find_index(1));
253 dummy_senders.push_back(vm_find_index(2));
254
J-Alvesd3e81622021-10-05 14:55:57 +0100255 current_vm_locked = vm_lock(current_vm);
J-Alves60eaff92021-05-27 14:54:41 +0100256
257 for (unsigned int i = 0; i < 2; i++) {
258 /* Validate bindings condition after initialization. */
259 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100260 current_vm_locked, is_from_vm, HF_INVALID_VM_ID,
261 bitmaps[i], false));
J-Alves60eaff92021-05-27 14:54:41 +0100262
263 /*
264 * Validate bind related operations. For this test considering
265 * only global notifications.
266 */
J-Alvesd3e81622021-10-05 14:55:57 +0100267 vm_notifications_update_bindings(current_vm_locked, is_from_vm,
J-Alves60eaff92021-05-27 14:54:41 +0100268 dummy_senders[i]->id,
269 bitmaps[i], false);
270
271 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100272 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100273 bitmaps[i], false));
274
275 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100276 current_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100277 bitmaps[i], false));
278
279 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100280 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100281 bitmaps[1 - i], false));
282
283 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100284 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100285 bitmaps[2], false));
286 }
287
288 /** Clean up bind for other tests. */
J-Alvesd3e81622021-10-05 14:55:57 +0100289 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
J-Alves60eaff92021-05-27 14:54:41 +0100290 bitmaps[0], false);
J-Alvesd3e81622021-10-05 14:55:57 +0100291 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
J-Alves60eaff92021-05-27 14:54:41 +0100292 bitmaps[1], false);
293
J-Alvesd3e81622021-10-05 14:55:57 +0100294 vm_unlock(&current_vm_locked);
J-Alves60eaff92021-05-27 14:54:41 +0100295}
296
297/**
298 * Validates updates and check functions for binding notifications, namely the
J-Alves96f6e292021-06-08 17:32:40 +0100299 * configuration of bindings of global and per-vCPU notifications.
J-Alves60eaff92021-05-27 14:54:41 +0100300 */
301TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
302{
J-Alvesd3e81622021-10-05 14:55:57 +0100303 struct_vm *current_vm;
304 struct vm_locked current_vm_locked;
J-Alves60eaff92021-05-27 14:54:41 +0100305 struct_vm *dummy_sender;
306 ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
307 ffa_notifications_bitmap_t per_vcpu = ~global;
308 bool is_from_vm = true;
309
310 CHECK(vm_get_count() >= 2);
311
J-Alvesd3e81622021-10-05 14:55:57 +0100312 current_vm = vm_find_index(0);
J-Alves60eaff92021-05-27 14:54:41 +0100313
314 dummy_sender = vm_find_index(1);
315
J-Alvesd3e81622021-10-05 14:55:57 +0100316 current_vm_locked = vm_lock(current_vm);
J-Alves60eaff92021-05-27 14:54:41 +0100317
J-Alvesd3e81622021-10-05 14:55:57 +0100318 vm_notifications_update_bindings(current_vm_locked, is_from_vm,
J-Alves60eaff92021-05-27 14:54:41 +0100319 dummy_sender->id, global, false);
J-Alvesd3e81622021-10-05 14:55:57 +0100320 vm_notifications_update_bindings(current_vm_locked, is_from_vm,
J-Alves60eaff92021-05-27 14:54:41 +0100321 dummy_sender->id, per_vcpu, true);
322
323 /* Check validation of global notifications bindings. */
324 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100325 current_vm_locked, is_from_vm, dummy_sender->id, global,
326 false));
J-Alves60eaff92021-05-27 14:54:41 +0100327
J-Alves96f6e292021-06-08 17:32:40 +0100328 /* Check validation of per-vCPU notifications bindings. */
J-Alves60eaff92021-05-27 14:54:41 +0100329 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100330 current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
331 true));
J-Alves60eaff92021-05-27 14:54:41 +0100332
333 /**
J-Alves96f6e292021-06-08 17:32:40 +0100334 * Check that global notifications are not validated as per-vCPU, and
J-Alves60eaff92021-05-27 14:54:41 +0100335 * vice-versa.
336 */
337 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100338 current_vm_locked, is_from_vm, dummy_sender->id, global, true));
J-Alves60eaff92021-05-27 14:54:41 +0100339 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100340 current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
J-Alves60eaff92021-05-27 14:54:41 +0100341 false));
J-Alvesd3e81622021-10-05 14:55:57 +0100342 EXPECT_FALSE(vm_notifications_validate_binding(
343 current_vm_locked, is_from_vm, dummy_sender->id,
344 global | per_vcpu, true));
345 EXPECT_FALSE(vm_notifications_validate_binding(
346 current_vm_locked, is_from_vm, dummy_sender->id,
347 global | per_vcpu, false));
J-Alves60eaff92021-05-27 14:54:41 +0100348
349 /** Undo the bindings */
J-Alvesd3e81622021-10-05 14:55:57 +0100350 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
351 global, false);
352 EXPECT_TRUE(vm_notifications_validate_binding(
353 current_vm_locked, is_from_vm, 0, global, false));
J-Alves60eaff92021-05-27 14:54:41 +0100354
J-Alvesd3e81622021-10-05 14:55:57 +0100355 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
356 per_vcpu, false);
357 EXPECT_TRUE(vm_notifications_validate_binding(
358 current_vm_locked, is_from_vm, 0, per_vcpu, false));
J-Alves60eaff92021-05-27 14:54:41 +0100359
J-Alvesd3e81622021-10-05 14:55:57 +0100360 vm_unlock(&current_vm_locked);
J-Alves60eaff92021-05-27 14:54:41 +0100361}
362
J-Alvesce2f8d32021-06-10 18:30:21 +0100363/**
364 * Validates accesses to notifications bitmaps.
365 */
366TEST_F(vm, vm_notifications_set_and_get)
367{
J-Alvesd3e81622021-10-05 14:55:57 +0100368 struct_vm *current_vm;
369 struct vm_locked current_vm_locked;
J-Alvesce2f8d32021-06-10 18:30:21 +0100370 struct_vm *dummy_sender;
371 ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
372 ffa_notifications_bitmap_t per_vcpu = ~global;
373 ffa_notifications_bitmap_t ret;
Raghu Krishnamurthy30aabd62022-09-17 21:41:00 -0700374 const unsigned int vcpu_idx = 0;
J-Alvesce2f8d32021-06-10 18:30:21 +0100375 struct notifications *notifications;
376 const bool is_from_vm = true;
377
378 CHECK(vm_get_count() >= 2);
379
J-Alvesd3e81622021-10-05 14:55:57 +0100380 current_vm = vm_find_index(0);
J-Alvesce2f8d32021-06-10 18:30:21 +0100381 dummy_sender = vm_find_index(1);
382
J-Alvesd3e81622021-10-05 14:55:57 +0100383 notifications = &current_vm->notifications.from_vm;
384 current_vm_locked = vm_lock(current_vm);
J-Alvesce2f8d32021-06-10 18:30:21 +0100385
J-Alvesd3e81622021-10-05 14:55:57 +0100386 vm_notifications_update_bindings(current_vm_locked, is_from_vm,
J-Alvesce2f8d32021-06-10 18:30:21 +0100387 dummy_sender->id, global, false);
J-Alvesd3e81622021-10-05 14:55:57 +0100388 vm_notifications_update_bindings(current_vm_locked, is_from_vm,
J-Alvesce2f8d32021-06-10 18:30:21 +0100389 dummy_sender->id, per_vcpu, true);
390
391 /*
J-Alvesd3e81622021-10-05 14:55:57 +0100392 * Validate get notifications bitmap for global notifications.
J-Alvesce2f8d32021-06-10 18:30:21 +0100393 */
J-Alves5a16c962022-03-25 12:32:51 +0000394 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
395 global, 0ull, false);
J-Alvesce2f8d32021-06-10 18:30:21 +0100396
J-Alves5136dda2022-03-25 12:26:38 +0000397 ret = vm_notifications_partition_get_pending(current_vm_locked,
J-Alvesd3e81622021-10-05 14:55:57 +0100398 is_from_vm, 0ull);
J-Alvesce2f8d32021-06-10 18:30:21 +0100399 EXPECT_EQ(ret, global);
J-Alvesd3e81622021-10-05 14:55:57 +0100400 EXPECT_EQ(notifications->global.pending, 0ull);
J-Alvesce2f8d32021-06-10 18:30:21 +0100401
402 /*
J-Alvesd3e81622021-10-05 14:55:57 +0100403 * Validate get notifications bitmap for per-vCPU notifications.
J-Alvesce2f8d32021-06-10 18:30:21 +0100404 */
J-Alves5a16c962022-03-25 12:32:51 +0000405 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
406 per_vcpu, vcpu_idx, true);
J-Alvesce2f8d32021-06-10 18:30:21 +0100407
J-Alves5136dda2022-03-25 12:26:38 +0000408 ret = vm_notifications_partition_get_pending(current_vm_locked,
J-Alvesd3e81622021-10-05 14:55:57 +0100409 is_from_vm, vcpu_idx);
J-Alvesce2f8d32021-06-10 18:30:21 +0100410 EXPECT_EQ(ret, per_vcpu);
J-Alvesd3e81622021-10-05 14:55:57 +0100411 EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
J-Alvesce2f8d32021-06-10 18:30:21 +0100412
413 /*
J-Alvesd3e81622021-10-05 14:55:57 +0100414 * Validate that getting notifications for a specific vCPU also returns
415 * global notifications.
J-Alvesce2f8d32021-06-10 18:30:21 +0100416 */
J-Alves5a16c962022-03-25 12:32:51 +0000417 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
418 per_vcpu, vcpu_idx, true);
419 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
420 global, 0ull, false);
J-Alvesce2f8d32021-06-10 18:30:21 +0100421
J-Alves5136dda2022-03-25 12:26:38 +0000422 ret = vm_notifications_partition_get_pending(current_vm_locked,
J-Alvesd3e81622021-10-05 14:55:57 +0100423 is_from_vm, vcpu_idx);
J-Alvesce2f8d32021-06-10 18:30:21 +0100424 EXPECT_EQ(ret, per_vcpu | global);
J-Alvesd3e81622021-10-05 14:55:57 +0100425 EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
426 EXPECT_EQ(notifications->global.pending, 0ull);
J-Alvesce2f8d32021-06-10 18:30:21 +0100427
428 /** Undo the binding */
J-Alvesd3e81622021-10-05 14:55:57 +0100429 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
430 global, false);
431 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
432 per_vcpu, true);
433 vm_unlock(&current_vm_locked);
J-Alvesce2f8d32021-06-10 18:30:21 +0100434}
435
J-Alves96f6e292021-06-08 17:32:40 +0100436/**
437 * Validates simple getting of notifications info for global notifications.
438 */
439TEST_F(vm, vm_notifications_info_get_global)
440{
441 ffa_notifications_bitmap_t to_set = 0xFU;
442 ffa_notifications_bitmap_t got;
443
444 /**
445 * Following set of variables that are also expected to be used when
446 * handling FFA_NOTIFICATION_INFO_GET.
447 */
448 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
449 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
450 uint32_t ids_count = 0;
451 uint32_t lists_count = 0;
452 enum notifications_info_get_state current_state = INIT;
453
454 CHECK(vm_get_count() >= 2);
455
456 for (unsigned int i = 0; i < 2; i++) {
457 struct_vm *current_vm = vm_find_index(0);
458 struct vm_locked current_vm_locked = vm_lock(current_vm);
459 struct notifications *notifications =
460 &current_vm->notifications.from_sp;
461 const bool is_from_vm = false;
462
J-Alves5a16c962022-03-25 12:32:51 +0000463 vm_notifications_partition_set_pending(
464 current_vm_locked, is_from_vm, to_set, 0, false);
J-Alves96f6e292021-06-08 17:32:40 +0100465
466 vm_notifications_info_get_pending(
467 current_vm_locked, is_from_vm, ids, &ids_count,
468 lists_sizes, &lists_count,
469 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);
470
471 /*
472 * Here the number of IDs and list count should be the same.
473 * As we are testing with Global notifications, this is
474 * expected.
475 */
476 EXPECT_EQ(ids_count, i + 1);
477 EXPECT_EQ(lists_count, i + 1);
478 EXPECT_EQ(lists_sizes[i], 0);
479 EXPECT_EQ(to_set, notifications->global.info_get_retrieved);
480
481 /* Action must be reset to initial state for each VM. */
482 current_state = INIT;
483
484 /*
485 * Check that getting pending notifications gives the expected
486 * return and cleans the 'pending' and 'info_get_retrieved'
487 * bitmaps.
488 */
J-Alves5136dda2022-03-25 12:26:38 +0000489 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100490 is_from_vm, 0);
491 EXPECT_EQ(got, to_set);
492
493 EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
494 EXPECT_EQ(notifications->global.pending, 0U);
495
496 vm_unlock(&current_vm_locked);
497 }
498}
499
500/**
501 * Validates simple getting of notifications info for per-vCPU notifications.
502 */
503TEST_F(vm, vm_notifications_info_get_per_vcpu)
504{
505 const ffa_notifications_bitmap_t per_vcpu = 0xFU;
506 ffa_notifications_bitmap_t got;
507
508 /*
509 * Following set of variables that are also expected to be used when
510 * handling ffa_notification_info_get.
511 */
512 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
513 uint32_t ids_count = 0;
514 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
515 uint32_t lists_count = 0;
516 enum notifications_info_get_state current_state = INIT;
517
518 CHECK(vm_get_count() >= 2);
519
520 for (unsigned int i = 0; i < 2; i++) {
521 struct_vm *current_vm = vm_find_index(0);
522 struct vm_locked current_vm_locked = vm_lock(current_vm);
523 struct notifications *notifications =
524 &current_vm->notifications.from_sp;
525 const bool is_from_vm = false;
526
J-Alves5a16c962022-03-25 12:32:51 +0000527 vm_notifications_partition_set_pending(
528 current_vm_locked, is_from_vm, per_vcpu, 0, true);
J-Alves96f6e292021-06-08 17:32:40 +0100529
530 vm_notifications_info_get_pending(
531 current_vm_locked, is_from_vm, ids, &ids_count,
532 lists_sizes, &lists_count,
533 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);
534
535 /*
536 * Here the number of IDs and list count should be the same.
537 * As we are testing with Global notifications, this is
538 * expected.
539 */
540 EXPECT_EQ(ids_count, (i + 1) * 2);
541 EXPECT_EQ(lists_count, i + 1);
542 EXPECT_EQ(lists_sizes[i], 1);
543 EXPECT_EQ(per_vcpu,
544 notifications->per_vcpu[0].info_get_retrieved);
545
546 /* Action must be reset to initial state for each VM. */
547 current_state = INIT;
548
549 /*
550 * Check that getting pending notifications gives the expected
551 * return and cleans the 'pending' and 'info_get_retrieved'
552 * bitmaps.
553 */
J-Alves5136dda2022-03-25 12:26:38 +0000554 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100555 is_from_vm, 0);
556 EXPECT_EQ(got, per_vcpu);
557
558 EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
559 EXPECT_EQ(notifications->per_vcpu[0].pending, 0U);
560
561 vm_unlock(&current_vm_locked);
562 }
563}
564
565/**
566 * Validate getting of notifications information if all VCPUs have notifications
567 * pending.
568 */
569TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus)
570{
571 struct_vm *current_vm = nullptr;
572 struct vm_locked current_vm_locked;
573 const ffa_vcpu_count_t vcpu_count = MAX_CPUS;
574 ffa_notifications_bitmap_t got;
575 const ffa_notifications_bitmap_t global = 0xF0000;
576
577 /*
578 * Following set of variables that are also expected to be used when
579 * handling ffa_notification_info_get.
580 */
581 struct notifications *notifications;
582 const bool is_from_sp = false;
583 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
584 uint32_t ids_count = 0;
585 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
586 uint32_t lists_count = 0;
587 enum notifications_info_get_state current_state = INIT;
588
Madhukar Pappireddy070f49e2024-01-12 13:02:27 -0600589 EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false, 0));
J-Alves96f6e292021-06-08 17:32:40 +0100590 current_vm_locked = vm_lock(current_vm);
591 notifications = &current_vm->notifications.from_sp;
592
593 for (unsigned int i = 0; i < vcpu_count; i++) {
J-Alves5a16c962022-03-25 12:32:51 +0000594 vm_notifications_partition_set_pending(
595 current_vm_locked, is_from_sp, FFA_NOTIFICATION_MASK(i),
596 i, true);
J-Alves96f6e292021-06-08 17:32:40 +0100597 }
598
599 /*
600 * Adding a global notification should not change the list of IDs,
601 * because global notifications only require the VM ID to be included in
602 * the list, at least once.
603 */
J-Alves5a16c962022-03-25 12:32:51 +0000604 vm_notifications_partition_set_pending(current_vm_locked, is_from_sp,
605 global, 0, false);
J-Alves96f6e292021-06-08 17:32:40 +0100606
607 vm_notifications_info_get_pending(current_vm_locked, is_from_sp, ids,
608 &ids_count, lists_sizes, &lists_count,
609 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
610 &current_state);
611
612 /*
613 * This test has been conceived for the expected MAX_CPUS 4.
614 * All VCPUs have notifications of the same VM, to be broken down in 2
615 * lists with 3 VCPU IDs, and 1 VCPU ID respectively.
616 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
617 */
618 CHECK(MAX_CPUS == 4);
619 EXPECT_EQ(ids_count, 6U);
620 EXPECT_EQ(lists_count, 2U);
621 EXPECT_EQ(lists_sizes[0], 3);
622 EXPECT_EQ(lists_sizes[1], 1);
623
624 for (unsigned int i = 0; i < vcpu_count; i++) {
J-Alves5136dda2022-03-25 12:26:38 +0000625 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100626 is_from_sp, i);
627
628 /*
J-Alves5136dda2022-03-25 12:26:38 +0000629 * The first call to
630 * vm_notifications_partition_get_pending should also
631 * include the global notifications on the return.
J-Alves96f6e292021-06-08 17:32:40 +0100632 */
633 ffa_notifications_bitmap_t to_check =
634 (i != 0) ? FFA_NOTIFICATION_MASK(i)
635 : FFA_NOTIFICATION_MASK(i) | global;
636
637 EXPECT_EQ(got, to_check);
638
639 EXPECT_EQ(notifications->per_vcpu[i].pending, 0);
640 EXPECT_EQ(notifications->per_vcpu[i].info_get_retrieved, 0);
641 }
642
643 vm_unlock(&current_vm_locked);
644}
645
646/**
647 * Validate change of state from 'vm_notifications_info_get_pending', when the
648 * list of IDs is full.
649 */
650TEST_F(vm, vm_notifications_info_get_full_per_vcpu)
651{
652 struct_vm *current_vm = vm_find_index(0);
653 struct vm_locked current_vm_locked = vm_lock(current_vm);
654 struct notifications *notifications =
655 &current_vm->notifications.from_sp;
656 const bool is_from_vm = false;
657 ffa_notifications_bitmap_t got = 0;
658
659 /*
660 * Following set of variables that are also expected to be used when
661 * handling ffa_notification_info_get.
662 * For this 'ids_count' has been initialized such that it indicates
663 * there is no space in the list for a per-vCPU notification (VM ID and
664 * VCPU ID).
665 */
666 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
667 uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1;
668 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
669 uint32_t lists_count = 10;
670 enum notifications_info_get_state current_state = INIT;
671 CHECK(vm_get_count() >= 2);
672
J-Alves5a16c962022-03-25 12:32:51 +0000673 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
674 FFA_NOTIFICATION_MASK(1), 0,
675 true);
J-Alves96f6e292021-06-08 17:32:40 +0100676
677 /* Call function to get notifications info, with only per-vCPU set. */
678 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
679 &ids_count, lists_sizes, &lists_count,
680 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
681 &current_state);
682
683 /*
684 * Verify that as soon as there isn't space to do the required
J-Alves5136dda2022-03-25 12:26:38 +0000685 * insertion in the list, the
686 * 'vm_notifications_partition_get_pending' returns and changes
687 * list state to FULL. In this case returning, because it would need to
688 * add two IDs (VM ID and VCPU ID).
J-Alves96f6e292021-06-08 17:32:40 +0100689 */
690 EXPECT_EQ(current_state, FULL);
691 EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1);
692 EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
693
694 /*
695 * At this point there is still room for the information of a global
696 * notification (only VM ID to be added). Reset 'current_state'
697 * for the insertion to happen at the last position of the array.
698 */
699 current_state = INIT;
700
701 /* Setting global notification */
J-Alves5a16c962022-03-25 12:32:51 +0000702 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
703 FFA_NOTIFICATION_MASK(2), 0,
704 false);
J-Alves96f6e292021-06-08 17:32:40 +0100705
706 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
707 &ids_count, lists_sizes, &lists_count,
708 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
709 &current_state);
710
711 /*
712 * Now List must be full, the set global notification must be part of
713 * 'info_get_retrieved', and the 'current_state' should be set to FULL
714 * due to the pending per-vCPU notification in VCPU 0.
715 */
716 EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
717 EXPECT_EQ(current_state, FULL);
718 EXPECT_EQ(notifications->global.info_get_retrieved,
719 FFA_NOTIFICATION_MASK(2));
720
J-Alves5136dda2022-03-25 12:26:38 +0000721 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100722 is_from_vm, 0);
723 EXPECT_EQ(got, FFA_NOTIFICATION_MASK(1) | FFA_NOTIFICATION_MASK(2));
724
725 vm_unlock(&current_vm_locked);
726}
727
728TEST_F(vm, vm_notifications_info_get_full_global)
729{
730 struct_vm *current_vm = vm_find_index(0);
731 struct vm_locked current_vm_locked = vm_lock(current_vm);
732 ffa_notifications_bitmap_t got;
733 struct notifications *notifications;
734 const bool is_from_vm = false;
735 /*
736 * Following set of variables that are also expected to be used when
737 * handling ffa_notification_info_get.
738 * For this 'ids_count' has been initialized such that it indicates
739 * there is no space in the list for a global notification (VM ID only).
740 */
741 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
742 uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
743 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
744 uint32_t lists_count = 10;
745 enum notifications_info_get_state current_state = INIT;
746
747 CHECK(vm_get_count() >= 1);
748
749 current_vm = vm_find_index(0);
750
751 notifications = &current_vm->notifications.from_sp;
752
753 /* Set global notification. */
J-Alves5a16c962022-03-25 12:32:51 +0000754 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
755 FFA_NOTIFICATION_MASK(10), 0,
756 false);
J-Alves96f6e292021-06-08 17:32:40 +0100757
758 /* Get notifications info for the given notifications. */
759 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
760 &ids_count, lists_sizes, &lists_count,
761 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
762 &current_state);
763
764 /* Expect 'info_get_retrieved' bitmap to be 0. */
765 EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
766 EXPECT_EQ(notifications->global.pending, FFA_NOTIFICATION_MASK(10));
767 EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
768 EXPECT_EQ(current_state, FULL);
769
J-Alves5136dda2022-03-25 12:26:38 +0000770 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100771 is_from_vm, 0);
J-Alves9f74b932021-10-11 14:20:05 +0100772 EXPECT_EQ(got, FFA_NOTIFICATION_MASK(10));
773
J-Alves96f6e292021-06-08 17:32:40 +0100774 vm_unlock(&current_vm_locked);
775}
776
J-Alvesf31940e2022-03-25 17:24:00 +0000777TEST_F(vm, vm_notifications_info_get_from_framework)
778{
779 struct vm_locked vm_locked = vm_lock(vm_find_index(0));
780 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
781 uint32_t ids_count = 0;
782 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
783 uint32_t lists_count = 0;
784
785 vm_notifications_framework_set_pending(vm_locked, 0x1U);
786
787 /* Get notifications info for the given notifications. */
788 vm_notifications_info_get(vm_locked, ids, &ids_count, lists_sizes,
789 &lists_count,
790 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
791
792 EXPECT_EQ(ids[0], vm_locked.vm->id);
793 EXPECT_EQ(ids_count, 1);
794 EXPECT_EQ(lists_sizes[0], 0);
795 EXPECT_EQ(lists_count, 1);
796
797 EXPECT_EQ(vm_notifications_framework_get_pending(vm_locked), 0x1U);
798
799 vm_unlock(&vm_locked);
800}
801
Daniel Boulby8be26512024-09-03 19:41:11 +0100802/**
803 * Validates simple getting of notifications info for pending IPI.
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000804 * Also checks that vCPUs with pending IPIs are only reported if the
805 * vCPU is in the waiting state.
Daniel Boulby8be26512024-09-03 19:41:11 +0100806 */
807TEST_F(vm, vm_notifications_info_get_ipi)
808{
809 /*
810 * Following set of variables that are also expected to be used when
811 * handling ffa_notification_info_get.
812 */
813 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
814 uint32_t ids_count = 0;
815 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
816 uint32_t lists_count = 0;
817 enum notifications_info_get_state current_state = INIT;
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000818 struct_vm *current_vm = vm_find_index(4);
Daniel Boulby8be26512024-09-03 19:41:11 +0100819 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
820 struct interrupts *interrupts = &target_vcpu->interrupts;
821 const bool is_from_vm = false;
822 struct vm_locked current_vm_locked = vm_lock(current_vm);
823
824 EXPECT_TRUE(current_vm->vcpu_count >= 2);
825
826 vcpu_virt_interrupt_set_pending(interrupts, HF_IPI_INTID);
827
828 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
829 &ids_count, lists_sizes, &lists_count,
830 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
831 &current_state);
832
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000833 EXPECT_EQ(ids_count, 0);
834 EXPECT_EQ(lists_count, 0);
835
836 target_vcpu->state = VCPU_STATE_WAITING;
837
838 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
839 &ids_count, lists_sizes, &lists_count,
840 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
841 &current_state);
842
Daniel Boulby8be26512024-09-03 19:41:11 +0100843 EXPECT_EQ(ids_count, 2);
844 EXPECT_EQ(lists_count, 1);
845 EXPECT_EQ(lists_sizes[0], 1);
846 EXPECT_EQ(ids[0], current_vm->id);
847 EXPECT_EQ(ids[1], 1);
848 EXPECT_EQ(target_vcpu->ipi_info_get_retrieved, true);
849
850 /* Check it is not retrieved multiple times. */
851 current_state = INIT;
852 ids[0] = 0;
853 ids[1] = 0;
854 ids_count = 0;
855 lists_sizes[0] = 0;
856 lists_count = 0;
857
858 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
859 &ids_count, lists_sizes, &lists_count,
860 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
861 &current_state);
862 EXPECT_EQ(ids_count, 0);
863 EXPECT_EQ(lists_count, 0);
864 EXPECT_EQ(lists_sizes[0], 0);
865
866 vm_unlock(&current_vm_locked);
867}
868
/**
 * Validates getting of notifications info for a pending IPI when a
 * notification for the same vCPU is also pending.
 */
873TEST_F(vm, vm_notifications_info_get_ipi_with_per_vcpu)
874{
875 /*
876 * Following set of variables that are also expected to be used when
877 * handling ffa_notification_info_get.
878 */
879 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
880 uint32_t ids_count = 0;
881 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
882 uint32_t lists_count = 0;
883 enum notifications_info_get_state current_state = INIT;
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000884 struct_vm *current_vm = vm_find_index(4);
Daniel Boulby8be26512024-09-03 19:41:11 +0100885 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
886 struct interrupts *interrupts = &target_vcpu->interrupts;
887 const bool is_from_vm = false;
888 struct vm_locked current_vm_locked = vm_lock(current_vm);
889
890 EXPECT_TRUE(current_vm->vcpu_count >= 2);
891
892 vcpu_virt_interrupt_set_pending(interrupts, HF_IPI_INTID);
893
894 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
895 true, 1, true);
896 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
897 &ids_count, lists_sizes, &lists_count,
898 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
899 &current_state);
900
901 EXPECT_EQ(ids_count, 2);
902 EXPECT_EQ(lists_count, 1);
903 EXPECT_EQ(lists_sizes[0], 1);
904 EXPECT_EQ(ids[0], current_vm->id);
905 EXPECT_EQ(ids[1], 1);
906 EXPECT_EQ(target_vcpu->ipi_info_get_retrieved, true);
907
908 /* Reset the state and values. */
909 current_state = INIT;
910 ids[0] = 0;
911 ids[1] = 0;
912 ids_count = 0;
913 lists_sizes[0] = 0;
914 lists_count = 0;
915
916 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
917 &ids_count, lists_sizes, &lists_count,
918 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
919 &current_state);
920 EXPECT_EQ(ids_count, 0);
921 EXPECT_EQ(lists_count, 0);
922 EXPECT_EQ(lists_sizes[0], 0);
923
924 vm_unlock(&current_vm_locked);
925}
926
/**
 * Validate that a mix of a pending IPI and notifications is correctly
 * reported across vCPUs.
 */
931TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus_and_ipi)
932{
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000933 struct_vm *current_vm = vm_find_index(4);
Daniel Boulby8be26512024-09-03 19:41:11 +0100934 ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;
935 CHECK(vcpu_count > 1);
936
937 struct vm_locked current_vm_locked = vm_lock(current_vm);
938
939 /*
940 * Following set of variables that are also expected to be used when
941 * handling ffa_notification_info_get.
942 */
943 const bool is_from_vm = false;
944 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
945 uint32_t ids_count = 0;
946 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
947 uint32_t lists_count = 0;
948 enum notifications_info_get_state current_state = INIT;
949 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 0);
950 struct interrupts *interrupts = &target_vcpu->interrupts;
951
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000952 target_vcpu->state = VCPU_STATE_WAITING;
953
Daniel Boulby8be26512024-09-03 19:41:11 +0100954 vcpu_virt_interrupt_set_pending(interrupts, HF_IPI_INTID);
955
956 for (unsigned int i = 1; i < vcpu_count; i++) {
957 vm_notifications_partition_set_pending(
958 current_vm_locked, is_from_vm, FFA_NOTIFICATION_MASK(i),
959 i, true);
960 }
961
962 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
963 &ids_count, lists_sizes, &lists_count,
964 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
965 &current_state);
966
967 /*
968 * This test has been conceived for the expected MAX_CPUS 4.
969 * All VCPUs have notifications of the same VM, to be broken down in 2
970 * lists with 3 VCPU IDs, and 1 VCPU ID respectively.
971 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
972 */
973 EXPECT_EQ(ids_count, 6U);
974 EXPECT_EQ(lists_count, 2U);
975 EXPECT_EQ(lists_sizes[0], 3);
976 EXPECT_EQ(lists_sizes[1], 1);
977 EXPECT_EQ(ids[0], current_vm->id);
978 EXPECT_EQ(ids[1], 0);
979 EXPECT_EQ(ids[2], 1);
980 EXPECT_EQ(ids[3], 2);
981 EXPECT_EQ(ids[4], current_vm->id);
982 EXPECT_EQ(ids[5], 3);
983
984 vm_unlock(&current_vm_locked);
985}
Andrew Scull3c257452019-11-26 13:32:50 +0000986} /* namespace */