blob: 0896da53db4cbf2830e9aee3c4b9dd1e1195aee7 [file] [log] [blame]
Andrew Scull3c257452019-11-26 13:32:50 +00001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Scull3c257452019-11-26 13:32:50 +00007 */
8
9#include <gmock/gmock.h>
10
11extern "C" {
Karl Meakin07a69ab2025-02-07 14:53:19 +000012#include "hf/arch/mm.h"
13
Daniel Boulby84350712021-11-26 11:13:20 +000014#include "hf/check.h"
J-Alves67f5ba32024-09-27 18:07:11 +010015#include "hf/list.h"
Karl Meakin07a69ab2025-02-07 14:53:19 +000016#include "hf/mm.h"
Andrew Scull3c257452019-11-26 13:32:50 +000017#include "hf/mpool.h"
Madhukar Pappireddya067dc12024-10-16 22:20:44 -050018#include "hf/timer_mgmt.h"
Andrew Scull3c257452019-11-26 13:32:50 +000019#include "hf/vm.h"
20}
21
J-Alvesb37fd082020-10-22 12:29:21 +010022#include <list>
Andrew Scull3c257452019-11-26 13:32:50 +000023#include <memory>
24#include <span>
25#include <vector>
26
27#include "mm_test.hh"
28
/* File-local test helpers and constants. */
namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;

/*
 * The test fixture below is named `vm`, which shadows `struct vm` from
 * "hf/vm.h"; these aliases keep the C types reachable inside the tests.
 */
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

/* Backing heap handed to the page-table mpool (64 pages). */
constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;

/* Index of the top level of the stage-2 page table (root level - 1). */
const mm_level_t TOP_LEVEL = arch_mm_stage2_root_level() - 1;
Andrew Scull3c257452019-11-26 13:32:50 +000043
44class vm : public ::testing::Test
45{
Olivier Deprezd5a54892023-02-02 16:45:59 +010046 protected:
47 static std::unique_ptr<uint8_t[]> test_heap;
48
49 struct mpool ppool;
50
Andrew Scull3c257452019-11-26 13:32:50 +000051 void SetUp() override
52 {
Olivier Deprezd5a54892023-02-02 16:45:59 +010053 if (!test_heap) {
54 /*
55 * TODO: replace with direct use of stdlib allocator so
56 * sanitizers are more effective.
57 */
58 test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
59 mpool_init(&ppool, sizeof(struct mm_page_table));
60 mpool_add_chunk(&ppool, test_heap.get(),
61 TEST_HEAP_SIZE);
62 }
Andrew Scull3c257452019-11-26 13:32:50 +000063 }
64
J-Alvesb37fd082020-10-22 12:29:21 +010065 public:
J-Alvesbeeb6dc2021-12-08 18:21:32 +000066 static bool BootOrderSmallerThan(struct_vm *vm1, struct_vm *vm2)
J-Alvesb37fd082020-10-22 12:29:21 +010067 {
J-Alvesbeeb6dc2021-12-08 18:21:32 +000068 return vm1->boot_order < vm2->boot_order;
J-Alvesb37fd082020-10-22 12:29:21 +010069 }
Andrew Scull3c257452019-11-26 13:32:50 +000070};
71
Olivier Deprezd5a54892023-02-02 16:45:59 +010072std::unique_ptr<uint8_t[]> vm::test_heap;
73
Andrew Scull3c257452019-11-26 13:32:50 +000074/**
75 * If nothing is mapped, unmapping the hypervisor has no effect.
76 */
77TEST_F(vm, vm_unmap_hypervisor_not_mapped)
78{
79 struct_vm *vm;
80 struct vm_locked vm_locked;
81
Olivier Deprez878bd5b2021-04-15 19:05:10 +020082 /* TODO: check ptable usage (security state?) */
Madhukar Pappireddy070f49e2024-01-12 13:02:27 -060083 EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false, 0));
Andrew Scull3c257452019-11-26 13:32:50 +000084 vm_locked = vm_lock(vm);
Raghu Krishnamurthy0132b512021-02-03 14:13:26 -080085 ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
Andrew Scull3c257452019-11-26 13:32:50 +000086 EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
87 EXPECT_THAT(
88 mm_test::get_ptable(vm->ptable),
89 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
90 mm_vm_fini(&vm->ptable, &ppool);
91 vm_unlock(&vm_locked);
92}
93
/**
 * Validate the "boot_list" is created properly, according to vm's "boot_order"
 * field.
 *
 * Exercises insertion into an empty list, at the head, in the middle and at
 * the tail, then walks the list via vm_get_next_boot() and compares it
 * against a reference std::list sorted by boot_order.
 */
TEST_F(vm, vm_boot_order)
{
	struct_vm *vm_cur;
	struct_vm *vm;
	std::list<struct_vm *> expected_final_order;

	/*
	 * Insertion when no call to "vm_update_boot" has been made yet.
	 * The "boot_list" is expected to be empty.
	 */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
	vm_cur->boot_order = 3;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	/* The sole element must be at the head of the boot list. */
	EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);

	/* Insertion at the head of the boot list */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
	vm_cur->boot_order = 1;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);

	/* Insertion of two in the middle of the boot list */
	for (uint32_t i = 0; i < 2; i++) {
		EXPECT_TRUE(vm_init_next(MAX_CPUS, &ppool, &vm_cur, false, 0));
		vm_cur->boot_order = 2;
		vm_update_boot(vm_cur);
		expected_final_order.push_back(vm_cur);
	}

	/*
	 * Insertion in the end of the list.
	 * This test shares the data with "vm_unmap_hypervisor_not_mapped".
	 * As such, a VM is expected to have been initialized before this
	 * test, with ID 1 and boot_order 0.
	 */
	vm_cur = vm_find(1);
	EXPECT_FALSE(vm_cur == NULL);
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	/*
	 * Number of VMs initialized should be the same as in the
	 * "expected_final_order", before the final verification.
	 */
	EXPECT_EQ(expected_final_order.size(), vm_get_count())
		<< "Something went wrong with the test itself...\n";

	/*
	 * Sort VMs from lower to higher "boot_order" field. std::list::sort
	 * is stable, so VMs sharing a boot_order keep insertion order.
	 */
	expected_final_order.sort(vm::BootOrderSmallerThan);

	/* Walk the real boot list in lock-step with the reference list. */
	std::list<struct_vm *>::iterator it;
	vm = vm_get_boot_vm();
	for (it = expected_final_order.begin();
	     it != expected_final_order.end(); it++) {
		EXPECT_TRUE(vm != NULL);
		EXPECT_EQ((*it)->id, vm->id);
		vm = vm_get_next_boot(vm);
	}
}
J-Alves60eaff92021-05-27 14:54:41 +0100161
Madhukar Pappireddya067dc12024-10-16 22:20:44 -0500162TEST_F(vm, vcpu_arch_timer)
163{
164 const cpu_id_t cpu_ids[2] = {0, 1};
165 struct_vcpu *vm0_vcpu;
166 struct_vcpu *vm1_vcpu;
167 struct_vcpu *deadline_vcpu;
168 struct_vcpu *target_vcpu;
169 struct vcpu_locked vcpu_locked;
170 struct cpu *cpu0;
171 struct cpu *cpu1;
172
173 /* Initialie CPU module with two physical CPUs. */
174 cpu_module_init(cpu_ids, 2);
175 cpu0 = cpu_find_index(0);
176 cpu1 = cpu_find_index(1);
177
178 /* Two UP endpoints are deployed for this test. */
179 CHECK(vm_get_count() >= 2);
180 vm0_vcpu = vm_get_vcpu(vm_find_index(0), 0);
181 vm1_vcpu = vm_get_vcpu(vm_find_index(1), 0);
182
183 /* The execution context of each VM is scheduled on CPU0. */
184 vm0_vcpu->cpu = cpu0;
185 vm1_vcpu->cpu = cpu0;
186
187 /*
188 * Enable the timer peripheral for each vCPU and setup an arbitraty
189 * countdown value.
190 */
191 vm0_vcpu->regs.arch_timer.cval = 555555;
192 vm1_vcpu->regs.arch_timer.cval = 999999;
193 vm0_vcpu->regs.arch_timer.ctl = 1;
194 vm1_vcpu->regs.arch_timer.ctl = 1;
195
196 /* No vCPU is being tracked through either timer list. */
197 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
198 EXPECT_TRUE(deadline_vcpu == NULL);
199 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu1);
200 EXPECT_TRUE(deadline_vcpu == NULL);
201
202 /* vCPU of VM0 and VM1 are being added to the list. */
203 timer_vcpu_manage(vm0_vcpu);
204 timer_vcpu_manage(vm1_vcpu);
205
206 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
207 EXPECT_EQ(deadline_vcpu, vm0_vcpu);
208
209 /* Remove one of the vCPUs from the CPU0 list. */
210 vm0_vcpu->regs.arch_timer.cval = 0;
211 vm0_vcpu->regs.arch_timer.ctl = 0;
212 timer_vcpu_manage(vm0_vcpu);
213
214 /* This leaves one vCPU entry on CPU0 list. */
215 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
216 EXPECT_EQ(deadline_vcpu, vm1_vcpu);
217
218 /* Attempt to migrate VM1 vCPU from CPU0 to CPU1. */
219 vcpu_locked = vcpu_lock(vm1_vcpu);
220 timer_migrate_to_other_cpu(cpu1, vcpu_locked);
221 vcpu_unlock(&vcpu_locked);
222
223 /*
224 * After migration, ensure the list is empty on CPU0 but non-empty on
225 * CPU1.
226 */
227 deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
228 EXPECT_TRUE(deadline_vcpu == NULL);
229
230 /*
231 * vCPU of VM1 is now running on CPU1. It must be the target vCPU when
232 * the timer has expired.
233 */
234 target_vcpu = timer_find_target_vcpu(vm1_vcpu);
235 EXPECT_EQ(target_vcpu, vm1_vcpu);
236}
237
J-Alves60eaff92021-05-27 14:54:41 +0100238/**
239 * Validates updates and check functions for binding notifications to endpoints.
240 */
241TEST_F(vm, vm_notifications_bind_diff_senders)
242{
J-Alvesd3e81622021-10-05 14:55:57 +0100243 struct_vm *current_vm = nullptr;
244 struct vm_locked current_vm_locked;
J-Alves60eaff92021-05-27 14:54:41 +0100245 std::vector<struct_vm *> dummy_senders;
246 ffa_notifications_bitmap_t bitmaps[] = {
247 0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
248 bool is_from_vm = true;
249
250 /* For the subsequent tests three VMs are used. */
251 CHECK(vm_get_count() >= 3);
252
J-Alvesd3e81622021-10-05 14:55:57 +0100253 current_vm = vm_find_index(0);
J-Alves60eaff92021-05-27 14:54:41 +0100254
255 dummy_senders.push_back(vm_find_index(1));
256 dummy_senders.push_back(vm_find_index(2));
257
J-Alvesd3e81622021-10-05 14:55:57 +0100258 current_vm_locked = vm_lock(current_vm);
J-Alves60eaff92021-05-27 14:54:41 +0100259
260 for (unsigned int i = 0; i < 2; i++) {
261 /* Validate bindings condition after initialization. */
262 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100263 current_vm_locked, is_from_vm, HF_INVALID_VM_ID,
264 bitmaps[i], false));
J-Alves60eaff92021-05-27 14:54:41 +0100265
266 /*
267 * Validate bind related operations. For this test considering
268 * only global notifications.
269 */
J-Alvesd3e81622021-10-05 14:55:57 +0100270 vm_notifications_update_bindings(current_vm_locked, is_from_vm,
J-Alves60eaff92021-05-27 14:54:41 +0100271 dummy_senders[i]->id,
272 bitmaps[i], false);
273
274 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100275 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100276 bitmaps[i], false));
277
278 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100279 current_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100280 bitmaps[i], false));
281
282 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100283 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100284 bitmaps[1 - i], false));
285
286 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100287 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100288 bitmaps[2], false));
289 }
290
291 /** Clean up bind for other tests. */
J-Alvesd3e81622021-10-05 14:55:57 +0100292 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
J-Alves60eaff92021-05-27 14:54:41 +0100293 bitmaps[0], false);
J-Alvesd3e81622021-10-05 14:55:57 +0100294 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
J-Alves60eaff92021-05-27 14:54:41 +0100295 bitmaps[1], false);
296
J-Alvesd3e81622021-10-05 14:55:57 +0100297 vm_unlock(&current_vm_locked);
J-Alves60eaff92021-05-27 14:54:41 +0100298}
299
/**
 * Validates updates and check functions for binding notifications, namely the
 * configuration of bindings of global and per-vCPU notifications.
 */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	/* Lower 32 bits bound as global, upper 32 bits bound as per-vCPU. */
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);

	dummy_sender = vm_find_index(1);

	current_vm_locked = vm_lock(current_vm);

	/* Bind the two halves of the bitmap with different per-vCPU flags. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/* Check validation of global notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global,
		false));

	/* Check validation of per-vCPU notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		true));

	/*
	 * Check that global notifications are not validated as per-vCPU, and
	 * vice-versa. A mixed bitmap must fail under either flag.
	 */
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		false));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, false));

	/* Undo the bindings (sender 0 means unbound). */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 global, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, global, false));

	/*
	 * NOTE(review): the per-vCPU bitmap is unbound with is_per_vcpu set
	 * to false — presumably unbinding resets the per-vCPU flag as well;
	 * confirm against vm_notifications_update_bindings.
	 */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 per_vcpu, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, per_vcpu, false));

	vm_unlock(&current_vm_locked);
}
365
/**
 * Validates accesses to notifications bitmaps.
 *
 * Covers set/get of global and per-vCPU pending bitmaps, the global pending
 * count tracking, idempotence of a duplicate set, and that a per-vCPU get
 * also drains the global bitmap.
 */
TEST_F(vm, vm_notifications_set_and_get)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	/* Lower 32 bits used as global, upper 32 bits as per-vCPU. */
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	ffa_notifications_bitmap_t ret;
	const unsigned int vcpu_idx = 0;
	struct notifications *notifications;
	const bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);
	dummy_sender = vm_find_index(1);

	/* Inspect the receiver's from-VM bitmaps directly. */
	notifications = &current_vm->notifications.from_vm;
	current_vm_locked = vm_lock(current_vm);

	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/*
	 * Validate get notifications bitmap for global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);

	EXPECT_EQ(notifications->global.pending, global);

	/* Counter should track pending notifications. */
	EXPECT_FALSE(vm_is_notifications_pending_count_zero());

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0ull);
	EXPECT_EQ(ret, global);
	EXPECT_EQ(notifications->global.pending, 0ull);

	/*
	 * After getting the pending notifications, the pending count should
	 * be zeroed.
	 */
	EXPECT_TRUE(vm_is_notifications_pending_count_zero());

	/*
	 * Validate get notifications bitmap for per-vCPU notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	/*
	 * Duplicate call to check that the state of the counters doesn't alter
	 * because of it.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	EXPECT_FALSE(vm_is_notifications_pending_count_zero());

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
	EXPECT_TRUE(vm_is_notifications_pending_count_zero());

	/*
	 * Validate that getting notifications for a specific vCPU also returns
	 * global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);
	EXPECT_FALSE(vm_is_notifications_pending_count_zero());

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu | global);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
	EXPECT_EQ(notifications->global.pending, 0ull);
	EXPECT_TRUE(vm_is_notifications_pending_count_zero());

	/* Undo the binding */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 per_vcpu, true);
	vm_unlock(&current_vm_locked);
}
462
J-Alves96f6e292021-06-08 17:32:40 +0100463/**
464 * Validates simple getting of notifications info for global notifications.
465 */
466TEST_F(vm, vm_notifications_info_get_global)
467{
468 ffa_notifications_bitmap_t to_set = 0xFU;
469 ffa_notifications_bitmap_t got;
470
471 /**
472 * Following set of variables that are also expected to be used when
473 * handling FFA_NOTIFICATION_INFO_GET.
474 */
475 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
476 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
477 uint32_t ids_count = 0;
478 uint32_t lists_count = 0;
479 enum notifications_info_get_state current_state = INIT;
480
481 CHECK(vm_get_count() >= 2);
482
483 for (unsigned int i = 0; i < 2; i++) {
484 struct_vm *current_vm = vm_find_index(0);
485 struct vm_locked current_vm_locked = vm_lock(current_vm);
486 struct notifications *notifications =
487 &current_vm->notifications.from_sp;
488 const bool is_from_vm = false;
489
J-Alves5a16c962022-03-25 12:32:51 +0000490 vm_notifications_partition_set_pending(
491 current_vm_locked, is_from_vm, to_set, 0, false);
J-Alves96f6e292021-06-08 17:32:40 +0100492
493 vm_notifications_info_get_pending(
494 current_vm_locked, is_from_vm, ids, &ids_count,
495 lists_sizes, &lists_count,
496 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);
497
498 /*
499 * Here the number of IDs and list count should be the same.
500 * As we are testing with Global notifications, this is
501 * expected.
502 */
503 EXPECT_EQ(ids_count, i + 1);
504 EXPECT_EQ(lists_count, i + 1);
505 EXPECT_EQ(lists_sizes[i], 0);
506 EXPECT_EQ(to_set, notifications->global.info_get_retrieved);
507
508 /* Action must be reset to initial state for each VM. */
509 current_state = INIT;
510
511 /*
512 * Check that getting pending notifications gives the expected
513 * return and cleans the 'pending' and 'info_get_retrieved'
514 * bitmaps.
515 */
J-Alves5136dda2022-03-25 12:26:38 +0000516 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100517 is_from_vm, 0);
518 EXPECT_EQ(got, to_set);
519
520 EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
521 EXPECT_EQ(notifications->global.pending, 0U);
522
523 vm_unlock(&current_vm_locked);
524 }
525}
526
527/**
528 * Validates simple getting of notifications info for per-vCPU notifications.
529 */
530TEST_F(vm, vm_notifications_info_get_per_vcpu)
531{
532 const ffa_notifications_bitmap_t per_vcpu = 0xFU;
533 ffa_notifications_bitmap_t got;
534
535 /*
536 * Following set of variables that are also expected to be used when
537 * handling ffa_notification_info_get.
538 */
539 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
540 uint32_t ids_count = 0;
541 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
542 uint32_t lists_count = 0;
543 enum notifications_info_get_state current_state = INIT;
544
545 CHECK(vm_get_count() >= 2);
546
547 for (unsigned int i = 0; i < 2; i++) {
548 struct_vm *current_vm = vm_find_index(0);
549 struct vm_locked current_vm_locked = vm_lock(current_vm);
550 struct notifications *notifications =
551 &current_vm->notifications.from_sp;
552 const bool is_from_vm = false;
553
J-Alves5a16c962022-03-25 12:32:51 +0000554 vm_notifications_partition_set_pending(
555 current_vm_locked, is_from_vm, per_vcpu, 0, true);
J-Alves96f6e292021-06-08 17:32:40 +0100556
557 vm_notifications_info_get_pending(
558 current_vm_locked, is_from_vm, ids, &ids_count,
559 lists_sizes, &lists_count,
560 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);
561
562 /*
563 * Here the number of IDs and list count should be the same.
564 * As we are testing with Global notifications, this is
565 * expected.
566 */
567 EXPECT_EQ(ids_count, (i + 1) * 2);
568 EXPECT_EQ(lists_count, i + 1);
569 EXPECT_EQ(lists_sizes[i], 1);
570 EXPECT_EQ(per_vcpu,
571 notifications->per_vcpu[0].info_get_retrieved);
572
573 /* Action must be reset to initial state for each VM. */
574 current_state = INIT;
575
576 /*
577 * Check that getting pending notifications gives the expected
578 * return and cleans the 'pending' and 'info_get_retrieved'
579 * bitmaps.
580 */
J-Alves5136dda2022-03-25 12:26:38 +0000581 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100582 is_from_vm, 0);
583 EXPECT_EQ(got, per_vcpu);
584
585 EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
586 EXPECT_EQ(notifications->per_vcpu[0].pending, 0U);
587
588 vm_unlock(&current_vm_locked);
589 }
590}
591
/**
 * Validate getting of notifications information if all VCPUs have notifications
 * pending.
 *
 * Sets a distinct per-vCPU notification on every vCPU of a freshly created
 * MAX_CPUS-vCPU VM plus one global bitmap, then checks how the IDs are packed
 * into FFA_NOTIFICATION_INFO_GET-style lists.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus)
{
	struct_vm *current_vm = nullptr;
	struct vm_locked current_vm_locked;
	const ffa_vcpu_count_t vcpu_count = MAX_CPUS;
	ffa_notifications_bitmap_t got;
	const ffa_notifications_bitmap_t global = 0xF0000;

	/*
	 * Following set of variables that are also expected to be used when
	 * handling ffa_notification_info_get.
	 * NOTE(review): 'is_from_sp' is false because these are the
	 * receiver's from-SP bitmaps and the flag means "is from VM".
	 */
	struct notifications *notifications;
	const bool is_from_sp = false;
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false, 0));
	current_vm_locked = vm_lock(current_vm);
	notifications = &current_vm->notifications.from_sp;

	/* One distinct pending notification per vCPU. */
	for (unsigned int i = 0; i < vcpu_count; i++) {
		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_sp, FFA_NOTIFICATION_MASK(i),
			i, true);
	}

	/*
	 * Adding a global notification should not change the list of IDs,
	 * because global notifications only require the VM ID to be included in
	 * the list, at least once.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_sp,
					       global, 0, false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_sp, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * This test has been conceived for the expected MAX_CPUS 4.
	 * All VCPUs have notifications of the same VM, to be broken down in 2
	 * lists with 3 VCPU IDs, and 1 VCPU ID respectively.
	 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
	 */
	CHECK(MAX_CPUS == 4);
	EXPECT_EQ(ids_count, 6U);
	EXPECT_EQ(lists_count, 2U);
	EXPECT_EQ(lists_sizes[0], 3);
	EXPECT_EQ(lists_sizes[1], 1);

	for (unsigned int i = 0; i < vcpu_count; i++) {
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_sp, i);

		/*
		 * The first call to
		 * vm_notifications_partition_get_pending should also
		 * include the global notifications on the return.
		 */
		ffa_notifications_bitmap_t to_check =
			(i != 0) ? FFA_NOTIFICATION_MASK(i)
				 : FFA_NOTIFICATION_MASK(i) | global;

		EXPECT_EQ(got, to_check);

		EXPECT_EQ(notifications->per_vcpu[i].pending, 0);
		EXPECT_EQ(notifications->per_vcpu[i].info_get_retrieved, 0);
	}

	vm_unlock(&current_vm_locked);
}
672
/**
 * Validate change of state from 'vm_notifications_info_get_pending', when the
 * list of IDs is full.
 *
 * Starts with an almost-full IDs array: one free slot is not enough for a
 * per-vCPU entry (which needs VM ID + vCPU ID) but is enough for a global
 * entry (VM ID only).
 */
TEST_F(vm, vm_notifications_info_get_full_per_vcpu)
{
	struct_vm *current_vm = vm_find_index(0);
	struct vm_locked current_vm_locked = vm_lock(current_vm);
	struct notifications *notifications =
		&current_vm->notifications.from_sp;
	const bool is_from_vm = false;
	ffa_notifications_bitmap_t got = 0;

	/*
	 * Following set of variables that are also expected to be used when
	 * handling ffa_notification_info_get.
	 * For this 'ids_count' has been initialized such that it indicates
	 * there is no space in the list for a per-vCPU notification (VM ID and
	 * VCPU ID).
	 * NOTE(review): 'lists_count' starts at an arbitrary 10 — presumably
	 * its absolute value is irrelevant to the assertions below; confirm.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 10;
	enum notifications_info_get_state current_state = INIT;
	CHECK(vm_get_count() >= 2);

	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(1), 0,
					       true);

	/* Call function to get notifications info, with only per-vCPU set. */
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Verify that as soon as there isn't space to do the required
	 * insertion in the list, the
	 * 'vm_notifications_partition_get_pending' returns and changes
	 * list state to FULL. In this case returning, because it would need to
	 * add two IDs (VM ID and VCPU ID).
	 */
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1);
	EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);

	/*
	 * At this point there is still room for the information of a global
	 * notification (only VM ID to be added). Reset 'current_state'
	 * for the insertion to happen at the last position of the array.
	 */
	current_state = INIT;

	/* Setting global notification */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(2), 0,
					       false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Now List must be full, the set global notification must be part of
	 * 'info_get_retrieved', and the 'current_state' should be set to FULL
	 * due to the pending per-vCPU notification in VCPU 0.
	 */
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(notifications->global.info_get_retrieved,
		  FFA_NOTIFICATION_MASK(2));

	/* Draining pending state returns both notifications at once. */
	got = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0);
	EXPECT_EQ(got, FFA_NOTIFICATION_MASK(1) | FFA_NOTIFICATION_MASK(2));

	vm_unlock(&current_vm_locked);
}
754
755TEST_F(vm, vm_notifications_info_get_full_global)
756{
757 struct_vm *current_vm = vm_find_index(0);
758 struct vm_locked current_vm_locked = vm_lock(current_vm);
759 ffa_notifications_bitmap_t got;
760 struct notifications *notifications;
761 const bool is_from_vm = false;
762 /*
763 * Following set of variables that are also expected to be used when
764 * handling ffa_notification_info_get.
765 * For this 'ids_count' has been initialized such that it indicates
766 * there is no space in the list for a global notification (VM ID only).
767 */
768 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
769 uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
770 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
771 uint32_t lists_count = 10;
772 enum notifications_info_get_state current_state = INIT;
773
774 CHECK(vm_get_count() >= 1);
775
776 current_vm = vm_find_index(0);
777
778 notifications = &current_vm->notifications.from_sp;
779
780 /* Set global notification. */
J-Alves5a16c962022-03-25 12:32:51 +0000781 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
782 FFA_NOTIFICATION_MASK(10), 0,
783 false);
J-Alves96f6e292021-06-08 17:32:40 +0100784
785 /* Get notifications info for the given notifications. */
786 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
787 &ids_count, lists_sizes, &lists_count,
788 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
789 &current_state);
790
791 /* Expect 'info_get_retrieved' bitmap to be 0. */
792 EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
793 EXPECT_EQ(notifications->global.pending, FFA_NOTIFICATION_MASK(10));
794 EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
795 EXPECT_EQ(current_state, FULL);
796
J-Alves5136dda2022-03-25 12:26:38 +0000797 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100798 is_from_vm, 0);
J-Alves9f74b932021-10-11 14:20:05 +0100799 EXPECT_EQ(got, FFA_NOTIFICATION_MASK(10));
800
J-Alves96f6e292021-06-08 17:32:40 +0100801 vm_unlock(&current_vm_locked);
802}
803
J-Alvesf31940e2022-03-25 17:24:00 +0000804TEST_F(vm, vm_notifications_info_get_from_framework)
805{
806 struct vm_locked vm_locked = vm_lock(vm_find_index(0));
807 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
808 uint32_t ids_count = 0;
809 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
810 uint32_t lists_count = 0;
811
812 vm_notifications_framework_set_pending(vm_locked, 0x1U);
813
814 /* Get notifications info for the given notifications. */
815 vm_notifications_info_get(vm_locked, ids, &ids_count, lists_sizes,
816 &lists_count,
817 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
818
819 EXPECT_EQ(ids[0], vm_locked.vm->id);
820 EXPECT_EQ(ids_count, 1);
821 EXPECT_EQ(lists_sizes[0], 0);
822 EXPECT_EQ(lists_count, 1);
823
824 EXPECT_EQ(vm_notifications_framework_get_pending(vm_locked), 0x1U);
825
826 vm_unlock(&vm_locked);
827}
828
Daniel Boulby8be26512024-09-03 19:41:11 +0100829/**
830 * Validates simple getting of notifications info for pending IPI.
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000831 * Also checks that vCPUs with pending IPIs are only reported if the
832 * vCPU is in the waiting state.
Daniel Boulby8be26512024-09-03 19:41:11 +0100833 */
834TEST_F(vm, vm_notifications_info_get_ipi)
835{
836 /*
837 * Following set of variables that are also expected to be used when
838 * handling ffa_notification_info_get.
839 */
840 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
841 uint32_t ids_count = 0;
842 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
843 uint32_t lists_count = 0;
844 enum notifications_info_get_state current_state = INIT;
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000845 struct_vm *current_vm = vm_find_index(4);
Daniel Boulby8be26512024-09-03 19:41:11 +0100846 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000847 struct vcpu_locked vcpu_locked;
Daniel Boulby8be26512024-09-03 19:41:11 +0100848 const bool is_from_vm = false;
849 struct vm_locked current_vm_locked = vm_lock(current_vm);
850
851 EXPECT_TRUE(current_vm->vcpu_count >= 2);
852
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000853 vcpu_locked = vcpu_lock(target_vcpu);
854 vcpu_virt_interrupt_inject(vcpu_locked, HF_IPI_INTID);
J-Alves0cbd7a32025-02-10 17:29:15 +0000855 vcpu_virt_interrupt_enable(vcpu_locked, HF_IPI_INTID, true);
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000856 vcpu_unlock(&vcpu_locked);
Daniel Boulby8be26512024-09-03 19:41:11 +0100857
858 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
859 &ids_count, lists_sizes, &lists_count,
860 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
861 &current_state);
862
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000863 EXPECT_EQ(ids_count, 0);
864 EXPECT_EQ(lists_count, 0);
865
866 target_vcpu->state = VCPU_STATE_WAITING;
867
868 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
869 &ids_count, lists_sizes, &lists_count,
870 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
871 &current_state);
872
Daniel Boulby8be26512024-09-03 19:41:11 +0100873 EXPECT_EQ(ids_count, 2);
874 EXPECT_EQ(lists_count, 1);
875 EXPECT_EQ(lists_sizes[0], 1);
876 EXPECT_EQ(ids[0], current_vm->id);
877 EXPECT_EQ(ids[1], 1);
J-Alves0cbd7a32025-02-10 17:29:15 +0000878 EXPECT_EQ(target_vcpu->interrupts_info_get_retrieved, true);
Daniel Boulby8be26512024-09-03 19:41:11 +0100879
880 /* Check it is not retrieved multiple times. */
881 current_state = INIT;
882 ids[0] = 0;
883 ids[1] = 0;
884 ids_count = 0;
885 lists_sizes[0] = 0;
886 lists_count = 0;
887
888 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
889 &ids_count, lists_sizes, &lists_count,
890 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
891 &current_state);
892 EXPECT_EQ(ids_count, 0);
893 EXPECT_EQ(lists_count, 0);
894 EXPECT_EQ(lists_sizes[0], 0);
895
J-Alves0cbd7a32025-02-10 17:29:15 +0000896 vcpu_locked = vcpu_lock(target_vcpu);
897
898 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
899 HF_IPI_INTID);
900 EXPECT_FALSE(vcpu_locked.vcpu->interrupts_info_get_retrieved);
901
902 vcpu_unlock(&vcpu_locked);
903
Daniel Boulby8be26512024-09-03 19:41:11 +0100904 vm_unlock(&current_vm_locked);
905}
906
907/**
908 * Validates simple getting of notifications info for pending with IPI when
909 * notification for the same vcpu is also pending.
910 */
911TEST_F(vm, vm_notifications_info_get_ipi_with_per_vcpu)
912{
913 /*
914 * Following set of variables that are also expected to be used when
915 * handling ffa_notification_info_get.
916 */
917 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
918 uint32_t ids_count = 0;
919 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
920 uint32_t lists_count = 0;
921 enum notifications_info_get_state current_state = INIT;
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000922 struct_vm *current_vm = vm_find_index(4);
Daniel Boulby8be26512024-09-03 19:41:11 +0100923 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000924 struct vcpu_locked vcpu_locked;
Daniel Boulby8be26512024-09-03 19:41:11 +0100925 const bool is_from_vm = false;
926 struct vm_locked current_vm_locked = vm_lock(current_vm);
927
928 EXPECT_TRUE(current_vm->vcpu_count >= 2);
929
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000930 vcpu_locked = vcpu_lock(target_vcpu);
931 vcpu_virt_interrupt_inject(vcpu_locked, HF_IPI_INTID);
J-Alves0cbd7a32025-02-10 17:29:15 +0000932 vcpu_virt_interrupt_enable(vcpu_locked, HF_IPI_INTID, true);
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000933 vcpu_unlock(&vcpu_locked);
Daniel Boulby8be26512024-09-03 19:41:11 +0100934
935 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
936 true, 1, true);
937 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
938 &ids_count, lists_sizes, &lists_count,
939 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
940 &current_state);
941
942 EXPECT_EQ(ids_count, 2);
943 EXPECT_EQ(lists_count, 1);
944 EXPECT_EQ(lists_sizes[0], 1);
945 EXPECT_EQ(ids[0], current_vm->id);
946 EXPECT_EQ(ids[1], 1);
J-Alves0cbd7a32025-02-10 17:29:15 +0000947 EXPECT_EQ(target_vcpu->interrupts_info_get_retrieved, true);
Daniel Boulby8be26512024-09-03 19:41:11 +0100948
949 /* Reset the state and values. */
950 current_state = INIT;
951 ids[0] = 0;
952 ids[1] = 0;
953 ids_count = 0;
954 lists_sizes[0] = 0;
955 lists_count = 0;
956
957 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
958 &ids_count, lists_sizes, &lists_count,
959 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
960 &current_state);
961 EXPECT_EQ(ids_count, 0);
962 EXPECT_EQ(lists_count, 0);
963 EXPECT_EQ(lists_sizes[0], 0);
964
J-Alves0cbd7a32025-02-10 17:29:15 +0000965 vcpu_locked = vcpu_lock(target_vcpu);
966 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
967 HF_IPI_INTID);
968 vcpu_unlock(&vcpu_locked);
969
Daniel Boulby8be26512024-09-03 19:41:11 +0100970 vm_unlock(&current_vm_locked);
971}
972
973/**
 974 * Validate that a mix of a pending IPI and notifications is correctly
 975 * reported across vcpus.
 976 */
977TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus_and_ipi)
978{
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000979 struct_vm *current_vm = vm_find_index(4);
Daniel Boulby8be26512024-09-03 19:41:11 +0100980 ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;
981 CHECK(vcpu_count > 1);
982
983 struct vm_locked current_vm_locked = vm_lock(current_vm);
984
985 /*
986 * Following set of variables that are also expected to be used when
987 * handling ffa_notification_info_get.
988 */
989 const bool is_from_vm = false;
990 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
991 uint32_t ids_count = 0;
992 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
993 uint32_t lists_count = 0;
994 enum notifications_info_get_state current_state = INIT;
995 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 0);
Daniel Boulby3c1506b2025-02-25 10:49:51 +0000996 struct vcpu_locked vcpu_locked;
Daniel Boulby8be26512024-09-03 19:41:11 +0100997
Daniel Boulby6c2aa332024-11-13 13:54:08 +0000998 target_vcpu->state = VCPU_STATE_WAITING;
999
Daniel Boulby3c1506b2025-02-25 10:49:51 +00001000 vcpu_locked = vcpu_lock(target_vcpu);
1001 vcpu_virt_interrupt_inject(vcpu_locked, HF_IPI_INTID);
J-Alves0cbd7a32025-02-10 17:29:15 +00001002 vcpu_virt_interrupt_enable(vcpu_locked, HF_IPI_INTID, true);
Daniel Boulby3c1506b2025-02-25 10:49:51 +00001003 vcpu_unlock(&vcpu_locked);
Daniel Boulby8be26512024-09-03 19:41:11 +01001004
1005 for (unsigned int i = 1; i < vcpu_count; i++) {
1006 vm_notifications_partition_set_pending(
1007 current_vm_locked, is_from_vm, FFA_NOTIFICATION_MASK(i),
1008 i, true);
1009 }
1010
1011 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
1012 &ids_count, lists_sizes, &lists_count,
1013 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
1014 &current_state);
1015
1016 /*
1017 * This test has been conceived for the expected MAX_CPUS 4.
1018 * All VCPUs have notifications of the same VM, to be broken down in 2
1019 * lists with 3 VCPU IDs, and 1 VCPU ID respectively.
1020 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
1021 */
1022 EXPECT_EQ(ids_count, 6U);
1023 EXPECT_EQ(lists_count, 2U);
1024 EXPECT_EQ(lists_sizes[0], 3);
1025 EXPECT_EQ(lists_sizes[1], 1);
1026 EXPECT_EQ(ids[0], current_vm->id);
1027 EXPECT_EQ(ids[1], 0);
1028 EXPECT_EQ(ids[2], 1);
1029 EXPECT_EQ(ids[3], 2);
1030 EXPECT_EQ(ids[4], current_vm->id);
1031 EXPECT_EQ(ids[5], 3);
1032
J-Alves0cbd7a32025-02-10 17:29:15 +00001033 vcpu_locked = vcpu_lock(target_vcpu);
1034 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
1035 HF_IPI_INTID);
1036 vcpu_unlock(&vcpu_locked);
1037
Daniel Boulby8be26512024-09-03 19:41:11 +01001038 vm_unlock(&current_vm_locked);
1039}
J-Alves0cbd7a32025-02-10 17:29:15 +00001040
TEST_F(vm, pending_interrupts_info_retrieved)
{
	struct_vm *test_vm = vm_find_index(4);
	struct_vcpu *vcpu = vm_get_vcpu(test_vm, 1);
	const uint32_t intid = HF_NUM_INTIDS - 2;
	struct vm_locked test_vm_locked;
	struct vcpu_locked vcpu_locked;

	/*
	 * Following set of variables that are also expected to be used when
	 * handling ffa_notification_info_get. All counters start at zero so
	 * there is room in the list for the VM ID and vCPU ID pair.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	/*
	 * Make it such that the FF-A ID and vCPU ID are included in the list,
	 * when invoking notification info get.
	 */
	test_vm->sri_policy.intr_while_waiting = true;

	vcpu_locked = vcpu_lock(vcpu);

	/* Check this is starting from a clean state. */
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
	EXPECT_FALSE(vcpu->interrupts_info_get_retrieved);

	/* Enable the interrupt and make it pending. */
	vcpu_virt_interrupt_enable(vcpu_locked, intid, true);

	vcpu_virt_interrupt_inject(vcpu_locked, intid);

	vcpu->state = VCPU_STATE_WAITING;

	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/* Free resource. */
	vcpu_unlock(&vcpu_locked);

	test_vm_locked = vm_lock(test_vm);

	vm_notifications_info_get_pending(test_vm_locked, true, ids, &ids_count,
					  lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/* Assert the information flag has been retrieved. */
	EXPECT_TRUE(vcpu->interrupts_info_get_retrieved);

	vm_unlock(&test_vm_locked);

	/* Pop to clear test and attest intid is returned. */
	vcpu_locked = vcpu_lock(vcpu);

	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  intid);

	/* Popping the interrupt clears the info-get-retrieved flag. */
	EXPECT_FALSE(vcpu_locked.vcpu->interrupts_info_get_retrieved);

	vcpu_unlock(&vcpu_locked);
}
Andrew Scull3c257452019-11-26 13:32:50 +00001109} /* namespace */