blob: 2fec26d17253f6a3a61693b26376dd141c1b3e01 [file] [log] [blame]
Andrew Scull3c257452019-11-26 13:32:50 +00001/*
2 * Copyright 2019 The Hafnium Authors.
3 *
Andrew Walbrane959ec12020-06-17 15:01:09 +01004 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
Andrew Scull3c257452019-11-26 13:32:50 +00007 */
8
9#include <gmock/gmock.h>
10
11extern "C" {
Daniel Boulby84350712021-11-26 11:13:20 +000012#include "hf/check.h"
J-Alves67f5ba32024-09-27 18:07:11 +010013#include "hf/list.h"
Andrew Scull3c257452019-11-26 13:32:50 +000014#include "hf/mpool.h"
Madhukar Pappireddya067dc12024-10-16 22:20:44 -050015#include "hf/timer_mgmt.h"
Andrew Scull3c257452019-11-26 13:32:50 +000016#include "hf/vm.h"
17}
18
J-Alvesb37fd082020-10-22 12:29:21 +010019#include <list>
Andrew Scull3c257452019-11-26 13:32:50 +000020#include <memory>
21#include <span>
22#include <vector>
23
24#include "mm_test.hh"
25
26namespace
27{
28using namespace ::std::placeholders;
29
30using ::testing::AllOf;
31using ::testing::Each;
32using ::testing::SizeIs;
33
34using struct_vm = struct vm;
Olivier Deprez181074b2023-02-02 14:53:23 +010035using struct_vcpu = struct vcpu;
J-Alves96f6e292021-06-08 17:32:40 +010036using struct_vm_locked = struct vm_locked;
Andrew Scull3c257452019-11-26 13:32:50 +000037
/* Size of the heap backing the tests' page pool: 64 pages. */
constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
/* Highest stage-2 page table level, as reported by the architecture. */
const int TOP_LEVEL = arch_mm_stage2_max_level();
40
41class vm : public ::testing::Test
42{
Olivier Deprezd5a54892023-02-02 16:45:59 +010043 protected:
44 static std::unique_ptr<uint8_t[]> test_heap;
45
46 struct mpool ppool;
47
Andrew Scull3c257452019-11-26 13:32:50 +000048 void SetUp() override
49 {
Olivier Deprezd5a54892023-02-02 16:45:59 +010050 if (!test_heap) {
51 /*
52 * TODO: replace with direct use of stdlib allocator so
53 * sanitizers are more effective.
54 */
55 test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
56 mpool_init(&ppool, sizeof(struct mm_page_table));
57 mpool_add_chunk(&ppool, test_heap.get(),
58 TEST_HEAP_SIZE);
59 }
Andrew Scull3c257452019-11-26 13:32:50 +000060 }
61
J-Alvesb37fd082020-10-22 12:29:21 +010062 public:
J-Alvesbeeb6dc2021-12-08 18:21:32 +000063 static bool BootOrderSmallerThan(struct_vm *vm1, struct_vm *vm2)
J-Alvesb37fd082020-10-22 12:29:21 +010064 {
J-Alvesbeeb6dc2021-12-08 18:21:32 +000065 return vm1->boot_order < vm2->boot_order;
J-Alvesb37fd082020-10-22 12:29:21 +010066 }
Andrew Scull3c257452019-11-26 13:32:50 +000067};
68
/* Storage for the lazily-allocated test heap (see vm::SetUp). */
std::unique_ptr<uint8_t[]> vm::test_heap;
70
Andrew Scull3c257452019-11-26 13:32:50 +000071/**
72 * If nothing is mapped, unmapping the hypervisor has no effect.
73 */
74TEST_F(vm, vm_unmap_hypervisor_not_mapped)
75{
76 struct_vm *vm;
77 struct vm_locked vm_locked;
78
Olivier Deprez878bd5b2021-04-15 19:05:10 +020079 /* TODO: check ptable usage (security state?) */
Madhukar Pappireddy070f49e2024-01-12 13:02:27 -060080 EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false, 0));
Andrew Scull3c257452019-11-26 13:32:50 +000081 vm_locked = vm_lock(vm);
Raghu Krishnamurthy0132b512021-02-03 14:13:26 -080082 ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
Andrew Scull3c257452019-11-26 13:32:50 +000083 EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
84 EXPECT_THAT(
85 mm_test::get_ptable(vm->ptable),
86 AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
87 mm_vm_fini(&vm->ptable, &ppool);
88 vm_unlock(&vm_locked);
89}
90
/**
 * Validate the "boot_list" is created properly, according to vm's "boot_order"
 * field: vCPUs are walked from lowest to highest boot_order regardless of the
 * order in which they were inserted.
 */
TEST_F(vm, vm_boot_order)
{
	struct_vm *vm_cur;
	struct_vcpu *vcpu;
	std::list<struct_vm *> expected_final_order;

	/*
	 * Insertion when no call to "vcpu_update_boot" has been made yet.
	 * The "boot_list" is expected to be empty.
	 */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
	vm_cur->boot_order = 3;
	vcpu = vm_get_vcpu(vm_cur, 0);
	vcpu_update_boot(vcpu);
	expected_final_order.push_back(vm_cur);

	/* First insertion: this vCPU must be the boot vCPU. */
	EXPECT_EQ(vcpu_get_boot_vcpu()->vm->id, vm_cur->id);

	/* Insertion at the head of the boot list (lower boot_order). */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
	vm_cur->boot_order = 1;
	vcpu = vm_get_vcpu(vm_cur, 0);
	vcpu_update_boot(vcpu);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vcpu_get_boot_vcpu()->vm->id, vm_cur->id);

	/* Insertion of two in the middle of the boot list. */
	for (uint32_t i = 0; i < 2; i++) {
		EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
		vm_cur->boot_order = 2;
		vcpu = vm_get_vcpu(vm_cur, 0);
		vcpu_update_boot(vcpu);
		expected_final_order.push_back(vm_cur);
	}

	/*
	 * Insertion in the end of the list.
	 * This test shares the data with "vm_unmap_hypervisor_not_mapped".
	 * As such, a VM is expected to have been initialized before this
	 * test, with ID 1 and boot_order 0.
	 */
	vm_cur = vm_find(1);
	EXPECT_FALSE(vm_cur == NULL);
	vcpu = vm_get_vcpu(vm_cur, 0);
	vcpu_update_boot(vcpu);
	expected_final_order.push_back(vm_cur);

	/*
	 * Number of VMs initialized should be the same as in the
	 * "expected_final_order", before the final verification.
	 */
	EXPECT_EQ(expected_final_order.size(), vm_get_count())
		<< "Something went wrong with the test itself...\n";

	/* Sort VMs from lower to higher "boot_order" field. */
	expected_final_order.sort(vm::BootOrderSmallerThan);

	/* Walk the boot list and check it matches the sorted expectation. */
	std::list<struct_vm *>::iterator it;
	vcpu = vcpu_get_boot_vcpu();
	for (it = expected_final_order.begin();
	     it != expected_final_order.end(); it++) {
		EXPECT_TRUE(vcpu != NULL);
		EXPECT_EQ((*it)->id, vcpu->vm->id);
		vcpu = vcpu_get_next_boot(vcpu);
	}
}
J-Alves60eaff92021-05-27 14:54:41 +0100162
/**
 * Exercises per-CPU tracking of vCPU arch timer deadlines: insertion and
 * removal from a CPU's timer list, nearest-deadline lookup, and migration
 * of a tracked vCPU to another physical CPU.
 */
TEST_F(vm, vcpu_arch_timer)
{
	const cpu_id_t cpu_ids[2] = {0, 1};
	struct_vcpu *vm0_vcpu;
	struct_vcpu *vm1_vcpu;
	struct_vcpu *deadline_vcpu;
	struct_vcpu *target_vcpu;
	struct vcpu_locked vcpu_locked;
	struct cpu *cpu0;
	struct cpu *cpu1;

	/* Initialize CPU module with two physical CPUs. */
	cpu_module_init(cpu_ids, 2);
	cpu0 = cpu_find_index(0);
	cpu1 = cpu_find_index(1);

	/* Two UP endpoints are deployed for this test. */
	CHECK(vm_get_count() >= 2);
	vm0_vcpu = vm_get_vcpu(vm_find_index(0), 0);
	vm1_vcpu = vm_get_vcpu(vm_find_index(1), 0);

	/* The execution context of each VM is scheduled on CPU0. */
	vm0_vcpu->cpu = cpu0;
	vm1_vcpu->cpu = cpu0;

	/*
	 * Enable the timer peripheral for each vCPU and set up an arbitrary
	 * countdown value; VM0's vCPU gets the nearer (smaller) deadline.
	 */
	vm0_vcpu->regs.arch_timer.cval = 555555;
	vm1_vcpu->regs.arch_timer.cval = 999999;
	vm0_vcpu->regs.arch_timer.ctl = 1;
	vm1_vcpu->regs.arch_timer.ctl = 1;

	/* No vCPU is being tracked through either timer list yet. */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_TRUE(deadline_vcpu == NULL);
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu1);
	EXPECT_TRUE(deadline_vcpu == NULL);

	/* vCPU of VM0 and VM1 are being added to the list. */
	timer_vcpu_manage(vm0_vcpu);
	timer_vcpu_manage(vm1_vcpu);

	/* VM0's vCPU has the smaller cval, hence the nearest deadline. */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_EQ(deadline_vcpu, vm0_vcpu);

	/* Remove one of the vCPUs from the CPU0 list by disabling its timer. */
	vm0_vcpu->regs.arch_timer.cval = 0;
	vm0_vcpu->regs.arch_timer.ctl = 0;
	timer_vcpu_manage(vm0_vcpu);

	/* This leaves one vCPU entry on CPU0 list. */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_EQ(deadline_vcpu, vm1_vcpu);

	/* Attempt to migrate VM1 vCPU from CPU0 to CPU1. */
	vcpu_locked = vcpu_lock(vm1_vcpu);
	timer_migrate_to_other_cpu(cpu1, vcpu_locked);
	vcpu_unlock(&vcpu_locked);

	/*
	 * After migration, ensure the list is empty on CPU0 but non-empty on
	 * CPU1.
	 */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_TRUE(deadline_vcpu == NULL);

	/*
	 * vCPU of VM1 is now running on CPU1. It must be the target vCPU when
	 * the timer has expired.
	 */
	target_vcpu = timer_find_target_vcpu(vm1_vcpu);
	EXPECT_EQ(target_vcpu, vm1_vcpu);
}
238
J-Alves60eaff92021-05-27 14:54:41 +0100239/**
240 * Validates updates and check functions for binding notifications to endpoints.
241 */
242TEST_F(vm, vm_notifications_bind_diff_senders)
243{
J-Alvesd3e81622021-10-05 14:55:57 +0100244 struct_vm *current_vm = nullptr;
245 struct vm_locked current_vm_locked;
J-Alves60eaff92021-05-27 14:54:41 +0100246 std::vector<struct_vm *> dummy_senders;
247 ffa_notifications_bitmap_t bitmaps[] = {
248 0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
249 bool is_from_vm = true;
250
251 /* For the subsequent tests three VMs are used. */
252 CHECK(vm_get_count() >= 3);
253
J-Alvesd3e81622021-10-05 14:55:57 +0100254 current_vm = vm_find_index(0);
J-Alves60eaff92021-05-27 14:54:41 +0100255
256 dummy_senders.push_back(vm_find_index(1));
257 dummy_senders.push_back(vm_find_index(2));
258
J-Alvesd3e81622021-10-05 14:55:57 +0100259 current_vm_locked = vm_lock(current_vm);
J-Alves60eaff92021-05-27 14:54:41 +0100260
261 for (unsigned int i = 0; i < 2; i++) {
262 /* Validate bindings condition after initialization. */
263 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100264 current_vm_locked, is_from_vm, HF_INVALID_VM_ID,
265 bitmaps[i], false));
J-Alves60eaff92021-05-27 14:54:41 +0100266
267 /*
268 * Validate bind related operations. For this test considering
269 * only global notifications.
270 */
J-Alvesd3e81622021-10-05 14:55:57 +0100271 vm_notifications_update_bindings(current_vm_locked, is_from_vm,
J-Alves60eaff92021-05-27 14:54:41 +0100272 dummy_senders[i]->id,
273 bitmaps[i], false);
274
275 EXPECT_TRUE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100276 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100277 bitmaps[i], false));
278
279 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100280 current_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100281 bitmaps[i], false));
282
283 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100284 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100285 bitmaps[1 - i], false));
286
287 EXPECT_FALSE(vm_notifications_validate_binding(
J-Alvesd3e81622021-10-05 14:55:57 +0100288 current_vm_locked, is_from_vm, dummy_senders[i]->id,
J-Alves60eaff92021-05-27 14:54:41 +0100289 bitmaps[2], false));
290 }
291
292 /** Clean up bind for other tests. */
J-Alvesd3e81622021-10-05 14:55:57 +0100293 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
J-Alves60eaff92021-05-27 14:54:41 +0100294 bitmaps[0], false);
J-Alvesd3e81622021-10-05 14:55:57 +0100295 vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
J-Alves60eaff92021-05-27 14:54:41 +0100296 bitmaps[1], false);
297
J-Alvesd3e81622021-10-05 14:55:57 +0100298 vm_unlock(&current_vm_locked);
J-Alves60eaff92021-05-27 14:54:41 +0100299}
300
/**
 * Validates updates and check functions for binding notifications, namely the
 * configuration of bindings of global and per-vCPU notifications.
 */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	/* Lower half of the bitmap is global, upper half per-vCPU. */
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);

	dummy_sender = vm_find_index(1);

	current_vm_locked = vm_lock(current_vm);

	/* Bind 'global' as global and 'per_vcpu' as per-vCPU notifications. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/* Check validation of global notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global,
		false));

	/* Check validation of per-vCPU notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		true));

	/*
	 * Check that global notifications are not validated as per-vCPU, and
	 * vice-versa.
	 */
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		false));
	/* A mix of both halves must not validate with either flag. */
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, false));

	/* Undo the bindings (rebind to sender ID 0) for later tests. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 global, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, global, false));

	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 per_vcpu, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, per_vcpu, false));

	vm_unlock(&current_vm_locked);
}
366
/**
 * Validates accesses to notifications bitmaps.
 */
TEST_F(vm, vm_notifications_set_and_get)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	/* Lower half of the bitmap is global, upper half per-vCPU. */
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	ffa_notifications_bitmap_t ret;
	const unsigned int vcpu_idx = 0;
	struct notifications *notifications;
	const bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);
	dummy_sender = vm_find_index(1);

	notifications = &current_vm->notifications.from_vm;
	current_vm_locked = vm_lock(current_vm);

	/* Bind 'global' as global and 'per_vcpu' as per-vCPU notifications. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/*
	 * Validate get notifications bitmap for global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0ull);
	EXPECT_EQ(ret, global);
	/* Getting the notifications must clear the pending bitmap. */
	EXPECT_EQ(notifications->global.pending, 0ull);

	/*
	 * Validate get notifications bitmap for per-vCPU notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);

	/*
	 * Validate that getting notifications for a specific vCPU also returns
	 * global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu | global);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
	EXPECT_EQ(notifications->global.pending, 0ull);

	/* Undo the bindings for later tests. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 per_vcpu, true);
	vm_unlock(&current_vm_locked);
}
439
J-Alves96f6e292021-06-08 17:32:40 +0100440/**
441 * Validates simple getting of notifications info for global notifications.
442 */
443TEST_F(vm, vm_notifications_info_get_global)
444{
445 ffa_notifications_bitmap_t to_set = 0xFU;
446 ffa_notifications_bitmap_t got;
447
448 /**
449 * Following set of variables that are also expected to be used when
450 * handling FFA_NOTIFICATION_INFO_GET.
451 */
452 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
453 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
454 uint32_t ids_count = 0;
455 uint32_t lists_count = 0;
456 enum notifications_info_get_state current_state = INIT;
457
458 CHECK(vm_get_count() >= 2);
459
460 for (unsigned int i = 0; i < 2; i++) {
461 struct_vm *current_vm = vm_find_index(0);
462 struct vm_locked current_vm_locked = vm_lock(current_vm);
463 struct notifications *notifications =
464 &current_vm->notifications.from_sp;
465 const bool is_from_vm = false;
466
J-Alves5a16c962022-03-25 12:32:51 +0000467 vm_notifications_partition_set_pending(
468 current_vm_locked, is_from_vm, to_set, 0, false);
J-Alves96f6e292021-06-08 17:32:40 +0100469
470 vm_notifications_info_get_pending(
471 current_vm_locked, is_from_vm, ids, &ids_count,
472 lists_sizes, &lists_count,
473 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);
474
475 /*
476 * Here the number of IDs and list count should be the same.
477 * As we are testing with Global notifications, this is
478 * expected.
479 */
480 EXPECT_EQ(ids_count, i + 1);
481 EXPECT_EQ(lists_count, i + 1);
482 EXPECT_EQ(lists_sizes[i], 0);
483 EXPECT_EQ(to_set, notifications->global.info_get_retrieved);
484
485 /* Action must be reset to initial state for each VM. */
486 current_state = INIT;
487
488 /*
489 * Check that getting pending notifications gives the expected
490 * return and cleans the 'pending' and 'info_get_retrieved'
491 * bitmaps.
492 */
J-Alves5136dda2022-03-25 12:26:38 +0000493 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100494 is_from_vm, 0);
495 EXPECT_EQ(got, to_set);
496
497 EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
498 EXPECT_EQ(notifications->global.pending, 0U);
499
500 vm_unlock(&current_vm_locked);
501 }
502}
503
504/**
505 * Validates simple getting of notifications info for per-vCPU notifications.
506 */
507TEST_F(vm, vm_notifications_info_get_per_vcpu)
508{
509 const ffa_notifications_bitmap_t per_vcpu = 0xFU;
510 ffa_notifications_bitmap_t got;
511
512 /*
513 * Following set of variables that are also expected to be used when
514 * handling ffa_notification_info_get.
515 */
516 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
517 uint32_t ids_count = 0;
518 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
519 uint32_t lists_count = 0;
520 enum notifications_info_get_state current_state = INIT;
521
522 CHECK(vm_get_count() >= 2);
523
524 for (unsigned int i = 0; i < 2; i++) {
525 struct_vm *current_vm = vm_find_index(0);
526 struct vm_locked current_vm_locked = vm_lock(current_vm);
527 struct notifications *notifications =
528 &current_vm->notifications.from_sp;
529 const bool is_from_vm = false;
530
J-Alves5a16c962022-03-25 12:32:51 +0000531 vm_notifications_partition_set_pending(
532 current_vm_locked, is_from_vm, per_vcpu, 0, true);
J-Alves96f6e292021-06-08 17:32:40 +0100533
534 vm_notifications_info_get_pending(
535 current_vm_locked, is_from_vm, ids, &ids_count,
536 lists_sizes, &lists_count,
537 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);
538
539 /*
540 * Here the number of IDs and list count should be the same.
541 * As we are testing with Global notifications, this is
542 * expected.
543 */
544 EXPECT_EQ(ids_count, (i + 1) * 2);
545 EXPECT_EQ(lists_count, i + 1);
546 EXPECT_EQ(lists_sizes[i], 1);
547 EXPECT_EQ(per_vcpu,
548 notifications->per_vcpu[0].info_get_retrieved);
549
550 /* Action must be reset to initial state for each VM. */
551 current_state = INIT;
552
553 /*
554 * Check that getting pending notifications gives the expected
555 * return and cleans the 'pending' and 'info_get_retrieved'
556 * bitmaps.
557 */
J-Alves5136dda2022-03-25 12:26:38 +0000558 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100559 is_from_vm, 0);
560 EXPECT_EQ(got, per_vcpu);
561
562 EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
563 EXPECT_EQ(notifications->per_vcpu[0].pending, 0U);
564
565 vm_unlock(&current_vm_locked);
566 }
567}
568
/**
 * Validate getting of notifications information if all VCPUs have notifications
 * pending.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus)
{
	struct_vm *current_vm = nullptr;
	struct vm_locked current_vm_locked;
	const ffa_vcpu_count_t vcpu_count = MAX_CPUS;
	ffa_notifications_bitmap_t got;
	const ffa_notifications_bitmap_t global = 0xF0000;

	/*
	 * Set of variables mirroring those used when handling
	 * ffa_notification_info_get.
	 */
	struct notifications *notifications;
	/*
	 * NOTE(review): named 'is_from_sp' but passed as the 'is_from_vm'
	 * argument below; the value false selects the from_sp bitmaps —
	 * confirm naming against the vm.h API.
	 */
	const bool is_from_sp = false;
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	/* Create a VM with one vCPU per physical CPU. */
	EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false, 0));
	current_vm_locked = vm_lock(current_vm);
	notifications = &current_vm->notifications.from_sp;

	/* Set a distinct per-vCPU notification pending on every vCPU. */
	for (unsigned int i = 0; i < vcpu_count; i++) {
		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_sp, FFA_NOTIFICATION_MASK(i),
			i, true);
	}

	/*
	 * Adding a global notification should not change the list of IDs,
	 * because global notifications only require the VM ID to be included in
	 * the list, at least once.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_sp,
					       global, 0, false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_sp, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * This test has been conceived for the expected MAX_CPUS 4.
	 * All VCPUs have notifications of the same VM, to be broken down in 2
	 * lists with 3 VCPU IDs, and 1 VCPU ID respectively.
	 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
	 */
	CHECK(MAX_CPUS == 4);
	EXPECT_EQ(ids_count, 6U);
	EXPECT_EQ(lists_count, 2U);
	EXPECT_EQ(lists_sizes[0], 3);
	EXPECT_EQ(lists_sizes[1], 1);

	for (unsigned int i = 0; i < vcpu_count; i++) {
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_sp, i);

		/*
		 * The first call to
		 * vm_notifications_partition_get_pending should also
		 * include the global notifications on the return.
		 */
		ffa_notifications_bitmap_t to_check =
			(i != 0) ? FFA_NOTIFICATION_MASK(i)
				 : FFA_NOTIFICATION_MASK(i) | global;

		EXPECT_EQ(got, to_check);

		/* Each get must fully drain that vCPU's bitmaps. */
		EXPECT_EQ(notifications->per_vcpu[i].pending, 0);
		EXPECT_EQ(notifications->per_vcpu[i].info_get_retrieved, 0);
	}

	vm_unlock(&current_vm_locked);
}
649
/**
 * Validate change of state from 'vm_notifications_info_get_pending', when the
 * list of IDs is full.
 */
TEST_F(vm, vm_notifications_info_get_full_per_vcpu)
{
	struct_vm *current_vm = vm_find_index(0);
	struct vm_locked current_vm_locked = vm_lock(current_vm);
	struct notifications *notifications =
		&current_vm->notifications.from_sp;
	const bool is_from_vm = false;
	ffa_notifications_bitmap_t got = 0;

	/*
	 * Set of variables mirroring those used when handling
	 * ffa_notification_info_get.
	 * 'ids_count' has been initialized such that it indicates there is no
	 * space in the list for a per-vCPU notification (which needs two
	 * entries: VM ID and VCPU ID), but still room for one more entry.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 10;
	enum notifications_info_get_state current_state = INIT;
	CHECK(vm_get_count() >= 2);

	/* Set a per-vCPU notification pending on vCPU 0. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(1), 0,
					       true);

	/* Call function to get notifications info, with only per-vCPU set. */
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Verify that as soon as there isn't space to do the required
	 * insertion in the list, the
	 * 'vm_notifications_partition_get_pending' returns and changes
	 * list state to FULL. In this case returning, because it would need to
	 * add two IDs (VM ID and VCPU ID).
	 */
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1);
	EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);

	/*
	 * At this point there is still room for the information of a global
	 * notification (only VM ID to be added). Reset 'current_state'
	 * for the insertion to happen at the last position of the array.
	 */
	current_state = INIT;

	/* Setting global notification. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(2), 0,
					       false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Now list must be full, the set global notification must be part of
	 * 'info_get_retrieved', and the 'current_state' should be set to FULL
	 * due to the pending per-vCPU notification in VCPU 0.
	 */
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(notifications->global.info_get_retrieved,
		  FFA_NOTIFICATION_MASK(2));

	/* Drain both notifications so later tests start clean. */
	got = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0);
	EXPECT_EQ(got, FFA_NOTIFICATION_MASK(1) | FFA_NOTIFICATION_MASK(2));

	vm_unlock(&current_vm_locked);
}
731
732TEST_F(vm, vm_notifications_info_get_full_global)
733{
734 struct_vm *current_vm = vm_find_index(0);
735 struct vm_locked current_vm_locked = vm_lock(current_vm);
736 ffa_notifications_bitmap_t got;
737 struct notifications *notifications;
738 const bool is_from_vm = false;
739 /*
740 * Following set of variables that are also expected to be used when
741 * handling ffa_notification_info_get.
742 * For this 'ids_count' has been initialized such that it indicates
743 * there is no space in the list for a global notification (VM ID only).
744 */
745 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
746 uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
747 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
748 uint32_t lists_count = 10;
749 enum notifications_info_get_state current_state = INIT;
750
751 CHECK(vm_get_count() >= 1);
752
753 current_vm = vm_find_index(0);
754
755 notifications = &current_vm->notifications.from_sp;
756
757 /* Set global notification. */
J-Alves5a16c962022-03-25 12:32:51 +0000758 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
759 FFA_NOTIFICATION_MASK(10), 0,
760 false);
J-Alves96f6e292021-06-08 17:32:40 +0100761
762 /* Get notifications info for the given notifications. */
763 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
764 &ids_count, lists_sizes, &lists_count,
765 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
766 &current_state);
767
768 /* Expect 'info_get_retrieved' bitmap to be 0. */
769 EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
770 EXPECT_EQ(notifications->global.pending, FFA_NOTIFICATION_MASK(10));
771 EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
772 EXPECT_EQ(current_state, FULL);
773
J-Alves5136dda2022-03-25 12:26:38 +0000774 got = vm_notifications_partition_get_pending(current_vm_locked,
J-Alves96f6e292021-06-08 17:32:40 +0100775 is_from_vm, 0);
J-Alves9f74b932021-10-11 14:20:05 +0100776 EXPECT_EQ(got, FFA_NOTIFICATION_MASK(10));
777
J-Alves96f6e292021-06-08 17:32:40 +0100778 vm_unlock(&current_vm_locked);
779}
780
J-Alvesf31940e2022-03-25 17:24:00 +0000781TEST_F(vm, vm_notifications_info_get_from_framework)
782{
783 struct vm_locked vm_locked = vm_lock(vm_find_index(0));
784 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
785 uint32_t ids_count = 0;
786 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
787 uint32_t lists_count = 0;
788
789 vm_notifications_framework_set_pending(vm_locked, 0x1U);
790
791 /* Get notifications info for the given notifications. */
792 vm_notifications_info_get(vm_locked, ids, &ids_count, lists_sizes,
793 &lists_count,
794 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
795
796 EXPECT_EQ(ids[0], vm_locked.vm->id);
797 EXPECT_EQ(ids_count, 1);
798 EXPECT_EQ(lists_sizes[0], 0);
799 EXPECT_EQ(lists_count, 1);
800
801 EXPECT_EQ(vm_notifications_framework_get_pending(vm_locked), 0x1U);
802
803 vm_unlock(&vm_locked);
804}
805
Daniel Boulby8be26512024-09-03 19:41:11 +0100806/**
807 * Validates simple getting of notifications info for pending IPI.
808 */
809TEST_F(vm, vm_notifications_info_get_ipi)
810{
811 /*
812 * Following set of variables that are also expected to be used when
813 * handling ffa_notification_info_get.
814 */
815 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
816 uint32_t ids_count = 0;
817 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
818 uint32_t lists_count = 0;
819 enum notifications_info_get_state current_state = INIT;
820 struct_vm *current_vm = vm_find_index(5);
821 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
822 struct interrupts *interrupts = &target_vcpu->interrupts;
823 const bool is_from_vm = false;
824 struct vm_locked current_vm_locked = vm_lock(current_vm);
825
826 EXPECT_TRUE(current_vm->vcpu_count >= 2);
827
828 vcpu_virt_interrupt_set_pending(interrupts, HF_IPI_INTID);
829
830 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
831 &ids_count, lists_sizes, &lists_count,
832 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
833 &current_state);
834
835 EXPECT_EQ(ids_count, 2);
836 EXPECT_EQ(lists_count, 1);
837 EXPECT_EQ(lists_sizes[0], 1);
838 EXPECT_EQ(ids[0], current_vm->id);
839 EXPECT_EQ(ids[1], 1);
840 EXPECT_EQ(target_vcpu->ipi_info_get_retrieved, true);
841
842 /* Check it is not retrieved multiple times. */
843 current_state = INIT;
844 ids[0] = 0;
845 ids[1] = 0;
846 ids_count = 0;
847 lists_sizes[0] = 0;
848 lists_count = 0;
849
850 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
851 &ids_count, lists_sizes, &lists_count,
852 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
853 &current_state);
854 EXPECT_EQ(ids_count, 0);
855 EXPECT_EQ(lists_count, 0);
856 EXPECT_EQ(lists_sizes[0], 0);
857
858 vm_unlock(&current_vm_locked);
859}
860
861/**
862 * Validates simple getting of notifications info for pending with IPI when
863 * notification for the same vcpu is also pending.
864 */
865TEST_F(vm, vm_notifications_info_get_ipi_with_per_vcpu)
866{
867 /*
868 * Following set of variables that are also expected to be used when
869 * handling ffa_notification_info_get.
870 */
871 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
872 uint32_t ids_count = 0;
873 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
874 uint32_t lists_count = 0;
875 enum notifications_info_get_state current_state = INIT;
876 struct_vm *current_vm = vm_find_index(5);
877 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
878 struct interrupts *interrupts = &target_vcpu->interrupts;
879 const bool is_from_vm = false;
880 struct vm_locked current_vm_locked = vm_lock(current_vm);
881
882 EXPECT_TRUE(current_vm->vcpu_count >= 2);
883
884 vcpu_virt_interrupt_set_pending(interrupts, HF_IPI_INTID);
885
886 vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
887 true, 1, true);
888 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
889 &ids_count, lists_sizes, &lists_count,
890 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
891 &current_state);
892
893 EXPECT_EQ(ids_count, 2);
894 EXPECT_EQ(lists_count, 1);
895 EXPECT_EQ(lists_sizes[0], 1);
896 EXPECT_EQ(ids[0], current_vm->id);
897 EXPECT_EQ(ids[1], 1);
898 EXPECT_EQ(target_vcpu->ipi_info_get_retrieved, true);
899
900 /* Reset the state and values. */
901 current_state = INIT;
902 ids[0] = 0;
903 ids[1] = 0;
904 ids_count = 0;
905 lists_sizes[0] = 0;
906 lists_count = 0;
907
908 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
909 &ids_count, lists_sizes, &lists_count,
910 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
911 &current_state);
912 EXPECT_EQ(ids_count, 0);
913 EXPECT_EQ(lists_count, 0);
914 EXPECT_EQ(lists_sizes[0], 0);
915
916 vm_unlock(&current_vm_locked);
917}
918
919/**
920 * Validate that a mix of a pending IPI and notifcations are correctly
921 * reported across vcpus.
922 */
923TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus_and_ipi)
924{
925 struct_vm *current_vm = vm_find_index(5);
926 ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;
927 CHECK(vcpu_count > 1);
928
929 struct vm_locked current_vm_locked = vm_lock(current_vm);
930
931 /*
932 * Following set of variables that are also expected to be used when
933 * handling ffa_notification_info_get.
934 */
935 const bool is_from_vm = false;
936 uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
937 uint32_t ids_count = 0;
938 uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
939 uint32_t lists_count = 0;
940 enum notifications_info_get_state current_state = INIT;
941 struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 0);
942 struct interrupts *interrupts = &target_vcpu->interrupts;
943
944 vcpu_virt_interrupt_set_pending(interrupts, HF_IPI_INTID);
945
946 for (unsigned int i = 1; i < vcpu_count; i++) {
947 vm_notifications_partition_set_pending(
948 current_vm_locked, is_from_vm, FFA_NOTIFICATION_MASK(i),
949 i, true);
950 }
951
952 vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
953 &ids_count, lists_sizes, &lists_count,
954 FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
955 &current_state);
956
957 /*
958 * This test has been conceived for the expected MAX_CPUS 4.
959 * All VCPUs have notifications of the same VM, to be broken down in 2
960 * lists with 3 VCPU IDs, and 1 VCPU ID respectively.
961 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
962 */
963 EXPECT_EQ(ids_count, 6U);
964 EXPECT_EQ(lists_count, 2U);
965 EXPECT_EQ(lists_sizes[0], 3);
966 EXPECT_EQ(lists_sizes[1], 1);
967 EXPECT_EQ(ids[0], current_vm->id);
968 EXPECT_EQ(ids[1], 0);
969 EXPECT_EQ(ids[2], 1);
970 EXPECT_EQ(ids[3], 2);
971 EXPECT_EQ(ids[4], current_vm->id);
972 EXPECT_EQ(ids[5], 3);
973
974 vm_unlock(&current_vm_locked);
975}
Andrew Scull3c257452019-11-26 13:32:50 +0000976} /* namespace */