/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/mpool.h"
#include "hf/vm.h"
}

#include <list>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;

using struct_vm = struct vm;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 32;
const int TOP_LEVEL = arch_mm_stage2_max_level();

class vm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
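		/*
		 * Seed the memory pool from which the VM and page-table code
		 * under test allocates page tables.
		 */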
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;

       public:
	static bool BootOrderBiggerThan(struct_vm *vm1, struct_vm *vm2)
	{
		return vm1->boot_order > vm2->boot_order;
	}
};

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(vm, vm_unmap_hypervisor_not_mapped)
{
	struct_vm *vm;
	struct vm_locked vm_locked;

	EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false));
	vm_locked = vm_lock(vm);
	ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
	EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
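	/*
	 * The page table should still consist only of absent entries in each
	 * of its root-level tables.
	 */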
	EXPECT_THAT(
		mm_test::get_ptable(vm->ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&vm->ptable, &ppool);
	vm_unlock(&vm_locked);
}

/**
 * Validate that the "boot_list" is constructed properly, according to each
 * VM's "boot_order" field.
 */
TEST_F(vm, vm_boot_order)
{
	struct_vm *vm_cur;
	std::list<struct_vm *> expected_final_order;

	EXPECT_FALSE(vm_get_first_boot());

	/*
	 * Insertion when no call to "vm_update_boot" has been made yet.
	 * The "boot_list" is expected to be empty.
	 */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
	vm_cur->boot_order = 1;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);

	/* Insertion at the head of the boot list */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
	vm_cur->boot_order = 3;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);

	/* Insertion of two in the middle of the boot list */
	for (int i = 0; i < 2; i++) {
		EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
		vm_cur->boot_order = 2;
		vm_update_boot(vm_cur);
		expected_final_order.push_back(vm_cur);
	}

	/*
	 * Insertion at the end of the list.
	 * This test shares state with "vm_unmap_hypervisor_not_mapped". As
	 * such, a VM with ID 1 and boot_order 0 is expected to have been
	 * initialized before this test runs.
	 */
	vm_cur = vm_find(1);
	EXPECT_FALSE(vm_cur == NULL);
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	/*
	 * Before the final verification, the number of VMs initialized should
	 * match the size of "expected_final_order".
	 */
	EXPECT_EQ(expected_final_order.size(), vm_get_count())
		<< "Something went wrong with the test itself...\n";

	/* Sort "expected_final_order" by "boot_order" field */
	expected_final_order.sort(vm::BootOrderBiggerThan);

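	/*
	 * Walk the boot list and check that it matches the expected, sorted
	 * order.
	 */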
	std::list<struct_vm *>::iterator it;
	for (it = expected_final_order.begin(), vm_cur = vm_get_first_boot();
	     it != expected_final_order.end() && vm_cur != NULL;
	     it++, vm_cur = vm_cur->next_boot) {
		EXPECT_EQ((*it)->id, vm_cur->id);
	}
}

/**
 * Validates the update and check functions for binding notifications to
 * endpoints.
 */
TEST_F(vm, vm_notifications_bind_diff_senders)
{
	struct_vm *cur_vm = nullptr;
	struct vm_locked cur_vm_locked;
	std::vector<struct_vm *> dummy_senders;
	ffa_notifications_bitmap_t bitmaps[] = {
		0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
	bool is_from_vm = true;

	/* For the subsequent tests three VMs are used. */
	CHECK(vm_get_count() >= 3);

	cur_vm = vm_find_index(0);

	dummy_senders.push_back(vm_find_index(1));
	dummy_senders.push_back(vm_find_index(2));

	cur_vm_locked = vm_lock(cur_vm);

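	/*
	 * Bind each bitmap to a different sender and check that validation
	 * succeeds only for the matching sender and bitmap.
	 */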
	for (unsigned int i = 0; i < 2; i++) {
		/* Validate bindings condition after initialization. */
		EXPECT_TRUE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, HF_INVALID_VM_ID, bitmaps[i],
			false));

		/*
		 * Validate bind-related operations. This test considers only
		 * global notifications.
		 */
		vm_notifications_update_bindings(cur_vm_locked, is_from_vm,
						 dummy_senders[i]->id,
						 bitmaps[i], false);

		EXPECT_TRUE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[i], false));

		EXPECT_FALSE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
			bitmaps[i], false));

		EXPECT_FALSE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[1 - i], false));

		EXPECT_FALSE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[2], false));
	}

	/* Clean up the bindings for the tests that follow. */
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0,
					 bitmaps[0], false);
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0,
					 bitmaps[1], false);

	vm_unlock(&cur_vm_locked);
}

/**
 * Validates the update and check functions for notification bindings, namely
 * the configuration of bindings of global and per-vCPU notifications.
 */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
	struct_vm *cur_vm;
	struct vm_locked cur_vm_locked;
	struct_vm *dummy_sender;
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	cur_vm = vm_find_index(0);

	dummy_sender = vm_find_index(1);

	cur_vm_locked = vm_lock(cur_vm);

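	/*
	 * Bind the lower half of the bitmap as global notifications and the
	 * upper half as per-vCPU notifications, all to the same sender.
	 */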
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/* Check validation of global notification bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, global, false));

	/* Check validation of per-vCPU notification bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, per_vcpu, true));

	/*
	 * Check that global notifications are not validated as per-vCPU, and
	 * vice versa.
	 */
	EXPECT_FALSE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, global, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, per_vcpu, false));
	EXPECT_FALSE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, global | per_vcpu,
		true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, global | per_vcpu,
		false));

	/* Undo the bindings. */
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0, global,
					 false);
	EXPECT_TRUE(vm_notifications_validate_binding(cur_vm_locked, is_from_vm,
						      0, global, false));

	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0, per_vcpu,
					 false);
	EXPECT_TRUE(vm_notifications_validate_binding(cur_vm_locked, is_from_vm,
						      0, per_vcpu, false));

	vm_unlock(&cur_vm_locked);
}

/**
 * Validates accesses to the notifications bitmaps.
 */
TEST_F(vm, vm_notifications_set_and_get)
{
	struct_vm *cur_vm;
	struct vm_locked cur_vm_locked;
	struct_vm *dummy_sender;
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	ffa_notifications_bitmap_t ret;
	const unsigned int vcpu_idx = 1;
	struct notifications *notifications;
	const bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	cur_vm = vm_find_index(0);
	dummy_sender = vm_find_index(1);

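	/*
	 * Keep a pointer to the VM's internal notifications state so the
	 * pending bitmaps can be checked directly.
	 */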
	notifications = &cur_vm->notifications.from_vm;
	cur_vm_locked = vm_lock(cur_vm);

	vm_notifications_update_bindings(cur_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/*
	 * Validate get notifications bitmap for global notifications.
	 */
	vm_notifications_set(cur_vm_locked, is_from_vm, global, 0u, false);

	ret = vm_notifications_get_pending_and_clear(cur_vm_locked, is_from_vm,
						     0u);
	EXPECT_EQ(ret, global);
	EXPECT_EQ(notifications->global.pending, 0u);

	/*
	 * Validate get notifications bitmap for per_vcpu notifications.
	 */
	vm_notifications_set(cur_vm_locked, is_from_vm, per_vcpu, vcpu_idx,
			     true);

	ret = vm_notifications_get_pending_and_clear(cur_vm_locked, is_from_vm,
						     vcpu_idx);
	EXPECT_EQ(ret, per_vcpu);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0u);

	/*
	 * Validate that getting for a specific vCPU also returns the pending
	 * global notifications.
	 */
	vm_notifications_set(cur_vm_locked, is_from_vm, per_vcpu, vcpu_idx,
			     true);
	vm_notifications_set(cur_vm_locked, is_from_vm, global, 0, false);

	ret = vm_notifications_get_pending_and_clear(cur_vm_locked, is_from_vm,
						     vcpu_idx);
	EXPECT_EQ(ret, per_vcpu | global);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0u);
	EXPECT_EQ(notifications->global.pending, 0u);

	/* Undo the bindings. */
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0, global,
					 false);
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0, per_vcpu,
					 true);
	vm_unlock(&cur_vm_locked);
}

} /* namespace */