Andrew Scull | 3c25745 | 2019-11-26 13:32:50 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2019 The Hafnium Authors. |
| 3 | * |
Andrew Walbran | e959ec1 | 2020-06-17 15:01:09 +0100 | [diff] [blame] | 4 | * Use of this source code is governed by a BSD-style |
| 5 | * license that can be found in the LICENSE file or at |
| 6 | * https://opensource.org/licenses/BSD-3-Clause. |
Andrew Scull | 3c25745 | 2019-11-26 13:32:50 +0000 | [diff] [blame] | 7 | */ |
| 8 | |
| 9 | #include <gmock/gmock.h> |
| 10 | |
| 11 | extern "C" { |
| 12 | #include "hf/mpool.h" |
| 13 | #include "hf/vm.h" |
| 14 | } |
| 15 | |
J-Alves | b37fd08 | 2020-10-22 12:29:21 +0100 | [diff] [blame] | 16 | #include <list> |
Andrew Scull | 3c25745 | 2019-11-26 13:32:50 +0000 | [diff] [blame] | 17 | #include <memory> |
| 18 | #include <span> |
| 19 | #include <vector> |
| 20 | |
| 21 | #include "mm_test.hh" |
| 22 | |
| 23 | namespace |
| 24 | { |
| 25 | using namespace ::std::placeholders; |
| 26 | |
| 27 | using ::testing::AllOf; |
| 28 | using ::testing::Each; |
| 29 | using ::testing::SizeIs; |
| 30 | |
/*
 * The C "struct vm" collides with the test fixture named "vm" below; this
 * alias lets the tests refer to the C type unambiguously.
 */
using struct_vm = struct vm;

/* 32 pages of backing memory for the page-table pool set up in SetUp(). */
constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 32;
/* Top level of a stage-2 page table, as reported by the arch layer. */
const int TOP_LEVEL = arch_mm_stage2_max_level();
| 35 | |
| 36 | class vm : public ::testing::Test |
| 37 | { |
| 38 | void SetUp() override |
| 39 | { |
| 40 | /* |
| 41 | * TODO: replace with direct use of stdlib allocator so |
| 42 | * sanitizers are more effective. |
| 43 | */ |
| 44 | test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE); |
| 45 | mpool_init(&ppool, sizeof(struct mm_page_table)); |
| 46 | mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE); |
| 47 | } |
| 48 | |
| 49 | std::unique_ptr<uint8_t[]> test_heap; |
| 50 | |
| 51 | protected: |
| 52 | struct mpool ppool; |
J-Alves | b37fd08 | 2020-10-22 12:29:21 +0100 | [diff] [blame] | 53 | |
| 54 | public: |
| 55 | static bool BootOrderBiggerThan(struct_vm *vm1, struct_vm *vm2) |
| 56 | { |
| 57 | return vm1->boot_order > vm2->boot_order; |
| 58 | } |
Andrew Scull | 3c25745 | 2019-11-26 13:32:50 +0000 | [diff] [blame] | 59 | }; |
| 60 | |
/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(vm, vm_unmap_hypervisor_not_mapped)
{
	struct_vm *vm;
	struct vm_locked vm_locked;

	EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false));
	vm_locked = vm_lock(vm);
	ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
	EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
	/*
	 * The stage-2 table must still consist solely of absent entries,
	 * i.e. the unmap did not populate or alter anything.
	 */
	EXPECT_THAT(
		mm_test::get_ptable(vm->ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&vm->ptable, &ppool);
	vm_unlock(&vm_locked);
}
| 79 | |
/**
 * Validate the "boot_list" is created properly, according to vm's "boot_order"
 * field.
 */
TEST_F(vm, vm_boot_order)
{
	struct_vm *vm_cur;
	std::list<struct_vm *> expected_final_order;

	/* No VM has been added to the boot list yet. */
	EXPECT_FALSE(vm_get_first_boot());

	/*
	 * Insertion when no call to "vm_update_boot" has been made yet.
	 * The "boot_list" is expected to be empty.
	 */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
	vm_cur->boot_order = 1;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);

	/* Insertion at the head of the boot list */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
	vm_cur->boot_order = 3;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	/* boot_order 3 outranks 1, so this VM must now boot first. */
	EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);

	/* Insertion of two in the middle of the boot list */
	for (int i = 0; i < 2; i++) {
		EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
		vm_cur->boot_order = 2;
		vm_update_boot(vm_cur);
		expected_final_order.push_back(vm_cur);
	}

	/*
	 * Insertion in the end of the list.
	 * This test shares the data with "vm_unmap_hypervisor_not_mapped".
	 * As such, a VM is expected to have been initialized before this
	 * test, with ID 1 and boot_order 0.
	 */
	vm_cur = vm_find(1);
	EXPECT_FALSE(vm_cur == NULL);
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	/*
	 * Number of VMs initialized should be the same as in the
	 * "expected_final_order", before the final verification.
	 */
	EXPECT_EQ(expected_final_order.size(), vm_get_count())
		<< "Something went wrong with the test itself...\n";

	/* Sort "expected_final_order" by "boot_order" field */
	expected_final_order.sort(vm::BootOrderBiggerThan);

	/* Walk both lists in lock-step; IDs must match element by element. */
	std::list<struct_vm *>::iterator it;
	for (it = expected_final_order.begin(), vm_cur = vm_get_first_boot();
	     it != expected_final_order.end() && vm_cur != NULL;
	     it++, vm_cur = vm_cur->next_boot) {
		EXPECT_EQ((*it)->id, vm_cur->id);
	}
}
J-Alves | 60eaff9 | 2021-05-27 14:54:41 +0100 | [diff] [blame] | 146 | |
/**
 * Validates updates and check functions for binding notifications to endpoints.
 */
TEST_F(vm, vm_notifications_bind_diff_senders)
{
	struct_vm *cur_vm = nullptr;
	struct vm_locked cur_vm_locked;
	std::vector<struct_vm *> dummy_senders;
	/* Low half, high half, and an overlapping middle range of bits. */
	ffa_notifications_bitmap_t bitmaps[] = {
		0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
	bool is_from_vm = true;

	/* For the subsequent tests three VMs are used. */
	CHECK(vm_get_count() >= 3);

	cur_vm = vm_find_index(0);

	dummy_senders.push_back(vm_find_index(1));
	dummy_senders.push_back(vm_find_index(2));

	cur_vm_locked = vm_lock(cur_vm);

	for (unsigned int i = 0; i < 2; i++) {
		/* Validate bindings condition after initialization. */
		EXPECT_TRUE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, HF_INVALID_VM_ID, bitmaps[i],
			false));

		/*
		 * Validate bind related operations. For this test considering
		 * only global notifications.
		 */
		vm_notifications_update_bindings(cur_vm_locked, is_from_vm,
						 dummy_senders[i]->id,
						 bitmaps[i], false);

		/* The sender/bitmap pair just bound must validate... */
		EXPECT_TRUE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[i], false));

		/* ...but not the same bitmap with the other sender... */
		EXPECT_FALSE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
			bitmaps[i], false));

		/* ...nor this sender with the other sender's bitmap... */
		EXPECT_FALSE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[1 - i], false));

		/* ...nor a bitmap this sender was never bound to. */
		EXPECT_FALSE(vm_notifications_validate_binding(
			cur_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[2], false));
	}

	/* Clean up bind for other tests. */
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0,
					 bitmaps[0], false);
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0,
					 bitmaps[1], false);

	vm_unlock(&cur_vm_locked);
}
| 208 | |
/**
 * Validates updates and check functions for binding notifications, namely the
 * configuration of bindings of global and per VCPU notifications.
 */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
	struct_vm *cur_vm;
	struct vm_locked cur_vm_locked;
	struct_vm *dummy_sender;
	/* Two complementary masks so the two bindings never overlap. */
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	cur_vm = vm_find_index(0);

	dummy_sender = vm_find_index(1);

	cur_vm_locked = vm_lock(cur_vm);

	/* Bind "global" as global and "per_vcpu" as per-vCPU notifications. */
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/* Check validation of global notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, global, false));

	/* Check validation of per vcpu notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, per_vcpu, true));

	/*
	 * Check that global notifications are not validated as per VCPU, and
	 * vice-versa. Mixed masks spanning both bindings must also fail.
	 */
	EXPECT_FALSE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, global, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, per_vcpu, false));
	EXPECT_FALSE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, global | per_vcpu,
		true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		cur_vm_locked, is_from_vm, dummy_sender->id, global | per_vcpu,
		false));

	/* Undo the bindings so later tests start with none configured. */
	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0, global,
					 false);
	EXPECT_TRUE(vm_notifications_validate_binding(cur_vm_locked, is_from_vm,
						      0, global, false));

	vm_notifications_update_bindings(cur_vm_locked, is_from_vm, 0, per_vcpu,
					 false);
	EXPECT_TRUE(vm_notifications_validate_binding(cur_vm_locked, is_from_vm,
						      0, per_vcpu, false));

	vm_unlock(&cur_vm_locked);
}
| 271 | |
Andrew Scull | 3c25745 | 2019-11-26 13:32:50 +0000 | [diff] [blame] | 272 | } /* namespace */ |