/*
 * Copyright 2025 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/vcpu.h"
#include "hf/vm.h"
}

#include <map>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;
using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
const int TOP_LEVEL = arch_mm_stage2_max_level();
class vcpu : public ::testing::Test
{
       protected:
	static std::unique_ptr<uint8_t[]> test_heap;
	struct mpool ppool;
	const uint32_t first_intid = HF_NUM_INTIDS - 2;
	const uint32_t second_intid = HF_NUM_INTIDS - 1;
	struct_vm *test_vm;
	struct_vcpu *test_vcpu;
	struct vcpu_locked vcpu_locked;
	struct interrupts *interrupts;

	void SetUp() override
	{
		if (!test_heap) {
			test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
			mpool_init(&ppool, sizeof(struct mm_page_table));
			mpool_add_chunk(&ppool, test_heap.get(),
					TEST_HEAP_SIZE);
		}

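		/*
		 * Create the test VM and fetch its first vCPU to run the
		 * tests against. (Assuming vm_init's trailing arguments
		 * select a normal, non-EL0 partition with no DMA devices.)
		 */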
		test_vm = vm_init(HF_VM_ID_OFFSET, 8, &ppool, false, 0);
		test_vcpu = vm_get_vcpu(test_vm, 0);
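		/*
		 * The vCPU lock is held for the duration of each test and
		 * released in TearDown.
		 */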
		vcpu_locked = vcpu_lock(test_vcpu);
		interrupts = &test_vcpu->interrupts;

		/* Enable the interrupts used in testing. */
		vcpu_virt_interrupt_enable(vcpu_locked, first_intid, true);
		vcpu_virt_interrupt_enable(vcpu_locked, second_intid, true);
	}

	void TearDown() override
	{
		vcpu_unlock(&vcpu_locked);
	}
};

std::unique_ptr<uint8_t[]> vcpu::test_heap;

/**
 * Check that interrupts that are set pending can later be fetched
 * from the queue.
 */
TEST_F(vcpu, pending_interrupts_are_fetched)
{
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Pend the interrupts, and check the count is incremented. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);

	/*
	 * Check the pending interrupts are returned in the order they were
	 * injected, and that once both have been returned the invalid intid
	 * is given to show there are no more pending interrupts.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);

	/*
	 * Check that, having been fetched, the interrupts are no longer
	 * marked as pending in the bitmap, and the interrupt count is 0.
	 */
	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/*
	 * Check that this expected behaviour happens on a consecutive run.
	 * Invert the order of the interrupts to add some variation.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);

	EXPECT_TRUE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
	EXPECT_TRUE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
	EXPECT_EQ(vcpu_virt_interrupt_irq_count_get(vcpu_locked), 2);

	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);

	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
	EXPECT_EQ(vcpu_virt_interrupt_irq_count_get(vcpu_locked), 0);
}

/*
 * Check that a disabled interrupt will not be returned until it is
 * enabled.
 */
TEST_F(vcpu, pending_interrupts_not_enabled_are_not_returned)
{
	/*
	 * Pend the interrupts, check the count is incremented, the pending
	 * interrupts are returned correctly and this causes the count to
	 * return to 0.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Again pend the interrupts. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);

	/* Disable the first interrupt. */
	vcpu_virt_interrupt_enable(vcpu_locked, first_intid, false);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);
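	/*
	 * Note the disabled interrupt stays queued: only the count of
	 * pending-and-enabled interrupts drops, as the re-enable below
	 * shows.
	 */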

	/*
	 * Check that the disabled first interrupt is not returned:
	 * the second intid should be returned and then the invalid
	 * intid to show there are no more pending and enabled interrupts.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Re-enable the first interrupt and disable the second interrupt. */
	vcpu_virt_interrupt_enable(vcpu_locked, first_intid, true);
	vcpu_virt_interrupt_enable(vcpu_locked, second_intid, false);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Inject the second interrupt while it is disabled: it should
	 * eventually be returned once the interrupt is enabled again.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the re-enabled first interrupt is returned and is the
	 * only pending and enabled interrupt.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Enable the second interrupt to check it will now be returned. */
	vcpu_virt_interrupt_enable(vcpu_locked, second_intid, true);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that it is now returned as a pending interrupt and is the
	 * only interrupt pending.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
}

/**
 * Check that interrupts can be injected and fetched repeatedly, many more
 * times than the queue can hold at once, without corrupting the queue state.
 */
TEST_F(vcpu, injecting_getting_interrupts_multiple_times)
{
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Pend the interrupts, and check the count is incremented. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
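
	/*
	 * Repeatedly fetch the oldest pending interrupt and re-inject it,
	 * iterating well beyond VINT_QUEUE_MAX to exercise wrap-around of
	 * the queue.
	 */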
	for (uint32_t i = 0; i < VINT_QUEUE_MAX * 3; i++) {
		uint32_t it_intid = vcpu_virt_interrupt_get_pending_and_enabled(
			vcpu_locked);
		uint32_t peek_intid =
			vcpu_virt_interrupt_peek_pending_and_enabled(
				vcpu_locked);

		EXPECT_NE(it_intid, HF_INVALID_INTID);
		/*
		 * The fetched and re-injected intid alternates between
		 * `first_intid` and `second_intid` each iteration, while
		 * the other remains pending at the head of the queue.
		 */
		if (i % 2 == 0) {
			EXPECT_EQ(it_intid, first_intid);
			EXPECT_EQ(peek_intid, second_intid);
		} else {
			EXPECT_EQ(it_intid, second_intid);
			EXPECT_EQ(peek_intid, first_intid);
		}
		EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);
		vcpu_virt_interrupt_inject(vcpu_locked, it_intid);
		EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
	}
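
	/* Drain the two remaining interrupts, emptying the queue. */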
	EXPECT_NE(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);
	EXPECT_NE(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
}

/*
 * Test that injecting the same interrupt ID multiple times sets it
 * pending, and increments the count, only once.
 */
TEST_F(vcpu, pending_interrupt_is_only_added_once)
{
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Pend the interrupt, and check the count is incremented. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Inject the same interrupt again; the count should not be
	 * incremented.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
}

/* Check that cleared interrupts are no longer pending. */
TEST_F(vcpu, pending_interrupts_can_be_cleared)
{
	/*
	 * Pend the interrupts, check the count is incremented, the pending
	 * interrupts are returned correctly and this causes the count to
	 * return to 0.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Again pend the interrupts. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);

	/* Remove the first interrupt. */
	vcpu_virt_interrupt_clear(vcpu_locked, first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the first interrupt is cleared:
	 * the second intid should be returned and then the invalid
	 * intid to show there are no more pending and enabled interrupts.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Inject the interrupts again. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);

	/* Remove the second interrupt. */
	vcpu_virt_interrupt_clear(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the first interrupt is returned as a pending interrupt
	 * and is the only interrupt pending.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
}

/*
 * Check that when an interrupt is cleared, space is created for a new
 * interrupt to be injected. Also check that, after clearing interrupts,
 * the FIFO ordering of the remaining interrupts is maintained.
 */
TEST_F(vcpu, pending_interrupts_clear_full_list)
{
	/* Fill the interrupt queue for the vCPU. */
	for (uint32_t i = 0; i < VINT_QUEUE_MAX; i++) {
		vcpu_virt_interrupt_enable(vcpu_locked, i, true);
		vcpu_virt_interrupt_inject(vcpu_locked, i);
	}
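
	/*
	 * The queue is now full, holding intids 0 through
	 * VINT_QUEUE_MAX - 1.
	 */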

	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), VINT_QUEUE_MAX);

	/* Check clearing an interrupt makes space for another interrupt. */
	vcpu_virt_interrupt_clear(vcpu_locked, 2);

	vcpu_virt_interrupt_enable(vcpu_locked, VINT_QUEUE_MAX, true);
	vcpu_virt_interrupt_inject(vcpu_locked, VINT_QUEUE_MAX);

	/* Check disabled interrupts are also cleared. */
	vcpu_virt_interrupt_enable(vcpu_locked, 1, false);
	vcpu_virt_interrupt_clear(vcpu_locked, 1);

	vcpu_virt_interrupt_inject(vcpu_locked, VINT_QUEUE_MAX + 1);
	/*
	 * Enable the interrupt only after injecting it, to check it is
	 * correctly queued while disabled and is still returned by
	 * vcpu_virt_interrupt_get_pending_and_enabled once enabled.
	 */
	vcpu_virt_interrupt_enable(vcpu_locked, VINT_QUEUE_MAX + 1, true);
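
	/*
	 * After the clears and injections the queue holds, in FIFO order:
	 * 0, 3, 4, ..., VINT_QUEUE_MAX - 1, VINT_QUEUE_MAX,
	 * VINT_QUEUE_MAX + 1.
	 */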
	/* Get the interrupts to check the FIFO order is maintained. */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 0);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 3);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 4);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 5);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 6);
}
} /* namespace */