/*
 * Copyright 2025 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/check.h"
#include "hf/vcpu.h"
#include "hf/vm.h"
}

#include <map>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;
using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
const int TOP_LEVEL = arch_mm_stage2_max_level();
class vcpu : public ::testing::Test
{
       protected:
	static std::unique_ptr<uint8_t[]> test_heap;
	struct mpool ppool;
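	/* The tests use the two highest valid virtual interrupt IDs. */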
	const uint32_t first_intid = HF_NUM_INTIDS - 2;
	const uint32_t second_intid = HF_NUM_INTIDS - 1;
	struct_vm *test_vm;
	struct_vcpu *test_vcpu;
	struct vcpu_locked vcpu_locked;
	struct interrupts *interrupts;

	void SetUp() override
	{
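		/*
		 * The backing heap is allocated only the first time SetUp()
		 * runs; subsequent tests in this suite reuse it.
		 */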
		if (!test_heap) {
			test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
			mpool_init(&ppool, sizeof(struct mm_page_table));
			mpool_add_chunk(&ppool, test_heap.get(),
					TEST_HEAP_SIZE);
		}

		test_vm = vm_init(HF_VM_ID_OFFSET, 1, &ppool, false, 0);
		test_vcpu = vm_get_vcpu(test_vm, 0);
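		/*
		 * Hold the vCPU lock for the duration of each test; it is
		 * released in TearDown().
		 */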
		vcpu_locked = vcpu_lock(test_vcpu);
		interrupts = &test_vcpu->interrupts;

		/* Enable the interrupts used in testing. */
		vcpu_virt_interrupt_enable(vcpu_locked, first_intid, true);
		vcpu_virt_interrupt_enable(vcpu_locked, second_intid, true);
	}

	void TearDown() override
	{
		vcpu_unlock(&vcpu_locked);
	}
};

std::unique_ptr<uint8_t[]> vcpu::test_heap;

/**
 * Check that interrupts that are set pending can later be fetched
 * from the queue.
 */
TEST_F(vcpu, pending_interrupts_are_fetched)
{
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Pend the interrupts, and check the count is incremented. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);

	/*
	 * Check the pended interrupts are returned correctly and that, once
	 * both have been returned, the invalid intid is given to show there
	 * are no more pending interrupts.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);

	/*
	 * Check that, having been fetched, the interrupts are no longer
	 * marked as pending in the bitmap and that the interrupt count is 0.
	 */
	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/*
	 * Check that this expected behaviour happens on a consecutive run.
	 * Invert the order of the interrupts to add some variation.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);

	EXPECT_TRUE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
	EXPECT_TRUE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
	EXPECT_EQ(vcpu_virt_interrupt_irq_count_get(vcpu_locked), 2);

	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);

	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
	EXPECT_EQ(vcpu_virt_interrupt_irq_count_get(vcpu_locked), 0);
}

/**
 * Check that a disabled interrupt will not be returned until it is
 * enabled.
 */
TEST_F(vcpu, pending_interrupts_not_enabled_are_not_returned)
{
	/*
	 * Pend the interrupts, check that the count is incremented, that the
	 * pending interrupts are returned correctly, and that this causes the
	 * count to return to 0.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Again pend the interrupts. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);

	/* Disable the first interrupt. */
	vcpu_virt_interrupt_enable(vcpu_locked, first_intid, false);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the disabled first interrupt is not returned: the
	 * second intid should be returned, followed by the invalid intid to
	 * show there are no more pending and enabled interrupts.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Re-enable the first interrupt and disable the second interrupt. */
	vcpu_virt_interrupt_enable(vcpu_locked, first_intid, true);
	vcpu_virt_interrupt_enable(vcpu_locked, second_intid, false);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that an interrupt injected while it is disabled will
	 * eventually be returned once the interrupt is enabled.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the re-enabled first interrupt is returned and is the
	 * only pending and enabled interrupt.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Enable the second interrupt to check it will now be returned. */
	vcpu_virt_interrupt_enable(vcpu_locked, second_intid, true);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the second interrupt is now returned as pending and is
	 * the only pending and enabled interrupt.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
}
} /* namespace */