blob: e1244d03d365f3a4590071fdc0609bcb3a7d866d [file] [log] [blame]
Daniel Boulbyea296e82025-01-31 10:08:16 +00001/*
2 * Copyright 2025 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9#include <gmock/gmock.h>
10
11extern "C" {
12#include "hf/check.h"
13#include "hf/vcpu.h"
14#include "hf/vm.h"
15}
16
17#include <map>
18
19#include "mm_test.hh"
20
21namespace
22{
23using namespace ::std::placeholders;
24using ::testing::AllOf;
25using ::testing::Each;
26using ::testing::SizeIs;
27using struct_vm = struct vm;
28using struct_vcpu = struct vcpu;
29using struct_vm_locked = struct vm_locked;
30
/* Size of the backing heap handed to the page-table memory pool. */
constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
/*
 * Maximum stage-2 page-table level, as reported by
 * arch_mm_stage2_max_level().
 * NOTE(review): appears unused in this file — confirm before removing.
 */
const int TOP_LEVEL = arch_mm_stage2_max_level();
33class vcpu : public ::testing::Test
34{
35 protected:
36 static std::unique_ptr<uint8_t[]> test_heap;
37 struct mpool ppool;
38 const uint32_t first_intid = HF_NUM_INTIDS - 2;
39 const uint32_t second_intid = HF_NUM_INTIDS - 1;
40 struct_vm *test_vm;
41 struct_vcpu *test_vcpu;
42 struct interrupts *interrupts;
43
44 void SetUp() override
45 {
46 if (test_heap) {
47 return;
48 }
49 test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
50 mpool_init(&ppool, sizeof(struct mm_page_table));
51 mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
52 test_vm = vm_init(HF_VM_ID_OFFSET, 1, &ppool, false, 0);
53 test_vcpu = vm_get_vcpu(test_vm, 0);
54 interrupts = &test_vcpu->interrupts;
55
56 /* Enable the interrupts used in testing. */
57 vcpu_virt_interrupt_set_enabled(interrupts, first_intid);
58 vcpu_virt_interrupt_set_enabled(interrupts, second_intid);
59 }
60};
61
62std::unique_ptr<uint8_t[]> vcpu::test_heap;
63
64/**
65 * Check that interrupts that are set pending, can later be fetched
66 * from the queue.
67 */
68TEST_F(vcpu, pending_interrupts_are_fetched)
69{
70 struct vcpu_locked vcpu_locked = vcpu_lock(test_vcpu);
71
72 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
73
74 /* Pend the interrupts, and check the count is incremented. */
75 vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
76 vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
77 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
78
79 /*
80 * Check the pended interrupts are correctly returned, and once both
81 * have been returned the invalid intid is given to show there are no
82 * more pending interrupts.
83 */
84 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
85 first_intid);
86 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
87 second_intid);
88 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
89 HF_INVALID_INTID);
90
91 /*
92 * Check, having been fetched, the interrupts are no longer marked as
93 * pending in the bitmap, and the interrupt count is 0.
94 */
95 EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
96 EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
97 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
98
99 /*
100 * Check that this expected behavour happens on a consecutive run.
101 * Invert the order of the interrupts to add some variation.
102 */
103 vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
104 vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
105
106 EXPECT_TRUE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
107 EXPECT_TRUE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
108 EXPECT_EQ(vcpu_virt_interrupt_irq_count_get(vcpu_locked), 2);
109
110 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
111 second_intid);
112 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
113 first_intid);
114 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
115 HF_INVALID_INTID);
116
117 EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
118 EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
119 EXPECT_EQ(vcpu_virt_interrupt_irq_count_get(vcpu_locked), 0);
120
121 vcpu_unlock(&vcpu_locked);
122}
123
124/*
125 * Check that a disabled interrupt will not be returned until it is
126 * enabled.
127 */
128TEST_F(vcpu, pending_interrupts_not_enabled_are_not_returned)
129{
130 struct vcpu_locked vcpu_locked = vcpu_lock(test_vcpu);
131
132 /*
133 * Pend the interrupts, check the count is incremented, the pending
134 * interrupts are returned correctly and this causes the count to
135 * return to 0.
136 */
137 vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
138 vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
139 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
140 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
141 first_intid);
142 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
143 second_intid);
144 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
145
146 /* Again pend the interrupts. */
147 vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
148 vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
149 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
150
151 /* Disable the first interrupt. */
152 vcpu_virt_interrupt_enable(vcpu_locked, first_intid, false);
153 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);
154
155 /*
156 * Check that the disabled first interrupt is not returned,
157 * the second intid should be returned and then the invalid
158 * intid to show there are no more pending and enabled interrupts.
159 */
160 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
161 second_intid);
162 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
163 HF_INVALID_INTID);
164 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
165
166 /* Reenable the first interrupt and disable the second interrupt.*/
167 vcpu_virt_interrupt_enable(vcpu_locked, first_intid, true);
168 vcpu_virt_interrupt_enable(vcpu_locked, second_intid, false);
169 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);
170
171 /*
172 * Check that an interrupt injected when the interrupt is disabled will
173 * eventually be returned once the interrupt is enabled.
174 */
175 vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
176 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);
177
178 /*
179 * Check that it is now returned as a pending interrupt and is the only
180 * interrupt pending.
181 */
182 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
183 first_intid);
184 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
185 HF_INVALID_INTID);
186 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
187
188 /* Enable the second interrupt to check it will now be returned. */
189 vcpu_virt_interrupt_enable(vcpu_locked, second_intid, true);
190 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);
191
192 /*
193 * Check that it is now returned as a pending interrupt and is the only
194 * interrupt pending.
195 */
196 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
197 second_intid);
198 EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
199 HF_INVALID_INTID);
200 EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
201
202 vcpu_unlock(&vcpu_locked);
203}
204} /* namespace */