/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/check.h"
#include "hf/hf_ipi.h"
}

#include <map>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;
using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

/**
 * IPI tests to check that sent IPIs are correctly recorded as pending.
 */

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
const int TOP_LEVEL = arch_mm_stage2_max_level();
class ipi : public ::testing::Test
{
       protected:
	static std::unique_ptr<uint8_t[]> test_heap;
	struct mpool ppool;
	struct_vm *test_vm[4];
	void SetUp() override
	{
		if (test_heap) {
			return;
		}
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
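		/*
		 * Create four test VMs; each models a different vCPU state
		 * (running, waiting, blocked, preempted) in the loop below.
		 */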
		for (size_t i = 0; i < std::size(test_vm); i++) {
			test_vm[i] = vm_init(i + HF_VM_ID_OFFSET, MAX_CPUS,
					     &ppool, false, 0);
		}

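		/*
		 * On each CPU, put one vCPU from each test VM into the state
		 * that VM represents and enable the IPI virtual interrupt.
		 */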
		for (size_t i = 0; i < MAX_CPUS; i++) {
			struct vcpu *running_vcpu = vm_get_vcpu(test_vm[0], i);
			struct vcpu *waiting_vcpu = vm_get_vcpu(test_vm[1], i);
			struct vcpu *blocked_vcpu = vm_get_vcpu(test_vm[2], i);
			struct vcpu *preempted_vcpu =
				vm_get_vcpu(test_vm[3], i);
			struct cpu *cpu = cpu_find_index(i);

			running_vcpu->cpu = cpu;
			running_vcpu->state = VCPU_STATE_RUNNING;
			vcpu_virt_interrupt_set_enabled(
				&running_vcpu->interrupts, HF_IPI_INTID);
			waiting_vcpu->cpu = cpu;
			waiting_vcpu->state = VCPU_STATE_WAITING;
			vcpu_virt_interrupt_set_enabled(
				&waiting_vcpu->interrupts, HF_IPI_INTID);
			blocked_vcpu->cpu = cpu;
			blocked_vcpu->state = VCPU_STATE_BLOCKED;
			vcpu_virt_interrupt_set_enabled(
				&blocked_vcpu->interrupts, HF_IPI_INTID);
			preempted_vcpu->cpu = cpu;
			preempted_vcpu->state = VCPU_STATE_PREEMPTED;
			vcpu_virt_interrupt_set_enabled(
				&preempted_vcpu->interrupts, HF_IPI_INTID);

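			/* Each CPU tracks its pending IPIs in a list. */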
			list_init(&cpu->pending_ipis);
		}
	}
};

std::unique_ptr<uint8_t[]> ipi::test_heap;

/**
 * Check that when an IPI is sent to vCPU0, vCPU0 is
 * stored as the pending target_vcpu within the IPI framework.
 *
 * This test also sets the vCPUs of the first test VM (test_vm[0]) to
 * running on all CPUs. This is used in later tests.
 */
TEST_F(ipi, one_service_to_one_cpu)
{
	struct_vm *current_vm = ipi::test_vm[0];
	ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

	CHECK(vcpu_count == MAX_CPUS);

	for (size_t i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(current_vm, i);
		struct cpu *cpu = cpu_find_index(i);
		vcpu->cpu = cpu;
		vcpu->state = VCPU_STATE_RUNNING;
		list_init(&cpu->pending_ipis);
	}

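	/* Send an IPI from the service targeting its vCPU0 on CPU 0. */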
	hf_ipi_send_interrupt(current_vm, 0);

	/* Check vCPU0 is stored as having a pending interrupt on CPU 0. */
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  vm_get_vcpu(current_vm, 0));
	/* Check that there are no longer pending interrupts on CPU 0. */
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Check that if one service sends IPIs to different target vCPUs, they are
 * stored under the correct CPUs.
 */
TEST_F(ipi, one_service_to_different_cpus)
{
	struct_vm *current_vm = ipi::test_vm[0];
	ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

	CHECK(vcpu_count >= 2);

	hf_ipi_send_interrupt(current_vm, 0);
	hf_ipi_send_interrupt(current_vm, 1);

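	/* Each CPU should report the vCPU that was targeted on it. */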
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  vm_get_vcpu(current_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 1)),
		  vm_get_vcpu(current_vm, 1));
}

/**
 * Multiple services target IPIs to CPUs 0, 1, 2 and 3 respectively.
 */
TEST_F(ipi, multiple_services_to_different_cpus)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 1);
	hf_ipi_send_interrupt(blocked_vm, 2);
	hf_ipi_send_interrupt(preempted_vm, 3);

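	/*
	 * Each CPU's pending list should hold the vCPU of the service that
	 * targeted it.
	 */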
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 1)),
		  vm_get_vcpu(waiting_vm, 1));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 2)),
		  vm_get_vcpu(blocked_vm, 2));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 3)),
		  vm_get_vcpu(preempted_vm, 3));
}

/**
 * Multiple services targeting IPIs to CPU0 are all pending.
 */
TEST_F(ipi, multiple_services_to_same_cpu)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(blocked_vm, 0);
	hf_ipi_send_interrupt(preempted_vm, 0);

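	/* All four pending IPIs on CPU 0 should be returned, then NULL. */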
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(blocked_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(preempted_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Check that if the same service sends an IPI to the same target_vcpu
 * multiple times, it is only added to the list once and does not create
 * loops in the list.
 */
TEST_F(ipi, multiple_services_to_same_cpu_multiple_sends)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(running_vm, 0);

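	/* The repeated send must not add running_vm's vCPU a second time. */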
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Multiple services targeting IPIs to CPU0 are all pending and the running
 * vCPU is returned first.
 */
TEST_F(ipi, multiple_services_to_same_cpu_running_prioritized)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

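	/*
	 * Send from the running service last; it should nevertheless be
	 * returned first.
	 */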
	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(blocked_vm, 0);
	hf_ipi_send_interrupt(preempted_vm, 0);
	hf_ipi_send_interrupt(running_vm, 0);

	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(blocked_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(preempted_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Multiple services target IPIs to every CPU. Run the full IPI handling
 * path on each CPU and check that the running vCPU is handled first, the
 * SRI is delayed, only the waiting service is reported by
 * FFA_NOTIFICATION_INFO_GET, and the IPI virtual interrupt ends up pending
 * for every target vCPU.
 */
TEST_F(ipi, multiple_services_to_same_cpu_full_handle)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	struct vcpu *top_priority_vcpu;
	struct vcpu_locked vcpu_locked;
	constexpr size_t test_service_count = 4;
	struct_vm *test_service[test_service_count] = {
		waiting_vm, blocked_vm, preempted_vm, running_vm};

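	/* Every service sends an IPI to each of its vCPUs, one per CPU. */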
	for (size_t i = 0; i < test_service_count; i++) {
		for (size_t j = 0; j < MAX_CPUS; j++) {
			hf_ipi_send_interrupt(test_service[i], j);
		}
	}

	/* Handle the IPI on all CPUs and do some initial checks. */
	for (size_t i = 0; i < MAX_CPUS; i++) {
		top_priority_vcpu = hf_ipi_get_pending_target_vcpu(
			vm_get_vcpu(running_vm, i));
		vcpu_locked = vcpu_lock(top_priority_vcpu);
		/*
		 * Check the running service is returned as the top priority
		 * vCPU.
		 */
		EXPECT_EQ(top_priority_vcpu, vm_get_vcpu(running_vm, i));
		/* Run the IPI handler on this CPU. */
		hf_ipi_handle(vcpu_locked);
		/*
		 * Since there is a running vCPU with a pending IPI when
		 * handling the WAITING vCPU, we should have set the SRI to be
		 * delayed. Check this is the case.
		 */
		EXPECT_TRUE(top_priority_vcpu->cpu->is_sri_delayed);
		vcpu_unlock(&vcpu_locked);
	}

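	/*
	 * After handling, check each service's notification info and that the
	 * IPI virtual interrupt is pending for all of its vCPUs.
	 */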
	for (size_t i = 0; i < test_service_count; i++) {
		struct vm_locked vm_locked = vm_lock(test_service[i]);
		uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
		uint32_t ids_count = 0;
		uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
		uint32_t lists_count = 0;
		enum notifications_info_get_state current_state = INIT;
		const bool is_from_vm = false;
		/*
		 * Check response of FFA_NOTIFICATION_INFO_GET. The ID should
		 * only be returned if the service is in the waiting state.
		 */
		vm_notifications_info_get_pending(
			vm_locked, is_from_vm, ids, &ids_count, lists_sizes,
			&lists_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
			&current_state);
		/*
		 * In this test setup all vCPUs of a service are in the same
		 * state.
		 */
		if (vm_get_vcpu(test_service[i], 0)->state ==
		    VCPU_STATE_WAITING) {
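			/*
			 * The four waiting vCPUs are expected as two lists:
			 * the service ID with vCPUs 0-2, then the service ID
			 * with vCPU 3.
			 */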
			EXPECT_EQ(ids_count, 6);
			EXPECT_EQ(lists_count, 2);
			EXPECT_EQ(lists_sizes[0], 3);
			EXPECT_EQ(lists_sizes[1], 1);
			EXPECT_EQ(ids[0], test_service[i]->id);
			EXPECT_EQ(ids[1], 0);
			EXPECT_EQ(ids[2], 1);
			EXPECT_EQ(ids[3], 2);
			EXPECT_EQ(ids[4], test_service[i]->id);
			EXPECT_EQ(ids[5], 3);
		} else {
			EXPECT_EQ(ids_count, 0);
			EXPECT_EQ(lists_count, 0);
		}

		for (size_t j = 0; j < MAX_CPUS; j++) {
			/* Check the IPI interrupt is pending and enabled. */
			struct vcpu *vcpu = vm_get_vcpu(test_service[i], j);
			vcpu_locked = vcpu_lock(vcpu);
			EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(
					  vcpu_locked),
				  HF_IPI_INTID);
			vcpu_unlock(&vcpu_locked);
		}
		vm_unlock(&vm_locked);
	}

	for (size_t i = 0; i < MAX_CPUS; i++) {
		/* Check that there are no more vCPUs with pending IPIs. */
		EXPECT_EQ(hf_ipi_get_pending_target_vcpu(
				  vm_get_vcpu(running_vm, i)),
			  (struct vcpu *)NULL);
	}
}
} /* namespace */