/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/check.h"
#include "hf/hf_ipi.h"
}

#include <map>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;
using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

/**
 * IPI tests to check that sent IPIs are correctly recorded as pending.
 */

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
const int TOP_LEVEL = arch_mm_stage2_max_level();
class ipi : public ::testing::Test
{
       protected:
	static std::unique_ptr<uint8_t[]> test_heap;
	struct mpool ppool;
	struct_vm *test_vm[4];
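	/*
	 * SetUp creates four test VMs, one for each vCPU scheduling state
	 * exercised below (running, waiting, blocked and preempted), with
	 * one vCPU of every VM assigned to each CPU and the IPI virtual
	 * interrupt enabled on all of them.
	 */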
	void SetUp() override
	{
		if (test_heap) {
			return;
		}
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
		for (size_t i = 0; i < std::size(test_vm); i++) {
			test_vm[i] = vm_init(i + HF_VM_ID_OFFSET, MAX_CPUS,
					     &ppool, false, 0);
		}

		for (size_t i = 0; i < MAX_CPUS; i++) {
			struct vcpu *running_vcpu = vm_get_vcpu(test_vm[0], i);
			struct vcpu *waiting_vcpu = vm_get_vcpu(test_vm[1], i);
			struct vcpu *blocked_vcpu = vm_get_vcpu(test_vm[2], i);
			struct vcpu *preempted_vcpu =
				vm_get_vcpu(test_vm[3], i);
			struct vcpu_locked running_locked =
				vcpu_lock(running_vcpu);
			struct vcpu_locked waiting_locked =
				vcpu_lock(waiting_vcpu);
			struct vcpu_locked blocked_locked =
				vcpu_lock(blocked_vcpu);
			struct vcpu_locked preempted_locked =
				vcpu_lock(preempted_vcpu);

			struct cpu *cpu = cpu_find_index(i);

			running_vcpu->cpu = cpu;
			running_vcpu->state = VCPU_STATE_RUNNING;
			vcpu_virt_interrupt_enable(running_locked, HF_IPI_INTID,
						   true);

			waiting_vcpu->cpu = cpu;
			waiting_vcpu->state = VCPU_STATE_WAITING;
			vcpu_virt_interrupt_enable(waiting_locked, HF_IPI_INTID,
						   true);

			blocked_vcpu->cpu = cpu;
			blocked_vcpu->state = VCPU_STATE_BLOCKED;
			vcpu_virt_interrupt_enable(blocked_locked, HF_IPI_INTID,
						   true);

			preempted_vcpu->cpu = cpu;
			preempted_vcpu->state = VCPU_STATE_PREEMPTED;
			vcpu_virt_interrupt_enable(preempted_locked,
						   HF_IPI_INTID, true);

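			/*
			 * Each CPU tracks vCPUs with pending IPIs in its
			 * pending_ipis list; start every CPU with an empty
			 * list.
			 */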
			list_init(&cpu->pending_ipis);

			vcpu_unlock(&running_locked);
			vcpu_unlock(&waiting_locked);
			vcpu_unlock(&blocked_locked);
			vcpu_unlock(&preempted_locked);
		}
	}
};

std::unique_ptr<uint8_t[]> ipi::test_heap;

/**
 * Check that when an IPI is sent to vCPU0, vCPU0 is
 * stored as the pending target_vcpu within the IPI framework.
 *
 * This test also sets all vCPUs of the first test VM to the running
 * state on every CPU. Later tests rely on this state.
 */
TEST_F(ipi, one_service_to_one_cpu)
{
	struct_vm *current_vm = ipi::test_vm[0];
	ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

	CHECK(vcpu_count == MAX_CPUS);

	for (size_t i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(current_vm, i);
		struct cpu *cpu = cpu_find_index(i);
		vcpu->cpu = cpu;
		vcpu->state = VCPU_STATE_RUNNING;
		list_init(&cpu->pending_ipis);
	}

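	/* Send an IPI targeting vCPU0 of the service. */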
	hf_ipi_send_interrupt(current_vm, 0);

	/* Check vCPU0 is stored as having a pending interrupt on CPU 0. */
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  vm_get_vcpu(current_vm, 0));
	/* Check that there are no longer pending interrupts on CPU 0. */
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Check that if one service sends IPIs to different target vCPUs, they are
 * stored under the correct CPUs.
 */
TEST_F(ipi, one_service_to_different_cpus)
{
	struct_vm *current_vm = ipi::test_vm[0];
	ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

	CHECK(vcpu_count >= 2);

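	/* Target vCPU0 and vCPU1, which SetUp assigned to CPU 0 and CPU 1. */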
	hf_ipi_send_interrupt(current_vm, 0);
	hf_ipi_send_interrupt(current_vm, 1);

	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  vm_get_vcpu(current_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 1)),
		  vm_get_vcpu(current_vm, 1));
}

/**
 * Multiple services target IPIs at CPUs 0, 1, 2 and 3 respectively.
 */
TEST_F(ipi, multiple_services_to_different_cpus)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 1);
	hf_ipi_send_interrupt(blocked_vm, 2);
	hf_ipi_send_interrupt(preempted_vm, 3);

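	/* Each CPU should report the vCPU of the service that targeted it. */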
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 1)),
		  vm_get_vcpu(waiting_vm, 1));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 2)),
		  vm_get_vcpu(blocked_vm, 2));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 3)),
		  vm_get_vcpu(preempted_vm, 3));
}

/**
 * Multiple services targeting IPIs at CPU 0 are all pending.
 */
TEST_F(ipi, multiple_services_to_same_cpu)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(blocked_vm, 0);
	hf_ipi_send_interrupt(preempted_vm, 0);

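	/*
	 * All four IPIs are pending on CPU 0; each retrieval pops one target
	 * vCPU until the list is drained and NULL is returned.
	 */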
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(blocked_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(preempted_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Check that if the same service sends an IPI to the same target_vcpu
 * multiple times, it is only added to the list once and does not create
 * loops in the list.
 */
TEST_F(ipi, multiple_services_to_same_cpu_multiple_sends)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(running_vm, 0);

	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Multiple services targeting IPIs at CPU 0 are all pending and the running
 * vCPU is returned first.
 */
TEST_F(ipi, multiple_services_to_same_cpu_running_prioritized)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(blocked_vm, 0);
	hf_ipi_send_interrupt(preempted_vm, 0);
	hf_ipi_send_interrupt(running_vm, 0);

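	/*
	 * Although the running service's IPI was sent last, its vCPU is
	 * returned first; the rest follow in the order they were sent.
	 */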
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(blocked_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(preempted_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * All services target IPIs at every CPU. Run the full IPI handling flow and
 * check that only services in the waiting state are reported through
 * FFA_NOTIFICATION_INFO_GET, while the IPI virtual interrupt is left pending
 * and enabled for every service.
 */
TEST_F(ipi, multiple_services_to_same_cpu_full_handle)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	struct vcpu *top_priority_vcpu;
	struct vcpu_locked vcpu_locked;
	constexpr size_t test_service_count = 4;
	struct_vm *test_service[test_service_count] = {
		waiting_vm, blocked_vm, preempted_vm, running_vm};

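	/* Every service sends an IPI to every CPU. */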
	for (size_t i = 0; i < test_service_count; i++) {
		for (size_t j = 0; j < MAX_CPUS; j++) {
			hf_ipi_send_interrupt(test_service[i], j);
		}
	}

	/* Handle the IPI on all CPUs and do some initial checks. */
	for (size_t i = 0; i < MAX_CPUS; i++) {
		top_priority_vcpu = hf_ipi_get_pending_target_vcpu(
			vm_get_vcpu(running_vm, i));
		vcpu_locked = vcpu_lock(top_priority_vcpu);
		/*
		 * Check the running service is returned as the top priority
		 * vCPU.
		 */
		EXPECT_EQ(top_priority_vcpu, vm_get_vcpu(running_vm, i));
		/* Run the IPI handler on this CPU. */
		hf_ipi_handle(vcpu_locked);
		/*
		 * Since there is a running vCPU with a pending IPI when
		 * handling the WAITING vCPU, we should have set the SRI to be
		 * delayed. Check this is the case.
		 */
		EXPECT_TRUE(top_priority_vcpu->cpu->is_sri_delayed);
		vcpu_unlock(&vcpu_locked);
	}

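	/* Check how each service is reported based on its scheduling state. */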
	for (size_t i = 0; i < test_service_count; i++) {
		struct vm_locked vm_locked = vm_lock(test_service[i]);
		uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
		uint32_t ids_count = 0;
		uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
		uint32_t lists_count = 0;
		enum notifications_info_get_state current_state = INIT;
		const bool is_from_vm = false;
		/*
		 * Check the response of FFA_NOTIFICATION_INFO_GET. The ID
		 * should only be returned if the service is in the waiting
		 * state.
		 */
		vm_notifications_info_get_pending(
			vm_locked, is_from_vm, ids, &ids_count, lists_sizes,
			&lists_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
			&current_state);
		/*
		 * In this test setup all vCPUs of a service are in the same
		 * state.
		 */
		if (vm_get_vcpu(test_service[i], 0)->state ==
		    VCPU_STATE_WAITING) {
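			/*
			 * Assuming MAX_CPUS is 4 in this test configuration,
			 * the waiting service reports two ID lists: the VM ID
			 * followed by vCPU IDs 0-2, then the VM ID followed
			 * by vCPU ID 3.
			 */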
			EXPECT_EQ(ids_count, 6);
			EXPECT_EQ(lists_count, 2);
			EXPECT_EQ(lists_sizes[0], 3);
			EXPECT_EQ(lists_sizes[1], 1);
			EXPECT_EQ(ids[0], test_service[i]->id);
			EXPECT_EQ(ids[1], 0);
			EXPECT_EQ(ids[2], 1);
			EXPECT_EQ(ids[3], 2);
			EXPECT_EQ(ids[4], test_service[i]->id);
			EXPECT_EQ(ids[5], 3);
		} else {
			EXPECT_EQ(ids_count, 0);
			EXPECT_EQ(lists_count, 0);
		}

		for (size_t j = 0; j < MAX_CPUS; j++) {
			/* Check the IPI interrupt is pending and enabled. */
			struct vcpu *vcpu = vm_get_vcpu(test_service[i], j);
			vcpu_locked = vcpu_lock(vcpu);
			EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(
					  vcpu_locked),
				  HF_IPI_INTID);
			vcpu_unlock(&vcpu_locked);
		}
		vm_unlock(&vm_locked);
	}

	for (size_t i = 0; i < MAX_CPUS; i++) {
		/* Check that there are no more vCPUs with pending IPIs. */
		EXPECT_EQ(hf_ipi_get_pending_target_vcpu(
				  vm_get_vcpu(running_vm, i)),
			  (struct vcpu *)NULL);
	}
}
} /* namespace */