/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/hf_ipi.h"
#include "hf/mm.h"
}

#include <map>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;
using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

/**
 * IPI Test to check sent IPIs are correctly recorded as pending.
 */

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
const mm_level_t TOP_LEVEL = arch_mm_stage2_max_level();
class ipi : public ::testing::Test
{
       protected:
        static std::unique_ptr<uint8_t[]> test_heap;
        struct mpool ppool;
        struct_vm *test_vm[4];
        void SetUp() override
        {
                if (test_heap) {
                        return;
                }
                test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
                mpool_init(&ppool, sizeof(struct mm_page_table));
                mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
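                /*
                 * Create one test VM per vCPU scheduling state exercised by
                 * the tests below: running, waiting, blocked and preempted.
                 */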
                for (size_t i = 0; i < std::size(test_vm); i++) {
                        test_vm[i] = vm_init(i + HF_VM_ID_OFFSET, MAX_CPUS,
                                             &ppool, false, 0);
                }

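                /*
                 * For each physical CPU, pin one vCPU of each test VM to it,
                 * place those vCPUs in distinct scheduling states and enable
                 * the IPI virtual interrupt on each of them.
                 */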
                for (size_t i = 0; i < MAX_CPUS; i++) {
                        struct vcpu *running_vcpu = vm_get_vcpu(test_vm[0], i);
                        struct vcpu *waiting_vcpu = vm_get_vcpu(test_vm[1], i);
                        struct vcpu *blocked_vcpu = vm_get_vcpu(test_vm[2], i);
                        struct vcpu *preempted_vcpu =
                                vm_get_vcpu(test_vm[3], i);
                        struct vcpu_locked running_locked =
                                vcpu_lock(running_vcpu);
                        struct vcpu_locked waiting_locked =
                                vcpu_lock(waiting_vcpu);
                        struct vcpu_locked blocked_locked =
                                vcpu_lock(blocked_vcpu);
                        struct vcpu_locked preempted_locked =
                                vcpu_lock(preempted_vcpu);

                        struct cpu *cpu = cpu_find_index(i);

                        running_vcpu->cpu = cpu;
                        running_vcpu->state = VCPU_STATE_RUNNING;
                        vcpu_virt_interrupt_enable(running_locked,
                                                   HF_IPI_INTID, true);

                        waiting_vcpu->cpu = cpu;
                        waiting_vcpu->state = VCPU_STATE_WAITING;
                        vcpu_virt_interrupt_enable(waiting_locked,
                                                   HF_IPI_INTID, true);

                        blocked_vcpu->cpu = cpu;
                        blocked_vcpu->state = VCPU_STATE_BLOCKED;
                        vcpu_virt_interrupt_enable(blocked_locked,
                                                   HF_IPI_INTID, true);

                        preempted_vcpu->cpu = cpu;
                        preempted_vcpu->state = VCPU_STATE_PREEMPTED;
                        vcpu_virt_interrupt_enable(preempted_locked,
                                                   HF_IPI_INTID, true);

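                        /* Each CPU starts with an empty pending-IPI list. */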
                        list_init(&cpu->pending_ipis);

                        vcpu_unlock(&running_locked);
                        vcpu_unlock(&waiting_locked);
                        vcpu_unlock(&blocked_locked);
                        vcpu_unlock(&preempted_locked);
                }
        }
};

std::unique_ptr<uint8_t[]> ipi::test_heap;

/**
 * Check that when an IPI is sent to vCPU0, vCPU0 is
 * stored as the pending target_vcpu within the IPI framework.
 *
 * This test also sets the vCPUs of the first test VM (test_vm[0]) to
 * running on all CPUs; later tests rely on this state.
 */
TEST_F(ipi, one_service_to_one_cpu)
{
        struct_vm *current_vm = ipi::test_vm[0];
        ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

        CHECK(vcpu_count == MAX_CPUS);

        for (size_t i = 0; i < MAX_CPUS; i++) {
                struct vcpu *vcpu = vm_get_vcpu(current_vm, i);
                struct cpu *cpu = cpu_find_index(i);
                vcpu->cpu = cpu;
                vcpu->state = VCPU_STATE_RUNNING;
                list_init(&cpu->pending_ipis);
        }

        hf_ipi_send_interrupt(current_vm, 0);

        /* Check vCPU0 is stored as having a pending interrupt on CPU 0. */
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
                  vm_get_vcpu(current_vm, 0));
        /*
         * Retrieving the target vCPU removes it from the list, so there
         * should no longer be any pending IPIs on CPU 0.
         */
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
                  (struct vcpu *)NULL);
}

/**
 * Check that when one service sends IPIs to different target vCPUs, they
 * are stored under the correct CPUs.
 */
TEST_F(ipi, one_service_to_different_cpus)
{
        struct_vm *current_vm = ipi::test_vm[0];
        ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

        CHECK(vcpu_count >= 2);

        hf_ipi_send_interrupt(current_vm, 0);
        hf_ipi_send_interrupt(current_vm, 1);

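        /* Each IPI should be recorded against the CPU of its target vCPU. */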
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
                  vm_get_vcpu(current_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 1)),
                  vm_get_vcpu(current_vm, 1));
}

/**
 * Multiple services target IPIs at CPUs 0, 1, 2 and 3 respectively.
 */
TEST_F(ipi, multiple_services_to_different_cpus)
{
        struct_vm *running_vm = ipi::test_vm[0];
        struct_vm *waiting_vm = ipi::test_vm[1];
        struct_vm *blocked_vm = ipi::test_vm[2];
        struct_vm *preempted_vm = ipi::test_vm[3];

        hf_ipi_send_interrupt(running_vm, 0);
        hf_ipi_send_interrupt(waiting_vm, 1);
        hf_ipi_send_interrupt(blocked_vm, 2);
        hf_ipi_send_interrupt(preempted_vm, 3);

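        /* Each service's IPI should be pending on the CPU it targeted. */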
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(running_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 1)),
                  vm_get_vcpu(waiting_vm, 1));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 2)),
                  vm_get_vcpu(blocked_vm, 2));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 3)),
                  vm_get_vcpu(preempted_vm, 3));
}

/**
 * Multiple services targeting IPIs to CPU0 are all pending.
 */
TEST_F(ipi, multiple_services_to_same_cpu)
{
        struct_vm *running_vm = ipi::test_vm[0];
        struct_vm *waiting_vm = ipi::test_vm[1];
        struct_vm *blocked_vm = ipi::test_vm[2];
        struct_vm *preempted_vm = ipi::test_vm[3];

        hf_ipi_send_interrupt(running_vm, 0);
        hf_ipi_send_interrupt(waiting_vm, 0);
        hf_ipi_send_interrupt(blocked_vm, 0);
        hf_ipi_send_interrupt(preempted_vm, 0);

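        /*
         * All four target vCPUs are pending on CPU0 and are retrieved one at
         * a time; once the list is drained NULL is returned.
         */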
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(running_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(waiting_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(blocked_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(preempted_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  (struct vcpu *)NULL);
}

/**
 * Check that if the same service sends an IPI to the same target vCPU
 * multiple times, it is only added to the list once and does not create
 * loops in the list.
 */
TEST_F(ipi, multiple_services_to_same_cpu_multiple_sends)
{
        struct_vm *running_vm = ipi::test_vm[0];
        struct_vm *waiting_vm = ipi::test_vm[1];

        hf_ipi_send_interrupt(running_vm, 0);
        hf_ipi_send_interrupt(waiting_vm, 0);
        hf_ipi_send_interrupt(running_vm, 0);

        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(running_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(waiting_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  (struct vcpu *)NULL);
}

/**
 * Multiple services targeting IPIs to CPU0 are all pending and the running
 * vCPU is returned first.
 */
TEST_F(ipi, multiple_services_to_same_cpu_running_prioritized)
{
        struct_vm *running_vm = ipi::test_vm[0];
        struct_vm *waiting_vm = ipi::test_vm[1];
        struct_vm *blocked_vm = ipi::test_vm[2];
        struct_vm *preempted_vm = ipi::test_vm[3];

        hf_ipi_send_interrupt(waiting_vm, 0);
        hf_ipi_send_interrupt(blocked_vm, 0);
        hf_ipi_send_interrupt(preempted_vm, 0);
        hf_ipi_send_interrupt(running_vm, 0);

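        /*
         * The running vCPU is returned first even though its IPI was sent
         * last; the remaining vCPUs follow in the order their IPIs were sent.
         */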
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(running_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(waiting_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(blocked_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  vm_get_vcpu(preempted_vm, 0));
        EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
                  (struct vcpu *)NULL);
}

/**
 * Multiple services target IPIs at every CPU; run the full IPI handling
 * flow and check the resulting interrupt and notification state.
 */
TEST_F(ipi, multiple_services_to_same_cpu_full_handle)
{
        struct_vm *running_vm = ipi::test_vm[0];
        struct_vm *waiting_vm = ipi::test_vm[1];
        struct_vm *blocked_vm = ipi::test_vm[2];
        struct_vm *preempted_vm = ipi::test_vm[3];

        struct vcpu *top_priority_vcpu;
        struct vcpu_locked vcpu_locked;
        constexpr size_t test_service_count = 4;
        struct_vm *test_service[test_service_count] = {
                waiting_vm, blocked_vm, preempted_vm, running_vm};

        for (size_t i = 0; i < test_service_count; i++) {
                for (size_t j = 0; j < MAX_CPUS; j++) {
                        hf_ipi_send_interrupt(test_service[i], j);
                }
        }

        /* Handle the IPI on all CPUs and do some initial checks. */
        for (size_t i = 0; i < MAX_CPUS; i++) {
                top_priority_vcpu = hf_ipi_get_pending_target_vcpu(
                        vm_get_vcpu(running_vm, i));
                vcpu_locked = vcpu_lock(top_priority_vcpu);
                /*
                 * Check the running service is returned as the top priority
                 * vCPU.
                 */
                EXPECT_EQ(top_priority_vcpu, vm_get_vcpu(running_vm, i));
                /* Run the IPI handler on this CPU. */
                hf_ipi_handle(vcpu_locked);
                /*
                 * Since there is a running vCPU with a pending IPI when
                 * handling the WAITING vCPU, the SRI should have been set to
                 * be delayed. Check this is the case.
                 */
                EXPECT_TRUE(top_priority_vcpu->cpu->is_sri_delayed);
                vcpu_unlock(&vcpu_locked);
        }

        for (size_t i = 0; i < test_service_count; i++) {
                struct vm_locked vm_locked = vm_lock(test_service[i]);
                uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
                uint32_t ids_count = 0;
                uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
                uint32_t lists_count = 0;
                enum notifications_info_get_state current_state = INIT;
                const bool is_from_vm = false;
                /*
                 * Check response of FFA_NOTIFICATION_INFO_GET. The ID should
                 * only be returned if the service is in the waiting state.
                 */
                vm_notifications_info_get_pending(
                        vm_locked, is_from_vm, ids, &ids_count, lists_sizes,
                        &lists_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
                        &current_state);
                /*
                 * In this test setup all vCPUs of a service are in the same
                 * state.
                 */
                if (vm_get_vcpu(test_service[i], 0)->state ==
                    VCPU_STATE_WAITING) {
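                        /*
                         * All four vCPUs of the waiting service have a
                         * pending IPI. Each returned list holds the VM ID
                         * plus up to three vCPU IDs, so two lists are
                         * expected: {id, 0, 1, 2} and {id, 3}, six IDs in
                         * total.
                         */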
                        EXPECT_EQ(ids_count, 6);
                        EXPECT_EQ(lists_count, 2);
                        EXPECT_EQ(lists_sizes[0], 3);
                        EXPECT_EQ(lists_sizes[1], 1);
                        EXPECT_EQ(ids[0], test_service[i]->id);
                        EXPECT_EQ(ids[1], 0);
                        EXPECT_EQ(ids[2], 1);
                        EXPECT_EQ(ids[3], 2);
                        EXPECT_EQ(ids[4], test_service[i]->id);
                        EXPECT_EQ(ids[5], 3);
                } else {
                        EXPECT_EQ(ids_count, 0);
                        EXPECT_EQ(lists_count, 0);
                }

                for (size_t j = 0; j < MAX_CPUS; j++) {
                        /* Check the IPI interrupt is pending. */
                        struct vcpu *vcpu = vm_get_vcpu(test_service[i], j);
                        vcpu_locked = vcpu_lock(vcpu);
                        EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(
                                          vcpu_locked),
                                  HF_IPI_INTID);
                        vcpu_unlock(&vcpu_locked);
                }
                vm_unlock(&vm_locked);
        }

        for (size_t i = 0; i < MAX_CPUS; i++) {
                /* Check that there are no more vCPUs with pending IPIs. */
                EXPECT_EQ(hf_ipi_get_pending_target_vcpu(
                                  vm_get_vcpu(running_vm, i)),
                          (struct vcpu *)NULL);
        }
}
} /* namespace */