/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/check.h"
#include "hf/hf_ipi.h"
}

#include <map>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;
using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

/**
 * IPI tests to check that sent IPIs are correctly recorded as pending.
 */

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
const int TOP_LEVEL = arch_mm_stage2_max_level();
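/*
 * Test fixture: backs a page pool with a static test heap and creates four
 * test VMs. Each VM's vCPUs are assigned one per CPU, with the four VMs'
 * vCPUs placed in the RUNNING, WAITING, BLOCKED and PREEMPTED states
 * respectively, and each CPU's pending IPI list is initialized.
 */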
class ipi : public ::testing::Test
{
       protected:
	static std::unique_ptr<uint8_t[]> test_heap;
	struct mpool ppool;
	struct_vm *test_vm[4];
	void SetUp() override
	{
		if (test_heap) {
			return;
		}
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
		for (size_t i = 0; i < std::size(test_vm); i++) {
			test_vm[i] = vm_init(i + HF_VM_ID_OFFSET, MAX_CPUS,
					     &ppool, false, 0);
		}

		for (size_t i = 0; i < MAX_CPUS; i++) {
			struct vcpu *running_vcpu = vm_get_vcpu(test_vm[0], i);
			struct vcpu *waiting_vcpu = vm_get_vcpu(test_vm[1], i);
			struct vcpu *blocked_vcpu = vm_get_vcpu(test_vm[2], i);
			struct vcpu *preempted_vcpu =
				vm_get_vcpu(test_vm[3], i);
			struct cpu *cpu = cpu_find_index(i);

			running_vcpu->cpu = cpu;
			running_vcpu->state = VCPU_STATE_RUNNING;
			waiting_vcpu->cpu = cpu;
			waiting_vcpu->state = VCPU_STATE_WAITING;
			blocked_vcpu->cpu = cpu;
			blocked_vcpu->state = VCPU_STATE_BLOCKED;
			preempted_vcpu->cpu = cpu;
			preempted_vcpu->state = VCPU_STATE_PREEMPTED;

			list_init(&cpu->pending_ipis);
		}
	}
};

std::unique_ptr<uint8_t[]> ipi::test_heap;

/**
 * Check that when an IPI is sent to vCPU0, vCPU0 is
 * stored as the pending target_vcpu within the IPI framework.
 *
 * This test also sets the vCPUs of the first test VM (test_vm[0]) to
 * running, one per CPU, and resets each CPU's pending IPI list. Later
 * tests rely on this state.
 */
TEST_F(ipi, one_service_to_one_cpu)
{
	struct_vm *current_vm = ipi::test_vm[0];
	ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

	CHECK(vcpu_count == MAX_CPUS);

	for (size_t i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(current_vm, i);
		struct cpu *cpu = cpu_find_index(i);
		vcpu->cpu = cpu;
		vcpu->state = VCPU_STATE_RUNNING;
		list_init(&cpu->pending_ipis);
	}

	hf_ipi_send_interrupt(current_vm, 0);

	/* Check vCPU0 is stored as having a pending interrupt on CPU 0. */
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  vm_get_vcpu(current_vm, 0));
	/* Check that there are no longer any pending IPIs on CPU 0. */
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Check that if one service sends IPIs to different target vCPUs, they
 * are stored under the correct CPUs.
 */
TEST_F(ipi, one_service_to_different_cpus)
{
	struct_vm *current_vm = ipi::test_vm[0];
	ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

	CHECK(vcpu_count >= 2);

	hf_ipi_send_interrupt(current_vm, 0);
	hf_ipi_send_interrupt(current_vm, 1);

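	/* Each IPI should be recorded against the CPU of its target vCPU. */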
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  vm_get_vcpu(current_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 1)),
		  vm_get_vcpu(current_vm, 1));
}

/**
 * Multiple services target IPIs at CPUs 0, 1, 2 and 3 respectively.
 */
TEST_F(ipi, multiple_services_to_different_cpus)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 1);
	hf_ipi_send_interrupt(blocked_vm, 2);
	hf_ipi_send_interrupt(preempted_vm, 3);

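	/*
	 * The pending list is queried through running_vm's vCPU for each CPU,
	 * but the returned target belongs to whichever service sent the IPI
	 * to that CPU.
	 */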
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 1)),
		  vm_get_vcpu(waiting_vm, 1));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 2)),
		  vm_get_vcpu(blocked_vm, 2));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 3)),
		  vm_get_vcpu(preempted_vm, 3));
}

/**
 * Multiple services target IPIs at CPU0 and all are recorded as pending.
 */
TEST_F(ipi, multiple_services_to_same_cpu)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(blocked_vm, 0);
	hf_ipi_send_interrupt(preempted_vm, 0);

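	/*
	 * Each service's vCPU0 is returned as pending in turn, then NULL once
	 * the list is drained.
	 */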
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(blocked_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(preempted_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Check that if the same service sends an IPI to the same target_vcpu
 * multiple times it is only added to the list once and does not create
 * loops in the list.
 */
TEST_F(ipi, multiple_services_to_same_cpu_multiple_sends)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(running_vm, 0);

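	/*
	 * running_vm's vCPU0 was sent an IPI twice but should only be
	 * returned once from the pending list.
	 */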
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Multiple services target IPIs at CPU0; all are pending and the running
 * vCPU is returned first.
 */
TEST_F(ipi, multiple_services_to_same_cpu_running_prioritized)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(blocked_vm, 0);
	hf_ipi_send_interrupt(preempted_vm, 0);
	hf_ipi_send_interrupt(running_vm, 0);

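	/*
	 * The running vCPU was queued last but is expected to be returned
	 * first; the remaining vCPUs follow in the order they were sent.
	 */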
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(blocked_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(preempted_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * All services send IPIs to every CPU and the full handling flow is run:
 * the running vCPU is returned as the top priority target, the waiting
 * service is reported by FFA_NOTIFICATION_INFO_GET and the IPI virtual
 * interrupt is left pending (and queued for blocked and preempted vCPUs).
 */
TEST_F(ipi, multiple_services_to_same_cpu_full_handle)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	struct vcpu *top_priority_vcpu;
	struct vcpu_locked vcpu_locked;
	constexpr size_t test_service_count = 4;
	struct_vm *test_service[test_service_count] = {
		waiting_vm, blocked_vm, preempted_vm, running_vm};

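	/* Each service sends an IPI to every CPU. */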
	for (size_t i = 0; i < test_service_count; i++) {
		for (size_t j = 0; j < MAX_CPUS; j++) {
			hf_ipi_send_interrupt(test_service[i], j);
		}
	}

	/* Handle the IPI on all CPUs and do some initial checks. */
	for (size_t i = 0; i < MAX_CPUS; i++) {
		top_priority_vcpu = hf_ipi_get_pending_target_vcpu(
			vm_get_vcpu(running_vm, i));
		vcpu_locked = vcpu_lock(top_priority_vcpu);
		/*
		 * Check the running service is returned as the top priority
		 * vCPU.
		 */
		EXPECT_EQ(top_priority_vcpu, vm_get_vcpu(running_vm, i));
		/* Run the IPI handler on this CPU. */
		hf_ipi_handle(vcpu_locked);
		/*
		 * Since there is a running vCPU with a pending IPI when
		 * handling the WAITING vCPU we should have set the SRI to be
		 * delayed. Check this is the case.
		 */
		EXPECT_TRUE(top_priority_vcpu->cpu->is_sri_delayed);
		vcpu_unlock(&vcpu_locked);
	}

	for (size_t i = 0; i < test_service_count; i++) {
		struct vm_locked vm_locked = vm_lock(test_service[i]);
		uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
		uint32_t ids_count = 0;
		uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
		uint32_t lists_count = 0;
		enum notifications_info_get_state current_state = INIT;
		const bool is_from_vm = false;
		/*
		 * Check response of FFA_NOTIFICATION_INFO_GET. The ID should
		 * only be returned if the service is in the waiting state.
		 */
		vm_notifications_info_get_pending(
			vm_locked, is_from_vm, ids, &ids_count, lists_sizes,
			&lists_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
			&current_state);
		/*
		 * In this test setup all vCPUs of a service are in the same
		 * state.
		 */
		if (vm_get_vcpu(test_service[i], 0)->state ==
		    VCPU_STATE_WAITING) {
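			/*
			 * The waiting service's pending vCPUs are reported as
			 * two lists: the VM ID with vCPUs 0-2, then the VM ID
			 * again with vCPU 3, for 6 IDs in total.
			 */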
			EXPECT_EQ(ids_count, 6);
			EXPECT_EQ(lists_count, 2);
			EXPECT_EQ(lists_sizes[0], 3);
			EXPECT_EQ(lists_sizes[1], 1);
			EXPECT_EQ(ids[0], test_service[i]->id);
			EXPECT_EQ(ids[1], 0);
			EXPECT_EQ(ids[2], 1);
			EXPECT_EQ(ids[3], 2);
			EXPECT_EQ(ids[4], test_service[i]->id);
			EXPECT_EQ(ids[5], 3);
		} else {
			EXPECT_EQ(ids_count, 0);
			EXPECT_EQ(lists_count, 0);
		}

		for (size_t j = 0; j < MAX_CPUS; j++) {
			/* Check the IPI interrupt is pending. */
			struct vcpu *vcpu = vm_get_vcpu(test_service[i], j);
			EXPECT_TRUE(vcpu_is_virt_interrupt_pending(
				&vcpu->interrupts, HF_IPI_INTID));
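			/*
			 * For blocked and preempted vCPUs the IPI should also
			 * be in the vCPU's interrupt queue.
			 */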
			if (vcpu->state == VCPU_STATE_BLOCKED ||
			    vcpu->state == VCPU_STATE_PREEMPTED) {
				vcpu_locked = vcpu_lock(vcpu);
				EXPECT_TRUE(vcpu_is_interrupt_in_queue(
					vcpu_locked, HF_IPI_INTID));
				vcpu_unlock(&vcpu_locked);
			}
		}
		vm_unlock(&vm_locked);
	}

	for (size_t i = 0; i < MAX_CPUS; i++) {
		/* Check that there are no more vCPUs with pending IPIs. */
		EXPECT_EQ(hf_ipi_get_pending_target_vcpu(
				  vm_get_vcpu(running_vm, i)),
			  (struct vcpu *)NULL);
	}
}
} /* namespace */