/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/irq.h"
#include "hf/arch/vm/interrupts.h"
#include "hf/arch/vm/interrupts_gicv3.h"
#include "hf/arch/vm/power_mgmt.h"

#include "vmapi/hf/call.h"

#include "gicv3.h"
#include "ipi_state.h"
#include "primary_with_secondary.h"
#include "test/hftest.h"
#include "test/semaphore.h"

/**
 * Where the ipi_state struct is stored for the IPI tests.
 * Used to track the IPI state across different threads in
 * different endpoints. Page-aligned so the whole page can be
 * shared with the services via FF-A memory sharing.
 */
alignas(PAGE_SIZE) static uint8_t ipi_state_page[PAGE_SIZE];

/**
 * Structure defined for usage in tests with multiple cores.
 * Used to pass arguments from primary to secondary core.
 */
struct ipi_cpu_entry_args {
	/** FF-A endpoint the secondary core selects and runs. */
	ffa_id_t service_id;
	/** vCPU count of the service; must be > 1 for these tests. */
	ffa_vcpu_count_t vcpu_count;
	/** vCPU of the service run by the secondary core. */
	ffa_vcpu_index_t vcpu_id;
	/** vCPU to be targeted by the IPI, sent via indirect message. */
	ffa_vcpu_index_t target_vcpu_id;
	/** Mailbox used for indirect messaging to the service. */
	struct mailbox_buffers mb;
	/** Signalled by the secondary core when its work is complete. */
	struct semaphore work_done;
};

42/*
43 * Test secure interrupt handling while the Secure Partition runs in FFA_RUN
44 * partition runtime model with virtual interrupts potentially masked. This
45 * test helps to validate the functionality of the SPMC, which is to:
46 * - Intercept a FFA_MSG_WAIT invocation by the current SP in FFA_RUN partition
47 * runtime model, if there are pending virtual secure interrupts.
48 * - Resume the SP to handle the pending secure virtual interrupt.
49 *
50 * For orchestrating the above scenario, we leverage indirect messaging
51 * interface and allocate CPU cycles to the Secure Partition through FFA_RUN
52 * interface.
53 */
54TEST_PRECONDITION(secure_interrupts, preempted_by_secure_interrupt,
55 service1_is_not_vm)
56{
57 struct ffa_value ret;
58 struct mailbox_buffers mb = set_up_mailbox();
59 const uint32_t delay = 100;
60 const uint32_t echo_payload;
61 ffa_id_t echo_sender;
62 ffa_id_t own_id = hf_vm_get_id();
63 struct ffa_partition_info *service1_info = service1(mb.recv);
64
65 SERVICE_SELECT(service1_info->vm_id, "sec_interrupt_preempt_msg",
66 mb.send);
67
68 /*
69 * Send an indirect message to convey the Secure Watchdog timer delay
70 * which serves as the source of the secure interrupt.
71 */
72 ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
73 &delay, sizeof(delay), 0);
74 EXPECT_EQ(ret.func, FFA_SUCCESS_32);
75
76 /* Schedule message receiver through FFA_RUN interface. */
77 ret = ffa_run(service1_info->vm_id, 0);
78 EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);
79
80 receive_indirect_message((void *)&echo_payload, sizeof(echo_payload),
81 mb.recv, &echo_sender);
82
83 HFTEST_LOG("Message echoed back: %#x", echo_payload);
84 EXPECT_EQ(echo_payload, delay);
85 EXPECT_EQ(echo_sender, service1_info->vm_id);
86}
J-Alves3e9f6052024-07-23 13:41:56 +010087
88/**
89 * This test expects SP1 to have pended an interrupt for SP2, before SP2 has
90 * booted, following the boot protocol.
91 *
92 * TODO: Make this test applicable to S-EL0 and S-EL1 UP partitions.
93 */
94TEST_PRECONDITION(secure_interrupts, handle_interrupt_rtm_init,
95 service2_is_mp_sp)
96{
97 struct ffa_value ret;
98 struct mailbox_buffers mb = set_up_mailbox();
99 struct ffa_partition_info *service2_info = service2(mb.recv);
100
101 SERVICE_SELECT(service2_info->vm_id, "check_interrupt_rtm_init_handled",
102 mb.send);
103
104 /* Schedule message receiver through FFA_RUN interface. */
105 ret = ffa_run(service2_info->vm_id, 0);
106 EXPECT_EQ(ret.func, FFA_YIELD_32);
107}
Daniel Boulby377defd2024-08-22 10:48:23 +0100108
109/**
J-Alves3a9510e2024-09-04 14:34:00 +0100110 * Setups up SRI and returns the interrupt ID.
111 */
112uint32_t enable_sri(void)
113{
114 struct ffa_value ret;
115 uint32_t sri_id;
116
117 dlog_verbose("Enabling the SRI");
118
119 gicv3_system_setup();
120
121 ret = ffa_features(FFA_FEATURE_SRI);
122
123 sri_id = ffa_feature_intid(ret);
124
125 interrupt_enable(sri_id, true);
126 interrupt_set_priority(sri_id, 0x10);
127 interrupt_set_edge_triggered(sri_id, false);
128 interrupt_set_priority_mask(0xff);
129
130 arch_irq_enable();
131
132 return sri_id;
133}
134
135/**
Daniel Boulby377defd2024-08-22 10:48:23 +0100136 * Secondary CPU entrypoint.
137 * Requests the 'send_ipi' function in the designated FF-A endpoint.
138 * Sends the vCPU to be targeted by the IPI via indirect messaging.
139 */
140static void cpu_entry_send_ipi(uintptr_t arg)
141{
142 struct ipi_cpu_entry_args *args =
143 // NOLINTNEXTLINE(performance-no-int-to-ptr)
144 (struct ipi_cpu_entry_args *)arg;
145 struct ffa_value ret;
146 const ffa_id_t own_id = hf_vm_get_id();
147
148 ASSERT_TRUE(args != NULL);
149 ASSERT_TRUE(args->vcpu_count > 1);
150
151 HFTEST_LOG("%s: Within secondary core... %u", __func__, args->vcpu_id);
152
153 SERVICE_SELECT_MP(args->service_id, "send_ipi", args->mb.send,
154 args->vcpu_id);
155
156 /* Run service. */
157 ret = ffa_run(args->service_id, args->vcpu_id);
158 EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);
159
160 /* Send it the target vCPU ID. */
161 ret = send_indirect_message(own_id, args->service_id, args->mb.send,
162 &args->target_vcpu_id,
163 sizeof(args->target_vcpu_id), 0);
164
165 ASSERT_EQ(ret.func, FFA_SUCCESS_32);
166 EXPECT_EQ(ffa_run(args->service_id, args->vcpu_id).func, FFA_YIELD_32);
167
168 HFTEST_LOG("%s cpu done...", __func__);
169
170 /* Signal to primary core that test is complete.*/
171 semaphore_signal(&args->work_done);
172
173 arch_cpu_stop();
174}
175
176/**
177 * Test that Service1 can send IPI to vCPU0 from vCPU1, whilst vCPU0 is in
178 * running state.
179 * Test Sequence:
180 * - Bootstrap vCPU0 in the respective test service, such that it can initialise
181 * the IPI state.
182 * - Service1 vCPU0 terminates and leaves the IPI state not READY.
183 * - Start CPU1 and within it, invoke test service to send IPI. Test service
184 * waits for state machine to transition into READY state.
185 * - Resume Service1 vCPU0 such that it can set IPI state to READY.
186 *
187 * Failure in this test would be captured by timeout as Service1 vCPU0 would
188 * hang waiting for the IPI.
189 */
190TEST_PRECONDITION(ipi, receive_ipi_running_vcpu, service1_is_mp_sp)
191{
192 struct mailbox_buffers mb = set_up_mailbox();
193 struct ffa_partition_info *service1_info = service1(mb.recv);
194 struct ffa_value ret;
195 struct ipi_cpu_entry_args vcpu1_args = {
196 .service_id = service1_info->vm_id,
197 .vcpu_count = service1_info->vcpu_count,
198 .vcpu_id = 1,
199 .target_vcpu_id = 0,
200 .mb = mb};
201
202 /* Initialize semaphores to sync primary and secondary cores. */
203 semaphore_init(&vcpu1_args.work_done);
204
205 SERVICE_SELECT(service1_info->vm_id, "receive_ipi_running", mb.send);
206
207 ret = ffa_run(service1_info->vm_id, 0);
208 EXPECT_EQ(ret.func, FFA_YIELD_32);
209
210 /* Bring-up the core that sends the IPI. */
211 ASSERT_TRUE(hftest_cpu_start(
212 hftest_get_cpu_id(vcpu1_args.vcpu_id),
213 hftest_get_secondary_ec_stack(vcpu1_args.vcpu_id),
214 cpu_entry_send_ipi, (uintptr_t)&vcpu1_args));
215
216 /*
217 * Resumes service1 in target vCPU0 so it sets IPI state to READY and
218 * handles IPI.
219 */
220 ret = ffa_run(service1_info->vm_id, 0);
221 EXPECT_EQ(ret.func, FFA_YIELD_32);
222
223 /* Wait for secondary core to return before finishing the test. */
224 semaphore_wait(&vcpu1_args.work_done);
225}
Daniel Boulby53716552024-08-22 10:55:10 +0100226
227/**
228 * Test that Service1 can send IPI to vCPU0 from vCPU1, whilst vCPU0 is in
229 * waiting state and execution is in the normal world.
230 * Test Sequence:
231 * - Bootstrap vCPU0 and share memory with it to instanciate the IPI state. The
232 * vCPU0 terminates with FFA_MSG_WAIT, so it is in the waiting state.
233 * - Start CPU1 and within it, invoke test service to send IPI. Test service
234 * waits for state machine to transition into READY state.
Daniel Boulby1308a632024-09-11 15:19:16 +0100235 * - NWd waits for the Schedule Reciever Interrupt, checks that Service1 vCPU0
236 * is reported by FFA_NOTIFICATION_INFO_GET as having an IPI pending
237 * and then runs Service1 vCPU0 to handle the IPI.
Daniel Boulby53716552024-08-22 10:55:10 +0100238 * - vCPU0 is resumed to handle the IPI virtual interrupt. It should attest
Daniel Boulby1308a632024-09-11 15:19:16 +0100239 * state transitions into HANDLED from the interrupt handler.
Daniel Boulby53716552024-08-22 10:55:10 +0100240 */
241TEST_PRECONDITION(ipi, receive_ipi_waiting_vcpu_in_nwd, service1_is_mp_sp)
242{
243 struct mailbox_buffers mb = set_up_mailbox();
244 struct ffa_partition_info *service1_info = service1(mb.recv);
245 struct ffa_value ret;
246 struct ipi_cpu_entry_args vcpu1_args = {
247 .service_id = service1_info->vm_id,
248 .vcpu_count = service1_info->vcpu_count,
249 .vcpu_id = 1,
250 .target_vcpu_id = 0,
251 .mb = mb};
252 ffa_id_t memory_receivers[] = {
253 service1_info->vm_id,
254 };
255 uint32_t sri_id;
Daniel Boulby1308a632024-09-11 15:19:16 +0100256 uint32_t expected_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
257 uint16_t expected_ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
Daniel Boulby53716552024-08-22 10:55:10 +0100258
259 /* Get ready to handle SRI. */
260 sri_id = enable_sri();
261
262 SERVICE_SELECT(service1_info->vm_id, "receive_ipi_waiting_vcpu",
263 mb.send);
264 ret = ffa_run(service1_info->vm_id, 0);
265 EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);
266
267 /* Share memory to setup the IPI state structure. */
268 hftest_ipi_state_share_page_and_init((uint64_t)ipi_state_page,
269 memory_receivers, 1, mb.send);
270
271 /*
272 * Resumes service1 in target vCPU0 to retrieve memory and configure the
273 * IPI state.
274 */
275 ret = ffa_run(service1_info->vm_id, 0);
276 EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);
277
278 /* Initialize semaphores to sync primary and secondary cores. */
279 semaphore_init(&vcpu1_args.work_done);
280
281 /* Bring-up the core that sends the IPI. */
282 ASSERT_TRUE(hftest_cpu_start(
283 hftest_get_cpu_id(vcpu1_args.vcpu_id),
284 hftest_get_secondary_ec_stack(vcpu1_args.vcpu_id),
285 cpu_entry_send_ipi, (uintptr_t)&vcpu1_args));
286
287 /*
288 * Reset the last interrupt ID so we know the next SRI is relate to
289 * the IPI handling.
290 */
291 last_interrupt_id = 0;
292
293 /*
294 * Set the state to READY such that vCPU1 injects IPI to target vCPU0.
295 */
296 hftest_ipi_state_set(READY);
297
298 /* Wait for the SRI. */
299 while (last_interrupt_id != sri_id) {
300 interrupt_wait();
301 }
302
Daniel Boulby1308a632024-09-11 15:19:16 +0100303 /* Check the target vCPU 0 is returned by FFA_NOTIFICATION_INFO_GET. */
304 expected_lists_sizes[0] = 1;
305 expected_ids[0] = service1_info->vm_id;
306 expected_ids[1] = 0;
307
308 ffa_notification_info_get_and_check(1, expected_lists_sizes,
309 expected_ids);
310
Daniel Boulby53716552024-08-22 10:55:10 +0100311 /* Resumes service1 in target vCPU 0 to handle IPI. */
312 ret = ffa_run(service1_info->vm_id, 0);
313 EXPECT_EQ(ret.func, FFA_YIELD_32);
314
315 /* Wait for secondary core to return before finishing the test. */
316 semaphore_wait(&vcpu1_args.work_done);
317}
J-Alvesd270b862024-09-03 17:57:43 +0100318
/**
 * Test that Service1 can send IPI to vCPU0 from vCPU1, whilst vCPU0 is in
 * waiting state and execution is in the secure world. Service2 is given access
 * to a shared buffer, where Service1 would have instantiated the IPI state. At
 * the appropriate timing, Service2 transitions IPI state into READY.
 *
 * Test Sequence:
 * - Bootstrap vCPU0 and share memory with it to instantiate the IPI state. The
 *   vCPU0 terminates with FFA_MSG_WAIT, so it is in the waiting state.
 * - Bootstrap Service2 vCPU0 in 'set_ipi_ready'. This gives it access to the
 *   IPI state.
 * - Start CPU1 and within it, invoke test service to send IPI. Test service
 *   waits for state machine to transition into READY state.
 * - Resume Service2 vCPU0 so execution is in the Secure World. At this point,
 *   Service2 transitions IPI state to READY, and waits for the IPI state to be
 *   Handled.
 * - NWd vCPU0 is resumed by the Schedule Receiver Interrupt, checks that
 *   Service1 vCPU0 is reported by FFA_NOTIFICATION_INFO_GET as having an IPI
 *   pending, and then runs Service1 vCPU0 to handle the IPI.
 * - Service1 vCPU0 is resumed to handle the IPI virtual interrupt.
 *   It should attest state transitions into HANDLED from the interrupt handler.
 * - Service2 vCPU0 is then run to check that it successfully runs and completes
 *   after being interrupted.
 */
TEST_PRECONDITION(ipi, receive_ipi_waiting_vcpu_in_swd, service1_is_mp_sp)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_partition_info *service2_info = service2(mb.recv);
	struct ipi_cpu_entry_args vcpu1_args = {
		.service_id = service1_info->vm_id,
		.vcpu_count = service1_info->vcpu_count,
		.vcpu_id = 1,
		.target_vcpu_id = 0,
		.mb = mb};
	/* Both services retrieve the shared page holding the IPI state. */
	ffa_id_t memory_receivers[] = {
		service1_info->vm_id,
		service2_info->vm_id,
	};
	uint32_t sri_id;
	uint32_t expected_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint16_t expected_ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};

	/* Get ready to handle SRI. */
	sri_id = enable_sri();

	/* Initialize semaphores to sync primary and secondary cores. */
	semaphore_init(&vcpu1_args.work_done);

	/* Service1 is to handle the IPI in vCPU0. */
	SERVICE_SELECT(service1_info->vm_id, "receive_ipi_waiting_vcpu",
		       mb.send);
	EXPECT_EQ(ffa_run(service1_info->vm_id, 0).func, FFA_MSG_WAIT_32);

	SERVICE_SELECT(service2_info->vm_id, "set_ipi_ready", mb.send);
	EXPECT_EQ(ffa_run(service2_info->vm_id, 0).func, FFA_MSG_WAIT_32);

	/* Share the IPI state page with both services. */
	hftest_ipi_state_share_page_and_init(
		(uint64_t)ipi_state_page, memory_receivers,
		ARRAY_SIZE(memory_receivers), mb.send);

	/*
	 * Resumes service1/2 in target vCPU to retrieve the memory, and
	 * initialise the state.
	 */
	EXPECT_EQ(ffa_run(service1_info->vm_id, 0).func, FFA_MSG_WAIT_32);
	EXPECT_EQ(ffa_run(service2_info->vm_id, 0).func, FFA_MSG_WAIT_32);

	/* Bring-up the core that sends the IPI. */
	ASSERT_TRUE(hftest_cpu_start(
		hftest_get_cpu_id(vcpu1_args.vcpu_id),
		hftest_get_secondary_ec_stack(vcpu1_args.vcpu_id),
		cpu_entry_send_ipi, (uintptr_t)&vcpu1_args));

	/*
	 * Reset the last interrupt ID so we know the next SRI is related to
	 * the IPI handling.
	 */
	last_interrupt_id = 0;

	/*
	 * Resume service2 to set IPI state to ready, and cause service1 in
	 * vCPU1 to send the IPI.
	 */
	EXPECT_EQ(ffa_run(service2_info->vm_id, 0).func, FFA_INTERRUPT_32);

	/* Wait for the SRI. */
	while (last_interrupt_id != sri_id) {
		interrupt_wait();
	}

	/* Check the target vCPU 0 is returned by FFA_NOTIFICATION_INFO_GET. */
	expected_lists_sizes[0] = 1;
	expected_ids[0] = service1_info->vm_id;
	expected_ids[1] = 0;

	ffa_notification_info_get_and_check(1, expected_lists_sizes,
					    expected_ids);

	/* Resumes service1 in target vCPU 0 to handle IPI. */
	EXPECT_EQ(ffa_run(service1_info->vm_id, 0).func, FFA_YIELD_32);

	/*
	 * Resume service2 to check it can run to completion after being
	 * interrupted.
	 */
	EXPECT_EQ(ffa_run(service2_info->vm_id, 0).func, FFA_YIELD_32);

	/* Wait for secondary core to return before finishing the test. */
	semaphore_wait(&vcpu1_args.work_done);
}
429}