/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/irq.h"
#include "hf/arch/vm/interrupts.h"
#include "hf/arch/vm/interrupts_gicv3.h"
#include "hf/arch/vm/power_mgmt.h"

#include "vmapi/hf/call.h"

#include "gicv3.h"
#include "ipi_state.h"
#include "primary_with_secondary.h"
#include "test/hftest.h"
#include "test/semaphore.h"

/**
 * Where the ipi_state struct is stored for the IPI tests.
 * Used to track the IPI state across different threads in
 * different endpoints.
 */
alignas(PAGE_SIZE) static uint8_t ipi_state_page[PAGE_SIZE];

/**
 * Structure used in tests spanning multiple cores.
 * Used to pass arguments from the primary to a secondary core.
 */
struct ipi_cpu_entry_args {
	ffa_id_t service_id;
	ffa_vcpu_count_t vcpu_count;
	ffa_vcpu_index_t vcpu_id;
	ffa_vcpu_index_t target_vcpu_id;
	struct mailbox_buffers mb;
	struct semaphore work_done;
};

/*
 * Test secure interrupt handling while the Secure Partition runs in the
 * FFA_RUN partition runtime model with virtual interrupts potentially masked.
 * This test helps to validate the functionality of the SPMC, which is to:
 * - Intercept an FFA_MSG_WAIT invocation by the current SP in the FFA_RUN
 *   partition runtime model, if there are pending virtual secure interrupts.
 * - Resume the SP to handle the pending secure virtual interrupt.
 *
 * To orchestrate the above scenario, we leverage the indirect messaging
 * interface and allocate CPU cycles to the Secure Partition through the
 * FFA_RUN interface.
 */
TEST_PRECONDITION(secure_interrupts, preempted_by_secure_interrupt,
		  service1_is_not_vm)
{
	struct ffa_value ret;
	struct mailbox_buffers mb = set_up_mailbox();
	const uint32_t delay = 100;
	uint32_t echo_payload;
	ffa_id_t echo_sender;
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_partition_info *service1_info = service1(mb.recv);

	SERVICE_SELECT(service1_info->vm_id, "sec_interrupt_preempt_msg",
		       mb.send);

	/*
	 * Send an indirect message to convey the Secure Watchdog timer delay,
	 * which serves as the source of the secure interrupt.
	 */
	ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
				    &delay, sizeof(delay), 0);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);

	/* Schedule the message receiver through the FFA_RUN interface. */
	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);

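	/* Receive the delay value echoed back by Service1. */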
	receive_indirect_message((void *)&echo_payload, sizeof(echo_payload),
				 mb.recv, &echo_sender);

	HFTEST_LOG("Message echoed back: %#x", echo_payload);
	EXPECT_EQ(echo_payload, delay);
	EXPECT_EQ(echo_sender, service1_info->vm_id);
}

/**
 * This test expects SP1 to have pended an interrupt for SP2, before SP2 has
 * booted, following the boot protocol.
 *
 * TODO: Make this test applicable to S-EL0 and S-EL1 UP partitions.
 */
TEST_PRECONDITION(secure_interrupts, handle_interrupt_rtm_init,
		  service2_is_mp_sp)
{
	struct ffa_value ret;
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service2_info = service2(mb.recv);

	SERVICE_SELECT(service2_info->vm_id, "check_interrupt_rtm_init_handled",
		       mb.send);

	/* Allocate CPU cycles to Service2 through the FFA_RUN interface. */
	ret = ffa_run(service2_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_YIELD_32);
}

/**
 * Sets up the SRI and returns its interrupt ID.
 */
uint32_t enable_sri(void)
{
	struct ffa_value ret;
	uint32_t sri_id;

	dlog_verbose("Enabling the SRI");

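	/* Configure the GICv3 so this PE can take physical interrupts. */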
	gicv3_system_setup();

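	/* Query the SRI interrupt ID through the FFA_FEATURES interface. */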
	ret = ffa_features(FFA_FEATURE_SRI);

	sri_id = ffa_feature_intid(ret);

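	/* Enable the SRI as a level-triggered interrupt and unmask it. */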
	interrupt_enable(sri_id, true);
	interrupt_set_priority(sri_id, 0x10);
	interrupt_set_edge_triggered(sri_id, false);
	interrupt_set_priority_mask(0xff);

	arch_irq_enable();

	return sri_id;
}

/**
 * Secondary CPU entrypoint.
 * Runs the 'send_ipi' function in the designated FF-A endpoint and sends it
 * the ID of the vCPU to be targeted by the IPI via indirect messaging.
 */
static void cpu_entry_send_ipi(uintptr_t arg)
{
	struct ipi_cpu_entry_args *args =
		// NOLINTNEXTLINE(performance-no-int-to-ptr)
		(struct ipi_cpu_entry_args *)arg;
	struct ffa_value ret;
	const ffa_id_t own_id = hf_vm_get_id();

	ASSERT_TRUE(args != NULL);
	ASSERT_TRUE(args->vcpu_count > 1);

	HFTEST_LOG("%s: Within secondary core... %u", __func__, args->vcpu_id);

	SERVICE_SELECT_MP(args->service_id, "send_ipi", args->mb.send,
			  args->vcpu_id);

	/* Run the service. */
	ret = ffa_run(args->service_id, args->vcpu_id);
	EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);

	/* Send it the target vCPU ID. */
	ret = send_indirect_message(own_id, args->service_id, args->mb.send,
				    &args->target_vcpu_id,
				    sizeof(args->target_vcpu_id), 0);

	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_run(args->service_id, args->vcpu_id).func, FFA_YIELD_32);

	HFTEST_LOG("%s cpu done...", __func__);

	/* Signal to the primary core that the test is complete. */
	semaphore_signal(&args->work_done);

	arch_cpu_stop();
}

/**
 * Test that Service1 can send an IPI to vCPU0 from vCPU1, whilst vCPU0 is in
 * the running state.
 * Test Sequence:
 * - Bootstrap vCPU0 in the respective test service, such that it can
 *   initialise the IPI state.
 * - Service1 vCPU0 terminates and leaves the IPI state not READY.
 * - Start CPU1 and, within it, invoke the test service to send the IPI. The
 *   test service waits for the state machine to transition into the READY
 *   state.
 * - Resume Service1 vCPU0 such that it can set the IPI state to READY.
 *
 * Failure in this test would be captured by a timeout, as Service1 vCPU0
 * would hang waiting for the IPI.
 */
TEST_PRECONDITION(ipi, receive_ipi_running_vcpu, service1_is_mp_sp)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_value ret;
	struct ipi_cpu_entry_args vcpu1_args = {
		.service_id = service1_info->vm_id,
		.vcpu_count = service1_info->vcpu_count,
		.vcpu_id = 1,
		.target_vcpu_id = 0,
		.mb = mb};

	/* Initialize the semaphore used to sync primary and secondary cores. */
	semaphore_init(&vcpu1_args.work_done);

	SERVICE_SELECT(service1_info->vm_id, "receive_ipi_running", mb.send);

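	/* Run Service1 vCPU0 so it can initialise the IPI state. */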
	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_YIELD_32);

	/* Bring up the core that sends the IPI. */
	ASSERT_TRUE(hftest_cpu_start(
		hftest_get_cpu_id(vcpu1_args.vcpu_id),
		hftest_get_secondary_ec_stack(vcpu1_args.vcpu_id),
		cpu_entry_send_ipi, (uintptr_t)&vcpu1_args));

	/*
	 * Resume Service1 in target vCPU0 so it sets the IPI state to READY
	 * and handles the IPI.
	 */
	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_YIELD_32);

	/* Wait for the secondary core to return before finishing the test. */
	semaphore_wait(&vcpu1_args.work_done);
}

/**
 * Test that Service1 can send an IPI to vCPU0 from vCPU1, whilst vCPU0 is in
 * the waiting state and execution is in the normal world.
 * Test Sequence:
 * - Bootstrap vCPU0 and share memory with it to instantiate the IPI state.
 *   vCPU0 terminates with FFA_MSG_WAIT, so it is in the waiting state.
 * - Start CPU1 and, within it, invoke the test service to send the IPI. The
 *   test service waits for the state machine to transition into the READY
 *   state.
 * - The NWd waits for the Schedule Receiver Interrupt and then runs Service1
 *   vCPU0 to handle the IPI.
 * - vCPU0 is resumed to handle the IPI virtual interrupt. It should attest
 *   that the state transitions into HANDLED from the interrupt handler.
 */
TEST_PRECONDITION(ipi, receive_ipi_waiting_vcpu_in_nwd, service1_is_mp_sp)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_value ret;
	struct ipi_cpu_entry_args vcpu1_args = {
		.service_id = service1_info->vm_id,
		.vcpu_count = service1_info->vcpu_count,
		.vcpu_id = 1,
		.target_vcpu_id = 0,
		.mb = mb};
	ffa_id_t memory_receivers[] = {
		service1_info->vm_id,
	};
	uint32_t sri_id;

	/* Get ready to handle SRI. */
	sri_id = enable_sri();

	SERVICE_SELECT(service1_info->vm_id, "receive_ipi_waiting_vcpu",
		       mb.send);
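
	/* Run Service1 vCPU0; it transitions to the waiting state. */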
	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);

	/* Share memory to set up the IPI state structure. */
	hftest_ipi_state_share_page_and_init((uint64_t)ipi_state_page,
					     memory_receivers, 1, mb.send);

	/*
	 * Resume Service1 in target vCPU0 to retrieve the memory and configure
	 * the IPI state.
	 */
	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);

	/* Initialize the semaphore used to sync primary and secondary cores. */
	semaphore_init(&vcpu1_args.work_done);

	/* Bring up the core that sends the IPI. */
	ASSERT_TRUE(hftest_cpu_start(
		hftest_get_cpu_id(vcpu1_args.vcpu_id),
		hftest_get_secondary_ec_stack(vcpu1_args.vcpu_id),
		cpu_entry_send_ipi, (uintptr_t)&vcpu1_args));

	/*
	 * Reset the last interrupt ID so we know the next SRI is related to
	 * the IPI handling.
	 */
	last_interrupt_id = 0;

	/*
	 * Set the state to READY such that vCPU1 injects the IPI into target
	 * vCPU0.
	 */
	hftest_ipi_state_set(READY);

	/* Wait for the SRI. */
	while (last_interrupt_id != sri_id) {
		interrupt_wait();
	}

	/* Resume Service1 in target vCPU0 to handle the IPI. */
	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_YIELD_32);

	/* Wait for the secondary core to return before finishing the test. */
	semaphore_wait(&vcpu1_args.work_done);
}