/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/vm/power_mgmt.h"

#include "vmapi/hf/call.h"

#include "primary_with_secondary.h"
#include "test/hftest.h"
#include "test/semaphore.h"

/**
 * Structure defined for usage in tests with multiple cores.
 * Used to pass arguments from primary to secondary core.
 */
struct ipi_cpu_entry_args {
	ffa_id_t service_id;
	ffa_vcpu_count_t vcpu_count;
	ffa_vcpu_index_t vcpu_id;
	ffa_vcpu_index_t target_vcpu_id;
	struct mailbox_buffers mb;
	struct semaphore work_done;
};

/*
 * Test secure interrupt handling while the Secure Partition runs in the
 * FFA_RUN partition runtime model with virtual interrupts potentially masked.
 * This test helps to validate the functionality of the SPMC, which is to:
 * - Intercept an FFA_MSG_WAIT invocation by the current SP in the FFA_RUN
 *   partition runtime model, if there are pending virtual secure interrupts.
 * - Resume the SP to handle the pending secure virtual interrupt.
 *
 * To orchestrate the above scenario, we leverage the indirect messaging
 * interface and allocate CPU cycles to the Secure Partition through the
 * FFA_RUN interface.
 */
TEST_PRECONDITION(secure_interrupts, preempted_by_secure_interrupt,
		  service1_is_not_vm)
{
	struct ffa_value ret;
	struct mailbox_buffers mb = set_up_mailbox();
	const uint32_t delay = 100;
	uint32_t echo_payload;
	ffa_id_t echo_sender;
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_partition_info *service1_info = service1(mb.recv);

	SERVICE_SELECT(service1_info->vm_id, "sec_interrupt_preempt_msg",
		       mb.send);

	/*
	 * Send an indirect message to convey the Secure Watchdog timer delay
	 * which serves as the source of the secure interrupt.
	 */
	ret = send_indirect_message(own_id, service1_info->vm_id, mb.send,
				    &delay, sizeof(delay), 0);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);

	/* Schedule message receiver through FFA_RUN interface. */
	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);

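	/*
	 * By now the service should have handled the secure virtual
	 * interrupt and echoed the delay value back via indirect message.
	 */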
	receive_indirect_message((void *)&echo_payload, sizeof(echo_payload),
				 mb.recv, &echo_sender);

	HFTEST_LOG("Message echoed back: %#x", echo_payload);
	EXPECT_EQ(echo_payload, delay);
	EXPECT_EQ(echo_sender, service1_info->vm_id);
}

/**
 * This test expects SP1 to have pended an interrupt for SP2, before SP2 has
 * booted, following the boot protocol.
 *
 * TODO: Make this test applicable to S-EL0 and S-EL1 UP partitions.
 */
TEST_PRECONDITION(secure_interrupts, handle_interrupt_rtm_init,
		  service2_is_mp_sp)
{
	struct ffa_value ret;
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service2_info = service2(mb.recv);

	SERVICE_SELECT(service2_info->vm_id, "check_interrupt_rtm_init_handled",
		       mb.send);

	/* Schedule message receiver through FFA_RUN interface. */
	ret = ffa_run(service2_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_YIELD_32);
}

/**
 * Secondary CPU entrypoint.
 * Requests the 'send_ipi' function in the designated FF-A endpoint.
 * Sends the vCPU to be targeted by the IPI via indirect messaging.
 */
static void cpu_entry_send_ipi(uintptr_t arg)
{
	struct ipi_cpu_entry_args *args =
		// NOLINTNEXTLINE(performance-no-int-to-ptr)
		(struct ipi_cpu_entry_args *)arg;
	struct ffa_value ret;
	const ffa_id_t own_id = hf_vm_get_id();

	ASSERT_TRUE(args != NULL);
	ASSERT_TRUE(args->vcpu_count > 1);

	HFTEST_LOG("%s: Within secondary core... %u", __func__, args->vcpu_id);

	SERVICE_SELECT_MP(args->service_id, "send_ipi", args->mb.send,
			  args->vcpu_id);

	/* Run service. */
	ret = ffa_run(args->service_id, args->vcpu_id);
	EXPECT_EQ(ret.func, FFA_MSG_WAIT_32);

	/* Send it the target vCPU ID. */
	ret = send_indirect_message(own_id, args->service_id, args->mb.send,
				    &args->target_vcpu_id,
				    sizeof(args->target_vcpu_id), 0);

	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
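	/*
	 * The service reads the target vCPU ID, waits for the IPI state
	 * machine to reach READY, then sends the IPI and yields.
	 */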
	EXPECT_EQ(ffa_run(args->service_id, args->vcpu_id).func, FFA_YIELD_32);

	HFTEST_LOG("%s cpu done...", __func__);

	/* Signal to primary core that test is complete. */
	semaphore_signal(&args->work_done);

	arch_cpu_stop();
}

/**
 * Test that Service1 can send an IPI to vCPU0 from vCPU1, whilst vCPU0 is in
 * the running state.
 * Test Sequence:
 * - Bootstrap vCPU0 in the respective test service, such that it can
 *   initialise the IPI state.
 * - Service1 vCPU0 terminates and leaves the IPI state not READY.
 * - Start CPU1 and, within it, invoke the test service to send the IPI. The
 *   test service waits for the state machine to transition into the READY
 *   state.
 * - Resume Service1 vCPU0 such that it can set the IPI state to READY.
 *
 * Failure in this test would be captured by a timeout, as Service1 vCPU0
 * would hang waiting for the IPI.
 */
TEST_PRECONDITION(ipi, receive_ipi_running_vcpu, service1_is_mp_sp)
{
	struct mailbox_buffers mb = set_up_mailbox();
	struct ffa_partition_info *service1_info = service1(mb.recv);
	struct ffa_value ret;
	struct ipi_cpu_entry_args vcpu1_args = {
		.service_id = service1_info->vm_id,
		.vcpu_count = service1_info->vcpu_count,
		.vcpu_id = 1,
		.target_vcpu_id = 0,
		.mb = mb};

	/* Initialize the semaphore used to sync primary and secondary cores. */
	semaphore_init(&vcpu1_args.work_done);

	SERVICE_SELECT(service1_info->vm_id, "receive_ipi_running", mb.send);

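	/*
	 * Bootstrap Service1 vCPU0 so it can initialise the IPI state; it
	 * then terminates, leaving the state not READY.
	 */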
	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_YIELD_32);

	/* Bring up the core that sends the IPI. */
	ASSERT_TRUE(hftest_cpu_start(
		hftest_get_cpu_id(vcpu1_args.vcpu_id),
		hftest_get_secondary_ec_stack(vcpu1_args.vcpu_id),
		cpu_entry_send_ipi, (uintptr_t)&vcpu1_args));
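
	/*
	 * CPU1 is now executing cpu_entry_send_ipi, which directs Service1
	 * vCPU1 to send the IPI once the IPI state machine reaches READY.
	 */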

	/*
	 * Resume service1's target vCPU0 so it sets the IPI state to READY
	 * and handles the IPI.
	 */
	ret = ffa_run(service1_info->vm_id, 0);
	EXPECT_EQ(ret.func, FFA_YIELD_32);

	/* Wait for the secondary core to return before finishing the test. */
	semaphore_wait(&vcpu1_args.work_done);
}