/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <assert.h>
#include <granule.h>
#include <psci.h>
#include <realm.h>
#include <rec.h>
#include <rsi-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <stdint.h>

/*
 * Copy @count GPRs from @rec to @rec_exit.
 * The remaining @rec_exit.gprs[] values are zero filled.
 */
static void forward_args_to_host(unsigned int count, struct rec *rec,
				 struct rmi_rec_exit *rec_exit)
{
	unsigned int i;

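	/* At most X0 (the FID) and three arguments are ever forwarded */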
	assert(count <= 4U);

	for (i = 0U; i < count; ++i) {
		rec_exit->gprs[i] = rec->regs[i];
	}

	for (i = count; i < REC_EXIT_NR_GPRS; ++i) {
		rec_exit->gprs[i] = 0UL;
	}
}

static void psci_version(struct rsi_result *res)
{
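	/* PSCI version encoding: major in bits [31:16], minor in bits [15:0] */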
	const unsigned long version_1_1 = (1UL << 16) | 1UL;

	res->action = UPDATE_REC_RETURN_TO_REALM;
	res->smc_res.x[0] = version_1_1;
}

static void psci_cpu_suspend(struct rec *rec, struct rmi_rec_exit *rec_exit,
			     struct rsi_result *res)
{
	res->action = UPDATE_REC_EXIT_TO_HOST;

	/*
	 * We treat all target power states as suspend requests,
	 * so all we need to do is forward the FID to the NS hypervisor,
	 * and we can ignore all the parameters.
	 */
	forward_args_to_host(1U, rec, rec_exit);

	/*
	 * The exit to the Host is just a notification; the Host does not need
	 * to complete a PSCI request before the next call to RMI_REC_ENTER.
	 * We therefore update the REC immediately with the results of the PSCI
	 * command.
	 */
	res->smc_res.x[0] = PSCI_RETURN_SUCCESS;
}

static void psci_cpu_off(struct rec *rec, struct rmi_rec_exit *rec_exit,
			 struct rsi_result *res)
{
	res->action = UPDATE_REC_EXIT_TO_HOST;

	/*
	 * It is safe to update this flag without holding a lock on the REC,
	 * and without explicit memory barriers or ordering operations: a REC
	 * can only be executing on one PE at a time, we are in that execution
	 * context, and we hold a reference count on the REC at this point.
	 * That reference count is dropped, and re-evaluated with proper
	 * barriers, before any other CPU can evaluate the runnable field
	 * after this change.
	 */
	rec->runnable = false;

	/* Notify the Host, passing the FID only. */
	forward_args_to_host(1U, rec, rec_exit);

	/*
	 * The exit to the Host is just a notification; the Host does not need
	 * to complete a PSCI request before the next call to RMI_REC_ENTER.
	 * We therefore update the REC immediately with the results of the PSCI
	 * command.
	 */
	res->smc_res.x[0] = PSCI_RETURN_SUCCESS;
}

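/*
 * Reset a REC to the architectural state expected on wake-up from
 * PSCI_CPU_ON: AArch64 EL1h with all exceptions masked, the stage 1 MMU
 * and caches disabled, and the caller's endianness preserved.
 */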
static void psci_reset_rec(struct rec *rec, unsigned long caller_sctlr_el1)
{
	/* Set execution level to EL1 (AArch64) and mask exceptions */
	rec->pstate = SPSR_EL2_MODE_EL1h |
		      SPSR_EL2_nRW_AARCH64 |
		      SPSR_EL2_F_BIT |
		      SPSR_EL2_I_BIT |
		      SPSR_EL2_A_BIT |
		      SPSR_EL2_D_BIT;

	/* Disable stage 1 MMU and caches */
	rec->sysregs.sctlr_el1 = SCTLR_EL1_FLAGS;

	/* Set the endianness of the target to that of the caller */
	rec->sysregs.sctlr_el1 |= caller_sctlr_el1 & SCTLR_ELx_EE_BIT;
}

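/*
 * Map the Realm Descriptor granule @g_rd and read the number of RECs
 * created for the Realm, without taking the RD lock.
 */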
static unsigned long rd_map_read_rec_count(struct granule *g_rd)
{
	unsigned long rec_count;
	struct rd *rd = granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	rec_count = get_rd_rec_count_unlocked(rd);
	buffer_unmap(rd);
	return rec_count;
}

static void psci_cpu_on(struct rec *rec, struct rmi_rec_exit *rec_exit,
			struct rsi_result *res)
{
	unsigned long target_cpu = rec->regs[1];
	unsigned long entry_point_address = rec->regs[2];
	unsigned long target_rec_idx;

	res->action = UPDATE_REC_RETURN_TO_REALM;

	/* Check that entry_point_address is a Protected Realm Address */
	if (!addr_in_rec_par(rec, entry_point_address)) {
		res->smc_res.x[0] = PSCI_RETURN_INVALID_ADDRESS;
		return;
	}

	/* Get REC index from MPIDR */
	target_rec_idx = mpidr_to_rec_idx(target_cpu);

	/*
	 * Check that target_cpu is a valid value.
	 * Note that the RMM enforces that RECs are created with
	 * consecutively increasing indexes starting from zero.
	 */
	if (target_rec_idx >= rd_map_read_rec_count(rec->realm_info.g_rd)) {
		res->smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS;
		return;
	}

	/* Check if we're trying to turn ourselves on */
	if (target_rec_idx == rec->rec_idx) {
		res->smc_res.x[0] = PSCI_RETURN_ALREADY_ON;
		return;
	}

	/* Record that a PSCI request is outstanding */
	rec->psci_info.pending = true;

	/*
	 * Notify the Host, passing the FID and MPIDR arguments.
	 * Leave REC registers unchanged; these will be read and updated by
	 * psci_complete_request.
	 */
	forward_args_to_host(2U, rec, rec_exit);
	res->action = EXIT_TO_HOST;
}

static void psci_affinity_info(struct rec *rec, struct rmi_rec_exit *rec_exit,
			       struct rsi_result *res)
{
	unsigned long target_affinity = rec->regs[1];
	unsigned long lowest_affinity_level = rec->regs[2];
	unsigned long target_rec_idx;

	res->action = UPDATE_REC_RETURN_TO_REALM;

	if (lowest_affinity_level != 0UL) {
		res->smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS;
		return;
	}

	/* Get REC index from MPIDR */
	target_rec_idx = mpidr_to_rec_idx(target_affinity);

	/*
	 * Check that target_affinity is a valid value.
	 * Note that the RMM enforces that RECs are created with
	 * consecutively increasing indexes starting from zero.
	 */
	if (target_rec_idx >= rd_map_read_rec_count(rec->realm_info.g_rd)) {
		res->smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS;
		return;
	}

	/* Check if the vCPU targets itself */
	if (target_rec_idx == rec->rec_idx) {
		res->smc_res.x[0] = PSCI_AFFINITY_INFO_ON;
		return;
	}

	/* Record that a PSCI request is outstanding */
	rec->psci_info.pending = true;

	/*
	 * Notify the Host, passing the FID and MPIDR arguments.
	 * Leave REC registers unchanged; these will be read and updated
	 * by psci_complete_request.
	 */
	forward_args_to_host(2U, rec, rec_exit);

	res->action = EXIT_TO_HOST;
}

/*
 * The RMM enforces turning a system off or rebooting a Realm by preventing
 * any of its RECs from executing after this function has run. Reboot
 * functionality must be provided by the Host hypervisor, by creating a new
 * Realm with the associated attestation, measurement, etc.
 */
static void system_off_reboot(struct rec *rec)
{
	struct rd *rd;
	struct granule *g_rd = rec->realm_info.g_rd;

	/*
	 * The RECs (and, consequently, the PSCI calls) run without any
	 * RMM lock held. Therefore, we cannot cause a deadlock when we
	 * acquire the rd lock here before we set the Realm's new state.
	 */
	granule_lock(g_rd, GRANULE_STATE_RD);
	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);
	assert(rd != NULL);

	set_rd_state(rd, REALM_STATE_SYSTEM_OFF);

	buffer_unmap(rd);
	granule_unlock(g_rd);

	/* TODO: Invalidate all stage 2 entries to ensure REC exits */
}

static void psci_system_off_reset(struct rec *rec,
				  struct rmi_rec_exit *rec_exit,
				  struct rsi_result *res)
{
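	/* Move the Realm to the SYSTEM_OFF state so that no REC can run again */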
	system_off_reboot(rec);

	/* Notify the Host, passing the FID only */
	forward_args_to_host(1U, rec, rec_exit);

	res->action = EXIT_TO_HOST;
}

static void psci_features(struct rec *rec, struct rsi_result *res)
{
	unsigned int psci_func_id = (unsigned int)rec->regs[1];

	switch (psci_func_id) {
	case SMC32_PSCI_CPU_SUSPEND:
	case SMC64_PSCI_CPU_SUSPEND:
	case SMC32_PSCI_CPU_OFF:
	case SMC32_PSCI_CPU_ON:
	case SMC64_PSCI_CPU_ON:
	case SMC32_PSCI_AFFINITY_INFO:
	case SMC64_PSCI_AFFINITY_INFO:
	case SMC32_PSCI_SYSTEM_OFF:
	case SMC32_PSCI_SYSTEM_RESET:
	case SMC32_PSCI_FEATURES:
	case SMCCC_VERSION:
		res->smc_res.x[0] = PSCI_RETURN_SUCCESS;
		break;
	default:
		res->smc_res.x[0] = PSCI_RETURN_NOT_SUPPORTED;
	}

	res->action = UPDATE_REC_RETURN_TO_REALM;
}

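/*
 * Dispatch a PSCI function invoked by the Realm via RSI. @res tells the
 * caller whether to update the REC, return to the Realm, exit to the Host,
 * or a combination of these.
 */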
void handle_psci(struct rec *rec,
		 struct rmi_rec_exit *rec_exit,
		 struct rsi_result *res)
{
	unsigned int function_id = (unsigned int)rec->regs[0];

	switch (function_id) {
	case SMC32_PSCI_VERSION:
		psci_version(res);
		break;
	case SMC32_PSCI_CPU_SUSPEND:
	case SMC64_PSCI_CPU_SUSPEND:
		psci_cpu_suspend(rec, rec_exit, res);
		break;
	case SMC32_PSCI_CPU_OFF:
		psci_cpu_off(rec, rec_exit, res);
		break;
	case SMC32_PSCI_CPU_ON:
	case SMC64_PSCI_CPU_ON:
		psci_cpu_on(rec, rec_exit, res);
		break;
	case SMC32_PSCI_AFFINITY_INFO:
	case SMC64_PSCI_AFFINITY_INFO:
		psci_affinity_info(rec, rec_exit, res);
		break;
	case SMC32_PSCI_SYSTEM_OFF:
	case SMC32_PSCI_SYSTEM_RESET:
		psci_system_off_reset(rec, rec_exit, res);
		break;
	case SMC32_PSCI_FEATURES:
		psci_features(rec, res);
		break;
	default:
		res->action = UPDATE_REC_RETURN_TO_REALM;
		res->smc_res.x[0] = PSCI_RETURN_NOT_SUPPORTED;
		break;
	}

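	/* If the requested action involves an exit to the Host, flag it as a PSCI exit */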
	if ((res->action & FLAG_EXIT_TO_HOST) != 0) {
		rec_exit->exit_reason = RMI_EXIT_PSCI;
	}
}

/*
 * In the following two functions, it is only safe to access the runnable
 * field of the target_rec once the target_rec is no longer running on
 * another PE and all writes performed by the other PE as part of
 * smc_rec_enter are also guaranteed to be observed here. Both conditions
 * hold once we read a zero refcount on the target REC using acquire
 * semantics, paired with the release semantics on the reference count in
 * smc_rec_enter. If we observe a non-zero refcount, it simply means that
 * the target_rec is running and we can return the corresponding value.
 */
static unsigned long complete_psci_cpu_on(struct rec *target_rec,
					  unsigned long entry_point_address,
					  unsigned long caller_sctlr_el1)
{
	if ((granule_refcount_read_acquire(target_rec->g_rec) != 0UL) ||
	    target_rec->runnable) {
		return PSCI_RETURN_ALREADY_ON;
	}

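	/*
	 * The target REC is off: reset it and make it runnable at the
	 * requested entry point.
	 */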
	psci_reset_rec(target_rec, caller_sctlr_el1);
	target_rec->pc = entry_point_address;
	target_rec->runnable = true;
	return PSCI_RETURN_SUCCESS;
}

static unsigned long complete_psci_affinity_info(struct rec *target_rec)
{
	if ((granule_refcount_read_acquire(target_rec->g_rec) != 0UL) ||
	    target_rec->runnable) {
		return PSCI_AFFINITY_INFO_ON;
	}

	return PSCI_AFFINITY_INFO_OFF;
}

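/*
 * Complete the PSCI request pending on @calling_rec that targets
 * @target_rec, on behalf of the Host (the RMI_PSCI_COMPLETE path), after a
 * REC exit with exit_reason RMI_EXIT_PSCI. The result of the PSCI call is
 * written back to the calling REC's registers.
 */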
unsigned long psci_complete_request(struct rec *calling_rec,
				    struct rec *target_rec)
{
	unsigned long ret = PSCI_RETURN_NOT_SUPPORTED;
	unsigned long mpidr = calling_rec->regs[1];

	if (!calling_rec->psci_info.pending) {
		return RMI_ERROR_INPUT;
	}

	if (calling_rec->realm_info.g_rd != target_rec->realm_info.g_rd) {
		return RMI_ERROR_INPUT;
	}

	if (mpidr_to_rec_idx(mpidr) != target_rec->rec_idx) {
		return RMI_ERROR_INPUT;
	}

	switch (calling_rec->regs[0]) {
	case SMC32_PSCI_CPU_ON:
	case SMC64_PSCI_CPU_ON:
		ret = complete_psci_cpu_on(target_rec,
					   calling_rec->regs[2],
					   calling_rec->sysregs.sctlr_el1);
		break;
	case SMC32_PSCI_AFFINITY_INFO:
	case SMC64_PSCI_AFFINITY_INFO:
		ret = complete_psci_affinity_info(target_rec);
		break;
	default:
		assert(false);
	}

	calling_rec->regs[0] = ret;
	calling_rec->regs[1] = 0UL;
	calling_rec->regs[2] = 0UL;
	calling_rec->regs[3] = 0UL;
	calling_rec->psci_info.pending = false;

	return RMI_SUCCESS;
}