Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 1 | /* |
| 2 | * SPDX-License-Identifier: BSD-3-Clause |
| 3 | * SPDX-FileCopyrightText: Copyright TF-RMM Contributors. |
| 4 | */ |
| 5 | |
Javier Almansa Sobrino | 2f717dd | 2024-02-12 20:49:46 +0000 | [diff] [blame^] | 6 | #include <buffer.h> |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 7 | #include <granule.h> |
| 8 | #include <psci.h> |
| 9 | #include <realm.h> |
| 10 | #include <rec.h> |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 11 | #include <rsi-handler.h> |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 12 | #include <smc-rmi.h> |
| 13 | #include <smc.h> |
| 14 | #include <stdint.h> |
| 15 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 16 | /* |
| 17 | * Copy @count GPRs from @rec to @rec_exit. |
| 18 | * The remaining @rec_exit.gprs[] values are zero filled. |
| 19 | */ |
| 20 | static void forward_args_to_host(unsigned int count, struct rec *rec, |
| 21 | struct rmi_rec_exit *rec_exit) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 22 | { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 23 | unsigned int i; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 24 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 25 | assert(count <= 4U); |
| 26 | |
| 27 | for (i = 0U; i < count; ++i) { |
| 28 | rec_exit->gprs[i] = rec->regs[i]; |
| 29 | } |
| 30 | |
| 31 | for (i = count; i < REC_EXIT_NR_GPRS; ++i) { |
| 32 | rec_exit->gprs[i] = 0UL; |
| 33 | } |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 34 | } |
| 35 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 36 | static void psci_version(struct rsi_result *res) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 37 | { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 38 | const unsigned long version_1_1 = (1UL << 16) | 1UL; |
| 39 | |
| 40 | res->action = UPDATE_REC_RETURN_TO_REALM; |
| 41 | res->smc_res.x[0] = version_1_1; |
| 42 | } |
| 43 | |
| 44 | static void psci_cpu_suspend(struct rec *rec, struct rmi_rec_exit *rec_exit, |
| 45 | struct rsi_result *res) |
| 46 | { |
| 47 | res->action = UPDATE_REC_EXIT_TO_HOST; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 48 | |
| 49 | /* |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 50 | * We treat all target power states as suspend requests, |
| 51 | * so all we need to do is forward the FID to the NS hypervisor, |
| 52 | * and we can ignore all the parameters. |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 53 | */ |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 54 | forward_args_to_host(1U, rec, rec_exit); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 55 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 56 | /* |
| 57 | * The exit to the Host is just a notification; the Host does not need |
| 58 | * to complete a PSCI request before the next call to RMI_REC_ENTER. |
| 59 | * We therefore update the REC immediately with the results of the PSCI |
| 60 | * command. |
| 61 | */ |
| 62 | res->smc_res.x[0] = PSCI_RETURN_SUCCESS; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 63 | } |
| 64 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 65 | static void psci_cpu_off(struct rec *rec, struct rmi_rec_exit *rec_exit, |
| 66 | struct rsi_result *res) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 67 | { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 68 | res->action = UPDATE_REC_EXIT_TO_HOST; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 69 | |
| 70 | /* |
| 71 | * It should be fine to set this flag without holding a lock on the |
| 72 | * REC or without explicit memory barriers or ordering semantics |
| 73 | * operations, because we already ensure that a REC can only be in an |
| 74 | * executing state once at any given time, and we're in this execution |
| 75 | * context already, and we will be holding a reference count on the |
| 76 | * REC at this point, which will be dropped and re-evaluated with |
| 77 | * proper barriers before any CPU can evaluate the runnable field |
| 78 | * after this change. |
| 79 | */ |
| 80 | rec->runnable = false; |
| 81 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 82 | /* Notify the Host, passing the FID only. */ |
| 83 | forward_args_to_host(1U, rec, rec_exit); |
| 84 | |
| 85 | /* |
| 86 | * The exit to the Host is just a notification; the Host does not need |
| 87 | * to complete a PSCI request before the next call to RMI_REC_ENTER. |
| 88 | * We therefore update the REC immediately with the results of the PSCI |
| 89 | * command. |
| 90 | */ |
| 91 | res->smc_res.x[0] = PSCI_RETURN_SUCCESS; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 92 | } |
| 93 | |
| 94 | static void psci_reset_rec(struct rec *rec, unsigned long caller_sctlr_el1) |
| 95 | { |
| 96 | /* Set execution level to EL1 (AArch64) and mask exceptions */ |
| 97 | rec->pstate = SPSR_EL2_MODE_EL1h | |
| 98 | SPSR_EL2_nRW_AARCH64 | |
| 99 | SPSR_EL2_F_BIT | |
| 100 | SPSR_EL2_I_BIT | |
| 101 | SPSR_EL2_A_BIT | |
| 102 | SPSR_EL2_D_BIT; |
| 103 | |
| 104 | /* Disable stage 1 MMU and caches */ |
| 105 | rec->sysregs.sctlr_el1 = SCTLR_EL1_FLAGS; |
| 106 | |
| 107 | /* Set the endianness of the target to that of the caller */ |
Arvind Ram Prakash | bd36a1b | 2022-12-15 12:16:36 -0600 | [diff] [blame] | 108 | rec->sysregs.sctlr_el1 |= caller_sctlr_el1 & SCTLR_ELx_EE_BIT; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 109 | } |
| 110 | |
| 111 | static unsigned long rd_map_read_rec_count(struct granule *g_rd) |
| 112 | { |
| 113 | unsigned long rec_count; |
Javier Almansa Sobrino | 2f717dd | 2024-02-12 20:49:46 +0000 | [diff] [blame^] | 114 | struct rd *rd = buffer_granule_map(g_rd, SLOT_RD); |
AlexeiFedorov | 4716542 | 2023-09-13 11:47:57 +0100 | [diff] [blame] | 115 | |
AlexeiFedorov | 9a9062c | 2023-08-21 15:41:48 +0100 | [diff] [blame] | 116 | assert(rd != NULL); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 117 | |
| 118 | rec_count = get_rd_rec_count_unlocked(rd); |
| 119 | buffer_unmap(rd); |
| 120 | return rec_count; |
| 121 | } |
| 122 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 123 | static void psci_cpu_on(struct rec *rec, struct rmi_rec_exit *rec_exit, |
| 124 | struct rsi_result *res) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 125 | { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 126 | unsigned long target_cpu = rec->regs[1]; |
| 127 | unsigned long entry_point_address = rec->regs[2]; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 128 | unsigned long target_rec_idx; |
| 129 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 130 | res->action = UPDATE_REC_RETURN_TO_REALM; |
| 131 | |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 132 | /* Check that entry_point_address is a Protected Realm Address */ |
| 133 | if (!addr_in_rec_par(rec, entry_point_address)) { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 134 | res->smc_res.x[0] = PSCI_RETURN_INVALID_ADDRESS; |
| 135 | return; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 136 | } |
| 137 | |
| 138 | /* Get REC index from MPIDR */ |
| 139 | target_rec_idx = mpidr_to_rec_idx(target_cpu); |
| 140 | |
| 141 | /* |
| 142 | * Check that the target_cpu is a valid value. |
| 143 | * Note that the RMM enforces that the REC are created with |
| 144 | * consecutively increasing indexes starting from zero. |
| 145 | */ |
| 146 | if (target_rec_idx >= rd_map_read_rec_count(rec->realm_info.g_rd)) { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 147 | res->smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS; |
| 148 | return; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 149 | } |
| 150 | |
| 151 | /* Check if we're trying to turn ourselves on */ |
| 152 | if (target_rec_idx == rec->rec_idx) { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 153 | res->smc_res.x[0] = PSCI_RETURN_ALREADY_ON; |
| 154 | return; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 155 | } |
| 156 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 157 | /* Record that a PSCI request is outstanding */ |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 158 | rec->psci_info.pending = true; |
| 159 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 160 | /* |
| 161 | * Notify the Host, passing the FID and MPIDR arguments. |
| 162 | * Leave REC registers unchanged; these will be read and updated by |
| 163 | * psci_complete_request. |
| 164 | */ |
| 165 | forward_args_to_host(2U, rec, rec_exit); |
| 166 | res->action = EXIT_TO_HOST; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 167 | } |
| 168 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 169 | static void psci_affinity_info(struct rec *rec, struct rmi_rec_exit *rec_exit, |
| 170 | struct rsi_result *res) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 171 | { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 172 | unsigned long target_affinity = rec->regs[1]; |
| 173 | unsigned long lowest_affinity_level = rec->regs[2]; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 174 | unsigned long target_rec_idx; |
| 175 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 176 | res->action = UPDATE_REC_RETURN_TO_REALM; |
| 177 | |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 178 | if (lowest_affinity_level != 0UL) { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 179 | res->smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS; |
| 180 | return; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 181 | } |
| 182 | |
| 183 | /* Get REC index from MPIDR */ |
| 184 | target_rec_idx = mpidr_to_rec_idx(target_affinity); |
| 185 | |
| 186 | /* |
| 187 | * Check that the target_affinity is a valid value. |
| 188 | * Note that the RMM enforces that the REC are created with |
| 189 | * consecutively increasing indexes starting from zero. |
| 190 | */ |
| 191 | if (target_rec_idx >= rd_map_read_rec_count(rec->realm_info.g_rd)) { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 192 | res->smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS; |
| 193 | return; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 194 | } |
| 195 | |
| 196 | /* Check if the vCPU targets itself */ |
| 197 | if (target_rec_idx == rec->rec_idx) { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 198 | res->smc_res.x[0] = PSCI_AFFINITY_INFO_ON; |
| 199 | return; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 200 | } |
| 201 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 202 | /* Record that a PSCI request is outstanding */ |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 203 | rec->psci_info.pending = true; |
| 204 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 205 | /* |
| 206 | * Notify the Host, passing the FID and MPIDR arguments. |
| 207 | * Leave REC registers unchanged; these will be read and updated |
| 208 | * by psci_complete_request. |
| 209 | */ |
| 210 | forward_args_to_host(2U, rec, rec_exit); |
| 211 | |
| 212 | res->action = EXIT_TO_HOST; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 213 | } |
| 214 | |
| 215 | /* |
| 216 | * Turning a system off or requesting a reboot of a realm is enforced by the |
| 217 | * RMM by preventing execution of a REC after the function has run. Reboot |
| 218 | * functionality must be provided by the host hypervisor by creating a new |
| 219 | * Realm with associated attestation, measurement etc. |
| 220 | */ |
| 221 | static void system_off_reboot(struct rec *rec) |
| 222 | { |
| 223 | struct rd *rd; |
| 224 | struct granule *g_rd = rec->realm_info.g_rd; |
| 225 | |
| 226 | /* |
| 227 | * The RECs (and, consequently, the PSCI calls) run without any |
| 228 | * RMM lock held. Therefore, we cannot cause a deadlock when we acquire |
| 229 | * the rd lock here before we set the Realm's new state. |
| 230 | */ |
| 231 | granule_lock(g_rd, GRANULE_STATE_RD); |
Javier Almansa Sobrino | 2f717dd | 2024-02-12 20:49:46 +0000 | [diff] [blame^] | 232 | rd = buffer_granule_map(rec->realm_info.g_rd, SLOT_RD); |
AlexeiFedorov | 9a9062c | 2023-08-21 15:41:48 +0100 | [diff] [blame] | 233 | assert(rd != NULL); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 234 | |
Mate Toth-Pal | 988dfcb | 2024-01-19 10:52:06 +0100 | [diff] [blame] | 235 | set_rd_state(rd, REALM_SYSTEM_OFF); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 236 | |
| 237 | buffer_unmap(rd); |
| 238 | granule_unlock(g_rd); |
| 239 | |
| 240 | /* TODO: Invalidate all stage 2 entris to ensure REC exits */ |
| 241 | } |
| 242 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 243 | static void psci_system_off_reset(struct rec *rec, |
| 244 | struct rmi_rec_exit *rec_exit, |
| 245 | struct rsi_result *res) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 246 | { |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 247 | system_off_reboot(rec); |
| 248 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 249 | /* Notify the Host, passing the FID only */ |
| 250 | forward_args_to_host(1U, rec, rec_exit); |
| 251 | |
| 252 | res->action = EXIT_TO_HOST; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 253 | } |
| 254 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 255 | static void psci_features(struct rec *rec, struct rsi_result *res) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 256 | { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 257 | unsigned int psci_func_id = (unsigned int)rec->regs[1]; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 258 | |
| 259 | switch (psci_func_id) { |
| 260 | case SMC32_PSCI_CPU_SUSPEND: |
| 261 | case SMC64_PSCI_CPU_SUSPEND: |
| 262 | case SMC32_PSCI_CPU_OFF: |
| 263 | case SMC32_PSCI_CPU_ON: |
| 264 | case SMC64_PSCI_CPU_ON: |
| 265 | case SMC32_PSCI_AFFINITY_INFO: |
| 266 | case SMC64_PSCI_AFFINITY_INFO: |
| 267 | case SMC32_PSCI_SYSTEM_OFF: |
| 268 | case SMC32_PSCI_SYSTEM_RESET: |
| 269 | case SMC32_PSCI_FEATURES: |
| 270 | case SMCCC_VERSION: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 271 | res->smc_res.x[0] = PSCI_RETURN_SUCCESS; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 272 | break; |
| 273 | default: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 274 | res->smc_res.x[0] = PSCI_RETURN_NOT_SUPPORTED; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 275 | } |
| 276 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 277 | res->action = UPDATE_REC_RETURN_TO_REALM; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 278 | } |
| 279 | |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 280 | void handle_psci(struct rec *rec, |
| 281 | struct rmi_rec_exit *rec_exit, |
| 282 | struct rsi_result *res) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 283 | { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 284 | unsigned int function_id = (unsigned int)rec->regs[0]; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 285 | |
| 286 | switch (function_id) { |
| 287 | case SMC32_PSCI_VERSION: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 288 | psci_version(res); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 289 | break; |
| 290 | case SMC32_PSCI_CPU_SUSPEND: |
| 291 | case SMC64_PSCI_CPU_SUSPEND: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 292 | psci_cpu_suspend(rec, rec_exit, res); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 293 | break; |
| 294 | case SMC32_PSCI_CPU_OFF: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 295 | psci_cpu_off(rec, rec_exit, res); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 296 | break; |
| 297 | case SMC32_PSCI_CPU_ON: |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 298 | case SMC64_PSCI_CPU_ON: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 299 | psci_cpu_on(rec, rec_exit, res); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 300 | break; |
| 301 | case SMC32_PSCI_AFFINITY_INFO: |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 302 | case SMC64_PSCI_AFFINITY_INFO: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 303 | psci_affinity_info(rec, rec_exit, res); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 304 | break; |
| 305 | case SMC32_PSCI_SYSTEM_OFF: |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 306 | case SMC32_PSCI_SYSTEM_RESET: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 307 | psci_system_off_reset(rec, rec_exit, res); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 308 | break; |
| 309 | case SMC32_PSCI_FEATURES: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 310 | psci_features(rec, res); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 311 | break; |
| 312 | default: |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 313 | res->action = UPDATE_REC_RETURN_TO_REALM; |
| 314 | res->smc_res.x[0] = PSCI_RETURN_NOT_SUPPORTED; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 315 | break; |
| 316 | } |
| 317 | |
AlexeiFedorov | e5dcae2 | 2023-08-29 12:58:18 +0100 | [diff] [blame] | 318 | if (((unsigned int)res->action & FLAG_EXIT_TO_HOST) != 0U) { |
AlexeiFedorov | 9784420 | 2023-04-27 15:17:35 +0100 | [diff] [blame] | 319 | rec_exit->exit_reason = RMI_EXIT_PSCI; |
| 320 | } |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 321 | } |
| 322 | |
| 323 | /* |
| 324 | * In the following two functions, it is only safe to access the runnable field |
| 325 | * on the target_rec once the target_rec is no longer running on another PE and |
| 326 | * all writes performed by the other PE as part of smc_rec_enter is also |
| 327 | * guaranteed to be observed here, which we know when we read a zero refcount |
| 328 | * on the target rec using acquire semantics paired with the release semantics |
| 329 | * on the reference count in smc_rec_enter. If we observe a non-zero refcount |
| 330 | * it simply means that the target_rec is running and we can return the |
| 331 | * corresponding value. |
| 332 | */ |
/*
 * Finish a pending PSCI_CPU_ON request against @target_rec.
 *
 * Returns ALREADY_ON if the target is running or runnable, the Host's
 * DENIED status if it chose to refuse the request, or SUCCESS after
 * resetting the target REC to start at @entry_point_address.
 *
 * NOTE: the acquire read of the refcount must precede the runnable
 * check — see the ordering comment above this function.
 */
static unsigned long complete_psci_cpu_on(struct rec *target_rec,
					  unsigned long entry_point_address,
					  unsigned long context_id,
					  unsigned long caller_sctlr_el1,
					  unsigned long status)
{
	/* Target still running on another PE, or already marked runnable */
	if ((granule_refcount_read_acquire(target_rec->g_rec) != 0U) ||
	    target_rec->runnable) {
		return PSCI_RETURN_ALREADY_ON;
	}

	/*
	 * Host is permitted to deny a PSCI_CPU_ON request,
	 * if the target CPU is not already on.
	 */
	if (status == PSCI_RETURN_DENIED) {
		return PSCI_RETURN_DENIED;
	}

	/* Warm-boot the target: reset state, pass context_id in x0 */
	psci_reset_rec(target_rec, caller_sctlr_el1);
	target_rec->regs[0] = context_id;
	target_rec->pc = entry_point_address;
	target_rec->runnable = true;
	return PSCI_RETURN_SUCCESS;
}
| 358 | |
| 359 | static unsigned long complete_psci_affinity_info(struct rec *target_rec) |
| 360 | { |
AlexeiFedorov | d6d93d8 | 2024-02-13 16:52:11 +0000 | [diff] [blame] | 361 | if ((granule_refcount_read_acquire(target_rec->g_rec) != 0U) || |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 362 | target_rec->runnable) { |
| 363 | return PSCI_AFFINITY_INFO_ON; |
| 364 | } |
| 365 | |
| 366 | return PSCI_AFFINITY_INFO_OFF; |
| 367 | } |
| 368 | |
| 369 | unsigned long psci_complete_request(struct rec *calling_rec, |
AlexeiFedorov | 120d7d0 | 2023-08-02 16:51:48 +0100 | [diff] [blame] | 370 | struct rec *target_rec, unsigned long status) |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 371 | { |
AlexeiFedorov | 120d7d0 | 2023-08-02 16:51:48 +0100 | [diff] [blame] | 372 | unsigned long ret = RMI_SUCCESS; |
| 373 | unsigned long rec_ret = PSCI_RETURN_NOT_SUPPORTED; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 374 | unsigned long mpidr = calling_rec->regs[1]; |
| 375 | |
| 376 | if (!calling_rec->psci_info.pending) { |
| 377 | return RMI_ERROR_INPUT; |
| 378 | } |
| 379 | |
| 380 | if (calling_rec->realm_info.g_rd != target_rec->realm_info.g_rd) { |
| 381 | return RMI_ERROR_INPUT; |
| 382 | } |
| 383 | |
| 384 | if (mpidr_to_rec_idx(mpidr) != target_rec->rec_idx) { |
| 385 | return RMI_ERROR_INPUT; |
| 386 | } |
| 387 | |
| 388 | switch (calling_rec->regs[0]) { |
| 389 | case SMC32_PSCI_CPU_ON: |
| 390 | case SMC64_PSCI_CPU_ON: |
AlexeiFedorov | 120d7d0 | 2023-08-02 16:51:48 +0100 | [diff] [blame] | 391 | if ((status != PSCI_RETURN_SUCCESS) && |
| 392 | (status != PSCI_RETURN_DENIED)) { |
| 393 | return RMI_ERROR_INPUT; |
| 394 | } |
| 395 | |
| 396 | rec_ret = complete_psci_cpu_on(target_rec, |
| 397 | calling_rec->regs[2], |
Soby Mathew | e52db16 | 2023-09-19 13:46:08 +0100 | [diff] [blame] | 398 | calling_rec->regs[3], |
AlexeiFedorov | 120d7d0 | 2023-08-02 16:51:48 +0100 | [diff] [blame] | 399 | calling_rec->sysregs.sctlr_el1, |
| 400 | status); |
| 401 | /* |
| 402 | * If the target CPU is already running and the Host has denied the |
| 403 | * PSCI_CPU_ON request, then return error back to Host. |
| 404 | */ |
| 405 | if ((status == PSCI_RETURN_DENIED) && |
| 406 | (rec_ret == PSCI_RETURN_ALREADY_ON)) { |
| 407 | ret = RMI_ERROR_INPUT; |
| 408 | } |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 409 | break; |
| 410 | case SMC32_PSCI_AFFINITY_INFO: |
| 411 | case SMC64_PSCI_AFFINITY_INFO: |
AlexeiFedorov | 120d7d0 | 2023-08-02 16:51:48 +0100 | [diff] [blame] | 412 | if (status != PSCI_RETURN_SUCCESS) { |
| 413 | return RMI_ERROR_INPUT; |
| 414 | } |
| 415 | |
| 416 | rec_ret = complete_psci_affinity_info(target_rec); |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 417 | break; |
| 418 | default: |
| 419 | assert(false); |
| 420 | } |
| 421 | |
AlexeiFedorov | 120d7d0 | 2023-08-02 16:51:48 +0100 | [diff] [blame] | 422 | calling_rec->regs[0] = rec_ret; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 423 | calling_rec->regs[1] = 0; |
| 424 | calling_rec->regs[2] = 0; |
| 425 | calling_rec->regs[3] = 0; |
| 426 | calling_rec->psci_info.pending = false; |
| 427 | |
AlexeiFedorov | 120d7d0 | 2023-08-02 16:51:48 +0100 | [diff] [blame] | 428 | return ret; |
Soby Mathew | b4c6df4 | 2022-11-09 11:13:29 +0000 | [diff] [blame] | 429 | } |