Summer Qin | 90602de | 2020-08-04 10:23:39 +0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2018-2020, Arm Limited. All rights reserved. |
| 3 | * |
| 4 | * SPDX-License-Identifier: BSD-3-Clause |
| 5 | * |
| 6 | */ |
| 7 | |
Summer Qin | 9c1fba1 | 2020-08-12 15:49:12 +0800 | [diff] [blame] | 8 | #include "arch.h" |
Summer Qin | 90602de | 2020-08-04 10:23:39 +0800 | [diff] [blame] | 9 | #include "tfm_secure_api.h" |
Summer Qin | 90602de | 2020-08-04 10:23:39 +0800 | [diff] [blame] | 10 | #include "tfm/tfm_spm_services.h" |
| 11 | |
TTornblom | 18b3bf0 | 2020-09-03 17:42:11 +0200 | [diff] [blame^] | 12 | #if defined(__ICCARM__) |
| 13 | uint32_t tfm_core_svc_handler(uint32_t *svc_args, uint32_t lr, uint32_t *msp); |
| 14 | #pragma required=tfm_core_svc_handler |
| 15 | #endif |
| 16 | |
Summer Qin | 90602de | 2020-08-04 10:23:39 +0800 | [diff] [blame] | 17 | nsfptr_t ns_entry; |
| 18 | |
| 19 | void jump_to_ns_code(void) |
| 20 | { |
| 21 | /* Calls the non-secure Reset_Handler to jump to the non-secure binary */ |
| 22 | ns_entry(); |
| 23 | } |
| 24 | |
/*
 * Retrieve the client ID of the caller of the running secure service.
 *
 * \param[out] caller_client_id  Storage the SVC handler fills in with the
 *                               caller's client ID (handled SPM-side;
 *                               NOTE(review): contract not visible here).
 * \return  Status code produced by the TFM_SVC_GET_CALLER_CLIENT_ID
 *          handler; naked, so the handler's r0 is returned untouched.
 */
__attribute__((naked))
int32_t tfm_core_get_caller_client_id(int32_t *caller_client_id)
{
    __ASM volatile(
        /* Trap to the secure SVC handler; r0 carries caller_client_id */
        "SVC %0\n"
        /* Return with whatever result the handler left in r0 */
        "BX LR\n"
        : : "I" (TFM_SVC_GET_CALLER_CLIENT_ID));
}
| 33 | |
/*
 * Issue a generic request to the SPM via SVC.
 *
 * \param[in] request_type  Request identifier; per AAPCS it arrives in r0
 *                          and, because the function is naked, reaches the
 *                          SVC handler unmodified.
 * \return  Result left in r0 by the TFM_SVC_SPM_REQUEST handler.
 */
__attribute__((naked))
static int32_t tfm_spm_request(int32_t request_type)
{
    __ASM volatile(
        "SVC %0\n"
        "BX lr\n"
        : : "I" (TFM_SVC_SPM_REQUEST));
}
| 42 | |
Summer Qin | 90602de | 2020-08-04 10:23:39 +0800 | [diff] [blame] | 43 | int32_t tfm_spm_request_reset_vote(void) |
| 44 | { |
Mark Horvath | 4924cf8 | 2020-08-05 15:38:17 +0200 | [diff] [blame] | 45 | return tfm_spm_request((int32_t)TFM_SPM_REQUEST_RESET_VOTE); |
Summer Qin | 90602de | 2020-08-04 10:23:39 +0800 | [diff] [blame] | 46 | } |
| 47 | |
/*
 * Enable the interrupt line associated with the given partition signal.
 *
 * \param[in] irq_signal  Signal identifying the IRQ; travels in r0 to the
 *                        TFM_SVC_ENABLE_IRQ handler (naked: no prologue).
 */
__attribute__((naked))
void tfm_enable_irq(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_ENABLE_IRQ));
}
| 55 | |
/*
 * Disable the interrupt line associated with the given partition signal.
 *
 * \param[in] irq_signal  Signal identifying the IRQ; travels in r0 to the
 *                        TFM_SVC_DISABLE_IRQ handler (naked: no prologue).
 */
__attribute__((naked))
void tfm_disable_irq(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_DISABLE_IRQ));
}
| 63 | |
/*
 * Non-blocking core of psa_wait(): asks the SPM (via SVC) which of the
 * requested signals are currently asserted.
 *
 * \param[in] signal_mask  Set of signals of interest (r0).
 * \param[in] timeout      Timeout value forwarded to the handler (r1).
 * \return  Currently asserted signal mask as computed by the
 *          TFM_SVC_PSA_WAIT handler; may be 0 (see psa_wait() below,
 *          which loops on that case).
 */
__attribute__((naked))
static psa_signal_t psa_wait_internal(psa_signal_t signal_mask,
                                      uint32_t timeout)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_PSA_WAIT));
}
| 72 | |
| 73 | psa_signal_t psa_wait(psa_signal_t signal_mask, uint32_t timeout) |
| 74 | { |
| 75 | /* FIXME: By using the 'WFI' instruction this function blocks until an |
| 76 | * interrupt happens. It is necessary to do this here as tfm_core_psa_wait |
| 77 | * runs with the priority of the SVC, so it cannot be interrupted, so |
| 78 | * waiting in it for the required interrupt to happen is not an option. |
| 79 | */ |
| 80 | psa_signal_t actual_signal_mask; |
| 81 | |
| 82 | while (1) { |
| 83 | actual_signal_mask = psa_wait_internal(signal_mask, timeout); |
| 84 | if ((actual_signal_mask & signal_mask) != 0) { |
| 85 | return actual_signal_mask; |
| 86 | } |
| 87 | __WFI(); |
| 88 | } |
| 89 | } |
| 90 | |
/*
 * Signal end-of-interrupt for a handled IRQ to the SPM.
 *
 * \param[in] irq_signal  Signal of the IRQ being acknowledged; travels in
 *                        r0 to the TFM_SVC_PSA_EOI handler.
 */
__attribute__((naked))
void psa_eoi(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_PSA_EOI));
}
| 98 | |
| 99 | #if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__) |
/*
 * Secure function (SFN) request veneer - Armv8-M Mainline variant.
 *
 * Saves the callee-saved registers, asks the SPM (SVC) to set up the
 * target service's environment, calls the service (address expected in
 * lr after the SVC returns - NOTE(review): set by the SVC handler, not
 * visible here), asks the SPM to tear the environment down again, and
 * restores the saved registers.
 *
 * \param[in] desc_ptr  Request descriptor, passed in r0 to the
 *                      TFM_SVC_SFN_REQUEST handler.
 * \return  Service result left in r0.
 */
__attribute__((section("SFN"), naked))
int32_t tfm_core_sfn_request(const struct tfm_sfn_req_s *desc_ptr)
{
    __ASM volatile(
        /* Preserve callee-saved registers and the return address */
        "PUSH {r4-r12, lr} \n"
        /* Ask the SPM to prepare the service's execution environment */
        "SVC %[SVC_REQ] \n"
        /* Scrub callee-saved registers to avoid leaking caller state */
        "MOV r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        /* Call the secure function */
        "BLX lr \n"
        /* Ask the SPM to restore the caller's environment */
        "SVC %[SVC_RET] \n"
        /* Restore callee-saved registers and return */
        "POP {r4-r12, pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_SFN_REQUEST),
            [SVC_RET] "I" (TFM_SVC_SFN_RETURN)
        );
}
| 121 | |
/*
 * Privileged wrapper around a partition's unprivileged IRQ handler -
 * Armv8-M Mainline variant.
 *
 * Saves callee-saved state, asks the SPM (SVC) to deprivilege and set up
 * the handler's environment, runs the handler, then asks the SPM to
 * restore the interrupted partition's environment.
 *
 * \param[in] partition_id    Partition owning the handler (r0).
 * \param[in] unpriv_handler  Address of the unprivileged handler (r1).
 * \param[in] irq_signal      Signal associated with the IRQ (r2).
 * \param[in] irq_line        IRQ line number (r3).
 * All four arrive per AAPCS and reach the TFM_SVC_DEPRIV_REQ handler
 * unmodified (naked: no compiler prologue).
 */
__attribute__((section("SFN"), naked))
void priv_irq_handler_main(uint32_t partition_id, uint32_t unpriv_handler,
                           uint32_t irq_signal, uint32_t irq_line)
{
    __ASM(
        /* Save the callee saved registers*/
        "PUSH {r4-r12, lr} \n"
        /* Request SVC to configure environment for the unpriv IRQ handler */
        "SVC %[SVC_REQ] \n"
        /* clear the callee saved registers to prevent information leak */
        "MOV r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        /* Branch to the unprivileged handler */
        "BLX lr \n"
        /* Request SVC to reconfigure the environment of the interrupted
         * partition
         */
        "SVC %[SVC_RET] \n"
        /* restore callee saved registers and return */
        "POP {r4-r12, pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_DEPRIV_REQ)
        , [SVC_RET] "I" (TFM_SVC_DEPRIV_RET)
        );
}
| 152 | #elif defined(__ARM_ARCH_8M_BASE__) |
/*
 * Secure function (SFN) request veneer - Armv8-M Baseline variant.
 *
 * Same contract as the Mainline version above, but Baseline Thumb cannot
 * PUSH/POP the high registers (r8-r12) directly, so they are shuttled
 * through r4-r7 around the stack operations.
 *
 * \param[in] desc_ptr  Request descriptor, passed in r0 to the
 *                      TFM_SVC_SFN_REQUEST handler.
 * \return  Service result left in r0.
 */
__attribute__((section("SFN"), naked))
int32_t tfm_core_sfn_request(const struct tfm_sfn_req_s *desc_ptr)
{
    __ASM volatile(
        /* Save lr, then r4-r7, then r8-r12 via the low registers */
        "PUSH {lr} \n"
        "PUSH {r4-r7} \n"
        "MOV r4, r8 \n"
        "MOV r5, r9 \n"
        "MOV r6, r10 \n"
        "MOV r7, r11 \n"
        "PUSH {r4-r7} \n"
        "MOV r4, r12 \n"
        "PUSH {r4} \n"
        /* Ask the SPM to prepare the service's execution environment */
        "SVC %[SVC_REQ] \n"
        /* Scrub callee-saved registers to avoid leaking caller state */
        "MOVS r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        /* Call the secure function */
        "BLX lr \n"
        /* Ask the SPM to restore the caller's environment */
        "SVC %[SVC_RET] \n"
        /* Restore r12, r8-r11, r4-r7 (reverse of the save sequence) */
        "POP {r4} \n"
        "MOV r12, r4 \n"
        "POP {r4-r7} \n"
        "MOV r8, r4 \n"
        "MOV r9, r5 \n"
        "MOV r10, r6 \n"
        "MOV r11, r7 \n"
        "POP {r4-r7} \n"
        "POP {pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_SFN_REQUEST),
            [SVC_RET] "I" (TFM_SVC_SFN_RETURN)
        );
}
| 190 | |
/*
 * Privileged wrapper around a partition's unprivileged IRQ handler -
 * Armv8-M Baseline variant.
 *
 * Same contract as the Mainline version above; high registers (r8-r12)
 * are saved/restored through r4-r7 because Baseline Thumb PUSH/POP only
 * reach the low registers.
 *
 * \param[in] partition_id    Partition owning the handler (r0).
 * \param[in] unpriv_handler  Address of the unprivileged handler (r1).
 * \param[in] irq_signal      Signal associated with the IRQ (r2).
 * \param[in] irq_line        IRQ line number (r3).
 */
__attribute__((section("SFN"), naked))
void priv_irq_handler_main(uint32_t partition_id, uint32_t unpriv_handler,
                           uint32_t irq_signal, uint32_t irq_line)
{
    __ASM(
        /* Save the callee saved registers*/
        "PUSH {r4-r7, lr} \n"
        "MOV r4, r8 \n"
        "MOV r5, r9 \n"
        "MOV r6, r10 \n"
        "MOV r7, r11 \n"
        "PUSH {r4-r7} \n"
        "MOV r4, r12 \n"
        "PUSH {r4} \n"
        /* Request SVC to configure environment for the unpriv IRQ handler */
        "SVC %[SVC_REQ] \n"
        /* clear the callee saved registers to prevent information leak */
        "MOVS r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        /* Branch to the unprivileged handler */
        "BLX lr \n"
        /* Request SVC to reconfigure the environment of the interrupted
         * partition
         */
        "SVC %[SVC_RET] \n"
        /* restore callee saved registers and return */
        "POP {r4} \n"
        "MOV r12, r4 \n"
        "POP {r4-r7} \n"
        "MOV r8, r4 \n"
        "MOV r9, r5 \n"
        "MOV r10, r6 \n"
        "MOV r11, r7 \n"
        "POP {r4-r7, pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_DEPRIV_REQ)
        , [SVC_RET] "I" (TFM_SVC_DEPRIV_RET)
        );
}
| 235 | #endif |
| 236 | |
| 237 | #if defined(__ARM_ARCH_8_1M_MAIN__) || \ |
| 238 | defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__) |
| 239 | void tfm_arch_prioritize_secure_exception(void) |
| 240 | { |
| 241 | uint32_t VECTKEY; |
| 242 | SCB_Type *scb = SCB; |
| 243 | uint32_t AIRCR; |
| 244 | |
| 245 | /* Set PRIS flag in AIRCR */ |
| 246 | AIRCR = scb->AIRCR; |
| 247 | VECTKEY = (~AIRCR & SCB_AIRCR_VECTKEYSTAT_Msk); |
| 248 | scb->AIRCR = SCB_AIRCR_PRIS_Msk | |
| 249 | VECTKEY | |
| 250 | (AIRCR & ~SCB_AIRCR_VECTKEY_Msk); |
| 251 | } |
| 252 | #elif defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \ |
| 253 | defined(__ARM_ARCH_7EM__) |
void tfm_arch_prioritize_secure_exception(void)
{
    /* Armv6-M/Armv7-M have no Security Extension, so there is no
     * AIRCR.PRIS-style prioritization to configure: intentionally empty.
     */
}
| 257 | #endif |
| 258 | |
Jamie Fox | 4558767 | 2020-08-17 18:31:14 +0100 | [diff] [blame] | 259 | void tfm_arch_configure_coprocessors(void) |
| 260 | { |
| 261 | #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) |
| 262 | /* Configure Secure access to the FPU only if the secure image is being |
| 263 | * built with the FPU in use. This avoids introducing extra interrupt |
| 264 | * latency when the FPU is not used by the SPE. |
| 265 | */ |
| 266 | #if defined (__FPU_USED) && (__FPU_USED == 1U) |
| 267 | /* Enable Secure privileged and unprivilged access to the FP Extension */ |
| 268 | SCB->CPACR |= (3U << 10U*2U) /* enable CP10 full access */ |
| 269 | | (3U << 11U*2U); /* enable CP11 full access */ |
| 270 | |
| 271 | #if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__) |
| 272 | /* If the SPE will ever use the floating-point registers for sensitive data, |
| 273 | * then FPCCR.TS, FPCCR.CLRONRET and FPCCR.CLRONRETS must be set at |
| 274 | * initialisation and not changed again afterwards. |
| 275 | */ |
| 276 | FPU->FPCCR |= FPU_FPCCR_TS_Msk |
| 277 | | FPU_FPCCR_CLRONRET_Msk |
| 278 | | FPU_FPCCR_CLRONRETS_Msk; |
| 279 | #endif |
| 280 | #endif |
| 281 | |
| 282 | #if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__) |
| 283 | /* Permit Non-secure access to the Floating-point Extension. |
| 284 | * Note: It is still necessary to set CPACR_NS to enable the FP Extension in |
| 285 | * the NSPE. This configuration is left to NS privileged software. |
| 286 | */ |
| 287 | SCB->NSACR |= SCB_NSACR_CP10_Msk | SCB_NSACR_CP11_Msk; |
| 288 | #endif |
| 289 | #endif |
| 290 | } |
| 291 | |
Summer Qin | 90602de | 2020-08-04 10:23:39 +0800 | [diff] [blame] | 292 | #if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__) |
/*
 * SVC exception entry - Armv8-M Mainline variant.
 *
 * Selects the stack holding the caller's exception frame (EXC_RETURN
 * bit 2: 0 = handler mode on MSP, 1 = thread mode on PSP), then calls
 * tfm_core_svc_handler(svc_args=r0, lr=r1, msp=r2) and exits with the
 * EXC_RETURN value that handler returns in r0.
 */
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
        "MRS r2, MSP \n"
        /* Check store SP in thread mode to r0 */
        "TST lr, #4 \n"
        "ITE EQ \n"
        "MOVEQ r0, r2 \n"            /* Handler mode: frame on MSP */
        "MRSNE r0, PSP \n"           /* Thread mode: frame on PSP */
        "MOV r1, lr \n"
        "BL tfm_core_svc_handler \n"
        "BX r0 \n"                   /* Exit via returned EXC_RETURN */
        );
}
| 307 | #elif defined(__ARM_ARCH_8M_BASE__) |
/*
 * SVC exception entry - Armv8-M Baseline variant.
 *
 * Same logic as the Mainline version, written without IT blocks (not
 * available on Baseline): r0 defaults to MSP and is overwritten with
 * PSP when EXC_RETURN bit 2 indicates thread mode. Calls
 * tfm_core_svc_handler(svc_args=r0, lr=r1, msp=r2) and exits with the
 * EXC_RETURN value it returns in r0.
 */
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
        "MRS r2, MSP \n"
        "MOVS r1, #4 \n"             /* EXC_RETURN SPSEL test mask */
        "MOV r3, lr \n"
        "MOV r0, r2 \n"              /* Assume frame on MSP */
        "TST r1, r3 \n"
        "BEQ handler \n"
        /* If SVC was made from thread mode, overwrite r0 with PSP */
        "MRS r0, PSP \n"
        "handler: \n"
        "MOV r1, lr \n"
        "BL tfm_core_svc_handler \n"
        "BX r0 \n"                   /* Exit via returned EXC_RETURN */
        );
}
| 325 | #elif defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \ |
| 326 | defined(__ARM_ARCH_7EM__) |
/*
 * SVC exception entry - Armv6-M/Armv7-M variant.
 *
 * Only SVCs raised from thread mode are serviced: for those, the PSP
 * (caller's frame) and EXC_RETURN are passed to tfm_core_svc_handler
 * and its r0 result is used as the exit EXC_RETURN. An SVC raised from
 * handler mode returns immediately without invoking the C handler
 * (NOTE(review): apparently unsupported on these architectures -
 * confirm against the SPM design).
 */
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
        "MOVS r0, #4 \n" /* Check store SP in thread mode to r0 */
        "MOV r1, lr \n"
        "TST r0, r1 \n"
        "BEQ handler \n"
        "MRS r0, PSP \n" /* Coming from thread mode */
        "B sp_stored \n"
        "handler: \n"
        "BX lr \n" /* Coming from handler mode */
        "sp_stored: \n"
        "MOV r1, lr \n"
        "BL tfm_core_svc_handler \n"
        "BX r0 \n"
        );
}
| 344 | #endif |