/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include "arch.h"
#include "tfm_secure_api.h"
#include "tfm/tfm_spm_services.h"

#if defined(__ICCARM__)
uint32_t tfm_core_svc_handler(uint32_t *svc_args, uint32_t lr, uint32_t *msp);
#pragma required=tfm_core_svc_handler
#endif

nsfptr_t ns_entry;

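/* Hands control to the non-secure image. ns_entry must already point at the
 * NS Reset_Handler; the call is not expected to return, so falling through to
 * tfm_core_panic() indicates an error.
 */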
void jump_to_ns_code(void)
{
    /* Calls the non-secure Reset_Handler to jump to the non-secure binary */
    ns_entry();

    tfm_core_panic();
}

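/* Naked SVC veneer: caller_client_id stays in r0 and is read by the SVC
 * handler from the exception stack frame, which also places the return code
 * back into the stacked r0 before this function returns to the caller.
 */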
__attribute__((naked))
int32_t tfm_core_get_caller_client_id(int32_t *caller_client_id)
{
    __ASM volatile(
        "SVC %0\n"
        "BX LR\n"
        : : "I" (TFM_SVC_GET_CALLER_CLIENT_ID));
}

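/* Naked SVC veneer used to pass an SPM request code (kept in r0) to the
 * privileged SVC handler, e.g. the reset vote request below.
 */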
__attribute__((naked))
static int32_t tfm_spm_request(int32_t request_type)
{
    __ASM volatile(
        "SVC %0\n"
        "BX lr\n"
        : : "I" (TFM_SVC_SPM_REQUEST));
}

int32_t tfm_spm_request_reset_vote(void)
{
    return tfm_spm_request((int32_t)TFM_SPM_REQUEST_RESET_VOTE);
}

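/* Naked SVC veneers for interrupt control: irq_signal stays in r0 so the SVC
 * handler can enable or disable the interrupt line bound to that signal on
 * behalf of the calling partition.
 */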
__attribute__((naked))
void tfm_enable_irq(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_ENABLE_IRQ));
}

__attribute__((naked))
void tfm_disable_irq(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_DISABLE_IRQ));
}

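/* Naked SVC veneer behind psa_wait(). Running at SVC priority it cannot
 * block, so it only reports the currently asserted signals; the blocking is
 * done by the wrapper below.
 */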
__attribute__((naked))
static psa_signal_t psa_wait_internal(psa_signal_t signal_mask,
                                      uint32_t timeout)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_PSA_WAIT));
}

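/* Returns the asserted subset of signal_mask, sleeping with WFI between polls
 * until at least one of the requested signals is set.
 */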
psa_signal_t psa_wait(psa_signal_t signal_mask, uint32_t timeout)
{
    /* FIXME: This function blocks by executing the 'WFI' instruction until an
     * interrupt arrives. It has to be done here because tfm_core_psa_wait
     * runs at SVC priority and therefore cannot be interrupted, so it cannot
     * itself wait for the required interrupt to happen.
     */
    psa_signal_t actual_signal_mask;

    while (1) {
        actual_signal_mask = psa_wait_internal(signal_mask, timeout);
        if ((actual_signal_mask & signal_mask) != 0) {
            return actual_signal_mask;
        }
        __WFI();
    }
}

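/* Naked SVC veneer: reports completion of the interrupt handling bound to
 * irq_signal so the SPM can clear the signal and re-enable the interrupt.
 */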
__attribute__((naked))
void psa_eoi(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_PSA_EOI));
}
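
/* Illustrative sketch (not part of this file's API surface): a partition that
 * owns an interrupt typically combines the calls above along these lines,
 * where MY_IRQ_SIGNAL is a hypothetical signal defined in its manifest:
 *
 *     tfm_enable_irq(MY_IRQ_SIGNAL);
 *     for (;;) {
 *         psa_signal_t asserted = psa_wait(MY_IRQ_SIGNAL, PSA_BLOCK);
 *         if (asserted & MY_IRQ_SIGNAL) {
 *             handle_my_irq();            /# hypothetical handler #/
 *             psa_eoi(MY_IRQ_SIGNAL);
 *         }
 *     }
 */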

#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
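/* Entry veneer for a secure function (SFN) request in the function-based
 * model. The callee-saved registers are stacked, TFM_SVC_SFN_REQUEST asks the
 * SPM to prepare the target partition's context (which is expected to leave
 * lr pointing at the service function to call), r4-r11 are cleared so no
 * caller state leaks into the partition, the service function is invoked, and
 * TFM_SVC_SFN_RETURN hands control back so the caller's registers can be
 * restored.
 */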
__attribute__((section("SFN"), naked))
int32_t tfm_core_sfn_request(const struct tfm_sfn_req_s *desc_ptr)
{
    __ASM volatile(
        "PUSH {r4-r12, lr} \n"
        "SVC %[SVC_REQ] \n"
        "MOV r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        "BLX lr \n"
        "SVC %[SVC_RET] \n"
        "POP {r4-r12, pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_SFN_REQUEST),
            [SVC_RET] "I" (TFM_SVC_SFN_RETURN)
        );
}

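/* Runs a partition's interrupt handler unprivileged: TFM_SVC_DEPRIV_REQ sets
 * up the de-privileged environment, the callee-saved registers are cleared to
 * avoid leaking privileged state, the handler is called, and
 * TFM_SVC_DEPRIV_RET restores the interrupted partition's environment.
 */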
__attribute__((section("SFN"), naked))
void priv_irq_handler_main(uint32_t partition_id, uint32_t unpriv_handler,
                           uint32_t irq_signal, uint32_t irq_line)
{
    __ASM(
        /* Save the callee saved registers */
        "PUSH {r4-r12, lr} \n"
        /* Request SVC to configure environment for the unpriv IRQ handler */
        "SVC %[SVC_REQ] \n"
        /* Clear the callee saved registers to prevent information leak */
        "MOV r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        /* Branch to the unprivileged handler */
        "BLX lr \n"
        /* Request SVC to reconfigure the environment of the interrupted
         * partition
         */
        "SVC %[SVC_RET] \n"
        /* Restore callee saved registers and return */
        "POP {r4-r12, pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_DEPRIV_REQ)
          , [SVC_RET] "I" (TFM_SVC_DEPRIV_RET)
        );
}
#elif defined(__ARM_ARCH_8M_BASE__)
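/* v8-M Baseline variants of the veneers above: the 16-bit PUSH/POP encodings
 * only reach r0-r7 plus lr/pc, so r8-r12 are staged through the low registers
 * when the callee-saved context is saved, cleared and restored.
 */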
__attribute__((section("SFN"), naked))
int32_t tfm_core_sfn_request(const struct tfm_sfn_req_s *desc_ptr)
{
    __ASM volatile(
        "PUSH {lr} \n"
        "PUSH {r4-r7} \n"
        "MOV r4, r8 \n"
        "MOV r5, r9 \n"
        "MOV r6, r10 \n"
        "MOV r7, r11 \n"
        "PUSH {r4-r7} \n"
        "MOV r4, r12 \n"
        "PUSH {r4} \n"
        "SVC %[SVC_REQ] \n"
        "MOVS r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        "BLX lr \n"
        "SVC %[SVC_RET] \n"
        "POP {r4} \n"
        "MOV r12, r4 \n"
        "POP {r4-r7} \n"
        "MOV r8, r4 \n"
        "MOV r9, r5 \n"
        "MOV r10, r6 \n"
        "MOV r11, r7 \n"
        "POP {r4-r7} \n"
        "POP {pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_SFN_REQUEST),
            [SVC_RET] "I" (TFM_SVC_SFN_RETURN)
        );
}

__attribute__((section("SFN"), naked))
void priv_irq_handler_main(uint32_t partition_id, uint32_t unpriv_handler,
                           uint32_t irq_signal, uint32_t irq_line)
{
    __ASM(
        /* Save the callee saved registers */
        "PUSH {r4-r7, lr} \n"
        "MOV r4, r8 \n"
        "MOV r5, r9 \n"
        "MOV r6, r10 \n"
        "MOV r7, r11 \n"
        "PUSH {r4-r7} \n"
        "MOV r4, r12 \n"
        "PUSH {r4} \n"
        /* Request SVC to configure environment for the unpriv IRQ handler */
        "SVC %[SVC_REQ] \n"
        /* Clear the callee saved registers to prevent information leak */
        "MOVS r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        /* Branch to the unprivileged handler */
        "BLX lr \n"
        /* Request SVC to reconfigure the environment of the interrupted
         * partition
         */
        "SVC %[SVC_RET] \n"
        /* Restore callee saved registers and return */
        "POP {r4} \n"
        "MOV r12, r4 \n"
        "POP {r4-r7} \n"
        "MOV r8, r4 \n"
        "MOV r9, r5 \n"
        "MOV r10, r6 \n"
        "MOV r11, r7 \n"
        "POP {r4-r7, pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_DEPRIV_REQ)
          , [SVC_RET] "I" (TFM_SVC_DEPRIV_RET)
        );
}
#endif

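/* Set AIRCR.PRIS so that Secure exceptions are prioritised above Non-secure
 * ones, and give the Secure fault handlers and SVCall the highest
 * configurable priority. AIRCR writes are only accepted with the key in
 * VECTKEY, which is recovered here by inverting the VECTKEYSTAT field read
 * back from the register.
 */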
#if defined(__ARM_ARCH_8_1M_MAIN__) || \
    defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__)
void tfm_arch_set_secure_exception_priorities(void)
{
    uint32_t VECTKEY;
    SCB_Type *scb = SCB;
    uint32_t AIRCR;

    /* Set PRIS flag in AIRCR */
    AIRCR = scb->AIRCR;
    VECTKEY = (~AIRCR & SCB_AIRCR_VECTKEYSTAT_Msk);
    scb->AIRCR = SCB_AIRCR_PRIS_Msk |
                 VECTKEY |
                 (AIRCR & ~SCB_AIRCR_VECTKEY_Msk);

#ifndef __ARM_ARCH_8M_BASE__
    NVIC_SetPriority(MemoryManagement_IRQn, 0);
    NVIC_SetPriority(BusFault_IRQn, 0);
    NVIC_SetPriority(SecureFault_IRQn, 0);
#endif

    /*
     * The function based model needs no PendSV for scheduling, so its
     * priority is set just above thread mode.
     */
    NVIC_SetPriority(SVCall_IRQn, 0);
    NVIC_SetPriority(PendSV_IRQn, (1 << __NVIC_PRIO_BITS) - 1);
}
#else
#error Function based model works on V8M series only.
#endif

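/* Grant the Secure side (and, on Mainline, the Non-secure side) access to the
 * FP extension and mark the FP context as Secure so its registers are cleared
 * on exception return rather than leaked.
 */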
void tfm_arch_configure_coprocessors(void)
{
#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
    /* Configure Secure access to the FPU only if the secure image is being
     * built with the FPU in use. This avoids introducing extra interrupt
     * latency when the FPU is not used by the SPE.
     */
#if defined (__FPU_USED) && (__FPU_USED == 1U)
    /* Enable Secure privileged and unprivileged access to the FP Extension */
    SCB->CPACR |= (3U << 10U*2U)     /* enable CP10 full access */
                  | (3U << 11U*2U);  /* enable CP11 full access */

#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
    /* If the SPE will ever use the floating-point registers for sensitive
     * data, then FPCCR.TS, FPCCR.CLRONRET and FPCCR.CLRONRETS must be set at
     * initialisation and not changed again afterwards.
     */
    FPU->FPCCR |= FPU_FPCCR_TS_Msk
                  | FPU_FPCCR_CLRONRET_Msk
                  | FPU_FPCCR_CLRONRETS_Msk;
#endif
#endif

#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
    /* Permit Non-secure access to the Floating-point Extension.
     * Note: It is still necessary to set CPACR_NS to enable the FP Extension
     * in the NSPE. This configuration is left to NS privileged software.
     */
    SCB->NSACR |= SCB_NSACR_CP10_Msk | SCB_NSACR_CP11_Msk;
#endif
#endif
}

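/* Common SVC entry. Bit [2] of EXC_RETURN tells which stack holds the
 * exception frame: set means the SVC came from thread mode (PSP), clear means
 * it came from handler mode, i.e. a request to de-privilege an interrupt
 * handler, in which case the stack is sealed with the 0xFEF5EDA5 value before
 * dropping privilege and the seal is popped on the way back.
 * tfm_core_svc_handler() returns the EXC_RETURN value used for the final BX.
 */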
#if defined(__ARM_ARCH_8M_BASE__) || defined(__ARM_ARCH_8_1M_MAIN__) || \
    defined(__ARM_ARCH_8M_MAIN__)
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
#if !defined(__ICCARM__)
        ".syntax unified \n"
#endif
        "MRS r0, PSP \n"
        "MRS r2, MSP \n"
        "MOVS r1, #4 \n"
        "MOV r3, lr \n"
        "TST r1, r3 \n"
        "BNE from_thread \n"
        /*
         * This branch is taken when the code is being invoked from handler
         * mode. This happens when a de-privileged interrupt handler is to be
         * run. Seal the stack before de-privileging.
         */
        "LDR r0, =0xFEF5EDA5 \n"
        "MOVS r3, r0 \n"
        "PUSH {r0, r3} \n"
        /* Overwrite r0 with MSP */
        "MOV r0, r2 \n"
        "from_thread: \n"
        "MOV r1, lr \n"
        "BL tfm_core_svc_handler \n"
        "MOVS r1, #4 \n"
        "TST r1, r0 \n"
        "BNE to_thread \n"
        /*
         * This branch is taken when the code is going to return to handler
         * mode. This happens after a de-privileged interrupt handler has been
         * run. Pop the sealing from the stack.
         */
        "POP {r1, r2} \n"
        "to_thread: \n"
        "BX r0 \n"
        );
}
#elif defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
      defined(__ARM_ARCH_7EM__)
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
        "MOVS r0, #4 \n"    /* Test EXC_RETURN bit [2]: which SP was in use */
        "MOV r1, lr \n"
        "TST r0, r1 \n"
        "BEQ handler \n"
        "MRS r0, PSP \n"    /* Coming from thread mode */
        "B sp_stored \n"
        "handler: \n"
        "BX lr \n"          /* Coming from handler mode */
        "sp_stored: \n"
        "MOV r1, lr \n"
        "BL tfm_core_svc_handler \n"
        "BX r0 \n"
        );
}
#endif

__attribute__((naked)) void HardFault_Handler(void)
{
    /* A HardFault may indicate corruption of secure state, so it is essential
     * that Non-secure code does not regain control after one is raised.
     * Returning from this exception could allow a pending NS exception to be
     * taken, so the current solution is not to return.
     */
    __ASM volatile("b .");
}

__attribute__((naked)) void MemManage_Handler(void)
{
    /* A MemManage fault may indicate corruption of secure state, so it is
     * essential that Non-secure code does not regain control after one is
     * raised. Returning from this exception could allow a pending NS
     * exception to be taken, so the current solution is not to return.
     */
    __ASM volatile("b .");
}

__attribute__((naked)) void BusFault_Handler(void)
{
    /* A BusFault may indicate corruption of secure state, so it is essential
     * that Non-secure code does not regain control after one is raised.
     * Returning from this exception could allow a pending NS exception to be
     * taken, so the current solution is not to return.
     */
    __ASM volatile("b .");
}

__attribute__((naked)) void SecureFault_Handler(void)
{
    /* A SecureFault may indicate corruption of secure state, so it is
     * essential that Non-secure code does not regain control after one is
     * raised. Returning from this exception could allow a pending NS
     * exception to be taken, so the current solution is not to return.
     */
    __ASM volatile("b .");
}