blob: 0f5a93715800ee09c6afc972e96f2b3d15d8300b [file] [log] [blame]
/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
7
#include "arch.h"
#include "tfm_secure_api.h"
#include "tfm/tfm_spm_services.h"
11
#if defined(__ICCARM__)
/* IAR: reference tfm_core_svc_handler from C so the linker keeps it even
 * though it is only called from inline assembly in SVC_Handler() below.
 */
uint32_t tfm_core_svc_handler(uint32_t *svc_args, uint32_t lr, uint32_t *msp);
#pragma required=tfm_core_svc_handler
#endif
16
/* Entry point of the non-secure image. Assumed to be initialised elsewhere
 * (e.g. from the NS vector table by boot code) before jump_to_ns_code()
 * runs -- not visible in this file; confirm against the caller.
 */
nsfptr_t ns_entry;

/**
 * \brief Transfer control to the non-secure image.
 *
 * NOTE(review): presumably never returns, since it enters the NS
 * Reset_Handler -- nothing here enforces that.
 */
void jump_to_ns_code(void)
{
    /* Calls the non-secure Reset_Handler to jump to the non-secure binary */
    ns_entry();
}
24
/**
 * \brief Ask the SPM for the client ID of the caller of the running service.
 *
 * \param[out] caller_client_id  Storage for the caller's client ID.
 *
 * \return Whatever the SVC handler leaves in r0 (presumably a status code --
 *         confirm against the TFM_SVC_GET_CALLER_CLIENT_ID handler).
 *
 * Naked function: the body is exactly the inline assembly. The SVC traps
 * into SVC_Handler (defined later in this file), which dispatches to
 * tfm_core_svc_handler(); the pointer argument reaches the handler through
 * the stacked r0 of the exception frame.
 */
__attribute__((naked))
int32_t tfm_core_get_caller_client_id(int32_t *caller_client_id)
{
    __ASM volatile(
        "SVC %0\n"
        "BX LR\n"
        : : "I" (TFM_SVC_GET_CALLER_CLIENT_ID));
}
33
/**
 * \brief Issue a generic request to the SPM via SVC.
 *
 * \param[in] request_type  A TFM_SPM_REQUEST_* code; reaches the SVC handler
 *                          through the stacked r0 of the exception frame.
 *
 * \return Whatever the TFM_SVC_SPM_REQUEST handler leaves in r0.
 */
__attribute__((naked))
static int32_t tfm_spm_request(int32_t request_type)
{
    __ASM volatile(
        "SVC %0\n"
        "BX lr\n"
        : : "I" (TFM_SVC_SPM_REQUEST));
}
42
Summer Qin90602de2020-08-04 10:23:39 +080043int32_t tfm_spm_request_reset_vote(void)
44{
Mark Horvath4924cf82020-08-05 15:38:17 +020045 return tfm_spm_request((int32_t)TFM_SPM_REQUEST_RESET_VOTE);
Summer Qin90602de2020-08-04 10:23:39 +080046}
47
/**
 * \brief Enable the interrupt line associated with a partition signal.
 *
 * \param[in] irq_signal  Signal identifying the IRQ; reaches the SVC handler
 *                        through the stacked r0 of the exception frame.
 *
 * Naked: the parameter is consumed by the TFM_SVC_ENABLE_IRQ handler, not by
 * any C code here.
 */
__attribute__((naked))
void tfm_enable_irq(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_ENABLE_IRQ));
}
55
/**
 * \brief Disable the interrupt line associated with a partition signal.
 *
 * \param[in] irq_signal  Signal identifying the IRQ; reaches the SVC handler
 *                        through the stacked r0 of the exception frame.
 *
 * Naked: the parameter is consumed by the TFM_SVC_DISABLE_IRQ handler, not
 * by any C code here.
 */
__attribute__((naked))
void tfm_disable_irq(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_DISABLE_IRQ));
}
63
/**
 * \brief SVC wrapper for the PSA wait operation (non-blocking poll).
 *
 * \param[in] signal_mask  Signals the caller is interested in.
 * \param[in] timeout      Timeout value forwarded to the SPM.
 *
 * \return Signal mask left in r0 by the TFM_SVC_PSA_WAIT handler; may have
 *         no bits of \p signal_mask set, in which case psa_wait() below
 *         sleeps and polls again.
 */
__attribute__((naked))
static psa_signal_t psa_wait_internal(psa_signal_t signal_mask,
                                      uint32_t timeout)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_PSA_WAIT));
}
72
73psa_signal_t psa_wait(psa_signal_t signal_mask, uint32_t timeout)
74{
75 /* FIXME: By using the 'WFI' instruction this function blocks until an
76 * interrupt happens. It is necessary to do this here as tfm_core_psa_wait
77 * runs with the priority of the SVC, so it cannot be interrupted, so
78 * waiting in it for the required interrupt to happen is not an option.
79 */
80 psa_signal_t actual_signal_mask;
81
82 while (1) {
83 actual_signal_mask = psa_wait_internal(signal_mask, timeout);
84 if ((actual_signal_mask & signal_mask) != 0) {
85 return actual_signal_mask;
86 }
87 __WFI();
88 }
89}
90
/**
 * \brief Signal end-of-interrupt for an IRQ signal to the SPM.
 *
 * \param[in] irq_signal  Signal whose interrupt has been handled; reaches
 *                        the SVC handler through the stacked r0 of the
 *                        exception frame.
 *
 * Naked: the parameter is consumed by the TFM_SVC_PSA_EOI handler, not by
 * any C code here.
 */
__attribute__((naked))
void psa_eoi(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_PSA_EOI));
}
98
99#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
/**
 * \brief Trampoline for calling a secure service function (SFN) --
 *        Armv8-M Mainline variant.
 *
 * \param[in] desc_ptr  Descriptor of the requested service call; consumed by
 *                      the TFM_SVC_SFN_REQUEST handler via the stacked r0.
 *
 * \return Value left in r0 when the trampoline returns.
 *
 * Flow: save callee-saved registers, SVC into the SPM to set up the service
 * environment (the handler presumably returns with lr pointing at the
 * service entry -- confirm against tfm_core_svc_handler), scrub the
 * callee-saved registers so no secure state leaks into the partition, call
 * the service with BLX, SVC back to tear the environment down, then restore
 * the saved registers and return via the stacked pc.
 */
__attribute__((section("SFN"), naked))
int32_t tfm_core_sfn_request(const struct tfm_sfn_req_s *desc_ptr)
{
    __ASM volatile(
        /* Save the callee-saved registers and the return address */
        "PUSH   {r4-r12, lr}        \n"
        /* Request SVC to set up the environment for the service call */
        "SVC    %[SVC_REQ]          \n"
        /* Clear callee-saved registers to prevent information leak */
        "MOV    r4, #0              \n"
        "MOV    r5, r4              \n"
        "MOV    r6, r4              \n"
        "MOV    r7, r4              \n"
        "MOV    r8, r4              \n"
        "MOV    r9, r4              \n"
        "MOV    r10, r4             \n"
        "MOV    r11, r4             \n"
        /* Call the service function */
        "BLX    lr                  \n"
        /* Request SVC to restore the caller's environment */
        "SVC    %[SVC_RET]          \n"
        /* Restore callee-saved registers and return */
        "POP    {r4-r12, pc}        \n"
        : : [SVC_REQ] "I" (TFM_SVC_SFN_REQUEST),
            [SVC_RET] "I" (TFM_SVC_SFN_RETURN)
        );
}
121
/**
 * \brief Trampoline for running a partition's unprivileged IRQ handler --
 *        Armv8-M Mainline variant.
 *
 * \param[in] partition_id    Partition owning the handler.
 * \param[in] unpriv_handler  Address of the unprivileged handler.
 * \param[in] irq_signal      Signal associated with the IRQ.
 * \param[in] irq_line        IRQ line number.
 *
 * All four arguments are consumed by the TFM_SVC_DEPRIV_REQ handler via the
 * exception frame; the structure mirrors tfm_core_sfn_request() above.
 */
__attribute__((section("SFN"), naked))
void priv_irq_handler_main(uint32_t partition_id, uint32_t unpriv_handler,
                           uint32_t irq_signal, uint32_t irq_line)
{
    __ASM(
        /* Save the callee saved registers */
        "PUSH   {r4-r12, lr}        \n"
        /* Request SVC to configure environment for the unpriv IRQ handler */
        "SVC    %[SVC_REQ]          \n"
        /* Clear the callee saved registers to prevent information leak */
        "MOV    r4, #0              \n"
        "MOV    r5, r4              \n"
        "MOV    r6, r4              \n"
        "MOV    r7, r4              \n"
        "MOV    r8, r4              \n"
        "MOV    r9, r4              \n"
        "MOV    r10, r4             \n"
        "MOV    r11, r4             \n"
        /* Branch to the unprivileged handler */
        "BLX    lr                  \n"
        /* Request SVC to reconfigure the environment of the interrupted
         * partition
         */
        "SVC    %[SVC_RET]          \n"
        /* Restore callee saved registers and return */
        "POP    {r4-r12, pc}        \n"
        : : [SVC_REQ] "I" (TFM_SVC_DEPRIV_REQ)
          , [SVC_RET] "I" (TFM_SVC_DEPRIV_RET)
        );
}
152#elif defined(__ARM_ARCH_8M_BASE__)
/**
 * \brief Trampoline for calling a secure service function (SFN) --
 *        Armv8-M Baseline variant.
 *
 * \param[in] desc_ptr  Descriptor of the requested service call; consumed by
 *                      the TFM_SVC_SFN_REQUEST handler via the stacked r0.
 *
 * \return Value left in r0 when the trampoline returns.
 *
 * Same flow as the Mainline version above, but Baseline (Thumb-1 subset)
 * PUSH/POP can only address r0-r7 (plus lr/pc), so the high registers
 * r8-r12 are staged through r4-r7 on the way to and from the stack.
 */
__attribute__((section("SFN"), naked))
int32_t tfm_core_sfn_request(const struct tfm_sfn_req_s *desc_ptr)
{
    __ASM volatile(
        /* Save lr, low callee-saved regs, then r8-r12 staged via r4-r7 */
        "PUSH   {lr}                \n"
        "PUSH   {r4-r7}             \n"
        "MOV    r4, r8              \n"
        "MOV    r5, r9              \n"
        "MOV    r6, r10             \n"
        "MOV    r7, r11             \n"
        "PUSH   {r4-r7}             \n"
        "MOV    r4, r12             \n"
        "PUSH   {r4}                \n"
        /* Request SVC to set up the environment for the service call */
        "SVC    %[SVC_REQ]          \n"
        /* Clear callee-saved registers to prevent information leak */
        "MOVS   r4, #0              \n"
        "MOV    r5, r4              \n"
        "MOV    r6, r4              \n"
        "MOV    r7, r4              \n"
        "MOV    r8, r4              \n"
        "MOV    r9, r4              \n"
        "MOV    r10, r4             \n"
        "MOV    r11, r4             \n"
        /* Call the service function */
        "BLX    lr                  \n"
        /* Request SVC to restore the caller's environment */
        "SVC    %[SVC_RET]          \n"
        /* Restore r12, r8-r11 (staged via r4-r7), then r4-r7 and return */
        "POP    {r4}                \n"
        "MOV    r12, r4             \n"
        "POP    {r4-r7}             \n"
        "MOV    r8, r4              \n"
        "MOV    r9, r5              \n"
        "MOV    r10, r6             \n"
        "MOV    r11, r7             \n"
        "POP    {r4-r7}             \n"
        "POP    {pc}                \n"
        : : [SVC_REQ] "I" (TFM_SVC_SFN_REQUEST),
            [SVC_RET] "I" (TFM_SVC_SFN_RETURN)
        );
}
190
/**
 * \brief Trampoline for running a partition's unprivileged IRQ handler --
 *        Armv8-M Baseline variant.
 *
 * \param[in] partition_id    Partition owning the handler.
 * \param[in] unpriv_handler  Address of the unprivileged handler.
 * \param[in] irq_signal      Signal associated with the IRQ.
 * \param[in] irq_line        IRQ line number.
 *
 * All four arguments are consumed by the TFM_SVC_DEPRIV_REQ handler via the
 * exception frame. Baseline PUSH/POP can only address r0-r7 (plus lr/pc),
 * so r8-r12 are staged through r4-r7, as in tfm_core_sfn_request() above.
 */
__attribute__((section("SFN"), naked))
void priv_irq_handler_main(uint32_t partition_id, uint32_t unpriv_handler,
                           uint32_t irq_signal, uint32_t irq_line)
{
    __ASM(
        /* Save the callee saved registers */
        "PUSH   {r4-r7, lr}         \n"
        "MOV    r4, r8              \n"
        "MOV    r5, r9              \n"
        "MOV    r6, r10             \n"
        "MOV    r7, r11             \n"
        "PUSH   {r4-r7}             \n"
        "MOV    r4, r12             \n"
        "PUSH   {r4}                \n"
        /* Request SVC to configure environment for the unpriv IRQ handler */
        "SVC    %[SVC_REQ]          \n"
        /* Clear the callee saved registers to prevent information leak */
        "MOVS   r4, #0              \n"
        "MOV    r5, r4              \n"
        "MOV    r6, r4              \n"
        "MOV    r7, r4              \n"
        "MOV    r8, r4              \n"
        "MOV    r9, r4              \n"
        "MOV    r10, r4             \n"
        "MOV    r11, r4             \n"
        /* Branch to the unprivileged handler */
        "BLX    lr                  \n"
        /* Request SVC to reconfigure the environment of the interrupted
         * partition
         */
        "SVC    %[SVC_RET]          \n"
        /* Restore callee saved registers and return */
        "POP    {r4}                \n"
        "MOV    r12, r4             \n"
        "POP    {r4-r7}             \n"
        "MOV    r8, r4              \n"
        "MOV    r9, r5              \n"
        "MOV    r10, r6             \n"
        "MOV    r11, r7             \n"
        "POP    {r4-r7, pc}         \n"
        : : [SVC_REQ] "I" (TFM_SVC_DEPRIV_REQ)
          , [SVC_RET] "I" (TFM_SVC_DEPRIV_RET)
        );
}
235#endif
236
237#if defined(__ARM_ARCH_8_1M_MAIN__) || \
238 defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__)
/**
 * \brief Prioritise Secure exceptions above Non-secure ones (Armv8-M).
 *
 * Sets AIRCR.PRIS while preserving the other AIRCR fields. AIRCR writes are
 * only accepted when VECTKEY holds the key 0x05FA; VECTKEYSTAT reads back as
 * 0xFA05, so inverting the read value within the VECTKEYSTAT mask produces
 * the required write key in place (see the Armv8-M ARM, AIRCR description).
 */
void tfm_arch_prioritize_secure_exception(void)
{
    uint32_t VECTKEY;
    SCB_Type *scb = SCB;
    uint32_t AIRCR;

    /* Set PRIS flag in AIRCR */
    AIRCR = scb->AIRCR;
    /* ~0xFA05 (VECTKEYSTAT) == 0x05FA, the key required for the write */
    VECTKEY = (~AIRCR & SCB_AIRCR_VECTKEYSTAT_Msk);
    scb->AIRCR = SCB_AIRCR_PRIS_Msk |
                 VECTKEY |
                 (AIRCR & ~SCB_AIRCR_VECTKEY_Msk);
}
252#elif defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
253 defined(__ARM_ARCH_7EM__)
/**
 * \brief No-op variant for Armv6-M/Armv7-M.
 *
 * These architectures have no Security Extension (no AIRCR.PRIS bit), so
 * there is nothing to configure; the empty body keeps the arch API uniform.
 */
void tfm_arch_prioritize_secure_exception(void)
{
}
257#endif
258
/**
 * \brief Give fault exceptions the highest configurable priority.
 */
void tfm_arch_set_fault_priority(void)
{
    /* For Armv8-M, set fault priority to less than 0x80 (with AIRCR.PRIS set)
     * to prevent Non-secure from pre-empting faults that may indicate
     * corruption of Secure state. For Armv7-M, also set fault priority to the
     * highest for consistent behaviour.
     */
#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__) || \
    defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
    /* MemManage and BusFault are configurable on Mainline profiles only */
    NVIC_SetPriority(MemoryManagement_IRQn, 0);
    NVIC_SetPriority(BusFault_IRQn, 0);
#endif
#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
    /* SecureFault exists only with the Armv8-M Security Extension */
    NVIC_SetPriority(SecureFault_IRQn, 0);
#endif
}
275
/**
 * \brief Configure Secure access to the FPU and Non-secure FPU permissions.
 */
void tfm_arch_configure_coprocessors(void)
{
#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
    /* Configure Secure access to the FPU only if the secure image is being
     * built with the FPU in use. This avoids introducing extra interrupt
     * latency when the FPU is not used by the SPE.
     */
#if defined (__FPU_USED) && (__FPU_USED == 1U)
    /* Enable Secure privileged and unprivileged access to the FP Extension */
    SCB->CPACR |= (3U << 10U*2U)     /* enable CP10 full access */
                  | (3U << 11U*2U);  /* enable CP11 full access */

#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
    /* If the SPE will ever use the floating-point registers for sensitive data,
     * then FPCCR.TS, FPCCR.CLRONRET and FPCCR.CLRONRETS must be set at
     * initialisation and not changed again afterwards.
     */
    FPU->FPCCR |= FPU_FPCCR_TS_Msk
                  | FPU_FPCCR_CLRONRET_Msk
                  | FPU_FPCCR_CLRONRETS_Msk;
#endif
#endif

#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
    /* Permit Non-secure access to the Floating-point Extension.
     * Note: It is still necessary to set CPACR_NS to enable the FP Extension in
     * the NSPE. This configuration is left to NS privileged software.
     */
    SCB->NSACR |= SCB_NSACR_CP10_Msk | SCB_NSACR_CP11_Msk;
#endif
#endif
}
308
Summer Qin90602de2020-08-04 10:23:39 +0800309#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
/**
 * \brief SVC entry for Armv8-M Mainline.
 *
 * Builds the arguments for tfm_core_svc_handler(svc_args, lr, msp):
 * r0 = exception frame pointer (PSP if the SVC came from thread mode, MSP
 * otherwise, selected by EXC_RETURN bit 2), r1 = EXC_RETURN, r2 = MSP.
 * The handler's uint32_t return value is branched to with "BX r0" --
 * presumably the EXC_RETURN to leave the exception with.
 */
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
        "MRS    r2, MSP                 \n"
        /* Check store SP in thread mode to r0 */
        "TST    lr, #4                  \n"  /* EXC_RETURN bit 2: SP in use */
        "ITE    EQ                      \n"
        "MOVEQ  r0, r2                  \n"  /* Handler mode: frame on MSP */
        "MRSNE  r0, PSP                 \n"  /* Thread mode: frame on PSP */
        "MOV    r1, lr                  \n"
        "BL     tfm_core_svc_handler    \n"
        "BX     r0                      \n"
        );
}
324#elif defined(__ARM_ARCH_8M_BASE__)
/**
 * \brief SVC entry for Armv8-M Baseline.
 *
 * Same argument contract as the Mainline variant: r0 = exception frame
 * (PSP for thread mode, MSP for handler mode), r1 = EXC_RETURN, r2 = MSP.
 * Baseline Thumb-1 has no IT blocks, so the stack-pointer selection is done
 * with a branch over the local "handler" label instead.
 */
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
        "MRS    r2, MSP                 \n"
        "MOVS   r1, #4                  \n"
        "MOV    r3, lr                  \n"
        "MOV    r0, r2                  \n"  /* Default: frame on MSP */
        "TST    r1, r3                  \n"  /* Test EXC_RETURN bit 2 */
        "BEQ    handler                 \n"
        /* If SVC was made from thread mode, overwrite r0 with PSP */
        "MRS    r0, PSP                 \n"
        "handler:                       \n"
        "MOV    r1, lr                  \n"
        "BL     tfm_core_svc_handler    \n"
        "BX     r0                      \n"
        );
}
342#elif defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
343 defined(__ARM_ARCH_7EM__)
/**
 * \brief SVC entry for Armv6-M/Armv7-M.
 *
 * Only SVCs raised from thread mode are processed; an SVC raised from
 * handler mode returns immediately without calling tfm_core_svc_handler().
 * NOTE(review): on the thread-mode path r2 (the msp argument of
 * tfm_core_svc_handler) is never initialised here -- presumably unused by
 * the v6-M/v7-M handler; confirm against the SPM implementation.
 */
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
        "MOVS   r0, #4                  \n" /* Check store SP in thread mode to r0 */
        "MOV    r1, lr                  \n"
        "TST    r0, r1                  \n" /* Test EXC_RETURN bit 2 */
        "BEQ    handler                 \n"
        "MRS    r0, PSP                 \n" /* Coming from thread mode */
        "B      sp_stored               \n"
        "handler:                       \n"
        "BX     lr                      \n" /* Coming from handler mode */
        "sp_stored:                     \n"
        "MOV    r1, lr                  \n"
        "BL     tfm_core_svc_handler    \n"
        "BX     r0                      \n"
        );
}
361#endif
Jamie Foxb78795a2020-09-28 20:39:06 +0100362
/**
 * \brief HardFault handler: park the core, never return.
 */
__attribute__((naked)) void HardFault_Handler(void)
{
    /* A HardFault may indicate corruption of secure state, so it is essential
     * that Non-secure code does not regain control after one is raised.
     * Returning from this exception could allow a pending NS exception to be
     * taken, so the current solution is not to return.
     */
    __ASM volatile("b .");  /* branch-to-self: spin forever */
}
372
/**
 * \brief MemManage fault handler: park the core, never return.
 */
__attribute__((naked)) void MemManage_Handler(void)
{
    /* A MemManage fault may indicate corruption of secure state, so it is
     * essential that Non-secure code does not regain control after one is
     * raised. Returning from this exception could allow a pending NS exception
     * to be taken, so the current solution is not to return.
     */
    __ASM volatile("b .");  /* branch-to-self: spin forever */
}
382
/**
 * \brief BusFault handler: park the core, never return.
 */
__attribute__((naked)) void BusFault_Handler(void)
{
    /* A BusFault may indicate corruption of secure state, so it is essential
     * that Non-secure code does not regain control after one is raised.
     * Returning from this exception could allow a pending NS exception to be
     * taken, so the current solution is not to return.
     */
    __ASM volatile("b .");  /* branch-to-self: spin forever */
}
392
/**
 * \brief SecureFault handler: park the core, never return.
 *
 * SecureFault is only raised on Armv8-M with the Security Extension;
 * defining the handler unconditionally is harmless on other profiles.
 */
__attribute__((naked)) void SecureFault_Handler(void)
{
    /* A SecureFault may indicate corruption of secure state, so it is essential
     * that Non-secure code does not regain control after one is raised.
     * Returning from this exception could allow a pending NS exception to be
     * taken, so the current solution is not to return.
     */
    __ASM volatile("b .");  /* branch-to-self: spin forever */
}