/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include "arch.h"
#include "tfm_secure_api.h"
#include "tfm/tfm_spm_services.h"

nsfptr_t ns_entry;

void jump_to_ns_code(void)
{
    /* Calls the non-secure Reset_Handler to jump to the non-secure binary */
    ns_entry();
}

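/*
 * The functions below are thin SVC veneers. They are 'naked', so the compiler
 * emits no prologue/epilogue and the C arguments stay in r0-r3 as laid down by
 * the AAPCS. The SVC instruction traps into the SPM's SVC_Handler, which runs
 * the requested service in handler mode and is expected to place any result in
 * the r0 slot of the stacked exception frame, so it is already in r0 when
 * 'BX LR' returns to the caller.
 */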
__attribute__((naked))
int32_t tfm_core_get_caller_client_id(int32_t *caller_client_id)
{
    __ASM volatile(
        "SVC %0\n"
        "BX LR\n"
        : : "I" (TFM_SVC_GET_CALLER_CLIENT_ID));
}

__attribute__((naked))
static int32_t tfm_spm_request(int32_t request_type)
{
    __ASM volatile(
        "SVC %0\n"
        "BX lr\n"
        : : "I" (TFM_SVC_SPM_REQUEST));
}

int32_t tfm_spm_request_reset_vote(void)
{
    return tfm_spm_request((int32_t)TFM_SPM_REQUEST_RESET_VOTE);
}

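/*
 * The IRQ control veneers below pass the partition's psa_signal_t through in
 * r0 untouched; translating the signal to its IRQ line and touching the NVIC
 * is left to the SPM side of the SVC, not to this file.
 */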
__attribute__((naked))
void tfm_enable_irq(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_ENABLE_IRQ));
}

__attribute__((naked))
void tfm_disable_irq(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_DISABLE_IRQ));
}

__attribute__((naked))
static psa_signal_t psa_wait_internal(psa_signal_t signal_mask,
                                      uint32_t timeout)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_PSA_WAIT));
}

psa_signal_t psa_wait(psa_signal_t signal_mask, uint32_t timeout)
{
    /* FIXME: By using the 'WFI' instruction this function blocks until an
     * interrupt happens. It is necessary to do this here because
     * tfm_core_psa_wait runs with the priority of the SVC and therefore
     * cannot be interrupted, so waiting inside it for the required interrupt
     * is not an option.
     */
    psa_signal_t actual_signal_mask;

    while (1) {
        actual_signal_mask = psa_wait_internal(signal_mask, timeout);
        if ((actual_signal_mask & signal_mask) != 0) {
            return actual_signal_mask;
        }
        __WFI();
    }
}

__attribute__((naked))
void psa_eoi(psa_signal_t irq_signal)
{
    __ASM("SVC %0\n"
          "BX LR\n"
          : : "I" (TFM_SVC_PSA_EOI));
}

#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
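/*
 * tfm_core_sfn_request(): trampoline used to call a secure function.
 * The caller's callee-saved registers (r4-r12, lr) are preserved on the
 * stack, SVC_REQ traps to the SPM to set up the target partition's context,
 * r4-r11 are cleared so no caller state leaks into the partition, 'BLX lr'
 * branches to the entry the SVC handler is expected to have set up, SVC_RET
 * traps back to the SPM to restore the interrupted context, and the final
 * POP returns to the original caller.
 */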
__attribute__((section("SFN"), naked))
int32_t tfm_core_sfn_request(const struct tfm_sfn_req_s *desc_ptr)
{
    __ASM volatile(
        "PUSH {r4-r12, lr} \n"
        "SVC %[SVC_REQ] \n"
        "MOV r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        "BLX lr \n"
        "SVC %[SVC_RET] \n"
        "POP {r4-r12, pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_SFN_REQUEST),
            [SVC_RET] "I" (TFM_SVC_SFN_RETURN)
        );
}

__attribute__((section("SFN"), naked))
void priv_irq_handler_main(uint32_t partition_id, uint32_t unpriv_handler,
                           uint32_t irq_signal, uint32_t irq_line)
{
    __ASM(
        /* Save the callee-saved registers */
        "PUSH {r4-r12, lr} \n"
        /* Request SVC to configure environment for the unpriv IRQ handler */
        "SVC %[SVC_REQ] \n"
        /* Clear the callee-saved registers to prevent information leak */
        "MOV r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        /* Branch to the unprivileged handler */
        "BLX lr \n"
        /* Request SVC to reconfigure the environment of the interrupted
         * partition
         */
        "SVC %[SVC_RET] \n"
        /* Restore the callee-saved registers and return */
        "POP {r4-r12, pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_DEPRIV_REQ)
          , [SVC_RET] "I" (TFM_SVC_DEPRIV_RET)
        );
}
#elif defined(__ARM_ARCH_8M_BASE__)
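/*
 * Armv8-M Baseline variant: the 16-bit Thumb PUSH/POP encodings only cover
 * r0-r7 plus lr/pc, so r8-r12 have to be staged through the low registers
 * with MOVs, and writing an immediate needs MOVS. The call sequence is
 * otherwise the same as the Mainline version above.
 */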
__attribute__((section("SFN"), naked))
int32_t tfm_core_sfn_request(const struct tfm_sfn_req_s *desc_ptr)
{
    __ASM volatile(
        "PUSH {lr} \n"
        "PUSH {r4-r7} \n"
        "MOV r4, r8 \n"
        "MOV r5, r9 \n"
        "MOV r6, r10 \n"
        "MOV r7, r11 \n"
        "PUSH {r4-r7} \n"
        "MOV r4, r12 \n"
        "PUSH {r4} \n"
        "SVC %[SVC_REQ] \n"
        "MOVS r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        "BLX lr \n"
        "SVC %[SVC_RET] \n"
        "POP {r4} \n"
        "MOV r12, r4 \n"
        "POP {r4-r7} \n"
        "MOV r8, r4 \n"
        "MOV r9, r5 \n"
        "MOV r10, r6 \n"
        "MOV r11, r7 \n"
        "POP {r4-r7} \n"
        "POP {pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_SFN_REQUEST),
            [SVC_RET] "I" (TFM_SVC_SFN_RETURN)
        );
}

__attribute__((section("SFN"), naked))
void priv_irq_handler_main(uint32_t partition_id, uint32_t unpriv_handler,
                           uint32_t irq_signal, uint32_t irq_line)
{
    __ASM(
        /* Save the callee-saved registers */
        "PUSH {r4-r7, lr} \n"
        "MOV r4, r8 \n"
        "MOV r5, r9 \n"
        "MOV r6, r10 \n"
        "MOV r7, r11 \n"
        "PUSH {r4-r7} \n"
        "MOV r4, r12 \n"
        "PUSH {r4} \n"
        /* Request SVC to configure environment for the unpriv IRQ handler */
        "SVC %[SVC_REQ] \n"
        /* Clear the callee-saved registers to prevent information leak */
        "MOVS r4, #0 \n"
        "MOV r5, r4 \n"
        "MOV r6, r4 \n"
        "MOV r7, r4 \n"
        "MOV r8, r4 \n"
        "MOV r9, r4 \n"
        "MOV r10, r4 \n"
        "MOV r11, r4 \n"
        /* Branch to the unprivileged handler */
        "BLX lr \n"
        /* Request SVC to reconfigure the environment of the interrupted
         * partition
         */
        "SVC %[SVC_RET] \n"
        /* Restore the callee-saved registers and return */
        "POP {r4} \n"
        "MOV r12, r4 \n"
        "POP {r4-r7} \n"
        "MOV r8, r4 \n"
        "MOV r9, r5 \n"
        "MOV r10, r6 \n"
        "MOV r11, r7 \n"
        "POP {r4-r7, pc} \n"
        : : [SVC_REQ] "I" (TFM_SVC_DEPRIV_REQ)
          , [SVC_RET] "I" (TFM_SVC_DEPRIV_RET)
        );
}
#endif

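/*
 * Writes to SCB->AIRCR only take effect when the upper 16 bits contain the
 * VECTKEY value 0x05FA. Reading AIRCR returns VECTKEYSTAT, which is the
 * bitwise complement of that key, so (~AIRCR & VECTKEYSTAT_Msk) reconstructs
 * the key in place for the write below. Setting AIRCR.PRIS de-prioritises
 * Non-secure exceptions relative to Secure ones.
 */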
#if defined(__ARM_ARCH_8_1M_MAIN__) || \
    defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__)
void tfm_arch_prioritize_secure_exception(void)
{
    uint32_t VECTKEY;
    SCB_Type *scb = SCB;
    uint32_t AIRCR;

    /* Set PRIS flag in AIRCR */
    AIRCR = scb->AIRCR;
    VECTKEY = (~AIRCR & SCB_AIRCR_VECTKEYSTAT_Msk);
    scb->AIRCR = SCB_AIRCR_PRIS_Msk |
                 VECTKEY |
                 (AIRCR & ~SCB_AIRCR_VECTKEY_Msk);
}
#elif defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7EM__)
void tfm_arch_prioritize_secure_exception(void)
{
}
#endif

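/*
 * CPACR assigns each coprocessor n a two-bit access field at bits
 * [2n+1:2n]; the value 3 grants both privileged and unprivileged access.
 * CP10 and CP11 together control the Floating-point Extension, which is why
 * both fields are programmed identically below.
 */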
void tfm_arch_configure_coprocessors(void)
{
#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
    /* Configure Secure access to the FPU only if the secure image is being
     * built with the FPU in use. This avoids introducing extra interrupt
     * latency when the FPU is not used by the SPE.
     */
#if defined (__FPU_USED) && (__FPU_USED == 1U)
    /* Enable Secure privileged and unprivileged access to the FP Extension */
    SCB->CPACR |= (3U << 10U*2U)     /* enable CP10 full access */
                  | (3U << 11U*2U);  /* enable CP11 full access */

#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
    /* If the SPE will ever use the floating-point registers for sensitive
     * data, then FPCCR.TS, FPCCR.CLRONRET and FPCCR.CLRONRETS must be set at
     * initialisation and not changed again afterwards.
     */
    FPU->FPCCR |= FPU_FPCCR_TS_Msk
                  | FPU_FPCCR_CLRONRET_Msk
                  | FPU_FPCCR_CLRONRETS_Msk;
#endif
#endif

#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
    /* Permit Non-secure access to the Floating-point Extension.
     * Note: It is still necessary to set CPACR_NS to enable the FP Extension
     * in the NSPE. This configuration is left to NS privileged software.
     */
    SCB->NSACR |= SCB_NSACR_CP10_Msk | SCB_NSACR_CP11_Msk;
#endif
#endif
}

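/*
 * SVC_Handler: bit 2 of EXC_RETURN (held in lr on exception entry) indicates
 * which stack holds the caller's exception frame (0 = MSP, 1 = PSP). Each
 * variant below passes that frame pointer in r0 and EXC_RETURN in r1 to
 * tfm_core_svc_handler(), then branches to the value the C handler returns
 * in r0, which is expected to be the EXC_RETURN code to use for the return.
 */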
#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
    "MRS r2, MSP \n"
    /* Check if the SVC came from thread mode and store the right SP in r0 */
    "TST lr, #4 \n"
    "ITE EQ \n"
    "MOVEQ r0, r2 \n"
    "MRSNE r0, PSP \n"
    "MOV r1, lr \n"
    "BL tfm_core_svc_handler \n"
    "BX r0 \n"
    );
}
#elif defined(__ARM_ARCH_8M_BASE__)
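/*
 * Armv8-M Baseline has no IT blocks, so the MSP/PSP selection is done with an
 * explicit compare-and-branch instead of the ITE sequence used above.
 */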
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
    "MRS r2, MSP \n"
    "MOVS r1, #4 \n"
    "MOV r3, lr \n"
    "MOV r0, r2 \n"
    "TST r1, r3 \n"
    "BEQ handler \n"
    /* If SVC was made from thread mode, overwrite r0 with PSP */
    "MRS r0, PSP \n"
    "handler: \n"
    "MOV r1, lr \n"
    "BL tfm_core_svc_handler \n"
    "BX r0 \n"
    );
}
#elif defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7EM__)
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
    "MOVS r0, #4 \n" /* Check whether the SVC came from thread mode */
    "MOV r1, lr \n"
    "TST r0, r1 \n"
    "BEQ handler \n"
    "MRS r0, PSP \n" /* Coming from thread mode */
    "B sp_stored \n"
    "handler: \n"
    "BX lr \n" /* Coming from handler mode */
    "sp_stored: \n"
    "MOV r1, lr \n"
    "BL tfm_core_svc_handler \n"
    "BX r0 \n"
    );
}
#endif