blob: b4e7a7abe9e9341263221a00efe788cc4e628391 [file] [log] [blame]
Achin Gupta7c88f3f2014-02-18 18:09:12 +00001/*
Achin Gupta4a8bfdb2021-10-04 20:13:36 +01002 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
Achin Gupta7c88f3f2014-02-18 18:09:12 +00003 *
dp-arm82cb2c12017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta7c88f3f2014-02-18 18:09:12 +00005 */
6
Masahiro Yamadad9743012020-01-17 13:45:14 +09007#include <platform_def.h>
8
Achin Gupta7c88f3f2014-02-18 18:09:12 +00009#include <arch.h>
Andrew Thoelke0a30cf52014-03-18 13:46:55 +000010#include <asm_macros.S>
Antonio Nino Diaz09d40e02018-12-14 00:18:21 +000011#include <bl32/tsp/tsp.h>
12#include <lib/xlat_tables/xlat_tables_defs.h>
Achin Gupta4a8bfdb2021-10-04 20:13:36 +010013#include <smccc_helpers.h>
Antonio Nino Diaz09d40e02018-12-14 00:18:21 +000014
Dan Handleyda0af782014-08-01 17:58:27 +010015#include "../tsp_private.h"
Achin Gupta7c88f3f2014-02-18 18:09:12 +000016
17
18 .globl tsp_entrypoint
Andrew Thoelke399fb082014-05-20 21:43:27 +010019 .globl tsp_vector_table
Achin Gupta4a8bfdb2021-10-04 20:13:36 +010020#if SPMC_AT_EL3
21 .globl tsp_cpu_on_entry
22#endif
Achin Gupta7c88f3f2014-02-18 18:09:12 +000023
Soby Mathew239b04f2014-05-09 20:49:17 +010024
25
	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then issue
	 * the SMC to hand control back to the TSPD.
	 * NOTE: x0 holds the base pointer, so the
	 * SMC_ARG0 pair must be loaded LAST — it
	 * overwrites x0 itself. Do not reorder.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #SMC_ARG6]
	ldp	x4, x5, [x0, #SMC_ARG4]
	ldp	x2, x3, [x0, #SMC_ARG2]
	ldp	x0, x1, [x0, #SMC_ARG0]	/* clobbers the base pointer; must be last */
	smc	#0
	.endm
38
	/*
	 * Save the live EL1 exception-return state (ELR_EL1, SPSR_EL1)
	 * plus x30 (LR) and x18 on the stack, using \reg1 and \reg2 as
	 * scratch. Must be paired with restore_eret_context using the
	 * same two registers. Pushes 32 bytes in two 16-byte stack
	 * adjustments, keeping SP 16-byte aligned as AArch64 requires.
	 * NOTE(review): x18 is the platform register in AAPCS64 —
	 * presumably preserved here so C code run in between may use it;
	 * confirm against the TF-A coding conventions.
	 */
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm
45
	/*
	 * Inverse of save_eret_context: pop x30/x18 and the saved
	 * ELR_EL1/SPSR_EL1 values (in that order, mirroring the pushes)
	 * and write them back to the system registers so a pending
	 * exception return can complete. \reg1/\reg2 are scratch and
	 * need not be the same registers used at save time, but the
	 * two macros' stack layouts must stay in sync.
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
52
/*
 * void tsp_entrypoint(void);
 *
 * Cold-boot entry point of the Test Secure Payload (BL32), entered from
 * the TSPD with caches/MMU state as set up by earlier boot stages on the
 * primary core. Performs PIE fixup, exception-vector and SCTLR_EL1
 * configuration, cache maintenance on the image's RW memory, BSS/coherent
 * memory zeroing, stack setup and C-runtime initialisation, then calls
 * tsp_main and reports completion to the TSPD via an SMC. Does not return.
 */
func tsp_entrypoint _align=3

#if ENABLE_PIE
	/*
	 * ------------------------------------------------------------
	 * If PIE is enabled fixup the Global descriptor Table only
	 * once during primary core cold boot path.
	 *
	 * Compile time base address, required for fixup, is calculated
	 * using "pie_fixup" label present within first page.
	 * ------------------------------------------------------------
	 */
pie_fixup:
	ldr	x0, =pie_fixup			/* link-time address of this label */
	and	x0, x0, #~(PAGE_SIZE_MASK)	/* round down to page base */
	mov_imm	x1, (BL32_LIMIT - BL32_BASE)	/* image size */
	add	x1, x1, x0			/* x1 = end of region to fix up */
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb					/* vectors must be live before unmasking */

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
#if ENABLE_BTI
	/* Enable PAC branch type compatibility */
	bic	x0, x0, #(SCTLR_BT0_BIT | SCTLR_BT1_BIT)
#endif
	bic	x0, x0, #SCTLR_DSSBS_BIT	/* disallow speculative loads/stores */
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage. If PIE
	 * is enabled however, RO sections including the
	 * GOT may be modified during pie fixup.
	 * Therefore, to be on the safe side, invalidate
	 * the entire image region if PIE is enabled.
	 * ---------------------------------------------
	 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
	adrp	x0, __TEXT_START__
	add	x0, x0, :lo12:__TEXT_START__
#else
	adrp	x0, __RO_START__
	add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
	adrp	x0, __RW_START__
	add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
	adrp	x1, __RW_END__
	add	x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0			/* x1 = length in bytes */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 * - the .bss section;
	 * - the coherent memory section.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0			/* x1 = BSS size */
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0			/* x1 = coherent region size */
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0				/* forward tsp_main's result */
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The SMC above is not expected to return; spin if it does */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000208
Andrew Thoelke399fb082014-05-20 21:43:27 +0100209
	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * NOTE(review): each slot is a single branch
	 * instruction and the TSPD indexes this table
	 * by fixed offsets, so the order below is an
	 * ABI with the TSPD — do not reorder or insert
	 * entries without updating the TSPD's matching
	 * vector structure.
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
Andrew Thoelke399fb082014-05-20 21:43:27 +0100226
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * Control returns to the TSPD via the SMC made
	 * by restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main	/* returns pointer to SMC args in x0 */
	restore_args_call_smc
endfunc tsp_cpu_off_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000241
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping. Control
	 * returns to the TSPD via the SMC made by
	 * restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main	/* returns pointer to SMC args in x0 */
	restore_args_call_smc
endfunc tsp_system_off_entry
Juan Castillod5f13092014-08-12 11:17:06 +0100253
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping. Control
	 * returns to the TSPD via the SMC made by
	 * restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main	/* returns pointer to SMC args in x0 */
	restore_args_call_smc
endfunc tsp_system_reset_entry
Juan Castillod5f13092014-08-12 11:17:06 +0100265
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * This is a warm-boot subset of tsp_entrypoint:
	 * no PIE fixup, cache invalidation or BSS
	 * zeroing is repeated here.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb				/* vectors must be live before unmasking */

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
	mov	x0, #0			/* flags argument to the platform hook */
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main		/* returns pointer to SMC args in x0 */
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000336
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state. Control returns to the TSPD
	 * via the SMC made by restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main	/* returns pointer to SMC args in x0 */
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000350
	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling a S-EL1
	 * Interrupt which was triggered while executing
	 * in normal world. 'x0' contains a magic number
	 * which indicates this. TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func tsp_sel1_intr_entry
#if DEBUG
	/* Debug-only sanity check of the magic number passed in x0 */
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * the cases switch to EL3 using SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled (handler returned 0) */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted; any other
	 * return value is a fatal protocol violation */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3	/* must mirror the save_eret_context above */
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry
Achin Gupta6cf89022014-05-09 11:42:56 +0100419
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request. Control returns to
	 * the TSPD via the SMC made by
	 * restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main	/* returns pointer to SMC args in x0 */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000437
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * Interrupts remain masked for the duration of
	 * the handler (contrast with
	 * tsp_yield_smc_entry below).
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler		/* returns pointer to SMC args in x0 */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
Achin Gupta7c88f3f2014-02-18 18:09:12 +0000450
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler by unmasking IRQ/FIQ
	 * around the call, and re-mask them before
	 * returning to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* allow preemption */
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* mask again before SMC */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry
Douglas Raillard3df60122016-11-24 15:43:19 +0000467
468 /*---------------------------------------------------------------------
David Cunado16292f52017-04-05 11:34:03 +0100469 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
Douglas Raillard3df60122016-11-24 15:43:19 +0000470 * SMC. It could be on behalf of non-secure world or because a CPU
471 * suspend/CPU off request needs to abort the preempted SMC.
472 * --------------------------------------------------------------------
473 */
David Cunado16292f52017-04-05 11:34:03 +0100474func tsp_abort_yield_smc_entry
Douglas Raillard3df60122016-11-24 15:43:19 +0000475
476 /*
477 * Exceptions masking is already done by the TSPD when entering this
478 * hook so there is no need to do it here.
479 */
480
481 /* Reset the stack used by the pre-empted SMC */
482 bl plat_set_my_stack
483
484 /*
485 * Allow some cleanup such as releasing locks.
486 */
487 bl tsp_abort_smc_handler
488
489 restore_args_call_smc
490
491 /* Should never reach here */
492 bl plat_panic_handler
David Cunado16292f52017-04-05 11:34:03 +0100493endfunc tsp_abort_yield_smc_entry