/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * erratum 813419 of Cortex-A57 or erratum 1286807 of Cortex-A76.
 */
#if ERRATA_A57_813419 || ERRATA_A76_1286807
#define TLB_INVALIDATE(_type) \
	tlbi	_type; \
	dsb	ish; \
	tlbi	_type
#else
#define TLB_INVALIDATE(_type) \
	tlbi	_type
#endif

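/*
 * Illustrative usage sketch (comment only; nothing below is assembled from
 * this header): invalidate all EL3 TLB entries, then synchronise.
 *
 *	TLB_INVALIDATE(alle3)
 *	dsb	ish
 *	isb
 */
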
	/*
	 * Create a stack frame at the start of an assembly function. Will also
	 * add all necessary call frame information (cfi) directives for a
	 * pretty stack trace. This is necessary as there is quite a bit of
	 * flexibility within a stack frame and the stack pointer can move
	 * around throughout the function. If the debugger isn't told where to
	 * find things, it gets lost, gives up and displays nothing. So inform
	 * the debugger of what's where. Anchor the Canonical Frame Address
	 * (CFA; the thing used to track what's where) to the frame pointer as
	 * that's not expected to change in the function body, so no extra
	 * bookkeeping is necessary, allowing free movement of the sp.
	 *
	 * _frame_size: requested space for the caller to use. Must be a
	 * multiple of 16 for stack pointer alignment.
	 */
	.macro	func_prologue _frame_size=0
	.if \_frame_size & 0xf
	  .error "frame_size must have stack pointer alignment (multiple of 16)"
	.endif

	/* put frame record at top of frame */
	stp	x29, x30, [sp, #-0x10]!
	mov	x29, sp
	.if \_frame_size
	  sub	sp, sp, #\_frame_size
	.endif

	/* point CFA to start of frame record, i.e. x29 + 0x10 */
	.cfi_def_cfa	x29, 0x10
	/* inform it about x29, x30 locations */
	.cfi_offset	x30, -0x8
	.cfi_offset	x29, -0x10
	.endm

	/*
	 * Clear stack frame at the end of an assembly function.
	 *
	 * _frame_size: the value passed to func_prologue
	 */
	.macro	func_epilogue _frame_size=0
	/* remove requested space */
	.if \_frame_size
	  add	sp, sp, #\_frame_size
	.endif
	ldp	x29, x30, [sp], #0x10
	.endm
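
	/*
	 * Illustrative usage sketch (comment only; "my_func" is a
	 * hypothetical function, not part of this file):
	 *
	 *	func my_func
	 *		func_prologue	16
	 *		stp	x19, x20, [sp]	// callee-saved temporaries
	 *		...
	 *		ldp	x19, x20, [sp]
	 *		func_epilogue	16
	 *		ret
	 *	endfunc my_func
	 */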

	/*
	 * Compute the smallest data cache line size, in bytes, from
	 * CTR_EL0.DminLine and place it in 'reg', using 'tmp' as scratch.
	 */
	.macro	dcache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	ubfx	\tmp, \tmp, #16, #4
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm

	/*
	 * Compute the smallest instruction cache line size, in bytes, from
	 * CTR_EL0.IminLine and place it in 'reg', using 'tmp' as scratch.
	 */
	.macro	icache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	and	\tmp, \tmp, #0xf
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm
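
	/*
	 * Illustrative usage sketch (comment only): clean the data cache
	 * line that contains the address held in x0.
	 *
	 *	dcache_line_size x1, x2
	 *	sub	x2, x1, #1
	 *	bic	x0, x0, x2	// align x0 down to its cache line
	 *	dc	cvac, x0
	 *	dsb	sy
	 */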

	/*
	 * Check whether the current exception was caused by an AArch64 SMC;
	 * if not, branch to 'label'. Clobbers x0.
	 */
	.macro	smc_check label
	mrs	x0, esr_el3
	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x0, #EC_AARCH64_SMC
	b.ne	$label
	.endm

	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 2KB boundary, as required by the ARMv8 architecture.
	 * Use zero bytes as the fill value in the padding bytes so that they
	 * decode as illegal AArch64 instructions. This increases security,
	 * robustness and potentially facilitates debugging.
	 */
	.macro vector_base  label, section_name=.vectors
	.section \section_name, "ax"
	.align 11, 0
	\label:
	.endm

	/*
	 * Create an entry in the exception vector table, enforcing it is
	 * aligned on a 128-byte boundary, as required by the ARMv8
	 * architecture. Use zero bytes as the fill value in the padding bytes
	 * so that they decode as illegal AArch64 instructions. This increases
	 * security, robustness and potentially facilitates debugging.
	 */
	.macro vector_entry  label, section_name=.vectors
	.cfi_sections .debug_frame
	.section \section_name, "ax"
	.align 7, 0
	.type \label, %function
	.cfi_startproc
	\label:
	.endm

	/*
	 * Pad the current exception vector entry out to its full size, which
	 * is always 32 instructions (128 bytes). If the entry contains more
	 * than 32 instructions, an assembler error is emitted.
	 */
	.macro end_vector_entry label
	.cfi_endproc
	.fill	\label + (32 * 4) - .
	.endm
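
	/*
	 * Illustrative usage sketch (comment only; "my_vectors" and
	 * "sync_exception_sp_el0" are hypothetical names):
	 *
	 *	vector_base my_vectors
	 *
	 *	vector_entry sync_exception_sp_el0
	 *		b	sync_exception_sp_el0
	 *	end_vector_entry sync_exception_sp_el0
	 */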

	/*
	 * This macro calculates the base address of the current CPU's MP
	 * stack using the plat_my_core_pos() index, the name of the stack
	 * storage and the size of each stack.
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	adrp	x2, (\_name + \_size)
	add	x2, x2, :lo12:(\_name + \_size)
	mov	x1, #\_size
	madd	x0, x0, x1, x2
	.endm

	/*
	 * This macro calculates the base address of a UP stack using the
	 * name of the stack storage and the size of the stack.
	 * Out: X0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	adrp	x0, (\_name + \_size)
	add	x0, x0, :lo12:(\_name + \_size)
	.endm
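
	/*
	 * Illustrative usage sketch (comment only; "platform_normal_stacks"
	 * and PLATFORM_STACK_SIZE are assumed to be provided by the
	 * platform):
	 *
	 *	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, x0
	 */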

	/*
	 * Helper macro to generate the best mov/movk combinations according
	 * to the value to be moved. The 16 bits from '_shift' are tested
	 * and, if not zero, they are moved into '_reg' without affecting
	 * other bits.
	 */
	.macro _mov_imm16 _reg, _val, _shift
		.if (\_val >> \_shift) & 0xffff
			.if (\_val & (1 << \_shift - 1))
				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
			.else
				mov	\_reg, \_val & (0xffff << \_shift)
			.endif
		.endif
	.endm

	/*
	 * Helper macro to load arbitrary values into 32 or 64-bit registers,
	 * generating the best mov/movk combinations. Many base addresses are
	 * 64KB aligned; in that case the macro avoids updating bits 15:0.
	 */
	.macro mov_imm _reg, _val
		.if (\_val) == 0
			mov	\_reg, #0
		.else
			_mov_imm16	\_reg, (\_val), 0
			_mov_imm16	\_reg, (\_val), 16
			_mov_imm16	\_reg, (\_val), 32
			_mov_imm16	\_reg, (\_val), 48
		.endif
	.endm
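
	/*
	 * Illustrative usage sketch (comment only): the 64KB-aligned value
	 * below assembles to a single mov, while an arbitrary 64-bit value
	 * expands to up to four mov/movk instructions.
	 *
	 *	mov_imm	x0, 0x40010000
	 *	mov_imm	x1, 0x123456789abcdef0
	 */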

	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use a 'bl' instruction to jump rather
	 * than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro
	 * expansion happens to be the last location in a function, that'll
	 * cause the LR to point to a location beyond the function, thereby
	 * misleading the debugger's back trace. We therefore insert a 'nop'
	 * after the function call for debug builds, unless the 'skip_nop'
	 * parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm
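
	/*
	 * Illustrative usage sketch (comment only): jump to a function that
	 * never returns.
	 *
	 *	no_ret	plat_panic_handler
	 */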

	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
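
	/*
	 * Illustrative usage sketch (comment only; "my_lock" is a
	 * hypothetical symbol):
	 *
	 *	define_asm_spinlock my_lock
	 */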

	/*
	 * Helper macro to read a system register's value into x0.
	 */
	.macro read reg:req
#if ENABLE_BTI
	bti	j
#endif
	mrs	x0, \reg
	ret
	.endm

	/*
	 * Helper macro to write the value in x1 to a system register.
	 */
	.macro write reg:req
#if ENABLE_BTI
	bti	j
#endif
	msr	\reg, x1
	ret
	.endm
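
	/*
	 * Illustrative usage sketch (comment only; the label names are
	 * hypothetical): generate tiny accessors that a jump table can
	 * branch to, one per system register.
	 *
	 *	read_actlr_el1:		read	actlr_el1
	 *	write_actlr_el1:	write	actlr_el1
	 */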

	/*
	 * The "sb" instruction was introduced relatively late into the
	 * architecture, so not all toolchains understand it. Some refuse to
	 * assemble it unless a supported processor is specified on the build
	 * command line. Use sb's system register encoding to work around
	 * this; the sb execution is already guarded by a feature flag.
	 */

	.macro sb_barrier_insn
	msr	SYSREG_SB, xzr
	.endm

	/* PSB CSYNC: profiling synchronization barrier */
	.macro psb_csync
	hint	#17 /* use the hint synonym for compatibility */
	.endm

	/* TSB CSYNC: trace synchronization barrier */
	.macro tsb_csync
	hint	#18 /* use the hint synonym for compatibility */
	.endm

	/*
	 * Macro for using the speculation barrier instruction introduced by
	 * FEAT_SB, if it's enabled.
	 */
	.macro speculation_barrier
#if ENABLE_FEAT_SB
	sb_barrier_insn
#else
	dsb	sy
	isb
#endif
	.endm

	/*
	 * Macro for mitigating against speculative execution beyond ERET.
	 * Uses the speculation barrier instruction introduced by FEAT_SB,
	 * if it's enabled.
	 */
	.macro exception_return
	eret
#if ENABLE_FEAT_SB
	sb_barrier_insn
#else
	dsb	nsh
	isb
#endif
	.endm

	/*
	 * Macro to unmask External Aborts by clearing the PSTATE.A bit.
	 * An explicit synchronization event ensures that a newly unmasked
	 * pending abort is taken immediately.
	 */
	.macro unmask_async_ea
	msr	daifclr, #DAIF_ABT_BIT
	isb
	.endm

	/*
	 * Macro for error synchronization on exception boundaries.
	 * With FEAT_RAS enabled, it is assumed that FEAT_IESB is also present
	 * and enabled.
	 * FEAT_IESB provides an implicit error synchronization event at
	 * exception entry and exception return, so there is no need for any
	 * explicit instruction.
	 */
	.macro synchronize_errors
#if !ENABLE_FEAT_RAS
	/* Complete any stores that may return an abort */
	dsb	sy
	/* Synchronise the CPU context with the completion of the dsb */
	isb
#endif
	.endm

	/*
	 * Helper macro equivalent to an 'adr <reg>, <symbol>' instruction,
	 * usable when <symbol> is within the range +/- 4 GB.
	 */
	.macro adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm
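
	/*
	 * Illustrative usage sketch (comment only; "bl31_warm_entrypoint"
	 * is just an example symbol):
	 *
	 *	adr_l	x0, bl31_warm_entrypoint
	 */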

	/*
	 * is_feat_XYZ_present_asm - Set the condition flags and 'reg'
	 * according to whether FEAT_XYZ is implemented at runtime
	 * (NE if present, EQ otherwise).
	 *
	 * Arguments:
	 * reg: Register for temporary use.
	 *
	 * Clobbers: reg
	 */
	.macro is_feat_sysreg128_present_asm reg:req
	mrs	\reg, ID_AA64ISAR2_EL1
	ands	\reg, \reg, #(ID_AA64ISAR2_SYSREG128_MASK << ID_AA64ISAR2_SYSREG128_SHIFT)
	.endm

	.macro is_feat_pauth_present_asm reg:req, clobber:req
	mrs	\reg, ID_AA64ISAR1_EL1
	mov_imm	\clobber, ((ID_AA64ISAR1_GPI_MASK << ID_AA64ISAR1_GPI_SHIFT) \
		| (ID_AA64ISAR1_GPA_MASK << ID_AA64ISAR1_GPA_SHIFT) \
		| (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) \
		| (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT))
	tst	\reg, \clobber
	.endm
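
	/*
	 * Illustrative usage sketch (comment only; "1f" is a local label in
	 * the caller):
	 *
	 *	is_feat_pauth_present_asm x0, x1
	 *	b.eq	1f	// FEAT_PAUTH not implemented, skip
	 */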

.macro call_reset_handler
#if !(defined(IMAGE_BL2) && ENABLE_RME)
	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	blr	x2
#endif
.endm

#endif /* ASM_MACROS_S */