/*
 * Copyright (c) 2018, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef __ASM_MACROS_S__
#define __ASM_MACROS_S__

#include <arch.h>
#include <asm_macros_common.S>

#define TLB_INVALIDATE(_type) \
	tlbi	_type

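	/*
	 * Set up a function's stack frame: save the frame pointer and link
	 * register on the stack, then make x29 point at the new frame.
	 */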
	.macro func_prologue
	stp	x29, x30, [sp, #-0x10]!
	mov	x29, sp
	.endm

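	/*
	 * Tear down a function's stack frame: restore the frame pointer and
	 * link register saved by func_prologue.
	 */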
	.macro func_epilogue
	ldp	x29, x30, [sp], #0x10
	.endm


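	/*
	 * Compute the minimum data cache line size, in bytes, from
	 * CTR_EL0.DminLine (log2 of the line size in words).
	 * Out: \reg = line size in bytes
	 * Clobber: \tmp
	 */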
	.macro dcache_line_size reg, tmp
	mrs	\tmp, ctr_el0
	ubfx	\tmp, \tmp, #16, #4
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm


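	/*
	 * Compute the minimum instruction cache line size, in bytes, from
	 * CTR_EL0.IminLine (log2 of the line size in words).
	 * Out: \reg = line size in bytes
	 * Clobber: \tmp
	 */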
	.macro icache_line_size reg, tmp
	mrs	\tmp, ctr_el0
	and	\tmp, \tmp, #0xf
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm

	/*
	 * Declare the exception vector table, enforcing that it is aligned
	 * on a 2KB boundary, as required by the ARMv8 architecture.
	 * Use zero bytes as the fill value for the padding so that it
	 * decodes as illegal AArch64 instructions. This increases security
	 * and robustness, and can facilitate debugging.
	 */
	.macro vector_base label
	.section .vectors, "ax"
	.align 11, 0
	\label:
	.endm

	/*
	 * Create an entry in the exception vector table, enforcing that it
	 * is aligned on a 128-byte boundary, as required by the ARMv8
	 * architecture. Use zero bytes as the fill value for the padding so
	 * that it decodes as illegal AArch64 instructions. This increases
	 * security and robustness, and can facilitate debugging.
	 */
	.macro vector_entry label
	.section .vectors, "ax"
	.align 7, 0
	\label:
	.endm

	/*
	 * This macro verifies that a given vector entry doesn't exceed the
	 * architectural limit of 32 instructions (128 bytes). It is meant
	 * to be placed immediately after the last instruction in the vector
	 * entry and takes the entry's label as its parameter.
	 */
	.macro check_vector_size since
	.if (. - \since) > (32 * 4)
	.error "Vector exceeds 32 instructions"
	.endif
	.endm

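	/*
	 * Illustrative example tying the three macros above together (the
	 * 'my_vectors' and 'sync_sp0' labels are hypothetical):
	 *
	 *	vector_base my_vectors
	 *
	 *	vector_entry sync_sp0
	 *		b	sync_sp0
	 *		check_vector_size sync_sp0
	 */
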
	/*
	 * This macro calculates the base address of the current core's MP
	 * stack, using the index returned by platform_get_core_pos(), the
	 * name of the stack storage and the size of each stack. The stack
	 * grows downwards from the returned address.
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_mp_stack _name, _size
	bl	platform_get_core_pos
	ldr	x2, =(\_name + \_size)
	mov	x1, #\_size
	madd	x0, x0, x1, x2
	.endm

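	/*
	 * Example usage (illustrative; 'platform_stacks' and the 0x800
	 * per-core stack size are hypothetical):
	 *
	 *	get_mp_stack platform_stacks, 0x800
	 *	mov	sp, x0
	 */
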
	/*
	 * This macro calculates the base address of a UP stack, using the
	 * name of the stack storage and the size of the stack.
	 * Out: X0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	x0, =(\_name + \_size)
	.endm

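	/*
	 * Example usage (illustrative; 'bl_stack' is a hypothetical stack
	 * buffer):
	 *
	 *	get_up_stack bl_stack, 0x400
	 *	mov	sp, x0
	 */
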
	/*
	 * Helper macro to generate the best mov/movk combination according
	 * to the value to be moved. The 16 bits at bit offset '_shift' are
	 * tested and, if not all zero, moved into '_reg' without affecting
	 * the other bits.
	 */
	.macro _mov_imm16 _reg, _val, _shift
		.if (\_val >> \_shift) & 0xffff
			/*
			 * If any bits below '_shift' are set, they have
			 * already been placed in the register, so use movk
			 * to preserve them. Otherwise a plain mov, which
			 * zeroes the rest of the register, is sufficient.
			 */
			.if (\_val & ((1 << \_shift) - 1))
				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
			.else
				mov	\_reg, \_val & (0xffff << \_shift)
			.endif
		.endif
	.endm

	/*
	 * Helper macro to load arbitrary values into 32 or 64-bit
	 * registers, generating the best mov/movk combination. Many base
	 * addresses are 64KB aligned; in that case, the macro eliminates
	 * the instruction that would update bits 15:0.
	 */
	.macro mov_imm _reg, _val
		.if (\_val) == 0
			mov	\_reg, #0
		.else
			_mov_imm16	\_reg, (\_val), 0
			_mov_imm16	\_reg, (\_val), 16
			_mov_imm16	\_reg, (\_val), 32
			_mov_imm16	\_reg, (\_val), 48
		.endif
	.endm

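	/*
	 * Example expansion (illustrative):
	 *
	 *	mov_imm	x0, 0x80001000
	 *
	 * emits:
	 *
	 *	mov	x0, #0x1000
	 *	movk	x0, #0x8000, lsl #16
	 *
	 * whereas the 64KB-aligned value 0x80000000 needs only a single
	 * 'mov x0, #0x80000000'.
	 */
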
	.macro asm_read_sysreg_el1_or_el2 sysreg
	mrs	x0, CurrentEL
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	1f
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	2f
	b	dead	/* Unexpected exception level */
1:
	mrs	x0, \sysreg\()_el1
	b	3f
2:
	mrs	x0, \sysreg\()_el2
3:
	.endm

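	/*
	 * Example usage (illustrative): read TTBR0 at the current exception
	 * level into x0.
	 *
	 *	asm_read_sysreg_el1_or_el2 ttbr0
	 */
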
	.macro asm_write_sysreg_el1_or_el2 sysreg scratch_reg
	mrs	\scratch_reg, CurrentEL
	cmp	\scratch_reg, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	1f
	cmp	\scratch_reg, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	2f
	b	dead	/* Unexpected exception level */
1:
	msr	\sysreg\()_el1, x0
	b	3f
2:
	msr	\sysreg\()_el2, x0
3:
	.endm

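	/*
	 * Example usage (illustrative): write x0 to TTBR0 at the current
	 * exception level, using x1 as the scratch register.
	 *
	 *	asm_write_sysreg_el1_or_el2 ttbr0 x1
	 */
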
	.macro asm_read_sctlr_el1_or_el2
	asm_read_sysreg_el1_or_el2 sctlr
	.endm

	.macro asm_write_sctlr_el1_or_el2 scratch_reg
	asm_write_sysreg_el1_or_el2 sctlr \scratch_reg
	.endm

	.macro asm_write_vbar_el1_or_el2 scratch_reg
	asm_write_sysreg_el1_or_el2 vbar \scratch_reg
	.endm

/*
 * Depending on the current exception level, jump to 'label_el1' or 'label_el2'.
 * If the current exception level is neither EL1 nor EL2, jump to 'label_error'
 * instead.
 * The caller needs to provide the macro with a 64-bit scratch register; its
 * contents are clobbered.
 */
	.macro JUMP_EL1_OR_EL2 scratch_reg, label_el1, label_el2, label_error
	mrs	\scratch_reg, CurrentEL
	cmp	\scratch_reg, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	\label_el1
	cmp	\scratch_reg, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	\label_el2
	b	\label_error
	.endm

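	/*
	 * Example usage (illustrative; the three target labels are
	 * hypothetical):
	 *
	 *	JUMP_EL1_OR_EL2 x0, setup_el1, setup_el2, unexpected_el
	 */
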
#endif /* __ASM_MACROS_S__ */