path: root/include/common/aarch64/asm_macros.S
/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <asm_macros_common.S>
#include <spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * erratum 813419 of Cortex-A57.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_type) \
        tlbi    _type; \
        dsb     ish; \
        tlbi    _type
#else
#define TLB_INVALIDATE(_type) \
        tlbi    _type
#endif

        .macro  func_prologue
        stp     x29, x30, [sp, #-0x10]!
        mov     x29, sp
        .endm

        .macro  func_epilogue
        ldp     x29, x30, [sp], #0x10
        .endm

        /*
         * Write the data cache line size, in bytes, to 'reg' by extracting
         * the CTR_EL0.DminLine field (log2 of the number of words in the
         * smallest data cache line). Clobbers 'tmp'.
         */
        .macro  dcache_line_size reg, tmp
        mrs     \tmp, ctr_el0
        ubfx    \tmp, \tmp, #16, #4
        mov     \reg, #4
        lsl     \reg, \reg, \tmp
        .endm

        /*
         * Write the instruction cache line size, in bytes, to 'reg' by
         * extracting the CTR_EL0.IminLine field. Clobbers 'tmp'.
         */
        .macro  icache_line_size reg, tmp
        mrs     \tmp, ctr_el0
        and     \tmp, \tmp, #0xf
        mov     \reg, #4
        lsl     \reg, \reg, \tmp
        .endm

        /*
         * Branch to 'label' if the exception currently being handled at EL3
         * is not an SMC from AArch64 state.
         */
        .macro  smc_check label
        mrs     x0, esr_el3
        ubfx    x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
        cmp     x0, #EC_AARCH64_SMC
        b.ne    \label
        .endm

        /*
         * Declare the exception vector table, enforcing it is aligned on a
         * 2KB boundary, as required by the ARMv8 architecture.
         * Use zero bytes as the fill value in the padding so that the
         * padding decodes as illegal AArch64 instructions. This increases
         * security, robustness and potentially facilitates debugging.
         */
        .macro  vector_base label, section_name=.vectors
        .section \section_name, "ax"
        .align  11, 0
        \label:
        .endm

        /*
         * Create an entry in the exception vector table, enforcing it is
         * aligned on a 128-byte boundary, as required by the ARMv8
         * architecture. Use zero bytes as the fill value in the padding so
         * that the padding decodes as illegal AArch64 instructions. This
         * increases security, robustness and potentially facilitates
         * debugging.
         */
        .macro  vector_entry label, section_name=.vectors
        .cfi_sections .debug_frame
        .section \section_name, "ax"
        .align  7, 0
        .type   \label, %function
        .cfi_startproc
        \label:
        .endm

        /*
         * Pad the current exception vector entry to its full size of 32
         * instructions (128 bytes). If the entry contains more than 32
         * instructions, the fill count becomes negative and the assembler
         * emits an error.
         */
        .macro  end_vector_entry label
        .cfi_endproc
        .fill   \label + (32 * 4) - .
        .endm
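        /*
         * Usage sketch for the vector macros above, with hypothetical
         * symbol names ('my_vectors', 'sync_sp_el0', 'handle_sync') that
         * are not defined in this file:
         *
         *      vector_base     my_vectors
         *
         *      vector_entry    sync_sp_el0
         *      b       handle_sync
         *      end_vector_entry sync_sp_el0
         *
         * vector_base aligns 'my_vectors' on a 2KB boundary, vector_entry
         * aligns each handler on a 128-byte boundary, and end_vector_entry
         * pads the entry out to the full 32 instructions.
         */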
        /*
         * This macro calculates the base address of the current CPU's MP
         * stack using the plat_my_core_pos() index, the name of the stack
         * storage and the size of each stack.
         * Out: X0 = physical address of stack base
         * Clobber: X30, X1, X2
         */
        .macro  get_my_mp_stack _name, _size
        bl      plat_my_core_pos
        adrp    x2, (\_name + \_size)
        add     x2, x2, :lo12:(\_name + \_size)
        mov     x1, #\_size
        madd    x0, x0, x1, x2
        .endm

        /*
         * This macro calculates the base address of a UP stack using the
         * name of the stack storage and the size of the stack.
         * Out: X0 = physical address of stack base
         */
        .macro  get_up_stack _name, _size
        adrp    x0, (\_name + \_size)
        add     x0, x0, :lo12:(\_name + \_size)
        .endm

        /*
         * Helper macro to generate the best mov/movk combinations according
         * to the value to be moved. The 16 bits from '_shift' are tested
         * and, if not zero, they are moved into '_reg' without affecting
         * other bits.
         */
        .macro  _mov_imm16 _reg, _val, _shift
        .if (\_val >> \_shift) & 0xffff
                .if (\_val & (1 << \_shift - 1))
                        movk    \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
                .else
                        mov     \_reg, \_val & (0xffff << \_shift)
                .endif
        .endif
        .endm

        /*
         * Helper macro to load arbitrary values into 32 or 64-bit
         * registers, generating the best mov/movk combinations. Many base
         * addresses are 64KB aligned, in which case the macro avoids
         * updating bits 15:0 altogether.
         */
        .macro  mov_imm _reg, _val
        .if (\_val) == 0
                mov     \_reg, #0
        .else
                _mov_imm16      \_reg, (\_val), 0
                _mov_imm16      \_reg, (\_val), 16
                _mov_imm16      \_reg, (\_val), 32
                _mov_imm16      \_reg, (\_val), 48
        .endif
        .endm

        /*
         * Macro to mark instances where we're jumping to a function and
         * don't expect a return. To provide the function being jumped to
         * with additional information, we use the 'bl' instruction to jump
         * rather than 'b'.
         *
         * Debuggers infer the location of a call from where LR points to,
         * which is usually the instruction after 'bl'. If this macro
         * expansion happens to be the last location in a function, that
         * will cause the LR to point to a location beyond the function,
         * thereby misleading the debugger's back trace. We therefore insert
         * a 'nop' after the function call for debug builds, unless the
         * 'skip_nop' parameter is non-zero.
         */
        .macro  no_ret _func:req, skip_nop=0
        bl      \_func
#if DEBUG
        .ifeq \skip_nop
        nop
        .endif
#endif
        .endm

        /*
         * Reserve space for a spin lock in an assembly file.
         */
        .macro  define_asm_spinlock _name:req
        .align  SPINLOCK_ASM_ALIGN
        \_name:
        .space  SPINLOCK_ASM_SIZE
        .endm

#if RAS_EXTENSION
        /*
         * Error Synchronization Barrier, emitted via '.inst' so that it
         * assembles even with toolchains that lack RAS extension support.
         */
        .macro  esb
        .inst   0xd503221f
        .endm
#endif

#endif /* ASM_MACROS_S */
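/*
 * Usage sketch for mov_imm, with illustrative constants: only the non-zero
 * 16-bit chunks of the value generate instructions.
 *
 *      mov_imm x0, 0x12345678
 * expands to:
 *      mov     x0, #0x5678
 *      movk    x0, #0x1234, LSL 16
 *
 * whereas a 64KB-aligned value such as:
 *      mov_imm x1, 0x22000000
 * expands to a single instruction, with no update of bits 15:0:
 *      mov     x1, #0x22000000
 */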