/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
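
/*
 * Not part of the original file: a hedged usage sketch. Deposit the low
 * byte of r3 into byte lane 1 of r2 independently of endianness; the
 * register choices are arbitrary:
 *
 *	and	r3, r3, #255
 *	orr	r2, r2, r3, put_byte_1	@ r2 |= r3 << 8 (LE) or << 16 (BE)
 */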

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be a worthwhile thing to do when the cache
 * is not set to write-allocate (this would need further testing on
 * XScale when WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

#define IMM12_MASK 0xfff

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

#if __LINUX_ARM_ARCH__ < 7
	.macro	dsb, args
	mcr	p15, 0, r0, c7, c10, 4
	.endm

	.macro	isb, args
	mcr	p15, 0, r0, c7, c5, 4
	.endm
#endif

	.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Actually the registers should be pushed and popped conditionally,
	 * but after the bl the flags are certainly clobbered.
	 */
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
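
/*
 * A hedged example: load the address of a local label into lr so that a
 * later exception return lands there with the Thumb bit set correctly
 * under CONFIG_THUMB2_KERNEL (safe_svcmode_maskall below does exactly
 * this with its 2f label):
 *
 *	badr	lr, 2f
 */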

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)
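
/*
 * Sketch of intended use (hypothetical call site): mark a user access so
 * that a fault branches to the default 9001 fixup label, which the
 * caller must provide:
 *
 *	USER(	ldrt	r0, [r1]	)	@ may fault
 *	...
 * 9001:	@ fault fixup
 */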

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + (label - 9998b)				;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
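
/*
 * A minimal pairing example, patched down to the ALT_UP() body when the
 * kernel runs on a uniprocessor system (the smp_dmb macro below is a
 * real in-file user of this pattern):
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */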

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
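
/*
 * Usage note: ARM-only callers write "smp_dmb arm"; sources that may
 * assemble to Thumb-2 use plain "smp_dmb" so the 32-bit W() encoding is
 * emitted and the ALT_UP() 4-byte size check is satisfied.
 */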

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during boot.
	 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
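
/*
 * Typical boot-time sketch (hypothetical call site; r9 is an arbitrary
 * register that the macro clobbers):
 *
 *	safe_svcmode_maskall r9
 *	@ now in SVC mode with IRQs/FIQs masked, whether or not we
 *	@ entered in HYP mode
 */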

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
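
/*
 * Hedged example: load one word from a user pointer in r0 into r3,
 * branching to a caller-supplied fixup on fault (.Lfault is a
 * hypothetical label):
 *
 *	ldrusr	r3, r0, 4, abort=.Lfault
 */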

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
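
/*
 * For example (name and contents are illustrative only):
 *
 *	string	cpu_elf_name, "v7"
 */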

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr
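
/*
 * Example: a plain function return is written "ret lr", which assembles
 * to "bx lr" on v6 and later and falls back to "mov pc, lr" on older
 * cores:
 *
 *	ret	lr
 */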

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm
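
/*
 * Illustrative call site (the message is made up; __LINE__ works because
 * .S files are fed through the C preprocessor):
 *
 *	bug	"unexpected mode", __LINE__
 */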

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
 THUMB(	ittt		\c			)
	.endif
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8		// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

	/*
	 * mov_l - move a constant value or [relocated] address into a register
	 */
	.macro		mov_l, dst:req, imm:req
	.if		__LINUX_ARM_ARCH__ < 7
	ldr		\dst, =\imm
	.else
	movw		\dst, #:lower16:\imm
	movt		\dst, #:upper16:\imm
	.endif
	.endm
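
	/*
	 * Sketch: load a 32-bit constant without a literal pool on v7 and
	 * later (the value is arbitrary):
	 *
	 *	mov_l	r0, 0x12345678
	 */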

	/*
	 * adr_l - adr pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

	/*
	 * ldr_l - ldr <literal> pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

	/*
	 * str_l - str <literal> pseudo-op with unlimited range
	 *
	 * @src: source register
	 * @sym: name of the symbol
	 * @tmp: mandatory scratch register
	 * @cond: conditional opcode suffix
	 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm
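
	/*
	 * Putting the three together (hypothetical symbol "boot_count";
	 * r0/r1 are arbitrary scratch registers): a position-independent
	 * load, increment and store of a variable anywhere in the image:
	 *
	 *	ldr_l	r0, boot_count
	 *	add	r0, r0, #1
	 *	str_l	r0, boot_count, r1
	 */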

#endif /* __ASM_ASSEMBLER_H__ */