/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm

	/* Only valid for the AArch64 pstate: PSR_D_BIT differs for AArch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
	.macro enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint	#16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
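/*
 * For example, a single user-space load with a fault fixup (the label and
 * registers here are purely illustrative):
 *
 *	USER(9998f, ldr x0, [x1])	// a fault branches to local label 9998
 */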

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
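/*
 * For example (registers chosen for illustration only), combining the two
 * 32-bit halves held in x0 and x1 into x2; which operand is treated as the
 * low half depends on endianness, as noted above:
 *
 *	regs_to_64	x2, x0, x1
 */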

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
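	/*
	 * For example, with a hypothetical 64-bit variable some_var and x2
	 * free as a scratch register:
	 *
	 *	ldr_l	x0, some_var		// x0 = some_var
	 *	str_l	x1, some_var, x2	// some_var = x1
	 */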

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
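	/*
	 * For example, with a hypothetical per-cpu variable some_percpu_var
	 * and x1 free as a scratch register:
	 *
	 *	ldr_this_cpu	x0, some_percpu_var, x1
	 */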

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
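/*
 * CTR_EL0.DminLine (bits [19:16]) holds log2 of the line size in words, so,
 * for example, an encoding of 4 yields a 4 << 4 = 64-byte D-cache line.
 */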

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm
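/*
 * For example (register and field choice is illustrative), capping the IPS
 * field of a TCR_EL1 value held in x10:
 *
 *	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
 */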

/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \kaddr	// dc cvadp
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
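/*
 * For example (registers are illustrative), cleaning and invalidating the
 * region [x0, x0 + x1) to the PoC, with x0-x3 corrupted as documented above:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */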

/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 *	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 *	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm
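/*
 * For example (illustrative), invalidating [x0, x1) with user faults
 * redirected to a local label 9:
 *
 *	invalidate_icache_by_line x0, x1, x2, x3, 9f
 */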

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
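/*
 * For example (register choice is illustrative), with x0 = dest and
 * x1 = src, both PAGE_SIZE aligned:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */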

/*
 * Deprecated! Use SYM_FUNC_{START,START_WEAK,END}_PI instead.
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
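	/*
	 * For example, this emits a movz/movk sequence (4 instructions for
	 * this particular constant) rather than a literal load:
	 *
	 *	mov_q	x0, 0x123456789abcdef0
	 */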

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 *	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	\tmp, .Lskipoffs_\@
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ :
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 *	phys:	physical address, preserved
 *	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm
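	/*
	 * In the 52-bit PA case above, bits [51:48] of the physical address
	 * are kept in bits [15:12] of the descriptor (which is why 64K pages
	 * are required), and pte_to_phys reassembles them accordingly.
	 */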

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * before executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro	frame_push, regcount:req, extra
	__frame	st, \regcount, \extra
	.endm

	/*
	 * frame_pop - Pop the callee saved registers from the stack that were
	 *             pushed in the most recent call to frame_push, as well
	 *             as x29/x30 and any extra stack space that may have been
	 *             allocated.
	 */
	.macro	frame_pop
	__frame	ld
	.endm
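	/*
	 * For example (purely illustrative), a routine that needs x19-x22
	 * preserved plus 16 bytes of local storage:
	 *
	 *	frame_push	4, 16
	 *	...
	 *	frame_pop
	 *	ret
	 */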

	.macro	__frame_regs, reg1, reg2, op, num
	.if .Lframe_regcount == \num
	\op\()r	\reg1, [sp, #(\num + 1) * 8]
	.elseif .Lframe_regcount > \num
	\op\()p	\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro	__frame, op, regcount, extra=0
	.ifc	\op, st
	.if (\regcount) < 0 || (\regcount) > 10
	.error	"regcount should be in the range [0 ... 10]"
	.endif
	.if ((\extra) % 16) != 0
	.error	"extra should be a multiple of 16 bytes"
	.endif
	.ifdef	.Lframe_regcount
	.if .Lframe_regcount != -1
	.error	"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set	.Lframe_regcount, \regcount
	.set	.Lframe_extra, \extra
	.set	.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp	x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov	x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc	\op, ld
	.if .Lframe_regcount == -1
	.error	"frame_push/frame_pop may not be nested"
	.endif
	ldp	x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set	.Lframe_regcount, -1
	.endif
	.endm

/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section; any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1 and a reschedule is also
 *   needed. If so, the call to preempt_enable() in kernel_neon_end() will
 *   trigger a reschedule. If that is not the case, yielding is pointless.
 * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
 *   code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_current_task	x0
	ldr		x0, [x0, #TSK_TI_PREEMPT]
	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
	cbz		x0, .Lyield_\@
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm

#endif /* __ASM_ASSEMBLER_H */