/*
 * Copyright (c) 2018, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu
	.globl	disable_mmu_icache

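/* -----------------------------------------------------------------------
 * Expects an affinity level (0 to 3) in x0 and returns, in x0, the bit
 * offset of that level's affinity field within an MPIDR: 8 * level for
 * levels 0 to 2 and 32 for level 3, since Aff3 occupies MPIDR bits
 * [39:32].
 * -----------------------------------------------------------------------
 */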
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret
endfunc get_afflvl_shift

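/* -----------------------------------------------------------------------
 * Expects an MPIDR value in x0 and an affinity level in x1. Returns, in
 * x0, the MPIDR with all affinity fields below the given level zeroed.
 * -----------------------------------------------------------------------
 */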
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret
endfunc mpidr_mask_lower_afflvls


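/* -----------------------------------------------------------------------
 * Minimal wrappers that issue an 'eret' or an 'smc #0', so that an
 * exception return or a Secure Monitor Call can be triggered through a
 * plain function call.
 * -----------------------------------------------------------------------
 */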
func eret
	eret
endfunc eret

func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
#if ENABLE_ASSERTIONS
	tst	x0, #0xf
	ASM_ASSERT(eq)
#endif
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero one byte at a time */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:
	ret
endfunc zeromem16
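
/*
 * Illustrative call sequence, assuming a caller-defined, 16-byte aligned
 * buffer 'scratch_buf' (hypothetical symbol) of at least 64 bytes:
 *
 *	adrp	x0, scratch_buf
 *	add	x0, x0, :lo12:scratch_buf	// x0 = address of the buffer
 *	mov	x1, #64				// x1 = number of bytes to zero
 *	bl	zeromem16
 */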


/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length);
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ENABLE_ASSERTIONS
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy one byte at a time */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16
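
/*
 * Illustrative call sequence, assuming two caller-defined, 16-byte
 * aligned, non-overlapping buffers 'dst_buf' and 'src_buf' (hypothetical
 * symbols) of at least 128 bytes each:
 *
 *	adrp	x0, dst_buf
 *	add	x0, x0, :lo12:dst_buf		// x0 = destination
 *	adrp	x1, src_buf
 *	add	x1, x1, :lo12:src_buf		// x1 = source
 *	mov	x2, #128			// x2 = number of bytes to copy
 *	bl	memcpy16
 */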

/* ---------------------------------------------------------------------------
 * Disable the MMU at the current exception level (NS-EL1 or EL2).
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled, without any intervening cacheable
 * data accesses.
 * ---------------------------------------------------------------------------
 */
func disable_mmu
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	asm_read_sctlr_el1_or_el2
	bic	x0, x0, x1
	asm_write_sctlr_el1_or_el2 x1
	isb	/* ensure MMU is off */
	mov	x0, #DCCISW	/* DCache clean and invalidate */
	b	dcsw_op_all
endfunc disable_mmu

func disable_mmu_icache
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache
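
/*
 * Illustrative call sequence: both helpers tail-call dcsw_op_all, which
 * returns to the original caller, so they can be invoked like ordinary
 * functions:
 *
 *	bl	disable_mmu		// MMU and data cache off
 * or:
 *	bl	disable_mmu_icache	// additionally turns the I-cache off
 */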

/* Need this label for asm_read/write_sctlr_el1_or_el2 */
dead:
	b	dead