/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
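	/*
	 * CTR_EL0.IDC set: a D-cache clean to the PoU is not required for
	 * I/D coherence, so completing earlier stores (dsb ishst) is enough.
	 */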
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	b	7f
alternative_else_nop_endif
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

7:
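	/*
	 * CTR_EL0.DIC set: I-cache invalidation to the PoU is not required
	 * for I/D coherence; an ISB still resynchronises the fetch side.
	 */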
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b	8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f
8:	mov	x0, #0
1:
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(__flush_icache_range)
ENDPROC(__flush_cache_user_range)

/*
 *	invalidate_icache_range(start,end)
 *
 *	Ensure that the I cache is invalid within specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(invalidate_icache_range)
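	/* CTR_EL0.DIC set: no I-cache maintenance needed, only an ISB */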
alternative_if ARM64_HAS_CACHE_DIC
	mov	x0, xzr
	isb
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4

	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(invalidate_icache_range)

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__flush_dcache_area)
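	/* civac: clean+invalidate by VA to PoC; sy: full-system DSB on completion */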
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)

/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_pou)
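	/* CTR_EL0.IDC set: no clean to the PoU is needed; a store barrier suffices */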
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)

/*
 *	__inval_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are invalidated. Any partial lines at the ends of the interval are
 *	also cleaned to PoC to prevent data loss.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 *	__dma_inv_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
__dma_inv_area:
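	/*
	 * Unaligned first/last lines may carry live data from adjacent
	 * buffers, so clean+invalidate (dc civac) them rather than
	 * discarding them outright (dc ivac).
	 */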
	add	x1, x1, x0		// x1 = end of the region
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3			// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1		// clean & invalidate D / U line
1:	tst	x0, x3			// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0		// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0		// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
ENDPIPROC(__inval_dcache_area)
ENDPROC(__dma_inv_area)

/*
 *	__clean_dcache_area_poc(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 *	__dma_clean_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
__dma_clean_area:
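	/* cvac: clean by VA to PoC so CPU writes become visible to the device */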
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)

/*
 *	__clean_dcache_area_pop(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoP.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_pop)
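	/* Fall back to a clean to the PoC when DC CVAP is not implemented */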
alternative_if_not ARM64_HAS_DCPOP
	b	__clean_dcache_area_poc
alternative_else_nop_endif
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_pop)

/*
 *	__dma_flush_area(start, size)
 *
 *	clean & invalidate D / U line
 *
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
ENTRY(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__dma_flush_area)

/*
 *	__dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(__dma_map_area)
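	/*
	 * FROM_DEVICE: invalidate, so no dirty line can be written back
	 * over data the device places in the buffer.
	 * Other directions: clean to the PoC so the device sees CPU writes.
	 */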
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area
	b	__dma_clean_area
ENDPIPROC(__dma_map_area)

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(__dma_unmap_area)
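	/*
	 * TO_DEVICE: the device only read the buffer, nothing to do.
	 * Other directions: invalidate, so the CPU re-reads what the
	 * device wrote instead of stale cached data.
	 */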
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
ENDPIPROC(__dma_unmap_area)