/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 * flush_icache_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to a
 * memory region and is about to be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(__flush_icache_range)
	/* FALLTHROUGH */

/*
 * __flush_cache_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to a
 * memory region and is about to be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
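	/*
	 * CTR_EL0.IDC set means a D-cache clean to the PoU is not
	 * required for instruction-to-data coherence, so only order the
	 * stores before moving on to the I-cache side.
	 */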
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	b	7f
alternative_else_nop_endif
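	/*
	 * Clean each D-cache line covering [start, end) to the PoU.  On
	 * parts with ARM64_WORKAROUND_CLEAN_CACHE, "dc cvau" is upgraded
	 * to "dc civac" because their D-cache clean operations are
	 * affected by errata.
	 */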
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

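	/*
	 * CTR_EL0.DIC set means an I-cache invalidate to the PoU is not
	 * required for instruction-to-data coherence; an ISB is still
	 * needed to synchronize the fetched instruction stream.
	 */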
7:
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b	8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f
8:	mov	x0, #0
1:
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(__flush_icache_range)
ENDPROC(__flush_cache_user_range)
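
/*
 * Typical use (a sketch, not part of this file): a caller that writes
 * instructions, e.g. a module loader or JIT, invokes the C-visible
 * flush_icache_range() wrapper before executing the new code:
 *
 *	memcpy(dst, insns, size);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + size);
 *
 * "dst", "insns" and "size" are illustrative names only.
 */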

/*
 * invalidate_icache_range(start,end)
 *
 * Ensure that the I-cache is invalidated for the specified region.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(invalidate_icache_range)
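	/* CTR_EL0.DIC set: no I-cache invalidate needed, just synchronize. */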
alternative_if ARM64_HAS_CACHE_DIC
	mov	x0, xzr
	isb
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4

	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(invalidate_icache_range)

/*
 * __flush_dcache_area(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned and invalidated to the PoC.
 *
 * - kaddr - kernel address
 * - size - size in question
 */
ENTRY(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)
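
/*
 * Note on dcache_by_line_op (asm/assembler.h): it expands to a loop that
 * issues "dc <op>" on every cache line covering [kaddr, kaddr + size),
 * followed by "dsb <domain>" to complete the maintenance; the last two
 * register arguments are scratch.
 */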

/*
 * __clean_dcache_area_pou(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoU.
 *
 * - kaddr - kernel address
 * - size - size in question
 */
ENTRY(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)

/*
 * __inval_dcache_area(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are invalidated. Any partial lines at the ends of the interval are
 * also cleaned to PoC to prevent data loss.
 *
 * - kaddr - kernel address
 * - size - size in question
 */
ENTRY(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 * __dma_inv_area(start, size)
 * - start - virtual start address of region
 * - size - size in question
 */
__dma_inv_area:
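	/*
	 * Convert the size argument into an exclusive end address, then
	 * handle unaligned ends: a partial line at either end of the
	 * interval is cleaned and invalidated (dc civac) instead of just
	 * invalidated, so bystander data sharing that line is not lost.
	 */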
	add	x1, x1, x0
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
ENDPIPROC(__inval_dcache_area)
ENDPROC(__dma_inv_area)

/*
 * __clean_dcache_area_poc(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoC.
 *
 * - kaddr - kernel address
 * - size - size in question
 */
ENTRY(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 * __dma_clean_area(start, size)
 * - start - virtual start address of region
 * - size - size in question
 */
__dma_clean_area:
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)

/*
 * __clean_dcache_area_pop(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoP.
 *
 * - kaddr - kernel address
 * - size - size in question
 */
ENTRY(__clean_dcache_area_pop)
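	/* "dc cvap" (ARMv8.2-DCPoP) cleans to the Point of Persistence. */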
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_pop)

/*
 * __dma_flush_area(start, size)
 *
 * clean & invalidate D / U line
 *
 * - start - virtual start address of region
 * - size - size in question
 */
ENTRY(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__dma_flush_area)

/*
 * __dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(__dma_map_area)
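	/*
	 * Before DMA: for DMA_FROM_DEVICE the device will write the
	 * buffer, so invalidate it; for DMA_TO_DEVICE and
	 * DMA_BIDIRECTIONAL, clean so the device observes the CPU's
	 * writes.
	 */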
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area
	b	__dma_clean_area
ENDPIPROC(__dma_map_area)

/*
 * __dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(__dma_unmap_area)
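	/*
	 * After DMA: nothing to do for DMA_TO_DEVICE (the device only
	 * read the buffer); otherwise invalidate, discarding any lines
	 * speculatively fetched while the device owned the memory.
	 */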
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
ENDPIPROC(__dma_unmap_area)