/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg) asm ("tlbi " #op "\n"				\
		   ALTERNATIVE("nop\n nop",				\
			       "dsb ish\n tlbi " #op,			\
			       ARM64_WORKAROUND_REPEAT_TLBI,		\
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		\
			    : : )

#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n"			\
		   ALTERNATIVE("nop\n nop",				\
			       "dsb ish\n tlbi " #op ", %0",		\
			       ARM64_WORKAROUND_REPEAT_TLBI,		\
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		\
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

#define __tlbi_user(op, arg) do {					\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi(op, (arg) | USER_ASID_FLAG);			\
} while (0)
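
/*
 * Illustrative sketch only (hypothetical helper, not used elsewhere in this
 * file): the no-argument and single-argument forms of __tlbi(), plus the
 * KPTI-aware __tlbi_user() wrapper, as invoked by the management routines
 * further down. 'addr' stands for an operand built with __TLBI_VADDR().
 */
#if 0	/* example, not compiled */
static inline void __tlbi_usage_example(unsigned long addr)
{
	__tlbi(vmalle1is);		/* form with no register argument */
	__tlbi(vale1is, addr);		/* form with a register argument */
	__tlbi_user(vale1is, addr);	/* repeated for the user ASID when KPTI is enabled */
}
#endif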

/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
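
/*
 * For reference, the operand built above follows the "VA, ASID" TLBI
 * register layout: bits [43:0] hold VA[55:12] (the address shifted right by
 * 12 regardless of PAGE_SIZE, which is why the flush loops below step in
 * units of 1 << (PAGE_SHIFT - 12)), bits [47:44] are left as zero, and bits
 * [63:48] hold the ASID. Kernel (global) mappings pass an ASID of 0, which
 * ASID-agnostic operations such as VAAE1IS ignore.
 */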

/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vm_area_struct describing the address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vm_area_struct describing the address range
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate a range of TLB entries for addresses in the
 *		kernel's virtual memory space.
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
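
/*
 * Illustrative sketch of a hypothetical caller (not part of this header):
 * the page-table update happens first, then flush_tlb_page() publishes it
 * and broadcasts the invalidation of any stale translation to all CPUs.
 */
#if 0	/* example, not compiled */
static void example_unmap_one_page(struct vm_area_struct *vma,
				   unsigned long uaddr, pte_t *ptep)
{
	pte_clear(vma->vm_mm, uaddr, ptep);	/* tear down the mapping */
	flush_tlb_page(vma, uaddr);		/* then drop cached translations */
}
#endif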
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}
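
/*
 * Note: the local variant above pairs the non-broadcast VMALLE1 operation
 * with non-shareable (nsh) barriers, so only the calling CPU's TLB is
 * affected. flush_tlb_all() below uses the inner-shareable forms
 * (VMALLE1IS with dsb ish) so the invalidation is broadcast to every CPU
 * in the inner-shareable domain.
 */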

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges, not
 * necessarily to improve performance.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
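
/*
 * With 4KB pages (PAGE_SHIFT == 12) the limit above works out to 4MB, i.e.
 * at most 1024 page-sized steps in the flush loops below before the helpers
 * fall back to flushing the whole ASID or the whole TLB.
 *
 * __flush_tlb_range() invalidates [start, end) for the address space of
 * 'vma'. When 'last_level' is true, only last-level (leaf) entries are
 * invalidated (TLBI VALE1IS), which is sufficient when only the PTEs have
 * changed; otherwise the non-last-level form (TLBI VAE1IS) is used so that
 * cached intermediate (walk cache) entries are dropped as well.
 */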

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

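/*
 * Kernel-VA flushes below use the ASID-agnostic TLBI VAAE1IS and finish
 * with an isb(): unlike the user-address flushes above, a new kernel
 * mapping may be used by the very next instruction, with no intervening
 * exception return to act as a context synchronization event.
 */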
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));

	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);
}
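
/*
 * Illustrative sketch of a hypothetical caller (not part of this header):
 * when a no-longer-needed page-table page is unhooked from the table, the
 * cached walk entries covering it are dropped before the page is freed.
 */
#if 0	/* example, not compiled */
static void example_free_pte_table(struct mm_struct *mm, pmd_t *pmdp,
				   unsigned long addr)
{
	pmd_clear(pmdp);		/* disconnect the PTE table */
	__flush_tlb_pgtable(mm, addr);	/* drop cached walk entries for it */
	/* ...the PTE table page can now be freed... */
}
#endif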

static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	__tlbi(vaae1is, addr);
	dsb(ish);
}
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_TLBFLUSH_H */