/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* number of pages gathered so far */
	unsigned int		max;		/* capacity of the pages[] array */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;	/* range passed to tlb_gather_mmu() */
	unsigned long		start_addr;	/* range recorded by __tlb_remove_tlb_entry() */
	unsigned long		end_addr;
	struct page		**pages;	/* pages waiting to be freed after the flush */
	struct page		*local[IA64_GATHER_BUNDLE]; /* on-stack fallback for pages */
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* Record for a translation register (TR) entry. */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Region register macros.
 */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)

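/*
 * Worked example (illustrative value, not taken from this file): a region
 * register value of 0x123439 decodes as
 *
 *	RR_TO_VE(0x123439)  == 1	(ve bit set)
 *	RR_TO_PS(0x123439)  == 14	(page size 2^14 = 16KB)
 *	RR_TO_RID(0x123439) == 0x1234	(region ID)
 *
 * and RR_VE(1) | RR_PS(14) | (0x1234UL << 8) reassembles the same value.
 */
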
static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * flush_tlb_range() takes a vma instead of a mm pointer because
		 * some architectures want the vm_flags for ITLB/DTLB flush.
		 */
		struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);

		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}
}

static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

/*
 * Flush the TLB for address range START to END and release the pages that were
 * gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}

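/*
 * Try to upgrade from the small on-stack array to a full page worth of page
 * pointers.  If the allocation fails we simply keep using tlb->local[], so
 * batches are just smaller and flushes more frequent.
 */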
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}

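/*
 * Start a new unmap operation: initialize the gather structure for MM.
 * fullmm is set when the entire address space is being torn down
 * (start == 0 && end == -1), in which case the eventual flush uses
 * flush_tlb_mm() rather than a ranged flush.
 */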
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).  Returns true when the gather buffer is full and the caller must flush
 * before removing further pages.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

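/*
 * The tlb_flush_mmu*() entry points below implement the generic mmu_gather
 * interface on top of the ia64-specific helpers above, using the address
 * range recorded by __tlb_remove_tlb_entry().
 */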
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

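/*
 * The page_size argument is ignored here: ia64 gathers all pages the same
 * way regardless of their size.
 */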
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	tlb_remove_page(tlb, page);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.  It records the address
 * range being unmapped so that the eventual TLB flush covers it.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

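/* Task migration notification is forwarded to the platform machine vector. */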
#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

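/*
 * No per-VMA work is needed on ia64: there are no virtual caches to flush
 * (see step (1) at the top of this file), so these hooks are empty.
 */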
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

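/*
 * A change in the page size of the entries being removed needs no special
 * handling here, so this hook is a no-op.
 */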
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */