/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on the RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 *
 */
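
/*
 * A concrete example of the three spaces, using the mapping described
 * above: ASID 0 is written to CR3 as kPCID 1 for the kernel address
 * space and, with KPTI, as uPCID 2049 (1 + 2048) for the userspace
 * address space of the same mm.
 */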

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS 12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS 1
#else
# define PTI_CONSUMED_PCID_BITS 0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid. -1 below to account
 * for them being zero-based. Another -1 is because PCID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
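
/*
 * Worked example, with CONFIG_PAGE_TABLE_ISOLATION enabled:
 * X86_CR3_PCID_BITS (12) minus the 1 bit consumed by PTI leaves
 * CR3_AVAIL_PCID_BITS == 11, so MAX_ASID_AVAILABLE == (1 << 11) - 2 == 2046.
 */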

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS 6

/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
        /*
         * Make sure that the dynamic ASID space does not conflict with the
         * bit we are using to switch between user and kernel ASIDs.
         */
        BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

        /*
         * The ASID being passed in here should have respected the
         * MAX_ASID_AVAILABLE and thus never have the switch bit set.
         */
        VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
        /*
         * The dynamically-assigned ASIDs that get passed in are small
         * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
         * so do not bother to clear it.
         *
         * If PCID is on, ASID-aware code paths put the ASID+1 into the
         * PCID bits. This serves two purposes. It prevents a nasty
         * situation in which PCID-unaware code saves CR3, loads some other
         * value (with PCID == 0), and then restores CR3, thus corrupting
         * the TLB for ASID 0 if the saved ASID was nonzero. It also means
         * that any bugs involving loading a PCID-enabled CR3 with
         * CR4.PCIDE off will trigger deterministically.
         */
        return asid + 1;
}

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
        u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
        return ret;
}

struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
        if (static_cpu_has(X86_FEATURE_PCID)) {
                return __sme_pa(pgd) | kern_pcid(asid);
        } else {
                VM_WARN_ON_ONCE(asid != 0);
                return __sme_pa(pgd);
        }
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
        /*
         * Use boot_cpu_has() instead of this_cpu_has() as this function
         * might be called during early boot. This should work even after
         * boot because all CPUs have the same capabilities.
         */
        VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
        return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
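
/*
 * Illustrative CR3 layout assumed by the two helpers above (64-bit,
 * CR4.PCIDE=1): bits 11:0 hold the PCID (kern_pcid(asid) here), the
 * middle bits hold the physical address of the top-level page table
 * (__sme_pa(pgd)), and bit 63 (CR3_NOFLUSH) asks the CPU not to flush
 * the new PCID's cached translations on the write.
 */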

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
        /*
         * If we have PCID, then switching to init_mm is reasonably
         * fast. If we don't have PCID, then switching to init_mm is
         * quite slow, so we try to defer it in the hopes that we can
         * avoid it entirely. The latter approach runs the risk of
         * receiving otherwise unnecessary IPIs.
         *
         * This choice is just a heuristic. The tlb code can handle this
         * function returning true or false regardless of whether we have
         * PCID.
         */
        return !static_cpu_has(X86_FEATURE_PCID);
}

struct tlb_context {
        u64 ctx_id;     /* identifies the mm whose entries may be cached */
        u64 tlb_gen;    /* mm flush generation the cached entries are current with */
};

struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on. This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         *
         * During switch_mm_irqs_off(), loaded_mm will be set to
         * LOADED_MM_SWITCHING during the brief interrupts-off window
         * when CR3 and loaded_mm would otherwise be inconsistent. This
         * is for nmi_uaccess_okay()'s benefit.
         */
        struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1)

        /* Last user mm for optimizing IBPB */
        union {
                struct mm_struct *last_user_mm;
                unsigned long last_user_mm_ibpb;
        };

        u16 loaded_mm_asid;
        u16 next_asid;

        /*
         * We can be in one of several states:
         *
         *  - Actively using an mm. Our CPU's bit will be set in
         *    mm_cpumask(loaded_mm) and is_lazy == false.
         *
         *  - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
         *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         *  - Lazily using a real mm. loaded_mm != &init_mm, our bit
         *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *    We're heuristically guessing that the CR3 load we
         *    skipped more than makes up for the overhead added by
         *    lazy mode.
         */
        bool is_lazy;

        /*
         * If set we changed the page tables in such a way that we
         * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
         * This tells us to go invalidate all the non-loaded ctxs[]
         * on the next context switch.
         *
         * The current ctx was kept up-to-date as it ran and does not
         * need to be invalidated.
         */
        bool invalidate_other;

        /*
         * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
         * the corresponding user PCID needs a flush next time we
         * switch to it; see SWITCH_TO_USER_CR3.
         */
        unsigned short user_pcid_flush_mask;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;

        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs[].
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from. As an invariant, the TLB will never
         * contain entries that are out-of-date with respect to the
         * tlb_gen recorded here for that mm.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen. This can happen
         * (for now, at least) due to paravirt remote flushes.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code. This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or
 * switching the loaded mm. It can also be dangerous if we
 * interrupted some kernel code that was temporarily using a
 * different mm.
 */
static inline bool nmi_uaccess_okay(void)
{
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        struct mm_struct *current_mm = current->mm;

        VM_WARN_ON_ONCE(!loaded_mm);

        /*
         * The condition we want to check is
         * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
         * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
         * is supposed to be reasonably fast.
         *
         * Instead, we check the almost equivalent but somewhat conservative
         * condition below, and we rely on the fact that switch_mm_irqs_off()
         * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
         */
        if (loaded_mm != current_mm)
                return false;

        VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));

        return true;
}

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

static inline void __cr4_set(unsigned long cr4)
{
        lockdep_assert_irqs_disabled();
        this_cpu_write(cpu_tlbstate.cr4, cr4);
        __write_cr4(cr4);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4, flags;

        local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4)
                __cr4_set(cr4 | mask);
        local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4, flags;

        local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4)
                __cr4_set(cr4 & ~mask);
        local_irq_restore(flags);
}

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        __cr4_set(cr4 ^ mask);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Mark all other ASIDs as invalid, preserving the current one.
 */
static inline void invalidate_other_asid(void)
{
        this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

/*
 * Given an ASID, flush the corresponding user ASID. We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
        /* There is no user ASID if address space separation is off */
        if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
                return;

        /*
         * We only have a single ASID if PCID is off and the CR3
         * write will have flushed it.
         */
        if (!cpu_feature_enabled(X86_FEATURE_PCID))
                return;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        /* The mask is indexed by kPCID (asid + 1); see user_pcid_flush_mask above */
        __set_bit(kern_pcid(asid),
                  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
        /*
         * Preemption or interrupts must be disabled to protect the access
         * to the per CPU variable and to prevent being preempted between
         * read_cr3() and write_cr3().
         */
        WARN_ON_ONCE(preemptible());

        invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* If current->mm == NULL then the read_cr3() "borrows" an mm */
        native_write_cr3(__native_read_cr3());
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
        unsigned long cr4, flags;

        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
                 *
                 * Note, this works with CR4.PCIDE=0 or 1.
                 */
                invpcid_flush_all();
                return;
        }

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts. (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* toggle PGE; any change to CR4.PGE flushes the TLB, including global entries */
        native_write_cr4(cr4 ^ X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);

        raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_one_user(unsigned long addr)
{
        u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        /*
         * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
         * Just use invalidate_user_asid() in case we are called early.
         */
        if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
                invalidate_user_asid(loaded_mm_asid);
        else
                invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
        /*
         * Catch callers that have preemption enabled even when the PGE
         * feature is present; those take the __flush_tlb_global() path
         * and would never reach the warning in __native_flush_tlb().
         */
        VM_WARN_ON_ONCE(preemptible());

        if (boot_cpu_has(X86_FEATURE_PGE)) {
                __flush_tlb_global();
        } else {
                /*
                 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
                 */
                __flush_tlb();
        }
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one_kernel(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);

        /*
         * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
         * paravirt equivalent. Even with PCID, this is sufficient: we only
         * use PCID if we also use global PTEs for the kernel mapping, and
         * INVLPG flushes global translations across all address spaces.
         *
         * If PTI is on, then the kernel is mapped with non-global PTEs, and
         * __flush_tlb_one_user() will flush the given address for the current
         * kernel address space and for its usermode counterpart, but it does
         * not flush it for other address spaces.
         */
        __flush_tlb_one_user(addr);

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        /*
         * See above. We need to propagate the flush to all other address
         * spaces. In principle, we only need to propagate it to kernelmode
         * address spaces, but the extra bookkeeping we would need is not
         * worth it.
         */
        invalidate_other_asid();
}

#define TLB_FLUSH_ALL -1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm. .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm. .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated. .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        u64 new_tlb_gen;
};
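
/*
 * Illustrative only: a partial flush of one page of a single mm could be
 * described to the flush machinery as
 *
 *      struct flush_tlb_info info = {
 *              .mm          = mm,
 *              .start       = addr,
 *              .end         = addr + PAGE_SIZE,
 *              .new_tlb_gen = inc_mm_tlb_gen(mm),
 *      };
 */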

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end) \
        flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info);

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        /*
         * Bump the generation count. This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}
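
/*
 * arch_tlbbatch_add_mm() only records work: it bumps the mm's tlb_gen and
 * accumulates the mm's CPUs into the batch cpumask. The deferred flush for
 * everything collected in the batch is done later by arch_tlbbatch_flush().
 */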

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info) \
        native_flush_tlb_others(mask, info)

#define paravirt_tlb_remove_table(tlb, page) \
        tlb_remove_page(tlb, (void *)(page))
#endif

#endif /* _ASM_X86_TLBFLUSH_H */