#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

/*
 * TLB flushing, formerly SMP-only
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
 * stored in cpu_tlbstate.last_user_mm_ibpb.
 */
#define LAST_USER_MM_IBPB	0x1UL

/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts.  We do the
 * necessary invalidation by clearing out the 'ctx_id' which
 * forces a TLB flush when the context is loaded.
 */
static void clear_asid_other(void)
{
	u16 asid;

	/*
	 * This is only expected to be set if we have disabled
	 * kernel _PAGE_GLOBAL pages.
	 */
	if (!static_cpu_has(X86_FEATURE_PTI)) {
		WARN_ON_ONCE(1);
		return;
	}

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		/* Do not need to flush the current asid */
		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
			continue;
		/*
		 * Make sure the next time we go to switch to
		 * this asid, we do a flush:
		 */
		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
	}
	this_cpu_write(cpu_tlbstate.invalidate_other, false);
}

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);


static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
			    u16 *new_asid, bool *need_flush)
{
	u16 asid;

	if (!static_cpu_has(X86_FEATURE_PCID)) {
		*new_asid = 0;
		*need_flush = true;
		return;
	}

	if (this_cpu_read(cpu_tlbstate.invalidate_other))
		clear_asid_other();

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
		    next->context.ctx_id)
			continue;

		*new_asid = asid;
		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
			       next_tlb_gen);
		return;
	}

	/*
	 * We don't currently own an ASID slot on this CPU.
	 * Allocate a slot.
	 */
	*new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
	if (*new_asid >= TLB_NR_DYN_ASIDS) {
		*new_asid = 0;
		this_cpu_write(cpu_tlbstate.next_asid, 1);
	}
	*need_flush = true;
}

static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
	unsigned long new_mm_cr3;

	if (need_flush) {
		invalidate_user_asid(new_asid);
		new_mm_cr3 = build_cr3(pgdir, new_asid);
	} else {
		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
	}

	/*
	 * Caution: many callers of this function expect
	 * that load_cr3() is serializing and orders TLB
	 * fills with respect to the mm_cpumask writes.
	 */
	write_cr3(new_mm_cr3);
}

void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	/* Warn if we're not lazy. */
	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

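/*
 * Interrupt-safe wrapper: disable interrupts around switch_mm_irqs_off()
 * so that the switch cannot race with the TLB flush IPI handlers.
 */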
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

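/*
 * Make sure the vmalloc'ed stack we are currently running on is mapped in
 * the page tables of @mm before CR3 is switched to it (see the VMAP_STACK
 * comment at the call site in switch_mm_irqs_off()).
 */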
static void sync_current_stack_to_mm(struct mm_struct *mm)
{
	unsigned long sp = current_stack_pointer;
	pgd_t *pgd = pgd_offset(mm, sp);

	if (pgtable_l5_enabled()) {
		if (unlikely(pgd_none(*pgd))) {
			pgd_t *pgd_ref = pgd_offset_k(sp);

			set_pgd(pgd, *pgd_ref);
		}
	} else {
		/*
		 * "pgd" is faked.  The top level entries are "p4d"s, so sync
		 * the p4d.  This compiles to approximately the same code as
		 * the 5-level case.
		 */
		p4d_t *p4d = p4d_offset(pgd, sp);

		if (unlikely(p4d_none(*p4d))) {
			pgd_t *pgd_ref = pgd_offset_k(sp);
			p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);

			set_p4d(p4d, *p4d_ref);
		}
	}
}

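/*
 * Encode the incoming task's TIF_SPEC_IB state into bit 0 of its mm
 * pointer (see LAST_USER_MM_IBPB above), so that cond_ibpb() can compare
 * it against cpu_tlbstate.last_user_mm_ibpb in a single check.
 */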
static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
	unsigned long next_tif = task_thread_info(next)->flags;
	unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;

	return (unsigned long)next->mm | ibpb;
}

static void cond_ibpb(struct task_struct *next)
{
	if (!next || !next->mm)
		return;

	/*
	 * Both the conditional and the always IBPB mode use the mm
	 * pointer to avoid the IBPB when switching between tasks of the
	 * same process. Using the mm pointer instead of mm->context.ctx_id
	 * opens a hypothetical hole vs. mm_struct reuse, which is more or
	 * less impossible to control by an attacker. Aside from that, it
	 * would only affect the first schedule, so the theoretically
	 * exposed data is not really interesting.
	 */
	if (static_branch_likely(&switch_mm_cond_ibpb)) {
		unsigned long prev_mm, next_mm;

		/*
		 * This is a bit more complex than the always mode because
		 * it has to handle two cases:
		 *
		 * 1) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB not set.
		 *
		 * 2) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB not set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB set.
		 *
		 * This could be done by unconditionally issuing IBPB when
		 * a task which has TIF_SPEC_IB set is either scheduled in
		 * or out. Though that results in two flushes when:
		 *
		 * - the same user space task is scheduled out and later
		 *   scheduled in again and only a kernel thread ran in
		 *   between.
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in after a kernel thread ran in between
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in immediately.
		 *
		 * Optimize this with reasonably small overhead for the
		 * above cases. Mangle the TIF_SPEC_IB bit into the mm
		 * pointer of the incoming task which is stored in
		 * cpu_tlbstate.last_user_mm_ibpb for comparison.
		 */
		next_mm = mm_mangle_tif_spec_ib(next);
		prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);

		/*
		 * Issue IBPB only if the mm's are different and one or
		 * both have the IBPB bit set.
		 */
		if (next_mm != prev_mm &&
		    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
			indirect_branch_prediction_barrier();

		this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
	}

	if (static_branch_unlikely(&switch_mm_always_ibpb)) {
		/*
		 * Only flush when switching to a user space task with a
		 * different context than the user space task which ran
		 * last on this CPU.
		 */
		if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
			indirect_branch_prediction_barrier();
			this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
		}
	}
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	unsigned cpu = smp_processor_id();
	u64 next_tlb_gen;

	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, we don't assume that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	/* We don't want flush_tlb_func_* to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Verify that CR3 is what we think it is.  This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 *
		 * (This is far from being a fully correct recovery.
		 *  Architecturally, the CPU could prefetch something
		 *  back into an incorrect ASID slot and leave it there
		 *  to cause trouble down the road.  It's better than
		 *  nothing, though.)
		 */
		__flush_tlb_all();
	}
#endif
	this_cpu_write(cpu_tlbstate.is_lazy, false);

	/*
	 * The membarrier system call requires a full memory barrier and
	 * core serialization before returning to user-space, after
	 * storing to rq->curr. Writing to CR3 provides that full
	 * memory barrier and core serializing instruction.
	 */
	if (real_prev == next) {
		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			   next->context.ctx_id);

		/*
		 * We don't currently support having a real mm loaded without
		 * our cpu set in mm_cpumask().  We have all the bookkeeping
		 * in place to figure out whether we would need to flush
		 * if our cpu were cleared in mm_cpumask(), but we don't
		 * currently use it.
		 */
		if (WARN_ON_ONCE(real_prev != &init_mm &&
				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));

		return;
	} else {
		u16 new_asid;
		bool need_flush;

		/*
		 * Avoid user/user BTB poisoning by flushing the branch
		 * predictor when switching between processes. This stops
		 * one process from doing Spectre-v2 attacks on another.
		 */
		cond_ibpb(tsk);

		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			sync_current_stack_to_mm(next);
		}

		/*
		 * Stop remote flushes for the previous mm.
		 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
		 * but the bitmap manipulation can cause cache line contention.
		 */
		if (real_prev != &init_mm) {
			VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
						mm_cpumask(real_prev)));
			cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
		}

		/*
		 * Start remote flushes and then read tlb_gen.
		 */
		if (next != &init_mm)
			cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

		/* Let nmi_uaccess_okay() know that we're changing CR3. */
		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
		barrier();

		if (need_flush) {
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
			load_new_mm_cr3(next->pgd, new_asid, true);

			/*
			 * NB: This gets called via leave_mm() in the idle path
			 * where RCU functions differently.  Tracing normally
			 * uses RCU, so we need to use the _rcuidle variant.
			 *
			 * (There is no good reason for this.  The idle code should
			 *  be rearranged to call this before rcu_idle_enter().)
			 */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
		} else {
			/* The new ASID is already up to date. */
			load_new_mm_cr3(next->pgd, new_asid, false);

			/* See above wrt _rcuidle. */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
		}

		/* Make sure we write CR3 before loaded_mm. */
		barrier();

		this_cpu_write(cpu_tlbstate.loaded_mm, next);
		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
	}

	load_mm_cr4(next);
	switch_ldt(real_prev, next);
}

/*
 * Please ignore the name of this function.  It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm.  Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row.  It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
		return;

	if (tlb_defer_switch_to_init_mm()) {
		/*
		 * There's a significant optimization that may be possible
		 * here.  We have accurate enough TLB flush tracking that we
		 * don't need to maintain coherence of TLB per se when we're
		 * lazy.  We do, however, need to maintain coherence of
		 * paging-structure caches.  We could, in principle, leave our
		 * old mm loaded and only switch to init_mm when
		 * tlb_remove_page() happens.
		 */
		this_cpu_write(cpu_tlbstate.is_lazy, true);
	} else {
		switch_mm(NULL, &init_mm, NULL);
	}
}

/*
 * Call this when reinitializing a CPU.  It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear.  CPU hotplug can do this.)
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
	int i;
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
	unsigned long cr3 = __read_cr3();

	/* Assert that CR3 already references the right mm. */
	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

	/*
	 * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
	 * doesn't work like other CR4 bits because it can only be set from
	 * long mode.)
	 */
	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm->pgd, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}

/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/*
	 * We have three different tlb_gen values in here.  They are:
	 *
	 * - mm_tlb_gen:     the latest generation.
	 * - local_tlb_gen:  the generation that this CPU has already caught
	 *                   up to.
	 * - f->new_tlb_gen: the generation that the requester of the flush
	 *                   wants us to catch up to.
	 */
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);

	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (unlikely(loaded_mm == &init_mm))
		return;

	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
		   loaded_mm->context.ctx_id);

	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
		/*
		 * We're in lazy mode.  We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB.  Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}

	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
		/*
		 * There's nothing to do: we're already up to date.  This can
		 * happen if two concurrent flushes happen -- the first flush to
		 * be handled can catch us all the way up, leaving no work for
		 * the second flush.
		 */
		trace_tlb_flush(reason, 0);
		return;
	}

	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

	/*
	 * If we get to this point, we know that our TLB is out of date.
	 * This does not strictly imply that we need to flush (it's
	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
	 * going to need to flush in the very near future, so we might
	 * as well get it over with.
	 *
	 * The only question is whether to do a full or partial flush.
	 *
	 * We do a partial flush if requested and two extra conditions
	 * are met:
	 *
	 * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
	 *    we've always done all needed flushes to catch up to
	 *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
	 *    us up to date for tlb_gen 3 is the partial flush we're
	 *    processing.
	 *
	 *    As an example of why this check is needed, suppose that there
	 *    are two concurrent flushes.  The first is a full flush that
	 *    changes context.tlb_gen from 1 to 2.  The second is a partial
	 *    flush that changes context.tlb_gen from 2 to 3.  If they get
	 *    processed on this CPU in reverse order, we'll see
	 *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
	 *    If we were to use __flush_tlb_one_user() and set local_tlb_gen to
	 *    3, we'd break the invariant: we'd update local_tlb_gen above
	 *    1 without the full flush that's needed for tlb_gen 2.
	 *
	 * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
	 *    Partial TLB flushes are not all that much cheaper than full TLB
	 *    flushes, so it seems unlikely that it would be a performance win
	 *    to do a partial flush if that won't bring our TLB fully up to
	 *    date.  By doing a full flush instead, we can increase
	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
	 *    avoid another flush in the very near future.
	 */
	if (f->end != TLB_FLUSH_ALL &&
	    f->new_tlb_gen == local_tlb_gen + 1 &&
	    f->new_tlb_gen == mm_tlb_gen) {
		/* Partial flush */
		unsigned long addr;
		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

		addr = f->start;
		while (addr < f->end) {
			__flush_tlb_one_user(addr);
			addr += PAGE_SIZE;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
		trace_tlb_flush(reason, nr_pages);
	} else {
		/* Full flush. */
		local_flush_tlb();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		trace_tlb_flush(reason, TLB_FLUSH_ALL);
	}

	/* Both paths above update our state to mm_tlb_gen. */
	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}

static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
	const struct flush_tlb_info *f = info;

	flush_tlb_func_common(f, true, reason);
}

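/*
 * SMP call function callback: bail out if the IPI arrives after this CPU
 * has already switched away from the mm being flushed.
 */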
static void flush_tlb_func_remote(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		/*
		 * This whole special case is confused.  UV has a "Broadcast
		 * Assist Unit", which seems to be a fancy way to send IPIs.
		 * Back when x86 used an explicit TLB flush IPI, UV was
		 * optimized to use its own mechanism.  These days, x86 uses
		 * smp_call_function_many(), but UV still uses a manual IPI,
		 * and that IPI's action is out of date -- it does a manual
		 * flush instead of calling flush_tlb_func_remote().  This
		 * means that the percpu tlb_gen variables won't be updated
		 * and we'll do pointless flushes on future context switches.
		 *
		 * Rather than hooking native_flush_tlb_others() here, I think
		 * that UV should be updated so that smp_call_function_many(),
		 * etc, are optimal on UV.
		 */
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func_remote,
					       (void *)info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func_remote,
			       (void *)info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned long vmflag)
{
	int cpu;

	struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
		.mm = mm,
	};

	cpu = get_cpu();

	/* This is also a barrier that synchronizes with switch_mm(). */
	info.new_tlb_gen = inc_mm_tlb_gen(mm);

	/* Should we flush just the requested range? */
	if ((end != TLB_FLUSH_ALL) &&
	    !(vmflag & VM_HUGETLB) &&
	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
		info.start = start;
		info.end = end;
	} else {
		info.start = 0UL;
		info.end = TLB_FLUSH_ALL;
	}

	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);

	put_cpu();
}


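/*
 * IPI callback used by flush_tlb_all() and flush_tlb_kernel_range():
 * flush everything on this CPU, including global (kernel) mappings.
 */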
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* Flush the range one page at a time with 'invlpg'. */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_one_kernel(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{

	/* Apply the same ceiling as for a user space task's flush; a bit conservative. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.start = start;
		info.end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info info = {
		.mm = NULL,
		.start = 0UL,
		.end = TLB_FLUSH_ALL,
	};

	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &info);

	cpumask_clear(&batch->cpumask);

	put_cpu();
}

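/*
 * Expose tlb_single_page_flush_ceiling as a read/write debugfs file under
 * the x86 arch debugfs directory, so the threshold can be tuned at run time.
 */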
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);