/*
 * Common implementation of switch_mm_irqs_off
 *
 * Copyright IBM Corp. 2017
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        /* 32-bit keeps track of the current PGDIR in the thread struct */
        tsk->thread.pgdir = mm->pgd;
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        /* 64-bit Book3E keeps track of current PGD in the PACA */
        get_paca()->pgd = mm->pgd;
}
#else
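/*
 * 64-bit Book3S needs no explicit tracking here: its context is carried
 * in the PACA and SPRN_PID instead (see the barrier comment in
 * switch_mm_irqs_off() below).
 */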
static inline void switch_mm_pgdir(struct task_struct *tsk,
                                   struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        bool new_on_cpu = false;

        /* Mark this context as having been used on the new CPU */
        if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
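                /* Account one more CPU actively using next's context */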
                inc_mm_active_cpus(next);

                /*
                 * This full barrier orders the store to the cpumask above vs
                 * a subsequent operation which allows this CPU to begin
                 * loading translations for next.
                 *
                 * When using the radix MMU, that operation is the load of the
                 * MMU context id, which is then moved to SPRN_PID.
                 *
                 * For the hash MMU it is the first load from slb_cache in
                 * switch_slb() and/or the store of paca->mm_ctx_id in
                 * copy_mm_to_paca().
                 *
                 * On the other side, the barrier is in mm/tlb-radix.c for
                 * radix, which orders earlier stores to clear the PTEs vs
                 * the load of mm_cpumask; pte_xchg() does the same thing
                 * for hash.
                 *
                 * This full barrier is also needed by membarrier when
                 * switching between processes after the store to rq->curr,
                 * before user-space memory accesses.
                 */
                smp_mb();

                new_on_cpu = true;
        }

        /* Some subarchs need to track the PGD elsewhere */
        switch_mm_pgdir(tsk, next);

        /* Nothing else to do if we aren't actually switching */
        if (prev == next)
                return;

        /*
         * We must stop all AltiVec streams before changing the HW
         * context; dssall stops every active data stream prefetch.
         */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                asm volatile ("dssall");

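        /*
         * When next is new on this CPU, the smp_mb() above already provided
         * the full barrier that membarrier requires, so only the KVM radix
         * prefetch workaround is needed; otherwise let membarrier do its
         * architecture-specific work.
         */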
        if (new_on_cpu)
                radix_kvm_prefetch_workaround(next);
        else
                membarrier_arch_switch_mm(prev, next, tsk);

        /*
         * The actual HW switching method differs between the various
         * sub-architectures; it is out of line for now.
         */
        switch_mmu_context(prev, next, tsk);
}
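
/*
 * Usage sketch (an assumption, not part of this file): callers must have
 * interrupts disabled, as the function name implies. A generic switch_mm()
 * wrapper in asm/mmu_context.h would look roughly like:
 *
 *	static inline void switch_mm(struct mm_struct *prev,
 *				     struct mm_struct *next,
 *				     struct task_struct *tsk)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		switch_mm_irqs_off(prev, next, tsk);
 *		local_irq_restore(flags);
 *	}
 */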