/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H

/* TLB flushing routines.... */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>

/* This is for the serialisation of PxTLB broadcasts.  At least on the
 * N class systems, only one PxTLB inter-processor broadcast can be
 * active at any one time on the Merced bus.  This TLB purge
 * synchronisation is fairly lightweight and harmless so we activate
 * it on all systems, not just the N class.
 *
 * It is also used to ensure PTE updates are atomic and consistent
 * with the TLB.
 */
extern spinlock_t pa_tlb_lock;

#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)
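
/*
 * Illustrative sketch only, not part of the original header: one way a
 * caller could serialise a PTE update with the purge of the matching
 * TLB entry under pa_tlb_lock, as described above.  The function name
 * is hypothetical; set_pte(), mtsp(), pdtlb() and pitlb() are the
 * primitives used elsewhere in this file.
 */
static inline void example_set_pte_and_purge(pte_t *ptep, pte_t pteval,
					     unsigned long sid,
					     unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);		/* take pa_tlb_lock, disable IRQs */
	set_pte(ptep, pteval);		/* update is now atomic w.r.t. purges */
	mtsp(sid, 1);			/* select the address space in %sr1 */
	pdtlb(addr);			/* purge the stale data TLB entry */
	if (unlikely(split_tlb))
		pitlb(addr);		/* split I/D TLB: purge the ITLB too */
	purge_tlb_end(flags);		/* drop the lock, restore IRQs */
}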

extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);

#define smp_flush_tlb_all()	flush_tlb_all()

int __flush_tlb_range(unsigned long sid,
		      unsigned long start, unsigned long end);

#define flush_tlb_range(vma, start, end) \
	__flush_tlb_range((vma)->vm_mm->context, start, end)

#define flush_tlb_kernel_range(start, end) \
	__flush_tlb_range(0, start, end)
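
/*
 * Illustrative sketch only, not part of the original header: kernel
 * mappings live in space id 0, which is why flush_tlb_kernel_range()
 * passes 0 above.  A caller that rewrites kernel PTEs for a range
 * would purge the stale translations like this; the helper name is
 * hypothetical.
 */
static inline void example_kernel_range_update(unsigned long start,
					       unsigned long end)
{
	/* ... caller rewrites the kernel PTEs covering [start, end) ... */
	flush_tlb_kernel_range(start, end);
}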

/*
 * flush_tlb_mm()
 *
 * The code to switch to a new context is NOT valid for processes
 * which play with the space ids.  Thus, we have to preserve the
 * space id and just flush the entire TLB.  However, the compilers,
 * dynamic linker, etc, do not manipulate space ids, so there
 * could be a significant performance benefit in switching contexts
 * and not flushing the whole TLB.
 */

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm); /* Should never happen */

#if 1 || defined(CONFIG_SMP)
	/* Except for very small threads, flushing the whole TLB is
	 * faster than using __flush_tlb_range.  The pdtlb and pitlb
	 * instructions are very slow because of the TLB broadcast.
	 * It might be faster to do local range flushes on all CPUs
	 * on PA 2.0 systems.
	 */
	flush_tlb_all();
#else
	/* FIXME: currently broken, causing space id and protection ids
	 * to go out of sync, resulting in faults on userspace accesses.
	 * This approach needs further investigation since running many
	 * small applications (e.g., GCC testsuite) is faster on HP-UX.
	 */
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	unsigned long flags, sid;

	sid = vma->vm_mm->context;
	purge_tlb_start(flags);		/* serialise on pa_tlb_lock */
	mtsp(sid, 1);			/* select the mm's space id in %sr1 */
	pdtlb(addr);			/* purge the data TLB entry */
	if (unlikely(split_tlb))
		pitlb(addr);		/* split I/D TLB: purge the ITLB too */
	purge_tlb_end(flags);
}
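
/*
 * Illustrative sketch only, not part of the original header: a caller
 * that has changed user PTEs can purge one stale translation with
 * flush_tlb_page() or a span with flush_tlb_range().  The helper name
 * is hypothetical.
 */
static inline void example_flush_user_range(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long end)
{
	if (end - start == PAGE_SIZE)
		flush_tlb_page(vma, start);	/* single stale entry */
	else
		flush_tlb_range(vma, start, end);
}
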
#endif /* _PARISC_TLBFLUSH_H */