Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame^] | 1 | /* |
| 2 | * TLB shootdown specifics for powerpc |
| 3 | * |
| 4 | * Copyright (C) 2002 Anton Blanchard, IBM Corp. |
| 5 | * Copyright (C) 2002 Paul Mackerras, IBM Corp. |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or |
| 8 | * modify it under the terms of the GNU General Public License |
| 9 | * as published by the Free Software Foundation; either version |
| 10 | * 2 of the License, or (at your option) any later version. |
| 11 | */ |
| 12 | #ifndef _ASM_POWERPC_TLB_H |
| 13 | #define _ASM_POWERPC_TLB_H |
| 14 | #ifdef __KERNEL__ |
| 15 | |
| 16 | #ifndef __powerpc64__ |
| 17 | #include <asm/pgtable.h> |
| 18 | #endif |
| 19 | #include <asm/pgalloc.h> |
| 20 | #ifndef __powerpc64__ |
| 21 | #include <asm/page.h> |
| 22 | #include <asm/mmu.h> |
| 23 | #endif |
| 24 | |
| 25 | #include <linux/pagemap.h> |
| 26 | |
| 27 | #define tlb_start_vma(tlb, vma) do { } while (0) |
| 28 | #define tlb_end_vma(tlb, vma) do { } while (0) |
| 29 | #define __tlb_remove_tlb_entry __tlb_remove_tlb_entry |
| 30 | #define tlb_remove_check_page_size_change tlb_remove_check_page_size_change |
| 31 | |
| 32 | extern void tlb_flush(struct mmu_gather *tlb); |
| 33 | |
| 34 | /* Get the generic bits... */ |
| 35 | #include <asm-generic/tlb.h> |
| 36 | |
| 37 | extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, |
| 38 | unsigned long address); |
| 39 | |
| 40 | static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, |
| 41 | unsigned long address) |
| 42 | { |
| 43 | #ifdef CONFIG_PPC_STD_MMU_32 |
| 44 | if (pte_val(*ptep) & _PAGE_HASHPTE) |
| 45 | flush_hash_entry(tlb->mm, ptep, address); |
| 46 | #endif |
| 47 | } |
| 48 | |
| 49 | static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, |
| 50 | unsigned int page_size) |
| 51 | { |
| 52 | if (!tlb->page_size) |
| 53 | tlb->page_size = page_size; |
| 54 | else if (tlb->page_size != page_size) { |
| 55 | if (!tlb->fullmm) |
| 56 | tlb_flush_mmu(tlb); |
| 57 | /* |
| 58 | * update the page size after flush for the new |
| 59 | * mmu_gather. |
| 60 | */ |
| 61 | tlb->page_size = page_size; |
| 62 | } |
| 63 | } |
| 64 | |
| 65 | #ifdef CONFIG_SMP |
/*
 * Return true if @mm has only ever run on CPUs that are hardware
 * siblings of the current CPU (i.e. the same core), meaning a
 * core-local TLB flush is sufficient.
 */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	const struct cpumask *core = topology_sibling_cpumask(smp_processor_id());

	return cpumask_subset(mm_cpumask(mm), core);
}
| 71 | |
| 72 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 73 | static inline int mm_is_thread_local(struct mm_struct *mm) |
| 74 | { |
| 75 | if (atomic_read(&mm->context.active_cpus) > 1) |
| 76 | return false; |
| 77 | return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)); |
| 78 | } |
/*
 * Shrink @mm's cpumask back to just the current CPU and mark it as
 * having a single active CPU, so subsequent TLB flushes can be local.
 * Only valid when the caller owns the mm (current->mm == mm) and no
 * coprocessor contexts hold references to it.
 *
 * NOTE(review): the ordering clear-then-set of the cpumask, and the
 * atomic_set of active_cpus before it, appears significant for
 * concurrent readers of mm_cpumask() — preserved byte-for-byte.
 */
static inline void mm_reset_thread_local(struct mm_struct *mm)
{
	/* Copros snoop the cpumask; resetting it under them is unsafe. */
	WARN_ON(atomic_read(&mm->context.copros) > 0);
	/*
	 * It's possible for mm_access to take a reference on mm_users to
	 * access the remote mm from another thread, but it's not allowed
	 * to set mm_cpumask, so mm_users may be > 1 here.
	 */
	WARN_ON(current->mm != mm);
	atomic_set(&mm->context.active_cpus, 1);
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
| 92 | #else /* CONFIG_PPC_BOOK3S_64 */ |
/*
 * Without an active-CPU counter, @mm is thread-local exactly when its
 * cpumask contains the current CPU and nothing else.
 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	const struct cpumask *self = cpumask_of(smp_processor_id());

	return cpumask_equal(mm_cpumask(mm), self);
}
| 98 | #endif /* !CONFIG_PPC_BOOK3S_64 */ |
| 99 | |
| 100 | #else /* CONFIG_SMP */ |
/* With only one CPU, every mm is trivially core-local. */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}
| 105 | |
/* With only one CPU, every mm is trivially thread-local. */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
| 110 | #endif |
| 111 | |
| 112 | #endif /* __KERNEL__ */ |
#endif /* _ASM_POWERPC_TLB_H */