/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * TLB shootdown specifics for powerpc
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 */
#ifndef _ASM_POWERPC_TLB_H
#define _ASM_POWERPC_TLB_H
#ifdef __KERNEL__

#ifndef __powerpc64__
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#endif

#include <linux/pagemap.h>

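/*
 * The per-VMA hooks are stubbed out: powerpc does its own TLB range
 * flushing from tlb_flush() and at PTE-update time, so no extra work is
 * needed when the generic mmu_gather enters or leaves a VMA.  The
 * "#define foo foo" lines tell <asm-generic/tlb.h> that this file
 * supplies its own foo() instead of the generic default.
 */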
#define tlb_start_vma(tlb, vma)		do { } while (0)
#define tlb_end_vma(tlb, vma)		do { } while (0)
#define __tlb_remove_tlb_entry		__tlb_remove_tlb_entry

#define tlb_flush tlb_flush
extern void tlb_flush(struct mmu_gather *tlb);
/*
 * book3s:
 * Hash does not use the Linux page tables, so we can avoid the TLB
 * invalidate for page-table freeing.  Radix, on the other hand, does
 * use the page tables and therefore needs the TLBI.
 *
 * nohash:
 * We still do a TLB invalidate in the __pte_free_tlb routine before we
 * add the page-table pages to the mmu_gather batch.
 */
#define tlb_needs_table_invalidate()	radix_enabled()
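/*
 * The generic mmu_gather code (tlb_table_invalidate() in the RCU
 * page-table-freeing path) checks this before freeing a page-table page
 * and only issues the extra TLB flush when it returns true.  This is a
 * rough description of <asm-generic/tlb.h> as of this override; see that
 * header for the authoritative behaviour.
 */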

/* Get the generic bits... */
#include <asm-generic/tlb.h>

extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

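/*
 * On 32-bit Book3S (hash MMU), a Linux PTE with _PAGE_HASHPTE set has a
 * matching entry in the hardware hash table; flush that entry when the
 * PTE is torn down so the hash table does not outlive the Linux mapping.
 * On everything else this hook is a no-op.
 */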
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
#ifdef CONFIG_PPC_BOOK3S_32
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(tlb->mm, ptep, address);
#endif
}

#ifdef CONFIG_SMP
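/*
 * True when every CPU that has used this mm is an SMT sibling of the
 * current CPU, i.e. the mm is confined to the local core and
 * invalidations do not need to be broadcast beyond it.
 */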
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}

#ifdef CONFIG_PPC_BOOK3S_64
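/*
 * Book3S-64 tracks the number of CPUs actively using an mm in
 * mm->context.active_cpus; the mm is thread-local only if no other CPU
 * is active and the current CPU is set in mm_cpumask().
 */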
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	if (atomic_read(&mm->context.active_cpus) > 1)
		return false;
	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
#else /* CONFIG_PPC_BOOK3S_64 */
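/*
 * Without the active_cpus counter, fall back to requiring that the mm's
 * cpumask contain exactly the current CPU.
 */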
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return cpumask_equal(mm_cpumask(mm),
			     cpumask_of(smp_processor_id()));
}
#endif /* !CONFIG_PPC_BOOK3S_64 */

#else /* CONFIG_SMP */
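/* UP: with a single CPU every mm is trivially core- and thread-local. */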
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}

static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
#endif /* CONFIG_SMP */
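
/*
 * Illustrative only: TLB flush paths use these helpers to choose between
 * local and broadcast invalidations, roughly along the lines of
 *
 *	if (mm_is_thread_local(mm))
 *		_tlbiel_pid(pid, RIC_FLUSH_TLB);	/" CPU-local tlbiel "/
 *	else
 *		_tlbie_pid(pid, RIC_FLUSH_TLB);		/" broadcast tlbie "/
 *
 * See arch/powerpc/mm/book3s64/radix_tlb.c for the real callers; the
 * helper names above come from that file and the exact flow differs.
 */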

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */