/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
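
/*
 * Illustrative sketch of the two step update described above, assuming
 * the ptep_xchg_direct() helper from asm/pgtable.h, which invalidates
 * the old entry before the new value becomes visible:
 *
 *	static void example_update_pte(struct mm_struct *mm, unsigned long addr,
 *				       pte_t *ptep, pte_t new)
 *	{
 *		ptep_xchg_direct(mm, addr, ptep, new);
 *	}
 *
 * Storing the new value directly with "*ptep = new" while the pte may be
 * attached to a running CPU would violate the rule quoted above.
 */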

void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size);

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
#define pmd_free_tlb pmd_free_tlb
#define p4d_free_tlb p4d_free_tlb
#define pud_free_tlb pud_free_tlb

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm-generic/tlb.h>

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been freed, so just do free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	free_page_and_swap_cache(page);
	return false;
}
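
/*
 * Returning false tells the generic mmu_gather code that no flush of a
 * gather buffer is needed. A rough sketch of the generic caller in
 * asm-generic/tlb.h (shown for illustration; see that header for the
 * authoritative version):
 *
 *	static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 *						struct page *page, int page_size)
 *	{
 *		if (__tlb_remove_page_size(tlb, page, page_size))
 *			tlb_flush_mmu(tlb);
 *	}
 */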

static inline void tlb_flush(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}
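
/*
 * __tlb_flush_mm_lazy() from asm/tlbflush.h only issues a real flush if
 * the flush_mm flag is set, which the *_free_tlb() helpers below do.
 * Roughly (illustrative sketch; see asm/tlbflush.h for the authoritative
 * version):
 *
 *	if (mm->context.flush_mm) {
 *		__tlb_flush_mm(mm);
 *		mm->context.flush_mm = 0;
 *	}
 */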

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_ptes = 1;
	/*
	 * page_table_free_rcu takes care of the allocation bit masks
	 * of the 2K table fragments in the 4K page table page,
	 * then calls tlb_remove_table.
	 */
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (mm_pmd_folded(tlb->mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_puds = 1;
	tlb_remove_table(tlb, pmd);
}
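
/*
 * The "asce_limit against 2GB" check mentioned above is what
 * mm_pmd_folded() boils down to; roughly (illustrative, see
 * asm/pgtable.h for the authoritative definition):
 *
 *	return mm->context.asce_limit <= _REGION3_SIZE;
 *
 * where _REGION3_SIZE corresponds to the 2GB limit mentioned in the
 * comment above.
 */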

/*
 * p4d_free_tlb frees a pud table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (mm_p4d_folded(tlb->mm))
		return;
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_p4ds = 1;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (mm_pud_folded(tlb->mm))
		return;
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_puds = 1;
	tlb_remove_table(tlb, pud);
}


#endif /* _S390_TLB_H */