/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>

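/*
 * Called by the generic mmu_gather code once it is safe to free a
 * page-table page, i.e. after the TLB invalidation for the gathered
 * range has completed and no walker can still reach the table.
 */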
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

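/*
 * asm-generic/tlb.h only provides a default tlb_flush() when the
 * architecture has not defined one. Defining the macro and
 * forward-declaring the function here makes the generic header pick up
 * the arm64 implementation below.
 */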
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

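/*
 * TLB_FLUSH_VMA() fabricates a minimal on-stack vma so that
 * __flush_tlb_range() can be reused for the gathered range. When no
 * page-table pages were freed (last_level), only leaf entries need
 * invalidating and cached intermediate walk entries may survive. The
 * stride reflects the page size of the mappings being unmapped.
 */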
static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
	bool last_level = !tlb->freed_tables;
	unsigned long stride = tlb_get_unmap_size(tlb);

	/*
	 * If we're tearing down the address space then we only care about
	 * invalidating the walk-cache, since the ASID allocator won't
	 * reallocate our ASID without invalidating the entire TLB.
	 */
	if (tlb->fullmm) {
		if (!last_level)
			flush_tlb_mm(tlb->mm);
		return;
	}

	__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
}

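/*
 * PTE pages were set up with pgtable_pte_page_ctor(), so undo that
 * (tearing down split page-table lock state) before the page is queued
 * for a deferred free via tlb_remove_table().
 */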
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_pte_page_dtor(pte);
	tlb_remove_table(tlb, pte);
}

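/*
 * PMD-level tables have a matching ctor/dtor pair when split PMD locks
 * are in use, so release that state before freeing the page.
 */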
#if CONFIG_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	struct page *page = virt_to_page(pmdp);

	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}
#endif

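/* PUD-level tables carry no constructor state; just defer the free. */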
#if CONFIG_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_remove_table(tlb, virt_to_page(pudp));
}
#endif

#endif