/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLB_H
#define _ASM_X86_TLB_H

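/*
 * x86 flushes the whole gathered range in one go from tlb_flush()
 * below, so the generic per-VMA and per-PTE hooks have nothing to
 * do and are stubbed out.
 */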
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)

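/*
 * Defining the tlb_flush macro tells asm-generic/tlb.h to skip its
 * generic tlb_flush() and use ours instead. The forward declaration
 * is needed because the definition below uses tlb_get_unmap_shift(),
 * which only becomes available once asm-generic/tlb.h is included.
 */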
#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

static inline void tlb_flush(struct mmu_gather *tlb)
{
	unsigned long start = 0UL, end = TLB_FLUSH_ALL;
	unsigned int stride_shift = tlb_get_unmap_shift(tlb);

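	/*
	 * fullmm is set when the whole address space is being torn down,
	 * and need_flush_all when an operation demanded a complete flush;
	 * in both cases leave end at TLB_FLUSH_ALL. Otherwise flush only
	 * the range the mmu_gather actually accumulated.
	 */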
	if (!tlb->fullmm && !tlb->need_flush_all) {
		start = tlb->start;
		end = tlb->end;
	}

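	/*
	 * stride_shift tells flush_tlb_mm_range() the granularity of the
	 * unmapped entries (base vs. huge pages) so it can step through
	 * the range accordingly; freed_tables reports that page-table
	 * pages themselves were freed, which the flush code takes into
	 * account when choosing how much to invalidate.
	 */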
	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
}

/*
 * While the x86 architecture in general requires an IPI to perform a
 * TLB shootdown, the enablement code for several hypervisors overrides
 * the .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
 * a hypercall instead. To keep software pagetable walkers (such as
 * GUP-fast, which relies on disabling interrupts to block that IPI)
 * safe in this case, we switch to RCU-based table freeing
 * (HAVE_RCU_TABLE_FREE). See the comment below
 * 'ifdef CONFIG_HAVE_RCU_TABLE_FREE' in include/asm-generic/tlb.h
 * for more details.
 */
static inline void __tlb_remove_table(void *table)
{
	/*
	 * Actually free the page that held the page table (dropping any
	 * swap-cache reference), once the mmu_gather machinery deems it
	 * safe to do so.
	 */
	free_page_and_swap_cache(table);
}

#endif /* _ASM_X86_TLB_H */