/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>

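/*
 * Called by the generic mmu_gather code to release a page that previously
 * held page-table entries and was queued via tlb_remove_table(), once the
 * TLB invalidation has completed and it is safe to free the page.
 */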
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

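/*
 * Defining the tlb_flush macro tells asm-generic/tlb.h not to provide its
 * default tlb_flush() implementation; the forward declaration lets the
 * generic header's inline helpers call the arm64 version defined below.
 */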
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

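/*
 * TLB_FLUSH_VMA() builds a dummy vma on the stack purely to carry tlb->mm
 * into __flush_tlb_range(). last_level is true when no page-table pages
 * were freed, so only leaf entries need invalidating and the walk-cache
 * can be preserved. stride is the granule of the range being unmapped,
 * letting __flush_tlb_range() step one block at a time rather than one
 * page at a time.
 */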
static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
	bool last_level = !tlb->freed_tables;
	unsigned long stride = tlb_get_unmap_size(tlb);

	/*
	 * If we're tearing down the address space then we only care about
	 * invalidating the walk-cache, since the ASID allocator won't
	 * reallocate our ASID without invalidating the entire TLB.
	 */
	if (tlb->fullmm) {
		if (!last_level)
			flush_tlb_mm(tlb->mm);
		return;
	}

	__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
}

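/*
 * Page-table pages are freed via tlb_remove_table() so that they are not
 * reused while stale TLB entries (or concurrent lockless walkers) may
 * still reference them. The dtor undoes the corresponding ctor work
 * (e.g. the split page-table lock) before the page goes back to the
 * allocator.
 */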
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_pte_page_dtor(pte);
	tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
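/* As for PTEs: undo the ctor, then defer the free until after the TLB flush. */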
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	struct page *page = virt_to_page(pmdp);

	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
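/* Unlike PTE/PMD pages, PUD pages carry no ctor state, so there is nothing to tear down first. */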
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_remove_table(tlb, virt_to_page(pudp));
}
#endif

#endif