/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
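
/*
 * Free a page-table page previously queued with tlb_remove_table().
 * The generic mmu_gather code only calls this once it is safe to do
 * so, i.e. after the TLB has been invalidated (and, with RCU table
 * freeing enabled, after a grace period). free_page_and_swap_cache()
 * additionally drops any swap-cache entry still attached to the page.
 */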
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}
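
/*
 * Defining tlb_flush to itself before including <asm-generic/tlb.h>
 * tells the generic header that arm64 supplies its own tlb_flush()
 * implementation (defined further down), so the header does not
 * provide its own version; the forward declaration lets the generic
 * code reference it.
 */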
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

/*
 * Get the TLBI level hint for arm64. The value defaults to 0 (no
 * hint) if more than one of the cleared_* fields is set, or if none
 * of them is set. arm64 does not support p4ds at present.
 */
static inline int tlb_get_level(struct mmu_gather *tlb)
{
	/* The TTL field is only valid for the leaf entry. */
	if (tlb->freed_tables)
		return 0;

	if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
				   tlb->cleared_puds ||
				   tlb->cleared_p4ds))
		return 3;

	if (tlb->cleared_pmds && !(tlb->cleared_ptes ||
				   tlb->cleared_puds ||
				   tlb->cleared_p4ds))
		return 2;

	if (tlb->cleared_puds && !(tlb->cleared_ptes ||
				   tlb->cleared_pmds ||
				   tlb->cleared_p4ds))
		return 1;

	return 0;
}
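
/*
 * Worked example (a sketch): unmapping a single 4K page sets only
 * tlb->cleared_ptes, so tlb_get_level() returns 3 and the TLBI
 * instructions issued by __flush_tlb_range() can carry a level-3 TTL
 * hint (FEAT_TTL, ARMv8.4) naming the translation level of the leaf
 * entry. Any mix of levels, or a freed table, yields 0: level unknown.
 *
 * In tlb_flush() below, last_level == true (no tables freed) means the
 * walk-cache need not be invalidated, and stride is the granule size
 * of the unmapped range, so __flush_tlb_range() can step one entry at
 * a time.
 */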
static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
	bool last_level = !tlb->freed_tables;
	unsigned long stride = tlb_get_unmap_size(tlb);
	int tlb_level = tlb_get_level(tlb);

	/*
	 * If we're tearing down the address space then we only care about
	 * invalidating the walk-cache, since the ASID allocator won't
	 * reallocate our ASID without invalidating the entire TLB.
	 */
	if (tlb->fullmm) {
		if (!last_level)
			flush_tlb_mm(tlb->mm);
		return;
	}

	__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
			  last_level, tlb_level);
}
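
/*
 * Typical call sequence driving the hooks in this file (a sketch; the
 * exact signatures vary across kernel versions):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_page_range(&tlb, vma, start, end, NULL); // fills cleared_*
 *	tlb_finish_mmu(&tlb, start, end);              // calls tlb_flush()
 */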
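
/*
 * The __p*_free_tlb() hooks below hand page-table pages to
 * tlb_remove_table(), deferring the actual free to __tlb_remove_table()
 * above until after the TLB flush. The PTE and PMD variants must also
 * run the matching pgtable destructor to release the split page-table
 * lock state set up when the table was allocated.
 */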
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_pte_page_dtor(pte);
	tlb_remove_table(tlb, pte);
}
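
/*
 * With fewer page-table levels configured, the pmd (and pud) levels
 * are folded into their parent, so there is no separate table page to
 * free; hence the CONFIG_PGTABLE_LEVELS guards below.
 */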
#if CONFIG_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	struct page *page = virt_to_page(pmdp);

	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_remove_table(tlb, virt_to_page(pudp));
}
#endif

#endif