/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL	-1UL

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}

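/*
 * Illustrative sketch, not part of the upstream header: callers use the
 * plain helpers when the IRQ state is unknown, and the _irqsoff variants
 * only when interrupts are already disabled, e.g.
 *
 *	cr4_set_bits(X86_CR4_PCE);		// IRQ state unknown
 *	cr4_clear_bits_irqsoff(X86_CR4_PCE);	// IRQs already off
 *
 * X86_CR4_PCE is only an assumed example bit from <asm/processor-flags.h>.
 */
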
#ifndef MODULE
/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_ibpb;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date relative to the point
	 * at which that mm reached the tlb_gen recorded here.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

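/*
 * Illustrative sketch, an assumption rather than part of this header:
 * TLB code elsewhere reads this per-cpu state with the normal percpu
 * accessors, e.g.
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 */
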
bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
	unsigned int		stride_shift;
	bool			freed_tables;
};

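/*
 * Illustrative sketch, an assumption rather than part of this header:
 * a partial flush of one mm could fill in the descriptor along these
 * lines before handing it to flush_tlb_others() (declared below):
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= start,
 *		.end		= end,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *		.stride_shift	= PAGE_SHIFT,
 *		.freed_tables	= false,
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */
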
void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_others(const struct cpumask *cpumask,
		      const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}

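/*
 * Illustrative sketch, an assumption rather than part of this header:
 * unmapping a single 2MB huge page could use a 2MB stride and note
 * that no page-table pages were freed:
 *
 *	flush_tlb_mm_range(mm, addr, addr + PMD_SIZE, PMD_SHIFT, false);
 */
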
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

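/*
 * Illustrative sketch, an assumption rather than part of this header:
 * batched reclaim gathers mms while unmapping and flushes once at the
 * end:
 *
 *	arch_tlbbatch_add_mm(&batch, mm);	// per mm, during unmap
 *	...
 *	arch_tlbbatch_flush(&batch);		// one combined flush
 */
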
#endif /* !MODULE */

#endif /* _ASM_X86_TLBFLUSH_H */