/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif
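/*
 * Illustrative sketch only (not part of this header): an architecture that
 * switches the loaded mm lazily can override nmi_uaccess_okay() from its own
 * headers to reject user accesses while a switch is in flight, roughly:
 *
 *	#define nmi_uaccess_okay() arch_nmi_uaccess_okay()
 *
 * where arch_nmi_uaccess_okay() is a hypothetical helper that compares
 * current->mm against the mm the hardware is actually using. x86 provides a
 * real implementation along these lines in its <asm/tlbflush.h>.
 */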

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); mark the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()). In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    Call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force-flush
 *    the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * The architecture can furthermore provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above-mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather will not track individual pages for
 *  delayed page freeing anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
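/*
 * Putting the pieces above together, a typical unmap path in generic mm code
 * drives this API roughly as sketched below. This is an illustrative outline
 * only (exact signatures, in particular tlb_gather_mmu()'s, vary between
 * kernel versions); it is not a definition made by this header:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, ...);
 *	for each vma in [start, end) {
 *		tlb_start_vma(&tlb, vma);
 *		for each present pte {
 *			pte = ptep_get_and_clear(...);
 *			tlb_remove_tlb_entry(&tlb, ptep, addr);
 *			tlb_remove_page(&tlb, pte_page(pte));
 *		}
 *		tlb_end_vma(&tlb, vma);
 *	}
 *	tlb_finish_mmu(&tlb, ...);
 *
 * Pages are only actually freed once the TLB entries covering them have been
 * invalidated, preserving the unhook -> invalidate -> free ordering above.
 */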

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page-based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
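/*
 * Illustrative only: with MMU_GATHER_TABLE_FREE enabled, an architecture's
 * page-directory free hooks typically just forward to tlb_remove_table(),
 * roughly like:
 *
 *	#define __pmd_free_tlb(tlb, pmdp, addr)	\
 *		tlb_remove_table((tlb), virt_to_page(pmdp))
 *
 * What exactly gets passed as the table cookie (a struct page, a kernel
 * pointer, or some encoded value) is up to that architecture's
 * __tlb_remove_table(), which performs the eventual freeing.
 */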

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
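/*
 * Back-of-the-envelope check (assuming 4 KiB pages, 8-byte pointers and no
 * padding beyond the 16-byte batch header, i.e. a typical 64-bit config):
 * MAX_GATHER_BATCH is then (4096 - 16) / 8 = 510 pages per batch, so
 * MAX_GATHER_BATCH_COUNT is 10000 / 510 = 19 batches, i.e. at most
 * 19 * 510 = 9690 pages freed per flush cycle, in line with the 10K figure
 * quoted above.
 */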
241
David Brazdil0f672f62019-12-10 10:32:29 +0000242extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
243 int page_size);
244#endif
245
246/*
247 * struct mmu_gather is an opaque type used by the mm code for passing around
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000248 * any data needed by arch specific code for tlb_remove_page.
249 */
250struct mmu_gather {
251 struct mm_struct *mm;
David Brazdil0f672f62019-12-10 10:32:29 +0000252
Olivier Deprez157378f2022-04-04 15:47:50 +0200253#ifdef CONFIG_MMU_GATHER_TABLE_FREE
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000254 struct mmu_table_batch *batch;
255#endif
David Brazdil0f672f62019-12-10 10:32:29 +0000256
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000257 unsigned long start;
258 unsigned long end;
David Brazdil0f672f62019-12-10 10:32:29 +0000259 /*
260 * we are in the middle of an operation to clear
261 * a full mm and can make some optimizations
262 */
263 unsigned int fullmm : 1;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000264
David Brazdil0f672f62019-12-10 10:32:29 +0000265 /*
266 * we have performed an operation which
267 * requires a complete flush of the tlb
268 */
269 unsigned int need_flush_all : 1;
270
271 /*
272 * we have removed page directories
273 */
274 unsigned int freed_tables : 1;
275
276 /*
277 * at which levels have we cleared entries?
278 */
279 unsigned int cleared_ptes : 1;
280 unsigned int cleared_pmds : 1;
281 unsigned int cleared_puds : 1;
282 unsigned int cleared_p4ds : 1;
283
284 /*
285 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
286 */
287 unsigned int vma_exec : 1;
288 unsigned int vma_huge : 1;
289
290 unsigned int batch_count;
291
Olivier Deprez157378f2022-04-04 15:47:50 +0200292#ifndef CONFIG_MMU_GATHER_NO_GATHER
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000293 struct mmu_gather_batch *active;
294 struct mmu_gather_batch local;
295 struct page *__pages[MMU_GATHER_BUNDLE];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000296
Olivier Deprez157378f2022-04-04 15:47:50 +0200297#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
David Brazdil0f672f62019-12-10 10:32:29 +0000298 unsigned int page_size;
299#endif
300#endif
301};
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000302
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000303void tlb_flush_mmu(struct mmu_gather *tlb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000304
305static inline void __tlb_adjust_range(struct mmu_gather *tlb,
306 unsigned long address,
307 unsigned int range_size)
308{
309 tlb->start = min(tlb->start, address);
310 tlb->end = max(tlb->end, address + range_size);
311}
312
313static inline void __tlb_reset_range(struct mmu_gather *tlb)
314{
315 if (tlb->fullmm) {
316 tlb->start = tlb->end = ~0;
317 } else {
318 tlb->start = TASK_SIZE;
319 tlb->end = 0;
320 }
David Brazdil0f672f62019-12-10 10:32:29 +0000321 tlb->freed_tables = 0;
322 tlb->cleared_ptes = 0;
323 tlb->cleared_pmds = 0;
324 tlb->cleared_puds = 0;
325 tlb->cleared_p4ds = 0;
326 /*
327 * Do not reset mmu_gather::vma_* fields here, we do not
328 * call into tlb_start_vma() again to set them if there is an
329 * intermediate flush.
330 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000331}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have an efficient means of range-flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation,
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/*
 * tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
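/*
 * Illustrative only: the open-coded equivalent of tlb_remove_page() above,
 * which is why __tlb_remove_page*() returns a bool:
 *
 *	if (__tlb_remove_page(tlb, page))
 *		tlb_flush_mmu(tlb);
 *
 * i.e. once the batch fills up, flush the TLBs and free everything queued.
 */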

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
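/*
 * Sketch of how an architecture might consume the helpers above (assumed,
 * not mandated by this header): an arch-provided tlb_flush() can use
 * tlb_get_unmap_shift()/tlb_get_unmap_size() to pick the invalidation
 * stride, e.g.
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *
 *		my_arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end, stride);
 *	}
 *
 * where my_arch_flush_tlb_range() is a hypothetical per-stride range
 * invalidation primitive. arm64 does something along these lines.
 */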

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
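/*
 * Illustrative sketch of the intended call pattern, loosely modelled on a
 * PTE zap loop in generic mm code (details and helper names vary):
 *
 *	pte_t ptent = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
 *
 *	tlb_remove_tlb_entry(tlb, ptep, addr);
 *	if (pte_present(ptent))
 *		tlb_remove_page(tlb, pte_page(ptent));
 *
 * tlb_remove_tlb_entry() grows the flush range and marks cleared_ptes;
 * tlb_remove_page() queues the page so it is only freed after the TLB
 * invalidate covering 'addr' has been issued.
 */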

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture that would want something that odd,
 * I think it is up to that architecture to do its own odd thing, not cause
 * pain for others:
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */