// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still does
			 * count as a valid regular mapping for the page (and
			 * is accounted as such in page maps count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
{
	unsigned long hpage_pfn = page_to_pfn(hpage);

	/* THP can be referenced by any subpage */
	return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
}

/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_in_hpage(pvmw->page, pfn);
}

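/*
 * Advance pvmw->address to the start of the next entry at the current
 * page-table level: round up to the next size-aligned boundary, saturating
 * to ULONG_MAX if the addition wraps past the end of the address space.
 */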
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP (an illustrative caller loop is
 * sketched after the function body below).
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	/*
	 * Seeking to the next pte only makes sense for THP.
	 * But more important than that optimization is to filter out
	 * any PageKsm page, whose page->index misleads vma_address()
	 * and vma_address_end() to disaster.
	 */
	end = PageTransCompound(page) ?
		vma_address_end(page, pvmw->vma) :
		pvmw->address + PAGE_SIZE;
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
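		/*
		 * Descend the page-table hierarchy for this address; if a
		 * whole pgd/p4d/pud range is absent, step_forward() skips
		 * over it in one go.
		 */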
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (pmd_page(pmde) != page)
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    migration_entry_to_page(entry) != page)
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    PageTransCompound(page)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
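
/*
 * Illustrative usage sketch (not part of the upstream interface): rmap-style
 * callers typically drive page_vma_mapped_walk() in a loop, because a
 * PTE-mapped THP may be mapped by several PTEs within the same VMA.  On each
 * true return, pvmw.ptl is held and pvmw.pte points at one mapping (it is
 * NULL for a PMD-mapped THP, in which case pvmw.pmd is set).  The handler
 * my_handle_one_mapping() is hypothetical, and real callers check
 * vma_address() for -EFAULT first, as page_mapped_in_vma() below does:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = vma_address(page, vma),
 *		.flags = 0,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (!my_handle_one_mapping(&pvmw)) {
 *			page_vma_mapped_walk_done(&pvmw);
 *			break;
 *		}
 *	}
 */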

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

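	/* vma_address() returns -EFAULT if the page lies outside this VMA */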
	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}