// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped,
 * just as would have happened if the vma had been large enough during
 * the page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

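/*
 * Apply MADV_HUGEPAGE/MADV_NOHUGEPAGE advice to the vma flags and, for
 * MADV_HUGEPAGE, register the mm with khugepaged right away instead of
 * waiting for a future page fault to do so.
 */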
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

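/*
 * An mm counts as exited once its last user reference is gone; khugepaged
 * must not touch its page tables past that point.
 */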
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

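/*
 * Decide whether a vma is eligible for collapse: THP must be enabled for
 * it, file mappings must be suitably aligned, and special mappings,
 * temporary stacks and VM_NO_KHUGEPAGED vmas are excluded.
 */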
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular files are valid */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
	    (vm_flags & VM_DENYWRITE)) {
		struct inode *inode = vma->vm_file->f_inode;

		return S_ISREG(inode->i_mode);
	}

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

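/*
 * Register an mm with khugepaged: allocate an mm_slot, add it to the hash
 * and to the tail of the scan list, and wake the daemon if the list was
 * previously empty.
 */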
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged only supports read-only files for non-shmem files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

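/*
 * Unregister an mm on exit. If khugepaged is currently scanning this very
 * mm, the slot cannot be freed here; instead take and drop mmap_lock to
 * serialize against the scanner, and let collect_mm_slot() free it later.
 */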
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

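/*
 * Lock and isolate from the LRU every page mapped by the pte range that is
 * about to be collapsed, enforcing the max_ptes_none/max_ptes_shared limits
 * and refusing pages with unexpected extra references. Returns 1 on
 * success, 0 on failure (with all pages released again).
 */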
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (!pte_write(pteval) && PageSwapCache(page) &&
				!reuse_swap_page(page, NULL)) {
			/*
			 * Page is in the swap cache and cannot be re-used.
			 * It cannot be collapsed into a THP.
			 */
			unlock_page(page);
			result = SCAN_SWAP_CACHE_PAGE;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

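/*
 * Copy the contents of the isolated small pages into the new huge page,
 * clear the old ptes, and drop the old pages' rmap entries and references.
 */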
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
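/*
 * Pick the node that accumulated the most hits in khugepaged_node_load[]
 * during the scan, round-robining between nodes with equal counts.
 */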
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If mmap_lock temporarily dropped, revalidate vma
 * before taking mmap_lock.
 * Returns 0 if it succeeds, otherwise returns a non-zero
 * value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

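/*
 * Attempt the actual collapse of one anonymous PMD range: allocate the huge
 * page without mmap_lock held, swap in any missing ptes, then retake
 * mmap_lock for writing, isolate and copy the small pages, and finally
 * install the huge pmd.
 */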
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
	 * If it fails, we release mmap_lock and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
			&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
			&compound_pagelist);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	goto out_up_write;
}

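/*
 * Scan one pmd range and decide whether it is worth collapsing. On a
 * positive decision the collapse itself is attempted via
 * collapse_huge_page(), which returns with mmap_lock released.
 */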
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate a hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy; it may see total_mapcount > refcount
		 * in some cases.
		 * For example, one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent in the window between the child
		 * clearing the PageDoubleMap flag and decrementing the
		 * mapcount. So khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral; we could always retry the
		 * collapse later. However it may report a false positive if
		 * the page has excessive GUP pins (i.e. 512). Anyway the same
		 * check will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				   referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

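/*
 * Drop the mm_slot of an mm that has exited while it was the current scan
 * target; called with khugepaged_mm_lock held.
 */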
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */
static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					 unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

/**
 * Try to collapse a pte-mapped THP for mm at address haddr.
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in
 * as pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return;

	if (!PageHead(hpage))
		goto drop_hpage;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		goto drop_hpage;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));

drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
	goto drop_hpage;
}

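/*
 * Flush the list of pte-mapped THP addresses recorded for this mm_slot,
 * trying to collapse each of them to a pmd mapping.
 */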
static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return 0;

	if (!mmap_write_trylock(mm))
		return -EBUSY;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
	return 0;
}

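/*
 * For each vma that maps this file offset, try to withdraw the page table
 * covering the range so that a huge page in the page cache can later be
 * refaulted as pmd-mapped. Requires the exclusive mmap_lock, taken with
 * trylock to avoid deadlocking against the fault path.
 */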
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * mmap_write_lock(mm) as PMD-mapping is likely to be split
		 * later.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set up
		 * after the check but before we took mmap_lock by the fault
		 * path. But page lock would prevent establishing any new ptes
		 * of the page, so we are safe.
		 *
		 * An alternative would be drop the check, but check that page
		 * table is clear before calling pmdp_collapse_flush() under
		 * ptl. It has higher chance to recover THP for the VMA, but
		 * has higher cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_lock to retract page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 */
		if (mmap_write_trylock(mm)) {
			if (!khugepaged_test_exit(mm)) {
				spinlock_t *ptl = pmd_lock(mm, pmd);
				/* assume page table is clear */
				_pmd = pmdp_collapse_flush(vma, addr, pmd);
				spin_unlock(ptl);
				mm_dec_nr_ptes(mm);
				pte_free(mm, pmd_pgtable(_pmd));
			}
			mmap_write_unlock(mm);
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}

1618/**
David Brazdil0f672f62019-12-10 10:32:29 +00001619 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001620 *
1621 * Basic scheme is simple, details are more complex:
1622 * - allocate and lock a new huge page;
David Brazdil0f672f62019-12-10 10:32:29 +00001623 * - scan page cache replacing old pages with the new one
1624 * + swap/gup in pages if necessary;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001625 * + fill in gaps;
David Brazdil0f672f62019-12-10 10:32:29 +00001626 * + keep old pages around in case rollback is required;
1627 * - if replacing succeeds:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001628 * + copy data over;
1629 * + free old pages;
1630 * + unlock huge page;
 1631 * - if replacing fails:
1632 * + put all pages back and unfreeze them;
David Brazdil0f672f62019-12-10 10:32:29 +00001633 * + restore gaps in the page cache;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001634 * + unlock and free huge page;
1635 */
David Brazdil0f672f62019-12-10 10:32:29 +00001636static void collapse_file(struct mm_struct *mm,
1637 struct file *file, pgoff_t start,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001638 struct page **hpage, int node)
1639{
David Brazdil0f672f62019-12-10 10:32:29 +00001640 struct address_space *mapping = file->f_mapping;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001641 gfp_t gfp;
David Brazdil0f672f62019-12-10 10:32:29 +00001642 struct page *new_page;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001643 pgoff_t index, end = start + HPAGE_PMD_NR;
1644 LIST_HEAD(pagelist);
David Brazdil0f672f62019-12-10 10:32:29 +00001645 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001646 int nr_none = 0, result = SCAN_SUCCEED;
David Brazdil0f672f62019-12-10 10:32:29 +00001647 bool is_shmem = shmem_file(file);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001648
David Brazdil0f672f62019-12-10 10:32:29 +00001649 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001650 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1651
1652 /* Only allocate from the target node */
1653 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1654
1655 new_page = khugepaged_alloc_page(hpage, gfp, node);
1656 if (!new_page) {
1657 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1658 goto out;
1659 }
1660
Olivier Deprez157378f2022-04-04 15:47:50 +02001661 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001662 result = SCAN_CGROUP_CHARGE_FAIL;
1663 goto out;
1664 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001665 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001666
David Brazdil0f672f62019-12-10 10:32:29 +00001667 /* This will be less messy when we use multi-index entries */
1668 do {
1669 xas_lock_irq(&xas);
1670 xas_create_range(&xas);
1671 if (!xas_error(&xas))
1672 break;
1673 xas_unlock_irq(&xas);
1674 if (!xas_nomem(&xas, GFP_KERNEL)) {
David Brazdil0f672f62019-12-10 10:32:29 +00001675 result = SCAN_FAIL;
1676 goto out;
1677 }
1678 } while (1);
1679
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001680 __SetPageLocked(new_page);
David Brazdil0f672f62019-12-10 10:32:29 +00001681 if (is_shmem)
1682 __SetPageSwapBacked(new_page);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001683 new_page->index = start;
1684 new_page->mapping = mapping;
1685
1686 /*
1687 * At this point the new_page is locked and not up-to-date.
1688 * It's safe to insert it into the page cache, because nobody would
1689 * be able to map it or use it in another way until we unlock it.
1690 */
1691
David Brazdil0f672f62019-12-10 10:32:29 +00001692 xas_set(&xas, start);
1693 for (index = start; index < end; index++) {
1694 struct page *page = xas_next(&xas);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001695
David Brazdil0f672f62019-12-10 10:32:29 +00001696 VM_BUG_ON(index != xas.xa_index);
1697 if (is_shmem) {
1698 if (!page) {
1699 /*
1700 * Stop if extent has been truncated or
1701 * hole-punched, and is now completely
1702 * empty.
1703 */
1704 if (index == start) {
1705 if (!xas_next_entry(&xas, end - 1)) {
1706 result = SCAN_TRUNCATED;
1707 goto xa_locked;
1708 }
1709 xas_set(&xas, index);
1710 }
1711 if (!shmem_charge(mapping->host, 1)) {
1712 result = SCAN_FAIL;
1713 goto xa_locked;
1714 }
1715 xas_store(&xas, new_page);
1716 nr_none++;
1717 continue;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001718 }
David Brazdil0f672f62019-12-10 10:32:29 +00001719
1720 if (xa_is_value(page) || !PageUptodate(page)) {
1721 xas_unlock_irq(&xas);
1722 /* swap in or instantiate fallocated page */
1723 if (shmem_getpage(mapping->host, index, &page,
1724 SGP_NOHUGE)) {
1725 result = SCAN_FAIL;
1726 goto xa_unlocked;
1727 }
1728 } else if (trylock_page(page)) {
1729 get_page(page);
1730 xas_unlock_irq(&xas);
1731 } else {
1732 result = SCAN_PAGE_LOCK;
1733 goto xa_locked;
1734 }
1735 } else { /* !is_shmem */
1736 if (!page || xa_is_value(page)) {
1737 xas_unlock_irq(&xas);
1738 page_cache_sync_readahead(mapping, &file->f_ra,
1739 file, index,
Olivier Deprez0e641232021-09-23 10:07:05 +02001740 end - index);
David Brazdil0f672f62019-12-10 10:32:29 +00001741 /* drain pagevecs to help isolate_lru_page() */
1742 lru_add_drain();
1743 page = find_lock_page(mapping, index);
1744 if (unlikely(page == NULL)) {
1745 result = SCAN_FAIL;
1746 goto xa_unlocked;
1747 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001748 } else if (PageDirty(page)) {
1749 /*
 1750 * khugepaged only works on a read-only fd,
 1751 * so this page is dirty because it hasn't
 1752 * been flushed since the first write. There
1753 * won't be new dirty pages.
1754 *
1755 * Trigger async flush here and hope the
1756 * writeback is done when khugepaged
1757 * revisits this page.
1758 *
1759 * This is a one-off situation. We are not
 1760 * forcing writeback in a loop.
1761 */
1762 xas_unlock_irq(&xas);
1763 filemap_flush(mapping);
1764 result = SCAN_FAIL;
1765 goto xa_unlocked;
1766 } else if (PageWriteback(page)) {
1767 xas_unlock_irq(&xas);
1768 result = SCAN_FAIL;
1769 goto xa_unlocked;
David Brazdil0f672f62019-12-10 10:32:29 +00001770 } else if (trylock_page(page)) {
1771 get_page(page);
1772 xas_unlock_irq(&xas);
1773 } else {
1774 result = SCAN_PAGE_LOCK;
1775 goto xa_locked;
1776 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001777 }
1778
1779 /*
1780 * The page must be locked, so we can drop the i_pages lock
1781 * without racing with truncate.
1782 */
1783 VM_BUG_ON_PAGE(!PageLocked(page), page);
David Brazdil0f672f62019-12-10 10:32:29 +00001784
1785 /* make sure the page is up to date */
1786 if (unlikely(!PageUptodate(page))) {
1787 result = SCAN_FAIL;
1788 goto out_unlock;
1789 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001790
1791 /*
1792 * If file was truncated then extended, or hole-punched, before
1793 * we locked the first page, then a THP might be there already.
1794 */
1795 if (PageTransCompound(page)) {
1796 result = SCAN_PAGE_COMPOUND;
1797 goto out_unlock;
1798 }
1799
1800 if (page_mapping(page) != mapping) {
1801 result = SCAN_TRUNCATED;
1802 goto out_unlock;
1803 }
1804
Olivier Deprez157378f2022-04-04 15:47:50 +02001805 if (!is_shmem && (PageDirty(page) ||
1806 PageWriteback(page))) {
David Brazdil0f672f62019-12-10 10:32:29 +00001807 /*
1808 * khugepaged only works on read-only fd, so this
1809 * page is dirty because it hasn't been flushed
1810 * since first write.
1811 */
1812 result = SCAN_FAIL;
1813 goto out_unlock;
1814 }
1815
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001816 if (isolate_lru_page(page)) {
1817 result = SCAN_DEL_PAGE_LRU;
1818 goto out_unlock;
1819 }
1820
David Brazdil0f672f62019-12-10 10:32:29 +00001821 if (page_has_private(page) &&
1822 !try_to_release_page(page, GFP_KERNEL)) {
1823 result = SCAN_PAGE_HAS_PRIVATE;
Olivier Deprez0e641232021-09-23 10:07:05 +02001824 putback_lru_page(page);
David Brazdil0f672f62019-12-10 10:32:29 +00001825 goto out_unlock;
1826 }
1827
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001828 if (page_mapped(page))
1829 unmap_mapping_pages(mapping, index, 1, false);
1830
David Brazdil0f672f62019-12-10 10:32:29 +00001831 xas_lock_irq(&xas);
1832 xas_set(&xas, index);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001833
David Brazdil0f672f62019-12-10 10:32:29 +00001834 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001835 VM_BUG_ON_PAGE(page_mapped(page), page);
1836
1837 /*
1838 * The page is expected to have page_count() == 3:
1839 * - we hold a pin on it;
David Brazdil0f672f62019-12-10 10:32:29 +00001840 * - one reference from page cache;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001841 * - one from isolate_lru_page;
1842 */
1843 if (!page_ref_freeze(page, 3)) {
1844 result = SCAN_PAGE_COUNT;
David Brazdil0f672f62019-12-10 10:32:29 +00001845 xas_unlock_irq(&xas);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001846 putback_lru_page(page);
1847 goto out_unlock;
1848 }
1849
1850 /*
1851 * Add the page to the list to be able to undo the collapse if
 1852 * something goes wrong.
1853 */
1854 list_add_tail(&page->lru, &pagelist);
1855
1856 /* Finally, replace with the new page. */
David Brazdil0f672f62019-12-10 10:32:29 +00001857 xas_store(&xas, new_page);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001858 continue;
1859out_unlock:
1860 unlock_page(page);
1861 put_page(page);
David Brazdil0f672f62019-12-10 10:32:29 +00001862 goto xa_unlocked;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001863 }
1864
David Brazdil0f672f62019-12-10 10:32:29 +00001865 if (is_shmem)
1866 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1867 else {
1868 __inc_node_page_state(new_page, NR_FILE_THPS);
1869 filemap_nr_thps_inc(mapping);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001870 }
1871
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001872 if (nr_none) {
Olivier Deprez157378f2022-04-04 15:47:50 +02001873 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
David Brazdil0f672f62019-12-10 10:32:29 +00001874 if (is_shmem)
Olivier Deprez157378f2022-04-04 15:47:50 +02001875 __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001876 }
1877
David Brazdil0f672f62019-12-10 10:32:29 +00001878xa_locked:
1879 xas_unlock_irq(&xas);
1880xa_unlocked:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001881
1882 if (result == SCAN_SUCCEED) {
David Brazdil0f672f62019-12-10 10:32:29 +00001883 struct page *page, *tmp;
1884
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001885 /*
David Brazdil0f672f62019-12-10 10:32:29 +00001886 * Replacing the old pages with the new one has succeeded; now we
1887 * need to copy the content and free the old pages.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001888 */
1889 index = start;
1890 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1891 while (index < page->index) {
1892 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1893 index++;
1894 }
1895 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1896 page);
1897 list_del(&page->lru);
1898 page->mapping = NULL;
1899 page_ref_unfreeze(page, 1);
1900 ClearPageActive(page);
1901 ClearPageUnevictable(page);
1902 unlock_page(page);
1903 put_page(page);
1904 index++;
1905 }
1906 while (index < end) {
1907 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1908 index++;
1909 }
1910
1911 SetPageUptodate(new_page);
1912 page_ref_add(new_page, HPAGE_PMD_NR - 1);
Olivier Deprez157378f2022-04-04 15:47:50 +02001913 if (is_shmem)
David Brazdil0f672f62019-12-10 10:32:29 +00001914 set_page_dirty(new_page);
Olivier Deprez157378f2022-04-04 15:47:50 +02001915 lru_cache_add(new_page);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001916
1917 /*
1918 * Remove pte page tables, so we can re-fault the page as huge.
1919 */
1920 retract_page_tables(mapping, start);
1921 *hpage = NULL;
1922
1923 khugepaged_pages_collapsed++;
1924 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00001925 struct page *page;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001926
David Brazdil0f672f62019-12-10 10:32:29 +00001927 /* Something went wrong: roll back page cache changes */
1928 xas_lock_irq(&xas);
1929 mapping->nrpages -= nr_none;
1930
1931 if (is_shmem)
1932 shmem_uncharge(mapping->host, nr_none);
1933
1934 xas_set(&xas, start);
1935 xas_for_each(&xas, page, end - 1) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001936 page = list_first_entry_or_null(&pagelist,
1937 struct page, lru);
David Brazdil0f672f62019-12-10 10:32:29 +00001938 if (!page || xas.xa_index < page->index) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001939 if (!nr_none)
1940 break;
1941 nr_none--;
1942 /* Put holes back where they were */
David Brazdil0f672f62019-12-10 10:32:29 +00001943 xas_store(&xas, NULL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001944 continue;
1945 }
1946
David Brazdil0f672f62019-12-10 10:32:29 +00001947 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001948
1949 /* Unfreeze the page. */
1950 list_del(&page->lru);
1951 page_ref_unfreeze(page, 2);
David Brazdil0f672f62019-12-10 10:32:29 +00001952 xas_store(&xas, page);
1953 xas_pause(&xas);
1954 xas_unlock_irq(&xas);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001955 unlock_page(page);
1956 putback_lru_page(page);
David Brazdil0f672f62019-12-10 10:32:29 +00001957 xas_lock_irq(&xas);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001958 }
1959 VM_BUG_ON(nr_none);
David Brazdil0f672f62019-12-10 10:32:29 +00001960 xas_unlock_irq(&xas);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001961
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001962 new_page->mapping = NULL;
1963 }
1964
1965 unlock_page(new_page);
1966out:
1967 VM_BUG_ON(!list_empty(&pagelist));
Olivier Deprez157378f2022-04-04 15:47:50 +02001968 if (!IS_ERR_OR_NULL(*hpage))
1969 mem_cgroup_uncharge(*hpage);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001970 /* TODO: tracepoints */
1971}
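/*
 * The success path of collapse_file() above boils down to filling each
 * 4kB slot of the huge page: copy the slot from the old page when one
 * existed, zero it when the slot was a hole (one of the nr_none
 * entries). A simplified userspace sketch of that indexing, using plain
 * buffers instead of struct page; the sizes and names here are
 * illustrative assumptions, not kernel code.
 */
#include <string.h>

#define NR_SUBPAGES	512		/* stands in for HPAGE_PMD_NR */
#define SUBPAGE_SIZE	4096

/* old[i] is NULL where the page cache had a hole at offset start + i. */
static void fill_huge_buffer(unsigned char *huge,
			     unsigned char *old[NR_SUBPAGES])
{
	int i;

	for (i = 0; i < NR_SUBPAGES; i++) {
		unsigned char *dst = huge + i * SUBPAGE_SIZE;

		if (old[i])
			memcpy(dst, old[i], SUBPAGE_SIZE); /* copy_highpage() */
		else
			memset(dst, 0, SUBPAGE_SIZE);      /* clear_highpage() */
	}
}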
1972
David Brazdil0f672f62019-12-10 10:32:29 +00001973static void khugepaged_scan_file(struct mm_struct *mm,
1974 struct file *file, pgoff_t start, struct page **hpage)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001975{
1976 struct page *page = NULL;
David Brazdil0f672f62019-12-10 10:32:29 +00001977 struct address_space *mapping = file->f_mapping;
1978 XA_STATE(xas, &mapping->i_pages, start);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001979 int present, swap;
1980 int node = NUMA_NO_NODE;
1981 int result = SCAN_SUCCEED;
1982
1983 present = 0;
1984 swap = 0;
1985 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1986 rcu_read_lock();
David Brazdil0f672f62019-12-10 10:32:29 +00001987 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1988 if (xas_retry(&xas, page))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001989 continue;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001990
David Brazdil0f672f62019-12-10 10:32:29 +00001991 if (xa_is_value(page)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001992 if (++swap > khugepaged_max_ptes_swap) {
1993 result = SCAN_EXCEED_SWAP_PTE;
1994 break;
1995 }
1996 continue;
1997 }
1998
1999 if (PageTransCompound(page)) {
2000 result = SCAN_PAGE_COMPOUND;
2001 break;
2002 }
2003
2004 node = page_to_nid(page);
2005 if (khugepaged_scan_abort(node)) {
2006 result = SCAN_SCAN_ABORT;
2007 break;
2008 }
2009 khugepaged_node_load[node]++;
2010
2011 if (!PageLRU(page)) {
2012 result = SCAN_PAGE_LRU;
2013 break;
2014 }
2015
David Brazdil0f672f62019-12-10 10:32:29 +00002016 if (page_count(page) !=
2017 1 + page_mapcount(page) + page_has_private(page)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002018 result = SCAN_PAGE_COUNT;
2019 break;
2020 }
2021
2022 /*
2023 * We probably should check if the page is referenced here, but
2024 * nobody would transfer pte_young() to PageReferenced() for us.
2025 * And rmap walk here is just too costly...
2026 */
2027
2028 present++;
2029
2030 if (need_resched()) {
David Brazdil0f672f62019-12-10 10:32:29 +00002031 xas_pause(&xas);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002032 cond_resched_rcu();
2033 }
2034 }
2035 rcu_read_unlock();
2036
2037 if (result == SCAN_SUCCEED) {
2038 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2039 result = SCAN_EXCEED_NONE_PTE;
2040 } else {
2041 node = khugepaged_find_target_node();
David Brazdil0f672f62019-12-10 10:32:29 +00002042 collapse_file(mm, file, start, hpage, node);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002043 }
2044 }
2045
2046 /* TODO: tracepoints */
2047}
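/*
 * The two cutoffs above are easiest to read with concrete numbers.
 * Assuming the usual defaults (an assumption, not taken from this file):
 * HPAGE_PMD_NR == 512, khugepaged_max_ptes_swap == 64 and
 * khugepaged_max_ptes_none == 511, the scan gives up as soon as a 65th
 * swap entry is seen, and the final check
 *	present < HPAGE_PMD_NR - khugepaged_max_ptes_none
 * becomes "present < 1": a single resident page in the 2MB extent is
 * enough to attempt the collapse.
 */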
2048#else
David Brazdil0f672f62019-12-10 10:32:29 +00002049static void khugepaged_scan_file(struct mm_struct *mm,
2050 struct file *file, pgoff_t start, struct page **hpage)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002051{
2052 BUILD_BUG();
2053}
David Brazdil0f672f62019-12-10 10:32:29 +00002054
2055static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2056{
2057 return 0;
2058}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002059#endif
2060
2061static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2062 struct page **hpage)
2063 __releases(&khugepaged_mm_lock)
2064 __acquires(&khugepaged_mm_lock)
2065{
2066 struct mm_slot *mm_slot;
2067 struct mm_struct *mm;
2068 struct vm_area_struct *vma;
2069 int progress = 0;
2070
2071 VM_BUG_ON(!pages);
David Brazdil0f672f62019-12-10 10:32:29 +00002072 lockdep_assert_held(&khugepaged_mm_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002073
2074 if (khugepaged_scan.mm_slot)
2075 mm_slot = khugepaged_scan.mm_slot;
2076 else {
2077 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2078 struct mm_slot, mm_node);
2079 khugepaged_scan.address = 0;
2080 khugepaged_scan.mm_slot = mm_slot;
2081 }
2082 spin_unlock(&khugepaged_mm_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00002083 khugepaged_collapse_pte_mapped_thps(mm_slot);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002084
2085 mm = mm_slot->mm;
2086 /*
 2087 * Don't wait for the mmap lock (to avoid long wait times). Just move to
2088 * the next mm on the list.
2089 */
2090 vma = NULL;
Olivier Deprez157378f2022-04-04 15:47:50 +02002091 if (unlikely(!mmap_read_trylock(mm)))
2092 goto breakouterloop_mmap_lock;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002093 if (likely(!khugepaged_test_exit(mm)))
2094 vma = find_vma(mm, khugepaged_scan.address);
2095
2096 progress++;
2097 for (; vma; vma = vma->vm_next) {
2098 unsigned long hstart, hend;
2099
2100 cond_resched();
2101 if (unlikely(khugepaged_test_exit(mm))) {
2102 progress++;
2103 break;
2104 }
2105 if (!hugepage_vma_check(vma, vma->vm_flags)) {
2106skip:
2107 progress++;
2108 continue;
2109 }
2110 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2111 hend = vma->vm_end & HPAGE_PMD_MASK;
2112 if (hstart >= hend)
2113 goto skip;
2114 if (khugepaged_scan.address > hend)
2115 goto skip;
2116 if (khugepaged_scan.address < hstart)
2117 khugepaged_scan.address = hstart;
2118 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
Olivier Deprez157378f2022-04-04 15:47:50 +02002119 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2120 goto skip;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002121
2122 while (khugepaged_scan.address < hend) {
2123 int ret;
2124 cond_resched();
2125 if (unlikely(khugepaged_test_exit(mm)))
2126 goto breakouterloop;
2127
2128 VM_BUG_ON(khugepaged_scan.address < hstart ||
2129 khugepaged_scan.address + HPAGE_PMD_SIZE >
2130 hend);
David Brazdil0f672f62019-12-10 10:32:29 +00002131 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002132 struct file *file = get_file(vma->vm_file);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002133 pgoff_t pgoff = linear_page_index(vma,
2134 khugepaged_scan.address);
David Brazdil0f672f62019-12-10 10:32:29 +00002135
Olivier Deprez157378f2022-04-04 15:47:50 +02002136 mmap_read_unlock(mm);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002137 ret = 1;
David Brazdil0f672f62019-12-10 10:32:29 +00002138 khugepaged_scan_file(mm, file, pgoff, hpage);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002139 fput(file);
2140 } else {
2141 ret = khugepaged_scan_pmd(mm, vma,
2142 khugepaged_scan.address,
2143 hpage);
2144 }
2145 /* move to next address */
2146 khugepaged_scan.address += HPAGE_PMD_SIZE;
2147 progress += HPAGE_PMD_NR;
2148 if (ret)
Olivier Deprez157378f2022-04-04 15:47:50 +02002149 /* we released mmap_lock so break loop */
2150 goto breakouterloop_mmap_lock;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002151 if (progress >= pages)
2152 goto breakouterloop;
2153 }
2154 }
2155breakouterloop:
Olivier Deprez157378f2022-04-04 15:47:50 +02002156 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2157breakouterloop_mmap_lock:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002158
2159 spin_lock(&khugepaged_mm_lock);
2160 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2161 /*
2162 * Release the current mm_slot if this mm is about to die, or
2163 * if we scanned all vmas of this mm.
2164 */
2165 if (khugepaged_test_exit(mm) || !vma) {
2166 /*
2167 * Make sure that if mm_users is reaching zero while
2168 * khugepaged runs here, khugepaged_exit will find
2169 * mm_slot not pointing to the exiting mm.
2170 */
2171 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2172 khugepaged_scan.mm_slot = list_entry(
2173 mm_slot->mm_node.next,
2174 struct mm_slot, mm_node);
2175 khugepaged_scan.address = 0;
2176 } else {
2177 khugepaged_scan.mm_slot = NULL;
2178 khugepaged_full_scans++;
2179 }
2180
2181 collect_mm_slot(mm_slot);
2182 }
2183
2184 return progress;
2185}
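/*
 * The hstart/hend computation in the VMA loop above is plain rounding to
 * PMD boundaries. A worked example, assuming HPAGE_PMD_SIZE == 2MB
 * (0x200000) and the illustrative VMA 0x7f123456f000 - 0x7f1234a01000:
 *	hstart = (0x7f123456f000 + 0x1fffff) & ~0x1fffff = 0x7f1234600000
 *	hend   =  0x7f1234a01000             & ~0x1fffff = 0x7f1234a00000
 * so exactly two PMD-sized ranges (at ...600000 and ...800000) are
 * candidates for collapse; a VMA that cannot contain one aligned 2MB
 * range ends up with hstart >= hend and is skipped.
 */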
2186
2187static int khugepaged_has_work(void)
2188{
2189 return !list_empty(&khugepaged_scan.mm_head) &&
2190 khugepaged_enabled();
2191}
2192
2193static int khugepaged_wait_event(void)
2194{
2195 return !list_empty(&khugepaged_scan.mm_head) ||
2196 kthread_should_stop();
2197}
2198
2199static void khugepaged_do_scan(void)
2200{
2201 struct page *hpage = NULL;
2202 unsigned int progress = 0, pass_through_head = 0;
2203 unsigned int pages = khugepaged_pages_to_scan;
2204 bool wait = true;
2205
2206 barrier(); /* write khugepaged_pages_to_scan to local stack */
2207
Olivier Deprez157378f2022-04-04 15:47:50 +02002208 lru_add_drain_all();
2209
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002210 while (progress < pages) {
2211 if (!khugepaged_prealloc_page(&hpage, &wait))
2212 break;
2213
2214 cond_resched();
2215
2216 if (unlikely(kthread_should_stop() || try_to_freeze()))
2217 break;
2218
2219 spin_lock(&khugepaged_mm_lock);
2220 if (!khugepaged_scan.mm_slot)
2221 pass_through_head++;
2222 if (khugepaged_has_work() &&
2223 pass_through_head < 2)
2224 progress += khugepaged_scan_mm_slot(pages - progress,
2225 &hpage);
2226 else
2227 progress = pages;
2228 spin_unlock(&khugepaged_mm_lock);
2229 }
2230
2231 if (!IS_ERR_OR_NULL(hpage))
2232 put_page(hpage);
2233}
2234
2235static bool khugepaged_should_wakeup(void)
2236{
2237 return kthread_should_stop() ||
2238 time_after_eq(jiffies, khugepaged_sleep_expire);
2239}
2240
2241static void khugepaged_wait_work(void)
2242{
2243 if (khugepaged_has_work()) {
2244 const unsigned long scan_sleep_jiffies =
2245 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2246
2247 if (!scan_sleep_jiffies)
2248 return;
2249
2250 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2251 wait_event_freezable_timeout(khugepaged_wait,
2252 khugepaged_should_wakeup(),
2253 scan_sleep_jiffies);
2254 return;
2255 }
2256
2257 if (khugepaged_enabled())
2258 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2259}
2260
2261static int khugepaged(void *none)
2262{
2263 struct mm_slot *mm_slot;
2264
2265 set_freezable();
2266 set_user_nice(current, MAX_NICE);
2267
2268 while (!kthread_should_stop()) {
2269 khugepaged_do_scan();
2270 khugepaged_wait_work();
2271 }
2272
2273 spin_lock(&khugepaged_mm_lock);
2274 mm_slot = khugepaged_scan.mm_slot;
2275 khugepaged_scan.mm_slot = NULL;
2276 if (mm_slot)
2277 collect_mm_slot(mm_slot);
2278 spin_unlock(&khugepaged_mm_lock);
2279 return 0;
2280}
2281
2282static void set_recommended_min_free_kbytes(void)
2283{
2284 struct zone *zone;
2285 int nr_zones = 0;
2286 unsigned long recommended_min;
2287
2288 for_each_populated_zone(zone) {
2289 /*
2290 * We don't need to worry about fragmentation of
2291 * ZONE_MOVABLE since it only has movable pages.
2292 */
2293 if (zone_idx(zone) > gfp_zone(GFP_USER))
2294 continue;
2295
2296 nr_zones++;
2297 }
2298
2299 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2300 recommended_min = pageblock_nr_pages * nr_zones * 2;
2301
2302 /*
2303 * Make sure that on average at least two pageblocks are almost free
2304 * of another type, one for a migratetype to fall back to and a
 2305 * second to avoid subsequent fallbacks of other types. There are 3
2306 * MIGRATE_TYPES we care about.
2307 */
2308 recommended_min += pageblock_nr_pages * nr_zones *
2309 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2310
2311 /* don't ever allow to reserve more than 5% of the lowmem */
2312 recommended_min = min(recommended_min,
2313 (unsigned long) nr_free_buffer_pages() / 20);
2314 recommended_min <<= (PAGE_SHIFT-10);
2315
2316 if (recommended_min > min_free_kbytes) {
2317 if (user_min_free_kbytes >= 0)
2318 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2319 min_free_kbytes, recommended_min);
2320
2321 min_free_kbytes = recommended_min;
2322 }
2323 setup_per_zone_wmarks();
2324}
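/*
 * A worked example of the reservation above, under stated assumptions:
 * pageblock_nr_pages == 512 (2MB pageblocks with 4kB pages), three
 * populated zones at or below gfp_zone(GFP_USER), MIGRATE_PCPTYPES == 3:
 *	recommended_min  = 512 * 3 * 2     =  3072 pages
 *	recommended_min += 512 * 3 * 3 * 3 = 16896 pages in total
 * then capped at nr_free_buffer_pages() / 20 and shifted by
 * PAGE_SHIFT - 10 (i.e. multiplied by 4 for 4kB pages) into kilobytes:
 * 16896 * 4 = 67584 kB, roughly 66MB, which becomes min_free_kbytes if
 * it is larger than the current value.
 */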
2325
2326int start_stop_khugepaged(void)
2327{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002328 int err = 0;
2329
2330 mutex_lock(&khugepaged_mutex);
2331 if (khugepaged_enabled()) {
2332 if (!khugepaged_thread)
2333 khugepaged_thread = kthread_run(khugepaged, NULL,
2334 "khugepaged");
2335 if (IS_ERR(khugepaged_thread)) {
2336 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2337 err = PTR_ERR(khugepaged_thread);
2338 khugepaged_thread = NULL;
2339 goto fail;
2340 }
2341
2342 if (!list_empty(&khugepaged_scan.mm_head))
2343 wake_up_interruptible(&khugepaged_wait);
2344
2345 set_recommended_min_free_kbytes();
2346 } else if (khugepaged_thread) {
2347 kthread_stop(khugepaged_thread);
2348 khugepaged_thread = NULL;
2349 }
2350fail:
2351 mutex_unlock(&khugepaged_mutex);
2352 return err;
2353}
Olivier Deprez0e641232021-09-23 10:07:05 +02002354
2355void khugepaged_min_free_kbytes_update(void)
2356{
2357 mutex_lock(&khugepaged_mutex);
2358 if (khugepaged_enabled() && khugepaged_thread)
2359 set_recommended_min_free_kbytes();
2360 mutex_unlock(&khugepaged_mutex);
2361}