/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
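
/*
 * Example (a sketch, not part of this header): migrate_page() is the
 * common choice for filesystems whose pages need no special handling;
 * it is wired up through address_space_operations.  The foo_aops name
 * is hypothetical:
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */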
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
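
/*
 * Example (a minimal sketch under assumptions): a typical caller
 * isolates pages onto a private list and hands them to migrate_pages()
 * with alloc_migration_target() as the allocation callback.  The
 * migration_target_control fields used here (.nid, .gfp_mask) are
 * assumed from mm-internal code:
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid		= target_nid,	(hypothetical target node)
 *		.gfp_mask	= GFP_KERNEL,
 *	};
 *
 *	(... isolate pages onto &pagelist ...)
 *	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			  (unsigned long)&mtc, MIGRATE_SYNC,
 *			  MR_MEMORY_HOTPLUG))
 *		putback_movable_pages(&pagelist);
 */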
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
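
/*
 * Example (sketch; the mapping name is hypothetical): a driver that
 * owns non-LRU pages makes them candidates for compaction/migration by
 * tagging them with a mapping whose a_ops supply ->isolate_page,
 * ->migratepage and ->putback_page, and clears the tag before freeing:
 *
 *	__SetPageMovable(page, &foo_dev_mapping);
 *	...
 *	__ClearPageMovable(page);
 */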

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */


#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not
 * have enough bits to store both the physical address and all the
 * flags below.  So far there is enough room.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
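
/*
 * Example (sketch): a device driver encodes a pfn plus flag bits into a
 * migrate entry when filling the src/dst arrays of struct migrate_vma
 * (defined below), and decodes it back later:
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) |
 *			     MIGRATE_PFN_LOCKED;
 *	struct page *p = migrate_pfn_to_page(mpfn);
 *
 * migrate_pfn_to_page() returns NULL when MIGRATE_PFN_VALID is unset.
 */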

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the dst array must not be modified
	 * after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner when
	 * migrating out of device private memory; flags must then also
	 * include MIGRATE_VMA_SELECT_DEVICE_PRIVATE.  Callers using mmu
	 * notifier callbacks should always set this field, so that device
	 * MMU invalidations can be skipped for device private pages that
	 * are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
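
/*
 * Example (a minimal sketch; the driver-side names are hypothetical):
 * the three functions above form a collect/copy/commit sequence for
 * migrating a range to or from device private memory:
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.start		= start,
 *		.end		= end,
 *		.pgmap_owner	= drv_owner,	(matches pgmap->owner)
 *		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	(... allocate destination pages, fill args.dst, and copy data
 *	 for src entries that have MIGRATE_PFN_MIGRATE set ...)
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */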

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */