/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
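
/*
 * Illustrative sketch (not part of this header): a filesystem that supports
 * page migration typically wires the generic helper into its
 * address_space_operations; the callback returns MIGRATEPAGE_SUCCESS or a
 * negative errno. example_aops, example_readpage and example_writepage are
 * hypothetical names; migrate_page() is declared below under CONFIG_MIGRATION.
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */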

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (PageTransHuge(page)) {
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}
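
/*
 * Illustrative sketch (not part of this header): new_page_nodemask() is
 * typically wrapped in a new_page_t callback and handed to migrate_pages(),
 * declared below under CONFIG_MIGRATION. The wrapper name and the use of the
 * private argument as a preferred node id are hypothetical:
 *
 *	static struct page *example_new_page(struct page *page,
 *					     unsigned long private)
 *	{
 *		return new_page_nodemask(page, (int)private,
 *					 &node_states[N_MEMORY]);
 *	}
 *
 *	err = migrate_pages(&pagelist, example_new_page, NULL, nid,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */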

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where unsigned long is only 32 bits wide
 * and so might not have enough bits to hold both the physical page frame
 * number and the flags below. So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
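
/*
 * Illustrative sketch (not part of this header): a device driver filling the
 * dst array of a struct migrate_vma (declared below) typically encodes the
 * pfn of the freshly allocated destination page with migrate_pfn() and ORs in
 * the relevant flags, while entries are decoded with migrate_pfn_to_page().
 * dpage, i and args are hypothetical names:
 *
 *	args.dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 *	...
 *	struct page *page = migrate_pfn_to_page(args.src[i]);
 *	if (!page || !(args.src[i] & MIGRATE_PFN_MIGRATE))
 *		continue;
 */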

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the dst array must not be modified
	 * after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
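
/*
 * Illustrative sketch (not part of this header) of the overall migrate_vma
 * flow, assuming a hypothetical driver helper example_copy_pages() that
 * copies data from the source pages to the destination pages and fills
 * args.dst along the way:
 *
 *	struct migrate_vma args = {
 *		.vma	= vma,
 *		.src	= src_pfns,
 *		.dst	= dst_pfns,
 *		.start	= start,
 *		.end	= end,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	if (args.cpages)
 *		example_copy_pages(&args);
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */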

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */