/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
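
/*
 * Illustrative usage sketch, not part of this header: callers typically
 * isolate pages onto a private list and hand them to migrate_pages()
 * together with a new_page_t allocator.  alloc_migration_target() can act
 * as that allocator, taking a struct migration_target_control through the
 * 'private' argument.  The initializer fields and target_nid below are
 * assumptions for illustration only.
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE,
 *	};
 *
 *	(isolate candidate pages onto &pagelist)
 *
 *	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			  (unsigned long)&mtc, MIGRATE_SYNC,
 *			  MR_MEMORY_HOTPLUG))
 *		putback_movable_pages(&pagelist);
 */
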
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
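
/*
 * Driver-side sketch (names hypothetical, based on the non-LRU movable
 * page convention rather than anything defined in this header): a driver
 * that owns non-LRU pages marks them movable by pointing them at a
 * mapping whose address_space_operations supply the isolate/migrate/
 * putback hooks, and clears the mark before freeing the page.
 *
 *	__SetPageMovable(page, &driver_mapping);
 *	...
 *	__ClearPageMovable(page);
 */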

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the physical address and the flags below.
 * So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
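
/*
 * Encoding round-trip, for illustration: migrate_pfn() shifts the pfn
 * left by MIGRATE_PFN_SHIFT so the low bits can carry the MIGRATE_PFN_*
 * flags, and migrate_pfn_to_page() only decodes entries that have
 * MIGRATE_PFN_VALID set.
 *
 *	unsigned long mpfn = migrate_pfn(pfn) | MIGRATE_PFN_WRITE;
 *	struct page *page = migrate_pfn_to_page(mpfn);
 *
 * after which page == pfn_to_page(pfn), while migrate_pfn_to_page(0)
 * returns NULL.
 */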

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};

struct migrate_vma {
	struct vm_area_struct *vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after
	 * migrate_vma_setup() returns, and must not modify the dst array
	 * after migrate_vma_pages() returns.
	 */
	unsigned long *dst;
	unsigned long *src;
	unsigned long cpages;
	unsigned long npages;
	unsigned long start;
	unsigned long end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void *pgmap_owner;
	unsigned long flags;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
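
/*
 * Typical device-driver flow, sketched from the declarations above; the
 * device-page allocation and copy step, and names such as src_pfns,
 * dst_pfns and dev, are placeholders:
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= end,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.pgmap_owner	= dev,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	if (args.cpages) {
 *		(allocate device pages, fill args.dst, copy the data)
 *		migrate_vma_pages(&args);
 *		migrate_vma_finalize(&args);
 *	}
 */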

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */