// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

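/*
 * Per-vma swap readahead state is packed into the single long stored in
 * vma->swap_readahead_info: the page-aligned address of the last fault in
 * the high bits, the current readahead window in the next SWAP_RA_WIN_SHIFT
 * bits, and the recent hit count in the low bits. The macros below encode
 * and decode that value.
 */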
#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;
	struct swap_info_struct *si;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		swp_entry_t entry = swp_entry(i, 1);

		/* Avoid get_swap_device() warning for a bad swap entry */
		if (!swp_swap_info(entry))
			continue;
		/* Prevent swapoff from freeing swapper_spaces */
		si = get_swap_device(entry);
		if (!si)
			continue;
		nr = nr_swapper_spaces[i];
		spaces = swapper_spaces[i];
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
		put_swap_device(si);
	}
	return ret;
}

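/*
 * Global hit counter used by cluster-based readahead; vma-based readahead
 * keeps its hits in the per-vma swap_readahead_info instead.
 */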
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = compound_nr(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

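	/*
	 * Insert all nr subpages under the xa lock; if node allocation
	 * fails, xas_nomem() allocates outside the lock and the loop
	 * retries the whole insertion.
	 */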
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = hpage_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, NULL);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is an MADV_FREE page. The page's pte could
	 * have the dirty bit cleared while the page's SwapBacked bit is still
	 * set, because clearing the dirty bit and the SwapBacked bit is not
	 * protected by a lock. For such a page, unmap will not set the dirty
	 * bit, so page reclaim will not write the page out. This can cause
	 * data corruption when the page is swapped in later. Always setting
	 * the dirty bit for the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

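/*
 * vma-based readahead is used only when it is enabled and no rotational
 * swap device is active (nr_rotate_swap); otherwise swapin_readahead()
 * falls back to cluster-based readahead.
 */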
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

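		/*
		 * A set PG_readahead means this page was brought in by
		 * readahead and is now actually being used: count it as a
		 * hit so the readahead window can grow. Hits are tracked
		 * per-vma for vma-based readahead and globally otherwise.
		 */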
		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page = NULL, *new_page = NULL;
	struct swap_info_struct *si;
	int err;
	*new_page_allocated = false;

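	/*
	 * Keep trying until the entry is found in the swap cache, a newly
	 * allocated page has been inserted for it, the entry goes away, or
	 * we run out of memory. Races with other tasks adding the same
	 * entry are resolved via swapcache_prepare() below.
	 */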
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			break;
		found_page = find_get_page(swap_address_space(entry),
					   swp_offset(entry));
		put_swap_device(si);
		if (found_page)
			break;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swap_off, when swap_slot_cache is disabled,
		 * we have to handle the race between putting the
		 * swap entry into the swap cache and marking the swap
		 * slot as SWAP_HAS_CACHE.  That's done in a later part
		 * of the code, or else swap_off will be aborted if we
		 * return NULL here.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		} else if (err)		/* swp entry is obsolete ? */
			break;

		/* May fail (-ENOMEM) if XArray node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = add_to_swap_cache(new_page, entry,
					gfp_mask & GFP_RECLAIM_MASK);
		if (likely(!err)) {
			/* Initiate read into locked page */
			SetPageWorkingset(new_page);
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		put_swap_page(new_page, entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
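		/* Round the window up to the next power of two, minimum 4. */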
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

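	/*
	 * Consume the hits accumulated since the last window calculation.
	 * prev_offset feeds the adjacency check in __swapin_nr_pages() and
	 * is only advanced while there are no hits.
	 */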
	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_sem if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Test swap type to make sure the dereference is safe */
	if (likely(si->flags & (SWP_BLKDEV | SWP_FS))) {
		struct inode *inode = si->swap_file->f_mapping->host;
		if (inode_read_congested(inode))
			goto skip;
	}

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

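	/*
	 * The swap cache for one swap device is split into multiple
	 * address_spaces of SWAP_ADDRESS_SPACE_PAGES entries each, so that
	 * i_pages locking is spread across several smaller trees.
	 */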
	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	kvfree(swapper_spaces[type]);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

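/*
 * Clamp the candidate readahead pfn range [lpfn, rpfn) so that it stays
 * within both the vma and the PMD that contains the faulting address.
 */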
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if ((unlikely(non_swap_entry(entry)))) {
		pte_unmap(orig_pte);
		return;
	}

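	/*
	 * Read back the previous fault address, window and hit count from
	 * the vma and compute the new window from the fault pattern.
	 */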
	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_sem if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {0,};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

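	/*
	 * Walk the PTEs around the faulting address that swap_ra_info()
	 * collected and queue asynchronous reads for any swap entries
	 * found among them.
	 */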
	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it will read ahead blocks using either cluster-based
 * (i.e. physical, disk-offset based) or vma-based (i.e. virtual
 * addresses around the faulting address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
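/* /sys/kernel/mm/swap/vma_ra_enabled toggles vma-based readahead at runtime. */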
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		enable_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		enable_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif