// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim.  The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (lruvec->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
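
/*
 * A rough, configuration-dependent example of the bit budget: on a
 * 64-bit kernel with CONFIG_MEMCG (MEM_CGROUP_ID_SHIFT == 16) and
 * NODES_SHIFT == 6, EVICTION_SHIFT works out to 1 + 1 + 6 + 16 = 24,
 * leaving 40 bits of the xarray value for the eviction timestamp.
 */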

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults.  However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault.  In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}
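
/*
 * Sketch of the resulting layout inside the xarray value, most to
 * least significant bits (field widths depend on the configuration
 * and on bucket_order, so this is illustrative rather than exact):
 *
 *	| eviction >> bucket_order | memcg id | node id | workingset |
 *
 * unpack_shadow() below peels these fields off in reverse order.
 */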

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions.  This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion.  That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
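	 *
	 * As a hypothetical example, aging a page charged to memcg
	 * A/B/C bumps the nonresident_age of C's lruvec first, then
	 * B's, then A's, and finally the root lruvec, by walking up
	 * via parent_lruvec() in the loop below.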
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	bool file = page_is_file_lru(page);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID.  It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page.  This is
	 * impossible to tell from the available data.  However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway.  Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases.  There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old.  But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again.  However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
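
	/*
	 * Worked example with a deliberately tiny, hypothetical 8-bit
	 * eviction space (EVICTION_MASK == 255): a snapshot of E = 250
	 * at eviction time and a reading of R = 4 at refault time give
	 * (4 - 250) & 255 == 10, the true distance, as long as fewer
	 * than 256 evictions/activations happened in between.  The lap
	 * case described above is when that assumption breaks.
	 */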

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);

	/*
	 * Compare the distance to the existing workingset size.  We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset.  Whether
	 * workingset competition needs to consider anon or not depends
	 * on having swap.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		}
	}
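
	/*
	 * Spelled out, the sums above amount to (derived from the
	 * code, not a separate spec):
	 *
	 *   file refault, no swap:  NR_ACTIVE_FILE
	 *   anon refault, no swap:  NR_ACTIVE_FILE + NR_INACTIVE_FILE
	 *   file refault, swap:     NR_ACTIVE_FILE + NR_ACTIVE_ANON +
	 *                           NR_INACTIVE_ANON
	 *   anon refault, swap:     NR_ACTIVE_FILE + NR_INACTIVE_FILE +
	 *                           NR_ACTIVE_ANON
	 */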
	if (refault_distance > workingset_size)
		goto out;

	SetPageActive(page);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		spin_lock_irq(&page_pgdat(page)->lru_lock);
		lru_note_cost_page(page);
		spin_unlock_irq(&page_pgdat(page)->lru_lock);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be.  The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries.  We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list.  Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case.  Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th.  Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);
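
	/*
	 * Rough example with typical (but not universal) numbers: with
	 * 4K pages and XA_CHUNK_SHIFT == 6, this allows one shadow node
	 * per 8 pages of memory.  At the ~7 nodes per page mentioned
	 * above (roughly 585 bytes per node), that caps shadow nodes at
	 * about 73 bytes per page, i.e. the ~1.8% of memory quoted in
	 * the comment.
	 */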

	if (!nodes)
		return SHRINK_EMPTY;

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_slab_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2).  However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
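	/*
	 * Hypothetical example: with ~40 timestamp bits (see the
	 * EVICTION_SHIFT example above) and 16G of 4K pages,
	 * totalram_pages() is about 2^22, so max_order is 22 and
	 * bucket_order stays 0; only far larger machines or far fewer
	 * timestamp bits force evictions into coarser buckets.
	 */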
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);