/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremap of pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

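/*
 * Illustrative flag lifecycle for a buffered write (sketch only; the exact
 * sequence varies by filesystem):
 *
 *	lock_page(page);			PG_locked set
 *	set_page_dirty(page);			PG_dirty set
 *	...
 *	clear_page_dirty_for_io(page);		PG_dirty cleared
 *	set_page_writeback(page);		PG_writeback set
 *	unlock_page(page);			PG_locked cleared
 *	...					I/O completes
 *	end_page_writeback(page);		PG_writeback cleared
 */
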
/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}
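
/*
 * Illustrative sketch (not generated code): every tail page of a compound
 * page stores "(unsigned long)head + 1" in ->compound_head, so the low bit
 * doubles as the tail marker:
 *
 *	struct page *tail = head + 1;
 *	set_compound_head(tail, head);	// tail->compound_head = head | 1
 *	compound_head(tail) == head;	// decodes the head pointer
 *	compound_head(head) == head;	// non-tail pages map to themselves
 */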

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })
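
/*
 * For illustration: with PF_NO_TAIL, a test such as PageDirty(tail)
 * silently redirects to the head page, while a modification such as
 * SetPageDirty(tail) (enforce == 1) trips VM_BUG_ON_PGFLAGS() under
 * CONFIG_DEBUG_VM_PGFLAGS before operating on the head.
 */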

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
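
/*
 * As an illustration (not generated code), PAGEFLAG(Dirty, dirty, PF_HEAD)
 * expands to roughly:
 *
 *	static __always_inline int PageDirty(struct page *page)
 *		{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *		{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *		{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 */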

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool take_page_off_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages; in that case page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

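/*
 * Illustrative decoding of the low bits of page->mapping (sketch only):
 *
 *	file page:	mapping = address_space			(low bits clear)
 *	anon page:	mapping = anon_vma | PAGE_MAPPING_ANON		(0x1)
 *	movable page:	mapping = address_space | PAGE_MAPPING_MOVABLE	(0x2)
 *	KSM page:	mapping = stable tree node | PAGE_MAPPING_KSM	(0x3)
 */
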
static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
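
/*
 * Illustrative pairing (sketch, not a real caller):
 *
 *	writer (e.g. read completion)		reader
 *	-----------------------------		------
 *	copy data into the page
 *	SetPageUptodate(page)			if (PageUptodate(page))
 *						        read data from the page
 *
 * SetPageUptodate()'s smp_wmb() orders the data stores before the flag
 * store; PageUptodate()'s conditional smp_rmb() orders the flag load
 * before the subsequent data loads.
 */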

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is only safe to call while
 * split_huge_pmd() cannot run from under us, e.g. when protected by the
 * MMU notifier; otherwise the page->_mapcount check may produce false
 * positives.
 *
 * We have to treat page cache THP differently, since every subpage of it
 * gets its _mapcount incremented once it is PMD mapped. But a subpage may
 * also be PTE mapped in the current process, so we compare the subpage's
 * _mapcount to the compound_mapcount to filter out the PTE mapped case.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	struct page *head;

	if (!PageTransCompound(page))
		return 0;

	if (PageAnon(page))
		return atomic_read(&page->_mapcount) < 0;

	head = compound_head(page);
	/* File THP is PMD mapped and not PTE mapped */
	return atomic_read(&page->_mapcount) ==
	       atomic_read(compound_mapcount_ptr(head));
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For the page, PageDoubleMap means ->_mapcount in all sub-pages is offset up
 * by one. This additional reference goes away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSCFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used. Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve 0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_kmemcg	0x00000200
#define PG_table	0x00000400
#define PG_guard	0x00000800

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}
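
/*
 * Worked example (illustrative): page_type starts at 0xffffffff. Marking a
 * page as a page table clears PG_table:
 *
 *	0xffffffff & ~0x00000400 = 0xfffffbff
 *
 * and PageType(page, PG_table) then computes
 *
 *	0xfffffbff & (0xf0000000 | 0x00000400) = 0xf0000000 == PAGE_TYPE_BASE
 *
 * i.e. true, while an ordinary mapcount value never has all of the
 * PAGE_TYPE_BASE bits set.
 */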

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing it in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free pages,
 * allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require re-setting PageOffline() on the pages instead of giving them to
 * the buddy via online_page_callback_t.
 */
PAGE_TYPE_OPS(Offline, offline)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)
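
/*
 * Illustrative use (sketch; the real checks live in mm/page_alloc.c):
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page);		// report the stray flags
 */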

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */