/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Macros for manipulating and testing flags related to a
 * pageblock_nr_pages number of pages.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Original author, Mel Gorman
 * Major cleanups and reduction of bit operations, Andy Whitcroft
 */
#ifndef PAGEBLOCK_FLAGS_H
#define PAGEBLOCK_FLAGS_H

#include <linux/types.h>

#define PB_migratetype_bits 3
/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
	PB_migrate,
	PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
			/* 3 bits required for migrate types */
	PB_migrate_skip,/* If set the block is skipped by compaction */

	/*
	 * Assume the bits will always align on a word. If this assumption
	 * changes then get/set pageblock needs updating.
	 */
	NR_PAGEBLOCK_BITS
};
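
/*
 * Illustrative sketch, not part of this header: the migratetype of a
 * pageblock occupies bits PB_migrate..PB_migrate_end, so a mask covering
 * those bits can be built from PB_migratetype_bits.  mm/internal.h builds
 * such a mask along these lines (the exact name and location are
 * assumptions here):
 *
 *	#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
 */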

#ifdef CONFIG_HUGETLB_PAGE

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Huge page sizes are variable */
extern unsigned int pageblock_order;

#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/* Huge pages are a constant size */
#define pageblock_order		HUGETLB_PAGE_ORDER

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

#else /* CONFIG_HUGETLB_PAGE */

/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
#define pageblock_order		(MAX_ORDER-1)

#endif /* CONFIG_HUGETLB_PAGE */

#define pageblock_nr_pages	(1UL << pageblock_order)
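
/*
 * Illustrative sketch only: since pageblock_nr_pages is a power of two, a
 * pfn can be rounded down to the first page of its pageblock by masking.
 * The helper name below is hypothetical and not provided by this header.
 *
 *	static inline unsigned long example_pageblock_start_pfn(unsigned long pfn)
 *	{
 *		return pfn & ~(pageblock_nr_pages - 1);
 *	}
 */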

/* Forward declaration */
struct page;

unsigned long get_pfnblock_flags_mask(struct page *page,
				unsigned long pfn,
				unsigned long mask);

void set_pfnblock_flags_mask(struct page *page,
				unsigned long flags,
				unsigned long pfn,
				unsigned long mask);
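
/*
 * Usage sketch (illustrative only): both helpers operate on the word of
 * pageblock bits covering @pfn and touch only the bits selected by @mask.
 * Reading the compaction-skip bit of the block containing a page could
 * look like:
 *
 *	unsigned long skip = get_pfnblock_flags_mask(page, page_to_pfn(page),
 *						     1 << PB_migrate_skip);
 *
 * The CONFIG_COMPACTION wrappers below do exactly this.
 */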

/* Declarations for getting and setting flags. See mm/page_alloc.c */
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page),	\
			(1 << (PB_migrate_skip)))
#define clear_pageblock_skip(page) \
	set_pfnblock_flags_mask(page, 0, page_to_pfn(page),	\
			(1 << PB_migrate_skip))
#define set_pageblock_skip(page) \
	set_pfnblock_flags_mask(page, (1 << PB_migrate_skip),	\
			page_to_pfn(page),			\
			(1 << PB_migrate_skip))
#else
static inline bool get_pageblock_skip(struct page *page)
{
	return false;
}
static inline void clear_pageblock_skip(struct page *page)
{
}
static inline void set_pageblock_skip(struct page *page)
{
}
#endif /* CONFIG_COMPACTION */

#endif /* PAGEBLOCK_FLAGS_H */