/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;  /* Maximum huge pages or -1 if no maximum. */
	long used_hpages; /* Used count against maximum, includes */
			  /* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;  /* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;  /* Pages reserved against global pool to */
			  /* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
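
/*
 * Illustrative use of for_each_hstate() (a sketch, not part of the API
 * declared here): walk every registered huge page size and read a couple
 * of per-hstate counters, assuming a CONFIG_HUGETLB_PAGE=y build where
 * hstates[] has been populated.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free of %lu\n",
 *			h->name, h->free_huge_pages, h->nr_huge_pages);
 */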

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
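
/*
 * Subpool lifecycle sketch (illustrative only; hugetlbfs does this when a
 * filesystem instance is mounted, see the real caller in
 * fs/hugetlbfs/inode.c): create a pool capped at max_hpages with a
 * min_hpages floor, and drop the reference when the owner goes away.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */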

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
				struct vm_area_struct *vma,
				vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
				long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
				pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
				pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf) 0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags) NULL
#define follow_huge_pud(mm, addr, pud, flags) NULL
#define follow_huge_pgd(mm, addr, pgd, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep) ({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz) 0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p) do {} while (0)
#define move_hugetlb_state(old, new, reason) do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
			unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define this itself.
 */
#ifndef pgd_huge
#define pgd_huge(x) 0
#endif
#ifndef p4d_huge
#define p4d_huge(x) 0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;  /* inodes allowed */
	long free_inodes; /* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);
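
/*
 * Illustrative caller (a sketch of roughly how mm/mmap.c backs an anonymous
 * MAP_HUGETLB mapping with a file on the internal hugetlbfs mount; the real
 * call site carries additional error handling):
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  &user, HUGETLB_ANONHUGE_INODE,
 *				  (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */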

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
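
/*
 * Worked example for the helpers above (illustrative; the numbers assume an
 * x86_64 2MB hstate, i.e. order == 9 with PAGE_SHIFT == 12):
 *
 *	huge_page_size(h)        == 2MB  (PAGE_SIZE << 9)
 *	huge_page_shift(h)       == 21   (9 + PAGE_SHIFT)
 *	pages_per_huge_page(h)   == 512  (1 << 9)
 *	blocks_per_huge_page(h)  == 4096 (2MB in 512-byte sectors)
 */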

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
					struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it decides
 * whether a huge page should be placed in a movable zone at all. Movability
 * only matters if the huge page size supports migration in the first place;
 * there is no reason to make a huge page movable if it cannot be migrated.
 * The page size must also be small enough that migrating it out of a movable
 * zone remains feasible; merely sitting in a movable zone does not make the
 * migration feasible.
 *
 * So even though large huge page sizes such as the gigantic ones are
 * migratable, they are not treated as movable, because migrating them out
 * of a movable zone is not feasible.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0 when
 * there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
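
/*
 * Typical guard pattern (a sketch; the reporting and init paths in
 * mm/hugetlb.c bail out this way when the platform disabled huge pages
 * at boot):
 *
 *	if (!hugepages_supported())
 *		return;
 */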

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
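
/*
 * Pairing sketch for the two hooks above (illustrative; the real user is
 * hugetlb_change_protection() in mm/hugetlb.c, which also handles swap and
 * migration entries): start returns the old PTE with the entry cleared,
 * the caller builds the new PTE, and commit installs it.
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */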

void set_page_huge_active(struct page *page);

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
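
/*
 * Usage sketch (illustrative; this mirrors how the fault and walk paths in
 * mm/hugetlb.c take the per-PTE lock): look up the huge PTE, hold the lock
 * for the duration of the access, then release the returned lock.
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	entry = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 */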

#endif /* _LINUX_HUGETLB_H */