blob: bf1bf8c7c332b9328d17fdf2009a8f95284f056c [file] [log] [blame]
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>

/*
 * No part of the MIPS address space is reserved exclusively for huge
 * pages, so every range is usable for both base and huge page mappings.
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	(void)mm;
	(void)addr;
	(void)len;
	return 0;
}
20
David Brazdil0f672f62019-12-10 10:32:29 +000021#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000022static inline int prepare_hugepage_range(struct file *file,
23 unsigned long addr,
24 unsigned long len)
25{
26 unsigned long task_size = STACK_TOP;
27 struct hstate *h = hstate_file(file);
28
29 if (len & ~huge_page_mask(h))
30 return -EINVAL;
31 if (addr & ~huge_page_mask(h))
32 return -EINVAL;
33 if (len > task_size)
34 return -ENOMEM;
35 if (task_size - len < addr)
36 return -EINVAL;
37 return 0;
38}
39
David Brazdil0f672f62019-12-10 10:32:29 +000040#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000041static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
42 unsigned long addr, pte_t *ptep)
43{
44 pte_t clear;
45 pte_t pte = *ptep;
46
47 pte_val(clear) = (unsigned long)invalid_pte_table;
48 set_pte_at(mm, addr, ptep, clear);
49 return pte;
50}
51
David Brazdil0f672f62019-12-10 10:32:29 +000052#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000053static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
54 unsigned long addr, pte_t *ptep)
55{
Olivier Deprez0e641232021-09-23 10:07:05 +020056 /*
57 * clear the huge pte entry firstly, so that the other smp threads will
58 * not get old pte entry after finishing flush_tlb_page and before
59 * setting new huge pte entry
60 */
61 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
62 flush_tlb_page(vma, addr);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000063}
64
David Brazdil0f672f62019-12-10 10:32:29 +000065#define __HAVE_ARCH_HUGE_PTE_NONE
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000066static inline int huge_pte_none(pte_t pte)
67{
68 unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
69 return !val || (val == (unsigned long)invalid_pte_table);
70}
71
David Brazdil0f672f62019-12-10 10:32:29 +000072#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000073static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
74 unsigned long addr,
75 pte_t *ptep, pte_t pte,
76 int dirty)
77{
78 int changed = !pte_same(*ptep, pte);
79
80 if (changed) {
81 set_pte_at(vma->vm_mm, addr, ptep, pte);
82 /*
83 * There could be some standard sized pages in there,
84 * get them all.
85 */
86 flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
87 }
88 return changed;
89}
90
/* MIPS keeps no extra arch-specific state in huge pages, so there is
 * nothing to clear when one is freed back to the pool. */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
94
#include <asm-generic/hugetlb.h>

#endif /* __ASM_HUGETLB_H */