/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

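/*
 * The MMU page-size index is kept in bits 2-5 of the hugepd value
 * (four bits, as the BUILD_BUG_ON in hugepd_page() checks), so mask
 * and shift to recover it.
 */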
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

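/* Page-size shift of the huge pages mapped below this hugepd. */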
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
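
/*
 * Hash invalidates hugepage translations as part of flushing the HPTE,
 * so an explicit per-page TLB flush is only needed when running radix.
 */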
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

#else

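/*
 * Outside 64-bit book3s, the hugepd holds the address of the hugepte
 * table: 8xx stores a physical address, the others store a kernel
 * virtual address with the PD_HUGE bit stripped, so it is ORed back
 * in to recover the pointer.
 */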
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

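/*
 * 8xx encodes the page size in the _PMD_PAGE_MASK bits: the decode
 * below yields shift 19 (512K) or 23 (8M). The other platforms store
 * the shift directly in the low bits of the hugepd value.
 */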
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */

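/*
 * Return the hugepte mapping @addr within the table pointed to by the
 * hugepd: the directory covers 1 << pdshift bytes and each hugepte
 * maps 1 << hugepd_shift(hpd) bytes, so index by the bits in between.
 */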
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're
	 * all identical, i.e. idx stays 0 in that case.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}

void flush_dcache_icache_hugepage(struct page *page);

int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len);

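/*
 * With the slice-based address-space layout, a slice reserved for huge
 * pages must not be handed out for normal mappings. Radix manages the
 * address space without slices, so no such check is needed there.
 */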
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
		return slice_is_hugepage_only_range(mm, addr, len);
	return 0;
}

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
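
/*
 * 8xx maps huge pages through the normal TLB path, so the generic
 * per-page flush is sufficient; the other platforms provide an
 * out-of-line implementation.
 */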
#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * If the arch doesn't supply something else, assume that hugepage-size
 * aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

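/*
 * A huge page is mapped by a single hugepte, so installing it is the
 * same operation as for a normal page.
 */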
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

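/*
 * Atomically clear every bit of the PTE and return the old value.
 * The 64-bit pte_update() additionally takes the mm/address context
 * and a final flag saying this is a huge PTE.
 */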
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

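/*
 * Clear the PTE and make sure no stale hugepage translation for it
 * survives in the TLB; the old PTE value is not needed here.
 */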
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep,
				      pte_t pte, int dirty);

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

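/*
 * !CONFIG_HUGETLB_PAGE: provide just enough stubs for generic code
 * to build.
 */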
#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */