/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_MMU_H_
#define _ASM_POWERPC_MMU_H_
#ifdef __KERNEL__

#include <linux/types.h>

#include <asm/asm-const.h>

/*
 * MMU features bit definitions
 */

/*
 * MMU families
 */
#define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
#define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
#define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)

/* Radix page table supported and enabled */
#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)

/*
 * Individual features below.
 */

/*
 * Support for KUEP feature.
 */
#define MMU_FTR_KUEP			ASM_CONST(0x00000400)

/*
 * Support for memory protection keys.
 */
#define MMU_FTR_PKEY			ASM_CONST(0x00000800)

/* Guest Translation Shootdown Enable */
#define MMU_FTR_GTSE			ASM_CONST(0x00001000)

/*
 * Support for 68 bit VA space. We added that from ISA 2.05
 */
#define MMU_FTR_68_BIT_VA		ASM_CONST(0x00002000)
/*
 * Kernel read only support.
 * We added the ppp value 0b110 in ISA 2.04.
 */
#define MMU_FTR_KERNEL_RO		ASM_CONST(0x00004000)

/*
 * We need to clear the top 16 bits of the VA (from the remaining 64 bits)
 * in tlbie* instructions
 */
#define MMU_FTR_TLBIE_CROP_VA		ASM_CONST(0x00008000)

/* Enable use of high BAT registers */
#define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)

/* Enable >32-bit physical addresses on a 32-bit processor; only used
 * by CONFIG_PPC_BOOK3S_32 currently, as BookE has supported that from day 1
 */
#define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)

/* Enable use of broadcast TLB invalidations. We don't always set it
 * on processors that support it due to other constraints with the
 * use of such invalidations
 */
#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)

/* Enable use of tlbilx invalidate instructions.
 */
#define MMU_FTR_USE_TLBILX		ASM_CONST(0x00080000)

/* This indicates that the processor cannot handle multiple outstanding
 * broadcast tlbivax or tlbsync. This makes the code use a spinlock
 * around such invalidate forms.
 */
#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)

/* This indicates that the processor doesn't handle way selection
 * properly and needs SW to track and update the LRU state. This
 * is specific to an erratum on e300c2/c3/c4 class parts
 */
#define MMU_FTR_NEED_DTLB_SW_LRU	ASM_CONST(0x00200000)

/* Enable use of TLB reservation. Processor should support tlbsrx.
 * instruction and MAS0[WQ].
 */
#define MMU_FTR_USE_TLBRSRV		ASM_CONST(0x00800000)

/* Use paired MAS registers (MAS7||MAS3, etc.)
 */
#define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)

/* Doesn't support the B bit (1T segment) in SLBIE
 */
#define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x02000000)

/* Support 16M large pages
 */
#define MMU_FTR_16M_PAGE		ASM_CONST(0x04000000)

/* Supports TLBIEL variant
 */
#define MMU_FTR_TLBIEL			ASM_CONST(0x08000000)

/* Supports tlbies w/o locking
 */
#define MMU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x10000000)

/* Large pages can be marked CI
 */
#define MMU_FTR_CI_LARGE_PAGE		ASM_CONST(0x20000000)

/* 1T segments available
 */
#define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)

/*
 * Supports KUAP (key 0 controlling userspace addresses) on radix
 */
#define MMU_FTR_RADIX_KUAP		ASM_CONST(0x80000000)

/* MMU feature bit sets for various CPUs */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
#define MMU_FTRS_POWER		MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970		MMU_FTRS_POWER | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5		MMU_FTRS_POWER | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER6		MMU_FTRS_POWER5 | MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA
#define MMU_FTRS_POWER7		MMU_FTRS_POWER6
#define MMU_FTRS_POWER8		MMU_FTRS_POWER6
#define MMU_FTRS_POWER9		MMU_FTRS_POWER6
#define MMU_FTRS_POWER10	MMU_FTRS_POWER6
#define MMU_FTRS_CELL		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
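
/*
 * How these sets are consumed (an illustrative sketch, not part of this
 * header): a cputable entry is expected to select one of the MMU_FTRS_*
 * masks above for its mmu_features field, roughly like the hypothetical
 * entry below (field names taken from struct cpu_spec in <asm/cputable.h>):
 *
 *	static struct cpu_spec example_cpu = {
 *		.cpu_name	= "example POWER9",
 *		.mmu_features	= MMU_FTRS_POWER9,
 *	};
 *
 * Whatever ends up in cur_cpu_spec->mmu_features is still filtered through
 * MMU_FTRS_POSSIBLE below before any feature test can return true.
 */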
#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <asm/cputable.h>
#include <asm/page.h>

typedef pte_t *pgtable_t;

#ifdef CONFIG_PPC_FSL_BOOK3E
#include <asm/percpu.h>
DECLARE_PER_CPU(int, next_tlbcam_idx);
#endif

enum {
	MMU_FTRS_POSSIBLE =
#ifdef CONFIG_PPC_BOOK3S
		MMU_FTR_HPTE_TABLE |
#endif
#ifdef CONFIG_PPC_8xx
		MMU_FTR_TYPE_8xx |
#endif
#ifdef CONFIG_40x
		MMU_FTR_TYPE_40x |
#endif
#ifdef CONFIG_44x
		MMU_FTR_TYPE_44x |
#endif
#if defined(CONFIG_E200) || defined(CONFIG_E500)
		MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX |
#endif
#ifdef CONFIG_PPC_47x
		MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL |
#endif
#ifdef CONFIG_PPC_BOOK3S_32
		MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU |
#endif
#ifdef CONFIG_PPC_BOOK3E_64
		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
#endif
#ifdef CONFIG_PPC_BOOK3S_64
		MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
		MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
#endif
#ifdef CONFIG_PPC_RADIX_MMU
		MMU_FTR_TYPE_RADIX |
		MMU_FTR_GTSE |
#ifdef CONFIG_PPC_KUAP
		MMU_FTR_RADIX_KUAP |
#endif /* CONFIG_PPC_KUAP */
#endif /* CONFIG_PPC_RADIX_MMU */
#ifdef CONFIG_PPC_MEM_KEYS
		MMU_FTR_PKEY |
#endif
#ifdef CONFIG_PPC_KUEP
		MMU_FTR_KUEP |
#endif /* CONFIG_PPC_KUEP */

		0,
};

static inline bool early_mmu_has_feature(unsigned long feature)
{
	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
}
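
/*
 * Minimal usage sketch (hypothetical call site): code that runs before
 * mmu_feature_keys_init() must use the plain mask test above, e.g.
 *
 *	if (early_mmu_has_feature(MMU_FTR_TYPE_RADIX))
 *		setup_radix();	// placeholder for an early radix setup path
 *
 * Once the static keys are initialised, mmu_has_feature() below performs
 * the same test via a patched branch instead of a load and mask.
 */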

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
#include <linux/jump_label.h>

#define NUM_MMU_FTR_KEYS	32

extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];

extern void mmu_feature_keys_init(void);

static __always_inline bool mmu_has_feature(unsigned long feature)
{
	int i;

#ifndef __clang__ /* clang can't cope with this */
	BUILD_BUG_ON(!__builtin_constant_p(feature));
#endif

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
	if (!static_key_initialized) {
		printk("Warning! mmu_has_feature() used prior to jump label init!\n");
		dump_stack();
		return early_mmu_has_feature(feature);
	}
#endif

	if (!(MMU_FTRS_POSSIBLE & feature))
		return false;

	i = __builtin_ctzl(feature);
	return static_branch_likely(&mmu_feature_keys[i]);
}
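
/*
 * What the static key buys us (a sketch of the intended fast path, under the
 * assumption that jump labels are available): since the feature argument has
 * to be a compile-time constant, a test such as
 *
 *	if (mmu_has_feature(MMU_FTR_TLBIEL))
 *		do_tlbiel_flush();	// hypothetical caller
 *
 * compiles down to a nop/branch patched through
 * mmu_feature_keys[__builtin_ctzl(MMU_FTR_TLBIEL)], and is discarded
 * entirely at build time when the bit is not part of MMU_FTRS_POSSIBLE for
 * the current configuration.
 */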

static inline void mmu_clear_feature(unsigned long feature)
{
	int i;

	i = __builtin_ctzl(feature);
	cur_cpu_spec->mmu_features &= ~feature;
	static_branch_disable(&mmu_feature_keys[i]);
}
#else

static inline void mmu_feature_keys_init(void)
{

}

static inline bool mmu_has_feature(unsigned long feature)
{
	return early_mmu_has_feature(feature);
}

static inline void mmu_clear_feature(unsigned long feature)
{
	cur_cpu_spec->mmu_features &= ~feature;
}
#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */

extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;

#ifdef CONFIG_PPC64
/* This is our real memory area size on ppc64 server; on embedded, we
 * make it match the size of our bolted TLB area
 */
extern u64 ppc64_rma_size;

/* Cleanup function used by kexec */
extern void mmu_cleanup_all(void);
extern void radix__mmu_cleanup_all(void);

/* Functions for creating and updating partition table on POWER9 */
extern void mmu_partition_table_init(void);
extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
					  unsigned long dw1, bool flush);
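/*
 * Note on the prototype above (a reading of how it is used, not a definition
 * made here): dw0 and dw1 are the two doublewords of the partition table
 * entry for the given lpid; the radix and hash setup code fill them with the
 * appropriate table base and size fields, e.g. a hypothetical caller:
 *
 *	mmu_partition_table_set_entry(lpid, patb0, patb1, false);
 */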
#endif /* CONFIG_PPC64 */

struct mm_struct;
#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
}

static inline bool early_radix_enabled(void)
{
	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
#else
static inline bool radix_enabled(void)
{
	return false;
}

static inline bool early_radix_enabled(void)
{
	return false;
}
#endif
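
/*
 * Typical dispatch pattern (illustrative only; the real call sites live
 * elsewhere in arch/powerpc):
 *
 *	if (radix_enabled())
 *		radix__do_flush();	// hypothetical radix variant
 *	else
 *		hash__do_flush();	// hypothetical hash variant
 *
 * The stubbed-out versions above let configurations without radix support
 * fold the radix branch away at compile time.
 */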

#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
{
	return rodata_enabled;
}
#else
static inline bool strict_kernel_rwx_enabled(void)
{
	return false;
}
#endif
#endif /* !__ASSEMBLY__ */

/* The kernel uses the constants below to index into the page sizes array.
 * The use of fixed constants for this purpose is better for the performance
 * of the low level hash refill handlers.
 *
 * A non-supported page size has a "shift" field set to 0
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable
 *
 * Note: This array ended up being a false good idea as it's growing to the
 * point where I wonder if we should replace it with something different,
 * to think about, feedback welcome. --BenH.
 */

/* These are #defines as they have to be used in assembly */
#define MMU_PAGE_4K	0
#define MMU_PAGE_16K	1
#define MMU_PAGE_64K	2
#define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K	4
#define MMU_PAGE_512K	5
#define MMU_PAGE_1M	6
#define MMU_PAGE_2M	7
#define MMU_PAGE_4M	8
#define MMU_PAGE_8M	9
#define MMU_PAGE_16M	10
#define MMU_PAGE_64M	11
#define MMU_PAGE_256M	12
#define MMU_PAGE_1G	13
#define MMU_PAGE_16G	14
#define MMU_PAGE_64G	15

/*
 * N.B. we need to change the type of hpte_page_sizes if this gets to be > 16
 * Also we need to change the type of mm_context.low/high_slices_psize.
 */
#define MMU_PAGE_COUNT	16
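
/*
 * Example of the fixed-constant indexing the comment above is describing
 * (mmu_psize_defs is assumed from the mm code, it is not declared here):
 *
 *	if (mmu_psize_defs[MMU_PAGE_64K].shift)
 *		...;	// 64K pages are supported by this MMU
 *
 * i.e. the page size array can be indexed with an immediate rather than a
 * computed value, which keeps the low level hash refill handlers cheap.
 */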

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/mmu.h>
#else /* CONFIG_PPC_BOOK3S_64 */

#ifndef __ASSEMBLY__
/* MMU initialization */
extern void early_init_mmu(void);
extern void early_init_mmu_secondary(void);
extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size);
static inline void mmu_early_init_devtree(void) { }

static inline void pkey_early_init_devtree(void) {}

extern void *abatron_pteptrs[2];
#endif /* __ASSEMBLY__ */
#endif

#if defined(CONFIG_PPC_BOOK3S_32)
/* 32-bit classic hash table MMU */
#include <asm/book3s/32/mmu-hash.h>
#elif defined(CONFIG_PPC_MMU_NOHASH)
#include <asm/nohash/mmu.h>
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */