/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x and 4K/16K on 8xx). For PPC64 we support either a 4K or a 64K
 * software page size. When using 64K pages, however, whether we are really
 * supporting 64K pages in HW or not is irrelevant to those definitions.
 */
#define PAGE_SHIFT		CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)

#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT 19	/* 512k pages */
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define HPAGE_SHIFT 22	/* 4M pages */
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif
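/*
 * For example (illustrative numbers only): with CONFIG_PPC_FSL_BOOK3E and
 * 4K base pages, HPAGE_SHIFT is 22, so HPAGE_SIZE is 4M, HPAGE_MASK is
 * ~0x3fffff, and HUGETLB_PAGE_ORDER is 22 - 12 = 10.
 */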

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
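/*
 * Worked example of the sign extension above (illustrative, assuming
 * PAGE_SHIFT == 12):
 *
 *	(1 << 12) - 1    == 0x00000fff          (int)
 *	~0xfff           == 0xfffff000          (int, negative)
 *	(u64)PAGE_MASK   == 0xfffffffffffff000  (sign-extended, as wanted)
 *
 * Had this been ~((1UL << PAGE_SHIFT) - 1) on a 32-bit kernel, assigning
 * it to a 64-bit type (e.g. phys_addr_t with 36-bit physical addresses)
 * would zero-extend and lose the high mask bits.
 */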

/*
 * KERNELBASE is the virtual address of the start of the kernel, it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 *
 *	KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to determine a physical address from a virtual one:
 *
 *	va = pa + PAGE_OFFSET - MEMORY_START
 *	va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
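/*
 * Illustrative numbers only (a common ppc32 layout, not a requirement):
 * with KERNELBASE == PAGE_OFFSET == 0xc0000000 and PHYSICAL_START ==
 * MEMORY_START == 0, the linear-map equation above holds trivially
 * (0 == 0) and both translations agree:
 *
 *	va = pa + PAGE_OFFSET - MEMORY_START  = pa + 0xc0000000
 *	va = pa + KERNELBASE - PHYSICAL_START = pa + 0xc0000000
 */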

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
	unsigned long min_pfn = ARCH_PFN_OFFSET;

	return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
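/*
 * Round trip (illustrative 32-bit non-Book-E numbers: 4K pages,
 * PAGE_OFFSET == 0xc0000000, MEMORY_START == 0):
 *
 *	virt_to_pfn(0xc0003000) == __pa(0xc0003000) >> 12 == 0x3
 *	pfn_to_kaddr(0x3)       == __va(0x3 << 12)        == 0xc0003000
 */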

#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then. However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 * With RELOCATABLE && PPC32, we support loading the kernel at any physical
 * address without any restriction on the page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
 *				MODULO(_stext.run,256M)
 *
 * and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
 *
 * When we process relocations, we cannot depend on the
 * existing equation for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * Where:
 *	PHYSICAL_START = kernstart_addr = Physical address of _stext
 *	KERNELBASE = Compiled virtual address of _stext.
 *
 * This formula holds true iff the kernel load address is TLB page aligned.
 *
 * In our case, we need to also account for the shift in the kernel virtual
 * address.
 *
 * E.g.,
 *
 * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
 * In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
 *
 * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		 = 0xbc100000, which is wrong.
 *
 * Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * Where:
 *	PHYSICAL_START = dynamic load address (kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base =
 *			     = ALIGN_DOWN(KERNELBASE,256M) +
 *				MODULO(PHYSICAL_START,256M)
 *
 * To make the cost of __va() / __pa() more lightweight, we introduce
 * a new variable virt_phys_offset, which will hold:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE,256M) -
 *				ALIGN_DOWN(PHYSICAL_START,256M)
 *
 * Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 * and
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 * This also results in better code generation.
 */
#define __va(x)								\
({									\
	VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET);		\
	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);	\
})

#define __pa(x)								\
({									\
	VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET);		\
	(unsigned long)(x) & 0x0fffffffffffffffUL;			\
})
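
/*
 * Why | and & work here (illustrative, assuming the usual 64-bit layout
 * with PAGE_OFFSET == 0xc000000000000000): physical addresses never set
 * the top four address bits, so OR-ing in PAGE_OFFSET never carries and
 * masking it off never borrows:
 *
 *	__va(0x2000): 0x2000 | 0xc000000000000000 == 0xc000000000002000
 *	__pa(0xc000000000002000):
 *		0xc000000000002000 & 0x0fffffffffffffffUL == 0x2000
 */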

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
				 VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size)	__ALIGN_KERNEL(addr, size)
#define _ALIGN_DOWN(addr, size)	((addr)&(~((typeof(addr))(size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr, size)	_ALIGN_UP(addr, size)
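/*
 * For example (size must be a power of two):
 *
 *	_ALIGN_UP(0x1234, 0x1000)   == 0x2000
 *	_ALIGN_DOWN(0x1234, 0x1000) == 0x1000
 *	_ALIGN_UP(0x2000, 0x1000)   == 0x2000	(already aligned)
 */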

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#endif

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages. This works because we know that
 * the page tables live in kernel space. If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size. This masks those bits.
 */
#define HUGEPD_SHIFT_MASK	0x3f
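/*
 * Sketch of how such an entry decodes under the scheme described above
 * (illustrative bit layout, not a dump from real hardware):
 *
 *	entry = 0x8000000001234018
 *	entry & PD_HUGE            -> set: this level holds hugeptes
 *	entry & HUGEPD_SHIFT_MASK  -> 0x18, the encoded size bits
 *	the bits in between        -> kernel address of the hugepte table
 */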

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)		(0)
#define pgd_huge(pgd)		(0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>

/*
 * Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.
 */
#ifdef CONFIG_PPC32
#define ARCH_ZONE_DMA_BITS 30
#else
#define ARCH_ZONE_DMA_BITS 31
#endif

#endif /* _ASM_POWERPC_PAGE_H */