/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <spaces.h>
#include <linux/const.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>

/*
 * PAGE_SHIFT determines the page size
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT	12
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PAGE_SHIFT	13
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT	14
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PAGE_SHIFT	15
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT	16
#endif
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
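
/*
 * For example, with CONFIG_PAGE_SIZE_16KB the definitions above give
 * PAGE_SHIFT = 14, PAGE_SIZE = 0x4000 and, on a 32-bit kernel,
 * PAGE_MASK = 0xffffc000, so "addr & PAGE_MASK" rounds an address down
 * to its page boundary.
 */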

/*
 * This is used for calculating the real page sizes
 * for FTLB or VTLB + FTLB configurations.
 */
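/*
 * For the usual base page sizes the (PAGE_SHIFT - 10) / 2 formula below
 * yields 1 for 4 KB, 2 for 16 KB and 3 for 64 KB pages; the 1 GB and
 * 4 GB checks are only reached in the FTLBSIZEEXT case.
 */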
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
	switch (mmuextdef) {
	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
		if (PAGE_SIZE == (1 << 30))
			return 5;
		if (PAGE_SIZE == (1llu << 32))
			return 6;
		if (PAGE_SIZE > (256 << 10))
			return 7; /* reserved */
		/* fall through */
	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
		return (PAGE_SHIFT - 10) / 2;
	default:
		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d\n",
		      mmuextdef >> 14);
	}
}

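/*
 * A huge page maps one full last-level page table's worth of base pages
 * (2^(PAGE_SHIFT - 3) entries, assuming 8-byte ptes): e.g. 2 MB huge
 * pages of order 9 with 4 KB base pages, or 32 MB huge pages of order 11
 * with 16 KB base pages.
 */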
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT	({BUILD_BUG(); 0; })
#define HPAGE_SIZE	({BUILD_BUG(); 0; })
#define HPAGE_MASK	({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER	({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#include <linux/pfn.h>

extern void build_clear_page(void);
extern void build_copy_page(void);

/*
 * ARCH_PFN_OFFSET is normally defined only for FLATMEM configurations,
 * but our early memory init code uses it for all memory models, so
 * define it unconditionally.
 */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
extern unsigned long ARCH_PFN_OFFSET;
# define ARCH_PFN_OFFSET	ARCH_PFN_OFFSET
#else
# define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)
#endif

extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

extern unsigned long shm_align_mask;

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}
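
/*
 * pages_do_alias() returns non-zero when the two addresses fall on
 * different cache colours, i.e. when they differ within the bits
 * covered by shm_align_mask. For example, with shm_align_mask == 0x3fff
 * (a 16 KB alias span) the addresses 0x2000 and 0x6000 share a colour,
 * while 0x2000 and 0x3000 do not.
 */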

struct page;

static inline void clear_user_page(void *addr, unsigned long vaddr,
	struct page *page)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	clear_page(addr);
	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long) addr);
}

struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * These structs are used to enforce C type-checking on page table values.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#ifdef CONFIG_CPU_MIPS32
typedef struct { unsigned long pte_low, pte_high; } pte_t;
#define pte_val(x)	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x)	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#else
typedef struct { unsigned long long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
#endif
#else
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
#endif
typedef struct page *pgtable_t;

/*
 * Right now we don't support 4-level page tables, so all pud-related
 * definitions come from <asm-generic/pgtable-nopud.h>.
 */

/*
 * Finally, the top of the hierarchy: the pgd.
 */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/*
 * Manipulate page protection bits.
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)

/*
 * On R4000-style MMUs, where a TLB entry maps an adjacent even/odd pair
 * of pages, there is only a single global bit per pair of pages. When
 * writing to the TLB, make sure the bit is either set for both pages or
 * for neither. This macro is used to access the `buddy' of the pte we're
 * currently working on.
 */
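/*
 * Since the even and odd entries of a pair sit back to back in the page
 * table, XOR-ing the pte pointer with sizeof(pte_t) flips between them:
 * e.g. for 8-byte ptes, a pte at offset 0x10 has its buddy at 0x18 and
 * vice versa.
 */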
#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

/*
 * __pa()/__va() should be used only during mem init.
 */
static inline unsigned long ___pa(unsigned long x)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * For MIPS64 the virtual address may either be in one of
		 * the compatibility segments ckseg0 or ckseg1, or it may
		 * be in xkphys.
		 */
		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
	}

	if (!IS_ENABLED(CONFIG_EVA)) {
		/*
		 * We're using the standard MIPS32 legacy memory map, i.e.
		 * the address x is going to be in kseg0 or kseg1. We can
		 * handle either case by masking out the desired bits using
		 * CPHYSADDR.
		 */
		return CPHYSADDR(x);
	}

	/*
	 * EVA is in use, so the memory map could be anything, making it
	 * unsafe to just mask out bits.
	 */
	return x - PAGE_OFFSET + PHYS_OFFSET;
}
#define __pa(x)		___pa((unsigned long)(x))
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
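
/*
 * For example, on a 32-bit non-EVA kernel with PAGE_OFFSET at 0x80000000
 * and PHYS_OFFSET 0, __pa(0x80004000) (a kseg0 address) yields the
 * physical address 0x4000, and __va(0x4000) maps it back to 0x80004000.
 */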
#include <asm/io.h>

/*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
 * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
 * discussion can be found in the lkml posting
 * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com>, which is
 * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
 *
 * It is unclear whether the miscompilations mentioned in
 * http://lkml.org/lkml/2010/8/8/138 also affect MIPS, so we keep this
 * workaround until GCC 3.x has been retired, at which point we can apply
 * https://patchwork.linux-mips.org/patch/1541/
 */

#ifndef __pa_symbol
#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#ifdef CONFIG_FLATMEM

static inline int pfn_valid(unsigned long pfn)
{
	/* avoid <linux/mm.h> include hell */
	extern unsigned long max_mapnr;
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && pfn < max_mapnr;
}

#elif defined(CONFIG_SPARSEMEM)

/* pfn_valid is defined in linux/mmzone.h */

#elif defined(CONFIG_NEED_MULTIPLE_NODES)

#define pfn_valid(pfn)						\
({								\
	unsigned long __pfn = (pfn);				\
	int __n = pfn_to_nid(__pfn);				\
	((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn +	\
			       NODE_DATA(__n)->node_spanned_pages)	\
		    : 0);					\
})

#endif

#define virt_to_pfn(kaddr)	PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
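
/*
 * For example, with 4 KB pages on a 32-bit non-EVA kernel with
 * PHYS_OFFSET 0, virt_to_pfn(0x80004000) is 4 and virt_to_page()
 * returns the struct page describing that physical frame.
 */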

extern bool __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr)					\
	__virt_addr_valid((const volatile void *)(kaddr))

#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | \
	 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_PAGE_H */