/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
                            unsigned long flags)
{
        unsigned long i;

        /* Make sure we have the base flags */
        if ((flags & _PAGE_PRESENT) == 0)
                flags |= pgprot_val(PAGE_KERNEL);

        /* We don't support the 4K PFN hack with ioremap */
        if (flags & H_PAGE_4K_PFN)
                return NULL;

        WARN_ON(pa & ~PAGE_MASK);
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        for (i = 0; i < size; i += PAGE_SIZE)
                if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
                        return NULL;

        return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        unmap_kernel_range((unsigned long)ea, size);
}

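/*
 * __ioremap_caller - Core helper shared by the ioremap* variants below.
 *
 * Page-aligns the request and establishes the mapping, taking the virtual
 * range from the IO area between ioremap_bot and IOREMAP_END once the slab
 * allocator is available, or bump-allocating from ioremap_bot during early
 * boot.  @caller identifies the call site for the vmalloc area bookkeeping.
 */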
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
                                unsigned long flags, void *caller)
{
        phys_addr_t paligned;
        void __iomem *ret;

        /*
         * Choose an address to map it to.  Once the vmalloc system is
         * running, we use it.  Before that, we map using addresses going
         * up from ioremap_bot.  The vmalloc area spans ioremap_bot
         * through IOREMAP_END.
         */
        paligned = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - paligned;

        if ((size == 0) || (paligned == 0))
                return NULL;

        if (slab_is_available()) {
                struct vm_struct *area;

                area = __get_vm_area_caller(size, VM_IOREMAP,
                                            ioremap_bot, IOREMAP_END,
                                            caller);
                if (area == NULL)
                        return NULL;

                area->phys_addr = paligned;
                ret = __ioremap_at(paligned, area->addr, size, flags);
                if (!ret)
                        vunmap(area->addr);
        } else {
                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
                if (ret)
                        ioremap_bot += size;
        }

        if (ret)
                ret += addr & ~PAGE_MASK;
        return ret;
}

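/*
 * __ioremap - Map @size bytes of physical address space at @addr with the
 * given protection @flags, attributing the mapping to the immediate caller.
 */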
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
                         unsigned long flags)
{
        return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

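/*
 * ioremap - Map @size bytes of MMIO space at physical @addr as non-cacheable.
 * Platforms can override the implementation via the ppc_md.ioremap hook.
 */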
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
        unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

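/*
 * ioremap_wc - Like ioremap(), but the mapping allows write-combining,
 * which suits framebuffers and similar streaming-write targets.
 */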
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
        unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

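/*
 * ioremap_prot - ioremap with caller-supplied protection flags.  The flags
 * are sanitized below so the result is always a non-executable,
 * privileged (kernel-only) mapping.
 */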
void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
                            unsigned long flags)
{
        void *caller = __builtin_return_address(0);

        /* writeable implies dirty for kernel addresses */
        if (flags & _PAGE_WRITE)
                flags |= _PAGE_DIRTY;

        /* we don't want to let _PAGE_EXEC leak out */
        flags &= ~_PAGE_EXEC;
        /*
         * Force kernel mapping.
         */
        flags &= ~_PAGE_USER;
        flags |= _PAGE_PRIVILEGED;

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

/*
 * Unmap an IO region and remove it from the vmalloc area.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
        void *addr;

        if (!slab_is_available())
                return;

        addr = (void *) ((unsigned long __force)
                         PCI_FIX_ADDR(token) & PAGE_MASK);
        if ((unsigned long)addr < ioremap_bot) {
                printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
                       " at 0x%p\n", addr);
                return;
        }
        vunmap(addr);
}

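/*
 * iounmap - Tear down a mapping created by one of the ioremap* variants,
 * using the platform hook when one is provided.
 */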
void iounmap(volatile void __iomem *token)
{
        if (ppc_md.iounmap)
                ppc_md.iounmap(token);
        else
                __iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
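
/*
 * Typical driver usage of the interfaces exported above is roughly the
 * following (an illustrative sketch only; the physical base, window size
 * and register offsets are hypothetical):
 *
 *        void __iomem *regs;
 *        u32 val;
 *
 *        regs = ioremap(0xf0000000, 0x1000);
 *        if (!regs)
 *                return -ENOMEM;
 *        writel(0x1, regs + 0x10);
 *        val = readl(regs + 0x14);
 *        iounmap(regs);
 *
 * Accesses to the returned cookie should go through the read*()/write*()
 * accessors rather than being dereferenced directly.
 */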

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
        if (pgd_huge(pgd))
                return pte_page(pgd_pte(pgd));
        return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

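/*
 * pud_page - Return the struct page backing a PUD entry, handling both
 * huge-page PUDs and regular page-table pages.
 */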
struct page *pud_page(pud_t pud)
{
        if (pud_huge(pud))
                return pte_page(pud_pte(pud));
        return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage, the pfn is in the pmd and the low PTE_RPN_SHIFT bits are
 * used for flags.  For a PTE page, the pmd holds a PTE_FRAG_SIZE (4K)
 * aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
        if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
                return pte_page(pmd_pte(pmd));
        return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
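/*
 * mark_rodata_ro - Write-protect the kernel's read-only data, dispatching
 * to the radix or hash MMU implementation; requires MMU_FTR_KERNEL_RO.
 */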
void mark_rodata_ro(void)
{
        if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
                pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
                return;
        }

        if (radix_enabled())
                radix__mark_rodata_ro();
        else
                hash__mark_rodata_ro();
}

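/*
 * mark_initmem_nx - Remove execute permission from the kernel's init text,
 * again dispatching on the active MMU mode (radix or hash).
 */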
void mark_initmem_nx(void)
{
        if (radix_enabled())
                radix__mark_initmem_nx();
        else
                hash__mark_initmem_nx();
}
#endif