/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

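/*
 * Fill in PTEs covering [address, address + size), clamped to the
 * enclosing PMD, pointing at phys_addr with the given cache flags.
 * BUGs if any PTE in the range is already present.
 */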
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
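/*
 * Fill in the PMD entries covering [address, address + size), clamped
 * to the enclosing PGD, allocating PTE tables as needed and handing
 * each PMD-sized chunk to remap_area_pte().  Returns 0 or -ENOMEM.
 */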
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
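/*
 * Walk the kernel page tables from the PGD down and map the entire
 * [address, address + size) range to phys_addr, allocating intermediate
 * tables as needed.  Caches are flushed up front and the TLB is flushed
 * afterwards, so no stale translations for the range survive.
 */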
static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
	phys_addr_t size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
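/*
 * walk_system_ram_range() callback: return 1 if any page in the range
 * is ordinary RAM (valid and not reserved), 0 otherwise.
 */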
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	}

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

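/* True iff the address lies in the low 512MB reachable through KSEG1. */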
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
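/*
 * __ioremap - remap physical memory into the kernel virtual address space
 * @phys_addr: physical/bus address to map
 * @size: size of the mapping
 * @flags: cache attribute bits for the mapping (e.g. _CACHE_UNCACHED)
 *
 * Returns a kernel virtual address carrying the sub-page offset of
 * @phys_addr, or NULL if the range is invalid, overlaps system RAM, or
 * no virtual space is available.
 */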
void __iomem *__ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
	unsigned long offset, pfn, last_pfn;
	struct vm_struct *area;
	phys_addr_t last_addr;
	void *addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the page
	 * allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * OK, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)addr);
}
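
/*
 * Typical use (an illustrative sketch, not part of this file): drivers
 * normally reach this code through the ioremap()/iounmap() wrappers
 * rather than calling __ioremap() directly.  The device address and
 * register offset below are hypothetical.
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);		(hypothetical control register)
 *	iounmap(regs);
 *
 * Since that whole range lies in the low 512MB and ioremap() asks for an
 * uncached mapping, __ioremap() returns a CKSEG1 address without touching
 * the page tables, and the matching __iounmap() call is a no-op.
 */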
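/* True iff the virtual address lies in the unmapped, uncached KSEG1 window. */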
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
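/*
 * __iounmap - undo a mapping created by __ioremap()
 *
 * KSEG1 addresses never went through the page tables, so they are
 * ignored; anything else must be a vmalloc-space mapping, which is
 * removed and its vm_struct freed.
 */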
void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);