// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>
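
/*
 * Descriptive note (added): checks whether @paddr falls in the hardware
 * uncached window, i.e. a range the CPU never caches regardless of MMU
 * attributes. On ARCompact this is the fixed region starting at
 * ARC_UNCACHED_ADDR_SPACE; otherwise (ARCv2) it is the runtime-probed
 * peripheral aperture [perip_base, perip_end].
 */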
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
	if (is_isa_arcompact()) {
		if (paddr >= ARC_UNCACHED_ADDR_SPACE)
			return true;
	} else if (paddr >= perip_base && paddr <= perip_end) {
		return true;
	}

	return false;
}

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
	phys_addr_t end;

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if (!size || (end < paddr))
		return NULL;

	/*
	 * If the region is h/w uncached, the MMU mapping can be elided as an
	 * optimization. The cast to u32 is fine as this region can only be
	 * inside 4GB.
	 */
	if (arc_uncached_addr_space(paddr))
		return (void __iomem *)(u32)paddr;

	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
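
/*
 * Illustrative driver usage (a sketch, not part of this file; the
 * peripheral base 0xf0001000, the SZ_4K size and the 0x10 register
 * offset are made-up values):
 *
 *	void __iomem *regs = ioremap(0xf0001000, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	iounmap(regs);
 */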

/*
 * ioremap with access flags
 * Cache-semantics-wise it is the same as ioremap: "forced" uncached.
 * However, unlike vanilla ioremap, which bypasses the ARC MMU for addresses
 * in the ARC hardware uncached region, this one still goes through the MMU,
 * as the caller might need finer access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	unsigned long vaddr;
	struct vm_struct *area;
	phys_addr_t off, end;
	pgprot_t prot = __pgprot(flags);

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if (!size || (end < paddr))
		return NULL;

	/* An early platform driver might end up here */
	if (!slab_is_available())
		return NULL;

	/* force uncached */
	prot = pgprot_noncached(prot);

	/* Mappings have to be page-aligned */
	off = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - paddr;

	/* Reserve a virtual area and establish the mapping */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = paddr;
	vaddr = (unsigned long)area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
		vunmap((void __force *)vaddr);
		return NULL;
	}
	return (void __iomem *)(off + (char __iomem *)vaddr);
}
EXPORT_SYMBOL(ioremap_prot);
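
/*
 * A hypothetical caller wanting the same uncached semantics, but built
 * from pgprot bits rather than via the ioremap() fast path, might do
 * (sketch only; paddr and size are caller-supplied):
 *
 *	void __iomem *regs =
 *		ioremap_prot(paddr, size,
 *			     pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 */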

void iounmap(const void __iomem *addr)
{
	/*
	 * The double cast handles phys_addr_t being wider than 32 bits.
	 * Addresses in the hardware uncached window were returned as-is
	 * by ioremap(), so there is no MMU mapping to tear down.
	 */
	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
		return;

	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);