// SPDX-License-Identifier: GPL-2.0
/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>

/*
 * Generic mapping function:
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
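/*
 * For example (illustrative numbers only, assuming 4k pages): a call
 * such as ioremap(0x1003, 0x10) maps the whole page at physical 0x1000
 * and returns the virtual base plus the sub-page offset 0x3, so the
 * caller gets a pointer to exactly the byte it asked for.
 */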
void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

#ifdef CONFIG_EISA
	unsigned long end = phys_addr + size - 1;
	/*
	 * Support EISA addresses: relocate legacy bus addresses into
	 * the processor's I/O space at F_EXTEND(0xfc000000).
	 */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
	    (phys_addr >= 0x00500000 && end < 0x03bfffff))
		phys_addr |= F_EXTEND(0xfc000000);
#endif

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using:
	 * only pages marked reserved (PageReserved) may be remapped.
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++) {
			if (!PageReserved(page))
				return NULL;
		}
	}

	/* Use an uncached mapping, as required for device I/O */
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
			  _PAGE_ACCESSED | _PAGE_NO_CACHE);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap(addr);
		return NULL;
	}

	/* Re-apply the sub-page offset within the new mapping */
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(ioremap);

void iounmap(const volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/* Only unmap addresses that live in the vmalloc/ioremap area */
	if (is_vmalloc_addr((void *)addr))
		vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);
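
/*
 * Typical usage (a minimal, illustrative sketch; the base address, size
 * and register offset below are made-up example values, not part of
 * this file): a driver maps a device's register window once, accesses
 * it with the read*/write* accessors, and unmaps it when done.
 *
 *	void __iomem *regs = ioremap(0xf4001000UL, 0x1000);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x10);
 *	...
 *	iounmap(regs);
 */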