/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

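/*
 * A physical address in the low 512 MB can be reached through the unmapped,
 * uncached KSEG1 segment without consuming a page-table mapping; these
 * helpers test for that case and for addresses already inside KSEG1.
 */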
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

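/*
 * Callback for walk_system_ram_range(): returns 1 if any page in the range
 * is valid, non-reserved RAM, i.e. memory the page allocator may hand out.
 */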
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	}

	return 0;
}

/*
 * ioremap_prot - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 * @prot_val: page protection bits; the cache coherency attribute (CCA)
 *            bits select the caching mode of the mapping
 *
 * ioremap_prot gives the caller control over the cache coherency
 * attributes (CCA) of the mapping.
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	unsigned long flags = prot_val & _CACHE_MASK;
	unsigned long offset, pfn, last_pfn;
	struct vm_struct *area;
	phys_addr_t last_addr;
	unsigned long vaddr;
	void __iomem *cpu_addr;

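	/*
	 * Give the platform a chance to satisfy the request itself,
	 * e.g. with a fixed platform-specific window; it declines by
	 * returning NULL, in which case we take the generic path.
	 */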
	cpu_addr = plat_ioremap(phys_addr, size, flags);
	if (cpu_addr)
		return cpu_addr;

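	/*
	 * Platforms with an extended physical address space (more than
	 * 32 bits) can rewrite the address here; elsewhere this is a
	 * no-op.
	 */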
	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the page
	 * allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

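	/*
	 * Combine the caller's CCA bits with the attributes every
	 * kernel mapping needs: present, global, readable and writable.
	 */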
	flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       __pgprot(flags))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

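/*
 * Illustrative usage sketch, not part of the original file: a driver
 * would map a device register window uncached and access it with the
 * MMIO accessors. The base address, size and register offset are
 * made-up example values.
 *
 *	void __iomem *regs;
 *	u32 val;
 *
 *	regs = ioremap_prot(0x1f000000, 0x1000, _CACHE_UNCACHED);
 *	if (!regs)
 *		return -ENOMEM;
 *	val = readl(regs + 0x10);
 *	writel(val | 0x1, regs + 0x10);
 *	iounmap(regs);
 */

/*
 * iounmap - tear down a mapping created by ioremap_prot()
 *
 * KSEG1 addresses and platform-provided mappings were never backed by
 * page tables, so only mappings living in vmalloc space are unmapped.
 */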
void iounmap(const volatile void __iomem *addr)
{
	if (!plat_iounmap(addr) && !IS_KSEG1(addr))
		vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);