// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
				      pgprot_t prot, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * Page align the mapping address and size, taking account of any
	 * offset.
	 */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/*
	 * Don't allow wraparound, zero size or outside PHYS_MASK.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped.
	 */
	if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *)(offset + addr);
}

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);
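
/*
 * Illustrative sketch, not part of the original file: a typical consumer
 * of this interface. On arm64, drivers normally reach __ioremap() through
 * the ioremap() wrapper (Device-nGnRE attributes) and tear the mapping
 * down with iounmap(). The physical address, size and register offset
 * below are hypothetical placeholders.
 */
#if 0
static void __iomem *example_map_device(void)
{
	/* Map a hypothetical 4K MMIO register window. */
	void __iomem *regs = ioremap(0x09000000, SZ_4K);

	if (!regs)
		return NULL;	/* no vmalloc space, or the range is RAM */

	writel(0x1, regs + 0x4);	/* hypothetical enable register */
	return regs;
}
#endif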

void iounmap(volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/*
	 * We could get an address outside vmalloc range in case
	 * of ioremap_cache() reusing a RAM mapping.
	 */
	if (is_vmalloc_addr((void *)addr))
		vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);

void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
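
/*
 * Illustrative sketch, not part of the original file: a caller that wants
 * a cacheable view of memory which may or may not be covered by the
 * linear map (firmware tables are one such case). The address and length
 * are hypothetical placeholders.
 */
#if 0
static void example_read_table(phys_addr_t table_pa, size_t len)
{
	void __iomem *p = ioremap_cache(table_pa, len);

	if (!p)
		return;

	/*
	 * For RAM this is the existing cacheable linear-map alias; for
	 * anything else it is a fresh PROT_NORMAL vmalloc mapping.
	 * iounmap() above only vunmap()s the latter.
	 */
	/* ... consume the table via memcpy_fromio() etc. ... */
	iounmap(p);
}
#endif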

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}
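
/*
 * Illustrative sketch, not part of the original file: once
 * early_ioremap_init() has run, boot-time code can map physical ranges
 * through the fixmap-backed early_ioremap()/early_iounmap() pair before
 * the full ioremap() path is usable. The mapped address is a hypothetical
 * placeholder.
 */
#if 0
static void __init example_early_peek(phys_addr_t pa)
{
	void __iomem *p = early_ioremap(pa, PAGE_SIZE);

	if (p) {
		/* ... inspect the data ... */
		early_iounmap(p, PAGE_SIZE);
	}
}
#endif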