/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2008 Keith Packard <keithp@keithp.com>
 */

#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <asm/page.h>

/*
 * The io_mapping mechanism provides an abstraction for mapping
 * individual pages from an I/O device to the CPU in an efficient fashion.
 *
 * See Documentation/io-mapping.txt
 */

struct io_mapping {
	resource_size_t base;
	unsigned long size;
	pgprot_t prot;
	void __iomem *iomem;
};
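
/*
 * A minimal usage sketch (illustrative only: the PCI device, BAR index,
 * offset, value and error handling below are hypothetical and not part of
 * this header). It shows the create -> map -> unmap -> free lifecycle:
 *
 *	struct io_mapping *iomap;
 *	void __iomem *vaddr;
 *
 *	iomap = io_mapping_create_wc(pci_resource_start(pdev, 2),
 *				     pci_resource_len(pdev, 2));
 *	if (!iomap)
 *		return -ENOMEM;
 *
 *	vaddr = io_mapping_map_wc(iomap, offset, PAGE_SIZE);
 *	writel(value, vaddr);
 *	io_mapping_unmap(vaddr);
 *
 *	io_mapping_free(iomap);
 */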

#ifdef CONFIG_HAVE_ATOMIC_IOMAP

#include <asm/iomap.h>
/*
 * For small address space machines, mapping large objects
 * into the kernel virtual space isn't practical. Where
 * available, use fixmap support to dynamically map pages
 * of the object at run time.
 */

static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	pgprot_t prot;

	if (iomap_create_wc(base, size, &prot))
		return NULL;

	iomap->base = base;
	iomap->size = size;
	iomap->prot = prot;
	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	resource_size_t phys_addr;
	unsigned long pfn;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
	return iomap_atomic_prot_pfn(pfn, mapping->prot);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	iounmap_atomic(vaddr);
}
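
/*
 * Sketch of how the atomic variant is meant to be used (illustrative only;
 * iomap, offset and value are hypothetical). The returned mapping is only
 * valid until io_mapping_unmap_atomic(), and the caller must not sleep in
 * between:
 *
 *	void __iomem *vaddr;
 *
 *	vaddr = io_mapping_map_atomic_wc(iomap, offset);
 *	writel(value, vaddr);
 *	io_mapping_unmap_atomic(vaddr);
 */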

static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;

	return ioremap_wc(phys_addr, size);
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}

#else

#include <linux/uaccess.h>
#include <asm/pgtable.h>

/* Create the io_mapping object */
static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	iomap->iomem = ioremap_wc(base, size);
	if (!iomap->iomem)
		return NULL;

	iomap->base = base;
	iomap->size = size;
#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
	iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
#elif defined(pgprot_writecombine)
	iomap->prot = pgprot_writecombine(PAGE_KERNEL);
#else
	iomap->prot = pgprot_noncached(PAGE_KERNEL);
#endif

	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}

/* Non-atomic map/unmap */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	/* The whole resource was mapped in io_mapping_init_wc() */
	return mapping->iomem + offset;
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	/* Nothing to do; the mapping lives until io_mapping_fini() */
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	/* Mirror the atomic-iomap semantics: no preemption, no page faults */
	preempt_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}

#endif /* CONFIG_HAVE_ATOMIC_IOMAP */

static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
		     unsigned long size)
{
	struct io_mapping *iomap;

	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
	if (!iomap)
		return NULL;

	if (!io_mapping_init_wc(iomap, base, size)) {
		kfree(iomap);
		return NULL;
	}

	return iomap;
}

static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}

#endif /* _LINUX_IO_MAPPING_H */