/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

void kunmap_high(struct page *page);

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
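
/*
 * Illustrative sketch (not part of this header's API): a typical use of
 * the sleeping kmap()/kunmap() pair.  'buf' and 'len' are hypothetical.
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, buf, len);
 *	kunmap(page);
 *
 * kmap() may sleep, so this pattern is only valid in contexts where
 * sleeping is allowed.
 */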

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap: it
 * needs no global lock, and it avoids the global TLB invalidation that the
 * kmap code must perform when the kmap pool wraps.
 *
 * However, it is not legal to sleep while holding an atomic kmap, so atomic
 * kmaps are appropriate for short, tight code paths only.
 *
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);
}
#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)
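
/*
 * Hedged usage sketch (illustrative only): the section between
 * kmap_atomic() and kunmap_atomic() runs with page faults and preemption
 * disabled, so it must be short and must never sleep.
 *
 *	void *vaddr = kmap_atomic(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 *
 * Unlike kmap(), this pattern is usable from interrupt context.
 */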

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_dec(void)
{
	atomic_long_dec(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline void totalhigh_pages_set(long val)
{
	atomic_long_set(&_totalhigh_pages, val);
}

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page)
{
}

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void kunmap_atomic_high(void *addr)
{
	/*
	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
	 * handles re-enabling faults + preemption
	 */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while(0)

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap().
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr)					\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	kunmap_atomic_high(addr);				\
	pagefault_enable();					\
	preempt_enable();					\
} while (0)
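
/*
 * Pairing example (illustrative): kunmap_atomic() takes the kernel
 * virtual address returned by kmap_atomic(), never the struct page.
 *
 *	char *vto = kmap_atomic(page);
 *	...
 *	kunmap_atomic(vto);
 *
 * Passing the page itself, as in kunmap_atomic(page), would trip the
 * BUILD_BUG_ON() above at compile time.
 */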

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
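
/*
 * Sketched call site (hypothetical, modelled on an anonymous-fault
 * handler); 'vma' and 'vmf' would come from the fault:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */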

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
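
/*
 * Illustrative call (hypothetical 'valid' offset): zero the tail of a
 * partially populated page, e.g. after a short read:
 *
 *	zero_user_segment(page, valid, PAGE_SIZE);
 */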

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#endif /* _LINUX_HIGHMEM_H */