// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

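/**
 * __clear_user - zero a block of user memory, without access_ok() checks
 * @addr: destination address, in user space
 * @size: number of bytes to zero
 *
 * Returns the number of bytes that could not be cleared; 0 on success.
 */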
unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/*
	 * No memory constraint on the asm: it doesn't change any memory
	 * that gcc knows about.
	 */
	stac();
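	/*
	 * Loop 0: zero a qword at a time, %ecx counting down size/8.
	 * Loop 1: mop up the remaining size%8 bytes.  A fault in the
	 * qword loop lands in the .fixup code at 3:, which recomputes
	 * the number of bytes left; a fault in the byte loop exits at
	 * 2: with the remaining count already in %rcx.
	 */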
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"	.align 16\n"
		"0:	movq $0,(%[dst])\n"
		"	addq   $8,%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   $0,(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE_UA(0b, 3b)
		_ASM_EXTABLE_UA(1b, 2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

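/**
 * clear_user - zero a block of memory in user space
 * @to: destination address, in user space
 * @n:  number of bytes to zero
 *
 * Returns the number of bytes that could not be cleared; 0 on success.
 */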
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

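	/*
	 * Round the start address down to a cache-line boundary, then
	 * issue one clwb per line until the whole of [addr, addr + size)
	 * has been written back.
	 */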
	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 * - Require 8-byte alignment when size is 8 bytes or larger.
	 * - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
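		/*
		 * Flush the cache line holding any unaligned head that
		 * __copy_user_nocache() copied with cached stores.
		 */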
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

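		/*
		 * Likewise for the tail: if the bytes past the flushed
		 * head are not a multiple of 8, the last partial qword
		 * went through the cache and its line must be flushed.
		 */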
		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

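	/*
	 * Strategy: copy the unaligned head with a cached memcpy() plus
	 * clwb, stream the aligned bulk with non-temporal movnti stores
	 * (which bypass the cache), and handle any tail the same way as
	 * the head.
	 */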
	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);

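/*
 * Copy @len bytes at @offset within @page out to @to with
 * memcpy_flushcache(); the source page is temporarily mapped with
 * kmap_atomic().
 */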
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			    size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif