// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 *
 * Wrappers of assembly checksum functions for x86-64.
 */
#include <asm/checksum.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/smap.h>

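/*
 * Note on terminology: an "unfolded" checksum keeps the running sum in
 * 32 bits without reducing it to the final 16-bit one's-complement
 * form; csum_fold() performs that final reduction when the caller is
 * done accumulating.
 */
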
/**
 * csum_partial_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad source address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum isum, int *errp)
{
	might_sleep();
	*errp = 0;

	if (unlikely(!access_ok(src, len)))
		goto out_err;

	/*
	 * Why 6, not 7? To handle odd addresses we would need
	 * considerable complications to fix the checksum, which is
	 * defined as a 16bit accumulator. The alignment-fix code is
	 * primarily for performance compatibility with 32bit and
	 * that handles odd addresses slowly too.
	 */
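	/*
	 * The mask 6 tests address bits 1 and 2, so the loop below
	 * hand-copies 16-bit chunks until those bits of src are clear
	 * (or len runs out); odd (bit-0) misalignment is deliberately
	 * left for the generic routine, per the comment above.
	 */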
	if (unlikely((unsigned long)src & 6)) {
		while (((unsigned long)src & 6) && len >= 2) {
			__u16 val16;

			if (__get_user(val16, (const __u16 __user *)src))
				goto out_err;

			*(__u16 *)dst = val16;
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			src += 2;
			dst += 2;
			len -= 2;
		}
	}
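	/*
	 * stac()/clac() open and close the SMAP user-access window
	 * around the assembly copy. Only src is user memory here, so
	 * errp is passed to csum_partial_copy_generic() in the
	 * source-fault slot and the destination-fault slot is NULL.
	 */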
	stac();
	isum = csum_partial_copy_generic((__force const void *)src,
					 dst, len, isum, errp, NULL);
	clac();
	if (unlikely(*errp))
		goto out_err;

	return isum;

out_err:
	*errp = -EFAULT;
	memset(dst, 0, len);

	return isum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);

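/*
 * A minimal usage sketch for the routine above (hypothetical caller;
 * ubuf, kbuf, len and check are illustrative, and sparse __force
 * annotations are elided):
 *
 *	int err;
 *	__wsum csum;
 *
 *	csum = csum_partial_copy_from_user(ubuf, kbuf, len, 0, &err);
 *	if (err)
 *		return -EFAULT;
 *	// fold to the final 16-bit checksum once accumulation is done
 *	check = csum_fold(csum);
 */
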
/**
 * csum_partial_copy_to_user - Copy and checksum to user space.
 * @src: source address
 * @dst: destination address (user space)
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad destination address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_to_user(const void *src, void __user *dst,
			  int len, __wsum isum, int *errp)
{
	__wsum ret;

	might_sleep();

	if (unlikely(!access_ok(dst, len))) {
		*errp = -EFAULT;
		return 0;
	}

	if (unlikely((unsigned long)dst & 6)) {
		while (((unsigned long)dst & 6) && len >= 2) {
			__u16 val16 = *(__u16 *)src;

			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			*errp = __put_user(val16, (__u16 __user *)dst);
			if (*errp)
				return isum;
			src += 2;
			dst += 2;
			len -= 2;
		}
	}

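	/*
	 * Mirror image of the from_user case: dst is the user pointer
	 * here, so errp goes to csum_partial_copy_generic() in the
	 * destination-fault slot and the source-fault slot is NULL.
	 */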
	*errp = 0;
	stac();
	ret = csum_partial_copy_generic(src, (void __force *)dst,
					len, isum, NULL, errp);
	clac();
	return ret;
}
EXPORT_SYMBOL(csum_partial_copy_to_user);

/**
 * csum_partial_copy_nocheck - Copy and checksum.
 * @src: source address
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @sum: initial sum that is added into the result (32bit unfolded)
 *
 * Returns a 32bit unfolded checksum of the buffer.
 */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
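	/* Neither src nor dst is user memory, so both fault pointers are NULL. */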
	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);

__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum)
{
	__u64 rest, sum64;

	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

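	/*
	 * Accumulate both 128-bit addresses as four 64-bit additions
	 * with carry propagation; the final adcq $0 folds the last
	 * carry back into sum64.
	 */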
	asm(" addq (%[saddr]),%[sum]\n"
	    " adcq 8(%[saddr]),%[sum]\n"
	    " adcq (%[daddr]),%[sum]\n"
	    " adcq 8(%[daddr]),%[sum]\n"
	    " adcq $0,%[sum]\n"

	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

	return csum_fold(
	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
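
/*
 * A worked sketch of the folding arithmetic above (values illustrative
 * only): with sum64 = 0x123456789, add32_with_carry() adds the halves,
 * 0x23456789 + 0x00000001 = 0x2345678a, and csum_fold() then adds the
 * 16-bit halves, 0x2345 + 0x678a = 0x8acf, and returns the one's
 * complement, ~0x8acf = 0x7530.
 */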