/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

#include <linux/bitops.h>
#include <linux/in6.h>

/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively (if that pointer is not
 * NULL), and, for an error on src, zeroes the rest of dst.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst,
					int len, __wsum sum,
					int *src_err, int *dst_err);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
				      int len, __wsum sum, int *err_ptr);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len, __wsum sum, int *err_ptr);

#define csum_partial_copy_nocheck(src, dst, len, sum) \
	csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)

/*
 * Turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	/* Swap the two 16-bit halves of sum. */
	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
	/*
	 * If there is a carry from adding the two 16-bit halves,
	 * it will carry from the lower half into the upper half,
	 * giving us the correct sum in the upper half.
	 */
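	/*
	 * Worked example (illustrative): sum = 0x12345678 gives
	 * tmp = 0x56781234, sum + tmp = 0x68ac68ac, and
	 * ~0x68ac68ac >> 16 = 0x9753, the complement of 0x1234 + 0x5678.
	 */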
	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}

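/*
 * Folds a 64-bit running sum to 32 bits: ror64(x, 32) swaps the two
 * 32-bit halves, so the addition leaves hi + lo (plus the carry between
 * the halves) in the top 32 bits, which the shift extracts.  This
 * preserves the ones' complement sum.
 */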
static inline u32 from64to32(u64 x)
{
	return (x + ror64(x, 32)) >> 32;
}

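/*
 * Computes the 32-bit partial checksum of the TCP/UDP pseudo-header
 * (source and destination addresses, protocol and length), added to "sum".
 * saddr and daddr are already in network byte order, so on little-endian
 * the running sum is byte-swapped; the host-order proto + len contribution
 * is shifted left by 8 so that it lands in the same byte-swapped lanes.
 */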
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
#ifdef __powerpc64__
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(s);
#else
	__asm__("addc %0,%0,%1\n\t"	/* sum += daddr, set carry */
		"adde %0,%0,%2\n\t"	/* sum += saddr + carry */
		"adde %0,%0,%3\n\t"	/* sum += proto + len + carry */
		"addze %0,%0"		/* fold in the final carry */
		: "=r" (sum)
		: "r" (daddr), "r" (saddr), "r" (proto + len), "0" (sum));
	return sum;
#endif
}

/*
 * Computes the checksum of the TCP/UDP pseudo-header and
 * returns a 16-bit checksum, already complemented.
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
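
/*
 * Illustrative use when filling in a UDP checksum (uh and udp_len are
 * hypothetical names):
 *
 *	uh->check = csum_tcpudp_magic(saddr, daddr, udp_len, IPPROTO_UDP,
 *				      csum_partial(uh, udp_len, 0));
 */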

#define HAVE_ARCH_CSUM_ADD
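/*
 * Adds two 32-bit partial checksums with end-around carry.  The
 * __builtin_constant_p() tests let the compiler drop the addition
 * entirely when either operand is known to be zero at compile time.
 */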
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
	u64 res = (__force u64)csum;
#endif
	if (__builtin_constant_p(csum) && csum == 0)
		return addend;
	if (__builtin_constant_p(addend) && addend == 0)
		return csum;

#ifdef __powerpc64__
	res += (__force u64)addend;
	return (__force __wsum)((u32)res + (res >> 32));
#else
	asm("addc %0,%0,%1;"
	    "addze %0,%0;"
	    : "+r" (csum) : "r" (addend) : "xer");
	return csum;
#endif
}

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always a whole number of 4-octet words aligned on a
 * 4-octet boundary.  ihl is the header length in 32-bit words and
 * is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
	const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
	unsigned int i;
	u64 s = *(const u32 *)iph;

	for (i = 0; i < ihl - 1; i++, ptr++)
		s += *ptr;
	return (__force __wsum)from64to32(s);
#else
	__wsum sum, tmp;

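	/*
	 * Add the first two words with addc, sum the remaining
	 * ihl - 2 words in a CTR loop with adde, then fold the
	 * final carry back in with addze.
	 */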
	asm("mtctr %3;"
	    "addc %0,%4,%5;"
	    "1: lwzu %1, 4(%2);"
	    "adde %0,%0,%1;"
	    "bdnz 1b;"
	    "addze %0,%0;"
	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
	    : "ctr", "xer", "memory");

	return sum;
#endif
}

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(ip_fast_csum_nofold(iph, ihl));
}
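
/*
 * Illustrative use when validating a received IPv4 header (iph is a
 * hypothetical struct iphdr pointer; a result of 0 means the checksum
 * is good):
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;
 */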

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * Returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic.
 *
 * This function must be called with even lengths, except
 * for the last fragment, which may be odd.
 *
 * It's best to have buff aligned on a 32-bit boundary.
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

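/*
 * For small, constant lengths the sum is computed inline, word by
 * word; constant lengths that are a multiple of 4 reuse the
 * ip_fast_csum_nofold() loop; everything else goes to the
 * out-of-line __csum_partial().
 */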
static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
		if (len == 2)
			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
		if (len >= 4)
			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
		if (len == 6)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 4));
		if (len >= 8)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 4));
		if (len == 10)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 8));
		if (len >= 12)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 8));
		if (len == 14)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 12));
		if (len >= 16)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 12));
	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
	} else {
		sum = __csum_partial(buff, len, sum);
	}
	return sum;
}

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
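
/*
 * Illustrative use when emitting an ICMP message (icmph and len are
 * hypothetical):
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */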

#define _HAVE_ARCH_IPV6_CSUM
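/*
 * Checksum of the IPv6 pseudo-header; implemented out of line in the
 * architecture's checksum library code.
 */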
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum);

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CHECKSUM_H */