// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPv6 Syncookies implementation for the Linux kernel
 *
 * Authors:
 * Glenn Griffin	<ggriffin.kernel@gmail.com>
 *
 * Based on IPv4 implementation by Andi Kleen
 * linux/net/ipv4/syncookies.c
 */

#include <linux/tcp.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/kernel.h>
#include <net/secure_seq.h>
#include <net/ipv6.h>
#include <net/tcp.h>

#define COOKIEBITS 24	/* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)

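/* Two independent SipHash keys, both generated lazily on first use via
 * net_get_random_once() in cookie_hash(): index 0 keys the time-independent
 * hash that offsets the whole cookie, index 1 keys the hash that is
 * recomputed every tcp_cookie_time() period and protects the low
 * COOKIEBITS carrying the MSS index.
 */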
static siphash_key_t syncookie6_secret[2] __read_mostly;

/* RFC 2460, Section 8.3:
 * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
 *
 * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
 * using higher values than ipv4 tcp syncookies.
 * The other values are chosen based on ethernet (1500 and 9k MTU), plus
 * one that accounts for common encap (PPPoe) overhead. Table must be sorted.
 */
static __u16 const msstab[] = {
	1280 - 60, /* IPV6_MIN_MTU - 60 */
	1480 - 60,
	1500 - 60,
	9000 - 60,
};

static u32 cookie_hash(const struct in6_addr *saddr,
		       const struct in6_addr *daddr,
		       __be16 sport, __be16 dport, u32 count, int c)
{
	const struct {
		struct in6_addr saddr;
		struct in6_addr daddr;
		u32 count;
		__be16 sport;
		__be16 dport;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.saddr = *saddr,
		.daddr = *daddr,
		.count = count,
		.sport = sport,
		.dport = dport
	};

	net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
	return siphash(&combined, offsetofend(typeof(combined), dport),
		       &syncookie6_secret[c]);
}

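/* Cookie layout, mirroring the IPv4 implementation:
 *
 *	cookie = H0 + sseq + (count << COOKIEBITS)
 *		 + ((H1 + data) & COOKIEMASK)
 *
 * with H0 = cookie_hash(.., 0, 0), H1 = cookie_hash(.., count, 1),
 * count = tcp_cookie_time() and data = msstab index.  Once the receiver
 * subtracts H0 + sseq again, the upper bits recover count and the lower
 * COOKIEBITS recover (H1 + data) & COOKIEMASK; the H0 + sseq offset is
 * what makes the value look random on the wire.
 */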
static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
				   const struct in6_addr *daddr,
				   __be16 sport, __be16 dport, __u32 sseq,
				   __u32 data)
{
	u32 count = tcp_cookie_time();
	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
		sseq + (count << COOKIEBITS) +
		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
		& COOKIEMASK));
}

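/* Undo the cookie transformation for the given connection tuple and
 * sequence number.  Returns (__u32)-1 if the embedded count is older than
 * MAX_SYNCOOKIE_AGE tcp_cookie_time() periods; otherwise returns the low
 * COOKIEBITS with the time-dependent hash removed, which yields the
 * original data only for a genuine cookie (callers reject out-of-range
 * values).
 */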
static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
				  const struct in6_addr *daddr, __be16 sport,
				  __be16 dport, __u32 sseq)
{
	__u32 diff, count = tcp_cookie_time();

	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;

	diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
	if (diff >= MAX_SYNCOOKIE_AGE)
		return (__u32)-1;

	return (cookie -
		cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
		& COOKIEMASK;
}

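/* Build the syncookie that is sent as the SYN-ACK sequence number.
 * The offered MSS is rounded down to the nearest msstab[] entry and only
 * the table index is stored in the cookie; *mssp is updated to the value
 * actually advertised.
 */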
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, __u16 *mssp)
{
	int mssind;
	const __u16 mss = *mssp;

	for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
		if (mss >= msstab[mssind])
			break;

	*mssp = msstab[mssind];

	return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
				     th->dest, ntohl(th->seq), mssind);
}
EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);

__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mssp)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);

	return __cookie_v6_init_sequence(iph, th, mssp);
}

int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      __u32 cookie)
{
	__u32 seq = ntohl(th->seq) - 1;
	__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
					    th->source, th->dest, seq);

	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
EXPORT_SYMBOL_GPL(__cookie_v6_check);

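/* Validate the ACK that completes a cookie-based handshake and, if it
 * checks out, rebuild the request sock state that was never stored: the
 * cookie (ack_seq - 1) must decode to a valid MSS index, and the remaining
 * options (wscale, SACK, ECN) are recovered from the echoed TCP timestamp
 * via cookie_timestamp_decode().  Returns the listener sk when the packet
 * should not be handled as a syncookie ACK, the newly created child socket
 * on success, or NULL on failure.
 */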
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tcp_opt;
	struct inet_request_sock *ireq;
	struct tcp_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	const struct tcphdr *th = tcp_hdr(skb);
	__u32 cookie = ntohl(th->ack_seq) - 1;
	struct sock *ret = sk;
	struct request_sock *req;
	int full_space, mss;
	struct dst_entry *dst;
	__u8 rcv_wscale;
	u32 tsoff = 0;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
	    !th->ack || th->rst)
		goto out;

	if (tcp_synq_no_recent_overflow(sk))
		goto out;

	mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
	if (mss == 0) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
		goto out;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

	/* check for timestamp cookie support */
	memset(&tcp_opt, 0, sizeof(tcp_opt));
	tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);

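	/* The peer echoed our timestamp back in tsecr; strip the
	 * per-connection offset so that cookie_timestamp_decode() below
	 * sees the raw value in which wscale/SACK/ECN were encoded.
	 */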
	if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
		tsoff = secure_tcpv6_ts_off(sock_net(sk),
					    ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32);
		tcp_opt.rcv_tsecr -= tsoff;
	}

	if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
		goto out;

	ret = NULL;
	req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops,
				     &tcp_request_sock_ipv6_ops, sk, skb);
	if (!req)
		goto out;

	ireq = inet_rsk(req);
	treq = tcp_rsk(req);
	treq->tfo_listener = false;

	if (security_inet_conn_request(sk, skb, req))
		goto out_free;

	req->mss = mss;
	ireq->ir_rmt_port = th->source;
	ireq->ir_num = ntohs(th->dest);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}

	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	ireq->ir_mark = inet_request_mark(sk, skb);

	req->num_retrans = 0;
	ireq->snd_wscale = tcp_opt.snd_wscale;
	ireq->sack_ok = tcp_opt.sack_ok;
	ireq->wscale_ok = tcp_opt.wscale_ok;
	ireq->tstamp_ok = tcp_opt.saw_tstamp;
	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
	treq->snt_synack = 0;
	treq->rcv_isn = ntohl(th->seq) - 1;
	treq->snt_isn = cookie;
	treq->ts_off = 0;
	treq->txhash = net_tx_rndhash();
	if (IS_ENABLED(CONFIG_SMC))
		ireq->smc_ok = 0;

	/*
	 * We need to lookup the dst_entry to get the correct window size.
	 * This is taken from tcp_v6_syn_recv_sock. Somebody please enlighten
	 * me if there is a preferred way.
	 */
	{
		struct in6_addr *final_p, final;
		struct flowi6 fl6;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_TCP;
		fl6.daddr = ireq->ir_v6_rmt_addr;
		final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
		fl6.saddr = ireq->ir_v6_loc_addr;
		fl6.flowi6_oif = ireq->ir_iif;
		fl6.flowi6_mark = ireq->ir_mark;
		fl6.fl6_dport = ireq->ir_rmt_port;
		fl6.fl6_sport = inet_sk(sk)->inet_sport;
		fl6.flowi6_uid = sk->sk_uid;
		security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));

		dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
		if (IS_ERR(dst))
			goto out_free;
	}

	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
	/* limit the window selection if the user enforces a smaller rx buffer */
	full_space = tcp_full_space(sk);
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

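	/* The receive window and our wscale were never stored in the cookie,
	 * so recompute them here from the route metrics and the listener's
	 * buffer limits.
	 */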
	tcp_select_initial_window(sk, full_space, req->mss,
				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
				  ireq->wscale_ok, &rcv_wscale,
				  dst_metric(dst, RTAX_INITRWND));

	ireq->rcv_wscale = rcv_wscale;
	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);

	ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff);
out:
	return ret;
out_free:
	reqsk_free(req);
	return NULL;
}