// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/indirect_call_wrapper.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	return NULL;
}
#endif

/* Helper returning the inet6 address from a given tcp socket.
 * It can be used in TCP stack instead of inet6_sk(sk).
 * This avoids a dereference and allows compiler optimizations.
 * It is a specialized version of inet6_sk_generic().
 */
static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		rcu_assign_pointer(sk->sk_rx_dst, dst);
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

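/* Derive the initial sequence number and the timestamp offset for a new
 * connection from the IPv6 addresses and TCP ports via the keyed
 * secure_tcpv6_seq()/secure_tcpv6_ts_off() helpers, so that they are not
 * predictable by off-path attackers.
 */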
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		WRITE_ONCE(tp->write_seq, 0);
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(sk))
			mptcpv6_handle_mapped(sk, true);
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			if (sk_is_mptcp(sk))
				mptcpv6_handle_mapped(sk, false);
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

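/* Handle a path-MTU reduction: called either directly from the ICMPv6
 * handler or deferred via TCP_MTU_REDUCED_DEFERRED once tp->mtu_info has
 * been updated; drops attempts to raise the MSS, then re-syncs the MSS
 * against the new route MTU and retransmits outstanding data if needed.
 */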
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);

	/* Drop requests trying to increase our current mss.
	 * Check done in __ip6_rt_update_pmtu() is too late.
	 */
	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
		return;

	dst = inet6_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, fatal);
		return 0;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = tcp_inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		u32 mtu = ntohl(info);

		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (mtu < IPV6_MIN_MTU)
			goto out;

		WRITE_ONCE(tp->mtu_info, mtu);

		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	case TCP_LISTEN:
		break;
	default:
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
		    code == ICMPV6_NOROUTE)
			tcp_ld_RTO_revert(sk, seq);
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}


static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;
	u8 tclass;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
				(np->tclass & INET_ECN_MASK) :
				np->tclass;

		if (!INET_ECN_is_capable(tclass) &&
		    tcp_bpf_ca_needs_ecn((struct sock *)req))
			tclass |= INET_ECN_ECT_0;

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
			       tclass, sk->sk_priority);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	return tcp_md5_do_lookup(sk, l3index,
				 (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	int l3index;

	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
						 addr_sk->sk_bound_dev_if);
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
				    l3index);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 sockptr_t optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	int l3index = 0;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;
		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen,
					      l3index);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen, l3index);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, l3index,
				      cmd.tcpm_key, cmd.tcpm_keylen,
				      GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, l3index,
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

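/* Validate the TCP MD5 signature option on an incoming segment. Returns
 * true if the segment must be dropped: a signature was expected but is
 * missing, was present but not expected, or does not match the key
 * configured for the peer address (scoped by l3index).
 */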
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb,
				    int dif, int sdif)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash, l3index;
	u8 newhash[16];

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest), l3index);
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

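/* Build and send a bare ACK or RST (no payload) in reply to @skb on the
 * per-netns control socket, mirroring the addresses and ports of the
 * incoming segment and optionally carrying a timestamp and an MD5
 * signature option.
 */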
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label, u32 priority)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;
	__u32 mark = 0;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT) {
			mark = inet_twsk(sk)->tw_mark;
			/* autoflowlabel relies on buff->hash */
			skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
				     PKT_HASH_TYPE_L4);
		} else {
			mark = sk->sk_mark;
		}
		buff->tstamp = tcp_transmit_time(sk);
	}
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for an RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
			 tclass & ~INET_ECN_MASK, priority);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	__be32 label = 0;
	u32 priority = 0;
	struct net *net;
	int oif = 0;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
	} else if (hash_location) {
		int dif = tcp_v6_iif_l3_slave(skb);
		int sdif = tcp_v6_sdif(skb);
		int l3index;

		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(net,
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), dif, sdif);
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? dif : 0;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk)) {
			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);

			trace_tcp_send_reset(sk, skb);
			if (np->repflow)
				label = ip6_flowlabel(ipv6h);
			priority = sk->sk_priority;
		}
		if (sk->sk_state == TCP_TIME_WAIT) {
			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
			priority = inet_twsk(sk)->tw_priority;
		}
	} else {
		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
			label = ip6_flowlabel(ipv6h);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
			     ipv6_get_dsfield(ipv6h), label, priority);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label, u32 priority)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label, priority);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	int l3index;

	l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority);
}


static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
				    &tcp_request_sock_ipv6_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

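/* Create the child socket once the handshake completes. For a v4-mapped
 * peer the request is handed to tcp_v4_syn_recv_sock() and the child is
 * switched to the mapped ops; otherwise a native IPv6 child is set up here
 * (route, IPv6 options, MD5 key and pktoptions cloned from the listener).
 */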
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	bool found_dup_sk = false;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	int l3index;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

		newinet = inet_sk(newsk);
		newnp = tcp_inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(newsk))
			mptcpv6_handle_mapped(newsk, true);
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet_iif(skb);
		newnp->mcast_hops = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
		if (np->repflow)
			newnp->flow_label = 0;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = tcp_inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Set ToS of the new socket based upon the value of incoming SYN.
	 * ECT bits are set later in tcp_init_transfer().
	 */
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);

	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, l3index, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
				       &found_dup_sk);
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	} else {
		if (!req_unhash && found_dup_sk) {
			/* This code path should only be executed in the
			 * syncookie case only
			 */
			bh_unlock_sock(newsk);
			sock_put(newsk);
			newsk = NULL;
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;
	struct tcp_sock *tp;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst;

		dst = rcu_dereference_protected(sk->sk_rx_dst,
						lockdep_sock_is_held(sk));

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
				dst_release(dst);
			}
		}

		tcp_rcv_established(sk, skb);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

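/* Main receive entry point for TCP over IPv6, invoked from the IPv6
 * protocol handler: validates the header and checksum, looks up the owning
 * socket (with special handling for TCP_NEW_SYN_RECV and TIME_WAIT), and
 * either processes the segment directly or queues it to the socket backlog.
 */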
David Brazdil0f672f62019-12-10 10:32:29 +00001603INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001604{
David Brazdil0f672f62019-12-10 10:32:29 +00001605 struct sk_buff *skb_to_free;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001606 int sdif = inet6_sdif(skb);
Olivier Deprez157378f2022-04-04 15:47:50 +02001607 int dif = inet6_iif(skb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001608 const struct tcphdr *th;
1609 const struct ipv6hdr *hdr;
1610 bool refcounted;
1611 struct sock *sk;
1612 int ret;
1613 struct net *net = dev_net(skb->dev);
1614
1615 if (skb->pkt_type != PACKET_HOST)
1616 goto discard_it;
1617
1618 /*
1619 * Count it even if it's bad.
1620 */
1621 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1622
1623 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1624 goto discard_it;
1625
1626 th = (const struct tcphdr *)skb->data;
1627
1628 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1629 goto bad_packet;
1630 if (!pskb_may_pull(skb, th->doff*4))
1631 goto discard_it;
1632
1633 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1634 goto csum_error;
1635
1636 th = (const struct tcphdr *)skb->data;
1637 hdr = ipv6_hdr(skb);
1638
1639lookup:
1640 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1641 th->source, th->dest, inet6_iif(skb), sdif,
1642 &refcounted);
1643 if (!sk)
1644 goto no_tcp_socket;
1645
1646process:
1647 if (sk->sk_state == TCP_TIME_WAIT)
1648 goto do_time_wait;
1649
1650 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1651 struct request_sock *req = inet_reqsk(sk);
1652 bool req_stolen = false;
1653 struct sock *nsk;
1654
1655 sk = req->rsk_listener;
 1656 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
 1657 sk_drops_add(sk, skb);
1658 reqsk_put(req);
1659 goto discard_it;
1660 }
1661 if (tcp_checksum_complete(skb)) {
1662 reqsk_put(req);
1663 goto csum_error;
1664 }
1665 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1666 inet_csk_reqsk_queue_drop_and_put(sk, req);
1667 goto lookup;
1668 }
1669 sock_hold(sk);
1670 refcounted = true;
1671 nsk = NULL;
1672 if (!tcp_filter(sk, skb)) {
1673 th = (const struct tcphdr *)skb->data;
1674 hdr = ipv6_hdr(skb);
1675 tcp_v6_fill_cb(skb, hdr, th);
1676 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1677 }
1678 if (!nsk) {
1679 reqsk_put(req);
1680 if (req_stolen) {
 1681 /* Another CPU got exclusive access to req
 1682 * and created a full-blown socket.
 1683 * Try to feed this packet to that socket
 1684 * instead of discarding it.
 1685 */
1686 tcp_v6_restore_cb(skb);
1687 sock_put(sk);
1688 goto lookup;
1689 }
1690 goto discard_and_relse;
1691 }
1692 if (nsk == sk) {
1693 reqsk_put(req);
1694 tcp_v6_restore_cb(skb);
1695 } else if (tcp_child_process(sk, nsk, skb)) {
1696 tcp_v6_send_reset(nsk, skb);
1697 goto discard_and_relse;
1698 } else {
1699 sock_put(sk);
1700 return 0;
1701 }
1702 }
 1703 if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
 1704 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1705 goto discard_and_relse;
1706 }
1707
1708 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1709 goto discard_and_relse;
1710
 1711 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
 1712 goto discard_and_relse;
1713
1714 if (tcp_filter(sk, skb))
1715 goto discard_and_relse;
1716 th = (const struct tcphdr *)skb->data;
1717 hdr = ipv6_hdr(skb);
1718 tcp_v6_fill_cb(skb, hdr, th);
1719
1720 skb->dev = NULL;
1721
1722 if (sk->sk_state == TCP_LISTEN) {
1723 ret = tcp_v6_do_rcv(sk, skb);
1724 goto put_and_return;
1725 }
1726
1727 sk_incoming_cpu_update(sk);
1728
1729 bh_lock_sock_nested(sk);
1730 tcp_segs_in(tcp_sk(sk), skb);
1731 ret = 0;
1732 if (!sock_owned_by_user(sk)) {
 1733 skb_to_free = sk->sk_rx_skb_cache;
 1734 sk->sk_rx_skb_cache = NULL;
 1735 ret = tcp_v6_do_rcv(sk, skb);
 1736 } else {
 1737 if (tcp_add_backlog(sk, skb))
 1738 goto discard_and_relse;
 1739 skb_to_free = NULL;
 1740 }
 1741 bh_unlock_sock(sk);
 1742 if (skb_to_free)
 1743 __kfree_skb(skb_to_free);
 1744put_and_return:
1745 if (refcounted)
1746 sock_put(sk);
1747 return ret ? -1 : 0;
1748
1749no_tcp_socket:
1750 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1751 goto discard_it;
1752
1753 tcp_v6_fill_cb(skb, hdr, th);
1754
1755 if (tcp_checksum_complete(skb)) {
1756csum_error:
1757 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1758bad_packet:
1759 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1760 } else {
1761 tcp_v6_send_reset(NULL, skb);
1762 }
1763
1764discard_it:
1765 kfree_skb(skb);
1766 return 0;
1767
1768discard_and_relse:
1769 sk_drops_add(sk, skb);
1770 if (refcounted)
1771 sock_put(sk);
1772 goto discard_it;
1773
1774do_time_wait:
1775 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1776 inet_twsk_put(inet_twsk(sk));
1777 goto discard_it;
1778 }
1779
1780 tcp_v6_fill_cb(skb, hdr, th);
1781
1782 if (tcp_checksum_complete(skb)) {
1783 inet_twsk_put(inet_twsk(sk));
1784 goto csum_error;
1785 }
1786
1787 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1788 case TCP_TW_SYN:
1789 {
1790 struct sock *sk2;
1791
1792 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1793 skb, __tcp_hdrlen(th),
1794 &ipv6_hdr(skb)->saddr, th->source,
1795 &ipv6_hdr(skb)->daddr,
1796 ntohs(th->dest),
1797 tcp_v6_iif_l3_slave(skb),
1798 sdif);
1799 if (sk2) {
1800 struct inet_timewait_sock *tw = inet_twsk(sk);
1801 inet_twsk_deschedule_put(tw);
1802 sk = sk2;
1803 tcp_v6_restore_cb(skb);
1804 refcounted = false;
1805 goto process;
1806 }
1807 }
1808 /* to ACK */
 1809 fallthrough;
 1810 case TCP_TW_ACK:
1811 tcp_v6_timewait_ack(sk, skb);
1812 break;
1813 case TCP_TW_RST:
1814 tcp_v6_send_reset(sk, skb);
1815 inet_twsk_deschedule_put(inet_twsk(sk));
1816 goto discard_it;
1817 case TCP_TW_SUCCESS:
1818 ;
1819 }
1820 goto discard_it;
1821}
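/*
 * Editor's note: the min_hopcount drop in tcp_v6_rcv() above implements the
 * generalized TTL security mechanism (GTSM, RFC 5082) for IPv6.  A rough,
 * illustrative userspace sketch of arming it follows; the helper name is
 * made up, and IPV6_MINHOPCOUNT comes from the kernel UAPI headers (older
 * libcs may need <linux/in6.h> for the definition).
 */
#if 0	/* illustrative userspace sketch only */
#include <netinet/in.h>
#include <sys/socket.h>

static int arm_gtsm(int fd)
{
	/* peers are expected to send with hop limit 255; anything lower
	 * has crossed a router and is dropped by tcp_v6_rcv()
	 */
	int min_hops = 255;

	return setsockopt(fd, IPPROTO_IPV6, IPV6_MINHOPCOUNT,
			  &min_hops, sizeof(min_hops));
}
#endif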
1822
 1823void tcp_v6_early_demux(struct sk_buff *skb)
 1824{
1825 const struct ipv6hdr *hdr;
1826 const struct tcphdr *th;
1827 struct sock *sk;
1828
1829 if (skb->pkt_type != PACKET_HOST)
1830 return;
1831
1832 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1833 return;
1834
1835 hdr = ipv6_hdr(skb);
1836 th = tcp_hdr(skb);
1837
1838 if (th->doff < sizeof(struct tcphdr) / 4)
1839 return;
1840
1841 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1842 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1843 &hdr->saddr, th->source,
1844 &hdr->daddr, ntohs(th->dest),
1845 inet6_iif(skb), inet6_sdif(skb));
1846 if (sk) {
1847 skb->sk = sk;
1848 skb->destructor = sock_edemux;
1849 if (sk_fullsock(sk)) {
 1850 struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
 1851
 1852 if (dst)
 1853 dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
 1854 if (dst &&
1855 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1856 skb_dst_set_noref(skb, dst);
1857 }
1858 }
1859}
1860
1861static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1862 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1863 .twsk_unique = tcp_twsk_unique,
1864 .twsk_destructor = tcp_twsk_destructor,
1865};
1866
 1867INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
1868{
1869 struct ipv6_pinfo *np = inet6_sk(sk);
1870
1871 __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
1872}
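/*
 * Editor's note: __tcp_v6_send_check() computes the checksum over the IPv6
 * pseudo-header of RFC 8200: the 128-bit source and destination addresses,
 * the 32-bit upper-layer packet length, 24 bits of zero and the 8-bit next
 * header value (IPPROTO_TCP), followed by the TCP header and payload.  On
 * transmit the final fold is typically left to checksum offload; the
 * receive path above uses the same pseudo-header via skb_checksum_init()
 * and ip6_compute_pseudo.
 */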
1873
1874const struct inet_connection_sock_af_ops ipv6_specific = {
 1875 .queue_xmit = inet6_csk_xmit,
1876 .send_check = tcp_v6_send_check,
1877 .rebuild_header = inet6_sk_rebuild_header,
1878 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1879 .conn_request = tcp_v6_conn_request,
1880 .syn_recv_sock = tcp_v6_syn_recv_sock,
1881 .net_header_len = sizeof(struct ipv6hdr),
1882 .net_frag_header_len = sizeof(struct frag_hdr),
1883 .setsockopt = ipv6_setsockopt,
1884 .getsockopt = ipv6_getsockopt,
1885 .addr2sockaddr = inet6_csk_addr2sockaddr,
1886 .sockaddr_len = sizeof(struct sockaddr_in6),
 1887 .mtu_reduced = tcp_v6_mtu_reduced,
1888};
1889
1890#ifdef CONFIG_TCP_MD5SIG
1891static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1892 .md5_lookup = tcp_v6_md5_lookup,
1893 .calc_md5_hash = tcp_v6_md5_hash_skb,
1894 .md5_parse = tcp_v6_parse_md5_keys,
1895};
1896#endif
1897
1898/*
1899 * TCP over IPv4 via INET6 API
1900 */
1901static const struct inet_connection_sock_af_ops ipv6_mapped = {
1902 .queue_xmit = ip_queue_xmit,
1903 .send_check = tcp_v4_send_check,
1904 .rebuild_header = inet_sk_rebuild_header,
1905 .sk_rx_dst_set = inet_sk_rx_dst_set,
1906 .conn_request = tcp_v6_conn_request,
1907 .syn_recv_sock = tcp_v6_syn_recv_sock,
1908 .net_header_len = sizeof(struct iphdr),
1909 .setsockopt = ipv6_setsockopt,
1910 .getsockopt = ipv6_getsockopt,
1911 .addr2sockaddr = inet6_csk_addr2sockaddr,
1912 .sockaddr_len = sizeof(struct sockaddr_in6),
 1913 .mtu_reduced = tcp_v4_mtu_reduced,
1914};
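/*
 * Editor's note: ipv6_mapped is installed on an AF_INET6 socket once it
 * carries IPv4 traffic through a v4-mapped address (::ffff:a.b.c.d).  The
 * sketch below is a purely illustrative userspace view of such a socket;
 * the function name is made up, while the APIs used (accept,
 * IN6_IS_ADDR_V4MAPPED, inet_ntop) are standard.
 */
#if 0	/* illustrative userspace sketch only */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

/* listen_fd: AF_INET6 listener with IPV6_V6ONLY left disabled */
static void accept_mapped_peer(int listen_fd)
{
	struct sockaddr_in6 peer;
	socklen_t len = sizeof(peer);
	int fd = accept(listen_fd, (struct sockaddr *)&peer, &len);

	if (fd < 0)
		return;
	if (IN6_IS_ADDR_V4MAPPED(&peer.sin6_addr)) {
		char buf[INET_ADDRSTRLEN];

		/* the last four bytes of the mapped address are the IPv4 address */
		inet_ntop(AF_INET, &peer.sin6_addr.s6_addr[12], buf, sizeof(buf));
		printf("IPv4 peer %s reached us via an AF_INET6 socket\n", buf);
	}
}
#endif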
1915
1916#ifdef CONFIG_TCP_MD5SIG
1917static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1918 .md5_lookup = tcp_v4_md5_lookup,
1919 .calc_md5_hash = tcp_v4_md5_hash_skb,
1920 .md5_parse = tcp_v6_parse_md5_keys,
1921};
1922#endif
1923
 1924/* NOTE: A lot of fields are already zeroed by the call to
 1925 * sk_alloc(), so they need not be initialized here.
 1926 */
1927static int tcp_v6_init_sock(struct sock *sk)
1928{
1929 struct inet_connection_sock *icsk = inet_csk(sk);
1930
1931 tcp_init_sock(sk);
1932
1933 icsk->icsk_af_ops = &ipv6_specific;
1934
1935#ifdef CONFIG_TCP_MD5SIG
1936 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1937#endif
1938
1939 return 0;
1940}
1941
1942static void tcp_v6_destroy_sock(struct sock *sk)
1943{
1944 tcp_v4_destroy_sock(sk);
1945 inet6_destroy_sock(sk);
1946}
1947
1948#ifdef CONFIG_PROC_FS
1949/* Proc filesystem TCPv6 sock list dumping. */
1950static void get_openreq6(struct seq_file *seq,
1951 const struct request_sock *req, int i)
1952{
1953 long ttd = req->rsk_timer.expires - jiffies;
1954 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1955 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1956
1957 if (ttd < 0)
1958 ttd = 0;
1959
1960 seq_printf(seq,
1961 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1962 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1963 i,
1964 src->s6_addr32[0], src->s6_addr32[1],
1965 src->s6_addr32[2], src->s6_addr32[3],
1966 inet_rsk(req)->ir_num,
1967 dest->s6_addr32[0], dest->s6_addr32[1],
1968 dest->s6_addr32[2], dest->s6_addr32[3],
1969 ntohs(inet_rsk(req)->ir_rmt_port),
1970 TCP_SYN_RECV,
1971 0, 0, /* could print option size, but that is af dependent. */
1972 1, /* timers active (only the expire timer) */
1973 jiffies_to_clock_t(ttd),
1974 req->num_timeout,
1975 from_kuid_munged(seq_user_ns(seq),
1976 sock_i_uid(req->rsk_listener)),
1977 0, /* non standard timer */
1978 0, /* open_requests have no inode */
1979 0, req);
1980}
1981
1982static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1983{
1984 const struct in6_addr *dest, *src;
1985 __u16 destp, srcp;
1986 int timer_active;
1987 unsigned long timer_expires;
1988 const struct inet_sock *inet = inet_sk(sp);
1989 const struct tcp_sock *tp = tcp_sk(sp);
1990 const struct inet_connection_sock *icsk = inet_csk(sp);
1991 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1992 int rx_queue;
1993 int state;
1994
1995 dest = &sp->sk_v6_daddr;
1996 src = &sp->sk_v6_rcv_saddr;
1997 destp = ntohs(inet->inet_dport);
1998 srcp = ntohs(inet->inet_sport);
1999
2000 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2001 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2002 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2003 timer_active = 1;
2004 timer_expires = icsk->icsk_timeout;
2005 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2006 timer_active = 4;
2007 timer_expires = icsk->icsk_timeout;
2008 } else if (timer_pending(&sp->sk_timer)) {
2009 timer_active = 2;
2010 timer_expires = sp->sk_timer.expires;
2011 } else {
2012 timer_active = 0;
2013 timer_expires = jiffies;
2014 }
2015
2016 state = inet_sk_state_load(sp);
2017 if (state == TCP_LISTEN)
 2018 rx_queue = READ_ONCE(sp->sk_ack_backlog);
 2019 else
2020 /* Because we don't lock the socket,
2021 * we might find a transient negative value.
2022 */
 2023 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
 2024 READ_ONCE(tp->copied_seq), 0);
 2025
2026 seq_printf(seq,
2027 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2028 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
2029 i,
2030 src->s6_addr32[0], src->s6_addr32[1],
2031 src->s6_addr32[2], src->s6_addr32[3], srcp,
2032 dest->s6_addr32[0], dest->s6_addr32[1],
2033 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2034 state,
 2035 READ_ONCE(tp->write_seq) - tp->snd_una,
 2036 rx_queue,
2037 timer_active,
2038 jiffies_delta_to_clock_t(timer_expires - jiffies),
2039 icsk->icsk_retransmits,
2040 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2041 icsk->icsk_probes_out,
2042 sock_i_ino(sp),
2043 refcount_read(&sp->sk_refcnt), sp,
2044 jiffies_to_clock_t(icsk->icsk_rto),
2045 jiffies_to_clock_t(icsk->icsk_ack.ato),
 2046 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
 2047 tp->snd_cwnd,
2048 state == TCP_LISTEN ?
2049 fastopenq->max_qlen :
2050 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
2051 );
2052}
2053
2054static void get_timewait6_sock(struct seq_file *seq,
2055 struct inet_timewait_sock *tw, int i)
2056{
2057 long delta = tw->tw_timer.expires - jiffies;
2058 const struct in6_addr *dest, *src;
2059 __u16 destp, srcp;
2060
2061 dest = &tw->tw_v6_daddr;
2062 src = &tw->tw_v6_rcv_saddr;
2063 destp = ntohs(tw->tw_dport);
2064 srcp = ntohs(tw->tw_sport);
2065
2066 seq_printf(seq,
2067 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2068 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2069 i,
2070 src->s6_addr32[0], src->s6_addr32[1],
2071 src->s6_addr32[2], src->s6_addr32[3], srcp,
2072 dest->s6_addr32[0], dest->s6_addr32[1],
2073 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2074 tw->tw_substate, 0, 0,
2075 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2076 refcount_read(&tw->tw_refcnt), tw);
2077}
2078
2079static int tcp6_seq_show(struct seq_file *seq, void *v)
2080{
2081 struct tcp_iter_state *st;
2082 struct sock *sk = v;
2083
2084 if (v == SEQ_START_TOKEN) {
2085 seq_puts(seq,
2086 " sl "
2087 "local_address "
2088 "remote_address "
2089 "st tx_queue rx_queue tr tm->when retrnsmt"
2090 " uid timeout inode\n");
2091 goto out;
2092 }
2093 st = seq->private;
2094
2095 if (sk->sk_state == TCP_TIME_WAIT)
2096 get_timewait6_sock(seq, v, st->num);
2097 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2098 get_openreq6(seq, v, st->num);
2099 else
2100 get_tcp6_sock(seq, v, st->num);
2101out:
2102 return 0;
2103}
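/*
 * Editor's note: each line emitted above prints the local and remote
 * addresses as four %08X words (s6_addr32[0..3]) followed by the port in
 * hex.  The words are raw 32-bit values in host byte order, so parsing
 * them back with %x on the same machine reconstructs the address.  A
 * minimal, illustrative decoding sketch follows (the helper name is made
 * up; s6_addr32 is the glibc union member of struct in6_addr):
 */
#if 0	/* illustrative userspace sketch only */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

/* tok is one "addr:port" token from /proc/net/tcp6, e.g. the local_address column */
static void decode_tcp6_addr(const char *tok)
{
	struct in6_addr a;
	unsigned int port;
	char buf[INET6_ADDRSTRLEN];

	if (sscanf(tok, "%8x%8x%8x%8x:%x",
		   &a.s6_addr32[0], &a.s6_addr32[1],
		   &a.s6_addr32[2], &a.s6_addr32[3], &port) != 5)
		return;
	inet_ntop(AF_INET6, &a, buf, sizeof(buf));
	printf("%s port %u\n", buf, port);
}
#endif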
2104
2105static const struct seq_operations tcp6_seq_ops = {
2106 .show = tcp6_seq_show,
2107 .start = tcp_seq_start,
2108 .next = tcp_seq_next,
2109 .stop = tcp_seq_stop,
2110};
2111
2112static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2113 .family = AF_INET6,
2114};
2115
2116int __net_init tcp6_proc_init(struct net *net)
2117{
2118 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2119 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
2120 return -ENOMEM;
2121 return 0;
2122}
2123
2124void tcp6_proc_exit(struct net *net)
2125{
2126 remove_proc_entry("tcp6", net->proc_net);
2127}
2128#endif
2129
2130struct proto tcpv6_prot = {
2131 .name = "TCPv6",
2132 .owner = THIS_MODULE,
2133 .close = tcp_close,
2134 .pre_connect = tcp_v6_pre_connect,
2135 .connect = tcp_v6_connect,
2136 .disconnect = tcp_disconnect,
2137 .accept = inet_csk_accept,
2138 .ioctl = tcp_ioctl,
2139 .init = tcp_v6_init_sock,
2140 .destroy = tcp_v6_destroy_sock,
2141 .shutdown = tcp_shutdown,
2142 .setsockopt = tcp_setsockopt,
2143 .getsockopt = tcp_getsockopt,
2144 .keepalive = tcp_set_keepalive,
2145 .recvmsg = tcp_recvmsg,
2146 .sendmsg = tcp_sendmsg,
2147 .sendpage = tcp_sendpage,
2148 .backlog_rcv = tcp_v6_do_rcv,
2149 .release_cb = tcp_release_cb,
2150 .hash = inet6_hash,
2151 .unhash = inet_unhash,
2152 .get_port = inet_csk_get_port,
2153 .enter_memory_pressure = tcp_enter_memory_pressure,
2154 .leave_memory_pressure = tcp_leave_memory_pressure,
2155 .stream_memory_free = tcp_stream_memory_free,
2156 .sockets_allocated = &tcp_sockets_allocated,
2157 .memory_allocated = &tcp_memory_allocated,
2158 .memory_pressure = &tcp_memory_pressure,
2159 .orphan_count = &tcp_orphan_count,
2160 .sysctl_mem = sysctl_tcp_mem,
2161 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2162 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
2163 .max_header = MAX_TCP_HEADER,
2164 .obj_size = sizeof(struct tcp6_sock),
2165 .slab_flags = SLAB_TYPESAFE_BY_RCU,
2166 .twsk_prot = &tcp6_timewait_sock_ops,
2167 .rsk_prot = &tcp6_request_sock_ops,
2168 .h.hashinfo = &tcp_hashinfo,
2169 .no_autobind = true,
 2170 .diag_destroy = tcp_abort,
2171};
 2172EXPORT_SYMBOL_GPL(tcpv6_prot);
 2173
 2174static const struct inet6_protocol tcpv6_protocol = {
 2175 .handler = tcp_v6_rcv,
2176 .err_handler = tcp_v6_err,
2177 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2178};
2179
2180static struct inet_protosw tcpv6_protosw = {
2181 .type = SOCK_STREAM,
2182 .protocol = IPPROTO_TCP,
2183 .prot = &tcpv6_prot,
2184 .ops = &inet6_stream_ops,
2185 .flags = INET_PROTOSW_PERMANENT |
2186 INET_PROTOSW_ICSK,
2187};
2188
2189static int __net_init tcpv6_net_init(struct net *net)
2190{
2191 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2192 SOCK_RAW, IPPROTO_TCP, net);
2193}
2194
2195static void __net_exit tcpv6_net_exit(struct net *net)
2196{
2197 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2198}
2199
2200static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2201{
2202 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
2203}
2204
2205static struct pernet_operations tcpv6_net_ops = {
2206 .init = tcpv6_net_init,
2207 .exit = tcpv6_net_exit,
2208 .exit_batch = tcpv6_net_exit_batch,
2209};
2210
2211int __init tcpv6_init(void)
2212{
2213 int ret;
2214
2215 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2216 if (ret)
2217 goto out;
2218
2219 /* register inet6 protocol */
2220 ret = inet6_register_protosw(&tcpv6_protosw);
2221 if (ret)
2222 goto out_tcpv6_protocol;
2223
2224 ret = register_pernet_subsys(&tcpv6_net_ops);
2225 if (ret)
2226 goto out_tcpv6_protosw;
 2227
2228 ret = mptcpv6_init();
2229 if (ret)
2230 goto out_tcpv6_pernet_subsys;
2231
 2232out:
2233 return ret;
2234
 2235out_tcpv6_pernet_subsys:
2236 unregister_pernet_subsys(&tcpv6_net_ops);
 2237out_tcpv6_protosw:
2238 inet6_unregister_protosw(&tcpv6_protosw);
2239out_tcpv6_protocol:
2240 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2241 goto out;
2242}
2243
2244void tcpv6_exit(void)
2245{
2246 unregister_pernet_subsys(&tcpv6_net_ops);
2247 inet6_unregister_protosw(&tcpv6_protosw);
2248 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2249}