1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Implementation of the Transmission Control Protocol(TCP).
8 *
9 * IPv4 specific functions
10 *
11 *		code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 */
18
19/*
20 * Changes:
21 * David S. Miller : New socket lookup architecture.
22 * This code is dedicated to John Dyson.
23 * David S. Miller : Change semantics of established hash,
24 * half is devoted to TIME_WAIT sockets
25 * and the rest go in the other half.
26 * Andi Kleen : Add support for syncookies and fixed
27 * some bugs: ip options weren't passed to
28 * the TCP layer, missed a check for an
29 * ACK bit.
30 * Andi Kleen : Implemented fast path mtu discovery.
31 * Fixed many serious bugs in the
32 * request_sock handling and moved
33 * most of it into the af independent code.
34 * Added tail drop and some other bugfixes.
35 * Added new listen semantics.
36 * Mike McLagan : Routing by source
37 * Juan Jose Ciarlante: ip_dynaddr bits
38 * Andi Kleen: various fixes.
39 * Vitaly E. Lavrov : Transparent proxy revived after year
40 * coma.
41 * Andi Kleen : Fix new listen.
42 * Andi Kleen : Fix accept error reporting.
43 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
44 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
45 * a single port at the same time.
46 */
47
48#define pr_fmt(fmt) "TCP: " fmt
49
50#include <linux/bottom_half.h>
51#include <linux/types.h>
52#include <linux/fcntl.h>
53#include <linux/module.h>
54#include <linux/random.h>
55#include <linux/cache.h>
56#include <linux/jhash.h>
57#include <linux/init.h>
58#include <linux/times.h>
59#include <linux/slab.h>
60
61#include <net/net_namespace.h>
62#include <net/icmp.h>
63#include <net/inet_hashtables.h>
64#include <net/tcp.h>
65#include <net/transp_v6.h>
66#include <net/ipv6.h>
67#include <net/inet_common.h>
68#include <net/timewait_sock.h>
69#include <net/xfrm.h>
70#include <net/secure_seq.h>
71#include <net/busy_poll.h>
72
73#include <linux/inet.h>
74#include <linux/ipv6.h>
75#include <linux/stddef.h>
76#include <linux/proc_fs.h>
77#include <linux/seq_file.h>
78#include <linux/inetdevice.h>
79
80#include <crypto/hash.h>
81#include <linux/scatterlist.h>
82
83#include <trace/events/tcp.h>
84
85#ifdef CONFIG_TCP_MD5SIG
86static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
87 __be32 daddr, __be32 saddr, const struct tcphdr *th);
88#endif
89
90struct inet_hashinfo tcp_hashinfo;
91EXPORT_SYMBOL(tcp_hashinfo);
92
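/* Generate the initial sequence number for a connection from the
 * address/port four-tuple carried in the packet headers (see secure_tcp_seq()).
 */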
93static u32 tcp_v4_init_seq(const struct sk_buff *skb)
94{
95 return secure_tcp_seq(ip_hdr(skb)->daddr,
96 ip_hdr(skb)->saddr,
97 tcp_hdr(skb)->dest,
98 tcp_hdr(skb)->source);
99}
100
101static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
102{
103 return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
104}
105
106int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
107{
108 const struct inet_timewait_sock *tw = inet_twsk(sktw);
109 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
110 struct tcp_sock *tp = tcp_sk(sk);
111 int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
112
113 if (reuse == 2) {
114 /* Still does not detect *everything* that goes through
115 * lo, since we require a loopback src or dst address
116 * or direct binding to 'lo' interface.
117 */
118 bool loopback = false;
119 if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
120 loopback = true;
121#if IS_ENABLED(CONFIG_IPV6)
122 if (tw->tw_family == AF_INET6) {
123 if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
124 (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
125 (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
126 ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
127 (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
128 (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
129 loopback = true;
130 } else
131#endif
132 {
133 if (ipv4_is_loopback(tw->tw_daddr) ||
134 ipv4_is_loopback(tw->tw_rcv_saddr))
135 loopback = true;
136 }
137 if (!loopback)
138 reuse = 0;
139 }
140
141 /* With PAWS, it is safe from the viewpoint
142 of data integrity. Even without PAWS it is safe provided sequence
143 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
144
145 Actually, the idea is close to VJ's one, only timestamp cache is
146 held not per host, but per port pair and TW bucket is used as state
147 holder.
148
149 If TW bucket has been already destroyed we fall back to VJ's scheme
150 and use initial timestamp retrieved from peer table.
151 */
152 if (tcptw->tw_ts_recent_stamp &&
153 (!twp || (reuse && time_after32(ktime_get_seconds(),
154 tcptw->tw_ts_recent_stamp)))) {
155 /* In case of repair and re-using TIME-WAIT sockets we still
156 * want to be sure that it is safe as above but honor the
157 * sequence numbers and time stamps set as part of the repair
158 * process.
159 *
160 * Without this check re-using a TIME-WAIT socket with TCP
161 * repair would accumulate a -1 on the repair assigned
162 * sequence number. The first time it is reused the sequence
163 * is -1, the second time -2, etc. This fixes that issue
164 * without appearing to create any others.
165 */
166 if (likely(!tp->repair)) {
167			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
168
169 if (!seq)
170 seq = 1;
171 WRITE_ONCE(tp->write_seq, seq);
172			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
173 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
174 }
175 sock_hold(sktw);
176 return 1;
177 }
178
179 return 0;
180}
181EXPORT_SYMBOL_GPL(tcp_twsk_unique);
182
183static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
184 int addr_len)
185{
186 /* This check is replicated from tcp_v4_connect() and intended to
187 * prevent BPF program called below from accessing bytes that are out
188 * of the bound specified by user in addr_len.
189 */
190 if (addr_len < sizeof(struct sockaddr_in))
191 return -EINVAL;
192
193 sock_owned_by_me(sk);
194
195 return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
196}
197
198/* This will initiate an outgoing connection. */
199int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
200{
201 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
202 struct inet_sock *inet = inet_sk(sk);
203 struct tcp_sock *tp = tcp_sk(sk);
204 __be16 orig_sport, orig_dport;
205 __be32 daddr, nexthop;
206 struct flowi4 *fl4;
207 struct rtable *rt;
208 int err;
209 struct ip_options_rcu *inet_opt;
210 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
211
212 if (addr_len < sizeof(struct sockaddr_in))
213 return -EINVAL;
214
215 if (usin->sin_family != AF_INET)
216 return -EAFNOSUPPORT;
217
218 nexthop = daddr = usin->sin_addr.s_addr;
219 inet_opt = rcu_dereference_protected(inet->inet_opt,
220 lockdep_sock_is_held(sk));
221 if (inet_opt && inet_opt->opt.srr) {
222 if (!daddr)
223 return -EINVAL;
224 nexthop = inet_opt->opt.faddr;
225 }
226
227 orig_sport = inet->inet_sport;
228 orig_dport = usin->sin_port;
229 fl4 = &inet->cork.fl.u.ip4;
230 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
231 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
232 IPPROTO_TCP,
233 orig_sport, orig_dport, sk);
234 if (IS_ERR(rt)) {
235 err = PTR_ERR(rt);
236 if (err == -ENETUNREACH)
237 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
238 return err;
239 }
240
241 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
242 ip_rt_put(rt);
243 return -ENETUNREACH;
244 }
245
246 if (!inet_opt || !inet_opt->opt.srr)
247 daddr = fl4->daddr;
248
249 if (!inet->inet_saddr)
250 inet->inet_saddr = fl4->saddr;
251 sk_rcv_saddr_set(sk, inet->inet_saddr);
252
253 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
254 /* Reset inherited state */
255 tp->rx_opt.ts_recent = 0;
256 tp->rx_opt.ts_recent_stamp = 0;
257 if (likely(!tp->repair))
258			WRITE_ONCE(tp->write_seq, 0);
259	}
260
261 inet->inet_dport = usin->sin_port;
262 sk_daddr_set(sk, daddr);
263
264 inet_csk(sk)->icsk_ext_hdr_len = 0;
265 if (inet_opt)
266 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
267
268 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
269
270 /* Socket identity is still unknown (sport may be zero).
271	 * However we set state to SYN-SENT and, without releasing the socket
272	 * lock, select a source port, enter ourselves into the hash tables and
273	 * complete initialization after this.
274 */
275 tcp_set_state(sk, TCP_SYN_SENT);
276 err = inet_hash_connect(tcp_death_row, sk);
277 if (err)
278 goto failure;
279
280 sk_set_txhash(sk);
281
282 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
283 inet->inet_sport, inet->inet_dport, sk);
284 if (IS_ERR(rt)) {
285 err = PTR_ERR(rt);
286 rt = NULL;
287 goto failure;
288 }
289 /* OK, now commit destination to socket. */
290 sk->sk_gso_type = SKB_GSO_TCPV4;
291 sk_setup_caps(sk, &rt->dst);
292 rt = NULL;
293
294 if (likely(!tp->repair)) {
295 if (!tp->write_seq)
296			WRITE_ONCE(tp->write_seq,
297 secure_tcp_seq(inet->inet_saddr,
298 inet->inet_daddr,
299 inet->inet_sport,
300 usin->sin_port));
301		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
302 inet->inet_saddr,
303 inet->inet_daddr);
304 }
305
306	inet->inet_id = prandom_u32();
307
308 if (tcp_fastopen_defer_connect(sk, &err))
309 return err;
310 if (err)
311 goto failure;
312
313 err = tcp_connect(sk);
314
315 if (err)
316 goto failure;
317
318 return 0;
319
320failure:
321 /*
322 * This unhashes the socket and releases the local port,
323 * if necessary.
324 */
325 tcp_set_state(sk, TCP_CLOSE);
326 ip_rt_put(rt);
327 sk->sk_route_caps = 0;
328 inet->inet_dport = 0;
329 return err;
330}
331EXPORT_SYMBOL(tcp_v4_connect);
332
333/*
334 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
335 * It can be called through tcp_release_cb() if socket was owned by user
336 * at the time tcp_v4_err() was called to handle ICMP message.
337 */
338void tcp_v4_mtu_reduced(struct sock *sk)
339{
340 struct inet_sock *inet = inet_sk(sk);
341 struct dst_entry *dst;
342 u32 mtu;
343
344 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
345 return;
346	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
347	dst = inet_csk_update_pmtu(sk, mtu);
348 if (!dst)
349 return;
350
351	/* Something is about to go wrong... Remember the soft error
352	 * for the case where this connection will not be able to recover.
353 */
354 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
355 sk->sk_err_soft = EMSGSIZE;
356
357 mtu = dst_mtu(dst);
358
359 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
360 ip_sk_accept_pmtu(sk) &&
361 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
362 tcp_sync_mss(sk, mtu);
363
364 /* Resend the TCP packet because it's
365 * clear that the old packet has been
366 * dropped. This is the new "fast" path mtu
367 * discovery.
368 */
369 tcp_simple_retransmit(sk);
370 } /* else let the usual retransmit timer handle it */
371}
372EXPORT_SYMBOL(tcp_v4_mtu_reduced);
373
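/* Apply an ICMP redirect to this socket's cached route, if the cached
 * destination entry is still valid.
 */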
374static void do_redirect(struct sk_buff *skb, struct sock *sk)
375{
376 struct dst_entry *dst = __sk_dst_check(sk, 0);
377
378 if (dst)
379 dst->ops->redirect(dst, sk, skb);
380}
381
382
383/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
384void tcp_req_err(struct sock *sk, u32 seq, bool abort)
385{
386 struct request_sock *req = inet_reqsk(sk);
387 struct net *net = sock_net(sk);
388
389 /* ICMPs are not backlogged, hence we cannot get
390 * an established socket here.
391 */
392 if (seq != tcp_rsk(req)->snt_isn) {
393 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
394 } else if (abort) {
395 /*
396 * Still in SYN_RECV, just remove it silently.
397 * There is no good way to pass the error to the newly
398 * created socket, and POSIX does not want network
399 * errors returned from accept().
400 */
401 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
402 tcp_listendrop(req->rsk_listener);
403 }
404 reqsk_put(req);
405}
406EXPORT_SYMBOL(tcp_req_err);
407
408/*
409 * This routine is called by the ICMP module when it gets some
410 * sort of error condition. If err < 0 then the socket should
411 * be closed and the error returned to the user. If err > 0
412 * it's just the icmp type << 8 | icmp code. After adjustment
413 * header points to the first 8 bytes of the tcp header. We need
414 * to find the appropriate port.
415 *
416 * The locking strategy used here is very "optimistic". When
417 * someone else accesses the socket the ICMP is just dropped
418 * and for some paths there is no check at all.
419 * A more general error queue to queue errors for later handling
420 * is probably better.
421 *
422 */
423
424int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
425{
426 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
427 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
428 struct inet_connection_sock *icsk;
429 struct tcp_sock *tp;
430 struct inet_sock *inet;
431 const int type = icmp_hdr(icmp_skb)->type;
432 const int code = icmp_hdr(icmp_skb)->code;
433 struct sock *sk;
434 struct sk_buff *skb;
435 struct request_sock *fastopen;
436 u32 seq, snd_una;
437 s32 remaining;
438 u32 delta_us;
439 int err;
440 struct net *net = dev_net(icmp_skb->dev);
441
442 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
443 th->dest, iph->saddr, ntohs(th->source),
444 inet_iif(icmp_skb), 0);
445 if (!sk) {
446 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
447		return -ENOENT;
448	}
449 if (sk->sk_state == TCP_TIME_WAIT) {
450 inet_twsk_put(inet_twsk(sk));
451		return 0;
452	}
453 seq = ntohl(th->seq);
454	if (sk->sk_state == TCP_NEW_SYN_RECV) {
455 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
456 type == ICMP_TIME_EXCEEDED ||
457 (type == ICMP_DEST_UNREACH &&
458 (code == ICMP_NET_UNREACH ||
459 code == ICMP_HOST_UNREACH)));
460 return 0;
461 }
462
463 bh_lock_sock(sk);
464 /* If too many ICMPs get dropped on busy
465 * servers this needs to be solved differently.
466 * We do take care of PMTU discovery (RFC1191) special case :
467 * we can receive locally generated ICMP messages while socket is held.
468 */
469 if (sock_owned_by_user(sk)) {
470 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
471 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
472 }
473 if (sk->sk_state == TCP_CLOSE)
474 goto out;
475
476 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
477 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
478 goto out;
479 }
480
481 icsk = inet_csk(sk);
482 tp = tcp_sk(sk);
483 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
484	fastopen = rcu_dereference(tp->fastopen_rsk);
485	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
486 if (sk->sk_state != TCP_LISTEN &&
487 !between(seq, snd_una, tp->snd_nxt)) {
488 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
489 goto out;
490 }
491
492 switch (type) {
493 case ICMP_REDIRECT:
494 if (!sock_owned_by_user(sk))
495 do_redirect(icmp_skb, sk);
496 goto out;
497 case ICMP_SOURCE_QUENCH:
498 /* Just silently ignore these. */
499 goto out;
500 case ICMP_PARAMETERPROB:
501 err = EPROTO;
502 break;
503 case ICMP_DEST_UNREACH:
504 if (code > NR_ICMP_UNREACH)
505 goto out;
506
507 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
508 /* We are not interested in TCP_LISTEN and open_requests
509		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
510 * they should go through unfragmented).
511 */
512 if (sk->sk_state == TCP_LISTEN)
513 goto out;
514
515			WRITE_ONCE(tp->mtu_info, info);
516			if (!sock_owned_by_user(sk)) {
517 tcp_v4_mtu_reduced(sk);
518 } else {
519 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
520 sock_hold(sk);
521 }
522 goto out;
523 }
524
525 err = icmp_err_convert[code].errno;
526 /* check if icmp_skb allows revert of backoff
527 * (see draft-zimmermann-tcp-lcd) */
528 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
529 break;
530 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
531 !icsk->icsk_backoff || fastopen)
532 break;
533
534 if (sock_owned_by_user(sk))
535 break;
536
537		skb = tcp_rtx_queue_head(sk);
538 if (WARN_ON_ONCE(!skb))
539 break;
540
541		icsk->icsk_backoff--;
542 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
543 TCP_TIMEOUT_INIT;
544 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
545
546
547 tcp_mstamp_refresh(tp);
548		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
549		remaining = icsk->icsk_rto -
550 usecs_to_jiffies(delta_us);
551
552 if (remaining > 0) {
553 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
554 remaining, TCP_RTO_MAX);
555 } else {
556 /* RTO revert clocked out retransmission.
557 * Will retransmit now */
558 tcp_retransmit_timer(sk);
559 }
560
561 break;
562 case ICMP_TIME_EXCEEDED:
563 err = EHOSTUNREACH;
564 break;
565 default:
566 goto out;
567 }
568
569 switch (sk->sk_state) {
570 case TCP_SYN_SENT:
571 case TCP_SYN_RECV:
572		/* Only in fast or simultaneous open. If a fast open socket is
573		 * already accepted it is treated as a connected one below.
574 */
575 if (fastopen && !fastopen->sk)
576 break;
577
578 if (!sock_owned_by_user(sk)) {
579 sk->sk_err = err;
580
581 sk->sk_error_report(sk);
582
583 tcp_done(sk);
584 } else {
585 sk->sk_err_soft = err;
586 }
587 goto out;
588 }
589
590 /* If we've already connected we will keep trying
591 * until we time out, or the user gives up.
592 *
593	 * RFC 1122 4.2.3.9 allows us to consider as hard errors
594 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
595 * but it is obsoleted by pmtu discovery).
596 *
597	 * Note that in the modern internet, where routing is unreliable
598	 * and broken firewalls sit in every dark corner sending random
599	 * errors as ordered by their masters, even these two messages finally lose
600	 * their original sense (even Linux sends invalid PORT_UNREACHs)
601 *
602 * Now we are in compliance with RFCs.
603 * --ANK (980905)
604 */
605
606 inet = inet_sk(sk);
607 if (!sock_owned_by_user(sk) && inet->recverr) {
608 sk->sk_err = err;
609 sk->sk_error_report(sk);
610 } else { /* Only an error on timeout */
611 sk->sk_err_soft = err;
612 }
613
614out:
615 bh_unlock_sock(sk);
616 sock_put(sk);
617	return 0;
618}
619
620void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
621{
622 struct tcphdr *th = tcp_hdr(skb);
623
624 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
625 skb->csum_start = skb_transport_header(skb) - skb->head;
626 skb->csum_offset = offsetof(struct tcphdr, check);
627}
628
629/* This routine computes an IPv4 TCP checksum. */
630void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
631{
632 const struct inet_sock *inet = inet_sk(sk);
633
634 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
635}
636EXPORT_SYMBOL(tcp_v4_send_check);
637
638/*
639 * This routine will send an RST to the other tcp.
640 *
641 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
642 * for reset.
643 * Answer: if a packet caused RST, it is not for a socket
644 * existing in our system, if it is matched to a socket,
645 * it is just duplicate segment or bug in other side's TCP.
646 * So that we build reply only basing on parameters
647 * arrived with segment.
648 * Exception: precedence violation. We do not implement it in any case.
649 */
650
651static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
652{
653 const struct tcphdr *th = tcp_hdr(skb);
654 struct {
655 struct tcphdr th;
656#ifdef CONFIG_TCP_MD5SIG
657 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
658#endif
659 } rep;
660 struct ip_reply_arg arg;
661#ifdef CONFIG_TCP_MD5SIG
662 struct tcp_md5sig_key *key = NULL;
663 const __u8 *hash_location = NULL;
664 unsigned char newhash[16];
665 int genhash;
666 struct sock *sk1 = NULL;
667#endif
668	u64 transmit_time = 0;
669	struct sock *ctl_sk;
670	struct net *net;
671
672 /* Never send a reset in response to a reset. */
673 if (th->rst)
674 return;
675
676 /* If sk not NULL, it means we did a successful lookup and incoming
677 * route had to be correct. prequeue might have dropped our dst.
678 */
679 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
680 return;
681
682 /* Swap the send and the receive. */
683 memset(&rep, 0, sizeof(rep));
684 rep.th.dest = th->source;
685 rep.th.source = th->dest;
686 rep.th.doff = sizeof(struct tcphdr) / 4;
687 rep.th.rst = 1;
688
689 if (th->ack) {
690 rep.th.seq = th->ack_seq;
691 } else {
692 rep.th.ack = 1;
693 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
694 skb->len - (th->doff << 2));
695 }
696
697 memset(&arg, 0, sizeof(arg));
698 arg.iov[0].iov_base = (unsigned char *)&rep;
699 arg.iov[0].iov_len = sizeof(rep.th);
700
701 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
702#ifdef CONFIG_TCP_MD5SIG
703 rcu_read_lock();
704 hash_location = tcp_parse_md5sig_option(th);
705 if (sk && sk_fullsock(sk)) {
706 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
707 &ip_hdr(skb)->saddr, AF_INET);
708 } else if (hash_location) {
709 /*
710 * active side is lost. Try to find listening socket through
711 * source port, and then find md5 key through listening socket.
712		 * We do not lose security here:
713		 * the incoming packet is checked against the MD5 hash of the key we find,
714		 * and no RST is generated if the hash doesn't match.
715 */
716 sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
717 ip_hdr(skb)->saddr,
718 th->source, ip_hdr(skb)->daddr,
719 ntohs(th->source), inet_iif(skb),
720 tcp_v4_sdif(skb));
721 /* don't send rst if it can't find key */
722 if (!sk1)
723 goto out;
724
725 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
726 &ip_hdr(skb)->saddr, AF_INET);
727 if (!key)
728 goto out;
729
730
731 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
732 if (genhash || memcmp(hash_location, newhash, 16) != 0)
733 goto out;
734
735 }
736
737 if (key) {
738 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
739 (TCPOPT_NOP << 16) |
740 (TCPOPT_MD5SIG << 8) |
741 TCPOLEN_MD5SIG);
742 /* Update length and the length the header thinks exists */
743 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
744 rep.th.doff = arg.iov[0].iov_len / 4;
745
746 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
747 key, ip_hdr(skb)->saddr,
748 ip_hdr(skb)->daddr, &rep.th);
749 }
750#endif
751 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
752 ip_hdr(skb)->saddr, /* XXX */
753 arg.iov[0].iov_len, IPPROTO_TCP, 0);
754 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
755 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
756
757 /* When socket is gone, all binding information is lost.
758 * routing might fail in this case. No choice here, if we choose to force
759 * input interface, we will misroute in case of asymmetric route.
760 */
761 if (sk) {
762 arg.bound_dev_if = sk->sk_bound_dev_if;
763 if (sk_fullsock(sk))
764 trace_tcp_send_reset(sk, skb);
765 }
766
767 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
768 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
769
770 arg.tos = ip_hdr(skb)->tos;
771 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
772 local_bh_disable();
773	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
774 if (sk) {
775		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
776 inet_twsk(sk)->tw_mark : sk->sk_mark;
777		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
778 inet_twsk(sk)->tw_priority : sk->sk_priority;
779 transmit_time = tcp_transmit_time(sk);
780 }
781	ip_send_unicast_reply(ctl_sk,
782 skb, &TCP_SKB_CB(skb)->header.h4.opt,
783 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
784			      &arg, arg.iov[0].iov_len,
785 transmit_time);
786
787 ctl_sk->sk_mark = 0;
788 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
789 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
790 local_bh_enable();
791
792#ifdef CONFIG_TCP_MD5SIG
793out:
794 rcu_read_unlock();
795#endif
796}
797
798/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
799   outside socket context, is ugly, certainly. What can I do?
800 */
801
802static void tcp_v4_send_ack(const struct sock *sk,
803 struct sk_buff *skb, u32 seq, u32 ack,
804 u32 win, u32 tsval, u32 tsecr, int oif,
805 struct tcp_md5sig_key *key,
806 int reply_flags, u8 tos)
807{
808 const struct tcphdr *th = tcp_hdr(skb);
809 struct {
810 struct tcphdr th;
811 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
812#ifdef CONFIG_TCP_MD5SIG
813 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
814#endif
815 ];
816 } rep;
817 struct net *net = sock_net(sk);
818 struct ip_reply_arg arg;
819 struct sock *ctl_sk;
820	u64 transmit_time;
821
822 memset(&rep.th, 0, sizeof(struct tcphdr));
823 memset(&arg, 0, sizeof(arg));
824
825 arg.iov[0].iov_base = (unsigned char *)&rep;
826 arg.iov[0].iov_len = sizeof(rep.th);
827 if (tsecr) {
828 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
829 (TCPOPT_TIMESTAMP << 8) |
830 TCPOLEN_TIMESTAMP);
831 rep.opt[1] = htonl(tsval);
832 rep.opt[2] = htonl(tsecr);
833 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
834 }
835
836 /* Swap the send and the receive. */
837 rep.th.dest = th->source;
838 rep.th.source = th->dest;
839 rep.th.doff = arg.iov[0].iov_len / 4;
840 rep.th.seq = htonl(seq);
841 rep.th.ack_seq = htonl(ack);
842 rep.th.ack = 1;
843 rep.th.window = htons(win);
844
845#ifdef CONFIG_TCP_MD5SIG
846 if (key) {
847 int offset = (tsecr) ? 3 : 0;
848
849 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
850 (TCPOPT_NOP << 16) |
851 (TCPOPT_MD5SIG << 8) |
852 TCPOLEN_MD5SIG);
853 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
854 rep.th.doff = arg.iov[0].iov_len/4;
855
856 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
857 key, ip_hdr(skb)->saddr,
858 ip_hdr(skb)->daddr, &rep.th);
859 }
860#endif
861 arg.flags = reply_flags;
862 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
863 ip_hdr(skb)->saddr, /* XXX */
864 arg.iov[0].iov_len, IPPROTO_TCP, 0);
865 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
866 if (oif)
867 arg.bound_dev_if = oif;
868 arg.tos = tos;
869 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
870 local_bh_disable();
871	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
872 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
873 inet_twsk(sk)->tw_mark : sk->sk_mark;
874 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
875 inet_twsk(sk)->tw_priority : sk->sk_priority;
876 transmit_time = tcp_transmit_time(sk);
877	ip_send_unicast_reply(ctl_sk,
878 skb, &TCP_SKB_CB(skb)->header.h4.opt,
879 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
880			      &arg, arg.iov[0].iov_len,
881 transmit_time);
882
883 ctl_sk->sk_mark = 0;
884 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
885 local_bh_enable();
886}
887
888static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
889{
890 struct inet_timewait_sock *tw = inet_twsk(sk);
891 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
892
893 tcp_v4_send_ack(sk, skb,
894 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
895 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
896 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
897 tcptw->tw_ts_recent,
898 tw->tw_bound_dev_if,
899 tcp_twsk_md5_key(tcptw),
900 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
901 tw->tw_tos
902 );
903
904 inet_twsk_put(tw);
905}
906
907static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
908 struct request_sock *req)
909{
910 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
911 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
912 */
913 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
914 tcp_sk(sk)->snd_nxt;
915
916 /* RFC 7323 2.3
917 * The window field (SEG.WND) of every outgoing segment, with the
918 * exception of <SYN> segments, MUST be right-shifted by
919 * Rcv.Wind.Shift bits:
920 */
921 tcp_v4_send_ack(sk, skb, seq,
922 tcp_rsk(req)->rcv_nxt,
923 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
924 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
925 req->ts_recent,
926 0,
927 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
928 AF_INET),
929 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
930 ip_hdr(skb)->tos);
931}
932
933/*
934 * Send a SYN-ACK after having received a SYN.
935 * This still operates on a request_sock only, not on a big
936 * socket.
937 */
938static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
939 struct flowi *fl,
940 struct request_sock *req,
941 struct tcp_fastopen_cookie *foc,
942 enum tcp_synack_type synack_type)
943{
944 const struct inet_request_sock *ireq = inet_rsk(req);
945 struct flowi4 fl4;
946 int err = -1;
947 struct sk_buff *skb;
948
949 /* First, grab a route. */
950 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
951 return -1;
952
953 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
954
955 if (skb) {
956 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
957
958 rcu_read_lock();
959 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
960 ireq->ir_rmt_addr,
961 rcu_dereference(ireq->ireq_opt));
962 rcu_read_unlock();
963 err = net_xmit_eval(err);
964 }
965
966 return err;
967}
968
969/*
970 * IPv4 request_sock destructor.
971 */
972static void tcp_v4_reqsk_destructor(struct request_sock *req)
973{
974 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
975}
976
977#ifdef CONFIG_TCP_MD5SIG
978/*
979 * RFC2385 MD5 checksumming requires a mapping of
980 * IP address->MD5 Key.
981 * We need to maintain these in the sk structure.
982 */
983
984DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
985EXPORT_SYMBOL(tcp_md5_needed);
986
987/* Find the Key structure for an address.  */
988struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
989 const union tcp_md5_addr *addr,
990 int family)
991{
992 const struct tcp_sock *tp = tcp_sk(sk);
993 struct tcp_md5sig_key *key;
994 const struct tcp_md5sig_info *md5sig;
995 __be32 mask;
996 struct tcp_md5sig_key *best_match = NULL;
997 bool match;
998
999 /* caller either holds rcu_read_lock() or socket lock */
1000 md5sig = rcu_dereference_check(tp->md5sig_info,
1001 lockdep_sock_is_held(sk));
1002 if (!md5sig)
1003 return NULL;
1004
1005 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
1006 if (key->family != family)
1007 continue;
1008
1009 if (family == AF_INET) {
1010 mask = inet_make_mask(key->prefixlen);
1011 match = (key->addr.a4.s_addr & mask) ==
1012 (addr->a4.s_addr & mask);
1013#if IS_ENABLED(CONFIG_IPV6)
1014 } else if (family == AF_INET6) {
1015 match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1016 key->prefixlen);
1017#endif
1018 } else {
1019 match = false;
1020 }
1021
1022 if (match && (!best_match ||
1023 key->prefixlen > best_match->prefixlen))
1024 best_match = key;
1025 }
1026 return best_match;
1027}
1028EXPORT_SYMBOL(__tcp_md5_do_lookup);
1029
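/* Exact-match variant of the lookup above: family, prefix length and address
 * must all match; no longest-prefix selection is performed.
 */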
1030static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1031 const union tcp_md5_addr *addr,
1032 int family, u8 prefixlen)
1033{
1034 const struct tcp_sock *tp = tcp_sk(sk);
1035 struct tcp_md5sig_key *key;
1036 unsigned int size = sizeof(struct in_addr);
1037 const struct tcp_md5sig_info *md5sig;
1038
1039 /* caller either holds rcu_read_lock() or socket lock */
1040 md5sig = rcu_dereference_check(tp->md5sig_info,
1041 lockdep_sock_is_held(sk));
1042 if (!md5sig)
1043 return NULL;
1044#if IS_ENABLED(CONFIG_IPV6)
1045 if (family == AF_INET6)
1046 size = sizeof(struct in6_addr);
1047#endif
1048 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
1049 if (key->family != family)
1050 continue;
1051 if (!memcmp(&key->addr, addr, size) &&
1052 key->prefixlen == prefixlen)
1053 return key;
1054 }
1055 return NULL;
1056}
1057
1058struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1059 const struct sock *addr_sk)
1060{
1061 const union tcp_md5_addr *addr;
1062
1063 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1064 return tcp_md5_do_lookup(sk, addr, AF_INET);
1065}
1066EXPORT_SYMBOL(tcp_v4_md5_lookup);
1067
1068/* This can be called on a newly created socket, from other files */
1069int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1070 int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
1071 gfp_t gfp)
1072{
1073 /* Add Key to the list */
1074 struct tcp_md5sig_key *key;
1075 struct tcp_sock *tp = tcp_sk(sk);
1076 struct tcp_md5sig_info *md5sig;
1077
1078 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1079 if (key) {
1080		/* Pre-existing entry - just update that one.
1081 * Note that the key might be used concurrently.
1082 */
1083		memcpy(key->key, newkey, newkeylen);
1084
1085 /* Pairs with READ_ONCE() in tcp_md5_hash_key().
1086 * Also note that a reader could catch new key->keylen value
1087 * but old key->key[], this is the reason we use __GFP_ZERO
1088 * at sock_kmalloc() time below these lines.
1089 */
1090 WRITE_ONCE(key->keylen, newkeylen);
1091
1092		return 0;
1093 }
1094
1095 md5sig = rcu_dereference_protected(tp->md5sig_info,
1096 lockdep_sock_is_held(sk));
1097 if (!md5sig) {
1098 md5sig = kmalloc(sizeof(*md5sig), gfp);
1099 if (!md5sig)
1100 return -ENOMEM;
1101
1102 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1103 INIT_HLIST_HEAD(&md5sig->head);
1104 rcu_assign_pointer(tp->md5sig_info, md5sig);
1105 }
1106
1107	key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
1108	if (!key)
1109 return -ENOMEM;
1110 if (!tcp_alloc_md5sig_pool()) {
1111 sock_kfree_s(sk, key, sizeof(*key));
1112 return -ENOMEM;
1113 }
1114
1115 memcpy(key->key, newkey, newkeylen);
1116 key->keylen = newkeylen;
1117 key->family = family;
1118 key->prefixlen = prefixlen;
1119 memcpy(&key->addr, addr,
1120 (family == AF_INET6) ? sizeof(struct in6_addr) :
1121 sizeof(struct in_addr));
1122 hlist_add_head_rcu(&key->node, &md5sig->head);
1123 return 0;
1124}
1125EXPORT_SYMBOL(tcp_md5_do_add);
1126
1127int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1128 u8 prefixlen)
1129{
1130 struct tcp_md5sig_key *key;
1131
1132 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1133 if (!key)
1134 return -ENOENT;
1135 hlist_del_rcu(&key->node);
1136 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1137 kfree_rcu(key, rcu);
1138 return 0;
1139}
1140EXPORT_SYMBOL(tcp_md5_do_del);
1141
1142static void tcp_clear_md5_list(struct sock *sk)
1143{
1144 struct tcp_sock *tp = tcp_sk(sk);
1145 struct tcp_md5sig_key *key;
1146 struct hlist_node *n;
1147 struct tcp_md5sig_info *md5sig;
1148
1149 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1150
1151 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1152 hlist_del_rcu(&key->node);
1153 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1154 kfree_rcu(key, rcu);
1155 }
1156}
1157
1158static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1159 char __user *optval, int optlen)
1160{
1161 struct tcp_md5sig cmd;
1162 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1163 u8 prefixlen = 32;
1164
1165 if (optlen < sizeof(cmd))
1166 return -EINVAL;
1167
1168 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1169 return -EFAULT;
1170
1171 if (sin->sin_family != AF_INET)
1172 return -EINVAL;
1173
1174 if (optname == TCP_MD5SIG_EXT &&
1175 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1176 prefixlen = cmd.tcpm_prefixlen;
1177 if (prefixlen > 32)
1178 return -EINVAL;
1179 }
1180
1181 if (!cmd.tcpm_keylen)
1182 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1183 AF_INET, prefixlen);
1184
1185 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1186 return -EINVAL;
1187
1188 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1189 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1190 GFP_KERNEL);
1191}
1192
1193static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1194 __be32 daddr, __be32 saddr,
1195 const struct tcphdr *th, int nbytes)
1196{
1197 struct tcp4_pseudohdr *bp;
1198 struct scatterlist sg;
1199 struct tcphdr *_th;
1200
1201 bp = hp->scratch;
1202 bp->saddr = saddr;
1203 bp->daddr = daddr;
1204 bp->pad = 0;
1205 bp->protocol = IPPROTO_TCP;
1206 bp->len = cpu_to_be16(nbytes);
1207
1208 _th = (struct tcphdr *)(bp + 1);
1209 memcpy(_th, th, sizeof(*th));
1210 _th->check = 0;
1211
1212 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1213 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1214 sizeof(*bp) + sizeof(*th));
1215 return crypto_ahash_update(hp->md5_req);
1216}
1217
1218static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1219 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1220{
1221 struct tcp_md5sig_pool *hp;
1222 struct ahash_request *req;
1223
1224 hp = tcp_get_md5sig_pool();
1225 if (!hp)
1226 goto clear_hash_noput;
1227 req = hp->md5_req;
1228
1229 if (crypto_ahash_init(req))
1230 goto clear_hash;
1231 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1232 goto clear_hash;
1233 if (tcp_md5_hash_key(hp, key))
1234 goto clear_hash;
1235 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1236 if (crypto_ahash_final(req))
1237 goto clear_hash;
1238
1239 tcp_put_md5sig_pool();
1240 return 0;
1241
1242clear_hash:
1243 tcp_put_md5sig_pool();
1244clear_hash_noput:
1245 memset(md5_hash, 0, 16);
1246 return 1;
1247}
1248
1249int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1250 const struct sock *sk,
1251 const struct sk_buff *skb)
1252{
1253 struct tcp_md5sig_pool *hp;
1254 struct ahash_request *req;
1255 const struct tcphdr *th = tcp_hdr(skb);
1256 __be32 saddr, daddr;
1257
1258 if (sk) { /* valid for establish/request sockets */
1259 saddr = sk->sk_rcv_saddr;
1260 daddr = sk->sk_daddr;
1261 } else {
1262 const struct iphdr *iph = ip_hdr(skb);
1263 saddr = iph->saddr;
1264 daddr = iph->daddr;
1265 }
1266
1267 hp = tcp_get_md5sig_pool();
1268 if (!hp)
1269 goto clear_hash_noput;
1270 req = hp->md5_req;
1271
1272 if (crypto_ahash_init(req))
1273 goto clear_hash;
1274
1275 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1276 goto clear_hash;
1277 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1278 goto clear_hash;
1279 if (tcp_md5_hash_key(hp, key))
1280 goto clear_hash;
1281 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1282 if (crypto_ahash_final(req))
1283 goto clear_hash;
1284
1285 tcp_put_md5sig_pool();
1286 return 0;
1287
1288clear_hash:
1289 tcp_put_md5sig_pool();
1290clear_hash_noput:
1291 memset(md5_hash, 0, 16);
1292 return 1;
1293}
1294EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1295
1296#endif
1297
1298/* Called with rcu_read_lock() */
1299static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1300 const struct sk_buff *skb)
1301{
1302#ifdef CONFIG_TCP_MD5SIG
1303 /*
1304 * This gets called for each TCP segment that arrives
1305 * so we want to be efficient.
1306 * We have 3 drop cases:
1307 * o No MD5 hash and one expected.
1308 * o MD5 hash and we're not expecting one.
1309	 * o MD5 hash and it's wrong.
1310 */
1311 const __u8 *hash_location = NULL;
1312 struct tcp_md5sig_key *hash_expected;
1313 const struct iphdr *iph = ip_hdr(skb);
1314 const struct tcphdr *th = tcp_hdr(skb);
1315 int genhash;
1316 unsigned char newhash[16];
1317
1318 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1319 AF_INET);
1320 hash_location = tcp_parse_md5sig_option(th);
1321
1322 /* We've parsed the options - do we have a hash? */
1323 if (!hash_expected && !hash_location)
1324 return false;
1325
1326 if (hash_expected && !hash_location) {
1327 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1328 return true;
1329 }
1330
1331 if (!hash_expected && hash_location) {
1332 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1333 return true;
1334 }
1335
1336 /* Okay, so this is hash_expected and hash_location -
1337 * so we need to calculate the checksum.
1338 */
1339 genhash = tcp_v4_md5_hash_skb(newhash,
1340 hash_expected,
1341 NULL, skb);
1342
1343 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1344 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1345 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1346 &iph->saddr, ntohs(th->source),
1347 &iph->daddr, ntohs(th->dest),
1348 genhash ? " tcp_v4_calc_md5_hash failed"
1349 : "");
1350 return true;
1351 }
1352 return false;
1353#endif
1354 return false;
1355}
1356
1357static void tcp_v4_init_req(struct request_sock *req,
1358 const struct sock *sk_listener,
1359 struct sk_buff *skb)
1360{
1361 struct inet_request_sock *ireq = inet_rsk(req);
1362 struct net *net = sock_net(sk_listener);
1363
1364 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1365 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1366 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1367}
1368
1369static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1370 struct flowi *fl,
1371 const struct request_sock *req)
1372{
1373 return inet_csk_route_req(sk, &fl->u.ip4, req);
1374}
1375
1376struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1377 .family = PF_INET,
1378 .obj_size = sizeof(struct tcp_request_sock),
1379 .rtx_syn_ack = tcp_rtx_synack,
1380 .send_ack = tcp_v4_reqsk_send_ack,
1381 .destructor = tcp_v4_reqsk_destructor,
1382 .send_reset = tcp_v4_send_reset,
1383 .syn_ack_timeout = tcp_syn_ack_timeout,
1384};
1385
1386static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1387 .mss_clamp = TCP_MSS_DEFAULT,
1388#ifdef CONFIG_TCP_MD5SIG
1389 .req_md5_lookup = tcp_v4_md5_lookup,
1390 .calc_md5_hash = tcp_v4_md5_hash_skb,
1391#endif
1392 .init_req = tcp_v4_init_req,
1393#ifdef CONFIG_SYN_COOKIES
1394 .cookie_init_seq = cookie_v4_init_sequence,
1395#endif
1396 .route_req = tcp_v4_route_req,
1397 .init_seq = tcp_v4_init_seq,
1398 .init_ts_off = tcp_v4_init_ts_off,
1399 .send_synack = tcp_v4_send_synack,
1400};
1401
1402int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1403{
1404	/* Never answer SYNs sent to broadcast or multicast */
1405 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1406 goto drop;
1407
1408 return tcp_conn_request(&tcp_request_sock_ops,
1409 &tcp_request_sock_ipv4_ops, sk, skb);
1410
1411drop:
1412 tcp_listendrop(sk);
1413 return 0;
1414}
1415EXPORT_SYMBOL(tcp_v4_conn_request);
1416
1417
1418/*
1419 * The three way handshake has completed - we got a valid synack -
1420 * now create the new socket.
1421 */
1422struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1423 struct request_sock *req,
1424 struct dst_entry *dst,
1425 struct request_sock *req_unhash,
1426 bool *own_req)
1427{
1428 struct inet_request_sock *ireq;
1429 struct inet_sock *newinet;
1430 struct tcp_sock *newtp;
1431 struct sock *newsk;
1432#ifdef CONFIG_TCP_MD5SIG
1433 struct tcp_md5sig_key *key;
1434#endif
1435 struct ip_options_rcu *inet_opt;
1436
1437 if (sk_acceptq_is_full(sk))
1438 goto exit_overflow;
1439
1440 newsk = tcp_create_openreq_child(sk, req, skb);
1441 if (!newsk)
1442 goto exit_nonewsk;
1443
1444 newsk->sk_gso_type = SKB_GSO_TCPV4;
1445 inet_sk_rx_dst_set(newsk, skb);
1446
1447 newtp = tcp_sk(newsk);
1448 newinet = inet_sk(newsk);
1449 ireq = inet_rsk(req);
1450 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1451 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1452 newsk->sk_bound_dev_if = ireq->ir_iif;
1453 newinet->inet_saddr = ireq->ir_loc_addr;
1454 inet_opt = rcu_dereference(ireq->ireq_opt);
1455 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1456 newinet->mc_index = inet_iif(skb);
1457 newinet->mc_ttl = ip_hdr(skb)->ttl;
1458 newinet->rcv_tos = ip_hdr(skb)->tos;
1459 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1460 if (inet_opt)
1461 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1462	newinet->inet_id = prandom_u32();
1463
1464 if (!dst) {
1465 dst = inet_csk_route_child_sock(sk, newsk, req);
1466 if (!dst)
1467 goto put_and_exit;
1468 } else {
1469 /* syncookie case : see end of cookie_v4_check() */
1470 }
1471 sk_setup_caps(newsk, dst);
1472
1473 tcp_ca_openreq_child(newsk, dst);
1474
1475 tcp_sync_mss(newsk, dst_mtu(dst));
1476 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1477
1478 tcp_initialize_rcv_mss(newsk);
1479
1480#ifdef CONFIG_TCP_MD5SIG
1481 /* Copy over the MD5 key from the original socket */
1482 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1483 AF_INET);
1484 if (key) {
1485 /*
1486 * We're using one, so create a matching key
1487 * on the newsk structure. If we fail to get
1488 * memory, then we end up not copying the key
1489 * across. Shucks.
1490 */
1491 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1492 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1493 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1494 }
1495#endif
1496
1497 if (__inet_inherit_port(sk, newsk) < 0)
1498 goto put_and_exit;
1499 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1500 if (likely(*own_req)) {
1501 tcp_move_syn(newtp, req);
1502 ireq->ireq_opt = NULL;
1503 } else {
1504 newinet->inet_opt = NULL;
1505 }
1506 return newsk;
1507
1508exit_overflow:
1509 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1510exit_nonewsk:
1511 dst_release(dst);
1512exit:
1513 tcp_listendrop(sk);
1514 return NULL;
1515put_and_exit:
1516 newinet->inet_opt = NULL;
1517 inet_csk_prepare_forced_close(newsk);
1518 tcp_done(newsk);
1519 goto exit;
1520}
1521EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1522
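/* With syncookies enabled, a non-SYN segment arriving on a listening socket
 * may complete a cookie handshake; cookie_v4_check() is asked to validate it.
 */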
1523static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1524{
1525#ifdef CONFIG_SYN_COOKIES
1526 const struct tcphdr *th = tcp_hdr(skb);
1527
1528 if (!th->syn)
1529 sk = cookie_v4_check(sk, skb);
1530#endif
1531 return sk;
1532}
1533
1534u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1535 struct tcphdr *th, u32 *cookie)
1536{
1537 u16 mss = 0;
1538#ifdef CONFIG_SYN_COOKIES
1539 mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1540 &tcp_request_sock_ipv4_ops, sk, th);
1541 if (mss) {
1542 *cookie = __cookie_v4_init_sequence(iph, th, &mss);
1543 tcp_synq_overflow(sk);
1544 }
1545#endif
1546 return mss;
1547}
1548
1549/* The socket must have its spinlock held when we get
1550 * here, unless it is a TCP_LISTEN socket.
1551 *
1552 * We have a potential double-lock case here, so even when
1553 * doing backlog processing we use the BH locking scheme.
1554 * This is because we cannot sleep with the original spinlock
1555 * held.
1556 */
1557int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1558{
1559 struct sock *rsk;
1560
1561 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1562 struct dst_entry *dst = sk->sk_rx_dst;
1563
1564 sock_rps_save_rxhash(sk, skb);
1565 sk_mark_napi_id(sk, skb);
1566 if (dst) {
1567 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1568 !dst->ops->check(dst, 0)) {
1569 dst_release(dst);
1570 sk->sk_rx_dst = NULL;
1571 }
1572 }
1573 tcp_rcv_established(sk, skb);
1574 return 0;
1575 }
1576
1577 if (tcp_checksum_complete(skb))
1578 goto csum_err;
1579
1580 if (sk->sk_state == TCP_LISTEN) {
1581 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1582
1583 if (!nsk)
1584 goto discard;
1585 if (nsk != sk) {
1586 if (tcp_child_process(sk, nsk, skb)) {
1587 rsk = nsk;
1588 goto reset;
1589 }
1590 return 0;
1591 }
1592 } else
1593 sock_rps_save_rxhash(sk, skb);
1594
1595 if (tcp_rcv_state_process(sk, skb)) {
1596 rsk = sk;
1597 goto reset;
1598 }
1599 return 0;
1600
1601reset:
1602 tcp_v4_send_reset(rsk, skb);
1603discard:
1604 kfree_skb(skb);
1605 /* Be careful here. If this function gets more complicated and
1606 * gcc suffers from register pressure on the x86, sk (in %ebx)
1607 * might be destroyed here. This current version compiles correctly,
1608 * but you have been warned.
1609 */
1610 return 0;
1611
1612csum_err:
1613 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1614 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1615 goto discard;
1616}
1617EXPORT_SYMBOL(tcp_v4_do_rcv);
1618
1619int tcp_v4_early_demux(struct sk_buff *skb)
1620{
1621 const struct iphdr *iph;
1622 const struct tcphdr *th;
1623 struct sock *sk;
1624
1625 if (skb->pkt_type != PACKET_HOST)
1626 return 0;
1627
1628 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1629 return 0;
1630
1631 iph = ip_hdr(skb);
1632 th = tcp_hdr(skb);
1633
1634 if (th->doff < sizeof(struct tcphdr) / 4)
1635 return 0;
1636
1637 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1638 iph->saddr, th->source,
1639 iph->daddr, ntohs(th->dest),
1640 skb->skb_iif, inet_sdif(skb));
1641 if (sk) {
1642 skb->sk = sk;
1643 skb->destructor = sock_edemux;
1644 if (sk_fullsock(sk)) {
1645 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1646
1647 if (dst)
1648 dst = dst_check(dst, 0);
1649 if (dst &&
1650 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1651 skb_dst_set_noref(skb, dst);
1652 }
1653 }
1654 return 0;
1655}
1656
1657bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1658{
1659	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
1660	u32 tail_gso_size, tail_gso_segs;
1661	struct skb_shared_info *shinfo;
1662 const struct tcphdr *th;
1663 struct tcphdr *thtail;
1664 struct sk_buff *tail;
1665 unsigned int hdrlen;
1666 bool fragstolen;
1667 u32 gso_segs;
1668	u32 gso_size;
1669	int delta;
1670
1671 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1672 * we can fix skb->truesize to its real value to avoid future drops.
1673 * This is valid because skb is not yet charged to the socket.
1674 * It has been noticed pure SACK packets were sometimes dropped
1675 * (if cooked by drivers without copybreak feature).
1676 */
1677 skb_condense(skb);
1678
1679	skb_dst_drop(skb);
1680
1681 if (unlikely(tcp_checksum_complete(skb))) {
1682 bh_unlock_sock(sk);
1683 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1684 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1685 return true;
1686 }
1687
1688 /* Attempt coalescing to last skb in backlog, even if we are
1689 * above the limits.
1690 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1691 */
1692 th = (const struct tcphdr *)skb->data;
1693 hdrlen = th->doff * 4;
1694
1695 tail = sk->sk_backlog.tail;
1696 if (!tail)
1697 goto no_coalesce;
1698 thtail = (struct tcphdr *)tail->data;
1699
1700 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1701 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1702 ((TCP_SKB_CB(tail)->tcp_flags |
1703 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1704 !((TCP_SKB_CB(tail)->tcp_flags &
1705 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1706 ((TCP_SKB_CB(tail)->tcp_flags ^
1707 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1708#ifdef CONFIG_TLS_DEVICE
1709 tail->decrypted != skb->decrypted ||
1710#endif
1711 thtail->doff != th->doff ||
1712 memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1713 goto no_coalesce;
1714
1715 __skb_pull(skb, hdrlen);
1716
1717	shinfo = skb_shinfo(skb);
1718 gso_size = shinfo->gso_size ?: skb->len;
1719 gso_segs = shinfo->gso_segs ?: 1;
1720
1721 shinfo = skb_shinfo(tail);
1722 tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
1723 tail_gso_segs = shinfo->gso_segs ?: 1;
1724
1725 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
David Brazdil0f672f62019-12-10 10:32:29 +00001726 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1727
1728		if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
1729			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1730			thtail->window = th->window;
1731		}
1731 }
1732
1733 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1734 * thtail->fin, so that the fast path in tcp_rcv_established()
1735 * is not entered if we append a packet with a FIN.
1736 * SYN, RST, URG are not present.
1737 * ACK is set on both packets.
1738 * PSH : we do not really care in TCP stack,
1739 * at least for 'GRO' packets.
1740 */
1741 thtail->fin |= th->fin;
1742 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1743
1744 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1745 TCP_SKB_CB(tail)->has_rxtstamp = true;
1746 tail->tstamp = skb->tstamp;
1747 skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1748 }
1749
1750 /* Not as strict as GRO. We only need to carry mss max value */
1751		shinfo->gso_size = max(gso_size, tail_gso_size);
1752 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
1753
1754 sk->sk_backlog.len += delta;
1755 __NET_INC_STATS(sock_net(sk),
1756 LINUX_MIB_TCPBACKLOGCOALESCE);
1757 kfree_skb_partial(skb, fragstolen);
1758 return false;
1759 }
1760 __skb_push(skb, hdrlen);
1761
1762no_coalesce:
1763	/* Only the socket owner can try to collapse/prune rx queues
1764	 * to reduce memory overhead, so add a little headroom here.
1765	 * Only a few socket backlogs are likely to be non-empty concurrently.
1766 */
1767 limit += 64*1024;
1768
1769	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1770 bh_unlock_sock(sk);
1771 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1772 return true;
1773 }
1774 return false;
1775}
1776EXPORT_SYMBOL(tcp_add_backlog);
1777
1778int tcp_filter(struct sock *sk, struct sk_buff *skb)
1779{
1780 struct tcphdr *th = (struct tcphdr *)skb->data;
1781
1782	return sk_filter_trim_cap(sk, skb, th->doff * 4);
1783}
1784EXPORT_SYMBOL(tcp_filter);
1785
1786static void tcp_v4_restore_cb(struct sk_buff *skb)
1787{
1788 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1789 sizeof(struct inet_skb_parm));
1790}
1791
1792static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1793 const struct tcphdr *th)
1794{
1795	/* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB();
1796	 * barrier() makes sure the compiler won't play aliasing games.
1797 */
1798 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1799 sizeof(struct inet_skb_parm));
1800 barrier();
1801
1802 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1803 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1804 skb->len - th->doff * 4);
1805 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1806 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1807 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1808 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1809 TCP_SKB_CB(skb)->sacked = 0;
1810 TCP_SKB_CB(skb)->has_rxtstamp =
1811 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1812}
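/*
 * tcp_v4_fill_cb() computes end_seq as seq plus the payload length plus one
 * for SYN and one for FIN, since both flags consume sequence space.  A
 * minimal sketch of that arithmetic (illustrative helper, not part of this
 * file; skb_len is the segment length starting at the TCP header):
 *
 *	static u32 tcp_end_seq(const struct tcphdr *th, unsigned int skb_len)
 *	{
 *		u32 payload = skb_len - th->doff * 4;
 *
 *		return ntohl(th->seq) + th->syn + th->fin + payload;
 *	}
 */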
1813
1814/*
1815 * From tcp_input.c
1816 */
1817
1818int tcp_v4_rcv(struct sk_buff *skb)
1819{
1820 struct net *net = dev_net(skb->dev);
1821	struct sk_buff *skb_to_free;
1822	int sdif = inet_sdif(skb);
1823 const struct iphdr *iph;
1824 const struct tcphdr *th;
1825 bool refcounted;
1826 struct sock *sk;
1827 int ret;
1828
1829 if (skb->pkt_type != PACKET_HOST)
1830 goto discard_it;
1831
1832 /* Count it even if it's bad */
1833 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1834
1835 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1836 goto discard_it;
1837
1838 th = (const struct tcphdr *)skb->data;
1839
1840 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1841 goto bad_packet;
1842 if (!pskb_may_pull(skb, th->doff * 4))
1843 goto discard_it;
1844
1845	/* An explanation is required here, I think.
1846	 * Packet length and doff are validated by header prediction,
1847	 * provided the th->doff == 0 case has been eliminated above.
1848	 * So we defer the checks. */
1849
1850 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1851 goto csum_error;
1852
1853 th = (const struct tcphdr *)skb->data;
1854 iph = ip_hdr(skb);
1855lookup:
1856 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1857 th->dest, sdif, &refcounted);
1858 if (!sk)
1859 goto no_tcp_socket;
1860
1861process:
1862 if (sk->sk_state == TCP_TIME_WAIT)
1863 goto do_time_wait;
1864
1865 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1866 struct request_sock *req = inet_reqsk(sk);
1867 bool req_stolen = false;
1868 struct sock *nsk;
1869
1870 sk = req->rsk_listener;
1871 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1872 sk_drops_add(sk, skb);
1873 reqsk_put(req);
1874 goto discard_it;
1875 }
1876 if (tcp_checksum_complete(skb)) {
1877 reqsk_put(req);
1878 goto csum_error;
1879 }
1880 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1881 inet_csk_reqsk_queue_drop_and_put(sk, req);
1882 goto lookup;
1883 }
1884 /* We own a reference on the listener, increase it again
1885 * as we might lose it too soon.
1886 */
1887 sock_hold(sk);
1888 refcounted = true;
1889 nsk = NULL;
1890 if (!tcp_filter(sk, skb)) {
1891 th = (const struct tcphdr *)skb->data;
1892 iph = ip_hdr(skb);
1893 tcp_v4_fill_cb(skb, iph, th);
1894 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1895 }
1896 if (!nsk) {
1897 reqsk_put(req);
1898 if (req_stolen) {
1899 /* Another cpu got exclusive access to req
1900 * and created a full blown socket.
1901 * Try to feed this packet to this socket
1902 * instead of discarding it.
1903 */
1904 tcp_v4_restore_cb(skb);
1905 sock_put(sk);
1906 goto lookup;
1907 }
1908 goto discard_and_relse;
1909 }
1910 if (nsk == sk) {
1911 reqsk_put(req);
1912 tcp_v4_restore_cb(skb);
1913 } else if (tcp_child_process(sk, nsk, skb)) {
1914 tcp_v4_send_reset(nsk, skb);
1915 goto discard_and_relse;
1916 } else {
1917 sock_put(sk);
1918 return 0;
1919 }
1920 }
1921 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1922 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1923 goto discard_and_relse;
1924 }
1925
1926 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1927 goto discard_and_relse;
1928
1929 if (tcp_v4_inbound_md5_hash(sk, skb))
1930 goto discard_and_relse;
1931
1932	nf_reset_ct(skb);
1933
1934 if (tcp_filter(sk, skb))
1935 goto discard_and_relse;
1936 th = (const struct tcphdr *)skb->data;
1937 iph = ip_hdr(skb);
1938 tcp_v4_fill_cb(skb, iph, th);
1939
1940 skb->dev = NULL;
1941
1942 if (sk->sk_state == TCP_LISTEN) {
1943 ret = tcp_v4_do_rcv(sk, skb);
1944 goto put_and_return;
1945 }
1946
1947 sk_incoming_cpu_update(sk);
1948
1949 bh_lock_sock_nested(sk);
1950 tcp_segs_in(tcp_sk(sk), skb);
1951 ret = 0;
1952 if (!sock_owned_by_user(sk)) {
1953		skb_to_free = sk->sk_rx_skb_cache;
1954		sk->sk_rx_skb_cache = NULL;
1955		ret = tcp_v4_do_rcv(sk, skb);
1956	} else {
1957		if (tcp_add_backlog(sk, skb))
1958			goto discard_and_relse;
1959		skb_to_free = NULL;
1960	}
1961	bh_unlock_sock(sk);
1962	if (skb_to_free)
1963		__kfree_skb(skb_to_free);
1964
1965put_and_return:
1966 if (refcounted)
1967 sock_put(sk);
1968
1969 return ret;
1970
1971no_tcp_socket:
1972 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1973 goto discard_it;
1974
1975 tcp_v4_fill_cb(skb, iph, th);
1976
1977 if (tcp_checksum_complete(skb)) {
1978csum_error:
1979 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1980bad_packet:
1981 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1982 } else {
1983 tcp_v4_send_reset(NULL, skb);
1984 }
1985
1986discard_it:
1987 /* Discard frame. */
1988 kfree_skb(skb);
1989 return 0;
1990
1991discard_and_relse:
1992 sk_drops_add(sk, skb);
1993 if (refcounted)
1994 sock_put(sk);
1995 goto discard_it;
1996
1997do_time_wait:
1998 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1999 inet_twsk_put(inet_twsk(sk));
2000 goto discard_it;
2001 }
2002
2003 tcp_v4_fill_cb(skb, iph, th);
2004
2005 if (tcp_checksum_complete(skb)) {
2006 inet_twsk_put(inet_twsk(sk));
2007 goto csum_error;
2008 }
2009 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2010 case TCP_TW_SYN: {
2011 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2012 &tcp_hashinfo, skb,
2013 __tcp_hdrlen(th),
2014 iph->saddr, th->source,
2015 iph->daddr, th->dest,
2016 inet_iif(skb),
2017 sdif);
2018 if (sk2) {
2019 inet_twsk_deschedule_put(inet_twsk(sk));
2020 sk = sk2;
2021 tcp_v4_restore_cb(skb);
2022 refcounted = false;
2023 goto process;
2024 }
2025 }
2026 /* to ACK */
2027 /* fall through */
2028 case TCP_TW_ACK:
2029 tcp_v4_timewait_ack(sk, skb);
2030 break;
2031 case TCP_TW_RST:
2032 tcp_v4_send_reset(sk, skb);
2033 inet_twsk_deschedule_put(inet_twsk(sk));
2034 goto discard_it;
2035 case TCP_TW_SUCCESS:;
2036 }
2037 goto discard_it;
2038}
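/*
 * Receive path summary for tcp_v4_rcv() above: validate the header and
 * checksum, look the socket up in tcp_hashinfo, handle TCP_NEW_SYN_RECV and
 * TCP_TIME_WAIT specially, run xfrm policy, MD5 and socket-filter checks,
 * then either process the segment via tcp_v4_do_rcv() when the socket is not
 * owned by user context, or queue it with tcp_add_backlog().
 */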
2039
2040static struct timewait_sock_ops tcp_timewait_sock_ops = {
2041 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2042 .twsk_unique = tcp_twsk_unique,
2043 .twsk_destructor= tcp_twsk_destructor,
2044};
2045
2046void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2047{
2048 struct dst_entry *dst = skb_dst(skb);
2049
2050 if (dst && dst_hold_safe(dst)) {
2051 sk->sk_rx_dst = dst;
2052 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2053 }
2054}
2055EXPORT_SYMBOL(inet_sk_rx_dst_set);
2056
2057const struct inet_connection_sock_af_ops ipv4_specific = {
2058 .queue_xmit = ip_queue_xmit,
2059 .send_check = tcp_v4_send_check,
2060 .rebuild_header = inet_sk_rebuild_header,
2061 .sk_rx_dst_set = inet_sk_rx_dst_set,
2062 .conn_request = tcp_v4_conn_request,
2063 .syn_recv_sock = tcp_v4_syn_recv_sock,
2064 .net_header_len = sizeof(struct iphdr),
2065 .setsockopt = ip_setsockopt,
2066 .getsockopt = ip_getsockopt,
2067 .addr2sockaddr = inet_csk_addr2sockaddr,
2068 .sockaddr_len = sizeof(struct sockaddr_in),
2069#ifdef CONFIG_COMPAT
2070 .compat_setsockopt = compat_ip_setsockopt,
2071 .compat_getsockopt = compat_ip_getsockopt,
2072#endif
2073 .mtu_reduced = tcp_v4_mtu_reduced,
2074};
2075EXPORT_SYMBOL(ipv4_specific);
2076
2077#ifdef CONFIG_TCP_MD5SIG
2078static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2079 .md5_lookup = tcp_v4_md5_lookup,
2080 .calc_md5_hash = tcp_v4_md5_hash_skb,
2081 .md5_parse = tcp_v4_parse_md5_keys,
2082};
2083#endif
2084
2085/* NOTE: many fields are set to zero explicitly by the call to
2086 * sk_alloc(), so they need not be initialized here.
2087 */
2088static int tcp_v4_init_sock(struct sock *sk)
2089{
2090 struct inet_connection_sock *icsk = inet_csk(sk);
2091
2092 tcp_init_sock(sk);
2093
2094 icsk->icsk_af_ops = &ipv4_specific;
2095
2096#ifdef CONFIG_TCP_MD5SIG
2097 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2098#endif
2099
2100 return 0;
2101}
2102
2103void tcp_v4_destroy_sock(struct sock *sk)
2104{
2105 struct tcp_sock *tp = tcp_sk(sk);
2106
2107 trace_tcp_destroy_sock(sk);
2108
2109 tcp_clear_xmit_timers(sk);
2110
2111 tcp_cleanup_congestion_control(sk);
2112
2113 tcp_cleanup_ulp(sk);
2114
2115	/* Clean up the write buffer. */
2116 tcp_write_queue_purge(sk);
2117
2118 /* Check if we want to disable active TFO */
2119 tcp_fastopen_active_disable_ofo_check(sk);
2120
2121 /* Cleans up our, hopefully empty, out_of_order_queue. */
2122 skb_rbtree_purge(&tp->out_of_order_queue);
2123
2124#ifdef CONFIG_TCP_MD5SIG
2125 /* Clean up the MD5 key list, if any */
2126 if (tp->md5sig_info) {
2127 tcp_clear_md5_list(sk);
2128 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2129 tp->md5sig_info = NULL;
2130 }
2131#endif
2132
2133 /* Clean up a referenced TCP bind bucket. */
2134 if (inet_csk(sk)->icsk_bind_hash)
2135 inet_put_port(sk);
2136
2137	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2138
2139 /* If socket is aborted during connect operation */
2140 tcp_free_fastopen_req(tp);
2141 tcp_fastopen_destroy_cipher(sk);
2142 tcp_saved_syn_free(tp);
2143
2144 sk_sockets_allocated_dec(sk);
2145}
2146EXPORT_SYMBOL(tcp_v4_destroy_sock);
2147
2148#ifdef CONFIG_PROC_FS
2149/* Proc filesystem TCP sock list dumping. */
2150
2151/*
2152 * Get the next listener socket following cur. If cur is NULL, get the first socket
2153 * starting from bucket given in st->bucket; when st->bucket is zero the
2154 * very first socket in the hash table is returned.
2155 */
2156static void *listening_get_next(struct seq_file *seq, void *cur)
2157{
2158 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2159 struct tcp_iter_state *st = seq->private;
2160 struct net *net = seq_file_net(seq);
2161 struct inet_listen_hashbucket *ilb;
2162	struct hlist_nulls_node *node;
2163	struct sock *sk = cur;
2164
2165 if (!sk) {
2166get_head:
2167 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2168 spin_lock(&ilb->lock);
2169		sk = sk_nulls_head(&ilb->nulls_head);
2170		st->offset = 0;
2171 goto get_sk;
2172 }
2173 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2174 ++st->num;
2175 ++st->offset;
2176
2177	sk = sk_nulls_next(sk);
2178get_sk:
2179	sk_nulls_for_each_from(sk, node) {
2180		if (!net_eq(sock_net(sk), net))
2181 continue;
2182 if (sk->sk_family == afinfo->family)
2183 return sk;
2184 }
2185 spin_unlock(&ilb->lock);
2186 st->offset = 0;
2187 if (++st->bucket < INET_LHTABLE_SIZE)
2188 goto get_head;
2189 return NULL;
2190}
2191
2192static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2193{
2194 struct tcp_iter_state *st = seq->private;
2195 void *rc;
2196
2197 st->bucket = 0;
2198 st->offset = 0;
2199 rc = listening_get_next(seq, NULL);
2200
2201 while (rc && *pos) {
2202 rc = listening_get_next(seq, rc);
2203 --*pos;
2204 }
2205 return rc;
2206}
2207
2208static inline bool empty_bucket(const struct tcp_iter_state *st)
2209{
2210 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2211}
2212
2213/*
2214 * Get first established socket starting from bucket given in st->bucket.
2215 * If st->bucket is zero, the very first socket in the hash is returned.
2216 */
2217static void *established_get_first(struct seq_file *seq)
2218{
2219 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2220 struct tcp_iter_state *st = seq->private;
2221 struct net *net = seq_file_net(seq);
2222 void *rc = NULL;
2223
2224 st->offset = 0;
2225 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2226 struct sock *sk;
2227 struct hlist_nulls_node *node;
2228 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2229
2230 /* Lockless fast path for the common case of empty buckets */
2231 if (empty_bucket(st))
2232 continue;
2233
2234 spin_lock_bh(lock);
2235 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2236 if (sk->sk_family != afinfo->family ||
2237 !net_eq(sock_net(sk), net)) {
2238 continue;
2239 }
2240 rc = sk;
2241 goto out;
2242 }
2243 spin_unlock_bh(lock);
2244 }
2245out:
2246 return rc;
2247}
2248
2249static void *established_get_next(struct seq_file *seq, void *cur)
2250{
2251 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2252 struct sock *sk = cur;
2253 struct hlist_nulls_node *node;
2254 struct tcp_iter_state *st = seq->private;
2255 struct net *net = seq_file_net(seq);
2256
2257 ++st->num;
2258 ++st->offset;
2259
2260 sk = sk_nulls_next(sk);
2261
2262 sk_nulls_for_each_from(sk, node) {
2263 if (sk->sk_family == afinfo->family &&
2264 net_eq(sock_net(sk), net))
2265 return sk;
2266 }
2267
2268 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2269 ++st->bucket;
2270 return established_get_first(seq);
2271}
2272
2273static void *established_get_idx(struct seq_file *seq, loff_t pos)
2274{
2275 struct tcp_iter_state *st = seq->private;
2276 void *rc;
2277
2278 st->bucket = 0;
2279 rc = established_get_first(seq);
2280
2281 while (rc && pos) {
2282 rc = established_get_next(seq, rc);
2283 --pos;
2284 }
2285 return rc;
2286}
2287
2288static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2289{
2290 void *rc;
2291 struct tcp_iter_state *st = seq->private;
2292
2293 st->state = TCP_SEQ_STATE_LISTENING;
2294 rc = listening_get_idx(seq, &pos);
2295
2296 if (!rc) {
2297 st->state = TCP_SEQ_STATE_ESTABLISHED;
2298 rc = established_get_idx(seq, pos);
2299 }
2300
2301 return rc;
2302}
2303
2304static void *tcp_seek_last_pos(struct seq_file *seq)
2305{
2306 struct tcp_iter_state *st = seq->private;
2307	int bucket = st->bucket;
2308	int offset = st->offset;
2309 int orig_num = st->num;
2310 void *rc = NULL;
2311
2312 switch (st->state) {
2313 case TCP_SEQ_STATE_LISTENING:
2314 if (st->bucket >= INET_LHTABLE_SIZE)
2315 break;
2316 st->state = TCP_SEQ_STATE_LISTENING;
2317 rc = listening_get_next(seq, NULL);
2318		while (offset-- && rc && bucket == st->bucket)
2319			rc = listening_get_next(seq, rc);
2320 if (rc)
2321 break;
2322 st->bucket = 0;
2323 st->state = TCP_SEQ_STATE_ESTABLISHED;
2324 /* Fallthrough */
2325 case TCP_SEQ_STATE_ESTABLISHED:
2326 if (st->bucket > tcp_hashinfo.ehash_mask)
2327 break;
2328 rc = established_get_first(seq);
2329		while (offset-- && rc && bucket == st->bucket)
2330			rc = established_get_next(seq, rc);
2331 }
2332
2333 st->num = orig_num;
2334
2335 return rc;
2336}
2337
2338void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2339{
2340 struct tcp_iter_state *st = seq->private;
2341 void *rc;
2342
2343 if (*pos && *pos == st->last_pos) {
2344 rc = tcp_seek_last_pos(seq);
2345 if (rc)
2346 goto out;
2347 }
2348
2349 st->state = TCP_SEQ_STATE_LISTENING;
2350 st->num = 0;
2351 st->bucket = 0;
2352 st->offset = 0;
2353 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2354
2355out:
2356 st->last_pos = *pos;
2357 return rc;
2358}
2359EXPORT_SYMBOL(tcp_seq_start);
2360
2361void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2362{
2363 struct tcp_iter_state *st = seq->private;
2364 void *rc = NULL;
2365
2366 if (v == SEQ_START_TOKEN) {
2367 rc = tcp_get_idx(seq, 0);
2368 goto out;
2369 }
2370
2371 switch (st->state) {
2372 case TCP_SEQ_STATE_LISTENING:
2373 rc = listening_get_next(seq, v);
2374 if (!rc) {
2375 st->state = TCP_SEQ_STATE_ESTABLISHED;
2376 st->bucket = 0;
2377 st->offset = 0;
2378 rc = established_get_first(seq);
2379 }
2380 break;
2381 case TCP_SEQ_STATE_ESTABLISHED:
2382 rc = established_get_next(seq, v);
2383 break;
2384 }
2385out:
2386 ++*pos;
2387 st->last_pos = *pos;
2388 return rc;
2389}
2390EXPORT_SYMBOL(tcp_seq_next);
2391
2392void tcp_seq_stop(struct seq_file *seq, void *v)
2393{
2394 struct tcp_iter_state *st = seq->private;
2395
2396 switch (st->state) {
2397 case TCP_SEQ_STATE_LISTENING:
2398 if (v != SEQ_START_TOKEN)
2399 spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2400 break;
2401 case TCP_SEQ_STATE_ESTABLISHED:
2402 if (v)
2403 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2404 break;
2405 }
2406}
2407EXPORT_SYMBOL(tcp_seq_stop);
2408
2409static void get_openreq4(const struct request_sock *req,
2410 struct seq_file *f, int i)
2411{
2412 const struct inet_request_sock *ireq = inet_rsk(req);
2413 long delta = req->rsk_timer.expires - jiffies;
2414
2415 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2416 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2417 i,
2418 ireq->ir_loc_addr,
2419 ireq->ir_num,
2420 ireq->ir_rmt_addr,
2421 ntohs(ireq->ir_rmt_port),
2422 TCP_SYN_RECV,
2423 0, 0, /* could print option size, but that is af dependent. */
2424 1, /* timers active (only the expire timer) */
2425 jiffies_delta_to_clock_t(delta),
2426 req->num_timeout,
2427 from_kuid_munged(seq_user_ns(f),
2428 sock_i_uid(req->rsk_listener)),
2429 0, /* non standard timer */
2430 0, /* open_requests have no inode */
2431 0,
2432 req);
2433}
2434
2435static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2436{
2437 int timer_active;
2438 unsigned long timer_expires;
2439 const struct tcp_sock *tp = tcp_sk(sk);
2440 const struct inet_connection_sock *icsk = inet_csk(sk);
2441 const struct inet_sock *inet = inet_sk(sk);
2442 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2443 __be32 dest = inet->inet_daddr;
2444 __be32 src = inet->inet_rcv_saddr;
2445 __u16 destp = ntohs(inet->inet_dport);
2446 __u16 srcp = ntohs(inet->inet_sport);
2447 int rx_queue;
2448 int state;
2449
2450 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2451 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2452 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2453 timer_active = 1;
2454 timer_expires = icsk->icsk_timeout;
2455 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2456 timer_active = 4;
2457 timer_expires = icsk->icsk_timeout;
2458 } else if (timer_pending(&sk->sk_timer)) {
2459 timer_active = 2;
2460 timer_expires = sk->sk_timer.expires;
2461 } else {
2462 timer_active = 0;
2463 timer_expires = jiffies;
2464 }
2465
2466 state = inet_sk_state_load(sk);
2467 if (state == TCP_LISTEN)
2468 rx_queue = sk->sk_ack_backlog;
2469 else
2470 /* Because we don't lock the socket,
2471 * we might find a transient negative value.
2472 */
2473		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2474				      READ_ONCE(tp->copied_seq), 0);
2475
2476 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2477 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2478 i, src, srcp, dest, destp, state,
2479		READ_ONCE(tp->write_seq) - tp->snd_una,
2480		rx_queue,
2481 timer_active,
2482 jiffies_delta_to_clock_t(timer_expires - jiffies),
2483 icsk->icsk_retransmits,
2484 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2485 icsk->icsk_probes_out,
2486 sock_i_ino(sk),
2487 refcount_read(&sk->sk_refcnt), sk,
2488 jiffies_to_clock_t(icsk->icsk_rto),
2489 jiffies_to_clock_t(icsk->icsk_ack.ato),
2490		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2491		tp->snd_cwnd,
2492 state == TCP_LISTEN ?
2493 fastopenq->max_qlen :
2494 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2495}
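/*
 * The seq_printf() format above is what shows up in /proc/net/tcp: local and
 * remote endpoints as hex "%08X:%04X" pairs, with ports converted to host
 * order and addresses printed from their __be32 representation.  A minimal
 * userspace sketch (illustrative only, not part of this file) that parses one
 * such address:port token:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	static int parse_addr_port(const char *tok, uint32_t *addr, uint16_t *port)
 *	{
 *		unsigned int a, p;
 *
 *		if (sscanf(tok, "%8X:%4X", &a, &p) != 2)
 *			return -1;
 *		*addr = a;
 *		*port = (uint16_t)p;
 *		return 0;
 *	}
 */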
2496
2497static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2498 struct seq_file *f, int i)
2499{
2500 long delta = tw->tw_timer.expires - jiffies;
2501 __be32 dest, src;
2502 __u16 destp, srcp;
2503
2504 dest = tw->tw_daddr;
2505 src = tw->tw_rcv_saddr;
2506 destp = ntohs(tw->tw_dport);
2507 srcp = ntohs(tw->tw_sport);
2508
2509 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2510 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2511 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2512 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2513 refcount_read(&tw->tw_refcnt), tw);
2514}
2515
2516#define TMPSZ 150
2517
2518static int tcp4_seq_show(struct seq_file *seq, void *v)
2519{
2520 struct tcp_iter_state *st;
2521 struct sock *sk = v;
2522
2523 seq_setwidth(seq, TMPSZ - 1);
2524 if (v == SEQ_START_TOKEN) {
2525 seq_puts(seq, " sl local_address rem_address st tx_queue "
2526 "rx_queue tr tm->when retrnsmt uid timeout "
2527 "inode");
2528 goto out;
2529 }
2530 st = seq->private;
2531
2532 if (sk->sk_state == TCP_TIME_WAIT)
2533 get_timewait4_sock(v, seq, st->num);
2534 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2535 get_openreq4(v, seq, st->num);
2536 else
2537 get_tcp4_sock(v, seq, st->num);
2538out:
2539 seq_pad(seq, '\n');
2540 return 0;
2541}
2542
2543static const struct seq_operations tcp4_seq_ops = {
2544 .show = tcp4_seq_show,
2545 .start = tcp_seq_start,
2546 .next = tcp_seq_next,
2547 .stop = tcp_seq_stop,
2548};
2549
2550static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2551 .family = AF_INET,
2552};
2553
2554static int __net_init tcp4_proc_init_net(struct net *net)
2555{
2556 if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
2557 sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
2558 return -ENOMEM;
2559 return 0;
2560}
2561
2562static void __net_exit tcp4_proc_exit_net(struct net *net)
2563{
2564 remove_proc_entry("tcp", net->proc_net);
2565}
2566
2567static struct pernet_operations tcp4_net_ops = {
2568 .init = tcp4_proc_init_net,
2569 .exit = tcp4_proc_exit_net,
2570};
2571
2572int __init tcp4_proc_init(void)
2573{
2574 return register_pernet_subsys(&tcp4_net_ops);
2575}
2576
2577void tcp4_proc_exit(void)
2578{
2579 unregister_pernet_subsys(&tcp4_net_ops);
2580}
2581#endif /* CONFIG_PROC_FS */
2582
2583struct proto tcp_prot = {
2584 .name = "TCP",
2585 .owner = THIS_MODULE,
2586 .close = tcp_close,
2587 .pre_connect = tcp_v4_pre_connect,
2588 .connect = tcp_v4_connect,
2589 .disconnect = tcp_disconnect,
2590 .accept = inet_csk_accept,
2591 .ioctl = tcp_ioctl,
2592 .init = tcp_v4_init_sock,
2593 .destroy = tcp_v4_destroy_sock,
2594 .shutdown = tcp_shutdown,
2595 .setsockopt = tcp_setsockopt,
2596 .getsockopt = tcp_getsockopt,
2597 .keepalive = tcp_set_keepalive,
2598 .recvmsg = tcp_recvmsg,
2599 .sendmsg = tcp_sendmsg,
2600 .sendpage = tcp_sendpage,
2601 .backlog_rcv = tcp_v4_do_rcv,
2602 .release_cb = tcp_release_cb,
2603 .hash = inet_hash,
2604 .unhash = inet_unhash,
2605 .get_port = inet_csk_get_port,
2606 .enter_memory_pressure = tcp_enter_memory_pressure,
2607 .leave_memory_pressure = tcp_leave_memory_pressure,
2608 .stream_memory_free = tcp_stream_memory_free,
2609 .sockets_allocated = &tcp_sockets_allocated,
2610 .orphan_count = &tcp_orphan_count,
2611 .memory_allocated = &tcp_memory_allocated,
2612 .memory_pressure = &tcp_memory_pressure,
2613 .sysctl_mem = sysctl_tcp_mem,
2614 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2615 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
2616 .max_header = MAX_TCP_HEADER,
2617 .obj_size = sizeof(struct tcp_sock),
2618 .slab_flags = SLAB_TYPESAFE_BY_RCU,
2619 .twsk_prot = &tcp_timewait_sock_ops,
2620 .rsk_prot = &tcp_request_sock_ops,
2621 .h.hashinfo = &tcp_hashinfo,
2622 .no_autobind = true,
2623#ifdef CONFIG_COMPAT
2624 .compat_setsockopt = compat_tcp_setsockopt,
2625 .compat_getsockopt = compat_tcp_getsockopt,
2626#endif
2627 .diag_destroy = tcp_abort,
2628};
2629EXPORT_SYMBOL(tcp_prot);
2630
2631static void __net_exit tcp_sk_exit(struct net *net)
2632{
2633 int cpu;
2634
2635	if (net->ipv4.tcp_congestion_control)
2636		module_put(net->ipv4.tcp_congestion_control->owner);
2637
2638 for_each_possible_cpu(cpu)
2639 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2640 free_percpu(net->ipv4.tcp_sk);
2641}
2642
2643static int __net_init tcp_sk_init(struct net *net)
2644{
2645 int res, cpu, cnt;
2646
2647 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2648 if (!net->ipv4.tcp_sk)
2649 return -ENOMEM;
2650
2651 for_each_possible_cpu(cpu) {
2652 struct sock *sk;
2653
2654 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2655 IPPROTO_TCP, net);
2656 if (res)
2657 goto fail;
2658 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2659
2660 /* Please enforce IP_DF and IPID==0 for RST and
2661 * ACK sent in SYN-RECV and TIME-WAIT state.
2662 */
2663 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
2664
2665 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2666 }
2667
2668 net->ipv4.sysctl_tcp_ecn = 2;
2669 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2670
2671 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2672	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
2673	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2674	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2675	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
2676
2677 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2678 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2679 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2680
2681 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2682 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2683 net->ipv4.sysctl_tcp_syncookies = 1;
2684 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2685 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2686 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2687 net->ipv4.sysctl_tcp_orphan_retries = 0;
2688 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2689 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2690 net->ipv4.sysctl_tcp_tw_reuse = 2;
2691
2692 cnt = tcp_hashinfo.ehash_mask + 1;
2693	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
2694	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2695
2696	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
2697	net->ipv4.sysctl_tcp_sack = 1;
2698 net->ipv4.sysctl_tcp_window_scaling = 1;
2699 net->ipv4.sysctl_tcp_timestamps = 1;
2700 net->ipv4.sysctl_tcp_early_retrans = 3;
2701 net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
2702 net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
2703 net->ipv4.sysctl_tcp_retrans_collapse = 1;
2704 net->ipv4.sysctl_tcp_max_reordering = 300;
2705 net->ipv4.sysctl_tcp_dsack = 1;
2706 net->ipv4.sysctl_tcp_app_win = 31;
2707 net->ipv4.sysctl_tcp_adv_win_scale = 1;
2708 net->ipv4.sysctl_tcp_frto = 2;
2709 net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
2710 /* This limits the percentage of the congestion window which we
2711 * will allow a single TSO frame to consume. Building TSO frames
2712 * which are too large can cause TCP streams to be bursty.
2713 */
2714 net->ipv4.sysctl_tcp_tso_win_divisor = 3;
2715	/* Default TSQ limit of 16 TSO segments */
2716	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
2717	/* rfc5961 challenge ack rate limiting */
2718 net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
2719 net->ipv4.sysctl_tcp_min_tso_segs = 2;
2720 net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
2721 net->ipv4.sysctl_tcp_autocorking = 1;
2722 net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
2723 net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
2724 net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
2725 if (net != &init_net) {
2726 memcpy(net->ipv4.sysctl_tcp_rmem,
2727 init_net.ipv4.sysctl_tcp_rmem,
2728 sizeof(init_net.ipv4.sysctl_tcp_rmem));
2729 memcpy(net->ipv4.sysctl_tcp_wmem,
2730 init_net.ipv4.sysctl_tcp_wmem,
2731 sizeof(init_net.ipv4.sysctl_tcp_wmem));
2732 }
2733 net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
2734 net->ipv4.sysctl_tcp_comp_sack_nr = 44;
2735 net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2736 spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2737 net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
2738 atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2739
2740 /* Reno is always built in */
2741 if (!net_eq(net, &init_net) &&
2742 try_module_get(init_net.ipv4.tcp_congestion_control->owner))
2743 net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
2744 else
2745 net->ipv4.tcp_congestion_control = &tcp_reno;
2746
2747 return 0;
2748fail:
2749 tcp_sk_exit(net);
2750
2751 return res;
2752}
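/*
 * The defaults initialized above are the per-netns values exposed through
 * the net.ipv4.* sysctls.  A minimal userspace sketch (illustrative only,
 * not part of this file) that reads one of them, tcp_syn_retries, from
 * procfs:
 *
 *	#include <stdio.h>
 *
 *	static int read_tcp_syn_retries(void)
 *	{
 *		int val = -1;
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_syn_retries", "r");
 *
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%d", &val) != 1)
 *			val = -1;
 *		fclose(f);
 *		return val;
 *	}
 */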
2753
2754static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2755{
2756 struct net *net;
2757
2758 inet_twsk_purge(&tcp_hashinfo, AF_INET);
2759
2760 list_for_each_entry(net, net_exit_list, exit_list)
2761 tcp_fastopen_ctx_destroy(net);
2762}
2763
2764static struct pernet_operations __net_initdata tcp_sk_ops = {
2765 .init = tcp_sk_init,
2766 .exit = tcp_sk_exit,
2767 .exit_batch = tcp_sk_exit_batch,
2768};
2769
2770void __init tcp_v4_init(void)
2771{
2772 if (register_pernet_subsys(&tcp_sk_ops))
2773 panic("Failed to create the TCP control socket.\n");
2774}