David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2 | /* |
| 3 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
| 4 | * operating system. INET is implemented using the BSD Socket |
| 5 | * interface as the means of communication with the user level. |
| 6 | * |
| 7 | * Implementation of the Transmission Control Protocol(TCP). |
| 8 | * |
| 9 | * Authors: Ross Biro |
| 10 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
| 11 | * Mark Evans, <evansmp@uhura.aston.ac.uk> |
| 12 | * Corey Minyard <wf-rch!minyard@relay.EU.net> |
| 13 | * Florian La Roche, <flla@stud.uni-sb.de> |
| 14 | * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> |
| 15 | * Linus Torvalds, <torvalds@cs.helsinki.fi> |
| 16 | * Alan Cox, <gw4pts@gw4pts.ampr.org> |
| 17 | * Matthew Dillon, <dillon@apollo.west.oic.com> |
| 18 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> |
| 19 | * Jorge Cwik, <jorge@laser.satlink.net> |
| 20 | */ |
| 21 | |
| 22 | /* |
| 23 | * Changes: Pedro Roque : Retransmit queue handled by TCP. |
| 24 | * : Fragmentation on mtu decrease |
| 25 | * : Segment collapse on retransmit |
| 26 | * : AF independence |
| 27 | * |
| 28 | * Linus Torvalds : send_delayed_ack |
| 29 | * David S. Miller : Charge memory using the right skb |
| 30 | * during syn/ack processing. |
| 31 | * David S. Miller : Output engine completely rewritten. |
| 32 | * Andrea Arcangeli: SYNACK carry ts_recent in tsecr. |
| 33 | * Cacophonix Gaul : draft-minshall-nagle-01 |
| 34 | * J Hadi Salim : ECN support |
| 35 | * |
| 36 | */ |
| 37 | |
| 38 | #define pr_fmt(fmt) "TCP: " fmt |
| 39 | |
| 40 | #include <net/tcp.h> |
| 41 | |
| 42 | #include <linux/compiler.h> |
| 43 | #include <linux/gfp.h> |
| 44 | #include <linux/module.h> |
| 45 | #include <linux/static_key.h> |
| 46 | |
| 47 | #include <trace/events/tcp.h> |
| 48 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 49 | /* Refresh clocks of a TCP socket, |
| 50 | * ensuring monotically increasing values. |
| 51 | */ |
| 52 | void tcp_mstamp_refresh(struct tcp_sock *tp) |
| 53 | { |
| 54 | u64 val = tcp_clock_ns(); |
| 55 | |
| 56 | tp->tcp_clock_cache = val; |
| 57 | tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); |
| 58 | } |
| 59 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 60 | static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, |
| 61 | int push_one, gfp_t gfp); |
| 62 | |
| 63 | /* Account for new data that has been sent to the network. */ |
| 64 | static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) |
| 65 | { |
| 66 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 67 | struct tcp_sock *tp = tcp_sk(sk); |
| 68 | unsigned int prior_packets = tp->packets_out; |
| 69 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 70 | WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 71 | |
| 72 | __skb_unlink(skb, &sk->sk_write_queue); |
| 73 | tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); |
| 74 | |
| 75 | tp->packets_out += tcp_skb_pcount(skb); |
| 76 | if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) |
| 77 | tcp_rearm_rto(sk); |
| 78 | |
| 79 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, |
| 80 | tcp_skb_pcount(skb)); |
| 81 | } |
| 82 | |
| 83 | /* SND.NXT, if window was not shrunk or the amount of shrunk was less than one |
| 84 | * window scaling factor due to loss of precision. |
| 85 | * If window has been shrunk, what should we make? It is not clear at all. |
| 86 | * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-( |
| 87 | * Anything in between SND.UNA...SND.UNA+SND.WND also can be already |
| 88 | * invalid. OK, let's make this for now: |
| 89 | */ |
| 90 | static inline __u32 tcp_acceptable_seq(const struct sock *sk) |
| 91 | { |
| 92 | const struct tcp_sock *tp = tcp_sk(sk); |
| 93 | |
| 94 | if (!before(tcp_wnd_end(tp), tp->snd_nxt) || |
| 95 | (tp->rx_opt.wscale_ok && |
| 96 | ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) |
| 97 | return tp->snd_nxt; |
| 98 | else |
| 99 | return tcp_wnd_end(tp); |
| 100 | } |
| 101 | |
| 102 | /* Calculate mss to advertise in SYN segment. |
| 103 | * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that: |
| 104 | * |
| 105 | * 1. It is independent of path mtu. |
| 106 | * 2. Ideally, it is maximal possible segment size i.e. 65535-40. |
| 107 | * 3. For IPv4 it is reasonable to calculate it from maximal MTU of |
| 108 | * attached devices, because some buggy hosts are confused by |
| 109 | * large MSS. |
| 110 | * 4. We do not make 3, we advertise MSS, calculated from first |
| 111 | * hop device mtu, but allow to raise it to ip_rt_min_advmss. |
| 112 | * This may be overridden via information stored in routing table. |
| 113 | * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible, |
| 114 | * probably even Jumbo". |
| 115 | */ |
| 116 | static __u16 tcp_advertise_mss(struct sock *sk) |
| 117 | { |
| 118 | struct tcp_sock *tp = tcp_sk(sk); |
| 119 | const struct dst_entry *dst = __sk_dst_get(sk); |
| 120 | int mss = tp->advmss; |
| 121 | |
| 122 | if (dst) { |
| 123 | unsigned int metric = dst_metric_advmss(dst); |
| 124 | |
| 125 | if (metric < mss) { |
| 126 | mss = metric; |
| 127 | tp->advmss = mss; |
| 128 | } |
| 129 | } |
| 130 | |
| 131 | return (__u16)mss; |
| 132 | } |
| 133 | |
| 134 | /* RFC2861. Reset CWND after idle period longer RTO to "restart window". |
| 135 | * This is the first part of cwnd validation mechanism. |
| 136 | */ |
| 137 | void tcp_cwnd_restart(struct sock *sk, s32 delta) |
| 138 | { |
| 139 | struct tcp_sock *tp = tcp_sk(sk); |
| 140 | u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); |
| 141 | u32 cwnd = tp->snd_cwnd; |
| 142 | |
| 143 | tcp_ca_event(sk, CA_EVENT_CWND_RESTART); |
| 144 | |
| 145 | tp->snd_ssthresh = tcp_current_ssthresh(sk); |
| 146 | restart_cwnd = min(restart_cwnd, cwnd); |
| 147 | |
| 148 | while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) |
| 149 | cwnd >>= 1; |
| 150 | tp->snd_cwnd = max(cwnd, restart_cwnd); |
| 151 | tp->snd_cwnd_stamp = tcp_jiffies32; |
| 152 | tp->snd_cwnd_used = 0; |
| 153 | } |
| 154 | |
| 155 | /* Congestion state accounting after a packet has been sent. */ |
| 156 | static void tcp_event_data_sent(struct tcp_sock *tp, |
| 157 | struct sock *sk) |
| 158 | { |
| 159 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 160 | const u32 now = tcp_jiffies32; |
| 161 | |
| 162 | if (tcp_packets_in_flight(tp) == 0) |
| 163 | tcp_ca_event(sk, CA_EVENT_TX_START); |
| 164 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 165 | /* If this is the first data packet sent in response to the |
| 166 | * previous received data, |
| 167 | * and it is a reply for ato after last received packet, |
| 168 | * increase pingpong count. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 169 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 170 | if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) && |
| 171 | (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) |
| 172 | inet_csk_inc_pingpong_cnt(sk); |
| 173 | |
| 174 | tp->lsndtime = now; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 175 | } |
| 176 | |
| 177 | /* Account for an ACK we sent. */ |
| 178 | static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, |
| 179 | u32 rcv_nxt) |
| 180 | { |
| 181 | struct tcp_sock *tp = tcp_sk(sk); |
| 182 | |
| 183 | if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) { |
| 184 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, |
| 185 | tp->compressed_ack - TCP_FASTRETRANS_THRESH); |
| 186 | tp->compressed_ack = TCP_FASTRETRANS_THRESH; |
| 187 | if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) |
| 188 | __sock_put(sk); |
| 189 | } |
| 190 | |
| 191 | if (unlikely(rcv_nxt != tp->rcv_nxt)) |
| 192 | return; /* Special ACK sent by DCTCP to reflect ECN */ |
| 193 | tcp_dec_quickack_mode(sk, pkts); |
| 194 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); |
| 195 | } |
| 196 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 197 | /* Determine a window scaling and initial window to offer. |
| 198 | * Based on the assumption that the given amount of space |
| 199 | * will be offered. Store the results in the tp structure. |
| 200 | * NOTE: for smooth operation initial space offering should |
| 201 | * be a multiple of mss if possible. We assume here that mss >= 1. |
| 202 | * This MUST be enforced by all callers. |
| 203 | */ |
| 204 | void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, |
| 205 | __u32 *rcv_wnd, __u32 *window_clamp, |
| 206 | int wscale_ok, __u8 *rcv_wscale, |
| 207 | __u32 init_rcv_wnd) |
| 208 | { |
| 209 | unsigned int space = (__space < 0 ? 0 : __space); |
| 210 | |
| 211 | /* If no clamp set the clamp to the max possible scaled window */ |
| 212 | if (*window_clamp == 0) |
| 213 | (*window_clamp) = (U16_MAX << TCP_MAX_WSCALE); |
| 214 | space = min(*window_clamp, space); |
| 215 | |
| 216 | /* Quantize space offering to a multiple of mss if possible. */ |
| 217 | if (space > mss) |
| 218 | space = rounddown(space, mss); |
| 219 | |
| 220 | /* NOTE: offering an initial window larger than 32767 |
| 221 | * will break some buggy TCP stacks. If the admin tells us |
| 222 | * it is likely we could be speaking with such a buggy stack |
| 223 | * we will truncate our initial window offering to 32K-1 |
| 224 | * unless the remote has sent us a window scaling option, |
| 225 | * which we interpret as a sign the remote TCP is not |
| 226 | * misinterpreting the window field as a signed quantity. |
| 227 | */ |
| 228 | if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows) |
| 229 | (*rcv_wnd) = min(space, MAX_TCP_WINDOW); |
| 230 | else |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 231 | (*rcv_wnd) = min_t(u32, space, U16_MAX); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 232 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 233 | if (init_rcv_wnd) |
| 234 | *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); |
| 235 | |
| 236 | *rcv_wscale = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 237 | if (wscale_ok) { |
| 238 | /* Set window scaling on max possible window */ |
| 239 | space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); |
| 240 | space = max_t(u32, space, sysctl_rmem_max); |
| 241 | space = min_t(u32, space, *window_clamp); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 242 | *rcv_wscale = clamp_t(int, ilog2(space) - 15, |
| 243 | 0, TCP_MAX_WSCALE); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 244 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 245 | /* Set the clamp no higher than max representable value */ |
| 246 | (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp); |
| 247 | } |
| 248 | EXPORT_SYMBOL(tcp_select_initial_window); |
| 249 | |
| 250 | /* Chose a new window to advertise, update state in tcp_sock for the |
| 251 | * socket, and return result with RFC1323 scaling applied. The return |
| 252 | * value can be stuffed directly into th->window for an outgoing |
| 253 | * frame. |
| 254 | */ |
| 255 | static u16 tcp_select_window(struct sock *sk) |
| 256 | { |
| 257 | struct tcp_sock *tp = tcp_sk(sk); |
| 258 | u32 old_win = tp->rcv_wnd; |
| 259 | u32 cur_win = tcp_receive_window(tp); |
| 260 | u32 new_win = __tcp_select_window(sk); |
| 261 | |
| 262 | /* Never shrink the offered window */ |
| 263 | if (new_win < cur_win) { |
| 264 | /* Danger Will Robinson! |
| 265 | * Don't update rcv_wup/rcv_wnd here or else |
| 266 | * we will not be able to advertise a zero |
| 267 | * window in time. --DaveM |
| 268 | * |
| 269 | * Relax Will Robinson. |
| 270 | */ |
| 271 | if (new_win == 0) |
| 272 | NET_INC_STATS(sock_net(sk), |
| 273 | LINUX_MIB_TCPWANTZEROWINDOWADV); |
| 274 | new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); |
| 275 | } |
| 276 | tp->rcv_wnd = new_win; |
| 277 | tp->rcv_wup = tp->rcv_nxt; |
| 278 | |
| 279 | /* Make sure we do not exceed the maximum possible |
| 280 | * scaled window. |
| 281 | */ |
| 282 | if (!tp->rx_opt.rcv_wscale && |
| 283 | sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows) |
| 284 | new_win = min(new_win, MAX_TCP_WINDOW); |
| 285 | else |
| 286 | new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); |
| 287 | |
| 288 | /* RFC1323 scaling applied */ |
| 289 | new_win >>= tp->rx_opt.rcv_wscale; |
| 290 | |
| 291 | /* If we advertise zero window, disable fast path. */ |
| 292 | if (new_win == 0) { |
| 293 | tp->pred_flags = 0; |
| 294 | if (old_win) |
| 295 | NET_INC_STATS(sock_net(sk), |
| 296 | LINUX_MIB_TCPTOZEROWINDOWADV); |
| 297 | } else if (old_win == 0) { |
| 298 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV); |
| 299 | } |
| 300 | |
| 301 | return new_win; |
| 302 | } |
| 303 | |
| 304 | /* Packet ECN state for a SYN-ACK */ |
| 305 | static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) |
| 306 | { |
| 307 | const struct tcp_sock *tp = tcp_sk(sk); |
| 308 | |
| 309 | TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; |
| 310 | if (!(tp->ecn_flags & TCP_ECN_OK)) |
| 311 | TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; |
| 312 | else if (tcp_ca_needs_ecn(sk) || |
| 313 | tcp_bpf_ca_needs_ecn(sk)) |
| 314 | INET_ECN_xmit(sk); |
| 315 | } |
| 316 | |
| 317 | /* Packet ECN state for a SYN. */ |
| 318 | static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) |
| 319 | { |
| 320 | struct tcp_sock *tp = tcp_sk(sk); |
| 321 | bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk); |
| 322 | bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || |
| 323 | tcp_ca_needs_ecn(sk) || bpf_needs_ecn; |
| 324 | |
| 325 | if (!use_ecn) { |
| 326 | const struct dst_entry *dst = __sk_dst_get(sk); |
| 327 | |
| 328 | if (dst && dst_feature(dst, RTAX_FEATURE_ECN)) |
| 329 | use_ecn = true; |
| 330 | } |
| 331 | |
| 332 | tp->ecn_flags = 0; |
| 333 | |
| 334 | if (use_ecn) { |
| 335 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; |
| 336 | tp->ecn_flags = TCP_ECN_OK; |
| 337 | if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn) |
| 338 | INET_ECN_xmit(sk); |
| 339 | } |
| 340 | } |
| 341 | |
| 342 | static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb) |
| 343 | { |
| 344 | if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback) |
| 345 | /* tp->ecn_flags are cleared at a later point in time when |
| 346 | * SYN ACK is ultimatively being received. |
| 347 | */ |
| 348 | TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR); |
| 349 | } |
| 350 | |
| 351 | static void |
| 352 | tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th) |
| 353 | { |
| 354 | if (inet_rsk(req)->ecn_ok) |
| 355 | th->ece = 1; |
| 356 | } |
| 357 | |
| 358 | /* Set up ECN state for a packet on a ESTABLISHED socket that is about to |
| 359 | * be sent. |
| 360 | */ |
| 361 | static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, |
| 362 | struct tcphdr *th, int tcp_header_len) |
| 363 | { |
| 364 | struct tcp_sock *tp = tcp_sk(sk); |
| 365 | |
| 366 | if (tp->ecn_flags & TCP_ECN_OK) { |
| 367 | /* Not-retransmitted data segment: set ECT and inject CWR. */ |
| 368 | if (skb->len != tcp_header_len && |
| 369 | !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { |
| 370 | INET_ECN_xmit(sk); |
| 371 | if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { |
| 372 | tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; |
| 373 | th->cwr = 1; |
| 374 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; |
| 375 | } |
| 376 | } else if (!tcp_ca_needs_ecn(sk)) { |
| 377 | /* ACK or retransmitted segment: clear ECT|CE */ |
| 378 | INET_ECN_dontxmit(sk); |
| 379 | } |
| 380 | if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) |
| 381 | th->ece = 1; |
| 382 | } |
| 383 | } |
| 384 | |
| 385 | /* Constructs common control bits of non-data skb. If SYN/FIN is present, |
| 386 | * auto increment end seqno. |
| 387 | */ |
| 388 | static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) |
| 389 | { |
| 390 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 391 | |
| 392 | TCP_SKB_CB(skb)->tcp_flags = flags; |
| 393 | TCP_SKB_CB(skb)->sacked = 0; |
| 394 | |
| 395 | tcp_skb_pcount_set(skb, 1); |
| 396 | |
| 397 | TCP_SKB_CB(skb)->seq = seq; |
| 398 | if (flags & (TCPHDR_SYN | TCPHDR_FIN)) |
| 399 | seq++; |
| 400 | TCP_SKB_CB(skb)->end_seq = seq; |
| 401 | } |
| 402 | |
| 403 | static inline bool tcp_urg_mode(const struct tcp_sock *tp) |
| 404 | { |
| 405 | return tp->snd_una != tp->snd_up; |
| 406 | } |
| 407 | |
| 408 | #define OPTION_SACK_ADVERTISE (1 << 0) |
| 409 | #define OPTION_TS (1 << 1) |
| 410 | #define OPTION_MD5 (1 << 2) |
| 411 | #define OPTION_WSCALE (1 << 3) |
| 412 | #define OPTION_FAST_OPEN_COOKIE (1 << 8) |
| 413 | #define OPTION_SMC (1 << 9) |
| 414 | |
| 415 | static void smc_options_write(__be32 *ptr, u16 *options) |
| 416 | { |
| 417 | #if IS_ENABLED(CONFIG_SMC) |
| 418 | if (static_branch_unlikely(&tcp_have_smc)) { |
| 419 | if (unlikely(OPTION_SMC & *options)) { |
| 420 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
| 421 | (TCPOPT_NOP << 16) | |
| 422 | (TCPOPT_EXP << 8) | |
| 423 | (TCPOLEN_EXP_SMC_BASE)); |
| 424 | *ptr++ = htonl(TCPOPT_SMC_MAGIC); |
| 425 | } |
| 426 | } |
| 427 | #endif |
| 428 | } |
| 429 | |
| 430 | struct tcp_out_options { |
| 431 | u16 options; /* bit field of OPTION_* */ |
| 432 | u16 mss; /* 0 to disable */ |
| 433 | u8 ws; /* window scale, 0 to disable */ |
| 434 | u8 num_sack_blocks; /* number of SACK blocks to include */ |
| 435 | u8 hash_size; /* bytes in hash_location */ |
| 436 | __u8 *hash_location; /* temporary pointer, overloaded */ |
| 437 | __u32 tsval, tsecr; /* need to include OPTION_TS */ |
| 438 | struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */ |
| 439 | }; |
| 440 | |
| 441 | /* Write previously computed TCP options to the packet. |
| 442 | * |
| 443 | * Beware: Something in the Internet is very sensitive to the ordering of |
| 444 | * TCP options, we learned this through the hard way, so be careful here. |
| 445 | * Luckily we can at least blame others for their non-compliance but from |
| 446 | * inter-operability perspective it seems that we're somewhat stuck with |
| 447 | * the ordering which we have been using if we want to keep working with |
| 448 | * those broken things (not that it currently hurts anybody as there isn't |
| 449 | * particular reason why the ordering would need to be changed). |
| 450 | * |
| 451 | * At least SACK_PERM as the first option is known to lead to a disaster |
| 452 | * (but it may well be that other scenarios fail similarly). |
| 453 | */ |
| 454 | static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, |
| 455 | struct tcp_out_options *opts) |
| 456 | { |
| 457 | u16 options = opts->options; /* mungable copy */ |
| 458 | |
| 459 | if (unlikely(OPTION_MD5 & options)) { |
| 460 | *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | |
| 461 | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); |
| 462 | /* overload cookie hash location */ |
| 463 | opts->hash_location = (__u8 *)ptr; |
| 464 | ptr += 4; |
| 465 | } |
| 466 | |
| 467 | if (unlikely(opts->mss)) { |
| 468 | *ptr++ = htonl((TCPOPT_MSS << 24) | |
| 469 | (TCPOLEN_MSS << 16) | |
| 470 | opts->mss); |
| 471 | } |
| 472 | |
| 473 | if (likely(OPTION_TS & options)) { |
| 474 | if (unlikely(OPTION_SACK_ADVERTISE & options)) { |
| 475 | *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | |
| 476 | (TCPOLEN_SACK_PERM << 16) | |
| 477 | (TCPOPT_TIMESTAMP << 8) | |
| 478 | TCPOLEN_TIMESTAMP); |
| 479 | options &= ~OPTION_SACK_ADVERTISE; |
| 480 | } else { |
| 481 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
| 482 | (TCPOPT_NOP << 16) | |
| 483 | (TCPOPT_TIMESTAMP << 8) | |
| 484 | TCPOLEN_TIMESTAMP); |
| 485 | } |
| 486 | *ptr++ = htonl(opts->tsval); |
| 487 | *ptr++ = htonl(opts->tsecr); |
| 488 | } |
| 489 | |
| 490 | if (unlikely(OPTION_SACK_ADVERTISE & options)) { |
| 491 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
| 492 | (TCPOPT_NOP << 16) | |
| 493 | (TCPOPT_SACK_PERM << 8) | |
| 494 | TCPOLEN_SACK_PERM); |
| 495 | } |
| 496 | |
| 497 | if (unlikely(OPTION_WSCALE & options)) { |
| 498 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
| 499 | (TCPOPT_WINDOW << 16) | |
| 500 | (TCPOLEN_WINDOW << 8) | |
| 501 | opts->ws); |
| 502 | } |
| 503 | |
| 504 | if (unlikely(opts->num_sack_blocks)) { |
| 505 | struct tcp_sack_block *sp = tp->rx_opt.dsack ? |
| 506 | tp->duplicate_sack : tp->selective_acks; |
| 507 | int this_sack; |
| 508 | |
| 509 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
| 510 | (TCPOPT_NOP << 16) | |
| 511 | (TCPOPT_SACK << 8) | |
| 512 | (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * |
| 513 | TCPOLEN_SACK_PERBLOCK))); |
| 514 | |
| 515 | for (this_sack = 0; this_sack < opts->num_sack_blocks; |
| 516 | ++this_sack) { |
| 517 | *ptr++ = htonl(sp[this_sack].start_seq); |
| 518 | *ptr++ = htonl(sp[this_sack].end_seq); |
| 519 | } |
| 520 | |
| 521 | tp->rx_opt.dsack = 0; |
| 522 | } |
| 523 | |
| 524 | if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) { |
| 525 | struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; |
| 526 | u8 *p = (u8 *)ptr; |
| 527 | u32 len; /* Fast Open option length */ |
| 528 | |
| 529 | if (foc->exp) { |
| 530 | len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; |
| 531 | *ptr = htonl((TCPOPT_EXP << 24) | (len << 16) | |
| 532 | TCPOPT_FASTOPEN_MAGIC); |
| 533 | p += TCPOLEN_EXP_FASTOPEN_BASE; |
| 534 | } else { |
| 535 | len = TCPOLEN_FASTOPEN_BASE + foc->len; |
| 536 | *p++ = TCPOPT_FASTOPEN; |
| 537 | *p++ = len; |
| 538 | } |
| 539 | |
| 540 | memcpy(p, foc->val, foc->len); |
| 541 | if ((len & 3) == 2) { |
| 542 | p[foc->len] = TCPOPT_NOP; |
| 543 | p[foc->len + 1] = TCPOPT_NOP; |
| 544 | } |
| 545 | ptr += (len + 3) >> 2; |
| 546 | } |
| 547 | |
| 548 | smc_options_write(ptr, &options); |
| 549 | } |
| 550 | |
| 551 | static void smc_set_option(const struct tcp_sock *tp, |
| 552 | struct tcp_out_options *opts, |
| 553 | unsigned int *remaining) |
| 554 | { |
| 555 | #if IS_ENABLED(CONFIG_SMC) |
| 556 | if (static_branch_unlikely(&tcp_have_smc)) { |
| 557 | if (tp->syn_smc) { |
| 558 | if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { |
| 559 | opts->options |= OPTION_SMC; |
| 560 | *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; |
| 561 | } |
| 562 | } |
| 563 | } |
| 564 | #endif |
| 565 | } |
| 566 | |
| 567 | static void smc_set_option_cond(const struct tcp_sock *tp, |
| 568 | const struct inet_request_sock *ireq, |
| 569 | struct tcp_out_options *opts, |
| 570 | unsigned int *remaining) |
| 571 | { |
| 572 | #if IS_ENABLED(CONFIG_SMC) |
| 573 | if (static_branch_unlikely(&tcp_have_smc)) { |
| 574 | if (tp->syn_smc && ireq->smc_ok) { |
| 575 | if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { |
| 576 | opts->options |= OPTION_SMC; |
| 577 | *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; |
| 578 | } |
| 579 | } |
| 580 | } |
| 581 | #endif |
| 582 | } |
| 583 | |
| 584 | /* Compute TCP options for SYN packets. This is not the final |
| 585 | * network wire format yet. |
| 586 | */ |
| 587 | static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, |
| 588 | struct tcp_out_options *opts, |
| 589 | struct tcp_md5sig_key **md5) |
| 590 | { |
| 591 | struct tcp_sock *tp = tcp_sk(sk); |
| 592 | unsigned int remaining = MAX_TCP_OPTION_SPACE; |
| 593 | struct tcp_fastopen_request *fastopen = tp->fastopen_req; |
| 594 | |
| 595 | *md5 = NULL; |
| 596 | #ifdef CONFIG_TCP_MD5SIG |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 597 | if (static_branch_unlikely(&tcp_md5_needed) && |
| 598 | rcu_access_pointer(tp->md5sig_info)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 599 | *md5 = tp->af_specific->md5_lookup(sk, sk); |
| 600 | if (*md5) { |
| 601 | opts->options |= OPTION_MD5; |
| 602 | remaining -= TCPOLEN_MD5SIG_ALIGNED; |
| 603 | } |
| 604 | } |
| 605 | #endif |
| 606 | |
| 607 | /* We always get an MSS option. The option bytes which will be seen in |
| 608 | * normal data packets should timestamps be used, must be in the MSS |
| 609 | * advertised. But we subtract them from tp->mss_cache so that |
| 610 | * calculations in tcp_sendmsg are simpler etc. So account for this |
| 611 | * fact here if necessary. If we don't do this correctly, as a |
| 612 | * receiver we won't recognize data packets as being full sized when we |
| 613 | * should, and thus we won't abide by the delayed ACK rules correctly. |
| 614 | * SACKs don't matter, we never delay an ACK when we have any of those |
| 615 | * going out. */ |
| 616 | opts->mss = tcp_advertise_mss(sk); |
| 617 | remaining -= TCPOLEN_MSS_ALIGNED; |
| 618 | |
| 619 | if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) { |
| 620 | opts->options |= OPTION_TS; |
| 621 | opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; |
| 622 | opts->tsecr = tp->rx_opt.ts_recent; |
| 623 | remaining -= TCPOLEN_TSTAMP_ALIGNED; |
| 624 | } |
| 625 | if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) { |
| 626 | opts->ws = tp->rx_opt.rcv_wscale; |
| 627 | opts->options |= OPTION_WSCALE; |
| 628 | remaining -= TCPOLEN_WSCALE_ALIGNED; |
| 629 | } |
| 630 | if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) { |
| 631 | opts->options |= OPTION_SACK_ADVERTISE; |
| 632 | if (unlikely(!(OPTION_TS & opts->options))) |
| 633 | remaining -= TCPOLEN_SACKPERM_ALIGNED; |
| 634 | } |
| 635 | |
| 636 | if (fastopen && fastopen->cookie.len >= 0) { |
| 637 | u32 need = fastopen->cookie.len; |
| 638 | |
| 639 | need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE : |
| 640 | TCPOLEN_FASTOPEN_BASE; |
| 641 | need = (need + 3) & ~3U; /* Align to 32 bits */ |
| 642 | if (remaining >= need) { |
| 643 | opts->options |= OPTION_FAST_OPEN_COOKIE; |
| 644 | opts->fastopen_cookie = &fastopen->cookie; |
| 645 | remaining -= need; |
| 646 | tp->syn_fastopen = 1; |
| 647 | tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; |
| 648 | } |
| 649 | } |
| 650 | |
| 651 | smc_set_option(tp, opts, &remaining); |
| 652 | |
| 653 | return MAX_TCP_OPTION_SPACE - remaining; |
| 654 | } |
| 655 | |
| 656 | /* Set up TCP options for SYN-ACKs. */ |
| 657 | static unsigned int tcp_synack_options(const struct sock *sk, |
| 658 | struct request_sock *req, |
| 659 | unsigned int mss, struct sk_buff *skb, |
| 660 | struct tcp_out_options *opts, |
| 661 | const struct tcp_md5sig_key *md5, |
| 662 | struct tcp_fastopen_cookie *foc) |
| 663 | { |
| 664 | struct inet_request_sock *ireq = inet_rsk(req); |
| 665 | unsigned int remaining = MAX_TCP_OPTION_SPACE; |
| 666 | |
| 667 | #ifdef CONFIG_TCP_MD5SIG |
| 668 | if (md5) { |
| 669 | opts->options |= OPTION_MD5; |
| 670 | remaining -= TCPOLEN_MD5SIG_ALIGNED; |
| 671 | |
| 672 | /* We can't fit any SACK blocks in a packet with MD5 + TS |
| 673 | * options. There was discussion about disabling SACK |
| 674 | * rather than TS in order to fit in better with old, |
| 675 | * buggy kernels, but that was deemed to be unnecessary. |
| 676 | */ |
| 677 | ireq->tstamp_ok &= !ireq->sack_ok; |
| 678 | } |
| 679 | #endif |
| 680 | |
| 681 | /* We always send an MSS option. */ |
| 682 | opts->mss = mss; |
| 683 | remaining -= TCPOLEN_MSS_ALIGNED; |
| 684 | |
| 685 | if (likely(ireq->wscale_ok)) { |
| 686 | opts->ws = ireq->rcv_wscale; |
| 687 | opts->options |= OPTION_WSCALE; |
| 688 | remaining -= TCPOLEN_WSCALE_ALIGNED; |
| 689 | } |
| 690 | if (likely(ireq->tstamp_ok)) { |
| 691 | opts->options |= OPTION_TS; |
| 692 | opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off; |
| 693 | opts->tsecr = req->ts_recent; |
| 694 | remaining -= TCPOLEN_TSTAMP_ALIGNED; |
| 695 | } |
| 696 | if (likely(ireq->sack_ok)) { |
| 697 | opts->options |= OPTION_SACK_ADVERTISE; |
| 698 | if (unlikely(!ireq->tstamp_ok)) |
| 699 | remaining -= TCPOLEN_SACKPERM_ALIGNED; |
| 700 | } |
| 701 | if (foc != NULL && foc->len >= 0) { |
| 702 | u32 need = foc->len; |
| 703 | |
| 704 | need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE : |
| 705 | TCPOLEN_FASTOPEN_BASE; |
| 706 | need = (need + 3) & ~3U; /* Align to 32 bits */ |
| 707 | if (remaining >= need) { |
| 708 | opts->options |= OPTION_FAST_OPEN_COOKIE; |
| 709 | opts->fastopen_cookie = foc; |
| 710 | remaining -= need; |
| 711 | } |
| 712 | } |
| 713 | |
| 714 | smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining); |
| 715 | |
| 716 | return MAX_TCP_OPTION_SPACE - remaining; |
| 717 | } |
| 718 | |
| 719 | /* Compute TCP options for ESTABLISHED sockets. This is not the |
| 720 | * final wire format yet. |
| 721 | */ |
| 722 | static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, |
| 723 | struct tcp_out_options *opts, |
| 724 | struct tcp_md5sig_key **md5) |
| 725 | { |
| 726 | struct tcp_sock *tp = tcp_sk(sk); |
| 727 | unsigned int size = 0; |
| 728 | unsigned int eff_sacks; |
| 729 | |
| 730 | opts->options = 0; |
| 731 | |
| 732 | *md5 = NULL; |
| 733 | #ifdef CONFIG_TCP_MD5SIG |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 734 | if (static_branch_unlikely(&tcp_md5_needed) && |
| 735 | rcu_access_pointer(tp->md5sig_info)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 736 | *md5 = tp->af_specific->md5_lookup(sk, sk); |
| 737 | if (*md5) { |
| 738 | opts->options |= OPTION_MD5; |
| 739 | size += TCPOLEN_MD5SIG_ALIGNED; |
| 740 | } |
| 741 | } |
| 742 | #endif |
| 743 | |
| 744 | if (likely(tp->rx_opt.tstamp_ok)) { |
| 745 | opts->options |= OPTION_TS; |
| 746 | opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0; |
| 747 | opts->tsecr = tp->rx_opt.ts_recent; |
| 748 | size += TCPOLEN_TSTAMP_ALIGNED; |
| 749 | } |
| 750 | |
| 751 | eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; |
| 752 | if (unlikely(eff_sacks)) { |
| 753 | const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; |
| 754 | opts->num_sack_blocks = |
| 755 | min_t(unsigned int, eff_sacks, |
| 756 | (remaining - TCPOLEN_SACK_BASE_ALIGNED) / |
| 757 | TCPOLEN_SACK_PERBLOCK); |
| 758 | size += TCPOLEN_SACK_BASE_ALIGNED + |
| 759 | opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; |
| 760 | } |
| 761 | |
| 762 | return size; |
| 763 | } |
| 764 | |
| 765 | |
| 766 | /* TCP SMALL QUEUES (TSQ) |
| 767 | * |
| 768 | * TSQ goal is to keep small amount of skbs per tcp flow in tx queues (qdisc+dev) |
| 769 | * to reduce RTT and bufferbloat. |
| 770 | * We do this using a special skb destructor (tcp_wfree). |
| 771 | * |
| 772 | * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb |
| 773 | * needs to be reallocated in a driver. |
| 774 | * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc |
| 775 | * |
| 776 | * Since transmit from skb destructor is forbidden, we use a tasklet |
| 777 | * to process all sockets that eventually need to send more skbs. |
| 778 | * We use one tasklet per cpu, with its own queue of sockets. |
| 779 | */ |
| 780 | struct tsq_tasklet { |
| 781 | struct tasklet_struct tasklet; |
| 782 | struct list_head head; /* queue of tcp sockets */ |
| 783 | }; |
| 784 | static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet); |
| 785 | |
| 786 | static void tcp_tsq_write(struct sock *sk) |
| 787 | { |
| 788 | if ((1 << sk->sk_state) & |
| 789 | (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | |
| 790 | TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) { |
| 791 | struct tcp_sock *tp = tcp_sk(sk); |
| 792 | |
| 793 | if (tp->lost_out > tp->retrans_out && |
| 794 | tp->snd_cwnd > tcp_packets_in_flight(tp)) { |
| 795 | tcp_mstamp_refresh(tp); |
| 796 | tcp_xmit_retransmit_queue(sk); |
| 797 | } |
| 798 | |
| 799 | tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, |
| 800 | 0, GFP_ATOMIC); |
| 801 | } |
| 802 | } |
| 803 | |
| 804 | static void tcp_tsq_handler(struct sock *sk) |
| 805 | { |
| 806 | bh_lock_sock(sk); |
| 807 | if (!sock_owned_by_user(sk)) |
| 808 | tcp_tsq_write(sk); |
| 809 | else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) |
| 810 | sock_hold(sk); |
| 811 | bh_unlock_sock(sk); |
| 812 | } |
| 813 | /* |
| 814 | * One tasklet per cpu tries to send more skbs. |
| 815 | * We run in tasklet context but need to disable irqs when |
| 816 | * transferring tsq->head because tcp_wfree() might |
| 817 | * interrupt us (non NAPI drivers) |
| 818 | */ |
| 819 | static void tcp_tasklet_func(unsigned long data) |
| 820 | { |
| 821 | struct tsq_tasklet *tsq = (struct tsq_tasklet *)data; |
| 822 | LIST_HEAD(list); |
| 823 | unsigned long flags; |
| 824 | struct list_head *q, *n; |
| 825 | struct tcp_sock *tp; |
| 826 | struct sock *sk; |
| 827 | |
| 828 | local_irq_save(flags); |
| 829 | list_splice_init(&tsq->head, &list); |
| 830 | local_irq_restore(flags); |
| 831 | |
| 832 | list_for_each_safe(q, n, &list) { |
| 833 | tp = list_entry(q, struct tcp_sock, tsq_node); |
| 834 | list_del(&tp->tsq_node); |
| 835 | |
| 836 | sk = (struct sock *)tp; |
| 837 | smp_mb__before_atomic(); |
| 838 | clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); |
| 839 | |
| 840 | tcp_tsq_handler(sk); |
| 841 | sk_free(sk); |
| 842 | } |
| 843 | } |
| 844 | |
| 845 | #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED | \ |
| 846 | TCPF_WRITE_TIMER_DEFERRED | \ |
| 847 | TCPF_DELACK_TIMER_DEFERRED | \ |
| 848 | TCPF_MTU_REDUCED_DEFERRED) |
| 849 | /** |
| 850 | * tcp_release_cb - tcp release_sock() callback |
| 851 | * @sk: socket |
| 852 | * |
| 853 | * called from release_sock() to perform protocol dependent |
| 854 | * actions before socket release. |
| 855 | */ |
| 856 | void tcp_release_cb(struct sock *sk) |
| 857 | { |
| 858 | unsigned long flags, nflags; |
| 859 | |
| 860 | /* perform an atomic operation only if at least one flag is set */ |
| 861 | do { |
| 862 | flags = sk->sk_tsq_flags; |
| 863 | if (!(flags & TCP_DEFERRED_ALL)) |
| 864 | return; |
| 865 | nflags = flags & ~TCP_DEFERRED_ALL; |
| 866 | } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags); |
| 867 | |
| 868 | if (flags & TCPF_TSQ_DEFERRED) { |
| 869 | tcp_tsq_write(sk); |
| 870 | __sock_put(sk); |
| 871 | } |
| 872 | /* Here begins the tricky part : |
| 873 | * We are called from release_sock() with : |
| 874 | * 1) BH disabled |
| 875 | * 2) sk_lock.slock spinlock held |
| 876 | * 3) socket owned by us (sk->sk_lock.owned == 1) |
| 877 | * |
| 878 | * But following code is meant to be called from BH handlers, |
| 879 | * so we should keep BH disabled, but early release socket ownership |
| 880 | */ |
| 881 | sock_release_ownership(sk); |
| 882 | |
| 883 | if (flags & TCPF_WRITE_TIMER_DEFERRED) { |
| 884 | tcp_write_timer_handler(sk); |
| 885 | __sock_put(sk); |
| 886 | } |
| 887 | if (flags & TCPF_DELACK_TIMER_DEFERRED) { |
| 888 | tcp_delack_timer_handler(sk); |
| 889 | __sock_put(sk); |
| 890 | } |
| 891 | if (flags & TCPF_MTU_REDUCED_DEFERRED) { |
| 892 | inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); |
| 893 | __sock_put(sk); |
| 894 | } |
| 895 | } |
| 896 | EXPORT_SYMBOL(tcp_release_cb); |
| 897 | |
| 898 | void __init tcp_tasklet_init(void) |
| 899 | { |
| 900 | int i; |
| 901 | |
| 902 | for_each_possible_cpu(i) { |
| 903 | struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i); |
| 904 | |
| 905 | INIT_LIST_HEAD(&tsq->head); |
| 906 | tasklet_init(&tsq->tasklet, |
| 907 | tcp_tasklet_func, |
| 908 | (unsigned long)tsq); |
| 909 | } |
| 910 | } |
| 911 | |
| 912 | /* |
| 913 | * Write buffer destructor automatically called from kfree_skb. |
| 914 | * We can't xmit new skbs from this context, as we might already |
| 915 | * hold qdisc lock. |
| 916 | */ |
| 917 | void tcp_wfree(struct sk_buff *skb) |
| 918 | { |
| 919 | struct sock *sk = skb->sk; |
| 920 | struct tcp_sock *tp = tcp_sk(sk); |
| 921 | unsigned long flags, nval, oval; |
| 922 | |
| 923 | /* Keep one reference on sk_wmem_alloc. |
| 924 | * Will be released by sk_free() from here or tcp_tasklet_func() |
| 925 | */ |
| 926 | WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); |
| 927 | |
| 928 | /* If this softirq is serviced by ksoftirqd, we are likely under stress. |
| 929 | * Wait until our queues (qdisc + devices) are drained. |
| 930 | * This gives : |
| 931 | * - less callbacks to tcp_write_xmit(), reducing stress (batches) |
| 932 | * - chance for incoming ACK (processed by another cpu maybe) |
| 933 | * to migrate this flow (skb->ooo_okay will be eventually set) |
| 934 | */ |
| 935 | if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) |
| 936 | goto out; |
| 937 | |
| 938 | for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) { |
| 939 | struct tsq_tasklet *tsq; |
| 940 | bool empty; |
| 941 | |
| 942 | if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED)) |
| 943 | goto out; |
| 944 | |
| 945 | nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED; |
| 946 | nval = cmpxchg(&sk->sk_tsq_flags, oval, nval); |
| 947 | if (nval != oval) |
| 948 | continue; |
| 949 | |
| 950 | /* queue this socket to tasklet queue */ |
| 951 | local_irq_save(flags); |
| 952 | tsq = this_cpu_ptr(&tsq_tasklet); |
| 953 | empty = list_empty(&tsq->head); |
| 954 | list_add(&tp->tsq_node, &tsq->head); |
| 955 | if (empty) |
| 956 | tasklet_schedule(&tsq->tasklet); |
| 957 | local_irq_restore(flags); |
| 958 | return; |
| 959 | } |
| 960 | out: |
| 961 | sk_free(sk); |
| 962 | } |
| 963 | |
| 964 | /* Note: Called under soft irq. |
| 965 | * We can call TCP stack right away, unless socket is owned by user. |
| 966 | */ |
| 967 | enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer) |
| 968 | { |
| 969 | struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer); |
| 970 | struct sock *sk = (struct sock *)tp; |
| 971 | |
| 972 | tcp_tsq_handler(sk); |
| 973 | sock_put(sk); |
| 974 | |
| 975 | return HRTIMER_NORESTART; |
| 976 | } |
| 977 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 978 | static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, |
| 979 | u64 prior_wstamp) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 980 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 981 | struct tcp_sock *tp = tcp_sk(sk); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 982 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 983 | if (sk->sk_pacing_status != SK_PACING_NONE) { |
| 984 | unsigned long rate = sk->sk_pacing_rate; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 985 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 986 | /* Original sch_fq does not pace first 10 MSS |
| 987 | * Note that tp->data_segs_out overflows after 2^32 packets, |
| 988 | * this is a minor annoyance. |
| 989 | */ |
| 990 | if (rate != ~0UL && rate && tp->data_segs_out >= 10) { |
| 991 | u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate); |
| 992 | u64 credit = tp->tcp_wstamp_ns - prior_wstamp; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 993 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 994 | /* take into account OS jitter */ |
| 995 | len_ns -= min_t(u64, len_ns / 2, credit); |
| 996 | tp->tcp_wstamp_ns += len_ns; |
| 997 | } |
| 998 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 999 | list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); |
| 1000 | } |
| 1001 | |
| 1002 | /* This routine actually transmits TCP packets queued in by |
| 1003 | * tcp_do_sendmsg(). This is used by both the initial |
| 1004 | * transmission and possible later retransmissions. |
| 1005 | * All SKB's seen here are completely headerless. It is our |
| 1006 | * job to build the TCP header, and pass the packet down to |
| 1007 | * IP so it can do the same plus pass the packet off to the |
| 1008 | * device. |
| 1009 | * |
| 1010 | * We are working here with either a clone of the original |
| 1011 | * SKB, or a fresh unique copy made by the retransmit engine. |
| 1012 | */ |
| 1013 | static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, |
| 1014 | int clone_it, gfp_t gfp_mask, u32 rcv_nxt) |
| 1015 | { |
| 1016 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 1017 | struct inet_sock *inet; |
| 1018 | struct tcp_sock *tp; |
| 1019 | struct tcp_skb_cb *tcb; |
| 1020 | struct tcp_out_options opts; |
| 1021 | unsigned int tcp_options_size, tcp_header_size; |
| 1022 | struct sk_buff *oskb = NULL; |
| 1023 | struct tcp_md5sig_key *md5; |
| 1024 | struct tcphdr *th; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1025 | u64 prior_wstamp; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1026 | int err; |
| 1027 | |
| 1028 | BUG_ON(!skb || !tcp_skb_pcount(skb)); |
| 1029 | tp = tcp_sk(sk); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1030 | prior_wstamp = tp->tcp_wstamp_ns; |
| 1031 | tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); |
| 1032 | skb->skb_mstamp_ns = tp->tcp_wstamp_ns; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1033 | if (clone_it) { |
| 1034 | TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq |
| 1035 | - tp->snd_una; |
| 1036 | oskb = skb; |
| 1037 | |
| 1038 | tcp_skb_tsorted_save(oskb) { |
| 1039 | if (unlikely(skb_cloned(oskb))) |
| 1040 | skb = pskb_copy(oskb, gfp_mask); |
| 1041 | else |
| 1042 | skb = skb_clone(oskb, gfp_mask); |
| 1043 | } tcp_skb_tsorted_restore(oskb); |
| 1044 | |
| 1045 | if (unlikely(!skb)) |
| 1046 | return -ENOBUFS; |
| 1047 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1048 | |
| 1049 | inet = inet_sk(sk); |
| 1050 | tcb = TCP_SKB_CB(skb); |
| 1051 | memset(&opts, 0, sizeof(opts)); |
| 1052 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1053 | if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1054 | tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1055 | } else { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1056 | tcp_options_size = tcp_established_options(sk, skb, &opts, |
| 1057 | &md5); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1058 | /* Force a PSH flag on all (GSO) packets to expedite GRO flush |
| 1059 | * at receiver : This slightly improve GRO performance. |
| 1060 | * Note that we do not force the PSH flag for non GSO packets, |
| 1061 | * because they might be sent under high congestion events, |
| 1062 | * and in this case it is better to delay the delivery of 1-MSS |
| 1063 | * packets and thus the corresponding ACK packet that would |
| 1064 | * release the following packet. |
| 1065 | */ |
| 1066 | if (tcp_skb_pcount(skb) > 1) |
| 1067 | tcb->tcp_flags |= TCPHDR_PSH; |
| 1068 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1069 | tcp_header_size = tcp_options_size + sizeof(struct tcphdr); |
| 1070 | |
| 1071 | /* if no packet is in qdisc/device queue, then allow XPS to select |
| 1072 | * another queue. We can be called from tcp_tsq_handler() |
| 1073 | * which holds one reference to sk. |
| 1074 | * |
| 1075 | * TODO: Ideally, in-flight pure ACK packets should not matter here. |
| 1076 | * One way to get this would be to set skb->truesize = 2 on them. |
| 1077 | */ |
| 1078 | skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); |
| 1079 | |
| 1080 | /* If we had to use memory reserve to allocate this skb, |
| 1081 | * this might cause drops if packet is looped back : |
| 1082 | * Other socket might not have SOCK_MEMALLOC. |
| 1083 | * Packets not looped back do not care about pfmemalloc. |
| 1084 | */ |
| 1085 | skb->pfmemalloc = 0; |
| 1086 | |
| 1087 | skb_push(skb, tcp_header_size); |
| 1088 | skb_reset_transport_header(skb); |
| 1089 | |
| 1090 | skb_orphan(skb); |
| 1091 | skb->sk = sk; |
| 1092 | skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; |
| 1093 | skb_set_hash_from_sk(skb, sk); |
| 1094 | refcount_add(skb->truesize, &sk->sk_wmem_alloc); |
| 1095 | |
| 1096 | skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm); |
| 1097 | |
| 1098 | /* Build TCP header and checksum it. */ |
| 1099 | th = (struct tcphdr *)skb->data; |
| 1100 | th->source = inet->inet_sport; |
| 1101 | th->dest = inet->inet_dport; |
| 1102 | th->seq = htonl(tcb->seq); |
| 1103 | th->ack_seq = htonl(rcv_nxt); |
| 1104 | *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | |
| 1105 | tcb->tcp_flags); |
| 1106 | |
| 1107 | th->check = 0; |
| 1108 | th->urg_ptr = 0; |
| 1109 | |
| 1110 | /* The urg_mode check is necessary during a below snd_una win probe */ |
| 1111 | if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { |
| 1112 | if (before(tp->snd_up, tcb->seq + 0x10000)) { |
| 1113 | th->urg_ptr = htons(tp->snd_up - tcb->seq); |
| 1114 | th->urg = 1; |
| 1115 | } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { |
| 1116 | th->urg_ptr = htons(0xFFFF); |
| 1117 | th->urg = 1; |
| 1118 | } |
| 1119 | } |
| 1120 | |
| 1121 | tcp_options_write((__be32 *)(th + 1), tp, &opts); |
| 1122 | skb_shinfo(skb)->gso_type = sk->sk_gso_type; |
| 1123 | if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { |
| 1124 | th->window = htons(tcp_select_window(sk)); |
| 1125 | tcp_ecn_send(sk, skb, th, tcp_header_size); |
| 1126 | } else { |
| 1127 | /* RFC1323: The window in SYN & SYN/ACK segments |
| 1128 | * is never scaled. |
| 1129 | */ |
| 1130 | th->window = htons(min(tp->rcv_wnd, 65535U)); |
| 1131 | } |
| 1132 | #ifdef CONFIG_TCP_MD5SIG |
| 1133 | /* Calculate the MD5 hash, as we have all we need now */ |
| 1134 | if (md5) { |
| 1135 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); |
| 1136 | tp->af_specific->calc_md5_hash(opts.hash_location, |
| 1137 | md5, sk, skb); |
| 1138 | } |
| 1139 | #endif |
| 1140 | |
| 1141 | icsk->icsk_af_ops->send_check(sk, skb); |
| 1142 | |
| 1143 | if (likely(tcb->tcp_flags & TCPHDR_ACK)) |
| 1144 | tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); |
| 1145 | |
| 1146 | if (skb->len != tcp_header_size) { |
| 1147 | tcp_event_data_sent(tp, sk); |
| 1148 | tp->data_segs_out += tcp_skb_pcount(skb); |
| 1149 | tp->bytes_sent += skb->len - tcp_header_size; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1150 | } |
| 1151 | |
| 1152 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) |
| 1153 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, |
| 1154 | tcp_skb_pcount(skb)); |
| 1155 | |
| 1156 | tp->segs_out += tcp_skb_pcount(skb); |
| 1157 | /* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */ |
| 1158 | skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); |
| 1159 | skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); |
| 1160 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1161 | /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1162 | |
| 1163 | /* Cleanup our debris for IP stacks */ |
| 1164 | memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), |
| 1165 | sizeof(struct inet6_skb_parm))); |
| 1166 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1167 | tcp_add_tx_delay(skb, tp); |
| 1168 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1169 | err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); |
| 1170 | |
| 1171 | if (unlikely(err > 0)) { |
| 1172 | tcp_enter_cwr(sk); |
| 1173 | err = net_xmit_eval(err); |
| 1174 | } |
| 1175 | if (!err && oskb) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1176 | tcp_update_skb_after_send(sk, oskb, prior_wstamp); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1177 | tcp_rate_skb_sent(sk, oskb); |
| 1178 | } |
| 1179 | return err; |
| 1180 | } |
| 1181 | |
| 1182 | static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, |
| 1183 | gfp_t gfp_mask) |
| 1184 | { |
| 1185 | return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, |
| 1186 | tcp_sk(sk)->rcv_nxt); |
| 1187 | } |
| 1188 | |
| 1189 | /* This routine just queues the buffer for sending. |
| 1190 | * |
| 1191 | * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, |
| 1192 | * otherwise socket can stall. |
| 1193 | */ |
| 1194 | static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) |
| 1195 | { |
| 1196 | struct tcp_sock *tp = tcp_sk(sk); |
| 1197 | |
| 1198 | /* Advance write_seq and place onto the write_queue. */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1199 | WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1200 | __skb_header_release(skb); |
| 1201 | tcp_add_write_queue_tail(sk, skb); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1202 | sk_wmem_queued_add(sk, skb->truesize); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1203 | sk_mem_charge(sk, skb->truesize); |
| 1204 | } |
| 1205 | |
| 1206 | /* Initialize TSO segments for a packet. */ |
| 1207 | static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now) |
| 1208 | { |
| 1209 | if (skb->len <= mss_now) { |
| 1210 | /* Avoid the costly divide in the normal |
| 1211 | * non-TSO case. |
| 1212 | */ |
| 1213 | tcp_skb_pcount_set(skb, 1); |
| 1214 | TCP_SKB_CB(skb)->tcp_gso_size = 0; |
| 1215 | } else { |
| 1216 | tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); |
| 1217 | TCP_SKB_CB(skb)->tcp_gso_size = mss_now; |
| 1218 | } |
| 1219 | } |
| 1220 | |
| 1221 | /* Pcount in the middle of the write queue got changed, we need to do various |
| 1222 | * tweaks to fix counters |
| 1223 | */ |
| 1224 | static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) |
| 1225 | { |
| 1226 | struct tcp_sock *tp = tcp_sk(sk); |
| 1227 | |
| 1228 | tp->packets_out -= decr; |
| 1229 | |
| 1230 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) |
| 1231 | tp->sacked_out -= decr; |
| 1232 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) |
| 1233 | tp->retrans_out -= decr; |
| 1234 | if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) |
| 1235 | tp->lost_out -= decr; |
| 1236 | |
| 1237 | /* Reno case is special. Sigh... */ |
| 1238 | if (tcp_is_reno(tp) && decr > 0) |
| 1239 | tp->sacked_out -= min_t(u32, tp->sacked_out, decr); |
| 1240 | |
| 1241 | if (tp->lost_skb_hint && |
| 1242 | before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && |
| 1243 | (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) |
| 1244 | tp->lost_cnt_hint -= decr; |
| 1245 | |
| 1246 | tcp_verify_left_out(tp); |
| 1247 | } |
| 1248 | |
| 1249 | static bool tcp_has_tx_tstamp(const struct sk_buff *skb) |
| 1250 | { |
| 1251 | return TCP_SKB_CB(skb)->txstamp_ack || |
| 1252 | (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); |
| 1253 | } |
| 1254 | |
| 1255 | static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) |
| 1256 | { |
| 1257 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
| 1258 | |
| 1259 | if (unlikely(tcp_has_tx_tstamp(skb)) && |
| 1260 | !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { |
| 1261 | struct skb_shared_info *shinfo2 = skb_shinfo(skb2); |
| 1262 | u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; |
| 1263 | |
| 1264 | shinfo->tx_flags &= ~tsflags; |
| 1265 | shinfo2->tx_flags |= tsflags; |
| 1266 | swap(shinfo->tskey, shinfo2->tskey); |
| 1267 | TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; |
| 1268 | TCP_SKB_CB(skb)->txstamp_ack = 0; |
| 1269 | } |
| 1270 | } |
| 1271 | |
| 1272 | static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2) |
| 1273 | { |
| 1274 | TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; |
| 1275 | TCP_SKB_CB(skb)->eor = 0; |
| 1276 | } |
| 1277 | |
| 1278 | /* Insert buff after skb on the write or rtx queue of sk. */ |
| 1279 | static void tcp_insert_write_queue_after(struct sk_buff *skb, |
| 1280 | struct sk_buff *buff, |
| 1281 | struct sock *sk, |
| 1282 | enum tcp_queue tcp_queue) |
| 1283 | { |
| 1284 | if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE) |
| 1285 | __skb_queue_after(&sk->sk_write_queue, skb, buff); |
| 1286 | else |
| 1287 | tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); |
| 1288 | } |
| 1289 | |
| 1290 | /* Function to create two new TCP segments. Shrinks the given segment |
| 1291 | * to the specified size and appends a new segment with the rest of the |
| 1292 | * packet to the list. This won't be called frequently, I hope. |
| 1293 | * Remember, these are still headerless SKBs at this point. |
| 1294 | */ |
| 1295 | int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, |
| 1296 | struct sk_buff *skb, u32 len, |
| 1297 | unsigned int mss_now, gfp_t gfp) |
| 1298 | { |
| 1299 | struct tcp_sock *tp = tcp_sk(sk); |
| 1300 | struct sk_buff *buff; |
| 1301 | int nsize, old_factor; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1302 | long limit; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1303 | int nlen; |
| 1304 | u8 flags; |
| 1305 | |
| 1306 | if (WARN_ON(len > skb->len)) |
| 1307 | return -EINVAL; |
| 1308 | |
| 1309 | nsize = skb_headlen(skb) - len; |
| 1310 | if (nsize < 0) |
| 1311 | nsize = 0; |
| 1312 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1313 | /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb. |
| 1314 | * We need some allowance to not penalize applications setting small |
| 1315 | * SO_SNDBUF values. |
| 1316 | * Also allow first and last skb in retransmit queue to be split. |
| 1317 | */ |
| 1318 | limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE); |
| 1319 | if (unlikely((sk->sk_wmem_queued >> 1) > limit && |
| 1320 | tcp_queue != TCP_FRAG_IN_WRITE_QUEUE && |
| 1321 | skb != tcp_rtx_queue_head(sk) && |
| 1322 | skb != tcp_rtx_queue_tail(sk))) { |
| 1323 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); |
| 1324 | return -ENOMEM; |
| 1325 | } |
| 1326 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1327 | if (skb_unclone(skb, gfp)) |
| 1328 | return -ENOMEM; |
| 1329 | |
| 1330 | /* Get a new skb... force flag on. */ |
| 1331 | buff = sk_stream_alloc_skb(sk, nsize, gfp, true); |
| 1332 | if (!buff) |
| 1333 | return -ENOMEM; /* We'll just try again later. */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1334 | skb_copy_decrypted(buff, skb); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1335 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1336 | sk_wmem_queued_add(sk, buff->truesize); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1337 | sk_mem_charge(sk, buff->truesize); |
| 1338 | nlen = skb->len - len - nsize; |
| 1339 | buff->truesize += nlen; |
| 1340 | skb->truesize -= nlen; |
| 1341 | |
| 1342 | /* Correct the sequence numbers. */ |
| 1343 | TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; |
| 1344 | TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; |
| 1345 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; |
| 1346 | |
| 1347 | /* PSH and FIN should only be set in the second packet. */ |
| 1348 | flags = TCP_SKB_CB(skb)->tcp_flags; |
| 1349 | TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); |
| 1350 | TCP_SKB_CB(buff)->tcp_flags = flags; |
| 1351 | TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; |
| 1352 | tcp_skb_fragment_eor(skb, buff); |
| 1353 | |
| 1354 | skb_split(skb, buff, len); |
| 1355 | |
| 1356 | buff->ip_summed = CHECKSUM_PARTIAL; |
| 1357 | |
| 1358 | buff->tstamp = skb->tstamp; |
| 1359 | tcp_fragment_tstamp(skb, buff); |
| 1360 | |
| 1361 | old_factor = tcp_skb_pcount(skb); |
| 1362 | |
| 1363 | /* Fix up tso_factor for both original and new SKB. */ |
| 1364 | tcp_set_skb_tso_segs(skb, mss_now); |
| 1365 | tcp_set_skb_tso_segs(buff, mss_now); |
| 1366 | |
| 1367 | /* Update delivered info for the new segment */ |
| 1368 | TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; |
| 1369 | |
| 1370 | /* If this packet has been sent out already, we must |
| 1371 | * adjust the various packet counters. |
| 1372 | */ |
| 1373 | if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { |
| 1374 | int diff = old_factor - tcp_skb_pcount(skb) - |
| 1375 | tcp_skb_pcount(buff); |
| 1376 | |
| 1377 | if (diff) |
| 1378 | tcp_adjust_pcount(sk, skb, diff); |
| 1379 | } |
| 1380 | |
| 1381 | /* Link BUFF into the send queue. */ |
| 1382 | __skb_header_release(buff); |
| 1383 | tcp_insert_write_queue_after(skb, buff, sk, tcp_queue); |
| 1384 | if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE) |
| 1385 | list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor); |
| 1386 | |
| 1387 | return 0; |
| 1388 | } |
| 1389 | |
| 1390 | /* This is similar to __pskb_pull_tail(). The difference is that pulled |
| 1391 | * data is not copied, but immediately discarded. |
| 1392 | */ |
| 1393 | static int __pskb_trim_head(struct sk_buff *skb, int len) |
| 1394 | { |
| 1395 | struct skb_shared_info *shinfo; |
| 1396 | int i, k, eat; |
| 1397 | |
| 1398 | eat = min_t(int, len, skb_headlen(skb)); |
| 1399 | if (eat) { |
| 1400 | __skb_pull(skb, eat); |
| 1401 | len -= eat; |
| 1402 | if (!len) |
| 1403 | return 0; |
| 1404 | } |
| 1405 | eat = len; |
| 1406 | k = 0; |
| 1407 | shinfo = skb_shinfo(skb); |
| 1408 | for (i = 0; i < shinfo->nr_frags; i++) { |
| 1409 | int size = skb_frag_size(&shinfo->frags[i]); |
| 1410 | |
| 1411 | if (size <= eat) { |
| 1412 | skb_frag_unref(skb, i); |
| 1413 | eat -= size; |
| 1414 | } else { |
| 1415 | shinfo->frags[k] = shinfo->frags[i]; |
| 1416 | if (eat) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1417 | skb_frag_off_add(&shinfo->frags[k], eat); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1418 | skb_frag_size_sub(&shinfo->frags[k], eat); |
| 1419 | eat = 0; |
| 1420 | } |
| 1421 | k++; |
| 1422 | } |
| 1423 | } |
| 1424 | shinfo->nr_frags = k; |
| 1425 | |
| 1426 | skb->data_len -= len; |
| 1427 | skb->len = skb->data_len; |
| 1428 | return len; |
| 1429 | } |
| 1430 | |
| 1431 | /* Remove acked data from a packet in the transmit queue. */ |
| 1432 | int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) |
| 1433 | { |
| 1434 | u32 delta_truesize; |
| 1435 | |
| 1436 | if (skb_unclone(skb, GFP_ATOMIC)) |
| 1437 | return -ENOMEM; |
| 1438 | |
| 1439 | delta_truesize = __pskb_trim_head(skb, len); |
| 1440 | |
| 1441 | TCP_SKB_CB(skb)->seq += len; |
| 1442 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 1443 | |
| 1444 | if (delta_truesize) { |
| 1445 | skb->truesize -= delta_truesize; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1446 | sk_wmem_queued_add(sk, -delta_truesize); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1447 | sk_mem_uncharge(sk, delta_truesize); |
| 1448 | sock_set_flag(sk, SOCK_QUEUE_SHRUNK); |
| 1449 | } |
| 1450 | |
| 1451 | /* Any change of skb->len requires recalculation of tso factor. */ |
| 1452 | if (tcp_skb_pcount(skb) > 1) |
| 1453 | tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb)); |
| 1454 | |
| 1455 | return 0; |
| 1456 | } |
| 1457 | |
| 1458 | /* Calculate MSS not accounting any TCP options. */ |
| 1459 | static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) |
| 1460 | { |
| 1461 | const struct tcp_sock *tp = tcp_sk(sk); |
| 1462 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 1463 | int mss_now; |
| 1464 | |
| 1465 | /* Calculate base mss without TCP options: |
| 1466 | It is MMS_S - sizeof(tcphdr) of rfc1122 |
| 1467 | */ |
| 1468 | mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); |
| 1469 | |
| 1470 | /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ |
| 1471 | if (icsk->icsk_af_ops->net_frag_header_len) { |
| 1472 | const struct dst_entry *dst = __sk_dst_get(sk); |
| 1473 | |
| 1474 | if (dst && dst_allfrag(dst)) |
| 1475 | mss_now -= icsk->icsk_af_ops->net_frag_header_len; |
| 1476 | } |
| 1477 | |
| 1478 | /* Clamp it (mss_clamp does not include tcp options) */ |
| 1479 | if (mss_now > tp->rx_opt.mss_clamp) |
| 1480 | mss_now = tp->rx_opt.mss_clamp; |
| 1481 | |
| 1482 | /* Now subtract optional transport overhead */ |
| 1483 | mss_now -= icsk->icsk_ext_hdr_len; |
| 1484 | |
| 1485 | /* Then reserve room for full set of TCP options and 8 bytes of data */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1486 | mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1487 | return mss_now; |
| 1488 | } |
| 1489 | |
| 1490 | /* Calculate MSS. Not accounting for SACKs here. */ |
| 1491 | int tcp_mtu_to_mss(struct sock *sk, int pmtu) |
| 1492 | { |
| 1493 | /* Subtract TCP options size, not including SACKs */ |
| 1494 | return __tcp_mtu_to_mss(sk, pmtu) - |
| 1495 | (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); |
| 1496 | } |
| 1497 | |
| 1498 | /* Inverse of above */ |
| 1499 | int tcp_mss_to_mtu(struct sock *sk, int mss) |
| 1500 | { |
| 1501 | const struct tcp_sock *tp = tcp_sk(sk); |
| 1502 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 1503 | int mtu; |
| 1504 | |
| 1505 | mtu = mss + |
| 1506 | tp->tcp_header_len + |
| 1507 | icsk->icsk_ext_hdr_len + |
| 1508 | icsk->icsk_af_ops->net_header_len; |
| 1509 | |
| 1510 | /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ |
| 1511 | if (icsk->icsk_af_ops->net_frag_header_len) { |
| 1512 | const struct dst_entry *dst = __sk_dst_get(sk); |
| 1513 | |
| 1514 | if (dst && dst_allfrag(dst)) |
| 1515 | mtu += icsk->icsk_af_ops->net_frag_header_len; |
| 1516 | } |
| 1517 | return mtu; |
| 1518 | } |
| 1519 | EXPORT_SYMBOL(tcp_mss_to_mtu); |
| 1520 | |
| 1521 | /* MTU probing init per socket */ |
| 1522 | void tcp_mtup_init(struct sock *sk) |
| 1523 | { |
| 1524 | struct tcp_sock *tp = tcp_sk(sk); |
| 1525 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 1526 | struct net *net = sock_net(sk); |
| 1527 | |
| 1528 | icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1; |
| 1529 | icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + |
| 1530 | icsk->icsk_af_ops->net_header_len; |
| 1531 | icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss); |
| 1532 | icsk->icsk_mtup.probe_size = 0; |
| 1533 | if (icsk->icsk_mtup.enabled) |
| 1534 | icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; |
| 1535 | } |
| 1536 | EXPORT_SYMBOL(tcp_mtup_init); |
| 1537 | |
| 1538 | /* This function synchronize snd mss to current pmtu/exthdr set. |
| 1539 | |
| 1540 | tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts |
| 1541 | for TCP options, but includes only bare TCP header. |
| 1542 | |
| 1543 | tp->rx_opt.mss_clamp is mss negotiated at connection setup. |
| 1544 | It is minimum of user_mss and mss received with SYN. |
| 1545 | It also does not include TCP options. |
| 1546 | |
| 1547 | inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function. |
| 1548 | |
| 1549 | tp->mss_cache is current effective sending mss, including |
| 1550 | all tcp options except for SACKs. It is evaluated, |
| 1551 | taking into account current pmtu, but never exceeds |
| 1552 | tp->rx_opt.mss_clamp. |
| 1553 | |
| 1554 | NOTE1. rfc1122 clearly states that advertised MSS |
| 1555 | DOES NOT include either tcp or ip options. |
| 1556 | |
| 1557 | NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache |
| 1558 | are READ ONLY outside this function. --ANK (980731) |
| 1559 | */ |
| 1560 | unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) |
| 1561 | { |
| 1562 | struct tcp_sock *tp = tcp_sk(sk); |
| 1563 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 1564 | int mss_now; |
| 1565 | |
| 1566 | if (icsk->icsk_mtup.search_high > pmtu) |
| 1567 | icsk->icsk_mtup.search_high = pmtu; |
| 1568 | |
| 1569 | mss_now = tcp_mtu_to_mss(sk, pmtu); |
| 1570 | mss_now = tcp_bound_to_half_wnd(tp, mss_now); |
| 1571 | |
| 1572 | /* And store cached results */ |
| 1573 | icsk->icsk_pmtu_cookie = pmtu; |
| 1574 | if (icsk->icsk_mtup.enabled) |
| 1575 | mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); |
| 1576 | tp->mss_cache = mss_now; |
| 1577 | |
| 1578 | return mss_now; |
| 1579 | } |
| 1580 | EXPORT_SYMBOL(tcp_sync_mss); |
| 1581 | |
| 1582 | /* Compute the current effective MSS, taking SACKs and IP options, |
| 1583 | * and even PMTU discovery events into account. |
| 1584 | */ |
| 1585 | unsigned int tcp_current_mss(struct sock *sk) |
| 1586 | { |
| 1587 | const struct tcp_sock *tp = tcp_sk(sk); |
| 1588 | const struct dst_entry *dst = __sk_dst_get(sk); |
| 1589 | u32 mss_now; |
| 1590 | unsigned int header_len; |
| 1591 | struct tcp_out_options opts; |
| 1592 | struct tcp_md5sig_key *md5; |
| 1593 | |
| 1594 | mss_now = tp->mss_cache; |
| 1595 | |
| 1596 | if (dst) { |
| 1597 | u32 mtu = dst_mtu(dst); |
| 1598 | if (mtu != inet_csk(sk)->icsk_pmtu_cookie) |
| 1599 | mss_now = tcp_sync_mss(sk, mtu); |
| 1600 | } |
| 1601 | |
| 1602 | header_len = tcp_established_options(sk, NULL, &opts, &md5) + |
| 1603 | sizeof(struct tcphdr); |
| 1604 | /* The mss_cache is sized based on tp->tcp_header_len, which assumes |
| 1605 | * some common options. If this is an odd packet (because we have SACK |
| 1606 | * blocks etc) then our calculated header_len will be different, and |
| 1607 | * we have to adjust mss_now correspondingly */ |
| 1608 | if (header_len != tp->tcp_header_len) { |
| 1609 | int delta = (int) header_len - tp->tcp_header_len; |
| 1610 | mss_now -= delta; |
| 1611 | } |
| 1612 | |
| 1613 | return mss_now; |
| 1614 | } |
| 1615 | |
| 1616 | /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. |
| 1617 | * As additional protections, we do not touch cwnd in retransmission phases, |
| 1618 | * and if application hit its sndbuf limit recently. |
| 1619 | */ |
| 1620 | static void tcp_cwnd_application_limited(struct sock *sk) |
| 1621 | { |
| 1622 | struct tcp_sock *tp = tcp_sk(sk); |
| 1623 | |
| 1624 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && |
| 1625 | sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { |
| 1626 | /* Limited by application or receiver window. */ |
| 1627 | u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); |
| 1628 | u32 win_used = max(tp->snd_cwnd_used, init_win); |
| 1629 | if (win_used < tp->snd_cwnd) { |
| 1630 | tp->snd_ssthresh = tcp_current_ssthresh(sk); |
| 1631 | tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; |
| 1632 | } |
| 1633 | tp->snd_cwnd_used = 0; |
| 1634 | } |
| 1635 | tp->snd_cwnd_stamp = tcp_jiffies32; |
| 1636 | } |
| 1637 | |
| 1638 | static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) |
| 1639 | { |
| 1640 | const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; |
| 1641 | struct tcp_sock *tp = tcp_sk(sk); |
| 1642 | |
| 1643 | /* Track the maximum number of outstanding packets in each |
| 1644 | * window, and remember whether we were cwnd-limited then. |
| 1645 | */ |
| 1646 | if (!before(tp->snd_una, tp->max_packets_seq) || |
| 1647 | tp->packets_out > tp->max_packets_out) { |
| 1648 | tp->max_packets_out = tp->packets_out; |
| 1649 | tp->max_packets_seq = tp->snd_nxt; |
| 1650 | tp->is_cwnd_limited = is_cwnd_limited; |
| 1651 | } |
| 1652 | |
| 1653 | if (tcp_is_cwnd_limited(sk)) { |
| 1654 | /* Network is feed fully. */ |
| 1655 | tp->snd_cwnd_used = 0; |
| 1656 | tp->snd_cwnd_stamp = tcp_jiffies32; |
| 1657 | } else { |
| 1658 | /* Network starves. */ |
| 1659 | if (tp->packets_out > tp->snd_cwnd_used) |
| 1660 | tp->snd_cwnd_used = tp->packets_out; |
| 1661 | |
| 1662 | if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle && |
| 1663 | (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && |
| 1664 | !ca_ops->cong_control) |
| 1665 | tcp_cwnd_application_limited(sk); |
| 1666 | |
| 1667 | /* The following conditions together indicate the starvation |
| 1668 | * is caused by insufficient sender buffer: |
| 1669 | * 1) just sent some data (see tcp_write_xmit) |
| 1670 | * 2) not cwnd limited (this else condition) |
| 1671 | * 3) no more data to send (tcp_write_queue_empty()) |
| 1672 | * 4) application is hitting buffer limit (SOCK_NOSPACE) |
| 1673 | */ |
| 1674 | if (tcp_write_queue_empty(sk) && sk->sk_socket && |
| 1675 | test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && |
| 1676 | (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) |
| 1677 | tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED); |
| 1678 | } |
| 1679 | } |
| 1680 | |
| 1681 | /* Minshall's variant of the Nagle send check. */ |
| 1682 | static bool tcp_minshall_check(const struct tcp_sock *tp) |
| 1683 | { |
| 1684 | return after(tp->snd_sml, tp->snd_una) && |
| 1685 | !after(tp->snd_sml, tp->snd_nxt); |
| 1686 | } |
| 1687 | |
| 1688 | /* Update snd_sml if this skb is under mss |
| 1689 | * Note that a TSO packet might end with a sub-mss segment |
| 1690 | * The test is really : |
| 1691 | * if ((skb->len % mss) != 0) |
| 1692 | * tp->snd_sml = TCP_SKB_CB(skb)->end_seq; |
| 1693 | * But we can avoid doing the divide again given we already have |
| 1694 | * skb_pcount = skb->len / mss_now |
| 1695 | */ |
| 1696 | static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, |
| 1697 | const struct sk_buff *skb) |
| 1698 | { |
| 1699 | if (skb->len < tcp_skb_pcount(skb) * mss_now) |
| 1700 | tp->snd_sml = TCP_SKB_CB(skb)->end_seq; |
| 1701 | } |
| 1702 | |
| 1703 | /* Return false, if packet can be sent now without violation Nagle's rules: |
| 1704 | * 1. It is full sized. (provided by caller in %partial bool) |
| 1705 | * 2. Or it contains FIN. (already checked by caller) |
| 1706 | * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. |
| 1707 | * 4. Or TCP_CORK is not set, and all sent packets are ACKed. |
| 1708 | * With Minshall's modification: all sent small packets are ACKed. |
| 1709 | */ |
| 1710 | static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, |
| 1711 | int nonagle) |
| 1712 | { |
| 1713 | return partial && |
| 1714 | ((nonagle & TCP_NAGLE_CORK) || |
| 1715 | (!nonagle && tp->packets_out && tcp_minshall_check(tp))); |
| 1716 | } |
| 1717 | |
| 1718 | /* Return how many segs we'd like on a TSO packet, |
| 1719 | * to send one TSO packet per ms |
| 1720 | */ |
| 1721 | static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, |
| 1722 | int min_tso_segs) |
| 1723 | { |
| 1724 | u32 bytes, segs; |
| 1725 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1726 | bytes = min_t(unsigned long, |
| 1727 | sk->sk_pacing_rate >> sk->sk_pacing_shift, |
| 1728 | sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1729 | |
| 1730 | /* Goal is to send at least one packet per ms, |
| 1731 | * not one big TSO packet every 100 ms. |
| 1732 | * This preserves ACK clocking and is consistent |
| 1733 | * with tcp_tso_should_defer() heuristic. |
| 1734 | */ |
| 1735 | segs = max_t(u32, bytes / mss_now, min_tso_segs); |
| 1736 | |
| 1737 | return segs; |
| 1738 | } |
| 1739 | |
| 1740 | /* Return the number of segments we want in the skb we are transmitting. |
| 1741 | * See if congestion control module wants to decide; otherwise, autosize. |
| 1742 | */ |
| 1743 | static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) |
| 1744 | { |
| 1745 | const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; |
| 1746 | u32 min_tso, tso_segs; |
| 1747 | |
| 1748 | min_tso = ca_ops->min_tso_segs ? |
| 1749 | ca_ops->min_tso_segs(sk) : |
| 1750 | sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs; |
| 1751 | |
| 1752 | tso_segs = tcp_tso_autosize(sk, mss_now, min_tso); |
| 1753 | return min_t(u32, tso_segs, sk->sk_gso_max_segs); |
| 1754 | } |
| 1755 | |
| 1756 | /* Returns the portion of skb which can be sent right away */ |
| 1757 | static unsigned int tcp_mss_split_point(const struct sock *sk, |
| 1758 | const struct sk_buff *skb, |
| 1759 | unsigned int mss_now, |
| 1760 | unsigned int max_segs, |
| 1761 | int nonagle) |
| 1762 | { |
| 1763 | const struct tcp_sock *tp = tcp_sk(sk); |
| 1764 | u32 partial, needed, window, max_len; |
| 1765 | |
| 1766 | window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; |
| 1767 | max_len = mss_now * max_segs; |
| 1768 | |
| 1769 | if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) |
| 1770 | return max_len; |
| 1771 | |
| 1772 | needed = min(skb->len, window); |
| 1773 | |
| 1774 | if (max_len <= needed) |
| 1775 | return max_len; |
| 1776 | |
| 1777 | partial = needed % mss_now; |
| 1778 | /* If last segment is not a full MSS, check if Nagle rules allow us |
| 1779 | * to include this last segment in this skb. |
| 1780 | * Otherwise, we'll split the skb at last MSS boundary |
| 1781 | */ |
| 1782 | if (tcp_nagle_check(partial != 0, tp, nonagle)) |
| 1783 | return needed - partial; |
| 1784 | |
| 1785 | return needed; |
| 1786 | } |
| 1787 | |
| 1788 | /* Can at least one segment of SKB be sent right now, according to the |
| 1789 | * congestion window rules? If so, return how many segments are allowed. |
| 1790 | */ |
| 1791 | static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, |
| 1792 | const struct sk_buff *skb) |
| 1793 | { |
| 1794 | u32 in_flight, cwnd, halfcwnd; |
| 1795 | |
| 1796 | /* Don't be strict about the congestion window for the final FIN. */ |
| 1797 | if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && |
| 1798 | tcp_skb_pcount(skb) == 1) |
| 1799 | return 1; |
| 1800 | |
| 1801 | in_flight = tcp_packets_in_flight(tp); |
| 1802 | cwnd = tp->snd_cwnd; |
| 1803 | if (in_flight >= cwnd) |
| 1804 | return 0; |
| 1805 | |
| 1806 | /* For better scheduling, ensure we have at least |
| 1807 | * 2 GSO packets in flight. |
| 1808 | */ |
| 1809 | halfcwnd = max(cwnd >> 1, 1U); |
| 1810 | return min(halfcwnd, cwnd - in_flight); |
| 1811 | } |
| 1812 | |
| 1813 | /* Initialize TSO state of a skb. |
| 1814 | * This must be invoked the first time we consider transmitting |
| 1815 | * SKB onto the wire. |
| 1816 | */ |
| 1817 | static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now) |
| 1818 | { |
| 1819 | int tso_segs = tcp_skb_pcount(skb); |
| 1820 | |
| 1821 | if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { |
| 1822 | tcp_set_skb_tso_segs(skb, mss_now); |
| 1823 | tso_segs = tcp_skb_pcount(skb); |
| 1824 | } |
| 1825 | return tso_segs; |
| 1826 | } |
| 1827 | |
| 1828 | |
| 1829 | /* Return true if the Nagle test allows this packet to be |
| 1830 | * sent now. |
| 1831 | */ |
| 1832 | static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, |
| 1833 | unsigned int cur_mss, int nonagle) |
| 1834 | { |
| 1835 | /* Nagle rule does not apply to frames, which sit in the middle of the |
| 1836 | * write_queue (they have no chances to get new data). |
| 1837 | * |
| 1838 | * This is implemented in the callers, where they modify the 'nonagle' |
| 1839 | * argument based upon the location of SKB in the send queue. |
| 1840 | */ |
| 1841 | if (nonagle & TCP_NAGLE_PUSH) |
| 1842 | return true; |
| 1843 | |
| 1844 | /* Don't use the nagle rule for urgent data (or for the final FIN). */ |
| 1845 | if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) |
| 1846 | return true; |
| 1847 | |
| 1848 | if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) |
| 1849 | return true; |
| 1850 | |
| 1851 | return false; |
| 1852 | } |
| 1853 | |
| 1854 | /* Does at least the first segment of SKB fit into the send window? */ |
| 1855 | static bool tcp_snd_wnd_test(const struct tcp_sock *tp, |
| 1856 | const struct sk_buff *skb, |
| 1857 | unsigned int cur_mss) |
| 1858 | { |
| 1859 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; |
| 1860 | |
| 1861 | if (skb->len > cur_mss) |
| 1862 | end_seq = TCP_SKB_CB(skb)->seq + cur_mss; |
| 1863 | |
| 1864 | return !after(end_seq, tcp_wnd_end(tp)); |
| 1865 | } |
| 1866 | |
| 1867 | /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet |
| 1868 | * which is put after SKB on the list. It is very much like |
| 1869 | * tcp_fragment() except that it may make several kinds of assumptions |
| 1870 | * in order to speed up the splitting operation. In particular, we |
| 1871 | * know that all the data is in scatter-gather pages, and that the |
| 1872 | * packet has never been sent out before (and thus is not cloned). |
| 1873 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1874 | static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1875 | unsigned int mss_now, gfp_t gfp) |
| 1876 | { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1877 | int nlen = skb->len - len; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1878 | struct sk_buff *buff; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1879 | u8 flags; |
| 1880 | |
| 1881 | /* All of a TSO frame must be composed of paged data. */ |
| 1882 | if (skb->len != skb->data_len) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1883 | return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, |
| 1884 | skb, len, mss_now, gfp); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1885 | |
| 1886 | buff = sk_stream_alloc_skb(sk, 0, gfp, true); |
| 1887 | if (unlikely(!buff)) |
| 1888 | return -ENOMEM; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1889 | skb_copy_decrypted(buff, skb); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1890 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1891 | sk_wmem_queued_add(sk, buff->truesize); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1892 | sk_mem_charge(sk, buff->truesize); |
| 1893 | buff->truesize += nlen; |
| 1894 | skb->truesize -= nlen; |
| 1895 | |
| 1896 | /* Correct the sequence numbers. */ |
| 1897 | TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; |
| 1898 | TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; |
| 1899 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; |
| 1900 | |
| 1901 | /* PSH and FIN should only be set in the second packet. */ |
| 1902 | flags = TCP_SKB_CB(skb)->tcp_flags; |
| 1903 | TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); |
| 1904 | TCP_SKB_CB(buff)->tcp_flags = flags; |
| 1905 | |
| 1906 | /* This packet was never sent out yet, so no SACK bits. */ |
| 1907 | TCP_SKB_CB(buff)->sacked = 0; |
| 1908 | |
| 1909 | tcp_skb_fragment_eor(skb, buff); |
| 1910 | |
| 1911 | buff->ip_summed = CHECKSUM_PARTIAL; |
| 1912 | skb_split(skb, buff, len); |
| 1913 | tcp_fragment_tstamp(skb, buff); |
| 1914 | |
| 1915 | /* Fix up tso_factor for both original and new SKB. */ |
| 1916 | tcp_set_skb_tso_segs(skb, mss_now); |
| 1917 | tcp_set_skb_tso_segs(buff, mss_now); |
| 1918 | |
| 1919 | /* Link BUFF into the send queue. */ |
| 1920 | __skb_header_release(buff); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1921 | tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1922 | |
| 1923 | return 0; |
| 1924 | } |
| 1925 | |
| 1926 | /* Try to defer sending, if possible, in order to minimize the amount |
| 1927 | * of TSO splitting we do. View it as a kind of TSO Nagle test. |
| 1928 | * |
| 1929 | * This algorithm is from John Heffner. |
| 1930 | */ |
| 1931 | static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, |
| 1932 | bool *is_cwnd_limited, |
| 1933 | bool *is_rwnd_limited, |
| 1934 | u32 max_segs) |
| 1935 | { |
| 1936 | const struct inet_connection_sock *icsk = inet_csk(sk); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1937 | u32 send_win, cong_win, limit, in_flight; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1938 | struct tcp_sock *tp = tcp_sk(sk); |
| 1939 | struct sk_buff *head; |
| 1940 | int win_divisor; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1941 | s64 delta; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1942 | |
| 1943 | if (icsk->icsk_ca_state >= TCP_CA_Recovery) |
| 1944 | goto send_now; |
| 1945 | |
| 1946 | /* Avoid bursty behavior by allowing defer |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1947 | * only if the last write was recent (1 ms). |
| 1948 | * Note that tp->tcp_wstamp_ns can be in the future if we have |
| 1949 | * packets waiting in a qdisc or device for EDT delivery. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1950 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1951 | delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; |
| 1952 | if (delta > 0) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1953 | goto send_now; |
| 1954 | |
| 1955 | in_flight = tcp_packets_in_flight(tp); |
| 1956 | |
| 1957 | BUG_ON(tcp_skb_pcount(skb) <= 1); |
| 1958 | BUG_ON(tp->snd_cwnd <= in_flight); |
| 1959 | |
| 1960 | send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; |
| 1961 | |
| 1962 | /* From in_flight test above, we know that cwnd > in_flight. */ |
| 1963 | cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; |
| 1964 | |
| 1965 | limit = min(send_win, cong_win); |
| 1966 | |
| 1967 | /* If a full-sized TSO skb can be sent, do it. */ |
| 1968 | if (limit >= max_segs * tp->mss_cache) |
| 1969 | goto send_now; |
| 1970 | |
| 1971 | /* Middle in queue won't get any more data, full sendable already? */ |
| 1972 | if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) |
| 1973 | goto send_now; |
| 1974 | |
| 1975 | win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); |
| 1976 | if (win_divisor) { |
| 1977 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); |
| 1978 | |
| 1979 | /* If at least some fraction of a window is available, |
| 1980 | * just use it. |
| 1981 | */ |
| 1982 | chunk /= win_divisor; |
| 1983 | if (limit >= chunk) |
| 1984 | goto send_now; |
| 1985 | } else { |
| 1986 | /* Different approach, try not to defer past a single |
| 1987 | * ACK. Receiver should ACK every other full sized |
| 1988 | * frame, so if we have space for more than 3 frames |
| 1989 | * then send now. |
| 1990 | */ |
| 1991 | if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) |
| 1992 | goto send_now; |
| 1993 | } |
| 1994 | |
| 1995 | /* TODO : use tsorted_sent_queue ? */ |
| 1996 | head = tcp_rtx_queue_head(sk); |
| 1997 | if (!head) |
| 1998 | goto send_now; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1999 | delta = tp->tcp_clock_cache - head->tstamp; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2000 | /* If next ACK is likely to come too late (half srtt), do not defer */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2001 | if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2002 | goto send_now; |
| 2003 | |
| 2004 | /* Ok, it looks like it is advisable to defer. |
| 2005 | * Three cases are tracked : |
| 2006 | * 1) We are cwnd-limited |
| 2007 | * 2) We are rwnd-limited |
| 2008 | * 3) We are application limited. |
| 2009 | */ |
| 2010 | if (cong_win < send_win) { |
| 2011 | if (cong_win <= skb->len) { |
| 2012 | *is_cwnd_limited = true; |
| 2013 | return true; |
| 2014 | } |
| 2015 | } else { |
| 2016 | if (send_win <= skb->len) { |
| 2017 | *is_rwnd_limited = true; |
| 2018 | return true; |
| 2019 | } |
| 2020 | } |
| 2021 | |
| 2022 | /* If this packet won't get more data, do not wait. */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2023 | if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || |
| 2024 | TCP_SKB_CB(skb)->eor) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2025 | goto send_now; |
| 2026 | |
| 2027 | return true; |
| 2028 | |
| 2029 | send_now: |
| 2030 | return false; |
| 2031 | } |
| 2032 | |
| 2033 | static inline void tcp_mtu_check_reprobe(struct sock *sk) |
| 2034 | { |
| 2035 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 2036 | struct tcp_sock *tp = tcp_sk(sk); |
| 2037 | struct net *net = sock_net(sk); |
| 2038 | u32 interval; |
| 2039 | s32 delta; |
| 2040 | |
| 2041 | interval = net->ipv4.sysctl_tcp_probe_interval; |
| 2042 | delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; |
| 2043 | if (unlikely(delta >= interval * HZ)) { |
| 2044 | int mss = tcp_current_mss(sk); |
| 2045 | |
| 2046 | /* Update current search range */ |
| 2047 | icsk->icsk_mtup.probe_size = 0; |
| 2048 | icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + |
| 2049 | sizeof(struct tcphdr) + |
| 2050 | icsk->icsk_af_ops->net_header_len; |
| 2051 | icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); |
| 2052 | |
| 2053 | /* Update probe time stamp */ |
| 2054 | icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; |
| 2055 | } |
| 2056 | } |
| 2057 | |
| 2058 | static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) |
| 2059 | { |
| 2060 | struct sk_buff *skb, *next; |
| 2061 | |
| 2062 | skb = tcp_send_head(sk); |
| 2063 | tcp_for_write_queue_from_safe(skb, next, sk) { |
| 2064 | if (len <= skb->len) |
| 2065 | break; |
| 2066 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2067 | if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2068 | return false; |
| 2069 | |
| 2070 | len -= skb->len; |
| 2071 | } |
| 2072 | |
| 2073 | return true; |
| 2074 | } |
| 2075 | |
| 2076 | /* Create a new MTU probe if we are ready. |
| 2077 | * MTU probe is regularly attempting to increase the path MTU by |
| 2078 | * deliberately sending larger packets. This discovers routing |
| 2079 | * changes resulting in larger path MTUs. |
| 2080 | * |
| 2081 | * Returns 0 if we should wait to probe (no cwnd available), |
| 2082 | * 1 if a probe was sent, |
| 2083 | * -1 otherwise |
| 2084 | */ |
| 2085 | static int tcp_mtu_probe(struct sock *sk) |
| 2086 | { |
| 2087 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 2088 | struct tcp_sock *tp = tcp_sk(sk); |
| 2089 | struct sk_buff *skb, *nskb, *next; |
| 2090 | struct net *net = sock_net(sk); |
| 2091 | int probe_size; |
| 2092 | int size_needed; |
| 2093 | int copy, len; |
| 2094 | int mss_now; |
| 2095 | int interval; |
| 2096 | |
| 2097 | /* Not currently probing/verifying, |
| 2098 | * not in recovery, |
| 2099 | * have enough cwnd, and |
| 2100 | * not SACKing (the variable headers throw things off) |
| 2101 | */ |
| 2102 | if (likely(!icsk->icsk_mtup.enabled || |
| 2103 | icsk->icsk_mtup.probe_size || |
| 2104 | inet_csk(sk)->icsk_ca_state != TCP_CA_Open || |
| 2105 | tp->snd_cwnd < 11 || |
| 2106 | tp->rx_opt.num_sacks || tp->rx_opt.dsack)) |
| 2107 | return -1; |
| 2108 | |
| 2109 | /* Use binary search for probe_size between tcp_mss_base, |
| 2110 | * and current mss_clamp. if (search_high - search_low) |
| 2111 | * smaller than a threshold, backoff from probing. |
| 2112 | */ |
| 2113 | mss_now = tcp_current_mss(sk); |
| 2114 | probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + |
| 2115 | icsk->icsk_mtup.search_low) >> 1); |
| 2116 | size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; |
| 2117 | interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; |
| 2118 | /* When misfortune happens, we are reprobing actively, |
| 2119 | * and then reprobe timer has expired. We stick with current |
| 2120 | * probing process by not resetting search range to its orignal. |
| 2121 | */ |
| 2122 | if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || |
| 2123 | interval < net->ipv4.sysctl_tcp_probe_threshold) { |
| 2124 | /* Check whether enough time has elaplased for |
| 2125 | * another round of probing. |
| 2126 | */ |
| 2127 | tcp_mtu_check_reprobe(sk); |
| 2128 | return -1; |
| 2129 | } |
| 2130 | |
| 2131 | /* Have enough data in the send queue to probe? */ |
| 2132 | if (tp->write_seq - tp->snd_nxt < size_needed) |
| 2133 | return -1; |
| 2134 | |
| 2135 | if (tp->snd_wnd < size_needed) |
| 2136 | return -1; |
| 2137 | if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) |
| 2138 | return 0; |
| 2139 | |
| 2140 | /* Do we need to wait to drain cwnd? With none in flight, don't stall */ |
| 2141 | if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { |
| 2142 | if (!tcp_packets_in_flight(tp)) |
| 2143 | return -1; |
| 2144 | else |
| 2145 | return 0; |
| 2146 | } |
| 2147 | |
| 2148 | if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) |
| 2149 | return -1; |
| 2150 | |
| 2151 | /* We're allowed to probe. Build it now. */ |
| 2152 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); |
| 2153 | if (!nskb) |
| 2154 | return -1; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2155 | sk_wmem_queued_add(sk, nskb->truesize); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2156 | sk_mem_charge(sk, nskb->truesize); |
| 2157 | |
| 2158 | skb = tcp_send_head(sk); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2159 | skb_copy_decrypted(nskb, skb); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2160 | |
| 2161 | TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; |
| 2162 | TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; |
| 2163 | TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; |
| 2164 | TCP_SKB_CB(nskb)->sacked = 0; |
| 2165 | nskb->csum = 0; |
| 2166 | nskb->ip_summed = CHECKSUM_PARTIAL; |
| 2167 | |
| 2168 | tcp_insert_write_queue_before(nskb, skb, sk); |
| 2169 | tcp_highest_sack_replace(sk, skb, nskb); |
| 2170 | |
| 2171 | len = 0; |
| 2172 | tcp_for_write_queue_from_safe(skb, next, sk) { |
| 2173 | copy = min_t(int, skb->len, probe_size - len); |
| 2174 | skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); |
| 2175 | |
| 2176 | if (skb->len <= copy) { |
| 2177 | /* We've eaten all the data from this skb. |
| 2178 | * Throw it away. */ |
| 2179 | TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; |
| 2180 | /* If this is the last SKB we copy and eor is set |
| 2181 | * we need to propagate it to the new skb. |
| 2182 | */ |
| 2183 | TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2184 | tcp_skb_collapse_tstamp(nskb, skb); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2185 | tcp_unlink_write_queue(skb, sk); |
| 2186 | sk_wmem_free_skb(sk, skb); |
| 2187 | } else { |
| 2188 | TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & |
| 2189 | ~(TCPHDR_FIN|TCPHDR_PSH); |
| 2190 | if (!skb_shinfo(skb)->nr_frags) { |
| 2191 | skb_pull(skb, copy); |
| 2192 | } else { |
| 2193 | __pskb_trim_head(skb, copy); |
| 2194 | tcp_set_skb_tso_segs(skb, mss_now); |
| 2195 | } |
| 2196 | TCP_SKB_CB(skb)->seq += copy; |
| 2197 | } |
| 2198 | |
| 2199 | len += copy; |
| 2200 | |
| 2201 | if (len >= probe_size) |
| 2202 | break; |
| 2203 | } |
| 2204 | tcp_init_tso_segs(nskb, nskb->len); |
| 2205 | |
| 2206 | /* We're ready to send. If this fails, the probe will |
| 2207 | * be resegmented into mss-sized pieces by tcp_write_xmit(). |
| 2208 | */ |
| 2209 | if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { |
| 2210 | /* Decrement cwnd here because we are sending |
| 2211 | * effectively two packets. */ |
| 2212 | tp->snd_cwnd--; |
| 2213 | tcp_event_new_data_sent(sk, nskb); |
| 2214 | |
| 2215 | icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); |
| 2216 | tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; |
| 2217 | tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; |
| 2218 | |
| 2219 | return 1; |
| 2220 | } |
| 2221 | |
| 2222 | return -1; |
| 2223 | } |
| 2224 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2225 | static bool tcp_pacing_check(struct sock *sk) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2226 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2227 | struct tcp_sock *tp = tcp_sk(sk); |
| 2228 | |
| 2229 | if (!tcp_needs_internal_pacing(sk)) |
| 2230 | return false; |
| 2231 | |
| 2232 | if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) |
| 2233 | return false; |
| 2234 | |
| 2235 | if (!hrtimer_is_queued(&tp->pacing_timer)) { |
| 2236 | hrtimer_start(&tp->pacing_timer, |
| 2237 | ns_to_ktime(tp->tcp_wstamp_ns), |
| 2238 | HRTIMER_MODE_ABS_PINNED_SOFT); |
| 2239 | sock_hold(sk); |
| 2240 | } |
| 2241 | return true; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2242 | } |
| 2243 | |
| 2244 | /* TCP Small Queues : |
| 2245 | * Control number of packets in qdisc/devices to two packets / or ~1 ms. |
| 2246 | * (These limits are doubled for retransmits) |
| 2247 | * This allows for : |
| 2248 | * - better RTT estimation and ACK scheduling |
| 2249 | * - faster recovery |
| 2250 | * - high rates |
| 2251 | * Alas, some drivers / subsystems require a fair amount |
| 2252 | * of queued bytes to ensure line rate. |
| 2253 | * One example is wifi aggregation (802.11 AMPDU) |
| 2254 | */ |
| 2255 | static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, |
| 2256 | unsigned int factor) |
| 2257 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2258 | unsigned long limit; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2259 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2260 | limit = max_t(unsigned long, |
| 2261 | 2 * skb->truesize, |
| 2262 | sk->sk_pacing_rate >> sk->sk_pacing_shift); |
| 2263 | if (sk->sk_pacing_status == SK_PACING_NONE) |
| 2264 | limit = min_t(unsigned long, limit, |
| 2265 | sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2266 | limit <<= factor; |
| 2267 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2268 | if (static_branch_unlikely(&tcp_tx_delay_enabled) && |
| 2269 | tcp_sk(sk)->tcp_tx_delay) { |
| 2270 | u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay; |
| 2271 | |
| 2272 | /* TSQ is based on skb truesize sum (sk_wmem_alloc), so we |
| 2273 | * approximate our needs assuming an ~100% skb->truesize overhead. |
| 2274 | * USEC_PER_SEC is approximated by 2^20. |
| 2275 | * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift. |
| 2276 | */ |
| 2277 | extra_bytes >>= (20 - 1); |
| 2278 | limit += extra_bytes; |
| 2279 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2280 | if (refcount_read(&sk->sk_wmem_alloc) > limit) { |
| 2281 | /* Always send skb if rtx queue is empty. |
| 2282 | * No need to wait for TX completion to call us back, |
| 2283 | * after softirq/tasklet schedule. |
| 2284 | * This helps when TX completions are delayed too much. |
| 2285 | */ |
| 2286 | if (tcp_rtx_queue_empty(sk)) |
| 2287 | return false; |
| 2288 | |
| 2289 | set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); |
| 2290 | /* It is possible TX completion already happened |
| 2291 | * before we set TSQ_THROTTLED, so we must |
| 2292 | * test again the condition. |
| 2293 | */ |
| 2294 | smp_mb__after_atomic(); |
| 2295 | if (refcount_read(&sk->sk_wmem_alloc) > limit) |
| 2296 | return true; |
| 2297 | } |
| 2298 | return false; |
| 2299 | } |
| 2300 | |
| 2301 | static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) |
| 2302 | { |
| 2303 | const u32 now = tcp_jiffies32; |
| 2304 | enum tcp_chrono old = tp->chrono_type; |
| 2305 | |
| 2306 | if (old > TCP_CHRONO_UNSPEC) |
| 2307 | tp->chrono_stat[old - 1] += now - tp->chrono_start; |
| 2308 | tp->chrono_start = now; |
| 2309 | tp->chrono_type = new; |
| 2310 | } |
| 2311 | |
| 2312 | void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type) |
| 2313 | { |
| 2314 | struct tcp_sock *tp = tcp_sk(sk); |
| 2315 | |
| 2316 | /* If there are multiple conditions worthy of tracking in a |
| 2317 | * chronograph then the highest priority enum takes precedence |
| 2318 | * over the other conditions. So that if something "more interesting" |
| 2319 | * starts happening, stop the previous chrono and start a new one. |
| 2320 | */ |
| 2321 | if (type > tp->chrono_type) |
| 2322 | tcp_chrono_set(tp, type); |
| 2323 | } |
| 2324 | |
| 2325 | void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type) |
| 2326 | { |
| 2327 | struct tcp_sock *tp = tcp_sk(sk); |
| 2328 | |
| 2329 | |
| 2330 | /* There are multiple conditions worthy of tracking in a |
| 2331 | * chronograph, so that the highest priority enum takes |
| 2332 | * precedence over the other conditions (see tcp_chrono_start). |
| 2333 | * If a condition stops, we only stop chrono tracking if |
| 2334 | * it's the "most interesting" or current chrono we are |
| 2335 | * tracking and starts busy chrono if we have pending data. |
| 2336 | */ |
| 2337 | if (tcp_rtx_and_write_queues_empty(sk)) |
| 2338 | tcp_chrono_set(tp, TCP_CHRONO_UNSPEC); |
| 2339 | else if (type == tp->chrono_type) |
| 2340 | tcp_chrono_set(tp, TCP_CHRONO_BUSY); |
| 2341 | } |
| 2342 | |
| 2343 | /* This routine writes packets to the network. It advances the |
| 2344 | * send_head. This happens as incoming acks open up the remote |
| 2345 | * window for us. |
| 2346 | * |
| 2347 | * LARGESEND note: !tcp_urg_mode is overkill, only frames between |
| 2348 | * snd_up-64k-mss .. snd_up cannot be large. However, taking into |
| 2349 | * account rare use of URG, this is not a big flaw. |
| 2350 | * |
| 2351 | * Send at most one packet when push_one > 0. Temporarily ignore |
| 2352 | * cwnd limit to force at most one packet out when push_one == 2. |
| 2353 | |
| 2354 | * Returns true, if no segments are in flight and we have queued segments, |
| 2355 | * but cannot send anything now because of SWS or another problem. |
| 2356 | */ |
| 2357 | static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, |
| 2358 | int push_one, gfp_t gfp) |
| 2359 | { |
| 2360 | struct tcp_sock *tp = tcp_sk(sk); |
| 2361 | struct sk_buff *skb; |
| 2362 | unsigned int tso_segs, sent_pkts; |
| 2363 | int cwnd_quota; |
| 2364 | int result; |
| 2365 | bool is_cwnd_limited = false, is_rwnd_limited = false; |
| 2366 | u32 max_segs; |
| 2367 | |
| 2368 | sent_pkts = 0; |
| 2369 | |
| 2370 | tcp_mstamp_refresh(tp); |
| 2371 | if (!push_one) { |
| 2372 | /* Do MTU probing. */ |
| 2373 | result = tcp_mtu_probe(sk); |
| 2374 | if (!result) { |
| 2375 | return false; |
| 2376 | } else if (result > 0) { |
| 2377 | sent_pkts = 1; |
| 2378 | } |
| 2379 | } |
| 2380 | |
| 2381 | max_segs = tcp_tso_segs(sk, mss_now); |
| 2382 | while ((skb = tcp_send_head(sk))) { |
| 2383 | unsigned int limit; |
| 2384 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2385 | if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { |
| 2386 | /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ |
| 2387 | skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; |
| 2388 | list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); |
| 2389 | tcp_init_tso_segs(skb, mss_now); |
| 2390 | goto repair; /* Skip network transmission */ |
| 2391 | } |
| 2392 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2393 | if (tcp_pacing_check(sk)) |
| 2394 | break; |
| 2395 | |
| 2396 | tso_segs = tcp_init_tso_segs(skb, mss_now); |
| 2397 | BUG_ON(!tso_segs); |
| 2398 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2399 | cwnd_quota = tcp_cwnd_test(tp, skb); |
| 2400 | if (!cwnd_quota) { |
| 2401 | if (push_one == 2) |
| 2402 | /* Force out a loss probe pkt. */ |
| 2403 | cwnd_quota = 1; |
| 2404 | else |
| 2405 | break; |
| 2406 | } |
| 2407 | |
| 2408 | if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) { |
| 2409 | is_rwnd_limited = true; |
| 2410 | break; |
| 2411 | } |
| 2412 | |
| 2413 | if (tso_segs == 1) { |
| 2414 | if (unlikely(!tcp_nagle_test(tp, skb, mss_now, |
| 2415 | (tcp_skb_is_last(sk, skb) ? |
| 2416 | nonagle : TCP_NAGLE_PUSH)))) |
| 2417 | break; |
| 2418 | } else { |
| 2419 | if (!push_one && |
| 2420 | tcp_tso_should_defer(sk, skb, &is_cwnd_limited, |
| 2421 | &is_rwnd_limited, max_segs)) |
| 2422 | break; |
| 2423 | } |
| 2424 | |
| 2425 | limit = mss_now; |
| 2426 | if (tso_segs > 1 && !tcp_urg_mode(tp)) |
| 2427 | limit = tcp_mss_split_point(sk, skb, mss_now, |
| 2428 | min_t(unsigned int, |
| 2429 | cwnd_quota, |
| 2430 | max_segs), |
| 2431 | nonagle); |
| 2432 | |
| 2433 | if (skb->len > limit && |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2434 | unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2435 | break; |
| 2436 | |
| 2437 | if (tcp_small_queue_check(sk, skb, 0)) |
| 2438 | break; |
| 2439 | |
| 2440 | if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) |
| 2441 | break; |
| 2442 | |
| 2443 | repair: |
| 2444 | /* Advance the send_head. This one is sent out. |
| 2445 | * This call will increment packets_out. |
| 2446 | */ |
| 2447 | tcp_event_new_data_sent(sk, skb); |
| 2448 | |
| 2449 | tcp_minshall_update(tp, mss_now, skb); |
| 2450 | sent_pkts += tcp_skb_pcount(skb); |
| 2451 | |
| 2452 | if (push_one) |
| 2453 | break; |
| 2454 | } |
| 2455 | |
| 2456 | if (is_rwnd_limited) |
| 2457 | tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED); |
| 2458 | else |
| 2459 | tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); |
| 2460 | |
| 2461 | if (likely(sent_pkts)) { |
| 2462 | if (tcp_in_cwnd_reduction(sk)) |
| 2463 | tp->prr_out += sent_pkts; |
| 2464 | |
| 2465 | /* Send one loss probe per tail loss episode. */ |
| 2466 | if (push_one != 2) |
| 2467 | tcp_schedule_loss_probe(sk, false); |
| 2468 | is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); |
| 2469 | tcp_cwnd_validate(sk, is_cwnd_limited); |
| 2470 | return false; |
| 2471 | } |
| 2472 | return !tp->packets_out && !tcp_write_queue_empty(sk); |
| 2473 | } |
| 2474 | |
| 2475 | bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) |
| 2476 | { |
| 2477 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 2478 | struct tcp_sock *tp = tcp_sk(sk); |
| 2479 | u32 timeout, rto_delta_us; |
| 2480 | int early_retrans; |
| 2481 | |
| 2482 | /* Don't do any loss probe on a Fast Open connection before 3WHS |
| 2483 | * finishes. |
| 2484 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2485 | if (rcu_access_pointer(tp->fastopen_rsk)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2486 | return false; |
| 2487 | |
| 2488 | early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans; |
| 2489 | /* Schedule a loss probe in 2*RTT for SACK capable connections |
| 2490 | * not in loss recovery, that are either limited by cwnd or application. |
| 2491 | */ |
| 2492 | if ((early_retrans != 3 && early_retrans != 4) || |
| 2493 | !tp->packets_out || !tcp_is_sack(tp) || |
| 2494 | (icsk->icsk_ca_state != TCP_CA_Open && |
| 2495 | icsk->icsk_ca_state != TCP_CA_CWR)) |
| 2496 | return false; |
| 2497 | |
| 2498 | /* Probe timeout is 2*rtt. Add minimum RTO to account |
| 2499 | * for delayed ack when there's one outstanding packet. If no RTT |
| 2500 | * sample is available then probe after TCP_TIMEOUT_INIT. |
| 2501 | */ |
| 2502 | if (tp->srtt_us) { |
| 2503 | timeout = usecs_to_jiffies(tp->srtt_us >> 2); |
| 2504 | if (tp->packets_out == 1) |
| 2505 | timeout += TCP_RTO_MIN; |
| 2506 | else |
| 2507 | timeout += TCP_TIMEOUT_MIN; |
| 2508 | } else { |
| 2509 | timeout = TCP_TIMEOUT_INIT; |
| 2510 | } |
| 2511 | |
| 2512 | /* If the RTO formula yields an earlier time, then use that time. */ |
| 2513 | rto_delta_us = advancing_rto ? |
| 2514 | jiffies_to_usecs(inet_csk(sk)->icsk_rto) : |
| 2515 | tcp_rto_delta_us(sk); /* How far in future is RTO? */ |
| 2516 | if (rto_delta_us > 0) |
| 2517 | timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); |
| 2518 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2519 | tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, |
| 2520 | TCP_RTO_MAX, NULL); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2521 | return true; |
| 2522 | } |
| 2523 | |
| 2524 | /* Thanks to skb fast clones, we can detect if a prior transmit of |
| 2525 | * a packet is still in a qdisc or driver queue. |
| 2526 | * In this case, there is very little point doing a retransmit ! |
| 2527 | */ |
| 2528 | static bool skb_still_in_host_queue(const struct sock *sk, |
| 2529 | const struct sk_buff *skb) |
| 2530 | { |
| 2531 | if (unlikely(skb_fclone_busy(sk, skb))) { |
| 2532 | NET_INC_STATS(sock_net(sk), |
| 2533 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); |
| 2534 | return true; |
| 2535 | } |
| 2536 | return false; |
| 2537 | } |
| 2538 | |
| 2539 | /* When probe timeout (PTO) fires, try send a new segment if possible, else |
| 2540 | * retransmit the last segment. |
| 2541 | */ |
| 2542 | void tcp_send_loss_probe(struct sock *sk) |
| 2543 | { |
| 2544 | struct tcp_sock *tp = tcp_sk(sk); |
| 2545 | struct sk_buff *skb; |
| 2546 | int pcount; |
| 2547 | int mss = tcp_current_mss(sk); |
| 2548 | |
| 2549 | skb = tcp_send_head(sk); |
| 2550 | if (skb && tcp_snd_wnd_test(tp, skb, mss)) { |
| 2551 | pcount = tp->packets_out; |
| 2552 | tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); |
| 2553 | if (tp->packets_out > pcount) |
| 2554 | goto probe_sent; |
| 2555 | goto rearm_timer; |
| 2556 | } |
| 2557 | skb = skb_rb_last(&sk->tcp_rtx_queue); |
| 2558 | if (unlikely(!skb)) { |
| 2559 | WARN_ONCE(tp->packets_out, |
| 2560 | "invalid inflight: %u state %u cwnd %u mss %d\n", |
| 2561 | tp->packets_out, sk->sk_state, tp->snd_cwnd, mss); |
| 2562 | inet_csk(sk)->icsk_pending = 0; |
| 2563 | return; |
| 2564 | } |
| 2565 | |
| 2566 | /* At most one outstanding TLP retransmission. */ |
| 2567 | if (tp->tlp_high_seq) |
| 2568 | goto rearm_timer; |
| 2569 | |
| 2570 | if (skb_still_in_host_queue(sk, skb)) |
| 2571 | goto rearm_timer; |
| 2572 | |
| 2573 | pcount = tcp_skb_pcount(skb); |
| 2574 | if (WARN_ON(!pcount)) |
| 2575 | goto rearm_timer; |
| 2576 | |
| 2577 | if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { |
| 2578 | if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, |
| 2579 | (pcount - 1) * mss, mss, |
| 2580 | GFP_ATOMIC))) |
| 2581 | goto rearm_timer; |
| 2582 | skb = skb_rb_next(skb); |
| 2583 | } |
| 2584 | |
| 2585 | if (WARN_ON(!skb || !tcp_skb_pcount(skb))) |
| 2586 | goto rearm_timer; |
| 2587 | |
| 2588 | if (__tcp_retransmit_skb(sk, skb, 1)) |
| 2589 | goto rearm_timer; |
| 2590 | |
| 2591 | /* Record snd_nxt for loss detection. */ |
| 2592 | tp->tlp_high_seq = tp->snd_nxt; |
| 2593 | |
| 2594 | probe_sent: |
| 2595 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); |
| 2596 | /* Reset s.t. tcp_rearm_rto will restart timer from now */ |
| 2597 | inet_csk(sk)->icsk_pending = 0; |
| 2598 | rearm_timer: |
| 2599 | tcp_rearm_rto(sk); |
| 2600 | } |
| 2601 | |
| 2602 | /* Push out any pending frames which were held back due to |
| 2603 | * TCP_CORK or attempt at coalescing tiny packets. |
| 2604 | * The socket must be locked by the caller. |
| 2605 | */ |
| 2606 | void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, |
| 2607 | int nonagle) |
| 2608 | { |
| 2609 | /* If we are closed, the bytes will have to remain here. |
| 2610 | * In time closedown will finish, we empty the write queue and |
| 2611 | * all will be happy. |
| 2612 | */ |
| 2613 | if (unlikely(sk->sk_state == TCP_CLOSE)) |
| 2614 | return; |
| 2615 | |
| 2616 | if (tcp_write_xmit(sk, cur_mss, nonagle, 0, |
| 2617 | sk_gfp_mask(sk, GFP_ATOMIC))) |
| 2618 | tcp_check_probe_timer(sk); |
| 2619 | } |
| 2620 | |
| 2621 | /* Send _single_ skb sitting at the send head. This function requires |
| 2622 | * true push pending frames to setup probe timer etc. |
| 2623 | */ |
| 2624 | void tcp_push_one(struct sock *sk, unsigned int mss_now) |
| 2625 | { |
| 2626 | struct sk_buff *skb = tcp_send_head(sk); |
| 2627 | |
| 2628 | BUG_ON(!skb || skb->len < mss_now); |
| 2629 | |
| 2630 | tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); |
| 2631 | } |
| 2632 | |
| 2633 | /* This function returns the amount that we can raise the |
| 2634 | * usable window based on the following constraints |
| 2635 | * |
| 2636 | * 1. The window can never be shrunk once it is offered (RFC 793) |
| 2637 | * 2. We limit memory per socket |
| 2638 | * |
| 2639 | * RFC 1122: |
| 2640 | * "the suggested [SWS] avoidance algorithm for the receiver is to keep |
| 2641 | * RECV.NEXT + RCV.WIN fixed until: |
| 2642 | * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" |
| 2643 | * |
| 2644 | * i.e. don't raise the right edge of the window until you can raise |
| 2645 | * it at least MSS bytes. |
| 2646 | * |
| 2647 | * Unfortunately, the recommended algorithm breaks header prediction, |
| 2648 | * since header prediction assumes th->window stays fixed. |
| 2649 | * |
| 2650 | * Strictly speaking, keeping th->window fixed violates the receiver |
| 2651 | * side SWS prevention criteria. The problem is that under this rule |
| 2652 | * a stream of single byte packets will cause the right side of the |
| 2653 | * window to always advance by a single byte. |
| 2654 | * |
| 2655 | * Of course, if the sender implements sender side SWS prevention |
| 2656 | * then this will not be a problem. |
| 2657 | * |
| 2658 | * BSD seems to make the following compromise: |
| 2659 | * |
| 2660 | * If the free space is less than the 1/4 of the maximum |
| 2661 | * space available and the free space is less than 1/2 mss, |
| 2662 | * then set the window to 0. |
| 2663 | * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] |
| 2664 | * Otherwise, just prevent the window from shrinking |
| 2665 | * and from being larger than the largest representable value. |
| 2666 | * |
| 2667 | * This prevents incremental opening of the window in the regime |
| 2668 | * where TCP is limited by the speed of the reader side taking |
| 2669 | * data out of the TCP receive queue. It does nothing about |
| 2670 | * those cases where the window is constrained on the sender side |
| 2671 | * because the pipeline is full. |
| 2672 | * |
| 2673 | * BSD also seems to "accidentally" limit itself to windows that are a |
| 2674 | * multiple of MSS, at least until the free space gets quite small. |
| 2675 | * This would appear to be a side effect of the mbuf implementation. |
| 2676 | * Combining these two algorithms results in the observed behavior |
| 2677 | * of having a fixed window size at almost all times. |
| 2678 | * |
| 2679 | * Below we obtain similar behavior by forcing the offered window to |
| 2680 | * a multiple of the mss when it is feasible to do so. |
| 2681 | * |
| 2682 | * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. |
| 2683 | * Regular options like TIMESTAMP are taken into account. |
| 2684 | */ |
| 2685 | u32 __tcp_select_window(struct sock *sk) |
| 2686 | { |
| 2687 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 2688 | struct tcp_sock *tp = tcp_sk(sk); |
| 2689 | /* MSS for the peer's data. Previous versions used mss_clamp |
| 2690 | * here. I don't know if the value based on our guesses |
| 2691 | * of peer's MSS is better for the performance. It's more correct |
| 2692 | * but may be worse for the performance because of rcv_mss |
| 2693 | * fluctuations. --SAW 1998/11/1 |
| 2694 | */ |
| 2695 | int mss = icsk->icsk_ack.rcv_mss; |
| 2696 | int free_space = tcp_space(sk); |
| 2697 | int allowed_space = tcp_full_space(sk); |
| 2698 | int full_space = min_t(int, tp->window_clamp, allowed_space); |
| 2699 | int window; |
| 2700 | |
| 2701 | if (unlikely(mss > full_space)) { |
| 2702 | mss = full_space; |
| 2703 | if (mss <= 0) |
| 2704 | return 0; |
| 2705 | } |
| 2706 | if (free_space < (full_space >> 1)) { |
| 2707 | icsk->icsk_ack.quick = 0; |
| 2708 | |
| 2709 | if (tcp_under_memory_pressure(sk)) |
| 2710 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, |
| 2711 | 4U * tp->advmss); |
| 2712 | |
| 2713 | /* free_space might become our new window, make sure we don't |
| 2714 | * increase it due to wscale. |
| 2715 | */ |
| 2716 | free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); |
| 2717 | |
| 2718 | /* if free space is less than mss estimate, or is below 1/16th |
| 2719 | * of the maximum allowed, try to move to zero-window, else |
| 2720 | * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and |
| 2721 | * new incoming data is dropped due to memory limits. |
| 2722 | * With large window, mss test triggers way too late in order |
| 2723 | * to announce zero window in time before rmem limit kicks in. |
| 2724 | */ |
| 2725 | if (free_space < (allowed_space >> 4) || free_space < mss) |
| 2726 | return 0; |
| 2727 | } |
| 2728 | |
| 2729 | if (free_space > tp->rcv_ssthresh) |
| 2730 | free_space = tp->rcv_ssthresh; |
| 2731 | |
| 2732 | /* Don't do rounding if we are using window scaling, since the |
| 2733 | * scaled window will not line up with the MSS boundary anyway. |
| 2734 | */ |
| 2735 | if (tp->rx_opt.rcv_wscale) { |
| 2736 | window = free_space; |
| 2737 | |
| 2738 | /* Advertise enough space so that it won't get scaled away. |
| 2739 | * Import case: prevent zero window announcement if |
| 2740 | * 1<<rcv_wscale > mss. |
| 2741 | */ |
| 2742 | window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); |
| 2743 | } else { |
| 2744 | window = tp->rcv_wnd; |
| 2745 | /* Get the largest window that is a nice multiple of mss. |
| 2746 | * Window clamp already applied above. |
| 2747 | * If our current window offering is within 1 mss of the |
| 2748 | * free space we just keep it. This prevents the divide |
| 2749 | * and multiply from happening most of the time. |
| 2750 | * We also don't do any window rounding when the free space |
| 2751 | * is too small. |
| 2752 | */ |
| 2753 | if (window <= free_space - mss || window > free_space) |
| 2754 | window = rounddown(free_space, mss); |
| 2755 | else if (mss == full_space && |
| 2756 | free_space > window + (full_space >> 1)) |
| 2757 | window = free_space; |
| 2758 | } |
| 2759 | |
| 2760 | return window; |
| 2761 | } |
| 2762 | |
| 2763 | void tcp_skb_collapse_tstamp(struct sk_buff *skb, |
| 2764 | const struct sk_buff *next_skb) |
| 2765 | { |
| 2766 | if (unlikely(tcp_has_tx_tstamp(next_skb))) { |
| 2767 | const struct skb_shared_info *next_shinfo = |
| 2768 | skb_shinfo(next_skb); |
| 2769 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
| 2770 | |
| 2771 | shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; |
| 2772 | shinfo->tskey = next_shinfo->tskey; |
| 2773 | TCP_SKB_CB(skb)->txstamp_ack |= |
| 2774 | TCP_SKB_CB(next_skb)->txstamp_ack; |
| 2775 | } |
| 2776 | } |
| 2777 | |
| 2778 | /* Collapses two adjacent SKB's during retransmission. */ |
| 2779 | static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) |
| 2780 | { |
| 2781 | struct tcp_sock *tp = tcp_sk(sk); |
| 2782 | struct sk_buff *next_skb = skb_rb_next(skb); |
| 2783 | int next_skb_size; |
| 2784 | |
| 2785 | next_skb_size = next_skb->len; |
| 2786 | |
| 2787 | BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); |
| 2788 | |
| 2789 | if (next_skb_size) { |
| 2790 | if (next_skb_size <= skb_availroom(skb)) |
| 2791 | skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size), |
| 2792 | next_skb_size); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2793 | else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2794 | return false; |
| 2795 | } |
| 2796 | tcp_highest_sack_replace(sk, next_skb, skb); |
| 2797 | |
| 2798 | /* Update sequence range on original skb. */ |
| 2799 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; |
| 2800 | |
| 2801 | /* Merge over control information. This moves PSH/FIN etc. over */ |
| 2802 | TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; |
| 2803 | |
| 2804 | /* All done, get rid of second SKB and account for it so |
| 2805 | * packet counting does not break. |
| 2806 | */ |
| 2807 | TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; |
| 2808 | TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; |
| 2809 | |
| 2810 | /* changed transmit queue under us so clear hints */ |
| 2811 | tcp_clear_retrans_hints_partial(tp); |
| 2812 | if (next_skb == tp->retransmit_skb_hint) |
| 2813 | tp->retransmit_skb_hint = skb; |
| 2814 | |
| 2815 | tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); |
| 2816 | |
| 2817 | tcp_skb_collapse_tstamp(skb, next_skb); |
| 2818 | |
| 2819 | tcp_rtx_queue_unlink_and_free(next_skb, sk); |
| 2820 | return true; |
| 2821 | } |
| 2822 | |
| 2823 | /* Check if coalescing SKBs is legal. */ |
| 2824 | static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) |
| 2825 | { |
| 2826 | if (tcp_skb_pcount(skb) > 1) |
| 2827 | return false; |
| 2828 | if (skb_cloned(skb)) |
| 2829 | return false; |
| 2830 | /* Some heuristics for collapsing over SACK'd could be invented */ |
| 2831 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) |
| 2832 | return false; |
| 2833 | |
| 2834 | return true; |
| 2835 | } |
| 2836 | |
| 2837 | /* Collapse packets in the retransmit queue to make to create |
| 2838 | * less packets on the wire. This is only done on retransmission. |
| 2839 | */ |
| 2840 | static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, |
| 2841 | int space) |
| 2842 | { |
| 2843 | struct tcp_sock *tp = tcp_sk(sk); |
| 2844 | struct sk_buff *skb = to, *tmp; |
| 2845 | bool first = true; |
| 2846 | |
| 2847 | if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse) |
| 2848 | return; |
| 2849 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) |
| 2850 | return; |
| 2851 | |
| 2852 | skb_rbtree_walk_from_safe(skb, tmp) { |
| 2853 | if (!tcp_can_collapse(sk, skb)) |
| 2854 | break; |
| 2855 | |
| 2856 | if (!tcp_skb_can_collapse_to(to)) |
| 2857 | break; |
| 2858 | |
| 2859 | space -= skb->len; |
| 2860 | |
| 2861 | if (first) { |
| 2862 | first = false; |
| 2863 | continue; |
| 2864 | } |
| 2865 | |
| 2866 | if (space < 0) |
| 2867 | break; |
| 2868 | |
| 2869 | if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) |
| 2870 | break; |
| 2871 | |
| 2872 | if (!tcp_collapse_retrans(sk, to)) |
| 2873 | break; |
| 2874 | } |
| 2875 | } |
| 2876 | |
| 2877 | /* This retransmits one SKB. Policy decisions and retransmit queue |
| 2878 | * state updates are done by the caller. Returns non-zero if an |
| 2879 | * error occurred which prevented the send. |
| 2880 | */ |
| 2881 | int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) |
| 2882 | { |
| 2883 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 2884 | struct tcp_sock *tp = tcp_sk(sk); |
| 2885 | unsigned int cur_mss; |
| 2886 | int diff, len, err; |
| 2887 | |
| 2888 | |
| 2889 | /* Inconclusive MTU probe */ |
| 2890 | if (icsk->icsk_mtup.probe_size) |
| 2891 | icsk->icsk_mtup.probe_size = 0; |
| 2892 | |
| 2893 | /* Do not sent more than we queued. 1/4 is reserved for possible |
| 2894 | * copying overhead: fragmentation, tunneling, mangling etc. |
| 2895 | */ |
| 2896 | if (refcount_read(&sk->sk_wmem_alloc) > |
| 2897 | min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), |
| 2898 | sk->sk_sndbuf)) |
| 2899 | return -EAGAIN; |
| 2900 | |
| 2901 | if (skb_still_in_host_queue(sk, skb)) |
| 2902 | return -EBUSY; |
| 2903 | |
| 2904 | if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { |
| 2905 | if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { |
| 2906 | WARN_ON_ONCE(1); |
| 2907 | return -EINVAL; |
| 2908 | } |
| 2909 | if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) |
| 2910 | return -ENOMEM; |
| 2911 | } |
| 2912 | |
| 2913 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) |
| 2914 | return -EHOSTUNREACH; /* Routing failure or similar. */ |
| 2915 | |
| 2916 | cur_mss = tcp_current_mss(sk); |
| 2917 | |
| 2918 | /* If receiver has shrunk his window, and skb is out of |
| 2919 | * new window, do not retransmit it. The exception is the |
| 2920 | * case, when window is shrunk to zero. In this case |
| 2921 | * our retransmit serves as a zero window probe. |
| 2922 | */ |
| 2923 | if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && |
| 2924 | TCP_SKB_CB(skb)->seq != tp->snd_una) |
| 2925 | return -EAGAIN; |
| 2926 | |
| 2927 | len = cur_mss * segs; |
| 2928 | if (skb->len > len) { |
| 2929 | if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len, |
| 2930 | cur_mss, GFP_ATOMIC)) |
| 2931 | return -ENOMEM; /* We'll try again later. */ |
| 2932 | } else { |
| 2933 | if (skb_unclone(skb, GFP_ATOMIC)) |
| 2934 | return -ENOMEM; |
| 2935 | |
| 2936 | diff = tcp_skb_pcount(skb); |
| 2937 | tcp_set_skb_tso_segs(skb, cur_mss); |
| 2938 | diff -= tcp_skb_pcount(skb); |
| 2939 | if (diff) |
| 2940 | tcp_adjust_pcount(sk, skb, diff); |
| 2941 | if (skb->len < cur_mss) |
| 2942 | tcp_retrans_try_collapse(sk, skb, cur_mss); |
| 2943 | } |
| 2944 | |
| 2945 | /* RFC3168, section 6.1.1.1. ECN fallback */ |
| 2946 | if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) |
| 2947 | tcp_ecn_clear_syn(sk, skb); |
| 2948 | |
| 2949 | /* Update global and local TCP statistics. */ |
| 2950 | segs = tcp_skb_pcount(skb); |
| 2951 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); |
| 2952 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) |
| 2953 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); |
| 2954 | tp->total_retrans += segs; |
| 2955 | tp->bytes_retrans += skb->len; |
| 2956 | |
| 2957 | /* make sure skb->data is aligned on arches that require it |
| 2958 | * and check if ack-trimming & collapsing extended the headroom |
| 2959 | * beyond what csum_start can cover. |
| 2960 | */ |
| 2961 | if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || |
| 2962 | skb_headroom(skb) >= 0xFFFF)) { |
| 2963 | struct sk_buff *nskb; |
| 2964 | |
| 2965 | tcp_skb_tsorted_save(skb) { |
| 2966 | nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); |
| 2967 | err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : |
| 2968 | -ENOBUFS; |
| 2969 | } tcp_skb_tsorted_restore(skb); |
| 2970 | |
| 2971 | if (!err) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2972 | tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2973 | tcp_rate_skb_sent(sk, skb); |
| 2974 | } |
| 2975 | } else { |
| 2976 | err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); |
| 2977 | } |
| 2978 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2979 | /* To avoid taking spuriously low RTT samples based on a timestamp |
| 2980 | * for a transmit that never happened, always mark EVER_RETRANS |
| 2981 | */ |
| 2982 | TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; |
| 2983 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2984 | if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG)) |
| 2985 | tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB, |
| 2986 | TCP_SKB_CB(skb)->seq, segs, err); |
| 2987 | |
| 2988 | if (likely(!err)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2989 | trace_tcp_retransmit_skb(sk, skb); |
| 2990 | } else if (err != -EBUSY) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2991 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2992 | } |
| 2993 | return err; |
| 2994 | } |
| 2995 | |
| 2996 | int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) |
| 2997 | { |
| 2998 | struct tcp_sock *tp = tcp_sk(sk); |
| 2999 | int err = __tcp_retransmit_skb(sk, skb, segs); |
| 3000 | |
| 3001 | if (err == 0) { |
| 3002 | #if FASTRETRANS_DEBUG > 0 |
| 3003 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { |
| 3004 | net_dbg_ratelimited("retrans_out leaked\n"); |
| 3005 | } |
| 3006 | #endif |
| 3007 | TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; |
| 3008 | tp->retrans_out += tcp_skb_pcount(skb); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3009 | } |
| 3010 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3011 | /* Save stamp of the first (attempted) retransmit. */ |
| 3012 | if (!tp->retrans_stamp) |
| 3013 | tp->retrans_stamp = tcp_skb_timestamp(skb); |
| 3014 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3015 | if (tp->undo_retrans < 0) |
| 3016 | tp->undo_retrans = 0; |
| 3017 | tp->undo_retrans += tcp_skb_pcount(skb); |
| 3018 | return err; |
| 3019 | } |
| 3020 | |
| 3021 | /* This gets called after a retransmit timeout, and the initially |
| 3022 | * retransmitted data is acknowledged. It tries to continue |
| 3023 | * resending the rest of the retransmit queue, until either |
| 3024 | * we've sent it all or the congestion window limit is reached. |
| 3025 | */ |
| 3026 | void tcp_xmit_retransmit_queue(struct sock *sk) |
| 3027 | { |
| 3028 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 3029 | struct sk_buff *skb, *rtx_head, *hole = NULL; |
| 3030 | struct tcp_sock *tp = tcp_sk(sk); |
| 3031 | u32 max_segs; |
| 3032 | int mib_idx; |
| 3033 | |
| 3034 | if (!tp->packets_out) |
| 3035 | return; |
| 3036 | |
| 3037 | rtx_head = tcp_rtx_queue_head(sk); |
| 3038 | skb = tp->retransmit_skb_hint ?: rtx_head; |
| 3039 | max_segs = tcp_tso_segs(sk, tcp_current_mss(sk)); |
| 3040 | skb_rbtree_walk_from(skb) { |
| 3041 | __u8 sacked; |
| 3042 | int segs; |
| 3043 | |
| 3044 | if (tcp_pacing_check(sk)) |
| 3045 | break; |
| 3046 | |
| 3047 | /* we could do better than to assign each time */ |
| 3048 | if (!hole) |
| 3049 | tp->retransmit_skb_hint = skb; |
| 3050 | |
| 3051 | segs = tp->snd_cwnd - tcp_packets_in_flight(tp); |
| 3052 | if (segs <= 0) |
| 3053 | return; |
| 3054 | sacked = TCP_SKB_CB(skb)->sacked; |
| 3055 | /* In case tcp_shift_skb_data() have aggregated large skbs, |
| 3056 | * we need to make sure not sending too bigs TSO packets |
| 3057 | */ |
| 3058 | segs = min_t(int, segs, max_segs); |
| 3059 | |
| 3060 | if (tp->retrans_out >= tp->lost_out) { |
| 3061 | break; |
| 3062 | } else if (!(sacked & TCPCB_LOST)) { |
| 3063 | if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) |
| 3064 | hole = skb; |
| 3065 | continue; |
| 3066 | |
| 3067 | } else { |
| 3068 | if (icsk->icsk_ca_state != TCP_CA_Loss) |
| 3069 | mib_idx = LINUX_MIB_TCPFASTRETRANS; |
| 3070 | else |
| 3071 | mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; |
| 3072 | } |
| 3073 | |
| 3074 | if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) |
| 3075 | continue; |
| 3076 | |
| 3077 | if (tcp_small_queue_check(sk, skb, 1)) |
| 3078 | return; |
| 3079 | |
| 3080 | if (tcp_retransmit_skb(sk, skb, segs)) |
| 3081 | return; |
| 3082 | |
| 3083 | NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb)); |
| 3084 | |
| 3085 | if (tcp_in_cwnd_reduction(sk)) |
| 3086 | tp->prr_out += tcp_skb_pcount(skb); |
| 3087 | |
| 3088 | if (skb == rtx_head && |
| 3089 | icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3090 | tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
| 3091 | inet_csk(sk)->icsk_rto, |
| 3092 | TCP_RTO_MAX, |
| 3093 | skb); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3094 | } |
| 3095 | } |
| 3096 | |
| 3097 | /* We allow to exceed memory limits for FIN packets to expedite |
| 3098 | * connection tear down and (memory) recovery. |
| 3099 | * Otherwise tcp_send_fin() could be tempted to either delay FIN |
| 3100 | * or even be forced to close flow without any FIN. |
| 3101 | * In general, we want to allow one skb per socket to avoid hangs |
| 3102 | * with edge trigger epoll() |
| 3103 | */ |
| 3104 | void sk_forced_mem_schedule(struct sock *sk, int size) |
| 3105 | { |
| 3106 | int amt; |
| 3107 | |
| 3108 | if (size <= sk->sk_forward_alloc) |
| 3109 | return; |
| 3110 | amt = sk_mem_pages(size); |
| 3111 | sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; |
| 3112 | sk_memory_allocated_add(sk, amt); |
| 3113 | |
| 3114 | if (mem_cgroup_sockets_enabled && sk->sk_memcg) |
| 3115 | mem_cgroup_charge_skmem(sk->sk_memcg, amt); |
| 3116 | } |
| 3117 | |
| 3118 | /* Send a FIN. The caller locks the socket for us. |
| 3119 | * We should try to send a FIN packet really hard, but eventually give up. |
| 3120 | */ |
| 3121 | void tcp_send_fin(struct sock *sk) |
| 3122 | { |
| 3123 | struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); |
| 3124 | struct tcp_sock *tp = tcp_sk(sk); |
| 3125 | |
| 3126 | /* Optimization, tack on the FIN if we have one skb in write queue and |
| 3127 | * this skb was not yet sent, or we are under memory pressure. |
| 3128 | * Note: in the latter case, FIN packet will be sent after a timeout, |
| 3129 | * as TCP stack thinks it has already been transmitted. |
| 3130 | */ |
| 3131 | if (!tskb && tcp_under_memory_pressure(sk)) |
| 3132 | tskb = skb_rb_last(&sk->tcp_rtx_queue); |
| 3133 | |
| 3134 | if (tskb) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3135 | TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; |
| 3136 | TCP_SKB_CB(tskb)->end_seq++; |
| 3137 | tp->write_seq++; |
| 3138 | if (tcp_write_queue_empty(sk)) { |
| 3139 | /* This means tskb was already sent. |
| 3140 | * Pretend we included the FIN on previous transmit. |
| 3141 | * We need to set tp->snd_nxt to the value it would have |
| 3142 | * if FIN had been sent. This is because retransmit path |
| 3143 | * does not change tp->snd_nxt. |
| 3144 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3145 | WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3146 | return; |
| 3147 | } |
| 3148 | } else { |
| 3149 | skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3150 | if (unlikely(!skb)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3151 | return; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3152 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3153 | INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); |
| 3154 | skb_reserve(skb, MAX_TCP_HEADER); |
| 3155 | sk_forced_mem_schedule(sk, skb->truesize); |
| 3156 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ |
| 3157 | tcp_init_nondata_skb(skb, tp->write_seq, |
| 3158 | TCPHDR_ACK | TCPHDR_FIN); |
| 3159 | tcp_queue_skb(sk, skb); |
| 3160 | } |
| 3161 | __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); |
| 3162 | } |
| 3163 | |
| 3164 | /* We get here when a process closes a file descriptor (either due to |
| 3165 | * an explicit close() or as a byproduct of exit()'ing) and there |
| 3166 | * was unread data in the receive queue. This behavior is recommended |
| 3167 | * by RFC 2525, section 2.17. -DaveM |
| 3168 | */ |
| 3169 | void tcp_send_active_reset(struct sock *sk, gfp_t priority) |
| 3170 | { |
| 3171 | struct sk_buff *skb; |
| 3172 | |
| 3173 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); |
| 3174 | |
| 3175 | /* NOTE: No TCP options attached and we never retransmit this. */ |
| 3176 | skb = alloc_skb(MAX_TCP_HEADER, priority); |
| 3177 | if (!skb) { |
| 3178 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); |
| 3179 | return; |
| 3180 | } |
| 3181 | |
| 3182 | /* Reserve space for headers and prepare control bits. */ |
| 3183 | skb_reserve(skb, MAX_TCP_HEADER); |
| 3184 | tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), |
| 3185 | TCPHDR_ACK | TCPHDR_RST); |
| 3186 | tcp_mstamp_refresh(tcp_sk(sk)); |
| 3187 | /* Send it off. */ |
| 3188 | if (tcp_transmit_skb(sk, skb, 0, priority)) |
| 3189 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); |
| 3190 | |
| 3191 | /* skb of trace_tcp_send_reset() keeps the skb that caused RST, |
| 3192 | * skb here is different to the troublesome skb, so use NULL |
| 3193 | */ |
| 3194 | trace_tcp_send_reset(sk, NULL); |
| 3195 | } |
| 3196 | |
| 3197 | /* Send a crossed SYN-ACK during socket establishment. |
| 3198 | * WARNING: This routine must only be called when we have already sent |
| 3199 | * a SYN packet that crossed the incoming SYN that caused this routine |
| 3200 | * to get called. If this assumption fails then the initial rcv_wnd |
| 3201 | * and rcv_wscale values will not be correct. |
| 3202 | */ |
| 3203 | int tcp_send_synack(struct sock *sk) |
| 3204 | { |
| 3205 | struct sk_buff *skb; |
| 3206 | |
| 3207 | skb = tcp_rtx_queue_head(sk); |
| 3208 | if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { |
| 3209 | pr_err("%s: wrong queue state\n", __func__); |
| 3210 | return -EFAULT; |
| 3211 | } |
| 3212 | if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { |
| 3213 | if (skb_cloned(skb)) { |
| 3214 | struct sk_buff *nskb; |
| 3215 | |
| 3216 | tcp_skb_tsorted_save(skb) { |
| 3217 | nskb = skb_copy(skb, GFP_ATOMIC); |
| 3218 | } tcp_skb_tsorted_restore(skb); |
| 3219 | if (!nskb) |
| 3220 | return -ENOMEM; |
| 3221 | INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); |
| 3222 | tcp_rtx_queue_unlink_and_free(skb, sk); |
| 3223 | __skb_header_release(nskb); |
| 3224 | tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3225 | sk_wmem_queued_add(sk, nskb->truesize); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3226 | sk_mem_charge(sk, nskb->truesize); |
| 3227 | skb = nskb; |
| 3228 | } |
| 3229 | |
| 3230 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; |
| 3231 | tcp_ecn_send_synack(sk, skb); |
| 3232 | } |
| 3233 | return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); |
| 3234 | } |
| 3235 | |
| 3236 | /** |
| 3237 | * tcp_make_synack - Prepare a SYN-ACK. |
| 3238 | * sk: listener socket |
| 3239 | * dst: dst entry attached to the SYNACK |
| 3240 | * req: request_sock pointer |
| 3241 | * |
| 3242 | * Allocate one skb and build a SYNACK packet. |
| 3243 | * @dst is consumed : Caller should not use it again. |
| 3244 | */ |
| 3245 | struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, |
| 3246 | struct request_sock *req, |
| 3247 | struct tcp_fastopen_cookie *foc, |
| 3248 | enum tcp_synack_type synack_type) |
| 3249 | { |
| 3250 | struct inet_request_sock *ireq = inet_rsk(req); |
| 3251 | const struct tcp_sock *tp = tcp_sk(sk); |
| 3252 | struct tcp_md5sig_key *md5 = NULL; |
| 3253 | struct tcp_out_options opts; |
| 3254 | struct sk_buff *skb; |
| 3255 | int tcp_header_size; |
| 3256 | struct tcphdr *th; |
| 3257 | int mss; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3258 | u64 now; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3259 | |
| 3260 | skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); |
| 3261 | if (unlikely(!skb)) { |
| 3262 | dst_release(dst); |
| 3263 | return NULL; |
| 3264 | } |
| 3265 | /* Reserve space for headers. */ |
| 3266 | skb_reserve(skb, MAX_TCP_HEADER); |
| 3267 | |
| 3268 | switch (synack_type) { |
| 3269 | case TCP_SYNACK_NORMAL: |
| 3270 | skb_set_owner_w(skb, req_to_sk(req)); |
| 3271 | break; |
| 3272 | case TCP_SYNACK_COOKIE: |
| 3273 | /* Under synflood, we do not attach skb to a socket, |
| 3274 | * to avoid false sharing. |
| 3275 | */ |
| 3276 | break; |
| 3277 | case TCP_SYNACK_FASTOPEN: |
| 3278 | /* sk is a const pointer, because we want to express multiple |
| 3279 | * cpu might call us concurrently. |
| 3280 | * sk->sk_wmem_alloc in an atomic, we can promote to rw. |
| 3281 | */ |
| 3282 | skb_set_owner_w(skb, (struct sock *)sk); |
| 3283 | break; |
| 3284 | } |
| 3285 | skb_dst_set(skb, dst); |
| 3286 | |
| 3287 | mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); |
| 3288 | |
| 3289 | memset(&opts, 0, sizeof(opts)); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3290 | now = tcp_clock_ns(); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3291 | #ifdef CONFIG_SYN_COOKIES |
| 3292 | if (unlikely(req->cookie_ts)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3293 | skb->skb_mstamp_ns = cookie_init_timestamp(req); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3294 | else |
| 3295 | #endif |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3296 | { |
| 3297 | skb->skb_mstamp_ns = now; |
| 3298 | if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ |
| 3299 | tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); |
| 3300 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3301 | |
| 3302 | #ifdef CONFIG_TCP_MD5SIG |
| 3303 | rcu_read_lock(); |
| 3304 | md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); |
| 3305 | #endif |
| 3306 | skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); |
| 3307 | tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, |
| 3308 | foc) + sizeof(*th); |
| 3309 | |
| 3310 | skb_push(skb, tcp_header_size); |
| 3311 | skb_reset_transport_header(skb); |
| 3312 | |
| 3313 | th = (struct tcphdr *)skb->data; |
| 3314 | memset(th, 0, sizeof(struct tcphdr)); |
| 3315 | th->syn = 1; |
| 3316 | th->ack = 1; |
| 3317 | tcp_ecn_make_synack(req, th); |
| 3318 | th->source = htons(ireq->ir_num); |
| 3319 | th->dest = ireq->ir_rmt_port; |
| 3320 | skb->mark = ireq->ir_mark; |
| 3321 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 3322 | th->seq = htonl(tcp_rsk(req)->snt_isn); |
| 3323 | /* XXX data is queued and acked as is. No buffer/window check */ |
| 3324 | th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); |
| 3325 | |
| 3326 | /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ |
| 3327 | th->window = htons(min(req->rsk_rcv_wnd, 65535U)); |
| 3328 | tcp_options_write((__be32 *)(th + 1), NULL, &opts); |
| 3329 | th->doff = (tcp_header_size >> 2); |
| 3330 | __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); |
| 3331 | |
| 3332 | #ifdef CONFIG_TCP_MD5SIG |
| 3333 | /* Okay, we have all we need - do the md5 hash if needed */ |
| 3334 | if (md5) |
| 3335 | tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, |
| 3336 | md5, req_to_sk(req), skb); |
| 3337 | rcu_read_unlock(); |
| 3338 | #endif |
| 3339 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3340 | skb->skb_mstamp_ns = now; |
| 3341 | tcp_add_tx_delay(skb, tp); |
| 3342 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3343 | return skb; |
| 3344 | } |
| 3345 | EXPORT_SYMBOL(tcp_make_synack); |
| 3346 | |
| 3347 | static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) |
| 3348 | { |
| 3349 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 3350 | const struct tcp_congestion_ops *ca; |
| 3351 | u32 ca_key = dst_metric(dst, RTAX_CC_ALGO); |
| 3352 | |
| 3353 | if (ca_key == TCP_CA_UNSPEC) |
| 3354 | return; |
| 3355 | |
| 3356 | rcu_read_lock(); |
| 3357 | ca = tcp_ca_find_key(ca_key); |
| 3358 | if (likely(ca && try_module_get(ca->owner))) { |
| 3359 | module_put(icsk->icsk_ca_ops->owner); |
| 3360 | icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); |
| 3361 | icsk->icsk_ca_ops = ca; |
| 3362 | } |
| 3363 | rcu_read_unlock(); |
| 3364 | } |
| 3365 | |
| 3366 | /* Do all connect socket setups that can be done AF independent. */ |
| 3367 | static void tcp_connect_init(struct sock *sk) |
| 3368 | { |
| 3369 | const struct dst_entry *dst = __sk_dst_get(sk); |
| 3370 | struct tcp_sock *tp = tcp_sk(sk); |
| 3371 | __u8 rcv_wscale; |
| 3372 | u32 rcv_wnd; |
| 3373 | |
| 3374 | /* We'll fix this up when we get a response from the other end. |
| 3375 | * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. |
| 3376 | */ |
| 3377 | tp->tcp_header_len = sizeof(struct tcphdr); |
| 3378 | if (sock_net(sk)->ipv4.sysctl_tcp_timestamps) |
| 3379 | tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; |
| 3380 | |
| 3381 | #ifdef CONFIG_TCP_MD5SIG |
| 3382 | if (tp->af_specific->md5_lookup(sk, sk)) |
| 3383 | tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; |
| 3384 | #endif |
| 3385 | |
| 3386 | /* If user gave his TCP_MAXSEG, record it to clamp */ |
| 3387 | if (tp->rx_opt.user_mss) |
| 3388 | tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; |
| 3389 | tp->max_window = 0; |
| 3390 | tcp_mtup_init(sk); |
| 3391 | tcp_sync_mss(sk, dst_mtu(dst)); |
| 3392 | |
| 3393 | tcp_ca_dst_init(sk, dst); |
| 3394 | |
| 3395 | if (!tp->window_clamp) |
| 3396 | tp->window_clamp = dst_metric(dst, RTAX_WINDOW); |
| 3397 | tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); |
| 3398 | |
| 3399 | tcp_initialize_rcv_mss(sk); |
| 3400 | |
| 3401 | /* limit the window selection if the user enforce a smaller rx buffer */ |
| 3402 | if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && |
| 3403 | (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) |
| 3404 | tp->window_clamp = tcp_full_space(sk); |
| 3405 | |
| 3406 | rcv_wnd = tcp_rwnd_init_bpf(sk); |
| 3407 | if (rcv_wnd == 0) |
| 3408 | rcv_wnd = dst_metric(dst, RTAX_INITRWND); |
| 3409 | |
| 3410 | tcp_select_initial_window(sk, tcp_full_space(sk), |
| 3411 | tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), |
| 3412 | &tp->rcv_wnd, |
| 3413 | &tp->window_clamp, |
| 3414 | sock_net(sk)->ipv4.sysctl_tcp_window_scaling, |
| 3415 | &rcv_wscale, |
| 3416 | rcv_wnd); |
| 3417 | |
| 3418 | tp->rx_opt.rcv_wscale = rcv_wscale; |
| 3419 | tp->rcv_ssthresh = tp->rcv_wnd; |
| 3420 | |
| 3421 | sk->sk_err = 0; |
| 3422 | sock_reset_flag(sk, SOCK_DONE); |
| 3423 | tp->snd_wnd = 0; |
| 3424 | tcp_init_wl(tp, 0); |
| 3425 | tcp_write_queue_purge(sk); |
| 3426 | tp->snd_una = tp->write_seq; |
| 3427 | tp->snd_sml = tp->write_seq; |
| 3428 | tp->snd_up = tp->write_seq; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3429 | WRITE_ONCE(tp->snd_nxt, tp->write_seq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3430 | |
| 3431 | if (likely(!tp->repair)) |
| 3432 | tp->rcv_nxt = 0; |
| 3433 | else |
| 3434 | tp->rcv_tstamp = tcp_jiffies32; |
| 3435 | tp->rcv_wup = tp->rcv_nxt; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3436 | WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3437 | |
| 3438 | inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); |
| 3439 | inet_csk(sk)->icsk_retransmits = 0; |
| 3440 | tcp_clear_retrans(tp); |
| 3441 | } |
| 3442 | |
| 3443 | static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) |
| 3444 | { |
| 3445 | struct tcp_sock *tp = tcp_sk(sk); |
| 3446 | struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); |
| 3447 | |
| 3448 | tcb->end_seq += skb->len; |
| 3449 | __skb_header_release(skb); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3450 | sk_wmem_queued_add(sk, skb->truesize); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3451 | sk_mem_charge(sk, skb->truesize); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3452 | WRITE_ONCE(tp->write_seq, tcb->end_seq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3453 | tp->packets_out += tcp_skb_pcount(skb); |
| 3454 | } |
| 3455 | |
| 3456 | /* Build and send a SYN with data and (cached) Fast Open cookie. However, |
| 3457 | * queue a data-only packet after the regular SYN, such that regular SYNs |
| 3458 | * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges |
| 3459 | * only the SYN sequence, the data are retransmitted in the first ACK. |
| 3460 | * If cookie is not cached or other error occurs, falls back to send a |
| 3461 | * regular SYN with Fast Open cookie request option. |
| 3462 | */ |
| 3463 | static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) |
| 3464 | { |
| 3465 | struct tcp_sock *tp = tcp_sk(sk); |
| 3466 | struct tcp_fastopen_request *fo = tp->fastopen_req; |
| 3467 | int space, err = 0; |
| 3468 | struct sk_buff *syn_data; |
| 3469 | |
| 3470 | tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ |
| 3471 | if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) |
| 3472 | goto fallback; |
| 3473 | |
| 3474 | /* MSS for SYN-data is based on cached MSS and bounded by PMTU and |
| 3475 | * user-MSS. Reserve maximum option space for middleboxes that add |
| 3476 | * private TCP options. The cost is reduced data space in SYN :( |
| 3477 | */ |
| 3478 | tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); |
| 3479 | |
| 3480 | space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - |
| 3481 | MAX_TCP_OPTION_SPACE; |
| 3482 | |
| 3483 | space = min_t(size_t, space, fo->size); |
| 3484 | |
| 3485 | /* limit to order-0 allocations */ |
| 3486 | space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); |
| 3487 | |
| 3488 | syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false); |
| 3489 | if (!syn_data) |
| 3490 | goto fallback; |
| 3491 | syn_data->ip_summed = CHECKSUM_PARTIAL; |
| 3492 | memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); |
| 3493 | if (space) { |
| 3494 | int copied = copy_from_iter(skb_put(syn_data, space), space, |
| 3495 | &fo->data->msg_iter); |
| 3496 | if (unlikely(!copied)) { |
| 3497 | tcp_skb_tsorted_anchor_cleanup(syn_data); |
| 3498 | kfree_skb(syn_data); |
| 3499 | goto fallback; |
| 3500 | } |
| 3501 | if (copied != space) { |
| 3502 | skb_trim(syn_data, copied); |
| 3503 | space = copied; |
| 3504 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3505 | skb_zcopy_set(syn_data, fo->uarg, NULL); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3506 | } |
| 3507 | /* No more data pending in inet_wait_for_connect() */ |
| 3508 | if (space == fo->size) |
| 3509 | fo->data = NULL; |
| 3510 | fo->copied = space; |
| 3511 | |
| 3512 | tcp_connect_queue_skb(sk, syn_data); |
| 3513 | if (syn_data->len) |
| 3514 | tcp_chrono_start(sk, TCP_CHRONO_BUSY); |
| 3515 | |
| 3516 | err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); |
| 3517 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3518 | syn->skb_mstamp_ns = syn_data->skb_mstamp_ns; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3519 | |
| 3520 | /* Now full SYN+DATA was cloned and sent (or not), |
| 3521 | * remove the SYN from the original skb (syn_data) |
| 3522 | * we keep in write queue in case of a retransmit, as we |
| 3523 | * also have the SYN packet (with no data) in the same queue. |
| 3524 | */ |
| 3525 | TCP_SKB_CB(syn_data)->seq++; |
| 3526 | TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; |
| 3527 | if (!err) { |
| 3528 | tp->syn_data = (fo->copied > 0); |
| 3529 | tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); |
| 3530 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); |
| 3531 | goto done; |
| 3532 | } |
| 3533 | |
| 3534 | /* data was not sent, put it in write_queue */ |
| 3535 | __skb_queue_tail(&sk->sk_write_queue, syn_data); |
| 3536 | tp->packets_out -= tcp_skb_pcount(syn_data); |
| 3537 | |
| 3538 | fallback: |
| 3539 | /* Send a regular SYN with Fast Open cookie request option */ |
| 3540 | if (fo->cookie.len > 0) |
| 3541 | fo->cookie.len = 0; |
| 3542 | err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); |
| 3543 | if (err) |
| 3544 | tp->syn_fastopen = 0; |
| 3545 | done: |
| 3546 | fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ |
| 3547 | return err; |
| 3548 | } |
| 3549 | |
| 3550 | /* Build a SYN and send it off. */ |
| 3551 | int tcp_connect(struct sock *sk) |
| 3552 | { |
| 3553 | struct tcp_sock *tp = tcp_sk(sk); |
| 3554 | struct sk_buff *buff; |
| 3555 | int err; |
| 3556 | |
| 3557 | tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL); |
| 3558 | |
| 3559 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) |
| 3560 | return -EHOSTUNREACH; /* Routing failure or similar. */ |
| 3561 | |
| 3562 | tcp_connect_init(sk); |
| 3563 | |
| 3564 | if (unlikely(tp->repair)) { |
| 3565 | tcp_finish_connect(sk, NULL); |
| 3566 | return 0; |
| 3567 | } |
| 3568 | |
| 3569 | buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true); |
| 3570 | if (unlikely(!buff)) |
| 3571 | return -ENOBUFS; |
| 3572 | |
| 3573 | tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); |
| 3574 | tcp_mstamp_refresh(tp); |
| 3575 | tp->retrans_stamp = tcp_time_stamp(tp); |
| 3576 | tcp_connect_queue_skb(sk, buff); |
| 3577 | tcp_ecn_send_syn(sk, buff); |
| 3578 | tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); |
| 3579 | |
| 3580 | /* Send off SYN; include data in Fast Open. */ |
| 3581 | err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : |
| 3582 | tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); |
| 3583 | if (err == -ECONNREFUSED) |
| 3584 | return err; |
| 3585 | |
| 3586 | /* We change tp->snd_nxt after the tcp_transmit_skb() call |
| 3587 | * in order to make this packet get counted in tcpOutSegs. |
| 3588 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3589 | WRITE_ONCE(tp->snd_nxt, tp->write_seq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3590 | tp->pushed_seq = tp->write_seq; |
| 3591 | buff = tcp_send_head(sk); |
| 3592 | if (unlikely(buff)) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3593 | WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3594 | tp->pushed_seq = TCP_SKB_CB(buff)->seq; |
| 3595 | } |
| 3596 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); |
| 3597 | |
| 3598 | /* Timer for repeating the SYN until an answer. */ |
| 3599 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
| 3600 | inet_csk(sk)->icsk_rto, TCP_RTO_MAX); |
| 3601 | return 0; |
| 3602 | } |
| 3603 | EXPORT_SYMBOL(tcp_connect); |
| 3604 | |
| 3605 | /* Send out a delayed ack, the caller does the policy checking |
| 3606 | * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() |
| 3607 | * for details. |
| 3608 | */ |
| 3609 | void tcp_send_delayed_ack(struct sock *sk) |
| 3610 | { |
| 3611 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 3612 | int ato = icsk->icsk_ack.ato; |
| 3613 | unsigned long timeout; |
| 3614 | |
| 3615 | if (ato > TCP_DELACK_MIN) { |
| 3616 | const struct tcp_sock *tp = tcp_sk(sk); |
| 3617 | int max_ato = HZ / 2; |
| 3618 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3619 | if (inet_csk_in_pingpong_mode(sk) || |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3620 | (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) |
| 3621 | max_ato = TCP_DELACK_MAX; |
| 3622 | |
| 3623 | /* Slow path, intersegment interval is "high". */ |
| 3624 | |
| 3625 | /* If some rtt estimate is known, use it to bound delayed ack. |
| 3626 | * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements |
| 3627 | * directly. |
| 3628 | */ |
| 3629 | if (tp->srtt_us) { |
| 3630 | int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), |
| 3631 | TCP_DELACK_MIN); |
| 3632 | |
| 3633 | if (rtt < max_ato) |
| 3634 | max_ato = rtt; |
| 3635 | } |
| 3636 | |
| 3637 | ato = min(ato, max_ato); |
| 3638 | } |
| 3639 | |
| 3640 | /* Stay within the limit we were given */ |
| 3641 | timeout = jiffies + ato; |
| 3642 | |
| 3643 | /* Use new timeout only if there wasn't a older one earlier. */ |
| 3644 | if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { |
| 3645 | /* If delack timer was blocked or is about to expire, |
| 3646 | * send ACK now. |
| 3647 | */ |
| 3648 | if (icsk->icsk_ack.blocked || |
| 3649 | time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { |
| 3650 | tcp_send_ack(sk); |
| 3651 | return; |
| 3652 | } |
| 3653 | |
| 3654 | if (!time_before(timeout, icsk->icsk_ack.timeout)) |
| 3655 | timeout = icsk->icsk_ack.timeout; |
| 3656 | } |
| 3657 | icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; |
| 3658 | icsk->icsk_ack.timeout = timeout; |
| 3659 | sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); |
| 3660 | } |
| 3661 | |
| 3662 | /* This routine sends an ack and also updates the window. */ |
| 3663 | void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) |
| 3664 | { |
| 3665 | struct sk_buff *buff; |
| 3666 | |
| 3667 | /* If we have been reset, we may not send again. */ |
| 3668 | if (sk->sk_state == TCP_CLOSE) |
| 3669 | return; |
| 3670 | |
| 3671 | /* We are not putting this on the write queue, so |
| 3672 | * tcp_transmit_skb() will set the ownership to this |
| 3673 | * sock. |
| 3674 | */ |
| 3675 | buff = alloc_skb(MAX_TCP_HEADER, |
| 3676 | sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); |
| 3677 | if (unlikely(!buff)) { |
| 3678 | inet_csk_schedule_ack(sk); |
| 3679 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; |
| 3680 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, |
| 3681 | TCP_DELACK_MAX, TCP_RTO_MAX); |
| 3682 | return; |
| 3683 | } |
| 3684 | |
| 3685 | /* Reserve space for headers and prepare control bits. */ |
| 3686 | skb_reserve(buff, MAX_TCP_HEADER); |
| 3687 | tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); |
| 3688 | |
| 3689 | /* We do not want pure acks influencing TCP Small Queues or fq/pacing |
| 3690 | * too much. |
| 3691 | * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784 |
| 3692 | */ |
| 3693 | skb_set_tcp_pure_ack(buff); |
| 3694 | |
| 3695 | /* Send it off, this clears delayed acks for us. */ |
| 3696 | __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); |
| 3697 | } |
| 3698 | EXPORT_SYMBOL_GPL(__tcp_send_ack); |
| 3699 | |
| 3700 | void tcp_send_ack(struct sock *sk) |
| 3701 | { |
| 3702 | __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); |
| 3703 | } |
| 3704 | |
| 3705 | /* This routine sends a packet with an out of date sequence |
| 3706 | * number. It assumes the other end will try to ack it. |
| 3707 | * |
| 3708 | * Question: what should we make while urgent mode? |
| 3709 | * 4.4BSD forces sending single byte of data. We cannot send |
| 3710 | * out of window data, because we have SND.NXT==SND.MAX... |
| 3711 | * |
| 3712 | * Current solution: to send TWO zero-length segments in urgent mode: |
| 3713 | * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is |
| 3714 | * out-of-date with SND.UNA-1 to probe window. |
| 3715 | */ |
| 3716 | static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) |
| 3717 | { |
| 3718 | struct tcp_sock *tp = tcp_sk(sk); |
| 3719 | struct sk_buff *skb; |
| 3720 | |
| 3721 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ |
| 3722 | skb = alloc_skb(MAX_TCP_HEADER, |
| 3723 | sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); |
| 3724 | if (!skb) |
| 3725 | return -1; |
| 3726 | |
| 3727 | /* Reserve space for headers and set control bits. */ |
| 3728 | skb_reserve(skb, MAX_TCP_HEADER); |
| 3729 | /* Use a previous sequence. This should cause the other |
| 3730 | * end to send an ack. Don't queue or clone SKB, just |
| 3731 | * send it. |
| 3732 | */ |
| 3733 | tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); |
| 3734 | NET_INC_STATS(sock_net(sk), mib); |
| 3735 | return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0); |
| 3736 | } |
| 3737 | |
| 3738 | /* Called from setsockopt( ... TCP_REPAIR ) */ |
| 3739 | void tcp_send_window_probe(struct sock *sk) |
| 3740 | { |
| 3741 | if (sk->sk_state == TCP_ESTABLISHED) { |
| 3742 | tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; |
| 3743 | tcp_mstamp_refresh(tcp_sk(sk)); |
| 3744 | tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE); |
| 3745 | } |
| 3746 | } |
| 3747 | |
| 3748 | /* Initiate keepalive or window probe from timer. */ |
| 3749 | int tcp_write_wakeup(struct sock *sk, int mib) |
| 3750 | { |
| 3751 | struct tcp_sock *tp = tcp_sk(sk); |
| 3752 | struct sk_buff *skb; |
| 3753 | |
| 3754 | if (sk->sk_state == TCP_CLOSE) |
| 3755 | return -1; |
| 3756 | |
| 3757 | skb = tcp_send_head(sk); |
| 3758 | if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { |
| 3759 | int err; |
| 3760 | unsigned int mss = tcp_current_mss(sk); |
| 3761 | unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; |
| 3762 | |
| 3763 | if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) |
| 3764 | tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; |
| 3765 | |
| 3766 | /* We are probing the opening of a window |
| 3767 | * but the window size is != 0 |
| 3768 | * must have been a result SWS avoidance ( sender ) |
| 3769 | */ |
| 3770 | if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || |
| 3771 | skb->len > mss) { |
| 3772 | seg_size = min(seg_size, mss); |
| 3773 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; |
| 3774 | if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, |
| 3775 | skb, seg_size, mss, GFP_ATOMIC)) |
| 3776 | return -1; |
| 3777 | } else if (!tcp_skb_pcount(skb)) |
| 3778 | tcp_set_skb_tso_segs(skb, mss); |
| 3779 | |
| 3780 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; |
| 3781 | err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); |
| 3782 | if (!err) |
| 3783 | tcp_event_new_data_sent(sk, skb); |
| 3784 | return err; |
| 3785 | } else { |
| 3786 | if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) |
| 3787 | tcp_xmit_probe_skb(sk, 1, mib); |
| 3788 | return tcp_xmit_probe_skb(sk, 0, mib); |
| 3789 | } |
| 3790 | } |
| 3791 | |
| 3792 | /* A window probe timeout has occurred. If window is not closed send |
| 3793 | * a partial packet else a zero probe. |
| 3794 | */ |
| 3795 | void tcp_send_probe0(struct sock *sk) |
| 3796 | { |
| 3797 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 3798 | struct tcp_sock *tp = tcp_sk(sk); |
| 3799 | struct net *net = sock_net(sk); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3800 | unsigned long timeout; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3801 | int err; |
| 3802 | |
| 3803 | err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE); |
| 3804 | |
| 3805 | if (tp->packets_out || tcp_write_queue_empty(sk)) { |
| 3806 | /* Cancel probe timer, if it is not required. */ |
| 3807 | icsk->icsk_probes_out = 0; |
| 3808 | icsk->icsk_backoff = 0; |
| 3809 | return; |
| 3810 | } |
| 3811 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3812 | icsk->icsk_probes_out++; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3813 | if (err <= 0) { |
| 3814 | if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2) |
| 3815 | icsk->icsk_backoff++; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3816 | timeout = tcp_probe0_when(sk, TCP_RTO_MAX); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3817 | } else { |
| 3818 | /* If packet was not sent due to local congestion, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3819 | * Let senders fight for local resources conservatively. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3820 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3821 | timeout = TCP_RESOURCE_PROBE_INTERVAL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3822 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3823 | tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3824 | } |
| 3825 | |
| 3826 | int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) |
| 3827 | { |
| 3828 | const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; |
| 3829 | struct flowi fl; |
| 3830 | int res; |
| 3831 | |
| 3832 | tcp_rsk(req)->txhash = net_tx_rndhash(); |
| 3833 | res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL); |
| 3834 | if (!res) { |
| 3835 | __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); |
| 3836 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); |
| 3837 | if (unlikely(tcp_passive_fastopen(sk))) |
| 3838 | tcp_sk(sk)->total_retrans++; |
| 3839 | trace_tcp_retransmit_synack(sk, req); |
| 3840 | } |
| 3841 | return res; |
| 3842 | } |
| 3843 | EXPORT_SYMBOL(tcp_rtx_synack); |