// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET An implementation of the TCP/IP protocol suite for the LINUX
 * operating system. INET is implemented using the BSD Socket
 * interface as the means of communication with the user level.
 *
 * Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors: Ross Biro
 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 * Mark Evans, <evansmp@uhura.aston.ac.uk>
 * Corey Minyard <wf-rch!minyard@relay.EU.net>
 * Florian La Roche, <flla@stud.uni-sb.de>
 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 * Linus Torvalds, <torvalds@cs.helsinki.fi>
 * Alan Cox, <gw4pts@gw4pts.ampr.org>
 * Matthew Dillon, <dillon@apollo.west.oic.com>
 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 * Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 * Alan Cox : Numerous verify_area() calls
 * Alan Cox : Set the ACK bit on a reset
 * Alan Cox : Stopped it crashing if it closed while
 * sk->inuse=1 and was trying to connect
 * (tcp_err()).
 * Alan Cox : All icmp error handling was broken
 * pointers passed were wrong and the
 * socket was looked up backwards. Nobody
 * tested any icmp error code obviously.
 * Alan Cox : tcp_err() now handled properly. It
 * wakes people on errors. poll
 * behaves and the icmp error race
 * has gone by moving it into sock.c
 * Alan Cox : tcp_send_reset() fixed to work for
 * everything not just packets for
 * unknown sockets.
 * Alan Cox : tcp option processing.
 * Alan Cox : Reset tweaked (still not 100%) [Had
 * syn rule wrong]
 * Herp Rosmanith : More reset fixes
 * Alan Cox : No longer acks invalid rst frames.
 * Acking any kind of RST is right out.
 * Alan Cox : Sets an ignore me flag on an rst
 * receive otherwise odd bits of prattle
 * escape still
 * Alan Cox : Fixed another acking RST frame bug.
 * Should stop LAN workplace lockups.
 * Alan Cox : Some tidyups using the new skb list
 * facilities
 * Alan Cox : sk->keepopen now seems to work
 * Alan Cox : Pulls options out correctly on accepts
 * Alan Cox : Fixed assorted sk->rqueue->next errors
 * Alan Cox : PSH doesn't end a TCP read. Switched a
 * bit to skb ops.
 * Alan Cox : Tidied tcp_data to avoid a potential
 * nasty.
 * Alan Cox : Added some better commenting, as the
 * tcp is hard to follow
 * Alan Cox : Removed incorrect check for 20 * psh
 * Michael O'Reilly : ack < copied bug fix.
 * Johannes Stille : Misc tcp fixes (not all in yet).
 * Alan Cox : FIN with no memory -> CRASH
 * Alan Cox : Added socket option proto entries.
 * Also added awareness of them to accept.
 * Alan Cox : Added TCP options (SOL_TCP)
 * Alan Cox : Switched wakeup calls to callbacks,
 * so the kernel can layer network
 * sockets.
 * Alan Cox : Use ip_tos/ip_ttl settings.
 * Alan Cox : Handle FIN (more) properly (we hope).
 * Alan Cox : RST frames sent on unsynchronised
 * state ack error.
 * Alan Cox : Put in missing check for SYN bit.
 * Alan Cox : Added tcp_select_window() aka NET2E
 * window non shrink trick.
 * Alan Cox : Added a couple of small NET2E timer
 * fixes
 * Charles Hedrick : TCP fixes
 * Toomas Tamm : TCP window fixes
 * Alan Cox : Small URG fix to rlogin ^C ack fight
 * Charles Hedrick : Rewrote most of it to actually work
 * Linus : Rewrote tcp_read() and URG handling
 * completely
 * Gerhard Koerting: Fixed some missing timer handling
 * Matthew Dillon : Reworked TCP machine states as per RFC
 * Gerhard Koerting: PC/TCP workarounds
 * Adam Caldwell : Assorted timer/timing errors
 * Matthew Dillon : Fixed another RST bug
 * Alan Cox : Move to kernel side addressing changes.
 * Alan Cox : Beginning work on TCP fastpathing
 * (not yet usable)
 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
 * Alan Cox : TCP fast path debugging
 * Alan Cox : Window clamping
 * Michael Riepe : Bug in tcp_check()
 * Matt Dillon : More TCP improvements and RST bug fixes
 * Matt Dillon : Yet more small nasties removed from the
 * TCP code (Be very nice to this man if
 * tcp finally works 100%) 8)
 * Alan Cox : BSD accept semantics.
 * Alan Cox : Reset on closedown bug.
 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
 * Michael Pall : Handle poll() after URG properly in
 * all cases.
 * Michael Pall : Undo the last fix in tcp_read_urg()
 * (multi URG PUSH broke rlogin).
 * Michael Pall : Fix the multi URG PUSH problem in
 * tcp_readable(), poll() after URG
 * works now.
 * Michael Pall : recv(...,MSG_OOB) never blocks in the
 * BSD api.
 * Alan Cox : Changed the semantics of sk->socket to
 * fix a race and a signal problem with
 * accept() and async I/O.
 * Alan Cox : Relaxed the rules on tcp_sendto().
 * Yury Shevchuk : Really fixed accept() blocking problem.
 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
 * clients/servers which listen in on
 * fixed ports.
 * Alan Cox : Cleaned the above up and shrank it to
 * a sensible code size.
 * Alan Cox : Self connect lockup fix.
 * Alan Cox : No connect to multicast.
 * Ross Biro : Close unaccepted children on master
 * socket close.
 * Alan Cox : Reset tracing code.
 * Alan Cox : Spurious resets on shutdown.
 * Alan Cox : Giant 15 minute/60 second timer error
 * Alan Cox : Small whoops in polling before an
 * accept.
 * Alan Cox : Kept the state trace facility since
 * it's handy for debugging.
 * Alan Cox : More reset handler fixes.
 * Alan Cox : Started rewriting the code based on
 * the RFC's for other useful protocol
 * references see: Comer, KA9Q NOS, and
 * for a reference on the difference
 * between specifications and how BSD
 * works see the 4.4lite source.
 * A.N.Kuznetsov : Don't time wait on completion of tidy
 * close.
 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
 * Linus Torvalds : Fixed BSD port reuse to work first syn
 * Alan Cox : Reimplemented timers as per the RFC
 * and using multiple timers for sanity.
 * Alan Cox : Small bug fixes, and a lot of new
 * comments.
 * Alan Cox : Fixed dual reader crash by locking
 * the buffers (much like datagram.c)
 * Alan Cox : Fixed stuck sockets in probe. A probe
 * now gets fed up of retrying without
 * (even a no space) answer.
 * Alan Cox : Extracted closing code better
 * Alan Cox : Fixed the closing state machine to
 * resemble the RFC.
 * Alan Cox : More 'per spec' fixes.
 * Jorge Cwik : Even faster checksumming.
 * Alan Cox : tcp_data() doesn't ack illegal PSH
 * only frames. At least one pc tcp stack
 * generates them.
 * Alan Cox : Cache last socket.
 * Alan Cox : Per route irtt.
 * Matt Day : poll()->select() match BSD precisely on error
 * Alan Cox : New buffers
 * Marc Tamsky : Various sk->prot->retransmits and
 * sk->retransmits misupdating fixed.
 * Fixed tcp_write_timeout: stuck close,
 * and TCP syn retries gets used now.
 * Mark Yarvis : In tcp_read_wakeup(), don't send an
 * ack if state is TCP_CLOSED.
 * Alan Cox : Look up device on a retransmit - routes may
 * change. Doesn't yet cope with MSS shrink right
 * but it's a start!
 * Marc Tamsky : Closing in closing fixes.
 * Mike Shaver : RFC1122 verifications.
 * Alan Cox : rcv_saddr errors.
 * Alan Cox : Block double connect().
 * Alan Cox : Small hooks for enSKIP.
 * Alexey Kuznetsov: Path MTU discovery.
 * Alan Cox : Support soft errors.
 * Alan Cox : Fix MTU discovery pathological case
 * when the remote claims no mtu!
 * Marc Tamsky : TCP_CLOSE fix.
 * Colin (G3TNE) : Send a reset on syn ack replies in
 * window but wrong (fixes NT lpd problems)
 * Pedro Roque : Better TCP window handling, delayed ack.
 * Joerg Reuter : No modification of locked buffers in
 * tcp_do_retransmit()
 * Eric Schenk : Changed receiver side silly window
 * avoidance algorithm to BSD style
 * algorithm. This doubles throughput
 * against machines running Solaris,
 * and seems to result in general
 * improvement.
 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
 * Willy Konynenberg : Transparent proxying support.
 * Mike McLagan : Routing by source
 * Keith Owens : Do proper merging with partial SKB's in
 * tcp_do_sendmsg to avoid burstiness.
 * Eric Schenk : Fix fast close down bug with
 * shutdown() followed by close().
 * Andi Kleen : Make poll agree with SIGIO
 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
 * lingertime == 0 (RFC 793 ABORT Call)
 * Hirokazu Takahashi : Use copy_from_user() instead of
 * csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 * TCP_SYN_SENT sent a connection request, waiting for ack
 *
 * TCP_SYN_RECV received a connection request, sent ack,
 * waiting for final ack in three-way handshake.
 *
 * TCP_ESTABLISHED connection established
 *
 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
 * transmission of remaining buffered data
 *
 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
 * to shutdown
 *
 * TCP_CLOSING both sides have shutdown but we still have
 * data we have to finish sending
 *
 * TCP_TIME_WAIT timeout to catch resent junk before entering
 * closed, can only be entered from FIN_WAIT2
 * or CLOSING. Required because the other end
 * may not have gotten our last ACK causing it
 * to retransmit the data packet (which we ignore)
 *
 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
 * us to finish writing our data and to shutdown
 * (we have to close() to move on to LAST_ACK)
 *
 * TCP_LAST_ACK our side has shutdown after remote has
 * shutdown. There may still be data in our
 * buffer that we have to finish sending
 *
 * TCP_CLOSE socket is finished
 */
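
/*
 * Illustrative, informal walk through the states above (standard RFC 793
 * behaviour): the side performing an active close moves ESTABLISHED ->
 * FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE, while the side performing
 * the passive close moves ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE.
 */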

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
        struct pipe_inode_info *pipe;
        size_t len;
        unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
EXPORT_SYMBOL(tcp_rx_skb_cache_key);

DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);

void tcp_enter_memory_pressure(struct sock *sk)
{
        unsigned long val;

        if (READ_ONCE(tcp_memory_pressure))
                return;
        val = jiffies;

        if (!val)
                val--;
        if (!cmpxchg(&tcp_memory_pressure, 0, val))
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
        unsigned long val;

        if (!READ_ONCE(tcp_memory_pressure))
                return;
        val = xchg(&tcp_memory_pressure, 0);
        if (val)
                NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
                              jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
        u8 res = 0;

        if (seconds > 0) {
                int period = timeout;

                res = 1;
                while (seconds > period && res < 255) {
                        res++;
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return res;
}
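
/* Worked example for secs_to_retrans() (illustrative numbers, assuming a
 * 1 second initial timeout and a 120 second rto_max, in the same units as
 * @seconds): secs_to_retrans(10, 1, 120) accumulates backoff periods of
 * 1 + 2 + 4 + 8 = 15 seconds, so 10 seconds are covered after the 4th
 * retransmit and the function returns 4.
 */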

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
        int period = 0;

        if (retrans > 0) {
                period = timeout;
                while (--retrans) {
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return period;
}
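
/* Worked example for retrans_to_secs() (same illustrative assumptions as
 * above): retrans_to_secs(4, 1, 120) = 1 + 2 + 4 + 8 = 15 seconds. Note
 * the two helpers are deliberately coarse and are not exact inverses:
 * secs_to_retrans(10, 1, 120) == 4, but retrans_to_secs(4, 1, 120) == 15.
 */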

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
        u32 rate = READ_ONCE(tp->rate_delivered);
        u32 intv = READ_ONCE(tp->rate_interval_us);
        u64 rate64 = 0;

        if (rate && intv) {
                rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
                do_div(rate64, intv);
        }
        return rate64;
}
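
/* Worked example for tcp_compute_delivery_rate() (illustrative numbers):
 * with rate_delivered == 10 segments, mss_cache == 1448 bytes and a
 * rate_interval_us of 10000 usec, the estimate is
 * 10 * 1448 * USEC_PER_SEC / 10000 = 1448000 bytes/sec (~1.45 MB/s).
 */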

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things set to zero explicitly by call to
 * sk_alloc() so need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        tp->out_of_order_queue = RB_ROOT;
        sk->tcp_rtx_queue = RB_ROOT;
        tcp_init_xmit_timers(sk);
        INIT_LIST_HEAD(&tp->tsq_node);
        INIT_LIST_HEAD(&tp->tsorted_sent_queue);

        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
        minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them. -DaveM
         */
        tp->snd_cwnd = TCP_INIT_CWND;

        /* There's a bubble in the pipe until at least the first ACK. */
        tp->app_limited = ~0U;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = TCP_MSS_DEFAULT;

        tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
        tcp_assign_congestion_control(sk);

        tp->tsoffset = 0;
        tp->rack.reo_wnd_steps = 1;

        sk->sk_state = TCP_CLOSE;

        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

        icsk->icsk_sync_mss = tcp_sync_mss;

        WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
        WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);

        sk_sockets_allocated_inc(sk);
        sk->sk_route_forced_caps = NETIF_F_GSO;
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
        struct sk_buff *skb = tcp_write_queue_tail(sk);

        if (tsflags && skb) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);
                struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

                sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
                if (tsflags & SOF_TIMESTAMPING_TX_ACK)
                        tcb->txstamp_ack = 1;
                if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
                        shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
        }
}

static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
                                          int target, struct sock *sk)
{
        return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
                (sk->sk_prot->stream_memory_read ?
                sk->sk_prot->stream_memory_read(sk) : false);
}

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        __poll_t mask;
        struct sock *sk = sock->sk;
        const struct tcp_sock *tp = tcp_sk(sk);
        int state;

        sock_poll_wait(file, sock, wait);

        state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
         * by poll logic and correct handling of state changes
         * made by other threads is impossible in any case.
         */

        mask = 0;

        /*
         * EPOLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
         * socket the read side is more interesting.
         *
         * Some poll() documentation says that EPOLLHUP is incompatible
         * with the EPOLLOUT/POLLWR flags, so somebody should check this
         * all. But careful, it tends to be safer to return too many
         * bits than too few, and you can easily break real applications
         * if you don't tell them that something has hung up!
         *
         * Check-me.
         *
         * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
         * our fs/select.c). It means that after we received EOF,
         * poll always returns immediately, making impossible poll() on write()
         * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
         * if and only if shutdown has been made in both directions.
         * Actually, it is interesting to look how Solaris and DUX
         * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
         * then we could set it on SND_SHUTDOWN. BTW examples given
         * in Stevens' books assume exactly this behaviour, it explains
         * why EPOLLHUP is incompatible with EPOLLOUT. --ANK
         *
         * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
         * blocking on fresh not-connected or disconnected socket. --ANK
         */
        if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
                mask |= EPOLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

        /* Connected or passive Fast Open socket? */
        if (state != TCP_SYN_SENT &&
            (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);

                if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
                    !sock_flag(sk, SOCK_URGINLINE) &&
                    tp->urg_data)
                        target++;

                if (tcp_stream_is_readable(tp, target, sk))
                        mask |= EPOLLIN | EPOLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_is_writeable(sk)) {
                                mask |= EPOLLOUT | EPOLLWRNORM;
                        } else { /* send SIGIO later */
                                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost. Memory barrier
                                 * pairs with the input side.
                                 */
                                smp_mb__after_atomic();
                                if (sk_stream_is_writeable(sk))
                                        mask |= EPOLLOUT | EPOLLWRNORM;
                        }
                } else
                        mask |= EPOLLOUT | EPOLLWRNORM;

                if (tp->urg_data & TCP_URG_VALID)
                        mask |= EPOLLPRI;
        } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
                /* Active TCP fastopen socket with defer_connect
                 * Return EPOLLOUT so application can call write()
                 * in order for kernel to generate SYN+data
                 */
                mask |= EPOLLOUT | EPOLLWRNORM;
        }
        /* This barrier is coupled with smp_wmb() in tcp_reset() */
        smp_rmb();
        if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR;

        return mask;
}
EXPORT_SYMBOL(tcp_poll);
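
/* Hypothetical user-space sketch (not kernel code) of how the mask built
 * above surfaces through poll(2), assuming fd is a connected TCP socket;
 * handle_readable()/handle_writable() are placeholder application
 * callbacks:
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *      if (poll(&pfd, 1, -1) > 0) {
 *              if (pfd.revents & POLLIN)       // data (or FIN) to read
 *                      handle_readable(fd);
 *              if (pfd.revents & POLLOUT)      // sndbuf space available
 *                      handle_writable(fd);
 *      }
 */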

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int answ;
        bool slow;

        switch (cmd) {
        case SIOCINQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                slow = lock_sock_fast(sk);
                answ = tcp_inq(sk);
                unlock_sock_fast(sk, slow);
                break;
        case SIOCATMARK:
                answ = tp->urg_data &&
                       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
                break;
        case SIOCOUTQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
                        answ = READ_ONCE(tp->write_seq) - tp->snd_una;
                break;
        case SIOCOUTQNSD:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
                        answ = READ_ONCE(tp->write_seq) -
                               READ_ONCE(tp->snd_nxt);
                break;
        default:
                return -ENOIOCTLCMD;
        }

        return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);
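
/* Hypothetical user-space sketch (not kernel code) of the queue queries
 * served above, assuming <sys/ioctl.h>, <linux/sockios.h> and a connected
 * TCP socket fd:
 *
 *      int unread, unsent;
 *
 *      ioctl(fd, SIOCINQ, &unread);    // bytes readable without blocking
 *      ioctl(fd, SIOCOUTQ, &unsent);   // bytes written but not yet acked
 */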

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
        tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
        return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static void skb_entail(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

        skb->csum = 0;
        tcb->seq = tcb->end_seq = tp->write_seq;
        tcb->tcp_flags = TCPHDR_ACK;
        tcb->sacked = 0;
        __skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk_wmem_queued_add(sk, skb->truesize);
        sk_mem_charge(sk, skb->truesize);
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;

        tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
        if (flags & MSG_OOB)
                tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
                                int size_goal)
{
        return skb->len < size_goal &&
               sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
               !tcp_rtx_queue_empty(sk) &&
               refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
}

static void tcp_push(struct sock *sk, int flags, int mss_now,
                     int nonagle, int size_goal)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;

        skb = tcp_write_queue_tail(sk);
        if (!skb)
                return;
        if (!(flags & MSG_MORE) || forced_push(tp))
                tcp_mark_push(tp, skb);

        tcp_mark_urg(tp, flags);

        if (tcp_should_autocork(sk, skb, size_goal)) {

                /* avoid atomic op if TSQ_THROTTLED bit is already set */
                if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
                        set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
                }
                /* It is possible TX completion already happened
                 * before we set TSQ_THROTTLED.
                 */
                if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
                        return;
        }

        if (flags & MSG_MORE)
                nonagle = TCP_NAGLE_CORK;

        __tcp_push_pending_frames(sk, mss_now, nonagle);
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
                                unsigned int offset, size_t len)
{
        struct tcp_splice_state *tss = rd_desc->arg.data;
        int ret;

        ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
                              min(rd_desc->count, len), tss->flags);
        if (ret > 0)
                rd_desc->count -= ret;
        return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
        /* Store TCP splice context information in read_descriptor_t. */
        read_descriptor_t rd_desc = {
                .arg.data = tss,
                .count = tss->len,
        };

        return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 * tcp_splice_read - splice data from TCP socket to a pipe
 * @sock: socket to splice from
 * @ppos: position (not valid)
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 * Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags)
{
        struct sock *sk = sock->sk;
        struct tcp_splice_state tss = {
                .pipe = pipe,
                .len = len,
                .flags = flags,
        };
        long timeo;
        ssize_t spliced;
        int ret;

        sock_rps_record_flow(sk);
        /*
         * We can't seek on a socket input
         */
        if (unlikely(*ppos))
                return -ESPIPE;

        ret = spliced = 0;

        lock_sock(sk);

        timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
        while (tss.len) {
                ret = __tcp_splice_read(sk, &tss);
                if (ret < 0)
                        break;
                else if (!ret) {
                        if (spliced)
                                break;
                        if (sock_flag(sk, SOCK_DONE))
                                break;
                        if (sk->sk_err) {
                                ret = sock_error(sk);
                                break;
                        }
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
                        if (sk->sk_state == TCP_CLOSE) {
                                /*
                                 * This occurs when user tries to read
                                 * from never connected socket.
                                 */
                                ret = -ENOTCONN;
                                break;
                        }
                        if (!timeo) {
                                ret = -EAGAIN;
                                break;
                        }
                        /* if __tcp_splice_read() got nothing while we have
                         * an skb in receive queue, we do not want to loop.
                         * This might happen with URG data.
                         */
                        if (!skb_queue_empty(&sk->sk_receive_queue))
                                break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
                                break;
                        }
                        continue;
                }
                tss.len -= ret;
                spliced += ret;

                if (!timeo)
                        break;
                release_sock(sk);
                lock_sock(sk);

                if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
                    (sk->sk_shutdown & RCV_SHUTDOWN) ||
                    signal_pending(current))
                        break;
        }

        release_sock(sk);

        if (spliced)
                return spliced;

        return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
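
/* Hypothetical user-space sketch (not kernel code): splice(2) from a
 * connected TCP socket fd into a pipe ends up in tcp_splice_read() above,
 * moving payload without copying it through user memory. Assumes pfd was
 * created with pipe(pfd):
 *
 *      ssize_t n = splice(fd, NULL, pfd[1], NULL, 65536,
 *                         SPLICE_F_MOVE | SPLICE_F_MORE);
 */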

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
                                    bool force_schedule)
{
        struct sk_buff *skb;

        if (likely(!size)) {
                skb = sk->sk_tx_skb_cache;
                if (skb) {
                        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
                        sk->sk_tx_skb_cache = NULL;
                        pskb_trim(skb, 0);
                        INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
                        skb_shinfo(skb)->tx_flags = 0;
                        memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
                        return skb;
                }
        }
        /* The TCP header must be at least 32-bit aligned. */
        size = ALIGN(size, 4);

        if (unlikely(tcp_under_memory_pressure(sk)))
                sk_mem_reclaim_partial(sk);

        skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
        if (likely(skb)) {
                bool mem_scheduled;

                if (force_schedule) {
                        mem_scheduled = true;
                        sk_forced_mem_schedule(sk, skb->truesize);
                } else {
                        mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
                }
                if (likely(mem_scheduled)) {
                        skb_reserve(skb, sk->sk_prot->max_header);
                        /*
                         * Make sure that we have exactly size bytes
                         * available to the caller, no more, no less.
                         */
                        skb->reserved_tailroom = skb->end - skb->tail - size;
                        INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
                        return skb;
                }
                __kfree_skb(skb);
        } else {
                sk->sk_prot->enter_memory_pressure(sk);
                sk_stream_moderate_sndbuf(sk);
        }
        return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
                                       int large_allowed)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 new_size_goal, size_goal;

        if (!large_allowed)
                return mss_now;

        /* Note : tcp_tso_autosize() will eventually split this later */
        new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
        new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);

        /* We try hard to avoid divides here */
        size_goal = tp->gso_segs * mss_now;
        if (unlikely(new_size_goal < size_goal ||
                     new_size_goal >= size_goal + mss_now)) {
                tp->gso_segs = min_t(u16, new_size_goal / mss_now,
                                     sk->sk_gso_max_segs);
                size_goal = tp->gso_segs * mss_now;
        }

        return max(size_goal, mss_now);
}
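
/* Rough worked example for tcp_xmit_size_goal() (illustrative; the exact
 * figures depend on MAX_TCP_HEADER and the socket's GSO limits): with
 * mss_now == 1448 and a GSO budget of about 64KB, gso_segs ends up around
 * 45, so size_goal lands near 45 * 1448 = 65160 bytes and sendmsg()
 * payload is packed into large GSO skbs instead of one skb per MSS.
 */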

static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
        int mss_now;

        mss_now = tcp_current_mss(sk);
        *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

        return mss_now;
}

/* In some cases, both sendpage() and sendmsg() could have added
 * an skb to the write queue, but failed adding payload on it.
 * We need to remove it to consume less memory, but more
 * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
 * users.
 */
static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
{
        if (skb && !skb->len) {
                tcp_unlink_write_queue(skb, sk);
                if (tcp_write_queue_empty(sk))
                        tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
                sk_wmem_free_skb(sk, skb);
        }
}

ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
                         size_t size, int flags)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int mss_now, size_goal;
        int err;
        ssize_t copied;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        if (IS_ENABLED(CONFIG_DEBUG_VM) &&
            WARN_ONCE(PageSlab(page), "page must not be a Slab one"))
                return -EINVAL;

        /* Wait for a connection to finish. One exception is TCP Fast Open
         * (passive side) where data is allowed to be sent before a connection
         * is fully established.
         */
        if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
            !tcp_passive_fastopen(sk)) {
                err = sk_stream_wait_connect(sk, &timeo);
                if (err != 0)
                        goto out_err;
        }

        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        mss_now = tcp_send_mss(sk, &size_goal, flags);
        copied = 0;

        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;

        while (size > 0) {
                struct sk_buff *skb = tcp_write_queue_tail(sk);
                int copy, i;
                bool can_coalesce;

                if (!skb || (copy = size_goal - skb->len) <= 0 ||
                    !tcp_skb_can_collapse_to(skb)) {
new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;

                        skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
                                        tcp_rtx_and_write_queues_empty(sk));
                        if (!skb)
                                goto wait_for_memory;

#ifdef CONFIG_TLS_DEVICE
                        skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
#endif
                        skb_entail(sk, skb);
                        copy = size_goal;
                }

                if (copy > size)
                        copy = size;

                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
                if (!can_coalesce && i >= sysctl_max_skb_frags) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
                if (!sk_wmem_schedule(sk, copy))
                        goto wait_for_memory;

                if (can_coalesce) {
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                } else {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
                }

                if (!(flags & MSG_NO_SHARED_FRAGS))
                        skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

                skb->len += copy;
                skb->data_len += copy;
                skb->truesize += copy;
                sk_wmem_queued_add(sk, copy);
                sk_mem_charge(sk, copy);
                skb->ip_summed = CHECKSUM_PARTIAL;
                WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
                TCP_SKB_CB(skb)->end_seq += copy;
                tcp_skb_pcount_set(skb, 0);

                if (!copied)
                        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

                copied += copy;
                offset += copy;
                size -= copy;
                if (!size)
                        goto out;

                if (skb->len < size_goal || (flags & MSG_OOB))
                        continue;

                if (forced_push(tp)) {
                        tcp_mark_push(tp, skb);
                        __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
                } else if (skb == tcp_send_head(sk))
                        tcp_push_one(sk, mss_now);
                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                tcp_push(sk, flags & ~MSG_MORE, mss_now,
                         TCP_NAGLE_PUSH, size_goal);

                err = sk_stream_wait_memory(sk, &timeo);
                if (err != 0)
                        goto do_error;

                mss_now = tcp_send_mss(sk, &size_goal, flags);
        }

out:
        if (copied) {
                tcp_tx_timestamp(sk, sk->sk_tsflags);
                if (!(flags & MSG_SENDPAGE_NOTLAST))
                        tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
        }
        return copied;

do_error:
        tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
        if (copied)
                goto out;
out_err:
        /* make sure we wake any epoll edge trigger waiter */
        if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
                     err == -EAGAIN)) {
                sk->sk_write_space(sk);
                tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
        }
        return sk_stream_error(sk, flags, err);
}
EXPORT_SYMBOL_GPL(do_tcp_sendpages);

int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
                        size_t size, int flags)
{
        if (!(sk->sk_route_caps & NETIF_F_SG))
                return sock_no_sendpage_locked(sk, page, offset, size, flags);

        tcp_rate_check_app_limited(sk); /* is sending application-limited? */

        return do_tcp_sendpages(sk, page, offset, size, flags);
}
EXPORT_SYMBOL_GPL(tcp_sendpage_locked);

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
                 size_t size, int flags)
{
        int ret;

        lock_sock(sk);
        ret = tcp_sendpage_locked(sk, page, offset, size, flags);
        release_sock(sk);

        return ret;
}
EXPORT_SYMBOL(tcp_sendpage);
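
/* Hypothetical user-space sketch (not kernel code): sendfile(2) from a
 * regular file onto a TCP socket is one common path into tcp_sendpage().
 * Assumes file_fd, a connected socket fd and a file_size obtained from
 * fstat(2):
 *
 *      off_t off = 0;
 *      ssize_t sent = sendfile(fd, file_fd, &off, file_size);
 */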
| 1123 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1124 | void tcp_free_fastopen_req(struct tcp_sock *tp) |
| 1125 | { |
| 1126 | if (tp->fastopen_req) { |
| 1127 | kfree(tp->fastopen_req); |
| 1128 | tp->fastopen_req = NULL; |
| 1129 | } |
| 1130 | } |
| 1131 | |
| 1132 | static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1133 | int *copied, size_t size, |
| 1134 | struct ubuf_info *uarg) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1135 | { |
| 1136 | struct tcp_sock *tp = tcp_sk(sk); |
| 1137 | struct inet_sock *inet = inet_sk(sk); |
| 1138 | struct sockaddr *uaddr = msg->msg_name; |
| 1139 | int err, flags; |
| 1140 | |
| 1141 | if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) || |
| 1142 | (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && |
| 1143 | uaddr->sa_family == AF_UNSPEC)) |
| 1144 | return -EOPNOTSUPP; |
| 1145 | if (tp->fastopen_req) |
| 1146 | return -EALREADY; /* Another Fast Open is in progress */ |
| 1147 | |
| 1148 | tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), |
| 1149 | sk->sk_allocation); |
| 1150 | if (unlikely(!tp->fastopen_req)) |
| 1151 | return -ENOBUFS; |
| 1152 | tp->fastopen_req->data = msg; |
| 1153 | tp->fastopen_req->size = size; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1154 | tp->fastopen_req->uarg = uarg; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1155 | |
| 1156 | if (inet->defer_connect) { |
| 1157 | err = tcp_connect(sk); |
| 1158 | /* Same failure procedure as in tcp_v4/6_connect */ |
| 1159 | if (err) { |
| 1160 | tcp_set_state(sk, TCP_CLOSE); |
| 1161 | inet->inet_dport = 0; |
| 1162 | sk->sk_route_caps = 0; |
| 1163 | } |
| 1164 | } |
| 1165 | flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; |
| 1166 | err = __inet_stream_connect(sk->sk_socket, uaddr, |
| 1167 | msg->msg_namelen, flags, 1); |
| 1168 | /* fastopen_req could already be freed in __inet_stream_connect |
| 1169 | * if the connection times out or gets rst |
| 1170 | */ |
| 1171 | if (tp->fastopen_req) { |
| 1172 | *copied = tp->fastopen_req->copied; |
| 1173 | tcp_free_fastopen_req(tp); |
| 1174 | inet->defer_connect = 0; |
| 1175 | } |
| 1176 | return err; |
| 1177 | } |
| 1178 | |
| 1179 | int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) |
| 1180 | { |
| 1181 | struct tcp_sock *tp = tcp_sk(sk); |
| 1182 | struct ubuf_info *uarg = NULL; |
| 1183 | struct sk_buff *skb; |
| 1184 | struct sockcm_cookie sockc; |
| 1185 | int flags, err, copied = 0; |
| 1186 | int mss_now = 0, size_goal, copied_syn = 0; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1187 | int process_backlog = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1188 | bool zc = false; |
| 1189 | long timeo; |
| 1190 | |
| 1191 | flags = msg->msg_flags; |
| 1192 | |
| 1193 | if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1194 | skb = tcp_write_queue_tail(sk); |
| 1195 | uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb)); |
| 1196 | if (!uarg) { |
| 1197 | err = -ENOBUFS; |
| 1198 | goto out_err; |
| 1199 | } |
| 1200 | |
| 1201 | zc = sk->sk_route_caps & NETIF_F_SG; |
| 1202 | if (!zc) |
| 1203 | uarg->zerocopy = 0; |
| 1204 | } |
| 1205 | |
| 1206 | if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) && |
| 1207 | !tp->repair) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1208 | err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1209 | if (err == -EINPROGRESS && copied_syn > 0) |
| 1210 | goto out; |
| 1211 | else if (err) |
| 1212 | goto out_err; |
| 1213 | } |
| 1214 | |
| 1215 | timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); |
| 1216 | |
| 1217 | tcp_rate_check_app_limited(sk); /* is sending application-limited? */ |
| 1218 | |
| 1219 | /* Wait for a connection to finish. One exception is TCP Fast Open |
| 1220 | * (passive side) where data is allowed to be sent before a connection |
| 1221 | * is fully established. |
| 1222 | */ |
| 1223 | if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && |
| 1224 | !tcp_passive_fastopen(sk)) { |
| 1225 | err = sk_stream_wait_connect(sk, &timeo); |
| 1226 | if (err != 0) |
| 1227 | goto do_error; |
| 1228 | } |
| 1229 | |
| 1230 | if (unlikely(tp->repair)) { |
| 1231 | if (tp->repair_queue == TCP_RECV_QUEUE) { |
| 1232 | copied = tcp_send_rcvq(sk, msg, size); |
| 1233 | goto out_nopush; |
| 1234 | } |
| 1235 | |
| 1236 | err = -EINVAL; |
| 1237 | if (tp->repair_queue == TCP_NO_QUEUE) |
| 1238 | goto out_err; |
| 1239 | |
| 1240 | /* 'common' sending to sendq */ |
| 1241 | } |
| 1242 | |
| 1243 | sockcm_init(&sockc, sk); |
| 1244 | if (msg->msg_controllen) { |
| 1245 | err = sock_cmsg_send(sk, msg, &sockc); |
| 1246 | if (unlikely(err)) { |
| 1247 | err = -EINVAL; |
| 1248 | goto out_err; |
| 1249 | } |
| 1250 | } |
| 1251 | |
| 1252 | /* This should be in poll */ |
| 1253 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
| 1254 | |
| 1255 | /* Ok commence sending. */ |
| 1256 | copied = 0; |
| 1257 | |
| 1258 | restart: |
| 1259 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
| 1260 | |
| 1261 | err = -EPIPE; |
| 1262 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
| 1263 | goto do_error; |
| 1264 | |
| 1265 | while (msg_data_left(msg)) { |
| 1266 | int copy = 0; |
| 1267 | |
| 1268 | skb = tcp_write_queue_tail(sk); |
| 1269 | if (skb) |
| 1270 | copy = size_goal - skb->len; |
| 1271 | |
| 1272 | if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { |
| 1273 | bool first_skb; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1274 | |
| 1275 | new_segment: |
| 1276 | if (!sk_stream_memory_free(sk)) |
| 1277 | goto wait_for_sndbuf; |
| 1278 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1279 | if (unlikely(process_backlog >= 16)) { |
| 1280 | process_backlog = 0; |
| 1281 | if (sk_flush_backlog(sk)) |
| 1282 | goto restart; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1283 | } |
| 1284 | first_skb = tcp_rtx_and_write_queues_empty(sk); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1285 | skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1286 | first_skb); |
| 1287 | if (!skb) |
| 1288 | goto wait_for_memory; |
| 1289 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1290 | process_backlog++; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1291 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 1292 | |
| 1293 | skb_entail(sk, skb); |
| 1294 | copy = size_goal; |
| 1295 | |
| 1296 | /* All packets are restored as if they have |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1297 | * already been sent. skb_mstamp_ns isn't set to |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1298 | * avoid wrong rtt estimation. |
| 1299 | */ |
| 1300 | if (tp->repair) |
| 1301 | TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; |
| 1302 | } |
| 1303 | |
| 1304 | /* Try to append data to the end of skb. */ |
| 1305 | if (copy > msg_data_left(msg)) |
| 1306 | copy = msg_data_left(msg); |
| 1307 | |
| 1308 | /* Where to copy to? */ |
| 1309 | if (skb_availroom(skb) > 0 && !zc) { |
| 1310 | /* We have some space in skb head. Superb! */ |
| 1311 | copy = min_t(int, copy, skb_availroom(skb)); |
| 1312 | err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); |
| 1313 | if (err) |
| 1314 | goto do_fault; |
| 1315 | } else if (!zc) { |
| 1316 | bool merge = true; |
| 1317 | int i = skb_shinfo(skb)->nr_frags; |
| 1318 | struct page_frag *pfrag = sk_page_frag(sk); |
| 1319 | |
| 1320 | if (!sk_page_frag_refill(sk, pfrag)) |
| 1321 | goto wait_for_memory; |
| 1322 | |
| 1323 | if (!skb_can_coalesce(skb, i, pfrag->page, |
| 1324 | pfrag->offset)) { |
| 1325 | if (i >= sysctl_max_skb_frags) { |
| 1326 | tcp_mark_push(tp, skb); |
| 1327 | goto new_segment; |
| 1328 | } |
| 1329 | merge = false; |
| 1330 | } |
| 1331 | |
| 1332 | copy = min_t(int, copy, pfrag->size - pfrag->offset); |
| 1333 | |
| 1334 | if (!sk_wmem_schedule(sk, copy)) |
| 1335 | goto wait_for_memory; |
| 1336 | |
| 1337 | err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, |
| 1338 | pfrag->page, |
| 1339 | pfrag->offset, |
| 1340 | copy); |
| 1341 | if (err) |
| 1342 | goto do_error; |
| 1343 | |
| 1344 | /* Update the skb. */ |
| 1345 | if (merge) { |
| 1346 | skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); |
| 1347 | } else { |
| 1348 | skb_fill_page_desc(skb, i, pfrag->page, |
| 1349 | pfrag->offset, copy); |
| 1350 | page_ref_inc(pfrag->page); |
| 1351 | } |
| 1352 | pfrag->offset += copy; |
| 1353 | } else { |
| 1354 | err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); |
| 1355 | if (err == -EMSGSIZE || err == -EEXIST) { |
| 1356 | tcp_mark_push(tp, skb); |
| 1357 | goto new_segment; |
| 1358 | } |
| 1359 | if (err < 0) |
| 1360 | goto do_error; |
| 1361 | copy = err; |
| 1362 | } |
| 1363 | |
| 1364 | if (!copied) |
| 1365 | TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; |
| 1366 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1367 | WRITE_ONCE(tp->write_seq, tp->write_seq + copy); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1368 | TCP_SKB_CB(skb)->end_seq += copy; |
| 1369 | tcp_skb_pcount_set(skb, 0); |
| 1370 | |
| 1371 | copied += copy; |
| 1372 | if (!msg_data_left(msg)) { |
| 1373 | if (unlikely(flags & MSG_EOR)) |
| 1374 | TCP_SKB_CB(skb)->eor = 1; |
| 1375 | goto out; |
| 1376 | } |
| 1377 | |
| 1378 | if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) |
| 1379 | continue; |
| 1380 | |
| 1381 | if (forced_push(tp)) { |
| 1382 | tcp_mark_push(tp, skb); |
| 1383 | __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); |
| 1384 | } else if (skb == tcp_send_head(sk)) |
| 1385 | tcp_push_one(sk, mss_now); |
| 1386 | continue; |
| 1387 | |
| 1388 | wait_for_sndbuf: |
| 1389 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
| 1390 | wait_for_memory: |
| 1391 | if (copied) |
| 1392 | tcp_push(sk, flags & ~MSG_MORE, mss_now, |
| 1393 | TCP_NAGLE_PUSH, size_goal); |
| 1394 | |
| 1395 | err = sk_stream_wait_memory(sk, &timeo); |
| 1396 | if (err != 0) |
| 1397 | goto do_error; |
| 1398 | |
| 1399 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
| 1400 | } |
| 1401 | |
| 1402 | out: |
| 1403 | if (copied) { |
| 1404 | tcp_tx_timestamp(sk, sockc.tsflags); |
| 1405 | tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); |
| 1406 | } |
| 1407 | out_nopush: |
| 1408 | sock_zerocopy_put(uarg); |
| 1409 | return copied + copied_syn; |
| 1410 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1411 | do_error: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1412 | skb = tcp_write_queue_tail(sk); |
| 1413 | do_fault: |
| 1414 | tcp_remove_empty_skb(sk, skb); |
| 1415 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1416 | if (copied + copied_syn) |
| 1417 | goto out; |
| 1418 | out_err: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1419 | sock_zerocopy_put_abort(uarg, true); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1420 | err = sk_stream_error(sk, flags, err); |
| 1421 | /* make sure we wake any epoll edge trigger waiter */ |
| 1422 | if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && |
| 1423 | err == -EAGAIN)) { |
| 1424 | sk->sk_write_space(sk); |
| 1425 | tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); |
| 1426 | } |
| 1427 | return err; |
| 1428 | } |
| 1429 | EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); |
| 1430 | |
| 1431 | int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) |
| 1432 | { |
| 1433 | int ret; |
| 1434 | |
| 1435 | lock_sock(sk); |
| 1436 | ret = tcp_sendmsg_locked(sk, msg, size); |
| 1437 | release_sock(sk); |
| 1438 | |
| 1439 | return ret; |
| 1440 | } |
| 1441 | EXPORT_SYMBOL(tcp_sendmsg); |
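/*
 * Illustrative user-space counterpart (not part of this file) of the
 * zerocopy path handled by skb_zerocopy_iter_stream() above; setup and
 * error handling are elided:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
 *	send(fd, buf, len, MSG_ZEROCOPY);
 *	// Completion notifications arrive on the socket error queue
 *	// (recvmsg() with MSG_ERRQUEUE) once the kernel has released
 *	// its references on the pages of buf.
 */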
| 1442 | |
| 1443 | /* |
| 1444 | * Handle reading urgent data. BSD has very simple semantics for |
| 1445 | * this, no blocking and very strange errors 8) |
| 1446 | */ |
| 1447 | |
| 1448 | static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) |
| 1449 | { |
| 1450 | struct tcp_sock *tp = tcp_sk(sk); |
| 1451 | |
| 1452 | /* No URG data to read. */ |
| 1453 | if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || |
| 1454 | tp->urg_data == TCP_URG_READ) |
| 1455 | return -EINVAL; /* Yes this is right ! */ |
| 1456 | |
| 1457 | if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) |
| 1458 | return -ENOTCONN; |
| 1459 | |
| 1460 | if (tp->urg_data & TCP_URG_VALID) { |
| 1461 | int err = 0; |
| 1462 | char c = tp->urg_data; |
| 1463 | |
| 1464 | if (!(flags & MSG_PEEK)) |
| 1465 | tp->urg_data = TCP_URG_READ; |
| 1466 | |
| 1467 | /* Read urgent data. */ |
| 1468 | msg->msg_flags |= MSG_OOB; |
| 1469 | |
| 1470 | if (len > 0) { |
| 1471 | if (!(flags & MSG_TRUNC)) |
| 1472 | err = memcpy_to_msg(msg, &c, 1); |
| 1473 | len = 1; |
| 1474 | } else |
| 1475 | msg->msg_flags |= MSG_TRUNC; |
| 1476 | |
| 1477 | return err ? -EFAULT : len; |
| 1478 | } |
| 1479 | |
| 1480 | if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) |
| 1481 | return 0; |
| 1482 | |
| 1483 | /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and |
| 1484 | * the available implementations agree in this case: |
| 1485 | * this call should never block, independent of the |
| 1486 | * blocking state of the socket. |
| 1487 | * Mike <pall@rz.uni-karlsruhe.de> |
| 1488 | */ |
| 1489 | return -EAGAIN; |
| 1490 | } |
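/*
 * A minimal user-space sketch of the semantics implemented above
 * (assumes an established socket "fd"): the call never blocks,
 * regardless of O_NONBLOCK.
 *
 *	char c;
 *	ssize_t n = recv(fd, &c, 1, MSG_OOB);
 *
 *	// n == 1: c holds the urgent byte
 *	// n < 0, errno == EAGAIN: no urgent data pending
 *	// n < 0, errno == EINVAL: urgent byte already consumed,
 *	//			    or SO_OOBINLINE is set
 */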
| 1491 | |
| 1492 | static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) |
| 1493 | { |
| 1494 | struct sk_buff *skb; |
| 1495 | int copied = 0, err = 0; |
| 1496 | |
| 1497 | /* XXX -- need to support SO_PEEK_OFF */ |
| 1498 | |
| 1499 | skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { |
| 1500 | err = skb_copy_datagram_msg(skb, 0, msg, skb->len); |
| 1501 | if (err) |
| 1502 | return err; |
| 1503 | copied += skb->len; |
| 1504 | } |
| 1505 | |
| 1506 | skb_queue_walk(&sk->sk_write_queue, skb) { |
| 1507 | err = skb_copy_datagram_msg(skb, 0, msg, skb->len); |
| 1508 | if (err) |
| 1509 | break; |
| 1510 | |
| 1511 | copied += skb->len; |
| 1512 | } |
| 1513 | |
| 1514 | return err ?: copied; |
| 1515 | } |
| 1516 | |
| 1517 | /* Clean up the receive buffer for full frames taken by the user, |
| 1518 | * then send an ACK if necessary. COPIED is the number of bytes |
| 1519 | * tcp_recvmsg has given to the user so far; it speeds up the |
| 1520 | * calculation of whether or not we must ACK for the sake of |
| 1521 | * a window update. |
| 1522 | */ |
| 1523 | static void tcp_cleanup_rbuf(struct sock *sk, int copied) |
| 1524 | { |
| 1525 | struct tcp_sock *tp = tcp_sk(sk); |
| 1526 | bool time_to_ack = false; |
| 1527 | |
| 1528 | struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); |
| 1529 | |
| 1530 | WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), |
| 1531 | "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", |
| 1532 | tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); |
| 1533 | |
| 1534 | if (inet_csk_ack_scheduled(sk)) { |
| 1535 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 1536 | /* Delayed ACKs frequently hit locked sockets during bulk |
| 1537 | * receive. */ |
| 1538 | if (icsk->icsk_ack.blocked || |
| 1539 | /* Once-per-two-segments ACK was not sent by tcp_input.c */ |
| 1540 | tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || |
| 1541 | /* |
| 1542 | * If this read emptied the read buffer, we send an ACK when |
| 1543 | * the connection is not bidirectional, the user has drained |
| 1544 | * the receive buffer, and there was a small segment |
| 1545 | * in the queue. |
| 1546 | */ |
| 1547 | (copied > 0 && |
| 1548 | ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || |
| 1549 | ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && |
| 1550 | !inet_csk_in_pingpong_mode(sk))) && |
| 1551 | !atomic_read(&sk->sk_rmem_alloc))) |
| 1552 | time_to_ack = true; |
| 1553 | } |
| 1554 | |
| 1555 | /* We send an ACK if we can now advertise a non-zero window |
| 1556 | * which has been raised "significantly". |
| 1557 | * |
| 1558 | * Even if the window was raised up to infinity, do not send a window-open |
| 1559 | * ACK in states where we will not receive more data; it is useless. |
| 1560 | */ |
| 1561 | if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { |
| 1562 | __u32 rcv_window_now = tcp_receive_window(tp); |
| 1563 | |
| 1564 | /* Optimize, __tcp_select_window() is not cheap. */ |
| 1565 | if (2*rcv_window_now <= tp->window_clamp) { |
| 1566 | __u32 new_window = __tcp_select_window(sk); |
| 1567 | |
| 1568 | /* Send an ACK now if this read freed lots of space |
| 1569 | * in our buffer. We can advertise the new window now, |
| 1570 | * provided it is not smaller than the current one. |
| 1571 | * "Lots" means "at least twice" here. |
| 1572 | */ |
| 1573 | if (new_window && new_window >= 2 * rcv_window_now) |
| 1574 | time_to_ack = true; |
| 1575 | } |
| 1576 | } |
| 1577 | if (time_to_ack) |
| 1578 | tcp_send_ack(sk); |
| 1579 | } |
| 1580 | |
| 1581 | static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) |
| 1582 | { |
| 1583 | struct sk_buff *skb; |
| 1584 | u32 offset; |
| 1585 | |
| 1586 | while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { |
| 1587 | offset = seq - TCP_SKB_CB(skb)->seq; |
| 1588 | if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { |
| 1589 | pr_err_once("%s: found a SYN, please report !\n", __func__); |
| 1590 | offset--; |
| 1591 | } |
| 1592 | if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { |
| 1593 | *off = offset; |
| 1594 | return skb; |
| 1595 | } |
| 1596 | /* This looks weird, but this can happen if TCP collapsing |
| 1597 | * split a large GRO packet while we released the socket lock |
| 1598 | * in skb_splice_bits() |
| 1599 | */ |
| 1600 | sk_eat_skb(sk, skb); |
| 1601 | } |
| 1602 | return NULL; |
| 1603 | } |
| 1604 | |
| 1605 | /* |
| 1606 | * This routine provides an alternative to tcp_recvmsg() for routines |
| 1607 | * that would like to handle copying from skbuffs directly in 'sendfile' |
| 1608 | * fashion. |
| 1609 | * Note: |
| 1610 | * - It is assumed that the socket was locked by the caller. |
| 1611 | * - The routine does not block. |
| 1612 | * - At present, there is no support for reading OOB data |
| 1613 | * or for 'peeking' the socket using this routine |
| 1614 | * (although both would be easy to implement). |
| 1615 | */ |
| 1616 | int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, |
| 1617 | sk_read_actor_t recv_actor) |
| 1618 | { |
| 1619 | struct sk_buff *skb; |
| 1620 | struct tcp_sock *tp = tcp_sk(sk); |
| 1621 | u32 seq = tp->copied_seq; |
| 1622 | u32 offset; |
| 1623 | int copied = 0; |
| 1624 | |
| 1625 | if (sk->sk_state == TCP_LISTEN) |
| 1626 | return -ENOTCONN; |
| 1627 | while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { |
| 1628 | if (offset < skb->len) { |
| 1629 | int used; |
| 1630 | size_t len; |
| 1631 | |
| 1632 | len = skb->len - offset; |
| 1633 | /* Stop reading if we hit a patch of urgent data */ |
| 1634 | if (tp->urg_data) { |
| 1635 | u32 urg_offset = tp->urg_seq - seq; |
| 1636 | if (urg_offset < len) |
| 1637 | len = urg_offset; |
| 1638 | if (!len) |
| 1639 | break; |
| 1640 | } |
| 1641 | used = recv_actor(desc, skb, offset, len); |
| 1642 | if (used <= 0) { |
| 1643 | if (!copied) |
| 1644 | copied = used; |
| 1645 | break; |
| 1646 | } else if (used <= len) { |
| 1647 | seq += used; |
| 1648 | copied += used; |
| 1649 | offset += used; |
| 1650 | } |
| 1651 | /* If recv_actor drops the lock (e.g. TCP splice |
| 1652 | * receive) the skb pointer might be invalid when |
| 1653 | * getting here: tcp_collapse might have deleted it |
| 1654 | * while aggregating skbs from the socket queue. |
| 1655 | */ |
| 1656 | skb = tcp_recv_skb(sk, seq - 1, &offset); |
| 1657 | if (!skb) |
| 1658 | break; |
| 1659 | /* TCP coalescing might have appended data to the skb. |
| 1660 | * Try to splice more frags |
| 1661 | */ |
| 1662 | if (offset + 1 != skb->len) |
| 1663 | continue; |
| 1664 | } |
| 1665 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { |
| 1666 | sk_eat_skb(sk, skb); |
| 1667 | ++seq; |
| 1668 | break; |
| 1669 | } |
| 1670 | sk_eat_skb(sk, skb); |
| 1671 | if (!desc->count) |
| 1672 | break; |
| 1673 | WRITE_ONCE(tp->copied_seq, seq); |
| 1674 | } |
| 1675 | WRITE_ONCE(tp->copied_seq, seq); |
| 1676 | |
| 1677 | tcp_rcv_space_adjust(sk); |
| 1678 | |
| 1679 | /* Clean up data we have read: This will do ACK frames. */ |
| 1680 | if (copied > 0) { |
| 1681 | tcp_recv_skb(sk, seq, &offset); |
| 1682 | tcp_cleanup_rbuf(sk, copied); |
| 1683 | } |
| 1684 | return copied; |
| 1685 | } |
| 1686 | EXPORT_SYMBOL(tcp_read_sock); |
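/*
 * Sketch of a recv_actor such as callers of tcp_read_sock() supply
 * (the splice path is one in-tree user). The names below are
 * illustrative only. The actor consumes up to "len" bytes starting at
 * "offset" inside "skb" and returns the number of bytes it used, or
 * <= 0 to stop the walk:
 *
 *	static int example_recv_actor(read_descriptor_t *rd,
 *				      struct sk_buff *skb,
 *				      unsigned int offset, size_t len)
 *	{
 *		size_t used = min_t(size_t, len, rd->count);
 *
 *		// copy or splice "used" bytes out of skb here
 *		rd->count -= used;
 *		return used;
 *	}
 */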
| 1687 | |
| 1688 | int tcp_peek_len(struct socket *sock) |
| 1689 | { |
| 1690 | return tcp_inq(sock->sk); |
| 1691 | } |
| 1692 | EXPORT_SYMBOL(tcp_peek_len); |
| 1693 | |
| 1694 | /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ |
| 1695 | int tcp_set_rcvlowat(struct sock *sk, int val) |
| 1696 | { |
| 1697 | int cap; |
| 1698 | |
| 1699 | if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) |
| 1700 | cap = sk->sk_rcvbuf >> 1; |
| 1701 | else |
| 1702 | cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1; |
| 1703 | val = min(val, cap); |
| 1704 | WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); |
| 1705 | |
| 1706 | /* Check if we need to signal EPOLLIN right now */ |
| 1707 | tcp_data_ready(sk); |
| 1708 | |
| 1709 | if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) |
| 1710 | return 0; |
| 1711 | |
| 1712 | val <<= 1; |
| 1713 | if (val > sk->sk_rcvbuf) { |
| 1714 | WRITE_ONCE(sk->sk_rcvbuf, val); |
| 1715 | tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val); |
| 1716 | } |
| 1717 | return 0; |
| 1718 | } |
| 1719 | EXPORT_SYMBOL(tcp_set_rcvlowat); |
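/*
 * User-space view (illustrative): with SO_RCVLOWAT set, poll() and
 * blocking reads do not wake the reader until at least "lowat" bytes
 * are queued, and the code above grows sk_rcvbuf so that the
 * watermark remains reachable:
 *
 *	int lowat = 64 * 1024;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 */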
| 1720 | |
| 1721 | #ifdef CONFIG_MMU |
| 1722 | static const struct vm_operations_struct tcp_vm_ops = { |
| 1723 | }; |
| 1724 | |
| 1725 | int tcp_mmap(struct file *file, struct socket *sock, |
| 1726 | struct vm_area_struct *vma) |
| 1727 | { |
| 1728 | if (vma->vm_flags & (VM_WRITE | VM_EXEC)) |
| 1729 | return -EPERM; |
| 1730 | vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC); |
| 1731 | |
| 1732 | /* Instruct vm_insert_page() to not down_read(mmap_sem) */ |
| 1733 | vma->vm_flags |= VM_MIXEDMAP; |
| 1734 | |
| 1735 | vma->vm_ops = &tcp_vm_ops; |
| 1736 | return 0; |
| 1737 | } |
| 1738 | EXPORT_SYMBOL(tcp_mmap); |
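/*
 * Minimal user-space sketch of the receive zerocopy flow backed by
 * this mapping (error handling elided; see tcp_zerocopy_receive()
 * below). The region must be mapped read-only, and any leftover bytes
 * reported in recv_skip_hint are read with a normal recv():
 *
 *	struct tcp_zerocopy_receive zc;
 *	socklen_t zc_len = sizeof(zc);
 *	void *addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	zc.address = (__u64)(unsigned long)addr;
 *	zc.length = chunk;
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
 *	// zc.length bytes of payload are now mapped at addr;
 *	// zc.recv_skip_hint bytes should be recv()ed normally.
 */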
| 1739 | |
| 1740 | static int tcp_zerocopy_receive(struct sock *sk, |
| 1741 | struct tcp_zerocopy_receive *zc) |
| 1742 | { |
| 1743 | unsigned long address = (unsigned long)zc->address; |
| 1744 | const skb_frag_t *frags = NULL; |
| 1745 | u32 length = 0, seq, offset; |
| 1746 | struct vm_area_struct *vma; |
| 1747 | struct sk_buff *skb = NULL; |
| 1748 | struct tcp_sock *tp; |
| 1749 | int inq; |
| 1750 | int ret; |
| 1751 | |
| 1752 | if (address & (PAGE_SIZE - 1) || address != zc->address) |
| 1753 | return -EINVAL; |
| 1754 | |
| 1755 | if (sk->sk_state == TCP_LISTEN) |
| 1756 | return -ENOTCONN; |
| 1757 | |
| 1758 | sock_rps_record_flow(sk); |
| 1759 | |
| 1760 | down_read(¤t->mm->mmap_sem); |
| 1761 | |
| 1762 | ret = -EINVAL; |
| 1763 | vma = find_vma(current->mm, address); |
| 1764 | if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) |
| 1765 | goto out; |
| 1766 | zc->length = min_t(unsigned long, zc->length, vma->vm_end - address); |
| 1767 | |
| 1768 | tp = tcp_sk(sk); |
| 1769 | seq = tp->copied_seq; |
| 1770 | inq = tcp_inq(sk); |
| 1771 | zc->length = min_t(u32, zc->length, inq); |
| 1772 | zc->length &= ~(PAGE_SIZE - 1); |
| 1773 | if (zc->length) { |
| 1774 | zap_page_range(vma, address, zc->length); |
| 1775 | zc->recv_skip_hint = 0; |
| 1776 | } else { |
| 1777 | zc->recv_skip_hint = inq; |
| 1778 | } |
| 1779 | ret = 0; |
| 1780 | while (length + PAGE_SIZE <= zc->length) { |
| 1781 | if (zc->recv_skip_hint < PAGE_SIZE) { |
| 1782 | if (skb) { |
| 1783 | skb = skb->next; |
| 1784 | offset = seq - TCP_SKB_CB(skb)->seq; |
| 1785 | } else { |
| 1786 | skb = tcp_recv_skb(sk, seq, &offset); |
| 1787 | } |
| 1788 | |
| 1789 | zc->recv_skip_hint = skb->len - offset; |
| 1790 | offset -= skb_headlen(skb); |
| 1791 | if ((int)offset < 0 || skb_has_frag_list(skb)) |
| 1792 | break; |
| 1793 | frags = skb_shinfo(skb)->frags; |
| 1794 | while (offset) { |
| 1795 | if (skb_frag_size(frags) > offset) |
| 1796 | goto out; |
| 1797 | offset -= skb_frag_size(frags); |
| 1798 | frags++; |
| 1799 | } |
| 1800 | } |
| 1801 | if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) { |
| 1802 | int remaining = zc->recv_skip_hint; |
| 1803 | |
| 1804 | while (remaining && (skb_frag_size(frags) != PAGE_SIZE || |
| 1805 | skb_frag_off(frags))) { |
| 1806 | remaining -= skb_frag_size(frags); |
| 1807 | frags++; |
| 1808 | } |
| 1809 | zc->recv_skip_hint -= remaining; |
| 1810 | break; |
| 1811 | } |
| 1812 | ret = vm_insert_page(vma, address + length, |
| 1813 | skb_frag_page(frags)); |
| 1814 | if (ret) |
| 1815 | break; |
| 1816 | length += PAGE_SIZE; |
| 1817 | seq += PAGE_SIZE; |
| 1818 | zc->recv_skip_hint -= PAGE_SIZE; |
| 1819 | frags++; |
| 1820 | } |
| 1821 | out: |
| 1822 | up_read(¤t->mm->mmap_sem); |
| 1823 | if (length) { |
| 1824 | WRITE_ONCE(tp->copied_seq, seq); |
| 1825 | tcp_rcv_space_adjust(sk); |
| 1826 | |
| 1827 | /* Clean up data we have read: This will do ACK frames. */ |
| 1828 | tcp_recv_skb(sk, seq, &offset); |
| 1829 | tcp_cleanup_rbuf(sk, length); |
| 1830 | ret = 0; |
| 1831 | if (length == zc->length) |
| 1832 | zc->recv_skip_hint = 0; |
| 1833 | } else { |
| 1834 | if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) |
| 1835 | ret = -EIO; |
| 1836 | } |
| 1837 | zc->length = length; |
| 1838 | return ret; |
| 1839 | } |
| 1840 | #endif |
| 1841 | |
| 1842 | static void tcp_update_recv_tstamps(struct sk_buff *skb, |
| 1843 | struct scm_timestamping_internal *tss) |
| 1844 | { |
| 1845 | if (skb->tstamp) |
| 1846 | tss->ts[0] = ktime_to_timespec64(skb->tstamp); |
| 1847 | else |
| 1848 | tss->ts[0] = (struct timespec64) {0}; |
| 1849 | |
| 1850 | if (skb_hwtstamps(skb)->hwtstamp) |
| 1851 | tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); |
| 1852 | else |
| 1853 | tss->ts[2] = (struct timespec64) {0}; |
| 1854 | } |
| 1855 | |
| 1856 | /* Similar to __sock_recv_timestamp, but does not require an skb */ |
| 1857 | static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, |
| 1858 | struct scm_timestamping_internal *tss) |
| 1859 | { |
| 1860 | int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); |
| 1861 | bool has_timestamping = false; |
| 1862 | |
| 1863 | if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { |
| 1864 | if (sock_flag(sk, SOCK_RCVTSTAMP)) { |
| 1865 | if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { |
| 1866 | if (new_tstamp) { |
| 1867 | struct __kernel_timespec kts = {tss->ts[0].tv_sec, tss->ts[0].tv_nsec}; |
| 1868 | |
| 1869 | put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, |
| 1870 | sizeof(kts), &kts); |
| 1871 | } else { |
| 1872 | struct timespec ts_old = timespec64_to_timespec(tss->ts[0]); |
| 1873 | |
| 1874 | put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, |
| 1875 | sizeof(ts_old), &ts_old); |
| 1876 | } |
| 1877 | } else { |
| 1878 | if (new_tstamp) { |
| 1879 | struct __kernel_sock_timeval stv; |
| 1880 | |
| 1881 | stv.tv_sec = tss->ts[0].tv_sec; |
| 1882 | stv.tv_usec = tss->ts[0].tv_nsec / 1000; |
| 1883 | put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, |
| 1884 | sizeof(stv), &stv); |
| 1885 | } else { |
| 1886 | struct __kernel_old_timeval tv; |
| 1887 | |
| 1888 | tv.tv_sec = tss->ts[0].tv_sec; |
| 1889 | tv.tv_usec = tss->ts[0].tv_nsec / 1000; |
| 1890 | put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, |
| 1891 | sizeof(tv), &tv); |
| 1892 | } |
| 1893 | } |
| 1894 | } |
| 1895 | |
| 1896 | if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) |
| 1897 | has_timestamping = true; |
| 1898 | else |
| 1899 | tss->ts[0] = (struct timespec64) {0}; |
| 1900 | } |
| 1901 | |
| 1902 | if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { |
| 1903 | if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) |
| 1904 | has_timestamping = true; |
| 1905 | else |
| 1906 | tss->ts[2] = (struct timespec64) {0}; |
| 1907 | } |
| 1908 | |
| 1909 | if (has_timestamping) { |
| 1910 | tss->ts[1] = (struct timespec64) {0}; |
| 1911 | if (sock_flag(sk, SOCK_TSTAMP_NEW)) |
| 1912 | put_cmsg_scm_timestamping64(msg, tss); |
| 1913 | else |
| 1914 | put_cmsg_scm_timestamping(msg, tss); |
| 1915 | } |
| 1916 | } |
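/*
 * Illustrative user-space setup for the cmsg emitted above (error
 * handling elided): enable software receive timestamps, then each
 * recvmsg() carries an SCM_TIMESTAMPING control message (or its _NEW
 * variant when SOCK_TSTAMP_NEW is set) holding the ts[] array filled
 * in by tcp_update_recv_tstamps():
 *
 *	int flags = SOF_TIMESTAMPING_RX_SOFTWARE |
 *		    SOF_TIMESTAMPING_SOFTWARE;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
 */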
| 1917 | |
| 1918 | static int tcp_inq_hint(struct sock *sk) |
| 1919 | { |
| 1920 | const struct tcp_sock *tp = tcp_sk(sk); |
| 1921 | u32 copied_seq = READ_ONCE(tp->copied_seq); |
| 1922 | u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); |
| 1923 | int inq; |
| 1924 | |
| 1925 | inq = rcv_nxt - copied_seq; |
| 1926 | if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { |
| 1927 | lock_sock(sk); |
| 1928 | inq = tp->rcv_nxt - tp->copied_seq; |
| 1929 | release_sock(sk); |
| 1930 | } |
| 1931 | /* After receiving a FIN, tell user space to continue reading |
| 1932 | * by returning a non-zero inq. |
| 1933 | */ |
| 1934 | if (inq == 0 && sock_flag(sk, SOCK_DONE)) |
| 1935 | inq = 1; |
| 1936 | return inq; |
| 1937 | } |
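/*
 * User-space sketch of consuming this hint (illustrative): enable
 * TCP_INQ, then each recvmsg() carries a TCP_CM_INQ cmsg holding the
 * number of bytes still queued after the read:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 *	// After recvmsg(fd, &msg, 0), walk the control data looking for
 *	// cmsg_level == SOL_TCP && cmsg_type == TCP_CM_INQ; the payload
 *	// is an int as written by put_cmsg() in tcp_recvmsg() below.
 */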
| 1938 | |
| 1939 | /* |
| 1940 | * This routine copies from a sock struct into the user buffer. |
| 1941 | * |
| 1942 | * Technical note: in 2.3 we work on a _locked_ socket, so that |
| 1943 | * tricks with *seq access order and skb->users are not required. |
| 1944 | * The code can probably be improved even further. |
| 1945 | */ |
| 1946 | |
| 1947 | int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, |
| 1948 | int flags, int *addr_len) |
| 1949 | { |
| 1950 | struct tcp_sock *tp = tcp_sk(sk); |
| 1951 | int copied = 0; |
| 1952 | u32 peek_seq; |
| 1953 | u32 *seq; |
| 1954 | unsigned long used; |
| 1955 | int err, inq; |
| 1956 | int target; /* Read at least this many bytes */ |
| 1957 | long timeo; |
| 1958 | struct sk_buff *skb, *last; |
| 1959 | u32 urg_hole = 0; |
| 1960 | struct scm_timestamping_internal tss; |
| 1961 | bool has_tss = false; |
| 1962 | bool has_cmsg; |
| 1963 | |
| 1964 | if (unlikely(flags & MSG_ERRQUEUE)) |
| 1965 | return inet_recv_error(sk, msg, len, addr_len); |
| 1966 | |
| 1967 | if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) && |
| 1968 | (sk->sk_state == TCP_ESTABLISHED)) |
| 1969 | sk_busy_loop(sk, nonblock); |
| 1970 | |
| 1971 | lock_sock(sk); |
| 1972 | |
| 1973 | err = -ENOTCONN; |
| 1974 | if (sk->sk_state == TCP_LISTEN) |
| 1975 | goto out; |
| 1976 | |
| 1977 | has_cmsg = tp->recvmsg_inq; |
| 1978 | timeo = sock_rcvtimeo(sk, nonblock); |
| 1979 | |
| 1980 | /* Urgent data needs to be handled specially. */ |
| 1981 | if (flags & MSG_OOB) |
| 1982 | goto recv_urg; |
| 1983 | |
| 1984 | if (unlikely(tp->repair)) { |
| 1985 | err = -EPERM; |
| 1986 | if (!(flags & MSG_PEEK)) |
| 1987 | goto out; |
| 1988 | |
| 1989 | if (tp->repair_queue == TCP_SEND_QUEUE) |
| 1990 | goto recv_sndq; |
| 1991 | |
| 1992 | err = -EINVAL; |
| 1993 | if (tp->repair_queue == TCP_NO_QUEUE) |
| 1994 | goto out; |
| 1995 | |
| 1996 | /* 'common' recv queue MSG_PEEK-ing */ |
| 1997 | } |
| 1998 | |
| 1999 | seq = &tp->copied_seq; |
| 2000 | if (flags & MSG_PEEK) { |
| 2001 | peek_seq = tp->copied_seq; |
| 2002 | seq = &peek_seq; |
| 2003 | } |
| 2004 | |
| 2005 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); |
| 2006 | |
| 2007 | do { |
| 2008 | u32 offset; |
| 2009 | |
| 2010 | /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ |
| 2011 | if (tp->urg_data && tp->urg_seq == *seq) { |
| 2012 | if (copied) |
| 2013 | break; |
| 2014 | if (signal_pending(current)) { |
| 2015 | copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; |
| 2016 | break; |
| 2017 | } |
| 2018 | } |
| 2019 | |
| 2020 | /* Next get a buffer. */ |
| 2021 | |
| 2022 | last = skb_peek_tail(&sk->sk_receive_queue); |
| 2023 | skb_queue_walk(&sk->sk_receive_queue, skb) { |
| 2024 | last = skb; |
| 2025 | /* Now that we have two receive queues this |
| 2026 | * shouldn't happen. |
| 2027 | */ |
| 2028 | if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), |
| 2029 | "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", |
| 2030 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, |
| 2031 | flags)) |
| 2032 | break; |
| 2033 | |
| 2034 | offset = *seq - TCP_SKB_CB(skb)->seq; |
| 2035 | if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { |
| 2036 | pr_err_once("%s: found a SYN, please report !\n", __func__); |
| 2037 | offset--; |
| 2038 | } |
| 2039 | if (offset < skb->len) |
| 2040 | goto found_ok_skb; |
| 2041 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
| 2042 | goto found_fin_ok; |
| 2043 | WARN(!(flags & MSG_PEEK), |
| 2044 | "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", |
| 2045 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); |
| 2046 | } |
| 2047 | |
| 2048 | /* Well, if we have backlog, try to process it now. */ |
| 2049 | |
| 2050 | if (copied >= target && !sk->sk_backlog.tail) |
| 2051 | break; |
| 2052 | |
| 2053 | if (copied) { |
| 2054 | if (sk->sk_err || |
| 2055 | sk->sk_state == TCP_CLOSE || |
| 2056 | (sk->sk_shutdown & RCV_SHUTDOWN) || |
| 2057 | !timeo || |
| 2058 | signal_pending(current)) |
| 2059 | break; |
| 2060 | } else { |
| 2061 | if (sock_flag(sk, SOCK_DONE)) |
| 2062 | break; |
| 2063 | |
| 2064 | if (sk->sk_err) { |
| 2065 | copied = sock_error(sk); |
| 2066 | break; |
| 2067 | } |
| 2068 | |
| 2069 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
| 2070 | break; |
| 2071 | |
| 2072 | if (sk->sk_state == TCP_CLOSE) { |
| 2073 | /* This occurs when user tries to read |
| 2074 | * from a never-connected socket. |
| 2075 | */ |
| 2076 | copied = -ENOTCONN; |
| 2077 | break; |
| 2078 | } |
| 2079 | |
| 2080 | if (!timeo) { |
| 2081 | copied = -EAGAIN; |
| 2082 | break; |
| 2083 | } |
| 2084 | |
| 2085 | if (signal_pending(current)) { |
| 2086 | copied = sock_intr_errno(timeo); |
| 2087 | break; |
| 2088 | } |
| 2089 | } |
| 2090 | |
| 2091 | tcp_cleanup_rbuf(sk, copied); |
| 2092 | |
| 2093 | if (copied >= target) { |
| 2094 | /* Do not sleep, just process backlog. */ |
| 2095 | release_sock(sk); |
| 2096 | lock_sock(sk); |
| 2097 | } else { |
| 2098 | sk_wait_data(sk, &timeo, last); |
| 2099 | } |
| 2100 | |
| 2101 | if ((flags & MSG_PEEK) && |
| 2102 | (peek_seq - copied - urg_hole != tp->copied_seq)) { |
| 2103 | net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", |
| 2104 | current->comm, |
| 2105 | task_pid_nr(current)); |
| 2106 | peek_seq = tp->copied_seq; |
| 2107 | } |
| 2108 | continue; |
| 2109 | |
| 2110 | found_ok_skb: |
| 2111 | /* Ok so how much can we use? */ |
| 2112 | used = skb->len - offset; |
| 2113 | if (len < used) |
| 2114 | used = len; |
| 2115 | |
| 2116 | /* Do we have urgent data here? */ |
| 2117 | if (tp->urg_data) { |
| 2118 | u32 urg_offset = tp->urg_seq - *seq; |
| 2119 | if (urg_offset < used) { |
| 2120 | if (!urg_offset) { |
| 2121 | if (!sock_flag(sk, SOCK_URGINLINE)) { |
| 2122 | WRITE_ONCE(*seq, *seq + 1); |
| 2123 | urg_hole++; |
| 2124 | offset++; |
| 2125 | used--; |
| 2126 | if (!used) |
| 2127 | goto skip_copy; |
| 2128 | } |
| 2129 | } else |
| 2130 | used = urg_offset; |
| 2131 | } |
| 2132 | } |
| 2133 | |
| 2134 | if (!(flags & MSG_TRUNC)) { |
| 2135 | err = skb_copy_datagram_msg(skb, offset, msg, used); |
| 2136 | if (err) { |
| 2137 | /* Exception. Bailout! */ |
| 2138 | if (!copied) |
| 2139 | copied = -EFAULT; |
| 2140 | break; |
| 2141 | } |
| 2142 | } |
| 2143 | |
| 2144 | WRITE_ONCE(*seq, *seq + used); |
| 2145 | copied += used; |
| 2146 | len -= used; |
| 2147 | |
| 2148 | tcp_rcv_space_adjust(sk); |
| 2149 | |
| 2150 | skip_copy: |
| 2151 | if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { |
| 2152 | tp->urg_data = 0; |
| 2153 | tcp_fast_path_check(sk); |
| 2154 | } |
| 2155 | if (used + offset < skb->len) |
| 2156 | continue; |
| 2157 | |
| 2158 | if (TCP_SKB_CB(skb)->has_rxtstamp) { |
| 2159 | tcp_update_recv_tstamps(skb, &tss); |
| 2160 | has_tss = true; |
| 2161 | has_cmsg = true; |
| 2162 | } |
| 2163 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
| 2164 | goto found_fin_ok; |
| 2165 | if (!(flags & MSG_PEEK)) |
| 2166 | sk_eat_skb(sk, skb); |
| 2167 | continue; |
| 2168 | |
| 2169 | found_fin_ok: |
| 2170 | /* Process the FIN. */ |
| 2171 | WRITE_ONCE(*seq, *seq + 1); |
| 2172 | if (!(flags & MSG_PEEK)) |
| 2173 | sk_eat_skb(sk, skb); |
| 2174 | break; |
| 2175 | } while (len > 0); |
| 2176 | |
| 2177 | /* According to UNIX98, msg_name/msg_namelen are ignored |
| 2178 | * on a connected socket. I was just happy when I found this 8) --ANK |
| 2179 | */ |
| 2180 | |
| 2181 | /* Clean up data we have read: This will do ACK frames. */ |
| 2182 | tcp_cleanup_rbuf(sk, copied); |
| 2183 | |
| 2184 | release_sock(sk); |
| 2185 | |
| 2186 | if (has_cmsg) { |
| 2187 | if (has_tss) |
| 2188 | tcp_recv_timestamp(msg, sk, &tss); |
| 2189 | if (tp->recvmsg_inq) { |
| 2190 | inq = tcp_inq_hint(sk); |
| 2191 | put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq); |
| 2192 | } |
| 2193 | } |
| 2194 | |
| 2195 | return copied; |
| 2196 | |
| 2197 | out: |
| 2198 | release_sock(sk); |
| 2199 | return err; |
| 2200 | |
| 2201 | recv_urg: |
| 2202 | err = tcp_recv_urg(sk, msg, len, flags); |
| 2203 | goto out; |
| 2204 | |
| 2205 | recv_sndq: |
| 2206 | err = tcp_peek_sndq(sk, msg, len); |
| 2207 | goto out; |
| 2208 | } |
| 2209 | EXPORT_SYMBOL(tcp_recvmsg); |
| 2210 | |
| 2211 | void tcp_set_state(struct sock *sk, int state) |
| 2212 | { |
| 2213 | int oldstate = sk->sk_state; |
| 2214 | |
| 2215 | /* We defined a new enum for TCP states that are exported in BPF |
| 2216 | * so as not to force the internal TCP states to be frozen. The |
| 2217 | * following checks will detect if an internal state value ever |
| 2218 | * differs from the BPF value. If this ever happens, then we will |
| 2219 | * need to remap the internal value to the BPF value before calling |
| 2220 | * tcp_call_bpf_2arg. |
| 2221 | */ |
| 2222 | BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); |
| 2223 | BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); |
| 2224 | BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); |
| 2225 | BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); |
| 2226 | BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); |
| 2227 | BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); |
| 2228 | BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); |
| 2229 | BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); |
| 2230 | BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); |
| 2231 | BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); |
| 2232 | BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); |
| 2233 | BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); |
| 2234 | BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); |
| 2235 | |
| 2236 | if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) |
| 2237 | tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); |
| 2238 | |
| 2239 | switch (state) { |
| 2240 | case TCP_ESTABLISHED: |
| 2241 | if (oldstate != TCP_ESTABLISHED) |
| 2242 | TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
| 2243 | break; |
| 2244 | |
| 2245 | case TCP_CLOSE: |
| 2246 | if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) |
| 2247 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); |
| 2248 | |
| 2249 | sk->sk_prot->unhash(sk); |
| 2250 | if (inet_csk(sk)->icsk_bind_hash && |
| 2251 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) |
| 2252 | inet_put_port(sk); |
| 2253 | /* fall through */ |
| 2254 | default: |
| 2255 | if (oldstate == TCP_ESTABLISHED) |
| 2256 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
| 2257 | } |
| 2258 | |
| 2259 | /* Change state AFTER socket is unhashed to avoid closed |
| 2260 | * socket sitting in hash tables. |
| 2261 | */ |
| 2262 | inet_sk_state_store(sk, state); |
| 2263 | } |
| 2264 | EXPORT_SYMBOL_GPL(tcp_set_state); |
| 2265 | |
| 2266 | /* |
| 2267 | * State processing on a close. This implements the state shift for |
| 2268 | * sending our FIN frame. Note that we only send a FIN for some |
| 2269 | * states. A shutdown() may have already sent the FIN, or we may be |
| 2270 | * closed. |
| 2271 | */ |
| 2272 | |
| 2273 | static const unsigned char new_state[16] = { |
| 2274 | /* current state: new state: action: */ |
| 2275 | [0 /* (Invalid) */] = TCP_CLOSE, |
| 2276 | [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, |
| 2277 | [TCP_SYN_SENT] = TCP_CLOSE, |
| 2278 | [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, |
| 2279 | [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, |
| 2280 | [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, |
| 2281 | [TCP_TIME_WAIT] = TCP_CLOSE, |
| 2282 | [TCP_CLOSE] = TCP_CLOSE, |
| 2283 | [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, |
| 2284 | [TCP_LAST_ACK] = TCP_LAST_ACK, |
| 2285 | [TCP_LISTEN] = TCP_CLOSE, |
| 2286 | [TCP_CLOSING] = TCP_CLOSING, |
| 2287 | [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ |
| 2288 | }; |
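/*
 * Worked example of the table above: closing in TCP_ESTABLISHED yields
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN, so tcp_close_state() below moves the
 * socket to FIN-WAIT-1 and tells the caller to transmit a FIN, whereas
 * closing in TCP_SYN_SENT drops straight to TCP_CLOSE and no FIN is
 * ever put on the wire.
 */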
| 2289 | |
| 2290 | static int tcp_close_state(struct sock *sk) |
| 2291 | { |
| 2292 | int next = (int)new_state[sk->sk_state]; |
| 2293 | int ns = next & TCP_STATE_MASK; |
| 2294 | |
| 2295 | tcp_set_state(sk, ns); |
| 2296 | |
| 2297 | return next & TCP_ACTION_FIN; |
| 2298 | } |
| 2299 | |
| 2300 | /* |
| 2301 | * Shutdown the sending side of a connection. Much like close except |
| 2302 | * that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD). |
| 2303 | */ |
| 2304 | |
| 2305 | void tcp_shutdown(struct sock *sk, int how) |
| 2306 | { |
| 2307 | /* We need to grab some memory, and put together a FIN, |
| 2308 | * and then put it into the queue to be sent. |
| 2309 | * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. |
| 2310 | */ |
| 2311 | if (!(how & SEND_SHUTDOWN)) |
| 2312 | return; |
| 2313 | |
| 2314 | /* If we've already sent a FIN, or it's a closed state, skip this. */ |
| 2315 | if ((1 << sk->sk_state) & |
| 2316 | (TCPF_ESTABLISHED | TCPF_SYN_SENT | |
| 2317 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { |
| 2318 | /* Clear out any half completed packets. FIN if needed. */ |
| 2319 | if (tcp_close_state(sk)) |
| 2320 | tcp_send_fin(sk); |
| 2321 | } |
| 2322 | } |
| 2323 | EXPORT_SYMBOL(tcp_shutdown); |
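/*
 * User-space half-close sketch (illustrative; consume() is a
 * placeholder): SHUT_WR lands here and queues a FIN, while the
 * receive side stays open for whatever the peer still has to send:
 *
 *	shutdown(fd, SHUT_WR);			// our FIN goes out
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);		// peer may keep sending
 */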
| 2324 | |
| 2325 | bool tcp_check_oom(struct sock *sk, int shift) |
| 2326 | { |
| 2327 | bool too_many_orphans, out_of_socket_memory; |
| 2328 | |
| 2329 | too_many_orphans = tcp_too_many_orphans(sk, shift); |
| 2330 | out_of_socket_memory = tcp_out_of_memory(sk); |
| 2331 | |
| 2332 | if (too_many_orphans) |
| 2333 | net_info_ratelimited("too many orphaned sockets\n"); |
| 2334 | if (out_of_socket_memory) |
| 2335 | net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); |
| 2336 | return too_many_orphans || out_of_socket_memory; |
| 2337 | } |
| 2338 | |
| 2339 | void tcp_close(struct sock *sk, long timeout) |
| 2340 | { |
| 2341 | struct sk_buff *skb; |
| 2342 | int data_was_unread = 0; |
| 2343 | int state; |
| 2344 | |
| 2345 | lock_sock(sk); |
| 2346 | sk->sk_shutdown = SHUTDOWN_MASK; |
| 2347 | |
| 2348 | if (sk->sk_state == TCP_LISTEN) { |
| 2349 | tcp_set_state(sk, TCP_CLOSE); |
| 2350 | |
| 2351 | /* Special case. */ |
| 2352 | inet_csk_listen_stop(sk); |
| 2353 | |
| 2354 | goto adjudge_to_death; |
| 2355 | } |
| 2356 | |
| 2357 | /* We need to flush the receive buffers. We do this only on the |
| 2358 | * descriptor close, not protocol-sourced closes, because the |
| 2359 | * reader process may not have drained the data yet! |
| 2360 | */ |
| 2361 | while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { |
| 2362 | u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; |
| 2363 | |
| 2364 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
| 2365 | len--; |
| 2366 | data_was_unread += len; |
| 2367 | __kfree_skb(skb); |
| 2368 | } |
| 2369 | |
| 2370 | sk_mem_reclaim(sk); |
| 2371 | |
| 2372 | /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ |
| 2373 | if (sk->sk_state == TCP_CLOSE) |
| 2374 | goto adjudge_to_death; |
| 2375 | |
| 2376 | /* As outlined in RFC 2525, section 2.17, we send a RST here because |
| 2377 | * data was lost. To witness the awful effects of the old behavior of |
| 2378 | * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk |
| 2379 | * GET in an FTP client, suspend the process, wait for the client to |
| 2380 | * advertise a zero window, then kill -9 the FTP client, wheee... |
| 2381 | * Note: timeout is always zero in such a case. |
| 2382 | */ |
| 2383 | if (unlikely(tcp_sk(sk)->repair)) { |
| 2384 | sk->sk_prot->disconnect(sk, 0); |
| 2385 | } else if (data_was_unread) { |
| 2386 | /* Unread data was tossed, zap the connection. */ |
| 2387 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); |
| 2388 | tcp_set_state(sk, TCP_CLOSE); |
| 2389 | tcp_send_active_reset(sk, sk->sk_allocation); |
| 2390 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { |
| 2391 | /* Check zero linger _after_ checking for unread data. */ |
| 2392 | sk->sk_prot->disconnect(sk, 0); |
| 2393 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); |
| 2394 | } else if (tcp_close_state(sk)) { |
| 2395 | /* We FIN if the application ate all the data before |
| 2396 | * zapping the connection. |
| 2397 | */ |
| 2398 | |
| 2399 | /* RED-PEN. Formally speaking, we have broken TCP state |
| 2400 | * machine. State transitions: |
| 2401 | * |
| 2402 | * TCP_ESTABLISHED -> TCP_FIN_WAIT1 |
| 2403 | * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) |
| 2404 | * TCP_CLOSE_WAIT -> TCP_LAST_ACK |
| 2405 | * |
| 2406 | * are legal only when FIN has been sent (i.e. in window), |
| 2407 | * rather than queued out of window. Purists blame. |
| 2408 | * |
| 2409 | * F.e. "RFC state" is ESTABLISHED, |
| 2410 | * if Linux state is FIN-WAIT-1, but FIN is still not sent. |
| 2411 | * |
| 2412 | * The visible deviations are that sometimes |
| 2413 | * we enter the time-wait state when it is not really required |
| 2414 | * (harmless), and do not send active resets when they are |
| 2415 | * required by the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when |
| 2416 | * they look like CLOSING or LAST_ACK to Linux). |
| 2417 | * Probably, I missed some more small holes. |
| 2418 | * --ANK |
| 2419 | * XXX (TFO) - To start off we don't support SYN+ACK+FIN |
| 2420 | * in a single packet! (May consider it later but will |
| 2421 | * probably need API support or TCP_CORK SYN-ACK until |
| 2422 | * data is written and socket is closed.) |
| 2423 | */ |
| 2424 | tcp_send_fin(sk); |
| 2425 | } |
| 2426 | |
| 2427 | sk_stream_wait_close(sk, timeout); |
| 2428 | |
| 2429 | adjudge_to_death: |
| 2430 | state = sk->sk_state; |
| 2431 | sock_hold(sk); |
| 2432 | sock_orphan(sk); |
| 2433 | |
| 2434 | local_bh_disable(); |
| 2435 | bh_lock_sock(sk); |
| 2436 | /* remove backlog if any, without releasing ownership. */ |
| 2437 | __release_sock(sk); |
| 2438 | |
| 2439 | percpu_counter_inc(sk->sk_prot->orphan_count); |
| 2440 | |
| 2441 | /* Have we already been destroyed by a softirq or backlog? */ |
| 2442 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) |
| 2443 | goto out; |
| 2444 | |
| 2445 | /* This is a (useful) BSD violation of the RFC. There is a |
| 2446 | * problem with TCP as specified in that the other end could |
| 2447 | * keep a socket open forever with no application left at this end. |
| 2448 | * We use a 1 minute timeout (about the same as BSD) then kill |
| 2449 | * our end. If they send after that then tough - BUT: long enough |
| 2450 | * that we won't repeat the old "4*rto = almost no time - |
| 2451 | * whoops, reset" mistake. |
| 2452 | * |
| 2453 | * Nope, it was not a mistake. It is really desired behaviour, |
| 2454 | * e.g. on HTTP servers, where such sockets are useless but |
| 2455 | * consume significant resources. Let's do it with a special |
| 2456 | * linger2 option. --ANK |
| 2457 | */ |
| 2458 | |
| 2459 | if (sk->sk_state == TCP_FIN_WAIT2) { |
| 2460 | struct tcp_sock *tp = tcp_sk(sk); |
| 2461 | if (tp->linger2 < 0) { |
| 2462 | tcp_set_state(sk, TCP_CLOSE); |
| 2463 | tcp_send_active_reset(sk, GFP_ATOMIC); |
| 2464 | __NET_INC_STATS(sock_net(sk), |
| 2465 | LINUX_MIB_TCPABORTONLINGER); |
| 2466 | } else { |
| 2467 | const int tmo = tcp_fin_time(sk); |
| 2468 | |
| 2469 | if (tmo > TCP_TIMEWAIT_LEN) { |
| 2470 | inet_csk_reset_keepalive_timer(sk, |
| 2471 | tmo - TCP_TIMEWAIT_LEN); |
| 2472 | } else { |
| 2473 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); |
| 2474 | goto out; |
| 2475 | } |
| 2476 | } |
| 2477 | } |
| 2478 | if (sk->sk_state != TCP_CLOSE) { |
| 2479 | sk_mem_reclaim(sk); |
| 2480 | if (tcp_check_oom(sk, 0)) { |
| 2481 | tcp_set_state(sk, TCP_CLOSE); |
| 2482 | tcp_send_active_reset(sk, GFP_ATOMIC); |
| 2483 | __NET_INC_STATS(sock_net(sk), |
| 2484 | LINUX_MIB_TCPABORTONMEMORY); |
| 2485 | } else if (!check_net(sock_net(sk))) { |
| 2486 | /* Not possible to send reset; just close */ |
| 2487 | tcp_set_state(sk, TCP_CLOSE); |
| 2488 | } |
| 2489 | } |
| 2490 | |
| 2491 | if (sk->sk_state == TCP_CLOSE) { |
| 2492 | struct request_sock *req; |
| 2493 | |
| 2494 | req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, |
| 2495 | lockdep_sock_is_held(sk)); |
| 2496 | /* We could get here with a non-NULL req if the socket is |
| 2497 | * aborted (e.g., closed with unread data) before 3WHS |
| 2498 | * finishes. |
| 2499 | */ |
| 2500 | if (req) |
| 2501 | reqsk_fastopen_remove(sk, req, false); |
| 2502 | inet_csk_destroy_sock(sk); |
| 2503 | } |
| 2504 | /* Otherwise, socket is reprieved until protocol close. */ |
| 2505 | |
| 2506 | out: |
| 2507 | bh_unlock_sock(sk); |
| 2508 | local_bh_enable(); |
| 2509 | release_sock(sk); |
| 2510 | sock_put(sk); |
| 2511 | } |
| 2512 | EXPORT_SYMBOL(tcp_close); |
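/*
 * Illustrative user-space note on the linger handling above: a zero
 * linger timeout makes close() take the disconnect path, typically
 * emitting a RST instead of a FIN and skipping TIME-WAIT:
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);
 */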
| 2513 | |
| 2514 | /* These states need RST on ABORT according to RFC793 */ |
| 2515 | |
| 2516 | static inline bool tcp_need_reset(int state) |
| 2517 | { |
| 2518 | return (1 << state) & |
| 2519 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | |
| 2520 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV); |
| 2521 | } |
| 2522 | |
| 2523 | static void tcp_rtx_queue_purge(struct sock *sk) |
| 2524 | { |
| 2525 | struct rb_node *p = rb_first(&sk->tcp_rtx_queue); |
| 2526 | |
| 2527 | while (p) { |
| 2528 | struct sk_buff *skb = rb_to_skb(p); |
| 2529 | |
| 2530 | p = rb_next(p); |
| 2531 | /* Since we are deleting the whole queue, there is no need to |
| 2532 | * list_del(&skb->tcp_tsorted_anchor) |
| 2533 | */ |
| 2534 | tcp_rtx_queue_unlink(skb, sk); |
| 2535 | sk_wmem_free_skb(sk, skb); |
| 2536 | } |
| 2537 | } |
| 2538 | |
| 2539 | void tcp_write_queue_purge(struct sock *sk) |
| 2540 | { |
| 2541 | struct sk_buff *skb; |
| 2542 | |
| 2543 | tcp_chrono_stop(sk, TCP_CHRONO_BUSY); |
| 2544 | while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { |
| 2545 | tcp_skb_tsorted_anchor_cleanup(skb); |
| 2546 | sk_wmem_free_skb(sk, skb); |
| 2547 | } |
| 2548 | tcp_rtx_queue_purge(sk); |
| 2549 | skb = sk->sk_tx_skb_cache; |
| 2550 | if (skb) { |
| 2551 | __kfree_skb(skb); |
| 2552 | sk->sk_tx_skb_cache = NULL; |
| 2553 | } |
| 2554 | INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); |
| 2555 | sk_mem_reclaim(sk); |
| 2556 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
| 2557 | tcp_sk(sk)->packets_out = 0; |
| 2558 | inet_csk(sk)->icsk_backoff = 0; |
| 2559 | } |
| 2560 | |
| 2561 | int tcp_disconnect(struct sock *sk, int flags) |
| 2562 | { |
| 2563 | struct inet_sock *inet = inet_sk(sk); |
| 2564 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 2565 | struct tcp_sock *tp = tcp_sk(sk); |
| 2566 | int old_state = sk->sk_state; |
| 2567 | u32 seq; |
| 2568 | |
| 2569 | if (old_state != TCP_CLOSE) |
| 2570 | tcp_set_state(sk, TCP_CLOSE); |
| 2571 | |
| 2572 | /* ABORT function of RFC793 */ |
| 2573 | if (old_state == TCP_LISTEN) { |
| 2574 | inet_csk_listen_stop(sk); |
| 2575 | } else if (unlikely(tp->repair)) { |
| 2576 | sk->sk_err = ECONNABORTED; |
| 2577 | } else if (tcp_need_reset(old_state) || |
| 2578 | (tp->snd_nxt != tp->write_seq && |
| 2579 | (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { |
| 2580 | /* The last check adjusts for discrepancy of Linux wrt. RFC |
| 2581 | * states |
| 2582 | */ |
| 2583 | tcp_send_active_reset(sk, gfp_any()); |
| 2584 | sk->sk_err = ECONNRESET; |
| 2585 | } else if (old_state == TCP_SYN_SENT) |
| 2586 | sk->sk_err = ECONNRESET; |
| 2587 | |
| 2588 | tcp_clear_xmit_timers(sk); |
| 2589 | __skb_queue_purge(&sk->sk_receive_queue); |
| 2590 | if (sk->sk_rx_skb_cache) { |
| 2591 | __kfree_skb(sk->sk_rx_skb_cache); |
| 2592 | sk->sk_rx_skb_cache = NULL; |
| 2593 | } |
| 2594 | WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); |
| 2595 | tp->urg_data = 0; |
| 2596 | tcp_write_queue_purge(sk); |
| 2597 | tcp_fastopen_active_disable_ofo_check(sk); |
| 2598 | skb_rbtree_purge(&tp->out_of_order_queue); |
| 2599 | |
| 2600 | inet->inet_dport = 0; |
| 2601 | |
| 2602 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
| 2603 | inet_reset_saddr(sk); |
| 2604 | |
| 2605 | sk->sk_shutdown = 0; |
| 2606 | sock_reset_flag(sk, SOCK_DONE); |
| 2607 | tp->srtt_us = 0; |
| 2608 | tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); |
| 2609 | tp->rcv_rtt_last_tsecr = 0; |
| 2610 | |
| 2611 | seq = tp->write_seq + tp->max_window + 2; |
| 2612 | if (!seq) |
| 2613 | seq = 1; |
| 2614 | WRITE_ONCE(tp->write_seq, seq); |
| 2615 | |
| 2616 | icsk->icsk_backoff = 0; |
| 2617 | tp->snd_cwnd = 2; |
| 2618 | icsk->icsk_probes_out = 0; |
| 2619 | icsk->icsk_rto = TCP_TIMEOUT_INIT; |
| 2620 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
| 2621 | tp->snd_cwnd = TCP_INIT_CWND; |
| 2622 | tp->snd_cwnd_cnt = 0; |
| 2623 | tp->window_clamp = 0; |
| 2624 | tp->delivered_ce = 0; |
| 2625 | tcp_set_ca_state(sk, TCP_CA_Open); |
| 2626 | tp->is_sack_reneg = 0; |
| 2627 | tcp_clear_retrans(tp); |
| 2628 | inet_csk_delack_init(sk); |
| 2629 | /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 |
| 2630 | * issue in __tcp_select_window() |
| 2631 | */ |
| 2632 | icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; |
| 2633 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); |
| 2634 | __sk_dst_reset(sk); |
| 2635 | dst_release(sk->sk_rx_dst); |
| 2636 | sk->sk_rx_dst = NULL; |
| 2637 | tcp_saved_syn_free(tp); |
| 2638 | tp->compressed_ack = 0; |
| 2639 | tp->bytes_sent = 0; |
| 2640 | tp->bytes_acked = 0; |
| 2641 | tp->bytes_received = 0; |
| 2642 | tp->bytes_retrans = 0; |
| 2643 | tp->duplicate_sack[0].start_seq = 0; |
| 2644 | tp->duplicate_sack[0].end_seq = 0; |
| 2645 | tp->dsack_dups = 0; |
| 2646 | tp->reord_seen = 0; |
| 2647 | tp->retrans_out = 0; |
| 2648 | tp->sacked_out = 0; |
| 2649 | tp->tlp_high_seq = 0; |
| 2650 | tp->last_oow_ack_time = 0; |
| 2651 | /* There's a bubble in the pipe until at least the first ACK. */ |
| 2652 | tp->app_limited = ~0U; |
| 2653 | tp->rack.mstamp = 0; |
| 2654 | tp->rack.advanced = 0; |
| 2655 | tp->rack.reo_wnd_steps = 1; |
| 2656 | tp->rack.last_delivered = 0; |
| 2657 | tp->rack.reo_wnd_persist = 0; |
| 2658 | tp->rack.dsack_seen = 0; |
| 2659 | tp->syn_data_acked = 0; |
| 2660 | tp->rx_opt.saw_tstamp = 0; |
| 2661 | tp->rx_opt.dsack = 0; |
| 2662 | tp->rx_opt.num_sacks = 0; |
| 2663 | tp->rcv_ooopack = 0; |
| 2664 | |
| 2665 | |
| 2666 | /* Clean up fastopen related fields */ |
| 2667 | tcp_free_fastopen_req(tp); |
| 2668 | inet->defer_connect = 0; |
| 2669 | |
| 2670 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
| 2671 | |
| 2672 | if (sk->sk_frag.page) { |
| 2673 | put_page(sk->sk_frag.page); |
| 2674 | sk->sk_frag.page = NULL; |
| 2675 | sk->sk_frag.offset = 0; |
| 2676 | } |
| 2677 | |
| 2678 | sk->sk_error_report(sk); |
| 2679 | return 0; |
| 2680 | } |
| 2681 | EXPORT_SYMBOL(tcp_disconnect); |
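/*
 * tcp_disconnect() is reached from user space by connecting to
 * AF_UNSPEC; a sketch (error handling elided):
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// aborts/resets the association
 */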
| 2682 | |
| 2683 | static inline bool tcp_can_repair_sock(const struct sock *sk) |
| 2684 | { |
| 2685 | return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && |
| 2686 | (sk->sk_state != TCP_LISTEN); |
| 2687 | } |
| 2688 | |
| 2689 | static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len) |
| 2690 | { |
| 2691 | struct tcp_repair_window opt; |
| 2692 | |
| 2693 | if (!tp->repair) |
| 2694 | return -EPERM; |
| 2695 | |
| 2696 | if (len != sizeof(opt)) |
| 2697 | return -EINVAL; |
| 2698 | |
| 2699 | if (copy_from_user(&opt, optbuf, sizeof(opt))) |
| 2700 | return -EFAULT; |
| 2701 | |
| 2702 | if (opt.max_window < opt.snd_wnd) |
| 2703 | return -EINVAL; |
| 2704 | |
| 2705 | if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) |
| 2706 | return -EINVAL; |
| 2707 | |
| 2708 | if (after(opt.rcv_wup, tp->rcv_nxt)) |
| 2709 | return -EINVAL; |
| 2710 | |
| 2711 | tp->snd_wl1 = opt.snd_wl1; |
| 2712 | tp->snd_wnd = opt.snd_wnd; |
| 2713 | tp->max_window = opt.max_window; |
| 2714 | |
| 2715 | tp->rcv_wnd = opt.rcv_wnd; |
| 2716 | tp->rcv_wup = opt.rcv_wup; |
| 2717 | |
| 2718 | return 0; |
| 2719 | } |
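/*
 * Checkpoint/restore-style usage sketch (illustrative; "saved" is a
 * hypothetical snapshot taken on the source side, and this is not a
 * complete restore sequence): with TCP_REPAIR enabled, the saved
 * window state is pushed back through the helper above:
 *
 *	struct tcp_repair_window w = {
 *		.snd_wl1	= saved.snd_wl1,
 *		.snd_wnd	= saved.snd_wnd,
 *		.max_window	= saved.max_window,
 *		.rcv_wnd	= saved.rcv_wnd,
 *		.rcv_wup	= saved.rcv_wup,
 *	};
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, &w, sizeof(w));
 */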
| 2720 | |
| 2721 | static int tcp_repair_options_est(struct sock *sk, |
| 2722 | struct tcp_repair_opt __user *optbuf, unsigned int len) |
| 2723 | { |
| 2724 | struct tcp_sock *tp = tcp_sk(sk); |
| 2725 | struct tcp_repair_opt opt; |
| 2726 | |
| 2727 | while (len >= sizeof(opt)) { |
| 2728 | if (copy_from_user(&opt, optbuf, sizeof(opt))) |
| 2729 | return -EFAULT; |
| 2730 | |
| 2731 | optbuf++; |
| 2732 | len -= sizeof(opt); |
| 2733 | |
| 2734 | switch (opt.opt_code) { |
| 2735 | case TCPOPT_MSS: |
| 2736 | tp->rx_opt.mss_clamp = opt.opt_val; |
| 2737 | tcp_mtup_init(sk); |
| 2738 | break; |
| 2739 | case TCPOPT_WINDOW: |
| 2740 | { |
| 2741 | u16 snd_wscale = opt.opt_val & 0xFFFF; |
| 2742 | u16 rcv_wscale = opt.opt_val >> 16; |
| 2743 | |
| 2744 | if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) |
| 2745 | return -EFBIG; |
| 2746 | |
| 2747 | tp->rx_opt.snd_wscale = snd_wscale; |
| 2748 | tp->rx_opt.rcv_wscale = rcv_wscale; |
| 2749 | tp->rx_opt.wscale_ok = 1; |
| 2750 | } |
| 2751 | break; |
| 2752 | case TCPOPT_SACK_PERM: |
| 2753 | if (opt.opt_val != 0) |
| 2754 | return -EINVAL; |
| 2755 | |
| 2756 | tp->rx_opt.sack_ok |= TCP_SACK_SEEN; |
| 2757 | break; |
| 2758 | case TCPOPT_TIMESTAMP: |
| 2759 | if (opt.opt_val != 0) |
| 2760 | return -EINVAL; |
| 2761 | |
| 2762 | tp->rx_opt.tstamp_ok = 1; |
| 2763 | break; |
| 2764 | } |
| 2765 | } |
| 2766 | |
| 2767 | return 0; |
| 2768 | } |
| 2769 | |
| 2770 | DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); |
| 2771 | EXPORT_SYMBOL(tcp_tx_delay_enabled); |
| 2772 | |
| 2773 | static void tcp_enable_tx_delay(void) |
| 2774 | { |
| 2775 | if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { |
| 2776 | static int __tcp_tx_delay_enabled = 0; |
| 2777 | |
| 2778 | if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { |
| 2779 | static_branch_enable(&tcp_tx_delay_enabled); |
| 2780 | pr_info("TCP_TX_DELAY enabled\n"); |
| 2781 | } |
| 2782 | } |
| 2783 | } |
| 2784 | |
| 2785 | /* |
| 2786 | * Socket option code for TCP. |
| 2787 | */ |
| 2788 | static int do_tcp_setsockopt(struct sock *sk, int level, |
| 2789 | int optname, char __user *optval, unsigned int optlen) |
| 2790 | { |
| 2791 | struct tcp_sock *tp = tcp_sk(sk); |
| 2792 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 2793 | struct net *net = sock_net(sk); |
| 2794 | int val; |
| 2795 | int err = 0; |
| 2796 | |
| 2797 | /* These are data/string values, all the others are ints */ |
| 2798 | switch (optname) { |
| 2799 | case TCP_CONGESTION: { |
| 2800 | char name[TCP_CA_NAME_MAX]; |
| 2801 | |
| 2802 | if (optlen < 1) |
| 2803 | return -EINVAL; |
| 2804 | |
| 2805 | val = strncpy_from_user(name, optval, |
| 2806 | min_t(long, TCP_CA_NAME_MAX-1, optlen)); |
| 2807 | if (val < 0) |
| 2808 | return -EFAULT; |
| 2809 | name[val] = 0; |
| 2810 | |
| 2811 | lock_sock(sk); |
| 2812 | err = tcp_set_congestion_control(sk, name, true, true, |
| 2813 | ns_capable(sock_net(sk)->user_ns, |
| 2814 | CAP_NET_ADMIN)); |
| 2815 | release_sock(sk); |
| 2816 | return err; |
| 2817 | } |
| 2818 | case TCP_ULP: { |
| 2819 | char name[TCP_ULP_NAME_MAX]; |
| 2820 | |
| 2821 | if (optlen < 1) |
| 2822 | return -EINVAL; |
| 2823 | |
| 2824 | val = strncpy_from_user(name, optval, |
| 2825 | min_t(long, TCP_ULP_NAME_MAX - 1, |
| 2826 | optlen)); |
| 2827 | if (val < 0) |
| 2828 | return -EFAULT; |
| 2829 | name[val] = 0; |
| 2830 | |
| 2831 | lock_sock(sk); |
| 2832 | err = tcp_set_ulp(sk, name); |
| 2833 | release_sock(sk); |
| 2834 | return err; |
| 2835 | } |
| 2836 | case TCP_FASTOPEN_KEY: { |
| 2837 | __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; |
| 2838 | __u8 *backup_key = NULL; |
| 2839 | |
| 2840 | /* Allow a backup key as well to facilitate key rotation. |
| 2841 | * The first key is the active one. |
| 2842 | */ |
| 2843 | if (optlen != TCP_FASTOPEN_KEY_LENGTH && |
| 2844 | optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) |
| 2845 | return -EINVAL; |
| 2846 | |
| 2847 | if (copy_from_user(key, optval, optlen)) |
| 2848 | return -EFAULT; |
| 2849 | |
| 2850 | if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) |
| 2851 | backup_key = key + TCP_FASTOPEN_KEY_LENGTH; |
| 2852 | |
| 2853 | return tcp_fastopen_reset_cipher(net, sk, key, backup_key); |
| 2854 | } |
| 2855 | default: |
| 2856 | /* fallthru */ |
| 2857 | break; |
| 2858 | } |
| 2859 | |
| 2860 | if (optlen < sizeof(int)) |
| 2861 | return -EINVAL; |
| 2862 | |
| 2863 | if (get_user(val, (int __user *)optval)) |
| 2864 | return -EFAULT; |
| 2865 | |
| 2866 | lock_sock(sk); |
| 2867 | |
| 2868 | switch (optname) { |
| 2869 | case TCP_MAXSEG: |
| 2870 | /* Values greater than the interface MTU won't take effect. However, |
| 2871 | * at the point when this call is made we typically don't yet |
| 2872 | * know which interface is going to be used. |
| 2873 | */ |
| 2874 | if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { |
| 2875 | err = -EINVAL; |
| 2876 | break; |
| 2877 | } |
| 2878 | tp->rx_opt.user_mss = val; |
| 2879 | break; |
| 2880 | |
| 2881 | case TCP_NODELAY: |
| 2882 | if (val) { |
| 2883 | /* TCP_NODELAY is weaker than TCP_CORK, so that |
| 2884 | * this option on a corked socket is remembered, but |
| 2885 | * it is not activated until the cork is cleared. |
| 2886 | * |
| 2887 | * However, when TCP_NODELAY is set we make |
| 2888 | * an explicit push, which overrides even TCP_CORK |
| 2889 | * for currently queued segments. |
| 2890 | */ |
| 2891 | tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; |
| 2892 | tcp_push_pending_frames(sk); |
| 2893 | } else { |
| 2894 | tp->nonagle &= ~TCP_NAGLE_OFF; |
| 2895 | } |
| 2896 | break; |
| 2897 | |
| 2898 | case TCP_THIN_LINEAR_TIMEOUTS: |
| 2899 | if (val < 0 || val > 1) |
| 2900 | err = -EINVAL; |
| 2901 | else |
| 2902 | tp->thin_lto = val; |
| 2903 | break; |
| 2904 | |
| 2905 | case TCP_THIN_DUPACK: |
| 2906 | if (val < 0 || val > 1) |
| 2907 | err = -EINVAL; |
| 2908 | break; |
| 2909 | |
| 2910 | case TCP_REPAIR: |
| 2911 | if (!tcp_can_repair_sock(sk)) |
| 2912 | err = -EPERM; |
| 2913 | else if (val == TCP_REPAIR_ON) { |
| 2914 | tp->repair = 1; |
| 2915 | sk->sk_reuse = SK_FORCE_REUSE; |
| 2916 | tp->repair_queue = TCP_NO_QUEUE; |
| 2917 | } else if (val == TCP_REPAIR_OFF) { |
| 2918 | tp->repair = 0; |
| 2919 | sk->sk_reuse = SK_NO_REUSE; |
| 2920 | tcp_send_window_probe(sk); |
| 2921 | } else if (val == TCP_REPAIR_OFF_NO_WP) { |
| 2922 | tp->repair = 0; |
| 2923 | sk->sk_reuse = SK_NO_REUSE; |
| 2924 | } else |
| 2925 | err = -EINVAL; |
| 2926 | |
| 2927 | break; |
| 2928 | |
| 2929 | case TCP_REPAIR_QUEUE: |
| 2930 | if (!tp->repair) |
| 2931 | err = -EPERM; |
| 2932 | else if ((unsigned int)val < TCP_QUEUES_NR) |
| 2933 | tp->repair_queue = val; |
| 2934 | else |
| 2935 | err = -EINVAL; |
| 2936 | break; |
| 2937 | |
| 2938 | case TCP_QUEUE_SEQ: |
| 2939 | if (sk->sk_state != TCP_CLOSE) |
| 2940 | err = -EPERM; |
| 2941 | else if (tp->repair_queue == TCP_SEND_QUEUE) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 2942 | WRITE_ONCE(tp->write_seq, val); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2943 | else if (tp->repair_queue == TCP_RECV_QUEUE) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 2944 | WRITE_ONCE(tp->rcv_nxt, val); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2945 | else |
| 2946 | err = -EINVAL; |
| 2947 | break; |
| 2948 | |
| 2949 | case TCP_REPAIR_OPTIONS: |
| 2950 | if (!tp->repair) |
| 2951 | err = -EINVAL; |
| 2952 | else if (sk->sk_state == TCP_ESTABLISHED) |
| 2953 | err = tcp_repair_options_est(sk, |
| 2954 | (struct tcp_repair_opt __user *)optval, |
| 2955 | optlen); |
| 2956 | else |
| 2957 | err = -EPERM; |
| 2958 | break; |
| 2959 | |
| 2960 | case TCP_CORK: |
| 2961 | /* When set, indicates that non-full frames should always be queued. |
| 2962 | * Later the user clears this option and we transmit |
| 2963 | * any pending partial frames in the queue. This is |
| 2964 | * meant to be used alongside sendfile() to get properly |
| 2965 | * filled frames when the user (for example) must write |
| 2966 | * out headers with a write() call first and then use |
| 2967 | * sendfile() to send out the data parts. |
| 2968 | * |
| 2969 | * TCP_CORK can be set together with TCP_NODELAY and it is |
| 2970 | * stronger than TCP_NODELAY. |
| 2971 | */ |
| 2972 | if (val) { |
| 2973 | tp->nonagle |= TCP_NAGLE_CORK; |
| 2974 | } else { |
| 2975 | tp->nonagle &= ~TCP_NAGLE_CORK; |
| 2976 | if (tp->nonagle&TCP_NAGLE_OFF) |
| 2977 | tp->nonagle |= TCP_NAGLE_PUSH; |
| 2978 | tcp_push_pending_frames(sk); |
| 2979 | } |
| 2980 | break; |
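/* Illustrative userspace pattern for the sendfile() case described above
 * (a sketch, not part of this file): cork, write the headers, stream the
 * payload, then uncork so the final partial frame is pushed out. "fd",
 * "file_fd", "hdr" and the lengths are hypothetical.
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);
 *	sendfile(fd, file_fd, NULL, file_len);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 */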
| 2981 | |
| 2982 | case TCP_KEEPIDLE: |
| 2983 | if (val < 1 || val > MAX_TCP_KEEPIDLE) |
| 2984 | err = -EINVAL; |
| 2985 | else { |
| 2986 | tp->keepalive_time = val * HZ; |
| 2987 | if (sock_flag(sk, SOCK_KEEPOPEN) && |
| 2988 | !((1 << sk->sk_state) & |
| 2989 | (TCPF_CLOSE | TCPF_LISTEN))) { |
| 2990 | u32 elapsed = keepalive_time_elapsed(tp); |
| 2991 | if (tp->keepalive_time > elapsed) |
| 2992 | elapsed = tp->keepalive_time - elapsed; |
| 2993 | else |
| 2994 | elapsed = 0; |
| 2995 | inet_csk_reset_keepalive_timer(sk, elapsed); |
| 2996 | } |
| 2997 | } |
| 2998 | break; |
| 2999 | case TCP_KEEPINTVL: |
| 3000 | if (val < 1 || val > MAX_TCP_KEEPINTVL) |
| 3001 | err = -EINVAL; |
| 3002 | else |
| 3003 | tp->keepalive_intvl = val * HZ; |
| 3004 | break; |
| 3005 | case TCP_KEEPCNT: |
| 3006 | if (val < 1 || val > MAX_TCP_KEEPCNT) |
| 3007 | err = -EINVAL; |
| 3008 | else |
| 3009 | tp->keepalive_probes = val; |
| 3010 | break; |
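/* Illustrative keepalive tuning from userspace (a sketch, not part of this
 * file): probe an idle connection after 60s, every 10s, and drop it after
 * five unanswered probes. SO_KEEPALIVE must also be enabled at the socket
 * level for these values to take effect. "fd" is hypothetical.
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */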
| 3011 | case TCP_SYNCNT: |
| 3012 | if (val < 1 || val > MAX_TCP_SYNCNT) |
| 3013 | err = -EINVAL; |
| 3014 | else |
| 3015 | icsk->icsk_syn_retries = val; |
| 3016 | break; |
| 3017 | |
| 3018 | case TCP_SAVE_SYN: |
| 3019 | if (val < 0 || val > 1) |
| 3020 | err = -EINVAL; |
| 3021 | else |
| 3022 | tp->save_syn = val; |
| 3023 | break; |
| 3024 | |
| 3025 | case TCP_LINGER2: |
| 3026 | if (val < 0) |
| 3027 | tp->linger2 = -1; |
| 3028 | else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ) |
| 3029 | tp->linger2 = 0; |
| 3030 | else |
| 3031 | tp->linger2 = val * HZ; |
| 3032 | break; |
| 3033 | |
| 3034 | case TCP_DEFER_ACCEPT: |
| 3035 | /* Translate value in seconds to number of retransmits */ |
| 3036 | icsk->icsk_accept_queue.rskq_defer_accept = |
| 3037 | secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, |
| 3038 | TCP_RTO_MAX / HZ); |
| 3039 | break; |
| 3040 | |
| 3041 | case TCP_WINDOW_CLAMP: |
| 3042 | if (!val) { |
| 3043 | if (sk->sk_state != TCP_CLOSE) { |
| 3044 | err = -EINVAL; |
| 3045 | break; |
| 3046 | } |
| 3047 | tp->window_clamp = 0; |
| 3048 | } else |
| 3049 | tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? |
| 3050 | SOCK_MIN_RCVBUF / 2 : val; |
| 3051 | break; |
| 3052 | |
| 3053 | case TCP_QUICKACK: |
| 3054 | if (!val) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3055 | inet_csk_enter_pingpong_mode(sk); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3056 | } else { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3057 | inet_csk_exit_pingpong_mode(sk); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3058 | if ((1 << sk->sk_state) & |
| 3059 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && |
| 3060 | inet_csk_ack_scheduled(sk)) { |
| 3061 | icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; |
| 3062 | tcp_cleanup_rbuf(sk, 1); |
| 3063 | if (!(val & 1)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3064 | inet_csk_enter_pingpong_mode(sk); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3065 | } |
| 3066 | } |
| 3067 | break; |
| 3068 | |
| 3069 | #ifdef CONFIG_TCP_MD5SIG |
| 3070 | case TCP_MD5SIG: |
| 3071 | case TCP_MD5SIG_EXT: |
| 3072 | if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) |
| 3073 | err = tp->af_specific->md5_parse(sk, optname, optval, optlen); |
| 3074 | else |
| 3075 | err = -EINVAL; |
| 3076 | break; |
| 3077 | #endif |
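/* Illustrative TCP-MD5 (RFC 2385) key installation from userspace (a sketch,
 * not part of this file); "fd", "peer" and "secret" are hypothetical, and
 * per the state check above the option must be set before the connection is
 * established.
 *
 *	struct tcp_md5sig md5 = { };
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));  // e.g. struct sockaddr_in
 *	md5.tcpm_keylen = strlen(secret);
 *	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */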
| 3078 | case TCP_USER_TIMEOUT: |
| 3079 | /* Cap the max time in ms TCP will retry or probe the window |
| 3080 | * before giving up and aborting (ETIMEDOUT) a connection. |
| 3081 | */ |
| 3082 | if (val < 0) |
| 3083 | err = -EINVAL; |
| 3084 | else |
| 3085 | icsk->icsk_user_timeout = val; |
| 3086 | break; |
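/* Illustrative userspace usage (a sketch, not part of this file): abort the
 * connection if transmitted data stays unacknowledged for more than 30
 * seconds. The value is in milliseconds; 0 restores the default behaviour.
 * "fd" is hypothetical.
 *
 *	unsigned int tmo_ms = 30000;
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms, sizeof(tmo_ms));
 */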
| 3087 | |
| 3088 | case TCP_FASTOPEN: |
| 3089 | if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | |
| 3090 | TCPF_LISTEN))) { |
| 3091 | tcp_fastopen_init_key_once(net); |
| 3092 | |
| 3093 | fastopen_queue_tune(sk, val); |
| 3094 | } else { |
| 3095 | err = -EINVAL; |
| 3096 | } |
| 3097 | break; |
| 3098 | case TCP_FASTOPEN_CONNECT: |
| 3099 | if (val > 1 || val < 0) { |
| 3100 | err = -EINVAL; |
| 3101 | } else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) { |
| 3102 | if (sk->sk_state == TCP_CLOSE) |
| 3103 | tp->fastopen_connect = val; |
| 3104 | else |
| 3105 | err = -EINVAL; |
| 3106 | } else { |
| 3107 | err = -EOPNOTSUPP; |
| 3108 | } |
| 3109 | break; |
| 3110 | case TCP_FASTOPEN_NO_COOKIE: |
| 3111 | if (val > 1 || val < 0) |
| 3112 | err = -EINVAL; |
| 3113 | else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) |
| 3114 | err = -EINVAL; |
| 3115 | else |
| 3116 | tp->fastopen_no_cookie = val; |
| 3117 | break; |
| 3118 | case TCP_TIMESTAMP: |
| 3119 | if (!tp->repair) |
| 3120 | err = -EPERM; |
| 3121 | else |
| 3122 | tp->tsoffset = val - tcp_time_stamp_raw(); |
| 3123 | break; |
| 3124 | case TCP_REPAIR_WINDOW: |
| 3125 | err = tcp_repair_set_window(tp, optval, optlen); |
| 3126 | break; |
| 3127 | case TCP_NOTSENT_LOWAT: |
| 3128 | tp->notsent_lowat = val; |
| 3129 | sk->sk_write_space(sk); |
| 3130 | break; |
| 3131 | case TCP_INQ: |
| 3132 | if (val > 1 || val < 0) |
| 3133 | err = -EINVAL; |
| 3134 | else |
| 3135 | tp->recvmsg_inq = val; |
| 3136 | break; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3137 | case TCP_TX_DELAY: |
| 3138 | if (val) |
| 3139 | tcp_enable_tx_delay(); |
| 3140 | tp->tcp_tx_delay = val; |
| 3141 | break; |
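/* Illustrative userspace usage (a sketch, not part of this file): inject an
 * artificial transmit delay, e.g. to emulate a longer RTT while testing.
 * The value is in microseconds and 0 turns the delay off again; "fd" is
 * hypothetical.
 *
 *	int delay_us = 10000;  // 10 ms
 *	setsockopt(fd, IPPROTO_TCP, TCP_TX_DELAY, &delay_us, sizeof(delay_us));
 */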
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3142 | default: |
| 3143 | err = -ENOPROTOOPT; |
| 3144 | break; |
| 3145 | } |
| 3146 | |
| 3147 | release_sock(sk); |
| 3148 | return err; |
| 3149 | } |
| 3150 | |
| 3151 | int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, |
| 3152 | unsigned int optlen) |
| 3153 | { |
| 3154 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 3155 | |
| 3156 | if (level != SOL_TCP) |
| 3157 | return icsk->icsk_af_ops->setsockopt(sk, level, optname, |
| 3158 | optval, optlen); |
| 3159 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); |
| 3160 | } |
| 3161 | EXPORT_SYMBOL(tcp_setsockopt); |
| 3162 | |
| 3163 | #ifdef CONFIG_COMPAT |
| 3164 | int compat_tcp_setsockopt(struct sock *sk, int level, int optname, |
| 3165 | char __user *optval, unsigned int optlen) |
| 3166 | { |
| 3167 | if (level != SOL_TCP) |
| 3168 | return inet_csk_compat_setsockopt(sk, level, optname, |
| 3169 | optval, optlen); |
| 3170 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); |
| 3171 | } |
| 3172 | EXPORT_SYMBOL(compat_tcp_setsockopt); |
| 3173 | #endif |
| 3174 | |
| 3175 | static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, |
| 3176 | struct tcp_info *info) |
| 3177 | { |
| 3178 | u64 stats[__TCP_CHRONO_MAX], total = 0; |
| 3179 | enum tcp_chrono i; |
| 3180 | |
| 3181 | for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { |
| 3182 | stats[i] = tp->chrono_stat[i - 1]; |
| 3183 | if (i == tp->chrono_type) |
| 3184 | stats[i] += tcp_jiffies32 - tp->chrono_start; |
| 3185 | stats[i] *= USEC_PER_SEC / HZ; |
| 3186 | total += stats[i]; |
| 3187 | } |
| 3188 | |
| 3189 | info->tcpi_busy_time = total; |
| 3190 | info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; |
| 3191 | info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; |
| 3192 | } |
| 3193 | |
| 3194 | /* Return information about state of tcp endpoint in API format. */ |
| 3195 | void tcp_get_info(struct sock *sk, struct tcp_info *info) |
| 3196 | { |
| 3197 | const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ |
| 3198 | const struct inet_connection_sock *icsk = inet_csk(sk); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3199 | unsigned long rate; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3200 | u32 now; |
| 3201 | u64 rate64; |
| 3202 | bool slow; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3203 | |
| 3204 | memset(info, 0, sizeof(*info)); |
| 3205 | if (sk->sk_type != SOCK_STREAM) |
| 3206 | return; |
| 3207 | |
| 3208 | info->tcpi_state = inet_sk_state_load(sk); |
| 3209 | |
| 3210 | /* Report meaningful fields for all TCP states, including listeners */ |
| 3211 | rate = READ_ONCE(sk->sk_pacing_rate); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3212 | rate64 = (rate != ~0UL) ? rate : ~0ULL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3213 | info->tcpi_pacing_rate = rate64; |
| 3214 | |
| 3215 | rate = READ_ONCE(sk->sk_max_pacing_rate); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3216 | rate64 = (rate != ~0UL) ? rate : ~0ULL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3217 | info->tcpi_max_pacing_rate = rate64; |
| 3218 | |
| 3219 | info->tcpi_reordering = tp->reordering; |
| 3220 | info->tcpi_snd_cwnd = tp->snd_cwnd; |
| 3221 | |
| 3222 | if (info->tcpi_state == TCP_LISTEN) { |
| 3223 | /* listeners' aliased fields: |
| 3224 | * tcpi_unacked -> Number of children ready for accept() |
| 3225 | * tcpi_sacked -> max backlog |
| 3226 | */ |
| 3227 | info->tcpi_unacked = sk->sk_ack_backlog; |
| 3228 | info->tcpi_sacked = sk->sk_max_ack_backlog; |
| 3229 | return; |
| 3230 | } |
| 3231 | |
| 3232 | slow = lock_sock_fast(sk); |
| 3233 | |
| 3234 | info->tcpi_ca_state = icsk->icsk_ca_state; |
| 3235 | info->tcpi_retransmits = icsk->icsk_retransmits; |
| 3236 | info->tcpi_probes = icsk->icsk_probes_out; |
| 3237 | info->tcpi_backoff = icsk->icsk_backoff; |
| 3238 | |
| 3239 | if (tp->rx_opt.tstamp_ok) |
| 3240 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; |
| 3241 | if (tcp_is_sack(tp)) |
| 3242 | info->tcpi_options |= TCPI_OPT_SACK; |
| 3243 | if (tp->rx_opt.wscale_ok) { |
| 3244 | info->tcpi_options |= TCPI_OPT_WSCALE; |
| 3245 | info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; |
| 3246 | info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; |
| 3247 | } |
| 3248 | |
| 3249 | if (tp->ecn_flags & TCP_ECN_OK) |
| 3250 | info->tcpi_options |= TCPI_OPT_ECN; |
| 3251 | if (tp->ecn_flags & TCP_ECN_SEEN) |
| 3252 | info->tcpi_options |= TCPI_OPT_ECN_SEEN; |
| 3253 | if (tp->syn_data_acked) |
| 3254 | info->tcpi_options |= TCPI_OPT_SYN_DATA; |
| 3255 | |
| 3256 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); |
| 3257 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); |
| 3258 | info->tcpi_snd_mss = tp->mss_cache; |
| 3259 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; |
| 3260 | |
| 3261 | info->tcpi_unacked = tp->packets_out; |
| 3262 | info->tcpi_sacked = tp->sacked_out; |
| 3263 | |
| 3264 | info->tcpi_lost = tp->lost_out; |
| 3265 | info->tcpi_retrans = tp->retrans_out; |
| 3266 | |
| 3267 | now = tcp_jiffies32; |
| 3268 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); |
| 3269 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); |
| 3270 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); |
| 3271 | |
| 3272 | info->tcpi_pmtu = icsk->icsk_pmtu_cookie; |
| 3273 | info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; |
| 3274 | info->tcpi_rtt = tp->srtt_us >> 3; |
| 3275 | info->tcpi_rttvar = tp->mdev_us >> 2; |
| 3276 | info->tcpi_snd_ssthresh = tp->snd_ssthresh; |
| 3277 | info->tcpi_advmss = tp->advmss; |
| 3278 | |
| 3279 | info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; |
| 3280 | info->tcpi_rcv_space = tp->rcvq_space.space; |
| 3281 | |
| 3282 | info->tcpi_total_retrans = tp->total_retrans; |
| 3283 | |
| 3284 | info->tcpi_bytes_acked = tp->bytes_acked; |
| 3285 | info->tcpi_bytes_received = tp->bytes_received; |
| 3286 | info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); |
| 3287 | tcp_get_info_chrono_stats(tp, info); |
| 3288 | |
| 3289 | info->tcpi_segs_out = tp->segs_out; |
| 3290 | info->tcpi_segs_in = tp->segs_in; |
| 3291 | |
| 3292 | info->tcpi_min_rtt = tcp_min_rtt(tp); |
| 3293 | info->tcpi_data_segs_in = tp->data_segs_in; |
| 3294 | info->tcpi_data_segs_out = tp->data_segs_out; |
| 3295 | |
| 3296 | info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0; |
| 3297 | rate64 = tcp_compute_delivery_rate(tp); |
| 3298 | if (rate64) |
| 3299 | info->tcpi_delivery_rate = rate64; |
| 3300 | info->tcpi_delivered = tp->delivered; |
| 3301 | info->tcpi_delivered_ce = tp->delivered_ce; |
| 3302 | info->tcpi_bytes_sent = tp->bytes_sent; |
| 3303 | info->tcpi_bytes_retrans = tp->bytes_retrans; |
| 3304 | info->tcpi_dsack_dups = tp->dsack_dups; |
| 3305 | info->tcpi_reord_seen = tp->reord_seen; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3306 | info->tcpi_rcv_ooopack = tp->rcv_ooopack; |
| 3307 | info->tcpi_snd_wnd = tp->snd_wnd; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3308 | unlock_sock_fast(sk, slow); |
| 3309 | } |
| 3310 | EXPORT_SYMBOL_GPL(tcp_get_info); |
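/* Illustrative userspace consumer of the structure filled in above, read via
 * getsockopt(TCP_INFO) (a sketch, not part of this file); "fd" is
 * hypothetical.
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt=%uus cwnd=%u retrans=%u\n",
 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */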
| 3311 | |
| 3312 | static size_t tcp_opt_stats_get_size(void) |
| 3313 | { |
| 3314 | return |
| 3315 | nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ |
| 3316 | nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ |
| 3317 | nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ |
| 3318 | nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ |
| 3319 | nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ |
| 3320 | nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ |
| 3321 | nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ |
| 3322 | nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ |
| 3323 | nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ |
| 3324 | nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ |
| 3325 | nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ |
| 3326 | nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ |
| 3327 | nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ |
| 3328 | nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ |
| 3329 | nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ |
| 3330 | nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ |
| 3331 | nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ |
| 3332 | nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ |
| 3333 | nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ |
| 3334 | nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ |
| 3335 | nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3336 | nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3337 | 0; |
| 3338 | } |
| 3339 | |
| 3340 | struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) |
| 3341 | { |
| 3342 | const struct tcp_sock *tp = tcp_sk(sk); |
| 3343 | struct sk_buff *stats; |
| 3344 | struct tcp_info info; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3345 | unsigned long rate; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3346 | u64 rate64; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3347 | |
| 3348 | stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); |
| 3349 | if (!stats) |
| 3350 | return NULL; |
| 3351 | |
| 3352 | tcp_get_info_chrono_stats(tp, &info); |
| 3353 | nla_put_u64_64bit(stats, TCP_NLA_BUSY, |
| 3354 | info.tcpi_busy_time, TCP_NLA_PAD); |
| 3355 | nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, |
| 3356 | info.tcpi_rwnd_limited, TCP_NLA_PAD); |
| 3357 | nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, |
| 3358 | info.tcpi_sndbuf_limited, TCP_NLA_PAD); |
| 3359 | nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, |
| 3360 | tp->data_segs_out, TCP_NLA_PAD); |
| 3361 | nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, |
| 3362 | tp->total_retrans, TCP_NLA_PAD); |
| 3363 | |
| 3364 | rate = READ_ONCE(sk->sk_pacing_rate); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3365 | rate64 = (rate != ~0UL) ? rate : ~0ULL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3366 | nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); |
| 3367 | |
| 3368 | rate64 = tcp_compute_delivery_rate(tp); |
| 3369 | nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); |
| 3370 | |
| 3371 | nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd); |
| 3372 | nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); |
| 3373 | nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); |
| 3374 | |
| 3375 | nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); |
| 3376 | nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); |
| 3377 | nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); |
| 3378 | nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); |
| 3379 | nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); |
| 3380 | |
| 3381 | nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); |
| 3382 | nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); |
| 3383 | |
| 3384 | nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, |
| 3385 | TCP_NLA_PAD); |
| 3386 | nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, |
| 3387 | TCP_NLA_PAD); |
| 3388 | nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); |
| 3389 | nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3390 | nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3391 | |
| 3392 | return stats; |
| 3393 | } |
| 3394 | |
| 3395 | static int do_tcp_getsockopt(struct sock *sk, int level, |
| 3396 | int optname, char __user *optval, int __user *optlen) |
| 3397 | { |
| 3398 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 3399 | struct tcp_sock *tp = tcp_sk(sk); |
| 3400 | struct net *net = sock_net(sk); |
| 3401 | int val, len; |
| 3402 | |
| 3403 | if (get_user(len, optlen)) |
| 3404 | return -EFAULT; |
| 3405 | |
| 3406 | len = min_t(unsigned int, len, sizeof(int)); |
| 3407 | |
| 3408 | if (len < 0) |
| 3409 | return -EINVAL; |
| 3410 | |
| 3411 | switch (optname) { |
| 3412 | case TCP_MAXSEG: |
| 3413 | val = tp->mss_cache; |
| 3414 | if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) |
| 3415 | val = tp->rx_opt.user_mss; |
| 3416 | if (tp->repair) |
| 3417 | val = tp->rx_opt.mss_clamp; |
| 3418 | break; |
| 3419 | case TCP_NODELAY: |
| 3420 | val = !!(tp->nonagle&TCP_NAGLE_OFF); |
| 3421 | break; |
| 3422 | case TCP_CORK: |
| 3423 | val = !!(tp->nonagle&TCP_NAGLE_CORK); |
| 3424 | break; |
| 3425 | case TCP_KEEPIDLE: |
| 3426 | val = keepalive_time_when(tp) / HZ; |
| 3427 | break; |
| 3428 | case TCP_KEEPINTVL: |
| 3429 | val = keepalive_intvl_when(tp) / HZ; |
| 3430 | break; |
| 3431 | case TCP_KEEPCNT: |
| 3432 | val = keepalive_probes(tp); |
| 3433 | break; |
| 3434 | case TCP_SYNCNT: |
| 3435 | val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; |
| 3436 | break; |
| 3437 | case TCP_LINGER2: |
| 3438 | val = tp->linger2; |
| 3439 | if (val >= 0) |
| 3440 | val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ; |
| 3441 | break; |
| 3442 | case TCP_DEFER_ACCEPT: |
| 3443 | val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, |
| 3444 | TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); |
| 3445 | break; |
| 3446 | case TCP_WINDOW_CLAMP: |
| 3447 | val = tp->window_clamp; |
| 3448 | break; |
| 3449 | case TCP_INFO: { |
| 3450 | struct tcp_info info; |
| 3451 | |
| 3452 | if (get_user(len, optlen)) |
| 3453 | return -EFAULT; |
| 3454 | |
| 3455 | tcp_get_info(sk, &info); |
| 3456 | |
| 3457 | len = min_t(unsigned int, len, sizeof(info)); |
| 3458 | if (put_user(len, optlen)) |
| 3459 | return -EFAULT; |
| 3460 | if (copy_to_user(optval, &info, len)) |
| 3461 | return -EFAULT; |
| 3462 | return 0; |
| 3463 | } |
| 3464 | case TCP_CC_INFO: { |
| 3465 | const struct tcp_congestion_ops *ca_ops; |
| 3466 | union tcp_cc_info info; |
| 3467 | size_t sz = 0; |
| 3468 | int attr; |
| 3469 | |
| 3470 | if (get_user(len, optlen)) |
| 3471 | return -EFAULT; |
| 3472 | |
| 3473 | ca_ops = icsk->icsk_ca_ops; |
| 3474 | if (ca_ops && ca_ops->get_info) |
| 3475 | sz = ca_ops->get_info(sk, ~0U, &attr, &info); |
| 3476 | |
| 3477 | len = min_t(unsigned int, len, sz); |
| 3478 | if (put_user(len, optlen)) |
| 3479 | return -EFAULT; |
| 3480 | if (copy_to_user(optval, &info, len)) |
| 3481 | return -EFAULT; |
| 3482 | return 0; |
| 3483 | } |
| 3484 | case TCP_QUICKACK: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3485 | val = !inet_csk_in_pingpong_mode(sk); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3486 | break; |
| 3487 | |
| 3488 | case TCP_CONGESTION: |
| 3489 | if (get_user(len, optlen)) |
| 3490 | return -EFAULT; |
| 3491 | len = min_t(unsigned int, len, TCP_CA_NAME_MAX); |
| 3492 | if (put_user(len, optlen)) |
| 3493 | return -EFAULT; |
| 3494 | if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) |
| 3495 | return -EFAULT; |
| 3496 | return 0; |
| 3497 | |
| 3498 | case TCP_ULP: |
| 3499 | if (get_user(len, optlen)) |
| 3500 | return -EFAULT; |
| 3501 | len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); |
| 3502 | if (!icsk->icsk_ulp_ops) { |
| 3503 | if (put_user(0, optlen)) |
| 3504 | return -EFAULT; |
| 3505 | return 0; |
| 3506 | } |
| 3507 | if (put_user(len, optlen)) |
| 3508 | return -EFAULT; |
| 3509 | if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len)) |
| 3510 | return -EFAULT; |
| 3511 | return 0; |
| 3512 | |
| 3513 | case TCP_FASTOPEN_KEY: { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3514 | __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3515 | struct tcp_fastopen_context *ctx; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3516 | unsigned int key_len = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3517 | |
| 3518 | if (get_user(len, optlen)) |
| 3519 | return -EFAULT; |
| 3520 | |
| 3521 | rcu_read_lock(); |
| 3522 | ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3523 | if (ctx) { |
| 3524 | key_len = tcp_fastopen_context_len(ctx) * |
| 3525 | TCP_FASTOPEN_KEY_LENGTH; |
| 3526 | memcpy(&key[0], &ctx->key[0], key_len); |
| 3527 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3528 | rcu_read_unlock(); |
| 3529 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3530 | len = min_t(unsigned int, len, key_len); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3531 | if (put_user(len, optlen)) |
| 3532 | return -EFAULT; |
| 3533 | if (copy_to_user(optval, key, len)) |
| 3534 | return -EFAULT; |
| 3535 | return 0; |
| 3536 | } |
| 3537 | case TCP_THIN_LINEAR_TIMEOUTS: |
| 3538 | val = tp->thin_lto; |
| 3539 | break; |
| 3540 | |
| 3541 | case TCP_THIN_DUPACK: |
| 3542 | val = 0; |
| 3543 | break; |
| 3544 | |
| 3545 | case TCP_REPAIR: |
| 3546 | val = tp->repair; |
| 3547 | break; |
| 3548 | |
| 3549 | case TCP_REPAIR_QUEUE: |
| 3550 | if (tp->repair) |
| 3551 | val = tp->repair_queue; |
| 3552 | else |
| 3553 | return -EINVAL; |
| 3554 | break; |
| 3555 | |
| 3556 | case TCP_REPAIR_WINDOW: { |
| 3557 | struct tcp_repair_window opt; |
| 3558 | |
| 3559 | if (get_user(len, optlen)) |
| 3560 | return -EFAULT; |
| 3561 | |
| 3562 | if (len != sizeof(opt)) |
| 3563 | return -EINVAL; |
| 3564 | |
| 3565 | if (!tp->repair) |
| 3566 | return -EPERM; |
| 3567 | |
| 3568 | opt.snd_wl1 = tp->snd_wl1; |
| 3569 | opt.snd_wnd = tp->snd_wnd; |
| 3570 | opt.max_window = tp->max_window; |
| 3571 | opt.rcv_wnd = tp->rcv_wnd; |
| 3572 | opt.rcv_wup = tp->rcv_wup; |
| 3573 | |
| 3574 | if (copy_to_user(optval, &opt, len)) |
| 3575 | return -EFAULT; |
| 3576 | return 0; |
| 3577 | } |
| 3578 | case TCP_QUEUE_SEQ: |
| 3579 | if (tp->repair_queue == TCP_SEND_QUEUE) |
| 3580 | val = tp->write_seq; |
| 3581 | else if (tp->repair_queue == TCP_RECV_QUEUE) |
| 3582 | val = tp->rcv_nxt; |
| 3583 | else |
| 3584 | return -EINVAL; |
| 3585 | break; |
| 3586 | |
| 3587 | case TCP_USER_TIMEOUT: |
| 3588 | val = icsk->icsk_user_timeout; |
| 3589 | break; |
| 3590 | |
| 3591 | case TCP_FASTOPEN: |
| 3592 | val = icsk->icsk_accept_queue.fastopenq.max_qlen; |
| 3593 | break; |
| 3594 | |
| 3595 | case TCP_FASTOPEN_CONNECT: |
| 3596 | val = tp->fastopen_connect; |
| 3597 | break; |
| 3598 | |
| 3599 | case TCP_FASTOPEN_NO_COOKIE: |
| 3600 | val = tp->fastopen_no_cookie; |
| 3601 | break; |
| 3602 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3603 | case TCP_TX_DELAY: |
| 3604 | val = tp->tcp_tx_delay; |
| 3605 | break; |
| 3606 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3607 | case TCP_TIMESTAMP: |
| 3608 | val = tcp_time_stamp_raw() + tp->tsoffset; |
| 3609 | break; |
| 3610 | case TCP_NOTSENT_LOWAT: |
| 3611 | val = tp->notsent_lowat; |
| 3612 | break; |
| 3613 | case TCP_INQ: |
| 3614 | val = tp->recvmsg_inq; |
| 3615 | break; |
| 3616 | case TCP_SAVE_SYN: |
| 3617 | val = tp->save_syn; |
| 3618 | break; |
| 3619 | case TCP_SAVED_SYN: { |
| 3620 | if (get_user(len, optlen)) |
| 3621 | return -EFAULT; |
| 3622 | |
| 3623 | lock_sock(sk); |
| 3624 | if (tp->saved_syn) { |
| 3625 | if (len < tp->saved_syn[0]) { |
| 3626 | if (put_user(tp->saved_syn[0], optlen)) { |
| 3627 | release_sock(sk); |
| 3628 | return -EFAULT; |
| 3629 | } |
| 3630 | release_sock(sk); |
| 3631 | return -EINVAL; |
| 3632 | } |
| 3633 | len = tp->saved_syn[0]; |
| 3634 | if (put_user(len, optlen)) { |
| 3635 | release_sock(sk); |
| 3636 | return -EFAULT; |
| 3637 | } |
| 3638 | if (copy_to_user(optval, tp->saved_syn + 1, len)) { |
| 3639 | release_sock(sk); |
| 3640 | return -EFAULT; |
| 3641 | } |
| 3642 | tcp_saved_syn_free(tp); |
| 3643 | release_sock(sk); |
| 3644 | } else { |
| 3645 | release_sock(sk); |
| 3646 | len = 0; |
| 3647 | if (put_user(len, optlen)) |
| 3648 | return -EFAULT; |
| 3649 | } |
| 3650 | return 0; |
| 3651 | } |
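/* Illustrative userspace usage (a sketch, not part of this file):
 * TCP_SAVE_SYN is enabled on the listening socket, and the accepted socket
 * then exposes the headers of the SYN it received; as the code above shows,
 * the saved SYN is freed after a successful read. "lfd" and "cfd" are
 * hypothetical.
 *
 *	int one = 1;
 *	char syn[512];
 *	socklen_t len = sizeof(syn);
 *	setsockopt(lfd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));
 *	// ... accept() a connection as cfd ...
 *	getsockopt(cfd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len);
 */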
| 3652 | #ifdef CONFIG_MMU |
| 3653 | case TCP_ZEROCOPY_RECEIVE: { |
| 3654 | struct tcp_zerocopy_receive zc; |
| 3655 | int err; |
| 3656 | |
| 3657 | if (get_user(len, optlen)) |
| 3658 | return -EFAULT; |
| 3659 | if (len != sizeof(zc)) |
| 3660 | return -EINVAL; |
| 3661 | if (copy_from_user(&zc, optval, len)) |
| 3662 | return -EFAULT; |
| 3663 | lock_sock(sk); |
| 3664 | err = tcp_zerocopy_receive(sk, &zc); |
| 3665 | release_sock(sk); |
| 3666 | if (!err && copy_to_user(optval, &zc, len)) |
| 3667 | err = -EFAULT; |
| 3668 | return err; |
| 3669 | } |
| 3670 | #endif |
| 3671 | default: |
| 3672 | return -ENOPROTOOPT; |
| 3673 | } |
| 3674 | |
| 3675 | if (put_user(len, optlen)) |
| 3676 | return -EFAULT; |
| 3677 | if (copy_to_user(optval, &val, len)) |
| 3678 | return -EFAULT; |
| 3679 | return 0; |
| 3680 | } |
| 3681 | |
| 3682 | int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, |
| 3683 | int __user *optlen) |
| 3684 | { |
| 3685 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 3686 | |
| 3687 | if (level != SOL_TCP) |
| 3688 | return icsk->icsk_af_ops->getsockopt(sk, level, optname, |
| 3689 | optval, optlen); |
| 3690 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); |
| 3691 | } |
| 3692 | EXPORT_SYMBOL(tcp_getsockopt); |
| 3693 | |
| 3694 | #ifdef CONFIG_COMPAT |
| 3695 | int compat_tcp_getsockopt(struct sock *sk, int level, int optname, |
| 3696 | char __user *optval, int __user *optlen) |
| 3697 | { |
| 3698 | if (level != SOL_TCP) |
| 3699 | return inet_csk_compat_getsockopt(sk, level, optname, |
| 3700 | optval, optlen); |
| 3701 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); |
| 3702 | } |
| 3703 | EXPORT_SYMBOL(compat_tcp_getsockopt); |
| 3704 | #endif |
| 3705 | |
| 3706 | #ifdef CONFIG_TCP_MD5SIG |
| 3707 | static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); |
| 3708 | static DEFINE_MUTEX(tcp_md5sig_mutex); |
| 3709 | static bool tcp_md5sig_pool_populated = false; |
| 3710 | |
| 3711 | static void __tcp_alloc_md5sig_pool(void) |
| 3712 | { |
| 3713 | struct crypto_ahash *hash; |
| 3714 | int cpu; |
| 3715 | |
| 3716 | hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); |
| 3717 | if (IS_ERR(hash)) |
| 3718 | return; |
| 3719 | |
| 3720 | for_each_possible_cpu(cpu) { |
| 3721 | void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch; |
| 3722 | struct ahash_request *req; |
| 3723 | |
| 3724 | if (!scratch) { |
| 3725 | scratch = kmalloc_node(sizeof(union tcp_md5sum_block) + |
| 3726 | sizeof(struct tcphdr), |
| 3727 | GFP_KERNEL, |
| 3728 | cpu_to_node(cpu)); |
| 3729 | if (!scratch) |
| 3730 | return; |
| 3731 | per_cpu(tcp_md5sig_pool, cpu).scratch = scratch; |
| 3732 | } |
| 3733 | if (per_cpu(tcp_md5sig_pool, cpu).md5_req) |
| 3734 | continue; |
| 3735 | |
| 3736 | req = ahash_request_alloc(hash, GFP_KERNEL); |
| 3737 | if (!req) |
| 3738 | return; |
| 3739 | |
| 3740 | ahash_request_set_callback(req, 0, NULL, NULL); |
| 3741 | |
| 3742 | per_cpu(tcp_md5sig_pool, cpu).md5_req = req; |
| 3743 | } |
| 3744 | /* before setting tcp_md5sig_pool_populated, we must commit all writes |
| 3745 | * to memory. See smp_rmb() in tcp_get_md5sig_pool() |
| 3746 | */ |
| 3747 | smp_wmb(); |
| 3748 | tcp_md5sig_pool_populated = true; |
| 3749 | } |
| 3750 | |
| 3751 | bool tcp_alloc_md5sig_pool(void) |
| 3752 | { |
| 3753 | if (unlikely(!tcp_md5sig_pool_populated)) { |
| 3754 | mutex_lock(&tcp_md5sig_mutex); |
| 3755 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3756 | if (!tcp_md5sig_pool_populated) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3757 | __tcp_alloc_md5sig_pool(); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3758 | if (tcp_md5sig_pool_populated) |
| 3759 | static_branch_inc(&tcp_md5_needed); |
| 3760 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3761 | |
| 3762 | mutex_unlock(&tcp_md5sig_mutex); |
| 3763 | } |
| 3764 | return tcp_md5sig_pool_populated; |
| 3765 | } |
| 3766 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); |
| 3767 | |
| 3768 | |
| 3769 | /** |
| 3770 | * tcp_get_md5sig_pool - get md5sig_pool for this user |
| 3771 | * |
| 3772 | * We use a percpu structure, so if we succeed, we exit with preemption |
| 3773 | * and BH disabled, to make sure another thread or softirq handler |
| 3774 | * won't try to get the same context. |
| 3775 | */ |
| 3776 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) |
| 3777 | { |
| 3778 | local_bh_disable(); |
| 3779 | |
| 3780 | if (tcp_md5sig_pool_populated) { |
| 3781 | /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ |
| 3782 | smp_rmb(); |
| 3783 | return this_cpu_ptr(&tcp_md5sig_pool); |
| 3784 | } |
| 3785 | local_bh_enable(); |
| 3786 | return NULL; |
| 3787 | } |
| 3788 | EXPORT_SYMBOL(tcp_get_md5sig_pool); |
| 3789 | |
| 3790 | int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, |
| 3791 | const struct sk_buff *skb, unsigned int header_len) |
| 3792 | { |
| 3793 | struct scatterlist sg; |
| 3794 | const struct tcphdr *tp = tcp_hdr(skb); |
| 3795 | struct ahash_request *req = hp->md5_req; |
| 3796 | unsigned int i; |
| 3797 | const unsigned int head_data_len = skb_headlen(skb) > header_len ? |
| 3798 | skb_headlen(skb) - header_len : 0; |
| 3799 | const struct skb_shared_info *shi = skb_shinfo(skb); |
| 3800 | struct sk_buff *frag_iter; |
| 3801 | |
| 3802 | sg_init_table(&sg, 1); |
| 3803 | |
| 3804 | sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); |
| 3805 | ahash_request_set_crypt(req, &sg, NULL, head_data_len); |
| 3806 | if (crypto_ahash_update(req)) |
| 3807 | return 1; |
| 3808 | |
| 3809 | for (i = 0; i < shi->nr_frags; ++i) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3810 | const skb_frag_t *f = &shi->frags[i]; |
| 3811 | unsigned int offset = skb_frag_off(f); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3812 | struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); |
| 3813 | |
| 3814 | sg_set_page(&sg, page, skb_frag_size(f), |
| 3815 | offset_in_page(offset)); |
| 3816 | ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f)); |
| 3817 | if (crypto_ahash_update(req)) |
| 3818 | return 1; |
| 3819 | } |
| 3820 | |
| 3821 | skb_walk_frags(skb, frag_iter) |
| 3822 | if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) |
| 3823 | return 1; |
| 3824 | |
| 3825 | return 0; |
| 3826 | } |
| 3827 | EXPORT_SYMBOL(tcp_md5_hash_skb_data); |
| 3828 | |
| 3829 | int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) |
| 3830 | { |
| 3831 | struct scatterlist sg; |
| 3832 | |
| 3833 | sg_init_one(&sg, key->key, key->keylen); |
| 3834 | ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen); |
| 3835 | return crypto_ahash_update(hp->md5_req); |
| 3836 | } |
| 3837 | EXPORT_SYMBOL(tcp_md5_hash_key); |
| 3838 | |
| 3839 | #endif |
| 3840 | |
| 3841 | void tcp_done(struct sock *sk) |
| 3842 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3843 | struct request_sock *req; |
| 3844 | |
| 3845 | /* We might be called with a new socket, after |
| 3846 | * inet_csk_prepare_forced_close() has been called, |
| 3847 | * so we cannot use lockdep_sock_is_held(sk). |
| 3848 | */ |
| 3849 | req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3850 | |
| 3851 | if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) |
| 3852 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); |
| 3853 | |
| 3854 | tcp_set_state(sk, TCP_CLOSE); |
| 3855 | tcp_clear_xmit_timers(sk); |
| 3856 | if (req) |
| 3857 | reqsk_fastopen_remove(sk, req, false); |
| 3858 | |
| 3859 | sk->sk_shutdown = SHUTDOWN_MASK; |
| 3860 | |
| 3861 | if (!sock_flag(sk, SOCK_DEAD)) |
| 3862 | sk->sk_state_change(sk); |
| 3863 | else |
| 3864 | inet_csk_destroy_sock(sk); |
| 3865 | } |
| 3866 | EXPORT_SYMBOL_GPL(tcp_done); |
| 3867 | |
| 3868 | int tcp_abort(struct sock *sk, int err) |
| 3869 | { |
| 3870 | if (!sk_fullsock(sk)) { |
| 3871 | if (sk->sk_state == TCP_NEW_SYN_RECV) { |
| 3872 | struct request_sock *req = inet_reqsk(sk); |
| 3873 | |
| 3874 | local_bh_disable(); |
| 3875 | inet_csk_reqsk_queue_drop(req->rsk_listener, req); |
| 3876 | local_bh_enable(); |
| 3877 | return 0; |
| 3878 | } |
| 3879 | return -EOPNOTSUPP; |
| 3880 | } |
| 3881 | |
| 3882 | /* Don't race with userspace socket closes such as tcp_close. */ |
| 3883 | lock_sock(sk); |
| 3884 | |
| 3885 | if (sk->sk_state == TCP_LISTEN) { |
| 3886 | tcp_set_state(sk, TCP_CLOSE); |
| 3887 | inet_csk_listen_stop(sk); |
| 3888 | } |
| 3889 | |
| 3890 | /* Don't race with BH socket closes such as inet_csk_listen_stop. */ |
| 3891 | local_bh_disable(); |
| 3892 | bh_lock_sock(sk); |
| 3893 | |
| 3894 | if (!sock_flag(sk, SOCK_DEAD)) { |
| 3895 | sk->sk_err = err; |
| 3896 | /* This barrier is coupled with smp_rmb() in tcp_poll() */ |
| 3897 | smp_wmb(); |
| 3898 | sk->sk_error_report(sk); |
| 3899 | if (tcp_need_reset(sk->sk_state)) |
| 3900 | tcp_send_active_reset(sk, GFP_ATOMIC); |
| 3901 | tcp_done(sk); |
| 3902 | } |
| 3903 | |
| 3904 | bh_unlock_sock(sk); |
| 3905 | local_bh_enable(); |
| 3906 | tcp_write_queue_purge(sk); |
| 3907 | release_sock(sk); |
| 3908 | return 0; |
| 3909 | } |
| 3910 | EXPORT_SYMBOL_GPL(tcp_abort); |
| 3911 | |
| 3912 | extern struct tcp_congestion_ops tcp_reno; |
| 3913 | |
| 3914 | static __initdata unsigned long thash_entries; |
| 3915 | static int __init set_thash_entries(char *str) |
| 3916 | { |
| 3917 | ssize_t ret; |
| 3918 | |
| 3919 | if (!str) |
| 3920 | return 0; |
| 3921 | |
| 3922 | ret = kstrtoul(str, 0, &thash_entries); |
| 3923 | if (ret) |
| 3924 | return 0; |
| 3925 | |
| 3926 | return 1; |
| 3927 | } |
| 3928 | __setup("thash_entries=", set_thash_entries); |
| 3929 | |
| 3930 | static void __init tcp_init_mem(void) |
| 3931 | { |
| 3932 | unsigned long limit = nr_free_buffer_pages() / 16; |
| 3933 | |
| 3934 | limit = max(limit, 128UL); |
| 3935 | sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ |
| 3936 | sysctl_tcp_mem[1] = limit; /* 6.25 % */ |
| 3937 | sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ |
| 3938 | } |
| 3939 | |
| 3940 | void __init tcp_init(void) |
| 3941 | { |
| 3942 | int max_rshare, max_wshare, cnt; |
| 3943 | unsigned long limit; |
| 3944 | unsigned int i; |
| 3945 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 3946 | BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3947 | BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > |
| 3948 | FIELD_SIZEOF(struct sk_buff, cb)); |
| 3949 | |
| 3950 | percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); |
| 3951 | percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL); |
| 3952 | inet_hashinfo_init(&tcp_hashinfo); |
| 3953 | inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", |
| 3954 | thash_entries, 21, /* one slot per 2 MB */ |
| 3955 | 0, 64 * 1024); |
| 3956 | tcp_hashinfo.bind_bucket_cachep = |
| 3957 | kmem_cache_create("tcp_bind_bucket", |
| 3958 | sizeof(struct inet_bind_bucket), 0, |
| 3959 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
| 3960 | |
| 3961 | /* Size and allocate the main established and bind bucket |
| 3962 | * hash tables. |
| 3963 | * |
| 3964 | * The methodology is similar to that of the buffer cache. |
| 3965 | */ |
| 3966 | tcp_hashinfo.ehash = |
| 3967 | alloc_large_system_hash("TCP established", |
| 3968 | sizeof(struct inet_ehash_bucket), |
| 3969 | thash_entries, |
| 3970 | 17, /* one slot per 128 KB of memory */ |
| 3971 | 0, |
| 3972 | NULL, |
| 3973 | &tcp_hashinfo.ehash_mask, |
| 3974 | 0, |
| 3975 | thash_entries ? 0 : 512 * 1024); |
| 3976 | for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) |
| 3977 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); |
| 3978 | |
| 3979 | if (inet_ehash_locks_alloc(&tcp_hashinfo)) |
| 3980 | panic("TCP: failed to alloc ehash_locks"); |
| 3981 | tcp_hashinfo.bhash = |
| 3982 | alloc_large_system_hash("TCP bind", |
| 3983 | sizeof(struct inet_bind_hashbucket), |
| 3984 | tcp_hashinfo.ehash_mask + 1, |
| 3985 | 17, /* one slot per 128 KB of memory */ |
| 3986 | 0, |
| 3987 | &tcp_hashinfo.bhash_size, |
| 3988 | NULL, |
| 3989 | 0, |
| 3990 | 64 * 1024); |
| 3991 | tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; |
| 3992 | for (i = 0; i < tcp_hashinfo.bhash_size; i++) { |
| 3993 | spin_lock_init(&tcp_hashinfo.bhash[i].lock); |
| 3994 | INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); |
| 3995 | } |
| 3996 | |
| 3997 | |
| 3998 | cnt = tcp_hashinfo.ehash_mask + 1; |
| 3999 | sysctl_tcp_max_orphans = cnt / 2; |
| 4000 | |
| 4001 | tcp_init_mem(); |
| 4002 | /* Set per-socket limits to no more than 1/128 the pressure threshold */ |
| 4003 | limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); |
| 4004 | max_wshare = min(4UL*1024*1024, limit); |
| 4005 | max_rshare = min(6UL*1024*1024, limit); |
| 4006 | |
| 4007 | init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; |
| 4008 | init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024; |
| 4009 | init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare); |
| 4010 | |
| 4011 | init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 4012 | init_net.ipv4.sysctl_tcp_rmem[1] = 131072; |
| 4013 | init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4014 | |
| 4015 | pr_info("Hash tables configured (established %u bind %u)\n", |
| 4016 | tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); |
| 4017 | |
| 4018 | tcp_v4_init(); |
| 4019 | tcp_metrics_init(); |
| 4020 | BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); |
| 4021 | tcp_tasklet_init(); |
| 4022 | } |