Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 770917d..ab4eb5e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -9,11 +10,6 @@
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _TCP_H
#define _TCP_H
@@ -47,6 +43,7 @@
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
+#include <linux/siphash.h>
extern struct inet_hashinfo tcp_hashinfo;
@@ -55,6 +52,8 @@
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
+#define TCP_MIN_SND_MSS 48
+#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
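
The two defines above put a floor under the sender MSS and, by extension, under the GSO segment size: with a 48-byte minimum and 40 bytes reserved for options, at least 8 bytes of payload remain per segment. A minimal userspace sketch of that arithmetic, reusing the same constant values (clamp_snd_mss() is an illustrative helper, not the kernel code that consumes these macros):

    #include <assert.h>
    #include <stdio.h>

    #define MAX_TCP_OPTION_SPACE 40
    #define TCP_MIN_SND_MSS      48
    #define TCP_MIN_GSO_SIZE     (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

    /* Illustrative clamp of a peer-advertised MSS to the sending floor. */
    static unsigned int clamp_snd_mss(unsigned int advertised_mss)
    {
            return advertised_mss < TCP_MIN_SND_MSS ? TCP_MIN_SND_MSS
                                                     : advertised_mss;
    }

    int main(void)
    {
            assert(TCP_MIN_GSO_SIZE == 8);  /* 48 - 40 */
            printf("%u %u\n", clamp_snd_mss(1), clamp_snd_mss(1460)); /* 48 1460 */
            return 0;
    }
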
/*
* Never offer a window over 32767 without using window scaling. Some
@@ -65,7 +64,7 @@
/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS 88U
-/* The least MTU to use for probing */
+/* The initial MTU to use for probing */
#define TCP_BASE_MSS 1024
/* probing interval, default to 10 minutes as per RFC4821 */
@@ -259,7 +258,7 @@
mem_cgroup_under_socket_pressure(sk->sk_memcg))
return true;
- return tcp_memory_pressure;
+ return READ_ONCE(tcp_memory_pressure);
}
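
The lockless read of tcp_memory_pressure is now annotated with READ_ONCE(), which forces a single, tear-free load. The closest portable-C analogue is a relaxed atomic load; a small sketch of the pattern, with made-up names, assuming C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool memory_pressure;     /* flipped by other threads */

    /* Writer: publish the flag without holding any lock. */
    static void set_pressure(bool on)
    {
            atomic_store_explicit(&memory_pressure, on, memory_order_relaxed);
    }

    /* Reader: one tear-free load, the userspace analogue of READ_ONCE(). */
    static bool under_pressure(void)
    {
            return atomic_load_explicit(&memory_pressure, memory_order_relaxed);
    }

    int main(void)
    {
            set_pressure(true);
            printf("%d\n", under_pressure());       /* 1 */
            return 0;
    }
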
/*
* The next routines deal with comparing 32 bit unsigned ints
@@ -313,7 +312,7 @@
void tcp_tasklet_init(void);
-void tcp_v4_err(struct sk_buff *skb, u32);
+int tcp_v4_err(struct sk_buff *skb, u32);
void tcp_shutdown(struct sock *sk, int how);
@@ -406,14 +405,26 @@
int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
void tcp_data_ready(struct sock *sk);
+#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
+#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
+ * BPF SKB-less helpers
+ */
+u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
+ struct tcphdr *th, u32 *cookie);
+u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
+ struct tcphdr *th, u32 *cookie);
+u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
+ const struct tcp_request_sock_ops *af_ops,
+ struct sock *sk, struct tcphdr *th);
+/*
* TCP v4 functions exported for the inet6 API
*/
@@ -732,7 +743,7 @@
static inline u64 tcp_clock_ns(void)
{
- return local_clock();
+ return ktime_get_ns();
}
static inline u64 tcp_clock_us(void)
@@ -752,17 +763,7 @@
return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
}
-
-/* Refresh 1us clock of a TCP socket,
- * ensuring monotically increasing values.
- */
-static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
-{
- u64 val = tcp_clock_us();
-
- if (val > tp->tcp_mstamp)
- tp->tcp_mstamp = val;
-}
+void tcp_mstamp_refresh(struct tcp_sock *tp);
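
tcp_mstamp_refresh() is now out of line, but its contract is unchanged: refresh tp->tcp_mstamp from the microsecond clock while never letting it move backwards. A userspace sketch of the same "only move forward" refresh, with CLOCK_MONOTONIC standing in for tcp_clock_us() (names are illustrative, not kernel API):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Microsecond monotonic clock, standing in for tcp_clock_us(). */
    static uint64_t clock_us(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
    }

    /* Refresh a cached timestamp, never letting it go backwards. */
    static void mstamp_refresh(uint64_t *mstamp)
    {
            uint64_t val = clock_us();

            if (val > *mstamp)
                    *mstamp = val;
    }

    int main(void)
    {
            uint64_t mstamp = 0;

            mstamp_refresh(&mstamp);
            printf("%llu us\n", (unsigned long long)mstamp);
            return 0;
    }
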
static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
@@ -771,7 +772,13 @@
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* provide the departure time in us unit */
+static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
+{
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
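
Both helpers now derive from the nanosecond skb_mstamp_ns field: one scales it to the TCP timestamp clock, the other to microseconds. A quick sketch of the two conversions, assuming TCP_TS_HZ is 1000 as defined elsewhere in this header (so one timestamp tick is one millisecond):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000ULL
    #define NSEC_PER_USEC 1000ULL
    #define TCP_TS_HZ     1000ULL   /* assumed; matches this header */

    int main(void)
    {
            uint64_t mstamp_ns = 1234567890ULL;     /* ~1.2345 s after clock start */
            uint32_t ts = mstamp_ns / (NSEC_PER_SEC / TCP_TS_HZ);   /* 1234 ticks (ms) */
            uint64_t us = mstamp_ns / NSEC_PER_USEC;                /* 1234567 us */

            printf("ts=%" PRIu32 " us=%" PRIu64 "\n", ts, us);
            return 0;
    }
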
@@ -817,7 +824,7 @@
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
#define TCPCB_LOST 0x04 /* SKB is lost */
#define TCPCB_TAGBITS 0x07 /* All tag bits */
-#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */
+#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
TCPCB_REPAIRED)
@@ -862,6 +869,21 @@
TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}
+static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
+{
+ return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
+}
+
+static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
+{
+ return TCP_SKB_CB(skb)->bpf.sk_redir;
+}
+
+static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
* as TCP moves IP6CB into a different location in skb->cb[]
@@ -1052,7 +1074,8 @@
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
+ bool reinit, bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
@@ -1113,7 +1136,7 @@
*/
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
- return tp->rx_opt.sack_ok;
+ return likely(tp->rx_opt.sack_ok);
}
static inline bool tcp_is_reno(const struct tcp_sock *tp)
@@ -1234,8 +1257,31 @@
return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}
+/* Return in jiffies the delay before one skb is sent.
+ * If @skb is NULL, we look at EDT for next packet being sent on the socket.
+ */
+static inline unsigned long tcp_pacing_delay(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
+
+ pacing_delay -= tcp_sk(sk)->tcp_clock_cache;
+
+ return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0;
+}
+
+static inline void tcp_reset_xmit_timer(struct sock *sk,
+ const int what,
+ unsigned long when,
+ const unsigned long max_when,
+ const struct sk_buff *skb)
+{
+ inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
+ max_when);
+}
+
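
tcp_pacing_delay() compares a packet's Earliest Departure Time (or the socket's next EDT when @skb is NULL) with the cached pacing clock and converts any positive remainder to jiffies, so a rearmed timer cannot fire before pacing would let the packet out. A standalone sketch of that computation, assuming HZ == 1000 for the nanoseconds-to-jiffies step (helper names are illustrative):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_JIFFY 1000000ULL       /* assume HZ == 1000 */

    /* Jiffies to wait before @edt_ns, given the pacing clock snapshot @clock_ns. */
    static uint64_t pacing_delay_jiffies(int64_t edt_ns, int64_t clock_ns)
    {
            int64_t delay_ns = edt_ns - clock_ns;

            return delay_ns > 0 ? (uint64_t)delay_ns / NSEC_PER_JIFFY : 0;
    }

    int main(void)
    {
            /* Departure 2.5 ms away: push the timer out by 2 jiffies. */
            printf("%" PRIu64 "\n", pacing_delay_jiffies(12500000, 10000000));
            /* EDT already passed: no extra delay. */
            printf("%" PRIu64 "\n", pacing_delay_jiffies(9000000, 10000000));
            return 0;
    }
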
/* Something is really bad, we could not queue an additional packet,
- * because qdisc is full or receiver sent a 0 window.
+ * because qdisc is full or receiver sent a 0 window, or we are paced.
* We do not want to add fuel to the fire, or abort too early,
* so make sure the timer we arm now is at least 200ms in the future,
* regardless of current icsk_rto value (as it could be ~2ms)
@@ -1257,8 +1303,9 @@
static inline void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_base(sk), TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ tcp_probe0_base(sk), TCP_RTO_MAX,
+ NULL);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@ -1277,36 +1324,19 @@
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
__be32 daddr, __wsum base)
{
- return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
-}
-
-static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
-{
- return __skb_checksum_complete(skb);
+ return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
- __tcp_checksum_complete(skb);
+ __skb_checksum_complete(skb);
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
-
-#undef STATE_TRACE
-
-#ifdef STATE_TRACE
-static const char *statename[]={
- "Unused","Established","Syn Sent","Syn Recv",
- "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
- "Close Wait","Last ACK","Listen","Closing"
-};
-#endif
void tcp_set_state(struct sock *sk, int state);
-
void tcp_done(struct sock *sk);
-
int tcp_abort(struct sock *sk, int err);
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
@@ -1350,13 +1380,14 @@
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
- return tcp_win_from_space(sk, sk->sk_rcvbuf -
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+ READ_ONCE(sk->sk_backlog.len) -
atomic_read(&sk->sk_rmem_alloc));
}
static inline int tcp_full_space(const struct sock *sk)
{
- return tcp_win_from_space(sk, sk->sk_rcvbuf);
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
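
tcp_space() now subtracts the backlog length as well, so bytes parked in the socket backlog shrink the advertised receive window just like memory already charged to the receive queue. A small numeric sketch of the accounting; the "reserve half for overhead" scaling below is a simplifying assumption, the kernel derives the real factor from tcp_adv_win_scale:

    #include <stdio.h>

    /* Simplified stand-in for tcp_win_from_space(): reserve half for overhead. */
    static int win_from_space(int space)
    {
            return space / 2;
    }

    static int tcp_space_like(int rcvbuf, int backlog_len, int rmem_alloc)
    {
            return win_from_space(rcvbuf - backlog_len - rmem_alloc);
    }

    int main(void)
    {
            /* 64 KB buffer, 8 KB parked in the backlog, 16 KB already queued. */
            printf("%d\n", tcp_space_like(65536, 8192, 16384));     /* 20480 */
            /* Can go negative; the caller comment warns about exactly that. */
            printf("%d\n", tcp_space_like(65536, 65536, 16384));    /* -8192 */
            return 0;
    }
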
extern void tcp_openreq_init_rwin(struct request_sock *req,
@@ -1537,9 +1568,21 @@
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
- const union tcp_md5_addr *addr,
- int family);
+#include <linux/jump_label.h>
+extern struct static_key_false tcp_md5_needed;
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family);
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family)
+{
+ if (!static_branch_unlikely(&tcp_md5_needed))
+ return NULL;
+ return __tcp_md5_do_lookup(sk, addr, family);
+}
+
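
The MD5 lookup is now gated behind the tcp_md5_needed static key, so systems that never configure TCP-MD5 pay only a patched-out branch instead of a hash lookup on every segment. The same "off until first use" shape in plain C, with an ordinary boolean standing in for the jump label (all names below are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct md5_key { char peer[32]; };

    static bool md5_needed;                 /* kernel: static_branch(&tcp_md5_needed) */
    static struct md5_key keys[16];
    static size_t nr_keys;

    /* Slow path: only reached once at least one key has ever been installed. */
    static struct md5_key *__md5_lookup(const char *peer)
    {
            for (size_t i = 0; i < nr_keys; i++)
                    if (!strcmp(keys[i].peer, peer))
                            return &keys[i];
            return NULL;
    }

    static struct md5_key *md5_lookup(const char *peer)
    {
            if (!md5_needed)                /* common case: feature never enabled */
                    return NULL;
            return __md5_lookup(peer);
    }

    static void md5_add_key(const char *peer)
    {
            snprintf(keys[nr_keys].peer, sizeof(keys[nr_keys].peer), "%s", peer);
            nr_keys++;
            md5_needed = true;              /* kernel: static_branch_inc() */
    }

    int main(void)
    {
            printf("%p\n", (void *)md5_lookup("192.0.2.1"));        /* NULL: fast path */
            md5_add_key("192.0.2.1");
            printf("%p\n", (void *)md5_lookup("192.0.2.1"));        /* found */
            return 0;
    }
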
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
@@ -1576,12 +1619,13 @@
struct msghdr *data; /* data in MSG_FASTOPEN */
size_t size;
int copied; /* queued in tcp_connect() */
+ struct ubuf_info *uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
- void *key, unsigned int len);
+ void *primary_key, void *backup_key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
@@ -1591,13 +1635,16 @@
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
-#define TCP_FASTOPEN_KEY_LENGTH 16
+#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
+#define TCP_FASTOPEN_KEY_MAX 2
+#define TCP_FASTOPEN_KEY_BUF_LENGTH \
+ (TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
/* Fastopen key context */
struct tcp_fastopen_context {
- struct crypto_cipher *tfm;
- __u8 key[TCP_FASTOPEN_KEY_LENGTH];
- struct rcu_head rcu;
+ siphash_key_t key[TCP_FASTOPEN_KEY_MAX];
+ int num;
+ struct rcu_head rcu;
};
extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
@@ -1606,6 +1653,35 @@
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
+/* Caller needs to wrap with rcu_read_(un)lock() */
+static inline
+struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
+{
+ struct tcp_fastopen_context *ctx;
+
+ ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
+ if (!ctx)
+ ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
+ return ctx;
+}
+
+static inline
+bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
+ const struct tcp_fastopen_cookie *orig)
+{
+ if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
+ orig->len == foc->len &&
+ !memcmp(orig->val, foc->val, foc->len))
+ return true;
+ return false;
+}
+
+static inline
+int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
+{
+ return ctx->num;
+}
+
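
tcp_fastopen_cookie_match() accepts a client cookie only when it has the full expected length and compares equal byte for byte, and with up to two siphash keys in the context a server can check the primary key first and then the backup, which is what makes key rotation seamless for clients holding older cookies. A reduced sketch of that verification flow; make_cookie() below is a placeholder, not the kernel's actual siphash derivation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define COOKIE_SIZE 8
    #define KEY_MAX     2

    struct tfo_ctx { uint64_t key[KEY_MAX]; int num; };

    /* Placeholder derivation; the kernel uses siphash over the peer address. */
    static void make_cookie(uint64_t key, uint32_t peer, uint8_t out[COOKIE_SIZE])
    {
            uint64_t v = key ^ ((uint64_t)peer * 0x9e3779b97f4a7c15ULL);

            memcpy(out, &v, COOKIE_SIZE);
    }

    static bool cookie_valid(const struct tfo_ctx *ctx, uint32_t peer,
                             const uint8_t *cookie, int len)
    {
            uint8_t expect[COOKIE_SIZE];
            int i;

            if (len != COOKIE_SIZE)
                    return false;
            for (i = 0; i < ctx->num; i++) {        /* primary first, then backup */
                    make_cookie(ctx->key[i], peer, expect);
                    if (!memcmp(expect, cookie, COOKIE_SIZE))
                            return true;
            }
            return false;
    }

    int main(void)
    {
            struct tfo_ctx ctx = { .key = { 1111, 2222 }, .num = 2 };
            uint8_t cookie[COOKIE_SIZE];

            make_cookie(2222, 0xc0000201, cookie);  /* minted under the old key */
            printf("%d\n", cookie_valid(&ctx, 0xc0000201, cookie, COOKIE_SIZE)); /* 1 */
            return 0;
    }
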
/* Latencies incurred by various limits for a sender. They are
* chronograph-like stats that are mutually exclusive.
*/
@@ -1644,6 +1720,11 @@
return skb_rb_first(&sk->tcp_rtx_queue);
}
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+ return skb_rb_last(&sk->tcp_rtx_queue);
+}
+
static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
return skb_peek(&sk->sk_write_queue);
@@ -1683,20 +1764,9 @@
return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}
-static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
-{
- if (tcp_write_queue_empty(sk))
- tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
-}
-
-static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
-{
- __skb_queue_tail(&sk->sk_write_queue, skb);
-}
-
static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
- __tcp_add_write_queue_tail(sk, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
/* Queue it, remembering where we must start sending. */
if (sk->sk_write_queue.next == skb)
@@ -1840,12 +1910,17 @@
return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}
-static inline bool tcp_stream_memory_free(const struct sock *sk)
+/* @wake is one when sk_stream_write_space() calls us.
+ * This sends EPOLLOUT only if notsent_bytes is half the limit.
+ * This mimics the strategy used in sock_def_write_space().
+ */
+static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
const struct tcp_sock *tp = tcp_sk(sk);
- u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+ u32 notsent_bytes = READ_ONCE(tp->write_seq) -
+ READ_ONCE(tp->snd_nxt);
- return notsent_bytes < tcp_notsent_lowat(tp);
+ return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
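
With @wake set, the unsent byte count is doubled before the comparison, so the wakeup path only reports writability once unsent data has drained below half of notsent_lowat, mirroring sock_def_write_space(). A tiny numeric sketch of that hysteresis:

    #include <stdbool.h>
    #include <stdio.h>

    /* Writable test: on the wakeup path (@wake == 1) require twice the headroom. */
    static bool stream_memory_free(unsigned int notsent_bytes,
                                   unsigned int notsent_lowat, int wake)
    {
            return (notsent_bytes << wake) < notsent_lowat;
    }

    int main(void)
    {
            /* lowat = 16 KB, 10 KB still unsent */
            printf("%d\n", stream_memory_free(10240, 16384, 0));    /* 1: poll path OK */
            printf("%d\n", stream_memory_free(10240, 16384, 1));    /* 0: no EPOLLOUT yet */
            printf("%d\n", stream_memory_free(4096, 16384, 1));     /* 1: drained below half */
            return 0;
    }
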
#ifdef CONFIG_PROC_FS
@@ -1940,7 +2015,7 @@
{
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
- u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}
@@ -2040,35 +2115,45 @@
#define TCP_ULP_MAX 128
#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
-enum {
- TCP_ULP_TLS,
- TCP_ULP_BPF,
-};
-
struct tcp_ulp_ops {
struct list_head list;
/* initialize ulp */
int (*init)(struct sock *sk);
+ /* update ulp */
+ void (*update)(struct sock *sk, struct proto *p);
/* cleanup ulp */
void (*release)(struct sock *sk);
+ /* diagnostic */
+ int (*get_info)(const struct sock *sk, struct sk_buff *skb);
+ size_t (*get_info_size)(const struct sock *sk);
- int uid;
char name[TCP_ULP_NAME_MAX];
- bool user_visible;
struct module *owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
-int tcp_set_ulp_id(struct sock *sk, const int ulp);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
+void tcp_update_ulp(struct sock *sk, struct proto *p);
#define MODULE_ALIAS_TCP_ULP(name) \
__MODULE_INFO(alias, alias_userspace, name); \
__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+struct sk_msg;
+struct sk_psock;
+
+int tcp_bpf_init(struct sock *sk);
+void tcp_bpf_reinit(struct sock *sk);
+int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
+ int flags);
+int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len);
+int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
+ struct msghdr *msg, int len, int flags);
+
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
* program does not support the chosen operation or there is no BPF
@@ -2160,6 +2245,12 @@
return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}
+static inline void tcp_bpf_rtt(struct sock *sk)
+{
+ if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
+ tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
+}
+
#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif
@@ -2168,7 +2259,29 @@
void clean_acked_data_enable(struct inet_connection_sock *icsk,
void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
-
+void clean_acked_data_flush(void);
#endif
+DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
+static inline void tcp_add_tx_delay(struct sk_buff *skb,
+ const struct tcp_sock *tp)
+{
+ if (static_branch_unlikely(&tcp_tx_delay_enabled))
+ skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
+}
+
+/* Compute Earliest Departure Time for some control packets
+ * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.
+ */
+static inline u64 tcp_transmit_time(const struct sock *sk)
+{
+ if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
+ u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
+ tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
+
+ return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
+ }
+ return 0;
+}
+
#endif /* _TCP_H */