Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/net/tls/Kconfig b/net/tls/Kconfig
index e4328b3..fa0724f 100644
--- a/net/tls/Kconfig
+++ b/net/tls/Kconfig
@@ -11,7 +11,7 @@
select STREAM_PARSER
select NET_SOCK_MSG
default n
- ---help---
+ help
Enable kernel support for TLS protocol. This allows symmetric
encryption handling of the TLS protocol to be done in-kernel.
@@ -26,3 +26,13 @@
Enable kernel support for HW offload of the TLS protocol.
If unsure, say N.
+
+config TLS_TOE
+ bool "Transport Layer Security TCP stack bypass"
+ depends on TLS
+ default n
+ help
+ Enable kernel support for legacy HW offload of the TLS protocol,
+ which is incompatible with the Linux networking stack semantics.
+
+ If unsure, say N.
diff --git a/net/tls/Makefile b/net/tls/Makefile
index ef0dc74..f1ffbfe 100644
--- a/net/tls/Makefile
+++ b/net/tls/Makefile
@@ -3,8 +3,11 @@
# Makefile for the TLS subsystem.
#
+CFLAGS_trace.o := -I$(src)
+
obj-$(CONFIG_TLS) += tls.o
-tls-y := tls_main.o tls_sw.o
+tls-y := tls_main.o tls_sw.o tls_proc.o trace.o
+tls-$(CONFIG_TLS_TOE) += tls_toe.o
tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o
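
The -I$(src) on trace.o is required by the tracepoint machinery: when trace.c defines CREATE_TRACE_POINTS, the tail of trace.h (see the end of this patch) re-includes the header through <trace/define_trace.h>, and the include path "." there is resolved against the compiler's search path rather than the source directory:

	/* tail of net/tls/trace.h, quoted for reference */
	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH .
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE trace
	#include <trace/define_trace.h>
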
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 0f034c3..f718c73 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -38,6 +38,8 @@
#include <net/tcp.h>
#include <net/tls.h>
+#include "trace.h"
+
/* device_offload_lock is used to synchronize tls_dev_add
* against NETDEV_DOWN notifications.
*/
@@ -48,6 +50,7 @@
static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
+static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);
static void tls_device_free_ctx(struct tls_context *ctx)
@@ -176,7 +179,7 @@
* socket and no in-flight SKBs associated with this
* socket, so it is safe to free all the resources.
*/
-static void tls_device_sk_destruct(struct sock *sk)
+void tls_device_sk_destruct(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
@@ -194,6 +197,7 @@
if (refcount_dec_and_test(&tls_ctx->refcount))
tls_device_queue_ctx_destruction(tls_ctx);
}
+EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
void tls_device_free_resources_tx(struct sock *sk)
{
@@ -202,6 +206,15 @@
tls_free_partial_record(sk, tls_ctx);
}
+void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
+ WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
+}
+EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
+
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
u32 seq)
{
@@ -216,6 +229,7 @@
rcd_sn = tls_ctx->tx.rec_seq;
+ trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
down_read(&device_offload_lock);
netdev = tls_ctx->netdev;
if (netdev)
@@ -353,7 +367,7 @@
if (!offload_ctx->open_record) {
if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
sk->sk_allocation))) {
- sk->sk_prot->enter_memory_pressure(sk);
+ READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
return -ENOMEM;
}
@@ -419,7 +433,7 @@
~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
return -EOPNOTSUPP;
- if (sk->sk_err)
+ if (unlikely(sk->sk_err))
return -sk->sk_err;
flags |= MSG_SENDPAGE_DECRYPTED;
@@ -440,9 +454,8 @@
max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
prot->prepend_size;
do {
- rc = tls_do_allocation(sk, ctx, pfrag,
- prot->prepend_size);
- if (rc) {
+ rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
+ if (unlikely(rc)) {
rc = sk_stream_wait_memory(sk, &timeo);
if (!rc)
continue;
@@ -665,15 +678,73 @@
static void tls_device_resync_rx(struct tls_context *tls_ctx,
struct sock *sk, u32 seq, u8 *rcd_sn)
{
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
struct net_device *netdev;
- if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
- return;
+ trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
+ rcu_read_lock();
netdev = READ_ONCE(tls_ctx->netdev);
if (netdev)
netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
TLS_OFFLOAD_CTX_DIR_RX);
- clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+ rcu_read_unlock();
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
+}
+
+static bool
+tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
+ s64 resync_req, u32 *seq, u16 *rcd_delta)
+{
+ u32 is_async = resync_req & RESYNC_REQ_ASYNC;
+ u32 req_seq = resync_req >> 32;
+ u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
+ u16 i;
+
+ *rcd_delta = 0;
+
+ if (is_async) {
+ /* shouldn't get to wraparound: spending this long in the
+ * async stage means something bad happened
+ */
+ if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+ return false;
+
+ /* asynchronous stage: log all headers seq such that
+ * req_seq <= seq <= end_seq, and wait for real resync request
+ */
+ if (before(*seq, req_seq))
+ return false;
+ if (!after(*seq, req_end) &&
+ resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
+ resync_async->log[resync_async->loglen++] = *seq;
+
+ resync_async->rcd_delta++;
+
+ return false;
+ }
+
+ /* synchronous stage: check against the logged entries and
+ * proceed to check the next entries if no match was found
+ */
+ for (i = 0; i < resync_async->loglen; i++)
+ if (req_seq == resync_async->log[i] &&
+ atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
+ *rcd_delta = resync_async->rcd_delta - i;
+ *seq = req_seq;
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
+ return true;
+ }
+
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
+
+ if (req_seq == *seq &&
+ atomic64_try_cmpxchg(&resync_async->req,
+ &resync_req, 0))
+ return true;
+
+ return false;
}
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
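
For context on how this two-stage state machine is armed: the driver publishes a single 64-bit word that tls_device_rx_resync_async() above unpacks (sequence number in bits 63:32, a record-length hint in bits 31:16, REQ/ASYNC flags in the low bits). A hypothetical driver-side sketch, modeled on the request helpers in include/net/tls.h (the flag values and helper name here are assumptions, not part of this hunk):

	#include <linux/atomic.h>
	#include <linux/bits.h>
	#include <net/tls.h>

	#define RESYNC_REQ		BIT(0)
	#define RESYNC_REQ_ASYNC	BIT(1)

	/* post an asynchronous RX resync request; consumed by
	 * tls_device_rx_resync_async() above
	 */
	static void post_async_resync(struct tls_offload_resync_async *a,
				      __be32 seq, u16 len)
	{
		atomic64_set(&a->req, ((u64)ntohl(seq) << 32) |
				      ((u64)len << 16) |
				      RESYNC_REQ | RESYNC_REQ_ASYNC);
	}
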
@@ -681,13 +752,16 @@
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+ u32 sock_data, is_req_pending;
struct tls_prot_info *prot;
- u32 is_req_pending;
s64 resync_req;
+ u16 rcd_delta;
u32 req_seq;
if (tls_ctx->rx_conf != TLS_HW)
return;
+ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
+ return;
prot = &tls_ctx->prot_info;
rx_ctx = tls_offload_ctx_rx(tls_ctx);
@@ -711,13 +785,28 @@
/* head of next rec is already in, note that the sock_inq will
* include the currently parsed message when called from parser
*/
- if (tcp_inq(sk) > rcd_len)
+ sock_data = tcp_inq(sk);
+ if (sock_data > rcd_len) {
+ trace_tls_device_rx_resync_nh_delay(sk, sock_data,
+ rcd_len);
return;
+ }
rx_ctx->resync_nh_do_now = 0;
seq += rcd_len;
tls_bigint_increment(rcd_sn, prot->rec_seq_size);
break;
+ case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
+ resync_req = atomic64_read(&rx_ctx->resync_async->req);
+ is_req_pending = resync_req;
+ if (likely(!is_req_pending))
+ return;
+
+ if (!tls_device_rx_resync_async(rx_ctx->resync_async,
+ resync_req, &seq, &rcd_delta))
+ return;
+ tls_bigint_subtract(rcd_sn, rcd_delta);
+ break;
}
tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
@@ -756,6 +845,7 @@
/* head of next rec is already in, parser will sync for us */
if (tcp_inq(sk) > rxm->full_len) {
+ trace_tls_device_rx_resync_nh_schedule(sk);
ctx->resync_nh_do_now = 1;
} else {
struct tls_prot_info *prot = &tls_ctx->prot_info;
@@ -854,9 +944,9 @@
return err;
}
-int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
+int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
+ struct sk_buff *skb, struct strp_msg *rxm)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
int is_decrypted = skb->decrypted;
int is_encrypted = !is_decrypted;
@@ -868,8 +958,23 @@
is_encrypted &= !skb_iter->decrypted;
}
+ trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
+ tls_ctx->rx.rec_seq, rxm->full_len,
+ is_encrypted, is_decrypted);
+
ctx->sw.decrypted |= is_decrypted;
+ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
+ if (likely(is_encrypted || is_decrypted))
+ return 0;
+
+ /* After tls_device_down disables the offload, the next SKB will
+ * likely have initial fragments decrypted, and final ones not
+ * decrypted. We need to reencrypt that single SKB.
+ */
+ return tls_device_reencrypt(sk, skb);
+ }
+
/* Return immediately if the record is either entirely plaintext or
* entirely ciphertext. Otherwise handle reencrypt partially decrypted
* record.
@@ -899,7 +1004,7 @@
spin_unlock_irq(&tls_device_lock);
ctx->sk_destruct = sk->sk_destruct;
- sk->sk_destruct = tls_device_sk_destruct;
+ smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
}
}
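
Exporting tls_device_sk_destruct and installing it with smp_store_release() lets driver code detect RX offload on a socket by comparing the destructor pointer. A minimal sketch of such a check, assuming the tls_is_sk_rx_device_offloaded() helper shape in include/net/tls.h (the acquire load pairs with the release store above):

	static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
	{
		if (!sk_fullsock(sk))
			return false;
		/* pairs with smp_store_release(&sk->sk_destruct, ...) */
		return smp_load_acquire(&sk->sk_destruct) ==
		       &tls_device_sk_destruct;
	}
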
@@ -1041,6 +1146,8 @@
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
&ctx->crypto_send.info,
tcp_sk(sk)->write_seq);
+ trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
+ tcp_sk(sk)->write_seq, rec_seq, rc);
if (rc)
goto release_lock;
@@ -1077,6 +1184,7 @@
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
+ struct tls12_crypto_info_aes_gcm_128 *info;
struct tls_offload_context_rx *context;
struct net_device *netdev;
int rc = 0;
@@ -1124,6 +1232,9 @@
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
&ctx->crypto_recv.info,
tcp_sk(sk)->copied_seq);
+ info = (void *)&ctx->crypto_recv.info;
+ trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
+ tcp_sk(sk)->copied_seq, info->rec_seq, rc);
if (rc)
goto free_sw_resources;
@@ -1191,6 +1302,26 @@
spin_unlock_irqrestore(&tls_device_lock, flags);
list_for_each_entry_safe(ctx, tmp, &list, list) {
+ /* Stop offloaded TX and switch to the fallback.
+ * tls_is_sk_tx_device_offloaded will return false.
+ */
+ WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
+
+ /* Stop the RX and TX resync.
+ * tls_dev_resync must not be called after tls_dev_del.
+ */
+ WRITE_ONCE(ctx->netdev, NULL);
+
+ /* Start skipping the RX resync logic completely. */
+ set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
+
+ /* Sync with inflight packets. After this point:
+ * TX: no non-encrypted packets will be passed to the driver.
+ * RX: resync requests from the driver will be ignored.
+ */
+ synchronize_net();
+
+ /* Release the offload context on the driver side. */
if (ctx->tx_conf == TLS_HW)
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_TX);
@@ -1198,15 +1329,21 @@
!test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_RX);
- WRITE_ONCE(ctx->netdev, NULL);
- smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
- while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
- usleep_range(10, 200);
- dev_put(netdev);
- list_del_init(&ctx->list);
- if (refcount_dec_and_test(&ctx->refcount))
- tls_device_free_ctx(ctx);
+ dev_put(netdev);
+
+ /* Move the context to a separate list for two reasons:
+ * 1. When the context is deallocated, list_del is called.
+ * 2. It's no longer an offloaded context, so we don't want to
+ * run offload-specific code on this context.
+ */
+ spin_lock_irqsave(&tls_device_lock, flags);
+ list_move_tail(&ctx->list, &tls_device_down_list);
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+
+ /* Device contexts for RX and TX will be freed later on sk_destruct
+ * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+ */
}
up_write(&device_offload_lock);
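
The first step of this teardown works because the TX fast path decides whether a socket is offloaded purely by comparing the installed callback: once sk_validate_xmit_skb points at the SW fallback, the driver stops receiving cleartext. A sketch of that check, following the include/net/tls.h helper (treat the exact body as an assumption):

	static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
	{
		return sk_fullsock(sk) &&
		       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
			&tls_validate_xmit_skb);
	}

The synchronize_net() then guarantees that any reader which sampled the old callback or ctx->netdev under rcu_read_lock() has finished before the driver-side contexts are released.
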
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 2889533..0d40016 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -430,6 +430,13 @@
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
+struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return tls_sw_fallback(sk, skb);
+}
+
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
return tls_sw_fallback(skb->sk, skb);
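
No device coordination is needed in the fallback because it is reached through the generic transmit path, which simply calls whatever hook is installed on the socket. Simplified sketch of the dispatch (modeled on sk_validate_xmit_skb() in include/net/sock.h; error handling elided):

	/* the core xmit path consults the per-socket hook, so swapping
	 * the hook atomically switches offload -> SW fallback
	 */
	static inline struct sk_buff *
	sk_validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
	{
		if (skb->sk && sk_fullsock(skb->sk) &&
		    skb->sk->sk_validate_xmit_skb)
			skb = skb->sk->sk_validate_xmit_skb(skb->sk, dev, skb);
		return skb;
	}
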
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 7aba4ee..58d22d6 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -41,7 +41,9 @@
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>
+#include <net/snmp.h>
#include <net/tls.h>
+#include <net/tls_toe.h>
MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
@@ -54,22 +56,23 @@
TLS_NUM_PROTS,
};
-static struct proto *saved_tcpv6_prot;
+static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
-static struct proto *saved_tcpv4_prot;
+static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
-static LIST_HEAD(device_list);
-static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
-static struct proto_ops tls_sw_proto_ops;
+static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
- struct proto *base);
+ const struct proto *base);
-static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
+void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
- sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
+ WRITE_ONCE(sk->sk_prot,
+ &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
+ WRITE_ONCE(sk->sk_socket->ops,
+ &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}
int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -278,14 +281,19 @@
kfree(ctx->tx.rec_seq);
kfree(ctx->tx.iv);
tls_sw_release_resources_tx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
} else if (ctx->tx_conf == TLS_HW) {
tls_device_free_resources_tx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
}
- if (ctx->rx_conf == TLS_SW)
+ if (ctx->rx_conf == TLS_SW) {
tls_sw_release_resources_rx(sk);
- else if (ctx->rx_conf == TLS_HW)
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
+ } else if (ctx->rx_conf == TLS_HW) {
tls_device_offload_cleanup_rx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
+ }
}
static void tls_sk_proto_close(struct sock *sk, long timeout)
@@ -307,7 +315,7 @@
write_lock_bh(&sk->sk_callback_lock);
if (free_ctx)
rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
- sk->sk_prot = ctx->sk_proto;
+ WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
if (sk->sk_write_space == tls_write_space)
sk->sk_write_space = ctx->sk_write_space;
write_unlock_bh(&sk->sk_callback_lock);
@@ -324,12 +332,13 @@
tls_ctx_free(sk, ctx);
}
-static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
- int __user *optlen)
+static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ int __user *optlen, int tx)
{
int rc = 0;
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_crypto_info *crypto_info;
+ struct cipher_context *cctx;
int len;
if (get_user(len, optlen))
@@ -346,7 +355,13 @@
}
/* get user crypto info */
- crypto_info = &ctx->crypto_send.info;
+ if (tx) {
+ crypto_info = &ctx->crypto_send.info;
+ cctx = &ctx->tx;
+ } else {
+ crypto_info = &ctx->crypto_recv.info;
+ cctx = &ctx->rx;
+ }
if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
rc = -EBUSY;
@@ -373,9 +388,9 @@
}
lock_sock(sk);
memcpy(crypto_info_aes_gcm_128->iv,
- ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
+ cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
TLS_CIPHER_AES_GCM_128_IV_SIZE);
- memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
+ memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
release_sock(sk);
if (copy_to_user(optval,
@@ -397,9 +412,9 @@
}
lock_sock(sk);
memcpy(crypto_info_aes_gcm_256->iv,
- ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
+ cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
TLS_CIPHER_AES_GCM_256_IV_SIZE);
- memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
+ memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
release_sock(sk);
if (copy_to_user(optval,
@@ -423,7 +438,9 @@
switch (optname) {
case TLS_TX:
- rc = do_tls_getsockopt_tx(sk, optval, optlen);
+ case TLS_RX:
+ rc = do_tls_getsockopt_conf(sk, optval, optlen,
+ optname == TLS_TX);
break;
default:
rc = -ENOPROTOOPT;
@@ -444,7 +461,7 @@
return do_tls_getsockopt(sk, optname, optval, optlen);
}
-static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
+static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
unsigned int optlen, int tx)
{
struct tls_crypto_info *crypto_info;
@@ -454,7 +471,7 @@
int rc = 0;
int conf;
- if (!optval || (optlen < sizeof(*crypto_info))) {
+ if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info))) {
rc = -EINVAL;
goto out;
}
@@ -473,7 +490,7 @@
goto out;
}
- rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
+ rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
if (rc) {
rc = -EFAULT;
goto err_crypto_info;
@@ -516,8 +533,9 @@
goto err_crypto_info;
}
- rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
- optlen - sizeof(*crypto_info));
+ rc = copy_from_sockptr_offset(crypto_info + 1, optval,
+ sizeof(*crypto_info),
+ optlen - sizeof(*crypto_info));
if (rc) {
rc = -EFAULT;
goto err_crypto_info;
@@ -526,19 +544,29 @@
if (tx) {
rc = tls_set_device_offload(sk, ctx);
conf = TLS_HW;
- if (rc) {
+ if (!rc) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
+ } else {
rc = tls_set_sw_offload(sk, ctx, 1);
if (rc)
goto err_crypto_info;
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
conf = TLS_SW;
}
} else {
rc = tls_set_device_offload_rx(sk, ctx);
conf = TLS_HW;
- if (rc) {
+ if (!rc) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
+ } else {
rc = tls_set_sw_offload(sk, ctx, 0);
if (rc)
goto err_crypto_info;
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
conf = TLS_SW;
}
tls_sw_strparser_arm(sk, ctx);
@@ -552,8 +580,6 @@
if (tx) {
ctx->sk_write_space = sk->sk_write_space;
sk->sk_write_space = tls_write_space;
- } else {
- sk->sk_socket->ops = &tls_sw_proto_ops;
}
goto out;
@@ -563,8 +589,8 @@
return rc;
}
-static int do_tls_setsockopt(struct sock *sk, int optname,
- char __user *optval, unsigned int optlen)
+static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
+ unsigned int optlen)
{
int rc = 0;
@@ -584,7 +610,7 @@
}
static int tls_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen)
+ sockptr_t optval, unsigned int optlen)
{
struct tls_context *ctx = tls_get_ctx(sk);
@@ -595,7 +621,7 @@
return do_tls_setsockopt(sk, optname, optval, optlen);
}
-static struct tls_context *create_ctx(struct sock *sk)
+struct tls_context *tls_ctx_create(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tls_context *ctx;
@@ -606,122 +632,77 @@
mutex_init(&ctx->tx_lock);
rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
- ctx->sk_proto = sk->sk_prot;
+ ctx->sk_proto = READ_ONCE(sk->sk_prot);
+ ctx->sk = sk;
return ctx;
}
+static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ const struct proto_ops *base)
+{
+ ops[TLS_BASE][TLS_BASE] = *base;
+
+ ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
+
+ ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
+
+ ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE];
+ ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read;
+
+#ifdef CONFIG_TLS_DEVICE
+ ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL;
+
+ ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
+ ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL;
+
+ ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
+
+ ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
+
+ ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
+ ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL;
+#endif
+#ifdef CONFIG_TLS_TOE
+ ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
+#endif
+}
+
static void tls_build_proto(struct sock *sk)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+ struct proto *prot = READ_ONCE(sk->sk_prot);
/* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
if (ip_ver == TLSV6 &&
- unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+ unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
mutex_lock(&tcpv6_prot_mutex);
- if (likely(sk->sk_prot != saved_tcpv6_prot)) {
- build_protos(tls_prots[TLSV6], sk->sk_prot);
- smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+ if (likely(prot != saved_tcpv6_prot)) {
+ build_protos(tls_prots[TLSV6], prot);
+ build_proto_ops(tls_proto_ops[TLSV6],
+ sk->sk_socket->ops);
+ smp_store_release(&saved_tcpv6_prot, prot);
}
mutex_unlock(&tcpv6_prot_mutex);
}
if (ip_ver == TLSV4 &&
- unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
+ unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
mutex_lock(&tcpv4_prot_mutex);
- if (likely(sk->sk_prot != saved_tcpv4_prot)) {
- build_protos(tls_prots[TLSV4], sk->sk_prot);
- smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
+ if (likely(prot != saved_tcpv4_prot)) {
+ build_protos(tls_prots[TLSV4], prot);
+ build_proto_ops(tls_proto_ops[TLSV4],
+ sk->sk_socket->ops);
+ smp_store_release(&saved_tcpv4_prot, prot);
}
mutex_unlock(&tcpv4_prot_mutex);
}
}
-static void tls_hw_sk_destruct(struct sock *sk)
-{
- struct tls_context *ctx = tls_get_ctx(sk);
- struct inet_connection_sock *icsk = inet_csk(sk);
-
- ctx->sk_destruct(sk);
- /* Free ctx */
- rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
- tls_ctx_free(sk, ctx);
-}
-
-static int tls_hw_prot(struct sock *sk)
-{
- struct tls_context *ctx;
- struct tls_device *dev;
- int rc = 0;
-
- spin_lock_bh(&device_spinlock);
- list_for_each_entry(dev, &device_list, dev_list) {
- if (dev->feature && dev->feature(dev)) {
- ctx = create_ctx(sk);
- if (!ctx)
- goto out;
-
- spin_unlock_bh(&device_spinlock);
- tls_build_proto(sk);
- ctx->sk_destruct = sk->sk_destruct;
- sk->sk_destruct = tls_hw_sk_destruct;
- ctx->rx_conf = TLS_HW_RECORD;
- ctx->tx_conf = TLS_HW_RECORD;
- update_sk_prot(sk, ctx);
- spin_lock_bh(&device_spinlock);
- rc = 1;
- break;
- }
- }
-out:
- spin_unlock_bh(&device_spinlock);
- return rc;
-}
-
-static void tls_hw_unhash(struct sock *sk)
-{
- struct tls_context *ctx = tls_get_ctx(sk);
- struct tls_device *dev;
-
- spin_lock_bh(&device_spinlock);
- list_for_each_entry(dev, &device_list, dev_list) {
- if (dev->unhash) {
- kref_get(&dev->kref);
- spin_unlock_bh(&device_spinlock);
- dev->unhash(dev, sk);
- kref_put(&dev->kref, dev->release);
- spin_lock_bh(&device_spinlock);
- }
- }
- spin_unlock_bh(&device_spinlock);
- ctx->sk_proto->unhash(sk);
-}
-
-static int tls_hw_hash(struct sock *sk)
-{
- struct tls_context *ctx = tls_get_ctx(sk);
- struct tls_device *dev;
- int err;
-
- err = ctx->sk_proto->hash(sk);
- spin_lock_bh(&device_spinlock);
- list_for_each_entry(dev, &device_list, dev_list) {
- if (dev->hash) {
- kref_get(&dev->kref);
- spin_unlock_bh(&device_spinlock);
- err |= dev->hash(dev, sk);
- kref_put(&dev->kref, dev->release);
- spin_lock_bh(&device_spinlock);
- }
- }
- spin_unlock_bh(&device_spinlock);
-
- if (err)
- tls_hw_unhash(sk);
- return err;
-}
-
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
- struct proto *base)
+ const struct proto *base)
{
prot[TLS_BASE][TLS_BASE] = *base;
prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
@@ -757,10 +738,11 @@
prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
-
+#ifdef CONFIG_TLS_TOE
prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
- prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
- prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
+ prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_toe_hash;
+ prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_toe_unhash;
+#endif
}
static int tls_init(struct sock *sk)
@@ -768,8 +750,12 @@
struct tls_context *ctx;
int rc = 0;
- if (tls_hw_prot(sk))
+ tls_build_proto(sk);
+
+#ifdef CONFIG_TLS_TOE
+ if (tls_toe_bypass(sk))
return 0;
+#endif
/* The TLS ulp is currently supported only for TCP sockets
* in ESTABLISHED state.
@@ -780,11 +766,9 @@
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
- tls_build_proto(sk);
-
/* allocate tls context */
write_lock_bh(&sk->sk_callback_lock);
- ctx = create_ctx(sk);
+ ctx = tls_ctx_create(sk);
if (!ctx) {
rc = -ENOMEM;
goto out;
@@ -808,7 +792,8 @@
ctx->sk_write_space = write_space;
ctx->sk_proto = p;
} else {
- sk->sk_prot = p;
+ /* Pairs with lockless read in sk_clone_lock(). */
+ WRITE_ONCE(sk->sk_prot, p);
sk->sk_write_space = write_space;
}
}
@@ -874,21 +859,34 @@
return size;
}
-void tls_register_device(struct tls_device *device)
+static int __net_init tls_init_net(struct net *net)
{
- spin_lock_bh(&device_spinlock);
- list_add_tail(&device->dev_list, &device_list);
- spin_unlock_bh(&device_spinlock);
-}
-EXPORT_SYMBOL(tls_register_device);
+ int err;
-void tls_unregister_device(struct tls_device *device)
-{
- spin_lock_bh(&device_spinlock);
- list_del(&device->dev_list);
- spin_unlock_bh(&device_spinlock);
+ net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
+ if (!net->mib.tls_statistics)
+ return -ENOMEM;
+
+ err = tls_proc_init(net);
+ if (err)
+ goto err_free_stats;
+
+ return 0;
+err_free_stats:
+ free_percpu(net->mib.tls_statistics);
+ return err;
}
-EXPORT_SYMBOL(tls_unregister_device);
+
+static void __net_exit tls_exit_net(struct net *net)
+{
+ tls_proc_fini(net);
+ free_percpu(net->mib.tls_statistics);
+}
+
+static struct pernet_operations tls_proc_ops = {
+ .init = tls_init_net,
+ .exit = tls_exit_net,
+};
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.name = "tls",
@@ -901,9 +899,11 @@
static int __init tls_register(void)
{
- tls_sw_proto_ops = inet_stream_ops;
- tls_sw_proto_ops.splice_read = tls_sw_splice_read;
- tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked,
+ int err;
+
+ err = register_pernet_subsys(&tls_proc_ops);
+ if (err)
+ return err;
tls_device_init();
tcp_register_ulp(&tcp_tls_ulp_ops);
@@ -915,6 +915,7 @@
{
tcp_unregister_ulp(&tcp_tls_ulp_ops);
tls_device_cleanup();
+ unregister_pernet_subsys(&tls_proc_ops);
}
module_init(tls_register);
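
The TLS_INC_STATS/TLS_DEC_STATS calls added throughout this file are thin wrappers over the generic per-CPU SNMP counters, keyed by the per-netns block allocated in tls_init_net(). Roughly, as defined in include/net/tls.h:

	#define TLS_INC_STATS(net, field)				\
		SNMP_INC_STATS((net)->mib.tls_statistics, field)
	#define TLS_DEC_STATS(net, field)				\
		SNMP_DEC_STATS((net)->mib.tls_statistics, field)
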
diff --git a/net/tls/tls_proc.c b/net/tls/tls_proc.c
new file mode 100644
index 0000000..feeceb0
--- /dev/null
+++ b/net/tls/tls_proc.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <net/snmp.h>
+#include <net/tls.h>
+
+#ifdef CONFIG_PROC_FS
+static const struct snmp_mib tls_mib_list[] = {
+ SNMP_MIB_ITEM("TlsCurrTxSw", LINUX_MIB_TLSCURRTXSW),
+ SNMP_MIB_ITEM("TlsCurrRxSw", LINUX_MIB_TLSCURRRXSW),
+ SNMP_MIB_ITEM("TlsCurrTxDevice", LINUX_MIB_TLSCURRTXDEVICE),
+ SNMP_MIB_ITEM("TlsCurrRxDevice", LINUX_MIB_TLSCURRRXDEVICE),
+ SNMP_MIB_ITEM("TlsTxSw", LINUX_MIB_TLSTXSW),
+ SNMP_MIB_ITEM("TlsRxSw", LINUX_MIB_TLSRXSW),
+ SNMP_MIB_ITEM("TlsTxDevice", LINUX_MIB_TLSTXDEVICE),
+ SNMP_MIB_ITEM("TlsRxDevice", LINUX_MIB_TLSRXDEVICE),
+ SNMP_MIB_ITEM("TlsDecryptError", LINUX_MIB_TLSDECRYPTERROR),
+ SNMP_MIB_ITEM("TlsRxDeviceResync", LINUX_MIB_TLSRXDEVICERESYNC),
+ SNMP_MIB_SENTINEL
+};
+
+static int tls_statistics_seq_show(struct seq_file *seq, void *v)
+{
+ unsigned long buf[LINUX_MIB_TLSMAX] = {};
+ struct net *net = seq->private;
+ int i;
+
+ snmp_get_cpu_field_batch(buf, tls_mib_list, net->mib.tls_statistics);
+ for (i = 0; tls_mib_list[i].name; i++)
+ seq_printf(seq, "%-32s\t%lu\n", tls_mib_list[i].name, buf[i]);
+
+ return 0;
+}
+#endif
+
+int __net_init tls_proc_init(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+ if (!proc_create_net_single("tls_stat", 0444, net->proc_net,
+ tls_statistics_seq_show, NULL))
+ return -ENOMEM;
+#endif /* CONFIG_PROC_FS */
+
+ return 0;
+}
+
+void __net_exit tls_proc_fini(struct net *net)
+{
+ remove_proc_entry("tls_stat", net->proc_net);
+}
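
Given the "%-32s\t%lu" format above, reading the new file in a namespace with no TLS sockets produces one zeroed line per MIB entry (illustrative output):

	TlsCurrTxSw                     	0
	TlsCurrRxSw                     	0
	TlsCurrTxDevice                 	0
	TlsCurrRxDevice                 	0
	TlsTxSw                         	0
	TlsRxSw                         	0
	TlsTxDevice                     	0
	TlsRxDevice                     	0
	TlsDecryptError                 	0
	TlsRxDeviceResync               	0
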
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 7fb5c06..8cd011e 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -35,6 +35,7 @@
* SOFTWARE.
*/
+#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/splice.h>
@@ -43,6 +44,14 @@
#include <net/strparser.h>
#include <net/tls.h>
+noinline void tls_err_abort(struct sock *sk, int err)
+{
+ WARN_ON_ONCE(err >= 0);
+ /* sk->sk_err should contain a positive error code. */
+ sk->sk_err = -err;
+ sk->sk_error_report(sk);
+}
+
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
unsigned int recursion_level)
{
@@ -169,6 +178,9 @@
/* Propagate if there was an err */
if (err) {
+ if (err == -EBADMSG)
+ TLS_INC_STATS(sock_net(skb->sk),
+ LINUX_MIB_TLSDECRYPTERROR);
ctx->async_wait.err = err;
tls_err_abort(skb->sk, err);
} else {
@@ -416,7 +428,7 @@
tx_err:
if (rc < 0 && rc != -EAGAIN)
- tls_err_abort(sk, EBADMSG);
+ tls_err_abort(sk, -EBADMSG);
return rc;
}
@@ -447,7 +459,7 @@
/* If err is already set on socket, return the same code */
if (sk->sk_err) {
- ctx->async_wait.err = sk->sk_err;
+ ctx->async_wait.err = -sk->sk_err;
} else {
ctx->async_wait.err = err;
tls_err_abort(sk, err);
@@ -503,7 +515,7 @@
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
prot->iv_size + prot->salt_size);
- xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);
+ xor_iv_with_seq(prot->version, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
sge->offset += prot->prepend_size;
sge->length -= prot->prepend_size;
@@ -668,7 +680,7 @@
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
- u32 i, split_point, uninitialized_var(orig_end);
+ u32 i, split_point, orig_end;
struct sk_msg *msg_pl, *msg_en;
struct aead_request *req;
bool split;
@@ -761,7 +773,7 @@
msg_pl->sg.size + prot->tail_size, i);
if (rc < 0) {
if (rc != -EINPROGRESS) {
- tls_err_abort(sk, EBADMSG);
+ tls_err_abort(sk, -EBADMSG);
if (split) {
tls_ctx->pending_open_record_frags = true;
tls_merge_open_record(sk, rec, tmp, orig_end);
@@ -933,7 +945,8 @@
int ret = 0;
int pending;
- if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
+ if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_CMSG_COMPAT))
return -EOPNOTSUPP;
mutex_lock(&tls_ctx->tx_lock);
@@ -1474,7 +1487,7 @@
else
memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
- xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
+ xor_iv_with_seq(prot->version, iv + iv_offset, tls_ctx->rx.rec_seq);
/* Prepare AAD */
tls_make_aad(aad, rxm->full_len - prot->overhead_size +
@@ -1543,7 +1556,7 @@
if (!ctx->decrypted) {
if (tls_ctx->rx_conf == TLS_HW) {
- err = tls_device_decrypted(sk, skb);
+ err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
if (err < 0)
return err;
}
@@ -1556,7 +1569,9 @@
if (err == -EINPROGRESS)
tls_advance_record_sn(sk, prot,
&tls_ctx->rx);
-
+ else if (err == -EBADMSG)
+ TLS_INC_STATS(sock_net(sk),
+ LINUX_MIB_TLSDECRYPTERROR);
return err;
}
} else {
@@ -1571,7 +1586,7 @@
rxm->offset += prot->prepend_size;
rxm->full_len -= prot->overhead_size;
tls_advance_record_sn(sk, prot, &tls_ctx->rx);
- ctx->decrypted = true;
+ ctx->decrypted = 1;
ctx->saved_data_ready(sk);
} else {
*zc = false;
@@ -1822,7 +1837,7 @@
err = decrypt_skb_update(sk, skb, &msg->msg_iter,
&chunk, &zc, async_capable);
if (err < 0 && err != -EINPROGRESS) {
- tls_err_abort(sk, EBADMSG);
+ tls_err_abort(sk, -EBADMSG);
goto recv_end;
}
@@ -1992,21 +2007,18 @@
if (!skb)
goto splice_read_end;
- if (!ctx->decrypted) {
- err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
-
- /* splice does not support reading control messages */
- if (ctx->control != TLS_RECORD_TYPE_DATA) {
- err = -EINVAL;
- goto splice_read_end;
- }
-
- if (err < 0) {
- tls_err_abort(sk, EBADMSG);
- goto splice_read_end;
- }
- ctx->decrypted = true;
+ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
+ if (err < 0) {
+ tls_err_abort(sk, -EBADMSG);
+ goto splice_read_end;
}
+
+ /* splice does not support reading control messages */
+ if (ctx->control != TLS_RECORD_TYPE_DATA) {
+ err = -EINVAL;
+ goto splice_read_end;
+ }
+
rxm = strp_msg(skb);
chunk = min_t(unsigned int, rxm->full_len, len);
@@ -2106,7 +2118,7 @@
struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- ctx->decrypted = false;
+ ctx->decrypted = 0;
ctx->recv_pkt = skb;
strp_pause(strp);
@@ -2471,10 +2483,11 @@
tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
if (crypto_info->version == TLS_1_3_VERSION)
- sw_ctx_rx->async_capable = false;
+ sw_ctx_rx->async_capable = 0;
else
sw_ctx_rx->async_capable =
- tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
+ !!(tfm->__crt_alg->cra_flags &
+ CRYPTO_ALG_ASYNC);
/* Set up strparser */
memset(&cb, 0, sizeof(cb));
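
The tls_err_abort() rework above settles on one sign convention: callers pass a negative errno, sk_err stores the positive value, and the I/O paths surface the negative value again. Illustrative fragments (the example_* names are hypothetical):

	static void example_abort_path(struct sock *sk)
	{
		tls_err_abort(sk, -EBADMSG);	/* sk->sk_err = EBADMSG */
	}

	static int example_read_path(struct sock *sk)
	{
		if (unlikely(sk->sk_err))
			return -sk->sk_err;	/* caller sees -EBADMSG */
		return 0;
	}
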
diff --git a/net/tls/tls_toe.c b/net/tls/tls_toe.c
new file mode 100644
index 0000000..7e1330f
--- /dev/null
+++ b/net/tls/tls_toe.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <net/inet_connection_sock.h>
+#include <net/tls.h>
+#include <net/tls_toe.h>
+
+static LIST_HEAD(device_list);
+static DEFINE_SPINLOCK(device_spinlock);
+
+static void tls_toe_sk_destruct(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ ctx->sk_destruct(sk);
+ /* Free ctx */
+ rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
+ tls_ctx_free(sk, ctx);
+}
+
+int tls_toe_bypass(struct sock *sk)
+{
+ struct tls_toe_device *dev;
+ struct tls_context *ctx;
+ int rc = 0;
+
+ spin_lock_bh(&device_spinlock);
+ list_for_each_entry(dev, &device_list, dev_list) {
+ if (dev->feature && dev->feature(dev)) {
+ ctx = tls_ctx_create(sk);
+ if (!ctx)
+ goto out;
+
+ ctx->sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = tls_toe_sk_destruct;
+ ctx->rx_conf = TLS_HW_RECORD;
+ ctx->tx_conf = TLS_HW_RECORD;
+ update_sk_prot(sk, ctx);
+ rc = 1;
+ break;
+ }
+ }
+out:
+ spin_unlock_bh(&device_spinlock);
+ return rc;
+}
+
+void tls_toe_unhash(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_toe_device *dev;
+
+ spin_lock_bh(&device_spinlock);
+ list_for_each_entry(dev, &device_list, dev_list) {
+ if (dev->unhash) {
+ kref_get(&dev->kref);
+ spin_unlock_bh(&device_spinlock);
+ dev->unhash(dev, sk);
+ kref_put(&dev->kref, dev->release);
+ spin_lock_bh(&device_spinlock);
+ }
+ }
+ spin_unlock_bh(&device_spinlock);
+ ctx->sk_proto->unhash(sk);
+}
+
+int tls_toe_hash(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_toe_device *dev;
+ int err;
+
+ err = ctx->sk_proto->hash(sk);
+ spin_lock_bh(&device_spinlock);
+ list_for_each_entry(dev, &device_list, dev_list) {
+ if (dev->hash) {
+ kref_get(&dev->kref);
+ spin_unlock_bh(&device_spinlock);
+ err |= dev->hash(dev, sk);
+ kref_put(&dev->kref, dev->release);
+ spin_lock_bh(&device_spinlock);
+ }
+ }
+ spin_unlock_bh(&device_spinlock);
+
+ if (err)
+ tls_toe_unhash(sk);
+ return err;
+}
+
+void tls_toe_register_device(struct tls_toe_device *device)
+{
+ spin_lock_bh(&device_spinlock);
+ list_add_tail(&device->dev_list, &device_list);
+ spin_unlock_bh(&device_spinlock);
+}
+EXPORT_SYMBOL(tls_toe_register_device);
+
+void tls_toe_unregister_device(struct tls_toe_device *device)
+{
+ spin_lock_bh(&device_spinlock);
+ list_del(&device->dev_list);
+ spin_unlock_bh(&device_spinlock);
+}
+EXPORT_SYMBOL(tls_toe_unregister_device);
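
For completeness, a hypothetical consumer of this registration API (field names follow include/net/tls_toe.h; chtls is the in-tree user, and all my_* identifiers below are illustrative):

	#include <linux/kref.h>
	#include <net/tls_toe.h>

	static int my_toe_feature(struct tls_toe_device *dev)
	{
		return 1;	/* claim sockets; a real driver checks HW state */
	}

	static void my_toe_release(struct kref *kref)
	{
		/* last reference dropped; free driver state here */
	}

	static struct tls_toe_device my_toe_dev = {
		.feature = my_toe_feature,
		.release = my_toe_release,
		/* .hash/.unhash are optional; tls_toe_hash() and
		 * tls_toe_unhash() skip devices that leave them NULL
		 */
	};

	static int __init my_toe_init(void)
	{
		kref_init(&my_toe_dev.kref);
		tls_toe_register_device(&my_toe_dev);
		return 0;
	}
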
diff --git a/net/tls/trace.c b/net/tls/trace.c
new file mode 100644
index 0000000..e374913
--- /dev/null
+++ b/net/tls/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/net/tls/trace.h b/net/tls/trace.h
new file mode 100644
index 0000000..9ba5f60
--- /dev/null
+++ b/net/tls/trace.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tls
+
+#if !defined(_TLS_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TLS_TRACE_H_
+
+#include <asm/unaligned.h>
+#include <linux/tracepoint.h>
+
+struct sock;
+
+TRACE_EVENT(tls_device_offload_set,
+
+ TP_PROTO(struct sock *sk, int dir, u32 tcp_seq, u8 *rec_no, int ret),
+
+ TP_ARGS(sk, dir, tcp_seq, rec_no, ret),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( int, dir )
+ __field( u32, tcp_seq )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->dir = dir;
+ __entry->tcp_seq = tcp_seq;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ "sk=%p direction=%d tcp_seq=%u rec_no=%llu ret=%d",
+ __entry->sk, __entry->dir, __entry->tcp_seq, __entry->rec_no,
+ __entry->ret
+ )
+);
+
+TRACE_EVENT(tls_device_decrypted,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no, u32 rec_len,
+ bool encrypted, bool decrypted),
+
+ TP_ARGS(sk, tcp_seq, rec_no, rec_len, encrypted, decrypted),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( u32, tcp_seq )
+ __field( u32, rec_len )
+ __field( bool, encrypted )
+ __field( bool, decrypted )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->tcp_seq = tcp_seq;
+ __entry->rec_len = rec_len;
+ __entry->encrypted = encrypted;
+ __entry->decrypted = decrypted;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u rec_no=%llu len=%u encrypted=%d decrypted=%d",
+ __entry->sk, __entry->tcp_seq,
+ __entry->rec_no, __entry->rec_len,
+ __entry->encrypted, __entry->decrypted
+ )
+);
+
+TRACE_EVENT(tls_device_rx_resync_send,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no, int sync_type),
+
+ TP_ARGS(sk, tcp_seq, rec_no, sync_type),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( u32, tcp_seq )
+ __field( int, sync_type )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->tcp_seq = tcp_seq;
+ __entry->sync_type = sync_type;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u rec_no=%llu sync_type=%d",
+ __entry->sk, __entry->tcp_seq, __entry->rec_no,
+ __entry->sync_type
+ )
+);
+
+TRACE_EVENT(tls_device_rx_resync_nh_schedule,
+
+ TP_PROTO(struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ ),
+
+ TP_printk(
+ "sk=%p", __entry->sk
+ )
+);
+
+TRACE_EVENT(tls_device_rx_resync_nh_delay,
+
+ TP_PROTO(struct sock *sk, u32 sock_data, u32 rec_len),
+
+ TP_ARGS(sk, sock_data, rec_len),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u32, sock_data )
+ __field( u32, rec_len )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->sock_data = sock_data;
+ __entry->rec_len = rec_len;
+ ),
+
+ TP_printk(
+ "sk=%p sock_data=%u rec_len=%u",
+ __entry->sk, __entry->sock_data, __entry->rec_len
+ )
+);
+
+TRACE_EVENT(tls_device_tx_resync_req,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u32 exp_tcp_seq),
+
+ TP_ARGS(sk, tcp_seq, exp_tcp_seq),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u32, tcp_seq )
+ __field( u32, exp_tcp_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->tcp_seq = tcp_seq;
+ __entry->exp_tcp_seq = exp_tcp_seq;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u exp_tcp_seq=%u",
+ __entry->sk, __entry->tcp_seq, __entry->exp_tcp_seq
+ )
+);
+
+TRACE_EVENT(tls_device_tx_resync_send,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no),
+
+ TP_ARGS(sk, tcp_seq, rec_no),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( u32, tcp_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->tcp_seq = tcp_seq;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u rec_no=%llu",
+ __entry->sk, __entry->tcp_seq, __entry->rec_no
+ )
+);
+
+#endif /* _TLS_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
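
Once built, these events appear under /sys/kernel/debug/tracing/events/tls/. An enabled event renders through its TP_printk() format, e.g. a tls_device_decrypted record would look like (values illustrative):

	tls_device_decrypted: sk=000000006ae43f1c tcp_seq=305419896 rec_no=7 len=1234 encrypted=0 decrypted=1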