/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

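/* Release contexts queued for destruction: detach still-offloaded TX state
 * from the netdev (if one is still attached) and free the context memory.
 */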
static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i]);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

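/* clean_acked_data hook: free TX records the peer has fully acknowledged
 * (end_seq at or before acked_seq) and advance the unacked record counter.
 */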
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

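/* Append @size bytes from the socket's page frag to the open record, either
 * by extending the last fragment (when contiguous) or by adding a new one.
 */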
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		__skb_frag_set_page(frag, pfrag->page);
		skb_frag_off_set(frag, pfrag->offset);
		skb_frag_size_set(frag, size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_device_record_close(struct sock *sk,
				   struct tls_context *ctx,
				   struct tls_record_info *record,
				   struct page_frag *pfrag,
				   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	int ret;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now, steal some back from data
	 */
	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
					sk->sk_allocation))) {
		ret = 0;
		tls_append_frag(record, pfrag, prot->tag_size);
	} else {
		ret = prot->tag_size;
		if (record->len <= prot->overhead_size)
			return -ENOMEM;
	}

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type, prot->version);
	return ret;
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	skb_frag_off_set(frag, pfrag->offset);
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

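/* Make sure an open record exists (allocating one sized for the TLS prepend
 * if needed) and that the socket's page frag has room for more payload.
 */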
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

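/* Copy user data into the record buffer, using non-temporal (nocache) copies
 * for the cache-line-aligned bulk and regular copies for the unaligned head
 * and tail.
 */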
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

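/* Main device-offload TX path: fill the open record from @msg_iter, close it
 * and hand it to the TCP layer whenever it reaches the maximum record size,
 * runs out of fragments, or the caller has no more data pending.
 */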
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record = ctx->open_record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -EOPNOTSUPP;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		rc = tls_device_copy_data(page_address(pfrag->page) +
					  pfrag->offset, copy, msg_iter);
		if (rc)
			goto handle_error;
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_device_record_close(sk, tls_ctx, record,
						     pfrag, record_type);
			if (rc) {
				if (rc > 0) {
					size += rc;
				} else {
					size = orig_size;
					destroy_record(record);
					ctx->open_record = NULL;
					break;
				}
			}

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int rc;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct iov_iter msg_iter;
	char *kaddr;
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

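/* Find the offloaded record that covers TCP sequence number @seq (used on
 * retransmission) and report its TLS record sequence number via @p_record_sn.
 */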
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 * And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

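/* Called for every new RX record: depending on the configured resync type,
 * decide whether a pending device resync request can be completed for this
 * record and, if so, send the resync to the netdev.
 */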
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;
	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

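/* Software fallback for RX: decrypt the record with the kernel AEAD into a
 * temporary buffer, then copy plaintext back only over the parts of the skb
 * that the device did not already decrypt.
 */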
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	ctx->sw.decrypted |= is_decrypted;

	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
		if (likely(is_encrypted || is_decrypted))
			return 0;

		/* After tls_device_down disables the offload, the next SKB will
		 * likely have initial fragments decrypted, and final ones not
		 * decrypted. We need to reencrypt that single SKB.
		 */
		return tls_device_reencrypt(sk, skb);
	}

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return 0;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, skb);
}

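/* First-time attach of an offloaded context: take a reference on @netdev,
 * link the context into tls_device_list and install the offload-aware socket
 * destructor.
 */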
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

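/* Enable TX device offload for @sk: validate the cipher parameters, build the
 * TX offload context with a start-marker record, and register the flow with
 * the netdev via tls_dev_add().
 */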
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record)
		return -ENOMEM;

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto free_offload_ctx;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto disable_cad;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
disable_cad:
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
	return rc;
}

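/* Enable RX device offload for @sk: allocate the RX offload context, set up
 * the software fallback, and register the flow with the netdev via
 * tls_dev_add().
 */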
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after the NETDEV_DOWN event.
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler, thus protecting against the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
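	/* Fresh context: mark the non-header (next-hint) RX resync state
	 * as needing a reset before first use.
	 */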
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

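/* Tear down RX device offload for a socket. The driver is told to drop its
 * RX state via tls_dev_del(); the netdev reference is released here only if
 * TX offload is not also active, otherwise TLS_RX_DEV_CLOSED is recorded so
 * that tls_device_down() knows the RX side was already removed.
 */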
void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

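/* NETDEV_DOWN handler: detach every offloaded context bound to this device.
 * Instead of being torn down, each context is degraded to the software
 * fallback so the TLS connection keeps working without the device.
 */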
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

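	/* Collect the contexts bound to this netdev; hold a reference on
	 * each so it cannot be freed while it is detached below.
	 */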
	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

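	/* Walk the collected contexts without tls_device_lock held; the
	 * driver callbacks and synchronize_net() below may sleep.
	 */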
	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
		 * tls_is_sk_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);

		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
		WRITE_ONCE(ctx->netdev, NULL);

		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);

		/* Sync with inflight packets. After this point:
		 * TX: no non-encrypted packets will be passed to the driver.
		 * RX: resync requests from the driver will be ignored.
		 */
		synchronize_net();

		/* Release the offload context on the driver side. */
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);

		dev_put(netdev);

		/* Move the context to a separate list for two reasons:
		 * 1. When the context is deallocated, list_del is called.
		 * 2. It's no longer an offloaded context, so we don't want to
		 *    run offload-specific code on this context.
		 */
		spin_lock_irqsave(&tls_device_lock, flags);
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);

		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 */
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

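/* Netdevice notifier: validate a device's tlsdev_ops when TLS offload
 * features are registered or changed, and degrade any active offloads
 * when the device goes down.
 */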
static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

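/* Register the netdevice notifier at module init; on module exit,
 * unregister it and flush any work that may still reference this code.
 */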
void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}