// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

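/*
 * Accessors into the buffer returned by esp_alloc_tmp(): the ESN high
 * sequence bits come first, followed by the IV, the aead_request and the
 * scatterlist array, each aligned as required by the transform.
 */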
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

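/*
 * Put the pages referenced by the source scatterlist (except the first
 * entry, which maps skb->data) when a separate destination scatterlist
 * was used for the crypto operation.
 */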
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

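/*
 * Completion callback for asynchronous ESP encryption: free the temporary
 * buffer and resume transmission, either through the device offload path
 * (XFRM_DEV_RESUME) or the normal xfrm output path.
 */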
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct xfrm_offload *xo = xfrm_offload(skb);

		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		if (xo)
			esph->seq_no = htonl(xo->seq.hi);
		else
			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

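/*
 * Write the ESP trailer at @tail: optional TFC padding (zeroed), the
 * self-describing pad bytes 1, 2, 3, ..., the pad length and finally the
 * next header value (RFC 4303).
 */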
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

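/*
 * Make room for and write the ESP trailer.  Existing tailroom is used when
 * the skb is not cloned; otherwise the trailer is appended as a page
 * fragment taken from the per-state page_frag, falling back to
 * skb_cow_data() when neither is possible.  Returns the number of
 * fragments the packet occupies for the scatterlist, or a negative error.
 */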
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

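/*
 * Build the scatterlists and perform the (possibly asynchronous) AEAD
 * encryption of the packet.  A separate destination scatterlist backed by
 * a fresh page is used when the trailer could not be written in place.
 */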
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

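/*
 * xfrm_type .output handler: compute padding and trailer sizes, write the
 * ESP header (SPI and low sequence number), and hand the packet to
 * esp6_output_head()/esp6_output_tail() for trailer setup and encryption.
 */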
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

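/*
 * Validate and strip the ESP trailer (padding, pad length and ICV) after a
 * successful decryption, adjusting a CHECKSUM_COMPLETE checksum if needed.
 * Returns the next header value or a negative error.
 */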
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

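/*
 * Second half of ESP input processing, run once (possibly asynchronous)
 * decryption has finished: free the temporary buffer, strip the trailer,
 * pull the ESP header and IV, and reset the transport header.  Returns the
 * inner protocol number or a negative error.
 */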
int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

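/*
 * xfrm_type .input handler: map the packet into a scatterlist (copying only
 * when the skb is cloned or carries a frag list), install the ESN header if
 * needed, and run the (possibly asynchronous) AEAD decryption.  Completion
 * continues in esp6_input_done2().
 */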
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

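/*
 * ICMPv6 error handler: update the path MTU or follow a redirect for the
 * ESP state identified by the SPI and destination address found in the
 * offending packet.
 */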
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

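/*
 * Allocate and key the crypto transform for a state configured with a true
 * AEAD algorithm (x->aead), building the algorithm name from the geniv
 * template and the algorithm name.
 */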
static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

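/*
 * Allocate and key an authenc (or authencesn, when ESN is enabled) AEAD
 * built from the state's separate encryption and authentication algorithms,
 * packing both keys and the encryption key length into a single
 * CRYPTO_AUTHENC_KEYA_PARAM blob.
 */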
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

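/*
 * xfrm_type .init_state handler: set up the AEAD transform and derive the
 * header and trailer lengths from the encapsulation mode and the
 * algorithm's IV, block and ICV sizes.
 */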
static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);