// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include <crypto/aead.h>

#include "xfrm_hash.h"

#define xfrm_state_deref_prot(table, net) \
	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))

static void xfrm_state_gc_task(struct work_struct *work);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct kmem_cache *xfrm_state_cache __ro_after_init;

static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
51
52static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
53{
54 return refcount_inc_not_zero(&x->refcnt);
55}
56
57static inline unsigned int xfrm_dst_hash(struct net *net,
58 const xfrm_address_t *daddr,
59 const xfrm_address_t *saddr,
60 u32 reqid,
61 unsigned short family)
62{
63 return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
64}
65
66static inline unsigned int xfrm_src_hash(struct net *net,
67 const xfrm_address_t *daddr,
68 const xfrm_address_t *saddr,
69 unsigned short family)
70{
71 return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
72}
73
74static inline unsigned int
75xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
76 __be32 spi, u8 proto, unsigned short family)
77{
78 return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
79}
80
81static void xfrm_hash_transfer(struct hlist_head *list,
82 struct hlist_head *ndsttable,
83 struct hlist_head *nsrctable,
84 struct hlist_head *nspitable,
85 unsigned int nhashmask)
86{
87 struct hlist_node *tmp;
88 struct xfrm_state *x;
89
90 hlist_for_each_entry_safe(x, tmp, list, bydst) {
91 unsigned int h;
92
93 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
94 x->props.reqid, x->props.family,
95 nhashmask);
96 hlist_add_head_rcu(&x->bydst, ndsttable + h);
97
98 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
99 x->props.family,
100 nhashmask);
101 hlist_add_head_rcu(&x->bysrc, nsrctable + h);
102
103 if (x->id.spi) {
104 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
105 x->id.proto, x->props.family,
106 nhashmask);
107 hlist_add_head_rcu(&x->byspi, nspitable + h);
108 }
109 }
110}
111
112static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
113{
114 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
115}
116
117static void xfrm_hash_resize(struct work_struct *work)
118{
119 struct net *net = container_of(work, struct net, xfrm.state_hash_work);
120 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
121 unsigned long nsize, osize;
122 unsigned int nhashmask, ohashmask;
123 int i;
124
125 nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
126 ndst = xfrm_hash_alloc(nsize);
127 if (!ndst)
128 return;
129 nsrc = xfrm_hash_alloc(nsize);
130 if (!nsrc) {
131 xfrm_hash_free(ndst, nsize);
132 return;
133 }
134 nspi = xfrm_hash_alloc(nsize);
135 if (!nspi) {
136 xfrm_hash_free(ndst, nsize);
137 xfrm_hash_free(nsrc, nsize);
138 return;
139 }
140
141 spin_lock_bh(&net->xfrm.xfrm_state_lock);
	write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

144 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
145 odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
146 for (i = net->xfrm.state_hmask; i >= 0; i--)
147 xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
148
149 osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
150 ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
151 ohashmask = net->xfrm.state_hmask;
152
153 rcu_assign_pointer(net->xfrm.state_bydst, ndst);
154 rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
155 rcu_assign_pointer(net->xfrm.state_byspi, nspi);
156 net->xfrm.state_hmask = nhashmask;
157
	write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
160
161 osize = (ohashmask + 1) * sizeof(struct hlist_head);
162
163 synchronize_rcu();
164
165 xfrm_hash_free(odst, osize);
166 xfrm_hash_free(osrc, osize);
167 xfrm_hash_free(ospi, osize);
168}
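
/* Note on the swap above: readers walk the hash tables under RCU, so the old
 * bucket arrays are only released after synchronize_rcu(); a lookup racing
 * with the resize sees either the old tables or the new ones, and the
 * seqcount bump lets xfrm_state_find() detect and retry across a resize.
 */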
169
170static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
171static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
172
173static DEFINE_SPINLOCK(xfrm_state_gc_lock);
174
175int __xfrm_state_delete(struct xfrm_state *x);
176
177int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
static bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
180
int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;

	if (!afinfo)
		return -EAFNOSUPPORT;

#define X(afi, T, name) do {			\
		WARN_ON((afi)->type_ ## name);	\
		(afi)->type_ ## name = (T);	\
	} while (0)
193
194 switch (type->proto) {
195 case IPPROTO_COMP:
196 X(afinfo, type, comp);
197 break;
198 case IPPROTO_AH:
199 X(afinfo, type, ah);
200 break;
201 case IPPROTO_ESP:
202 X(afinfo, type, esp);
203 break;
204 case IPPROTO_IPIP:
205 X(afinfo, type, ipip);
206 break;
207 case IPPROTO_DSTOPTS:
208 X(afinfo, type, dstopts);
209 break;
210 case IPPROTO_ROUTING:
211 X(afinfo, type, routing);
212 break;
213 case IPPROTO_IPV6:
214 X(afinfo, type, ipip6);
215 break;
216 default:
217 WARN_ON(1);
218 err = -EPROTONOSUPPORT;
219 break;
220 }
221#undef X
	rcu_read_unlock();
223 return err;
224}
225EXPORT_SYMBOL(xfrm_register_type);
226
void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

#define X(afi, T, name) do {				\
		WARN_ON((afi)->type_ ## name != (T));	\
		(afi)->type_ ## name = NULL;		\
	} while (0)
238
239 switch (type->proto) {
240 case IPPROTO_COMP:
241 X(afinfo, type, comp);
242 break;
243 case IPPROTO_AH:
244 X(afinfo, type, ah);
245 break;
246 case IPPROTO_ESP:
247 X(afinfo, type, esp);
248 break;
249 case IPPROTO_IPIP:
250 X(afinfo, type, ipip);
251 break;
252 case IPPROTO_DSTOPTS:
253 X(afinfo, type, dstopts);
254 break;
255 case IPPROTO_ROUTING:
256 X(afinfo, type, routing);
257 break;
258 case IPPROTO_IPV6:
259 X(afinfo, type, ipip6);
260 break;
261 default:
262 WARN_ON(1);
263 break;
264 }
265#undef X
	rcu_read_unlock();
}
268EXPORT_SYMBOL(xfrm_unregister_type);
269
270static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
271{
	const struct xfrm_type *type = NULL;
	struct xfrm_state_afinfo *afinfo;
	int modload_attempted = 0;
275
276retry:
277 afinfo = xfrm_state_get_afinfo(family);
278 if (unlikely(afinfo == NULL))
279 return NULL;

	switch (proto) {
282 case IPPROTO_COMP:
283 type = afinfo->type_comp;
284 break;
285 case IPPROTO_AH:
286 type = afinfo->type_ah;
287 break;
288 case IPPROTO_ESP:
289 type = afinfo->type_esp;
290 break;
291 case IPPROTO_IPIP:
292 type = afinfo->type_ipip;
293 break;
294 case IPPROTO_DSTOPTS:
295 type = afinfo->type_dstopts;
296 break;
297 case IPPROTO_ROUTING:
298 type = afinfo->type_routing;
299 break;
300 case IPPROTO_IPV6:
301 type = afinfo->type_ipip6;
302 break;
303 default:
304 break;
305 }
306
	if (unlikely(type && !try_module_get(type->owner)))
308 type = NULL;
309
310 rcu_read_unlock();
311
312 if (!type && !modload_attempted) {
313 request_module("xfrm-type-%d-%d", family, proto);
314 modload_attempted = 1;
315 goto retry;
316 }
317
318 return type;
319}
320
321static void xfrm_put_type(const struct xfrm_type *type)
322{
323 module_put(type->owner);
324}
325
int xfrm_register_type_offload(const struct xfrm_type_offload *type,
327 unsigned short family)
328{
329 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;
331
332 if (unlikely(afinfo == NULL))
333 return -EAFNOSUPPORT;

	switch (type->proto) {
336 case IPPROTO_ESP:
337 WARN_ON(afinfo->type_offload_esp);
338 afinfo->type_offload_esp = type;
339 break;
340 default:
341 WARN_ON(1);
342 err = -EPROTONOSUPPORT;
343 break;
344 }
345
	rcu_read_unlock();
347 return err;
348}
349EXPORT_SYMBOL(xfrm_register_type_offload);
350
void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
				  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

	switch (type->proto) {
360 case IPPROTO_ESP:
361 WARN_ON(afinfo->type_offload_esp != type);
362 afinfo->type_offload_esp = NULL;
363 break;
364 default:
365 WARN_ON(1);
366 break;
367 }
	rcu_read_unlock();
}
370EXPORT_SYMBOL(xfrm_unregister_type_offload);
371
372static const struct xfrm_type_offload *
373xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
374{
	const struct xfrm_type_offload *type = NULL;
	struct xfrm_state_afinfo *afinfo;

378retry:
379 afinfo = xfrm_state_get_afinfo(family);
380 if (unlikely(afinfo == NULL))
381 return NULL;

	switch (proto) {
384 case IPPROTO_ESP:
385 type = afinfo->type_offload_esp;
386 break;
387 default:
388 break;
389 }
390
	if ((type && !try_module_get(type->owner)))
392 type = NULL;
393
394 rcu_read_unlock();
395
396 if (!type && try_load) {
397 request_module("xfrm-offload-%d-%d", family, proto);
398 try_load = false;
399 goto retry;
400 }
401
402 return type;
403}
404
405static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
406{
407 module_put(type->owner);
408}
409
static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
411 [XFRM_MODE_BEET] = {
412 .encap = XFRM_MODE_BEET,
413 .flags = XFRM_MODE_FLAG_TUNNEL,
414 .family = AF_INET,
415 },
416 [XFRM_MODE_TRANSPORT] = {
417 .encap = XFRM_MODE_TRANSPORT,
418 .family = AF_INET,
419 },
420 [XFRM_MODE_TUNNEL] = {
421 .encap = XFRM_MODE_TUNNEL,
422 .flags = XFRM_MODE_FLAG_TUNNEL,
423 .family = AF_INET,
424 },
425};
426
427static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
428 [XFRM_MODE_BEET] = {
429 .encap = XFRM_MODE_BEET,
430 .flags = XFRM_MODE_FLAG_TUNNEL,
431 .family = AF_INET6,
432 },
433 [XFRM_MODE_ROUTEOPTIMIZATION] = {
434 .encap = XFRM_MODE_ROUTEOPTIMIZATION,
435 .family = AF_INET6,
436 },
437 [XFRM_MODE_TRANSPORT] = {
438 .encap = XFRM_MODE_TRANSPORT,
439 .family = AF_INET6,
440 },
441 [XFRM_MODE_TUNNEL] = {
442 .encap = XFRM_MODE_TUNNEL,
443 .flags = XFRM_MODE_FLAG_TUNNEL,
444 .family = AF_INET6,
445 },
446};
447
448static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	const struct xfrm_mode *mode;

452 if (unlikely(encap >= XFRM_MODE_MAX))
453 return NULL;
454
	switch (family) {
456 case AF_INET:
457 mode = &xfrm4_mode_map[encap];
458 if (mode->family == family)
459 return mode;
460 break;
461 case AF_INET6:
462 mode = &xfrm6_mode_map[encap];
463 if (mode->family == family)
464 return mode;
465 break;
466 default:
467 break;
	}
469
	return NULL;
}
472
473void xfrm_state_free(struct xfrm_state *x)
474{
475 kmem_cache_free(xfrm_state_cache, x);
476}
477EXPORT_SYMBOL(xfrm_state_free);
478
static void ___xfrm_state_destroy(struct xfrm_state *x)
{
	hrtimer_cancel(&x->mtimer);
	del_timer_sync(&x->rtimer);
	kfree(x->aead);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	kfree(x->replay_esn);
	kfree(x->preplay_esn);
	if (x->type_offload)
		xfrm_put_type_offload(x->type_offload);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	if (x->xfrag.page)
		put_page(x->xfrag.page);
	xfrm_dev_state_free(x);
	security_xfrm_state_free(x);
	xfrm_state_free(x);
}
503
504static void xfrm_state_gc_task(struct work_struct *work)
505{
506 struct xfrm_state *x;
507 struct hlist_node *tmp;
508 struct hlist_head gc_list;
509
510 spin_lock_bh(&xfrm_state_gc_lock);
511 hlist_move_list(&xfrm_state_gc_list, &gc_list);
512 spin_unlock_bh(&xfrm_state_gc_lock);
513
514 synchronize_rcu();
515
516 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
		___xfrm_state_destroy(x);
}
519
520static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
521{
	struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	time64_t now = ktime_get_real_seconds();
525 time64_t next = TIME64_MAX;
526 int warn = 0;
527 int err = 0;
528
529 spin_lock(&x->lock);
530 if (x->km.state == XFRM_STATE_DEAD)
531 goto out;
532 if (x->km.state == XFRM_STATE_EXPIRED)
533 goto expired;
534 if (x->lft.hard_add_expires_seconds) {
535 long tmo = x->lft.hard_add_expires_seconds +
536 x->curlft.add_time - now;
537 if (tmo <= 0) {
538 if (x->xflags & XFRM_SOFT_EXPIRE) {
				/* enter hard expire without soft expire first?!
				 * setting a new date could trigger this.
				 * workaround: fix x->curlft.add_time by below:
				 */
543 x->curlft.add_time = now - x->saved_tmo - 1;
544 tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
545 } else
546 goto expired;
547 }
548 if (tmo < next)
549 next = tmo;
550 }
551 if (x->lft.hard_use_expires_seconds) {
552 long tmo = x->lft.hard_use_expires_seconds +
553 (x->curlft.use_time ? : now) - now;
554 if (tmo <= 0)
555 goto expired;
556 if (tmo < next)
557 next = tmo;
558 }
559 if (x->km.dying)
560 goto resched;
561 if (x->lft.soft_add_expires_seconds) {
562 long tmo = x->lft.soft_add_expires_seconds +
563 x->curlft.add_time - now;
564 if (tmo <= 0) {
565 warn = 1;
566 x->xflags &= ~XFRM_SOFT_EXPIRE;
567 } else if (tmo < next) {
568 next = tmo;
569 x->xflags |= XFRM_SOFT_EXPIRE;
570 x->saved_tmo = tmo;
571 }
572 }
573 if (x->lft.soft_use_expires_seconds) {
574 long tmo = x->lft.soft_use_expires_seconds +
575 (x->curlft.use_time ? : now) - now;
576 if (tmo <= 0)
577 warn = 1;
578 else if (tmo < next)
579 next = tmo;
580 }
581
582 x->km.dying = warn;
583 if (warn)
584 km_state_expired(x, 0, 0);
585resched:
586 if (next != TIME64_MAX) {
		hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
		ret = HRTIMER_RESTART;
	}
590
591 goto out;
592
593expired:
594 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
595 x->km.state = XFRM_STATE_EXPIRED;
596
597 err = __xfrm_state_delete(x);
598 if (!err)
599 km_state_expired(x, 1, 0);
600
601 xfrm_audit_state_delete(x, err ? 0 : 1, true);
602
603out:
604 spin_unlock(&x->lock);
	return ret;
}
607
608static void xfrm_replay_timer_handler(struct timer_list *t);
609
610struct xfrm_state *xfrm_state_alloc(struct net *net)
611{
612 struct xfrm_state *x;
613
614 x = kmem_cache_alloc(xfrm_state_cache, GFP_ATOMIC | __GFP_ZERO);
615
616 if (x) {
617 write_pnet(&x->xs_net, net);
618 refcount_set(&x->refcnt, 1);
619 atomic_set(&x->tunnel_users, 0);
620 INIT_LIST_HEAD(&x->km.all);
621 INIT_HLIST_NODE(&x->bydst);
622 INIT_HLIST_NODE(&x->bysrc);
623 INIT_HLIST_NODE(&x->byspi);
		hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
		x->mtimer.function = xfrm_timer_handler;
		timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
627 x->curlft.add_time = ktime_get_real_seconds();
628 x->lft.soft_byte_limit = XFRM_INF;
629 x->lft.soft_packet_limit = XFRM_INF;
630 x->lft.hard_byte_limit = XFRM_INF;
631 x->lft.hard_packet_limit = XFRM_INF;
632 x->replay_maxage = 0;
633 x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
635 }
636 return x;
637}
638EXPORT_SYMBOL(xfrm_state_alloc);
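
/* Typical lifecycle (illustrative sketch only; exact caller code in the key
 * managers varies):
 *
 *	x = xfrm_state_alloc(net);
 *	if (!x)
 *		return -ENOMEM;
 *	// fill in x->id, x->props, algorithms, lifetimes ...
 *	err = xfrm_state_add(x);
 *	if (err) {
 *		x->km.state = XFRM_STATE_DEAD;
 *		xfrm_state_put(x);
 *	}
 *
 * The allocation starts with refcnt == 1; that reference is the one later
 * dropped by __xfrm_state_delete() once the state is unlinked.
 */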
639
void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
642 WARN_ON(x->km.state != XFRM_STATE_DEAD);
643
	if (sync) {
645 synchronize_rcu();
646 ___xfrm_state_destroy(x);
647 } else {
648 spin_lock_bh(&xfrm_state_gc_lock);
649 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
650 spin_unlock_bh(&xfrm_state_gc_lock);
651 schedule_work(&xfrm_state_gc_work);
652 }
}
654EXPORT_SYMBOL(__xfrm_state_destroy);
655
656int __xfrm_state_delete(struct xfrm_state *x)
657{
658 struct net *net = xs_net(x);
659 int err = -ESRCH;
660
661 if (x->km.state != XFRM_STATE_DEAD) {
662 x->km.state = XFRM_STATE_DEAD;
663 spin_lock(&net->xfrm.xfrm_state_lock);
664 list_del(&x->km.all);
665 hlist_del_rcu(&x->bydst);
666 hlist_del_rcu(&x->bysrc);
667 if (x->id.spi)
668 hlist_del_rcu(&x->byspi);
669 net->xfrm.state_num--;
670 spin_unlock(&net->xfrm.xfrm_state_lock);
671
672 xfrm_dev_state_delete(x);
673
674 /* All xfrm_state objects are created by xfrm_state_alloc.
675 * The xfrm_state_alloc call gives a reference, and that
676 * is what we are dropping here.
677 */
678 xfrm_state_put(x);
679 err = 0;
680 }
681
682 return err;
683}
684EXPORT_SYMBOL(__xfrm_state_delete);
685
686int xfrm_state_delete(struct xfrm_state *x)
687{
688 int err;
689
690 spin_lock_bh(&x->lock);
691 err = __xfrm_state_delete(x);
692 spin_unlock_bh(&x->lock);
693
694 return err;
695}
696EXPORT_SYMBOL(xfrm_state_delete);
697
698#ifdef CONFIG_SECURITY_NETWORK_XFRM
699static inline int
700xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
701{
702 int i, err = 0;
703
704 for (i = 0; i <= net->xfrm.state_hmask; i++) {
705 struct xfrm_state *x;
706
707 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
708 if (xfrm_id_proto_match(x->id.proto, proto) &&
709 (err = security_xfrm_state_delete(x)) != 0) {
710 xfrm_audit_state_delete(x, 0, task_valid);
711 return err;
712 }
713 }
714 }
715
716 return err;
717}
718
719static inline int
720xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
721{
722 int i, err = 0;
723
724 for (i = 0; i <= net->xfrm.state_hmask; i++) {
725 struct xfrm_state *x;
726 struct xfrm_state_offload *xso;
727
728 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
729 xso = &x->xso;
730
731 if (xso->dev == dev &&
732 (err = security_xfrm_state_delete(x)) != 0) {
733 xfrm_audit_state_delete(x, 0, task_valid);
734 return err;
735 }
736 }
737 }
738
739 return err;
740}
741#else
742static inline int
743xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
744{
745 return 0;
746}
747
748static inline int
749xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
750{
751 return 0;
752}
753#endif
754
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
{
757 int i, err = 0, cnt = 0;
758
759 spin_lock_bh(&net->xfrm.xfrm_state_lock);
760 err = xfrm_state_flush_secctx_check(net, proto, task_valid);
761 if (err)
762 goto out;
763
764 err = -ESRCH;
765 for (i = 0; i <= net->xfrm.state_hmask; i++) {
766 struct xfrm_state *x;
767restart:
768 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
769 if (!xfrm_state_kern(x) &&
770 xfrm_id_proto_match(x->id.proto, proto)) {
771 xfrm_state_hold(x);
772 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
773
774 err = xfrm_state_delete(x);
775 xfrm_audit_state_delete(x, err ? 0 : 1,
776 task_valid);
				if (sync)
					xfrm_state_put_sync(x);
				else
					xfrm_state_put(x);
				if (!err)
782 cnt++;
783
784 spin_lock_bh(&net->xfrm.xfrm_state_lock);
785 goto restart;
786 }
787 }
788 }
789out:
790 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
791 if (cnt)
792 err = 0;
793
794 return err;
795}
796EXPORT_SYMBOL(xfrm_state_flush);
797
798int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
799{
800 int i, err = 0, cnt = 0;
801
802 spin_lock_bh(&net->xfrm.xfrm_state_lock);
803 err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
804 if (err)
805 goto out;
806
807 err = -ESRCH;
808 for (i = 0; i <= net->xfrm.state_hmask; i++) {
809 struct xfrm_state *x;
810 struct xfrm_state_offload *xso;
811restart:
812 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
813 xso = &x->xso;
814
815 if (!xfrm_state_kern(x) && xso->dev == dev) {
816 xfrm_state_hold(x);
817 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
818
819 err = xfrm_state_delete(x);
820 xfrm_audit_state_delete(x, err ? 0 : 1,
821 task_valid);
822 xfrm_state_put(x);
823 if (!err)
824 cnt++;
825
826 spin_lock_bh(&net->xfrm.xfrm_state_lock);
827 goto restart;
828 }
829 }
830 }
831 if (cnt)
832 err = 0;
833
834out:
835 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
836 return err;
837}
838EXPORT_SYMBOL(xfrm_dev_state_flush);
839
840void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
841{
842 spin_lock_bh(&net->xfrm.xfrm_state_lock);
843 si->sadcnt = net->xfrm.state_num;
	si->sadhcnt = net->xfrm.state_hmask + 1;
	si->sadhmcnt = xfrm_state_hashmax;
846 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
847}
848EXPORT_SYMBOL(xfrm_sad_getinfo);
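
/* For reference: sadcnt is the number of SAs currently installed, sadhcnt
 * the number of hash buckets in use (state_hmask + 1), and sadhmcnt the
 * upper bound the resize worker may grow the tables to (xfrm_state_hashmax).
 */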
849
850static void
__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
852{
853 const struct flowi4 *fl4 = &fl->u.ip4;
854
855 sel->daddr.a4 = fl4->daddr;
856 sel->saddr.a4 = fl4->saddr;
857 sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
858 sel->dport_mask = htons(0xffff);
859 sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
860 sel->sport_mask = htons(0xffff);
861 sel->family = AF_INET;
862 sel->prefixlen_d = 32;
863 sel->prefixlen_s = 32;
864 sel->proto = fl4->flowi4_proto;
865 sel->ifindex = fl4->flowi4_oif;
866}
867
868static void
869__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
870{
871 const struct flowi6 *fl6 = &fl->u.ip6;
872
873 /* Initialize temporary selector matching only to current session. */
874 *(struct in6_addr *)&sel->daddr = fl6->daddr;
875 *(struct in6_addr *)&sel->saddr = fl6->saddr;
876 sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
877 sel->dport_mask = htons(0xffff);
878 sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
879 sel->sport_mask = htons(0xffff);
880 sel->family = AF_INET6;
881 sel->prefixlen_d = 128;
882 sel->prefixlen_s = 128;
883 sel->proto = fl6->flowi6_proto;
884 sel->ifindex = fl6->flowi6_oif;
885}
886
887static void
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
889 const struct xfrm_tmpl *tmpl,
890 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
891 unsigned short family)
892{
	switch (family) {
894 case AF_INET:
895 __xfrm4_init_tempsel(&x->sel, fl);
896 break;
897 case AF_INET6:
898 __xfrm6_init_tempsel(&x->sel, fl);
899 break;
	}

902 x->id = tmpl->id;
903
904 switch (tmpl->encap_family) {
905 case AF_INET:
906 if (x->id.daddr.a4 == 0)
907 x->id.daddr.a4 = daddr->a4;
908 x->props.saddr = tmpl->saddr;
909 if (x->props.saddr.a4 == 0)
910 x->props.saddr.a4 = saddr->a4;
911 break;
912 case AF_INET6:
913 if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
914 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
915 memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
916 if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
917 memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
918 break;
919 }
920
921 x->props.mode = tmpl->mode;
922 x->props.reqid = tmpl->reqid;
923 x->props.family = tmpl->encap_family;
}
925
926static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
927 const xfrm_address_t *daddr,
928 __be32 spi, u8 proto,
929 unsigned short family)
930{
931 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
932 struct xfrm_state *x;
933
934 hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
935 if (x->props.family != family ||
936 x->id.spi != spi ||
937 x->id.proto != proto ||
938 !xfrm_addr_equal(&x->id.daddr, daddr, family))
939 continue;
940
941 if ((mark & x->mark.m) != x->mark.v)
942 continue;
943 if (!xfrm_state_hold_rcu(x))
944 continue;
945 return x;
946 }
947
948 return NULL;
949}
950
951static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
952 const xfrm_address_t *daddr,
953 const xfrm_address_t *saddr,
954 u8 proto, unsigned short family)
955{
956 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
957 struct xfrm_state *x;
958
959 hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
960 if (x->props.family != family ||
961 x->id.proto != proto ||
962 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
963 !xfrm_addr_equal(&x->props.saddr, saddr, family))
964 continue;
965
966 if ((mark & x->mark.m) != x->mark.v)
967 continue;
968 if (!xfrm_state_hold_rcu(x))
969 continue;
970 return x;
971 }
972
973 return NULL;
974}
975
976static inline struct xfrm_state *
977__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
978{
979 struct net *net = xs_net(x);
980 u32 mark = x->mark.v & x->mark.m;
981
982 if (use_spi)
983 return __xfrm_state_lookup(net, mark, &x->id.daddr,
984 x->id.spi, x->id.proto, family);
985 else
986 return __xfrm_state_lookup_byaddr(net, mark,
987 &x->id.daddr,
988 &x->props.saddr,
989 x->id.proto, family);
990}
991
992static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
993{
994 if (have_hash_collision &&
995 (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
996 net->xfrm.state_num > net->xfrm.state_hmask)
997 schedule_work(&net->xfrm.state_hash_work);
998}
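
/* Illustrative example of the grow heuristic above: with state_hmask == 7
 * (8 buckets), inserting an 8th state into a bucket that already holds an
 * entry makes state_num (8) exceed state_hmask (7), so state_hash_work is
 * scheduled and xfrm_hash_resize() doubles the tables to 16 buckets.
 */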
999
1000static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
1001 const struct flowi *fl, unsigned short family,
1002 struct xfrm_state **best, int *acq_in_progress,
1003 int *error)
1004{
1005 /* Resolution logic:
1006 * 1. There is a valid state with matching selector. Done.
1007 * 2. Valid state with inappropriate selector. Skip.
1008 *
1009 * Entering area of "sysdeps".
1010 *
1011 * 3. If state is not valid, selector is temporary, it selects
1012 * only session which triggered previous resolution. Key
1013 * manager will do something to install a state with proper
1014 * selector.
1015 */
1016 if (x->km.state == XFRM_STATE_VALID) {
1017 if ((x->sel.family &&
		     (x->sel.family != family ||
		      !xfrm_selector_match(&x->sel, fl, family))) ||
		    !security_xfrm_state_pol_flow_match(x, pol, fl))
1021 return;
1022
1023 if (!*best ||
1024 (*best)->km.dying > x->km.dying ||
1025 ((*best)->km.dying == x->km.dying &&
1026 (*best)->curlft.add_time < x->curlft.add_time))
1027 *best = x;
1028 } else if (x->km.state == XFRM_STATE_ACQ) {
1029 *acq_in_progress = 1;
1030 } else if (x->km.state == XFRM_STATE_ERROR ||
1031 x->km.state == XFRM_STATE_EXPIRED) {
		if ((!x->sel.family ||
		     (x->sel.family == family &&
		      xfrm_selector_match(&x->sel, fl, family))) &&
		    security_xfrm_state_pol_flow_match(x, pol, fl))
1036 *error = -ESRCH;
1037 }
1038}
1039
1040struct xfrm_state *
1041xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1042 const struct flowi *fl, struct xfrm_tmpl *tmpl,
1043 struct xfrm_policy *pol, int *err,
1044 unsigned short family, u32 if_id)
1045{
1046 static xfrm_address_t saddr_wildcard = { };
1047 struct net *net = xp_net(pol);
1048 unsigned int h, h_wildcard;
1049 struct xfrm_state *x, *x0, *to_put;
1050 int acquire_in_progress = 0;
1051 int error = 0;
1052 struct xfrm_state *best = NULL;
1053 u32 mark = pol->mark.v & pol->mark.m;
1054 unsigned short encap_family = tmpl->encap_family;
1055 unsigned int sequence;
1056 struct km_event c;
1057
1058 to_put = NULL;
1059
	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

1062 rcu_read_lock();
1063 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
1064 hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
1065 if (x->props.family == encap_family &&
1066 x->props.reqid == tmpl->reqid &&
1067 (mark & x->mark.m) == x->mark.v &&
1068 x->if_id == if_id &&
1069 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1070 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
1071 tmpl->mode == x->props.mode &&
1072 tmpl->id.proto == x->id.proto &&
1073 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error);
1076 }
1077 if (best || acquire_in_progress)
1078 goto found;
1079
1080 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
1081 hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
1082 if (x->props.family == encap_family &&
1083 x->props.reqid == tmpl->reqid &&
1084 (mark & x->mark.m) == x->mark.v &&
1085 x->if_id == if_id &&
1086 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1087 xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
1088 tmpl->mode == x->props.mode &&
1089 tmpl->id.proto == x->id.proto &&
1090 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error);
1093 }
1094
1095found:
1096 x = best;
1097 if (!x && !error && !acquire_in_progress) {
1098 if (tmpl->id.spi &&
1099 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
1100 tmpl->id.proto, encap_family)) != NULL) {
1101 to_put = x0;
1102 error = -EEXIST;
1103 goto out;
1104 }
1105
1106 c.net = net;
1107 /* If the KMs have no listeners (yet...), avoid allocating an SA
1108 * for each and every packet - garbage collection might not
1109 * handle the flood.
1110 */
1111 if (!km_is_alive(&c)) {
1112 error = -ESRCH;
1113 goto out;
1114 }
1115
1116 x = xfrm_state_alloc(net);
1117 if (x == NULL) {
1118 error = -ENOMEM;
1119 goto out;
1120 }
1121 /* Initialize temporary state matching only
1122 * to current session. */
1123 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
1124 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
1125 x->if_id = if_id;
1126
1127 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
1128 if (error) {
1129 x->km.state = XFRM_STATE_DEAD;
1130 to_put = x;
1131 x = NULL;
1132 goto out;
1133 }
1134
1135 if (km_query(x, tmpl, pol) == 0) {
1136 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1137 x->km.state = XFRM_STATE_ACQ;
1138 list_add(&x->km.all, &net->xfrm.state_all);
1139 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1140 h = xfrm_src_hash(net, daddr, saddr, encap_family);
1141 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1142 if (x->id.spi) {
1143 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
1144 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1145 }
1146 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
			hrtimer_start(&x->mtimer,
				      ktime_set(net->xfrm.sysctl_acq_expires, 0),
				      HRTIMER_MODE_REL_SOFT);
			net->xfrm.state_num++;
1151 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1152 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1153 } else {
1154 x->km.state = XFRM_STATE_DEAD;
1155 to_put = x;
1156 x = NULL;
1157 error = -ESRCH;
1158 }
1159 }
1160out:
1161 if (x) {
1162 if (!xfrm_state_hold_rcu(x)) {
1163 *err = -EAGAIN;
1164 x = NULL;
1165 }
1166 } else {
1167 *err = acquire_in_progress ? -EAGAIN : error;
1168 }
1169 rcu_read_unlock();
1170 if (to_put)
1171 xfrm_state_put(to_put);
1172
	if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
		*err = -EAGAIN;
1175 if (x) {
1176 xfrm_state_put(x);
1177 x = NULL;
1178 }
1179 }
1180
1181 return x;
1182}
1183
1184struct xfrm_state *
1185xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1186 xfrm_address_t *daddr, xfrm_address_t *saddr,
1187 unsigned short family, u8 mode, u8 proto, u32 reqid)
1188{
1189 unsigned int h;
1190 struct xfrm_state *rx = NULL, *x = NULL;
1191
1192 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1193 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1194 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1195 if (x->props.family == family &&
1196 x->props.reqid == reqid &&
1197 (mark & x->mark.m) == x->mark.v &&
1198 x->if_id == if_id &&
1199 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1200 xfrm_state_addr_check(x, daddr, saddr, family) &&
1201 mode == x->props.mode &&
1202 proto == x->id.proto &&
1203 x->km.state == XFRM_STATE_VALID) {
1204 rx = x;
1205 break;
1206 }
1207 }
1208
1209 if (rx)
1210 xfrm_state_hold(rx);
1211 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1212
1213
1214 return rx;
1215}
1216EXPORT_SYMBOL(xfrm_stateonly_find);
1217
1218struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1219 unsigned short family)
1220{
1221 struct xfrm_state *x;
1222 struct xfrm_state_walk *w;
1223
1224 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1225 list_for_each_entry(w, &net->xfrm.state_all, all) {
1226 x = container_of(w, struct xfrm_state, km);
1227 if (x->props.family != family ||
1228 x->id.spi != spi)
1229 continue;
1230
1231 xfrm_state_hold(x);
1232 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1233 return x;
1234 }
1235 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1236 return NULL;
1237}
1238EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1239
1240static void __xfrm_state_insert(struct xfrm_state *x)
1241{
1242 struct net *net = xs_net(x);
1243 unsigned int h;
1244
1245 list_add(&x->km.all, &net->xfrm.state_all);
1246
1247 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1248 x->props.reqid, x->props.family);
1249 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1250
1251 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1252 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1253
1254 if (x->id.spi) {
1255 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1256 x->props.family);
1257
1258 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1259 }
1260
	hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
	if (x->replay_maxage)
1263 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1264
1265 net->xfrm.state_num++;
1266
1267 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1268}
1269
1270/* net->xfrm.xfrm_state_lock is held */
1271static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1272{
1273 struct net *net = xs_net(xnew);
1274 unsigned short family = xnew->props.family;
1275 u32 reqid = xnew->props.reqid;
1276 struct xfrm_state *x;
1277 unsigned int h;
1278 u32 mark = xnew->mark.v & xnew->mark.m;
1279 u32 if_id = xnew->if_id;
1280
1281 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1282 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1283 if (x->props.family == family &&
1284 x->props.reqid == reqid &&
1285 x->if_id == if_id &&
1286 (mark & x->mark.m) == x->mark.v &&
1287 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1288 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1289 x->genid++;
1290 }
1291}
1292
1293void xfrm_state_insert(struct xfrm_state *x)
1294{
1295 struct net *net = xs_net(x);
1296
1297 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1298 __xfrm_state_bump_genids(x);
1299 __xfrm_state_insert(x);
1300 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1301}
1302EXPORT_SYMBOL(xfrm_state_insert);
1303
1304/* net->xfrm.xfrm_state_lock is held */
1305static struct xfrm_state *__find_acq_core(struct net *net,
1306 const struct xfrm_mark *m,
1307 unsigned short family, u8 mode,
1308 u32 reqid, u32 if_id, u8 proto,
1309 const xfrm_address_t *daddr,
1310 const xfrm_address_t *saddr,
1311 int create)
1312{
1313 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1314 struct xfrm_state *x;
1315 u32 mark = m->v & m->m;
1316
1317 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1318 if (x->props.reqid != reqid ||
1319 x->props.mode != mode ||
1320 x->props.family != family ||
1321 x->km.state != XFRM_STATE_ACQ ||
1322 x->id.spi != 0 ||
1323 x->id.proto != proto ||
1324 (mark & x->mark.m) != x->mark.v ||
1325 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1326 !xfrm_addr_equal(&x->props.saddr, saddr, family))
1327 continue;
1328
1329 xfrm_state_hold(x);
1330 return x;
1331 }
1332
1333 if (!create)
1334 return NULL;
1335
1336 x = xfrm_state_alloc(net);
1337 if (likely(x)) {
1338 switch (family) {
1339 case AF_INET:
1340 x->sel.daddr.a4 = daddr->a4;
1341 x->sel.saddr.a4 = saddr->a4;
1342 x->sel.prefixlen_d = 32;
1343 x->sel.prefixlen_s = 32;
1344 x->props.saddr.a4 = saddr->a4;
1345 x->id.daddr.a4 = daddr->a4;
1346 break;
1347
1348 case AF_INET6:
1349 x->sel.daddr.in6 = daddr->in6;
1350 x->sel.saddr.in6 = saddr->in6;
1351 x->sel.prefixlen_d = 128;
1352 x->sel.prefixlen_s = 128;
1353 x->props.saddr.in6 = saddr->in6;
1354 x->id.daddr.in6 = daddr->in6;
1355 break;
1356 }
1357
1358 x->km.state = XFRM_STATE_ACQ;
1359 x->id.proto = proto;
1360 x->props.family = family;
1361 x->props.mode = mode;
1362 x->props.reqid = reqid;
1363 x->if_id = if_id;
1364 x->mark.v = m->v;
1365 x->mark.m = m->m;
1366 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1367 xfrm_state_hold(x);
		hrtimer_start(&x->mtimer,
			      ktime_set(net->xfrm.sysctl_acq_expires, 0),
			      HRTIMER_MODE_REL_SOFT);
		list_add(&x->km.all, &net->xfrm.state_all);
1372 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1373 h = xfrm_src_hash(net, daddr, saddr, family);
1374 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1375
1376 net->xfrm.state_num++;
1377
1378 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1379 }
1380
1381 return x;
1382}
1383
1384static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1385
1386int xfrm_state_add(struct xfrm_state *x)
1387{
1388 struct net *net = xs_net(x);
1389 struct xfrm_state *x1, *to_put;
1390 int family;
1391 int err;
1392 u32 mark = x->mark.v & x->mark.m;
1393 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1394
1395 family = x->props.family;
1396
1397 to_put = NULL;
1398
1399 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1400
1401 x1 = __xfrm_state_locate(x, use_spi, family);
1402 if (x1) {
1403 to_put = x1;
1404 x1 = NULL;
1405 err = -EEXIST;
1406 goto out;
1407 }
1408
1409 if (use_spi && x->km.seq) {
1410 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1411 if (x1 && ((x1->id.proto != x->id.proto) ||
1412 !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1413 to_put = x1;
1414 x1 = NULL;
1415 }
1416 }
1417
1418 if (use_spi && !x1)
1419 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1420 x->props.reqid, x->if_id, x->id.proto,
1421 &x->id.daddr, &x->props.saddr, 0);
1422
1423 __xfrm_state_bump_genids(x);
1424 __xfrm_state_insert(x);
1425 err = 0;
1426
1427out:
1428 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1429
1430 if (x1) {
1431 xfrm_state_delete(x1);
1432 xfrm_state_put(x1);
1433 }
1434
1435 if (to_put)
1436 xfrm_state_put(to_put);
1437
1438 return err;
1439}
1440EXPORT_SYMBOL(xfrm_state_add);
1441
1442#ifdef CONFIG_XFRM_MIGRATE
static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
1444{
1445 struct xfrm_user_sec_ctx *uctx;
1446 int size = sizeof(*uctx) + security->ctx_len;
1447 int err;
1448
1449 uctx = kmalloc(size, GFP_KERNEL);
1450 if (!uctx)
1451 return -ENOMEM;
1452
1453 uctx->exttype = XFRMA_SEC_CTX;
1454 uctx->len = size;
1455 uctx->ctx_doi = security->ctx_doi;
1456 uctx->ctx_alg = security->ctx_alg;
1457 uctx->ctx_len = security->ctx_len;
1458 memcpy(uctx + 1, security->ctx_str, security->ctx_len);
1459 err = security_xfrm_state_alloc(x, uctx);
1460 kfree(uctx);
1461 if (err)
1462 return err;
1463
1464 return 0;
1465}
1466
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
1468 struct xfrm_encap_tmpl *encap)
1469{
1470 struct net *net = xs_net(orig);
1471 struct xfrm_state *x = xfrm_state_alloc(net);
1472 if (!x)
1473 goto out;
1474
1475 memcpy(&x->id, &orig->id, sizeof(x->id));
1476 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1477 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1478 x->props.mode = orig->props.mode;
1479 x->props.replay_window = orig->props.replay_window;
1480 x->props.reqid = orig->props.reqid;
1481 x->props.family = orig->props.family;
1482 x->props.saddr = orig->props.saddr;
1483
1484 if (orig->aalg) {
1485 x->aalg = xfrm_algo_auth_clone(orig->aalg);
1486 if (!x->aalg)
1487 goto error;
1488 }
1489 x->props.aalgo = orig->props.aalgo;
1490
1491 if (orig->aead) {
1492 x->aead = xfrm_algo_aead_clone(orig->aead);
1493 x->geniv = orig->geniv;
1494 if (!x->aead)
1495 goto error;
1496 }
1497 if (orig->ealg) {
1498 x->ealg = xfrm_algo_clone(orig->ealg);
1499 if (!x->ealg)
1500 goto error;
1501 }
1502 x->props.ealgo = orig->props.ealgo;
1503
1504 if (orig->calg) {
1505 x->calg = xfrm_algo_clone(orig->calg);
1506 if (!x->calg)
1507 goto error;
1508 }
1509 x->props.calgo = orig->props.calgo;
1510
1511 if (encap || orig->encap) {
1512 if (encap)
1513 x->encap = kmemdup(encap, sizeof(*x->encap),
1514 GFP_KERNEL);
1515 else
1516 x->encap = kmemdup(orig->encap, sizeof(*x->encap),
1517 GFP_KERNEL);
1518
1519 if (!x->encap)
1520 goto error;
1521 }
1522
	if (orig->security)
		if (clone_security(x, orig->security))
			goto error;

	if (orig->coaddr) {
1528 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1529 GFP_KERNEL);
1530 if (!x->coaddr)
1531 goto error;
1532 }
1533
1534 if (orig->replay_esn) {
1535 if (xfrm_replay_clone(x, orig))
1536 goto error;
1537 }
1538
1539 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
	memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));

1542 if (xfrm_init_state(x) < 0)
1543 goto error;
1544
1545 x->props.flags = orig->props.flags;
1546 x->props.extra_flags = orig->props.extra_flags;
1547
1548 x->if_id = orig->if_id;
1549 x->tfcpad = orig->tfcpad;
1550 x->replay_maxdiff = orig->replay_maxdiff;
1551 x->replay_maxage = orig->replay_maxage;
	memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
	x->km.state = orig->km.state;
1554 x->km.seq = orig->km.seq;
1555 x->replay = orig->replay;
1556 x->preplay = orig->preplay;
1557
1558 return x;
1559
1560 error:
1561 xfrm_state_put(x);
1562out:
1563 return NULL;
1564}
1565
1566struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
1567{
1568 unsigned int h;
1569 struct xfrm_state *x = NULL;
1570
1571 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1572
1573 if (m->reqid) {
1574 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
1575 m->reqid, m->old_family);
1576 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1577 if (x->props.mode != m->mode ||
1578 x->id.proto != m->proto)
1579 continue;
1580 if (m->reqid && x->props.reqid != m->reqid)
1581 continue;
1582 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1583 m->old_family) ||
1584 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1585 m->old_family))
1586 continue;
1587 xfrm_state_hold(x);
1588 break;
1589 }
1590 } else {
1591 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
1592 m->old_family);
1593 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
1594 if (x->props.mode != m->mode ||
1595 x->id.proto != m->proto)
1596 continue;
1597 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1598 m->old_family) ||
1599 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1600 m->old_family))
1601 continue;
1602 xfrm_state_hold(x);
1603 break;
1604 }
1605 }
1606
1607 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1608
1609 return x;
1610}
1611EXPORT_SYMBOL(xfrm_migrate_state_find);
1612
1613struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1614 struct xfrm_migrate *m,
1615 struct xfrm_encap_tmpl *encap)
1616{
1617 struct xfrm_state *xc;
1618
1619 xc = xfrm_state_clone(x, encap);
1620 if (!xc)
1621 return NULL;
1622
1623 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1624 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1625
1626 /* add state */
1627 if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* care is needed when the destination address of the
		   state is to be updated as it is part of the triplet */
1630 xfrm_state_insert(xc);
1631 } else {
1632 if (xfrm_state_add(xc) < 0)
1633 goto error;
1634 }
1635
1636 return xc;
1637error:
1638 xfrm_state_put(xc);
1639 return NULL;
1640}
1641EXPORT_SYMBOL(xfrm_state_migrate);
1642#endif
1643
1644int xfrm_state_update(struct xfrm_state *x)
1645{
1646 struct xfrm_state *x1, *to_put;
1647 int err;
1648 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1649 struct net *net = xs_net(x);
1650
1651 to_put = NULL;
1652
1653 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1654 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1655
1656 err = -ESRCH;
1657 if (!x1)
1658 goto out;
1659
1660 if (xfrm_state_kern(x1)) {
1661 to_put = x1;
1662 err = -EEXIST;
1663 goto out;
1664 }
1665
1666 if (x1->km.state == XFRM_STATE_ACQ) {
1667 __xfrm_state_insert(x);
1668 x = NULL;
1669 }
1670 err = 0;
1671
1672out:
1673 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1674
1675 if (to_put)
1676 xfrm_state_put(to_put);
1677
1678 if (err)
1679 return err;
1680
1681 if (!x) {
1682 xfrm_state_delete(x1);
1683 xfrm_state_put(x1);
1684 return 0;
1685 }
1686
1687 err = -EINVAL;
1688 spin_lock_bh(&x1->lock);
1689 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1690 if (x->encap && x1->encap &&
1691 x->encap->encap_type == x1->encap->encap_type)
1692 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1693 else if (x->encap || x1->encap)
1694 goto fail;
1695
1696 if (x->coaddr && x1->coaddr) {
1697 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1698 }
1699 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1700 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1701 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1702 x1->km.dying = 0;
1703
		hrtimer_start(&x1->mtimer, ktime_set(1, 0),
			      HRTIMER_MODE_REL_SOFT);
		if (x1->curlft.use_time)
1707 xfrm_state_check_expire(x1);
1708
1709 if (x->props.smark.m || x->props.smark.v || x->if_id) {
1710 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1711
1712 if (x->props.smark.m || x->props.smark.v)
1713 x1->props.smark = x->props.smark;
1714
1715 if (x->if_id)
1716 x1->if_id = x->if_id;
1717
1718 __xfrm_state_bump_genids(x1);
1719 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1720 }
1721
1722 err = 0;
1723 x->km.state = XFRM_STATE_DEAD;
1724 __xfrm_state_put(x);
1725 }
1726
1727fail:
1728 spin_unlock_bh(&x1->lock);
1729
1730 xfrm_state_put(x1);
1731
1732 return err;
1733}
1734EXPORT_SYMBOL(xfrm_state_update);
1735
1736int xfrm_state_check_expire(struct xfrm_state *x)
1737{
1738 if (!x->curlft.use_time)
1739 x->curlft.use_time = ktime_get_real_seconds();
1740
1741 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1742 x->curlft.packets >= x->lft.hard_packet_limit) {
1743 x->km.state = XFRM_STATE_EXPIRED;
		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
		return -EINVAL;
1746 }
1747
1748 if (!x->km.dying &&
1749 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1750 x->curlft.packets >= x->lft.soft_packet_limit)) {
1751 x->km.dying = 1;
1752 km_state_expired(x, 0, 0);
1753 }
1754 return 0;
1755}
1756EXPORT_SYMBOL(xfrm_state_check_expire);
1757
1758struct xfrm_state *
1759xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
1760 u8 proto, unsigned short family)
1761{
1762 struct xfrm_state *x;
1763
1764 rcu_read_lock();
1765 x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1766 rcu_read_unlock();
1767 return x;
1768}
1769EXPORT_SYMBOL(xfrm_state_lookup);
1770
1771struct xfrm_state *
1772xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1773 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1774 u8 proto, unsigned short family)
1775{
1776 struct xfrm_state *x;
1777
1778 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1779 x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1780 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1781 return x;
1782}
1783EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1784
1785struct xfrm_state *
1786xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
1787 u32 if_id, u8 proto, const xfrm_address_t *daddr,
1788 const xfrm_address_t *saddr, int create, unsigned short family)
1789{
1790 struct xfrm_state *x;
1791
1792 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1793 x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
1794 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1795
1796 return x;
1797}
1798EXPORT_SYMBOL(xfrm_find_acq);
1799
1800#ifdef CONFIG_XFRM_SUB_POLICY
#if IS_ENABLED(CONFIG_IPV6)
1802/* distribution counting sort function for xfrm_state and xfrm_tmpl */
1803static void
1804__xfrm6_sort(void **dst, void **src, int n,
1805 int (*cmp)(const void *p), int maxclass)
1806{
1807 int count[XFRM_MAX_DEPTH] = { };
1808 int class[XFRM_MAX_DEPTH];
1809 int i;
1810
1811 for (i = 0; i < n; i++) {
1812 int c = cmp(src[i]);
1813
1814 class[i] = c;
1815 count[c]++;
1816 }
1817
1818 for (i = 2; i < maxclass; i++)
1819 count[i] += count[i - 1];
1820
1821 for (i = 0; i < n; i++) {
1822 dst[count[class[i] - 1]++] = src[i];
1823 src[i] = NULL;
1824 }
1825}
1826
1827/* Rule for xfrm_state:
1828 *
1829 * rule 1: select IPsec transport except AH
1830 * rule 2: select MIPv6 RO or inbound trigger
1831 * rule 3: select IPsec transport AH
1832 * rule 4: select IPsec tunnel
1833 * rule 5: others
1834 */
1835static int __xfrm6_state_sort_cmp(const void *p)
1836{
1837 const struct xfrm_state *v = p;
1838
1839 switch (v->props.mode) {
1840 case XFRM_MODE_TRANSPORT:
1841 if (v->id.proto != IPPROTO_AH)
1842 return 1;
1843 else
1844 return 3;
1845#if IS_ENABLED(CONFIG_IPV6_MIP6)
1846 case XFRM_MODE_ROUTEOPTIMIZATION:
1847 case XFRM_MODE_IN_TRIGGER:
1848 return 2;
1849#endif
1850 case XFRM_MODE_TUNNEL:
1851 case XFRM_MODE_BEET:
1852 return 4;
1853 }
1854 return 5;
1855}
1856
1857/* Rule for xfrm_tmpl:
1858 *
1859 * rule 1: select IPsec transport
1860 * rule 2: select MIPv6 RO or inbound trigger
1861 * rule 3: select IPsec tunnel
1862 * rule 4: others
1863 */
1864static int __xfrm6_tmpl_sort_cmp(const void *p)
1865{
1866 const struct xfrm_tmpl *v = p;
1867
1868 switch (v->mode) {
1869 case XFRM_MODE_TRANSPORT:
1870 return 1;
1871#if IS_ENABLED(CONFIG_IPV6_MIP6)
1872 case XFRM_MODE_ROUTEOPTIMIZATION:
1873 case XFRM_MODE_IN_TRIGGER:
1874 return 2;
1875#endif
1876 case XFRM_MODE_TUNNEL:
1877 case XFRM_MODE_BEET:
1878 return 3;
1879 }
1880 return 4;
1881}
1882#else
1883static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; }
1884static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; }
1885
1886static inline void
1887__xfrm6_sort(void **dst, void **src, int n,
1888 int (*cmp)(const void *p), int maxclass)
{
	int i;

	for (i = 0; i < n; i++)
1893 dst[i] = src[i];
1894}
1895#endif /* CONFIG_IPV6 */
1896
1897void
1898xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1899 unsigned short family)
1900{
1901 int i;
1902
1903 if (family == AF_INET6)
1904 __xfrm6_sort((void **)dst, (void **)src, n,
1905 __xfrm6_tmpl_sort_cmp, 5);
	else
		for (i = 0; i < n; i++)
			dst[i] = src[i];
}

void
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1913 unsigned short family)
1914{
1915 int i;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001916
David Brazdil0f672f62019-12-10 10:32:29 +00001917 if (family == AF_INET6)
1918 __xfrm6_sort((void **)dst, (void **)src, n,
1919 __xfrm6_state_sort_cmp, 6);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001920 else
1921 for (i = 0; i < n; i++)
1922 dst[i] = src[i];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001923}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001924#endif
1925
1926/* Brute force here: scan all states rather than building a resolution list. */
1927
1928static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1929{
1930 int i;
1931
1932 for (i = 0; i <= net->xfrm.state_hmask; i++) {
1933 struct xfrm_state *x;
1934
1935 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1936 if (x->km.seq == seq &&
1937 (mark & x->mark.m) == x->mark.v &&
1938 x->km.state == XFRM_STATE_ACQ) {
1939 xfrm_state_hold(x);
1940 return x;
1941 }
1942 }
1943 }
1944 return NULL;
1945}
1946
1947struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1948{
1949 struct xfrm_state *x;
1950
1951 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1952 x = __xfrm_find_acq_byseq(net, mark, seq);
1953 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1954 return x;
1955}
1956EXPORT_SYMBOL(xfrm_find_acq_byseq);
1957
1958u32 xfrm_get_acqseq(void)
1959{
1960 u32 res;
1961 static atomic_t acqseq;
1962
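	/* Retry on wrap so that 0 is never handed out as an acquire seq. */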
1963 do {
1964 res = atomic_inc_return(&acqseq);
1965 } while (!res);
1966
1967 return res;
1968}
1969EXPORT_SYMBOL(xfrm_get_acqseq);
1970
1971int verify_spi_info(u8 proto, u32 min, u32 max)
1972{
1973 switch (proto) {
1974 case IPPROTO_AH:
1975 case IPPROTO_ESP:
1976 break;
1977
1978 case IPPROTO_COMP:
1979	/* IPCOMP SPI is 16 bits. */
1980 if (max >= 0x10000)
1981 return -EINVAL;
1982 break;
1983
1984 default:
1985 return -EINVAL;
1986 }
1987
1988 if (min > max)
1989 return -EINVAL;
1990
1991 return 0;
1992}
1993EXPORT_SYMBOL(verify_spi_info);
1994
1995int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1996{
1997 struct net *net = xs_net(x);
1998 unsigned int h;
1999 struct xfrm_state *x0;
2000 int err = -ENOENT;
2001 __be32 minspi = htonl(low);
2002 __be32 maxspi = htonl(high);
Olivier Deprez0e641232021-09-23 10:07:05 +02002003 __be32 newspi = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002004 u32 mark = x->mark.v & x->mark.m;
2005
2006 spin_lock_bh(&x->lock);
2007 if (x->km.state == XFRM_STATE_DEAD)
2008 goto unlock;
2009
2010 err = 0;
2011 if (x->id.spi)
2012 goto unlock;
2013
2014 err = -ENOENT;
2015
2016 if (minspi == maxspi) {
2017 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
2018 if (x0) {
2019 xfrm_state_put(x0);
2020 goto unlock;
2021 }
Olivier Deprez0e641232021-09-23 10:07:05 +02002022 newspi = minspi;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002023 } else {
2024 u32 spi = 0;
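		/* Probe up to (high - low + 1) random SPIs within the
		 * requested range and take the first one not already in use.
		 */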
2025		for (h = 0; h < high - low + 1; h++) {
2026			spi = low + prandom_u32() % (high - low + 1);
2027 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
2028 if (x0 == NULL) {
Olivier Deprez0e641232021-09-23 10:07:05 +02002029 newspi = htonl(spi);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002030 break;
2031 }
2032 xfrm_state_put(x0);
2033 }
2034 }
Olivier Deprez0e641232021-09-23 10:07:05 +02002035 if (newspi) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002036 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Olivier Deprez0e641232021-09-23 10:07:05 +02002037 x->id.spi = newspi;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002038 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
2039 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
2040 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2041
2042 err = 0;
2043 }
2044
2045unlock:
2046 spin_unlock_bh(&x->lock);
2047
2048 return err;
2049}
2050EXPORT_SYMBOL(xfrm_alloc_spi);
2051
2052static bool __xfrm_state_filter_match(struct xfrm_state *x,
2053 struct xfrm_address_filter *filter)
2054{
2055 if (filter) {
2056 if ((filter->family == AF_INET ||
2057 filter->family == AF_INET6) &&
2058 x->props.family != filter->family)
2059 return false;
2060
2061 return addr_match(&x->props.saddr, &filter->saddr,
2062 filter->splen) &&
2063 addr_match(&x->id.daddr, &filter->daddr,
2064 filter->dplen);
2065 }
2066 return true;
2067}
2068
2069int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
2070 int (*func)(struct xfrm_state *, int, void*),
2071 void *data)
2072{
2073 struct xfrm_state *state;
2074 struct xfrm_state_walk *x;
2075 int err = 0;
2076
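	/* A non-zero seq with an unlinked walker means a previous walk
	 * already ran to completion; there is nothing left to report.
	 */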
2077 if (walk->seq != 0 && list_empty(&walk->all))
2078 return 0;
2079
2080 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2081 if (list_empty(&walk->all))
2082 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
2083 else
2084 x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
2085 list_for_each_entry_from(x, &net->xfrm.state_all, all) {
2086 if (x->state == XFRM_STATE_DEAD)
2087 continue;
2088 state = container_of(x, struct xfrm_state, km);
2089 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
2090 continue;
2091 if (!__xfrm_state_filter_match(state, walk->filter))
2092 continue;
2093 err = func(state, walk->seq, data);
2094 if (err) {
2095 list_move_tail(&walk->all, &x->all);
2096 goto out;
2097 }
2098 walk->seq++;
2099 }
2100 if (walk->seq == 0) {
2101 err = -ENOENT;
2102 goto out;
2103 }
2104 list_del_init(&walk->all);
2105out:
2106 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2107 return err;
2108}
2109EXPORT_SYMBOL(xfrm_state_walk);
2110
2111void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
2112 struct xfrm_address_filter *filter)
2113{
2114 INIT_LIST_HEAD(&walk->all);
2115 walk->proto = proto;
2116 walk->state = XFRM_STATE_DEAD;
2117 walk->seq = 0;
2118 walk->filter = filter;
2119}
2120EXPORT_SYMBOL(xfrm_state_walk_init);
2121
2122void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
2123{
2124 kfree(walk->filter);
2125
2126 if (list_empty(&walk->all))
2127 return;
2128
2129 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2130 list_del(&walk->all);
2131 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2132}
2133EXPORT_SYMBOL(xfrm_state_walk_done);
2134
2135static void xfrm_replay_timer_handler(struct timer_list *t)
2136{
2137 struct xfrm_state *x = from_timer(x, t, rtimer);
2138
2139 spin_lock(&x->lock);
2140
2141 if (x->km.state == XFRM_STATE_VALID) {
2142 if (xfrm_aevent_is_on(xs_net(x)))
2143 x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
2144 else
2145 x->xflags |= XFRM_TIME_DEFER;
2146 }
2147
2148 spin_unlock(&x->lock);
2149}
2150
2151static LIST_HEAD(xfrm_km_list);
2152
2153void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2154{
2155 struct xfrm_mgr *km;
2156
2157 rcu_read_lock();
2158 list_for_each_entry_rcu(km, &xfrm_km_list, list)
2159 if (km->notify_policy)
2160 km->notify_policy(xp, dir, c);
2161 rcu_read_unlock();
2162}
2163
2164void km_state_notify(struct xfrm_state *x, const struct km_event *c)
2165{
2166 struct xfrm_mgr *km;
2167 rcu_read_lock();
2168 list_for_each_entry_rcu(km, &xfrm_km_list, list)
2169 if (km->notify)
2170 km->notify(x, c);
2171 rcu_read_unlock();
2172}
2173
2174EXPORT_SYMBOL(km_policy_notify);
2175EXPORT_SYMBOL(km_state_notify);
2176
2177void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
2178{
2179 struct km_event c;
2180
2181 c.data.hard = hard;
2182 c.portid = portid;
2183 c.event = XFRM_MSG_EXPIRE;
2184 km_state_notify(x, &c);
2185}
2186
2187EXPORT_SYMBOL(km_state_expired);
2188/*
2189 * Send the acquire to every registered key manager regardless of
2190 * individual failures; a single success is sufficient.
2191 */
2192int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
2193{
2194 int err = -EINVAL, acqret;
2195 struct xfrm_mgr *km;
2196
2197 rcu_read_lock();
2198 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2199 acqret = km->acquire(x, t, pol);
2200 if (!acqret)
2201 err = acqret;
2202 }
2203 rcu_read_unlock();
2204 return err;
2205}
2206EXPORT_SYMBOL(km_query);
2207
2208int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
2209{
2210 int err = -EINVAL;
2211 struct xfrm_mgr *km;
2212
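	/* The first key manager that accepts the new mapping wins. */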
2213 rcu_read_lock();
2214 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2215 if (km->new_mapping)
2216 err = km->new_mapping(x, ipaddr, sport);
2217 if (!err)
2218 break;
2219 }
2220 rcu_read_unlock();
2221 return err;
2222}
2223EXPORT_SYMBOL(km_new_mapping);
2224
2225void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
2226{
2227 struct km_event c;
2228
2229 c.data.hard = hard;
2230 c.portid = portid;
2231 c.event = XFRM_MSG_POLEXPIRE;
2232 km_policy_notify(pol, dir, &c);
2233}
2234EXPORT_SYMBOL(km_policy_expired);
2235
2236#ifdef CONFIG_XFRM_MIGRATE
2237int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2238 const struct xfrm_migrate *m, int num_migrate,
2239 const struct xfrm_kmaddress *k,
2240 const struct xfrm_encap_tmpl *encap)
2241{
2242 int err = -EINVAL;
2243 int ret;
2244 struct xfrm_mgr *km;
2245
2246 rcu_read_lock();
2247 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2248 if (km->migrate) {
2249 ret = km->migrate(sel, dir, type, m, num_migrate, k,
2250 encap);
2251 if (!ret)
2252 err = ret;
2253 }
2254 }
2255 rcu_read_unlock();
2256 return err;
2257}
2258EXPORT_SYMBOL(km_migrate);
2259#endif
2260
2261int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
2262{
2263 int err = -EINVAL;
2264 int ret;
2265 struct xfrm_mgr *km;
2266
2267 rcu_read_lock();
2268 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2269 if (km->report) {
2270 ret = km->report(net, proto, sel, addr);
2271 if (!ret)
2272 err = ret;
2273 }
2274 }
2275 rcu_read_unlock();
2276 return err;
2277}
2278EXPORT_SYMBOL(km_report);
2279
David Brazdil0f672f62019-12-10 10:32:29 +00002280static bool km_is_alive(const struct km_event *c)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002281{
2282 struct xfrm_mgr *km;
2283 bool is_alive = false;
2284
2285 rcu_read_lock();
2286 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2287 if (km->is_alive && km->is_alive(c)) {
2288 is_alive = true;
2289 break;
2290 }
2291 }
2292 rcu_read_unlock();
2293
2294 return is_alive;
2295}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002296
2297int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
2298{
2299 int err;
2300 u8 *data;
2301 struct xfrm_mgr *km;
2302 struct xfrm_policy *pol = NULL;
2303
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002304 if (in_compat_syscall())
2305 return -EOPNOTSUPP;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002306
2307 if (!optval && !optlen) {
2308 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2309 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2310 __sk_dst_reset(sk);
2311 return 0;
2312 }
2313
2314 if (optlen <= 0 || optlen > PAGE_SIZE)
2315 return -EMSGSIZE;
2316
2317 data = memdup_user(optval, optlen);
2318 if (IS_ERR(data))
2319 return PTR_ERR(data);
2320
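	/* Ask each key manager to translate the opaque policy blob; on
	 * success err is expected to hold the policy direction used below.
	 */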
2321 err = -EINVAL;
2322 rcu_read_lock();
2323 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2324 pol = km->compile_policy(sk, optname, data,
2325 optlen, &err);
2326 if (err >= 0)
2327 break;
2328 }
2329 rcu_read_unlock();
2330
2331 if (err >= 0) {
2332 xfrm_sk_policy_insert(sk, err, pol);
2333 xfrm_pol_put(pol);
2334 __sk_dst_reset(sk);
2335 err = 0;
2336 }
2337
2338 kfree(data);
2339 return err;
2340}
2341EXPORT_SYMBOL(xfrm_user_policy);
2342
2343static DEFINE_SPINLOCK(xfrm_km_lock);
2344
2345int xfrm_register_km(struct xfrm_mgr *km)
2346{
2347 spin_lock_bh(&xfrm_km_lock);
2348 list_add_tail_rcu(&km->list, &xfrm_km_list);
2349 spin_unlock_bh(&xfrm_km_lock);
2350 return 0;
2351}
2352EXPORT_SYMBOL(xfrm_register_km);
2353
2354int xfrm_unregister_km(struct xfrm_mgr *km)
2355{
2356 spin_lock_bh(&xfrm_km_lock);
2357 list_del_rcu(&km->list);
2358 spin_unlock_bh(&xfrm_km_lock);
2359 synchronize_rcu();
2360 return 0;
2361}
2362EXPORT_SYMBOL(xfrm_unregister_km);
2363
2364int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
2365{
2366 int err = 0;
2367
2368 if (WARN_ON(afinfo->family >= NPROTO))
2369 return -EAFNOSUPPORT;
2370
2371 spin_lock_bh(&xfrm_state_afinfo_lock);
2372 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
2373 err = -EEXIST;
2374 else
2375 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
2376 spin_unlock_bh(&xfrm_state_afinfo_lock);
2377 return err;
2378}
2379EXPORT_SYMBOL(xfrm_state_register_afinfo);
2380
2381int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
2382{
2383 int err = 0, family = afinfo->family;
2384
2385 if (WARN_ON(family >= NPROTO))
2386 return -EAFNOSUPPORT;
2387
2388 spin_lock_bh(&xfrm_state_afinfo_lock);
2389 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
2390 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
2391 err = -EINVAL;
2392 else
2393 RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
2394 }
2395 spin_unlock_bh(&xfrm_state_afinfo_lock);
2396 synchronize_rcu();
2397 return err;
2398}
2399EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
2400
2401struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
2402{
2403 if (unlikely(family >= NPROTO))
2404 return NULL;
2405
2406 return rcu_dereference(xfrm_state_afinfo[family]);
2407}
David Brazdil0f672f62019-12-10 10:32:29 +00002408EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002409
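/* On success this returns with the RCU read lock held; the caller must
 * release it with rcu_read_unlock() once done with the returned afinfo.
 */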
2410struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
2411{
2412 struct xfrm_state_afinfo *afinfo;
2413 if (unlikely(family >= NPROTO))
2414 return NULL;
2415 rcu_read_lock();
2416 afinfo = rcu_dereference(xfrm_state_afinfo[family]);
2417 if (unlikely(!afinfo))
2418 rcu_read_unlock();
2419 return afinfo;
2420}
2421
2422void xfrm_flush_gc(void)
2423{
2424 flush_work(&xfrm_state_gc_work);
2425}
2426EXPORT_SYMBOL(xfrm_flush_gc);
2427
2428/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
2429void xfrm_state_delete_tunnel(struct xfrm_state *x)
2430{
2431 if (x->tunnel) {
2432 struct xfrm_state *t = x->tunnel;
2433
2434 if (atomic_read(&t->tunnel_users) == 2)
2435 xfrm_state_delete(t);
2436 atomic_dec(&t->tunnel_users);
David Brazdil0f672f62019-12-10 10:32:29 +00002437 xfrm_state_put_sync(t);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002438 x->tunnel = NULL;
2439 }
2440}
2441EXPORT_SYMBOL(xfrm_state_delete_tunnel);
2442
Olivier Deprez0e641232021-09-23 10:07:05 +02002443u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002444{
2445 const struct xfrm_type *type = READ_ONCE(x->type);
David Brazdil0f672f62019-12-10 10:32:29 +00002446 struct crypto_aead *aead;
2447 u32 blksize, net_adj = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002448
David Brazdil0f672f62019-12-10 10:32:29 +00002449 if (x->km.state != XFRM_STATE_VALID ||
2450 !type || type->proto != IPPROTO_ESP)
2451 return mtu - x->props.header_len;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002452
David Brazdil0f672f62019-12-10 10:32:29 +00002453 aead = x->data;
2454 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
2455
2456 switch (x->props.mode) {
2457 case XFRM_MODE_TRANSPORT:
2458 case XFRM_MODE_BEET:
2459 if (x->props.family == AF_INET)
2460 net_adj = sizeof(struct iphdr);
2461 else if (x->props.family == AF_INET6)
2462 net_adj = sizeof(struct ipv6hdr);
2463 break;
2464 case XFRM_MODE_TUNNEL:
2465 break;
2466 default:
2467 WARN_ON_ONCE(1);
2468 break;
2469 }
2470
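	/* Round the usable payload down to the cipher block size, re-add the
	 * network header adjustment and reserve two bytes for the ESP
	 * trailer (pad length + next header).
	 */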
2471 return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
2472 net_adj) & ~(blksize - 1)) + net_adj - 2;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002473}
Olivier Deprez0e641232021-09-23 10:07:05 +02002474EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
2475
2476u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
2477{
2478 mtu = __xfrm_state_mtu(x, mtu);
2479
2480 if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
2481 return IPV6_MIN_MTU;
2482
2483 return mtu;
2484}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002485
2486int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
2487{
David Brazdil0f672f62019-12-10 10:32:29 +00002488 const struct xfrm_mode *inner_mode;
2489 const struct xfrm_mode *outer_mode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002490 int family = x->props.family;
2491 int err;
2492
David Brazdil0f672f62019-12-10 10:32:29 +00002493 if (family == AF_INET &&
2494 xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)
2495 x->props.flags |= XFRM_STATE_NOPMTUDISC;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002496
2497 err = -EPROTONOSUPPORT;
2498
2499 if (x->sel.family != AF_UNSPEC) {
2500 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2501 if (inner_mode == NULL)
2502 goto error;
2503
2504 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
David Brazdil0f672f62019-12-10 10:32:29 +00002505 family != x->sel.family)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002506 goto error;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002507
David Brazdil0f672f62019-12-10 10:32:29 +00002508 x->inner_mode = *inner_mode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002509 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00002510 const struct xfrm_mode *inner_mode_iaf;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002511 int iafamily = AF_INET;
2512
2513 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
2514 if (inner_mode == NULL)
2515 goto error;
2516
David Brazdil0f672f62019-12-10 10:32:29 +00002517 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002518 goto error;
David Brazdil0f672f62019-12-10 10:32:29 +00002519
2520 x->inner_mode = *inner_mode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002521
2522 if (x->props.family == AF_INET)
2523 iafamily = AF_INET6;
2524
2525 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2526 if (inner_mode_iaf) {
2527 if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
David Brazdil0f672f62019-12-10 10:32:29 +00002528 x->inner_mode_iaf = *inner_mode_iaf;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002529 }
2530 }
2531
2532 x->type = xfrm_get_type(x->id.proto, family);
2533 if (x->type == NULL)
2534 goto error;
2535
2536 x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
2537
2538 err = x->type->init_state(x);
2539 if (err)
2540 goto error;
2541
David Brazdil0f672f62019-12-10 10:32:29 +00002542 outer_mode = xfrm_get_mode(x->props.mode, family);
2543 if (!outer_mode) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002544 err = -EPROTONOSUPPORT;
2545 goto error;
2546 }
2547
David Brazdil0f672f62019-12-10 10:32:29 +00002548 x->outer_mode = *outer_mode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002549 if (init_replay) {
2550 err = xfrm_init_replay(x);
2551 if (err)
2552 goto error;
2553 }
2554
2555error:
2556 return err;
2557}
2558
2559EXPORT_SYMBOL(__xfrm_init_state);
2560
2561int xfrm_init_state(struct xfrm_state *x)
2562{
2563 int err;
2564
2565 err = __xfrm_init_state(x, true, false);
2566 if (!err)
2567 x->km.state = XFRM_STATE_VALID;
2568
2569 return err;
2570}
2571
2572EXPORT_SYMBOL(xfrm_init_state);
2573
2574int __net_init xfrm_state_init(struct net *net)
2575{
2576 unsigned int sz;
2577
2578 if (net_eq(net, &init_net))
2579 xfrm_state_cache = KMEM_CACHE(xfrm_state,
2580 SLAB_HWCACHE_ALIGN | SLAB_PANIC);
2581
2582 INIT_LIST_HEAD(&net->xfrm.state_all);
2583
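	/* Start with small 8-bucket hash tables; xfrm_hash_resize() grows
	 * them from the state_hash_work worker as entries accumulate.
	 */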
2584 sz = sizeof(struct hlist_head) * 8;
2585
2586 net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2587 if (!net->xfrm.state_bydst)
2588 goto out_bydst;
2589 net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2590 if (!net->xfrm.state_bysrc)
2591 goto out_bysrc;
2592 net->xfrm.state_byspi = xfrm_hash_alloc(sz);
2593 if (!net->xfrm.state_byspi)
2594 goto out_byspi;
2595 net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2596
2597 net->xfrm.state_num = 0;
2598 INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
2599 spin_lock_init(&net->xfrm.xfrm_state_lock);
Olivier Deprez0e641232021-09-23 10:07:05 +02002600 seqcount_init(&net->xfrm.xfrm_state_hash_generation);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002601 return 0;
2602
2603out_byspi:
2604 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2605out_bysrc:
2606 xfrm_hash_free(net->xfrm.state_bydst, sz);
2607out_bydst:
2608 return -ENOMEM;
2609}
2610
2611void xfrm_state_fini(struct net *net)
2612{
2613 unsigned int sz;
2614
2615 flush_work(&net->xfrm.state_hash_work);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002616 flush_work(&xfrm_state_gc_work);
David Brazdil0f672f62019-12-10 10:32:29 +00002617 xfrm_state_flush(net, 0, false, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002618
2619 WARN_ON(!list_empty(&net->xfrm.state_all));
2620
2621 sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
2622 WARN_ON(!hlist_empty(net->xfrm.state_byspi));
2623 xfrm_hash_free(net->xfrm.state_byspi, sz);
2624 WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
2625 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2626 WARN_ON(!hlist_empty(net->xfrm.state_bydst));
2627 xfrm_hash_free(net->xfrm.state_bydst, sz);
2628}
2629
2630#ifdef CONFIG_AUDITSYSCALL
2631static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2632 struct audit_buffer *audit_buf)
2633{
2634 struct xfrm_sec_ctx *ctx = x->security;
2635 u32 spi = ntohl(x->id.spi);
2636
2637 if (ctx)
2638 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2639 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2640
2641 switch (x->props.family) {
2642 case AF_INET:
2643 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2644 &x->props.saddr.a4, &x->id.daddr.a4);
2645 break;
2646 case AF_INET6:
2647 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2648 x->props.saddr.a6, x->id.daddr.a6);
2649 break;
2650 }
2651
2652 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2653}
2654
2655static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2656 struct audit_buffer *audit_buf)
2657{
2658 const struct iphdr *iph4;
2659 const struct ipv6hdr *iph6;
2660
2661 switch (family) {
2662 case AF_INET:
2663 iph4 = ip_hdr(skb);
2664 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2665 &iph4->saddr, &iph4->daddr);
2666 break;
2667 case AF_INET6:
2668 iph6 = ipv6_hdr(skb);
2669 audit_log_format(audit_buf,
2670 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2671 &iph6->saddr, &iph6->daddr,
2672 iph6->flow_lbl[0] & 0x0f,
2673 iph6->flow_lbl[1],
2674 iph6->flow_lbl[2]);
2675 break;
2676 }
2677}
2678
2679void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
2680{
2681 struct audit_buffer *audit_buf;
2682
2683 audit_buf = xfrm_audit_start("SAD-add");
2684 if (audit_buf == NULL)
2685 return;
2686 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2687 xfrm_audit_helper_sainfo(x, audit_buf);
2688 audit_log_format(audit_buf, " res=%u", result);
2689 audit_log_end(audit_buf);
2690}
2691EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2692
2693void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
2694{
2695 struct audit_buffer *audit_buf;
2696
2697 audit_buf = xfrm_audit_start("SAD-delete");
2698 if (audit_buf == NULL)
2699 return;
2700 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2701 xfrm_audit_helper_sainfo(x, audit_buf);
2702 audit_log_format(audit_buf, " res=%u", result);
2703 audit_log_end(audit_buf);
2704}
2705EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2706
2707void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2708 struct sk_buff *skb)
2709{
2710 struct audit_buffer *audit_buf;
2711 u32 spi;
2712
2713 audit_buf = xfrm_audit_start("SA-replay-overflow");
2714 if (audit_buf == NULL)
2715 return;
2716 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2717 /* don't record the sequence number because it's inherent in this kind
2718 * of audit message */
2719 spi = ntohl(x->id.spi);
2720 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2721 audit_log_end(audit_buf);
2722}
2723EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2724
2725void xfrm_audit_state_replay(struct xfrm_state *x,
2726 struct sk_buff *skb, __be32 net_seq)
2727{
2728 struct audit_buffer *audit_buf;
2729 u32 spi;
2730
2731 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2732 if (audit_buf == NULL)
2733 return;
2734 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2735 spi = ntohl(x->id.spi);
2736 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2737 spi, spi, ntohl(net_seq));
2738 audit_log_end(audit_buf);
2739}
2740EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
2741
2742void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2743{
2744 struct audit_buffer *audit_buf;
2745
2746 audit_buf = xfrm_audit_start("SA-notfound");
2747 if (audit_buf == NULL)
2748 return;
2749 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2750 audit_log_end(audit_buf);
2751}
2752EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2753
2754void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2755 __be32 net_spi, __be32 net_seq)
2756{
2757 struct audit_buffer *audit_buf;
2758 u32 spi;
2759
2760 audit_buf = xfrm_audit_start("SA-notfound");
2761 if (audit_buf == NULL)
2762 return;
2763 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2764 spi = ntohl(net_spi);
2765 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2766 spi, spi, ntohl(net_seq));
2767 audit_log_end(audit_buf);
2768}
2769EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2770
2771void xfrm_audit_state_icvfail(struct xfrm_state *x,
2772 struct sk_buff *skb, u8 proto)
2773{
2774 struct audit_buffer *audit_buf;
2775 __be32 net_spi;
2776 __be32 net_seq;
2777
2778 audit_buf = xfrm_audit_start("SA-icv-failure");
2779 if (audit_buf == NULL)
2780 return;
2781 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2782 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2783 u32 spi = ntohl(net_spi);
2784 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2785 spi, spi, ntohl(net_seq));
2786 }
2787 audit_log_end(audit_buf);
2788}
2789EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2790#endif /* CONFIG_AUDITSYSCALL */