1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * xfrm_state.c
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 * YOSHIFUJI Hideaki @USAGI
11 * Split up af-specific functions
12 * Derek Atkins <derek@ihtfp.com>
13 * Add UDP Encapsulation
14 *
15 */
16
17#include <linux/workqueue.h>
18#include <net/xfrm.h>
19#include <linux/pfkeyv2.h>
20#include <linux/ipsec.h>
21#include <linux/module.h>
22#include <linux/cache.h>
23#include <linux/audit.h>
24#include <linux/uaccess.h>
25#include <linux/ktime.h>
26#include <linux/slab.h>
27#include <linux/interrupt.h>
28#include <linux/kernel.h>
29
30#include <crypto/aead.h>
31
32#include "xfrm_hash.h"
33
34#define xfrm_state_deref_prot(table, net) \
35 rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
36
37static void xfrm_state_gc_task(struct work_struct *work);
38
39/* Each xfrm_state may be linked to two tables:
40
41 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
42 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
43 destination/tunnel endpoint. (output)
44 */
45
46static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
47static struct kmem_cache *xfrm_state_cache __ro_after_init;
48
49static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
50static HLIST_HEAD(xfrm_state_gc_list);
51
52static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
53{
54 return refcount_inc_not_zero(&x->refcnt);
55}
56
57static inline unsigned int xfrm_dst_hash(struct net *net,
58 const xfrm_address_t *daddr,
59 const xfrm_address_t *saddr,
60 u32 reqid,
61 unsigned short family)
62{
63 return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
64}
65
66static inline unsigned int xfrm_src_hash(struct net *net,
67 const xfrm_address_t *daddr,
68 const xfrm_address_t *saddr,
69 unsigned short family)
70{
71 return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
72}
73
74static inline unsigned int
75xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
76 __be32 spi, u8 proto, unsigned short family)
77{
78 return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
79}
80
81static void xfrm_hash_transfer(struct hlist_head *list,
82 struct hlist_head *ndsttable,
83 struct hlist_head *nsrctable,
84 struct hlist_head *nspitable,
85 unsigned int nhashmask)
86{
87 struct hlist_node *tmp;
88 struct xfrm_state *x;
89
90 hlist_for_each_entry_safe(x, tmp, list, bydst) {
91 unsigned int h;
92
93 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
94 x->props.reqid, x->props.family,
95 nhashmask);
96 hlist_add_head_rcu(&x->bydst, ndsttable + h);
97
98 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
99 x->props.family,
100 nhashmask);
101 hlist_add_head_rcu(&x->bysrc, nsrctable + h);
102
103 if (x->id.spi) {
104 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
105 x->id.proto, x->props.family,
106 nhashmask);
107 hlist_add_head_rcu(&x->byspi, nspitable + h);
108 }
109 }
110}
111
112static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
113{
114 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
115}
116
117static void xfrm_hash_resize(struct work_struct *work)
118{
119 struct net *net = container_of(work, struct net, xfrm.state_hash_work);
120 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
121 unsigned long nsize, osize;
122 unsigned int nhashmask, ohashmask;
123 int i;
124
125 nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
126 ndst = xfrm_hash_alloc(nsize);
127 if (!ndst)
128 return;
129 nsrc = xfrm_hash_alloc(nsize);
130 if (!nsrc) {
131 xfrm_hash_free(ndst, nsize);
132 return;
133 }
134 nspi = xfrm_hash_alloc(nsize);
135 if (!nspi) {
136 xfrm_hash_free(ndst, nsize);
137 xfrm_hash_free(nsrc, nsize);
138 return;
139 }
140
141 spin_lock_bh(&net->xfrm.xfrm_state_lock);
142	write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
143
144 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
145 odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
146 for (i = net->xfrm.state_hmask; i >= 0; i--)
147 xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
148
149 osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
150 ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
151 ohashmask = net->xfrm.state_hmask;
152
153 rcu_assign_pointer(net->xfrm.state_bydst, ndst);
154 rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
155 rcu_assign_pointer(net->xfrm.state_byspi, nspi);
156 net->xfrm.state_hmask = nhashmask;
157
158	write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
159	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
160
161 osize = (ohashmask + 1) * sizeof(struct hlist_head);
162
163 synchronize_rcu();
164
165 xfrm_hash_free(odst, osize);
166 xfrm_hash_free(osrc, osize);
167 xfrm_hash_free(ospi, osize);
168}
169
170static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
171static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
172
173static DEFINE_SPINLOCK(xfrm_state_gc_lock);
174
175int __xfrm_state_delete(struct xfrm_state *x);
176
177int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
178static bool km_is_alive(const struct km_event *c);
179void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
180
181int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
182{
183 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
184	int err = 0;
185
186	if (!afinfo)
187		return -EAFNOSUPPORT;
188
189#define X(afi, T, name) do {			\
190 WARN_ON((afi)->type_ ## name); \
191 (afi)->type_ ## name = (T); \
192 } while (0)
193
194 switch (type->proto) {
195 case IPPROTO_COMP:
196 X(afinfo, type, comp);
197 break;
198 case IPPROTO_AH:
199 X(afinfo, type, ah);
200 break;
201 case IPPROTO_ESP:
202 X(afinfo, type, esp);
203 break;
204 case IPPROTO_IPIP:
205 X(afinfo, type, ipip);
206 break;
207 case IPPROTO_DSTOPTS:
208 X(afinfo, type, dstopts);
209 break;
210 case IPPROTO_ROUTING:
211 X(afinfo, type, routing);
212 break;
213 case IPPROTO_IPV6:
214 X(afinfo, type, ipip6);
215 break;
216 default:
217 WARN_ON(1);
218 err = -EPROTONOSUPPORT;
219 break;
220 }
221#undef X
222	rcu_read_unlock();
223 return err;
224}
225EXPORT_SYMBOL(xfrm_register_type);
226
227void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
228{
229 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
230
231 if (unlikely(afinfo == NULL))
232		return;
233
234#define X(afi, T, name) do {				\
235 WARN_ON((afi)->type_ ## name != (T)); \
236 (afi)->type_ ## name = NULL; \
237 } while (0)
238
239 switch (type->proto) {
240 case IPPROTO_COMP:
241 X(afinfo, type, comp);
242 break;
243 case IPPROTO_AH:
244 X(afinfo, type, ah);
245 break;
246 case IPPROTO_ESP:
247 X(afinfo, type, esp);
248 break;
249 case IPPROTO_IPIP:
250 X(afinfo, type, ipip);
251 break;
252 case IPPROTO_DSTOPTS:
253 X(afinfo, type, dstopts);
254 break;
255 case IPPROTO_ROUTING:
256 X(afinfo, type, routing);
257 break;
258 case IPPROTO_IPV6:
259 X(afinfo, type, ipip6);
260 break;
261 default:
262 WARN_ON(1);
263 break;
264 }
265#undef X
266	rcu_read_unlock();
267}
268EXPORT_SYMBOL(xfrm_unregister_type);
269
270static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
271{
272	const struct xfrm_type *type = NULL;
273	struct xfrm_state_afinfo *afinfo;
274	int modload_attempted = 0;
275
276retry:
277 afinfo = xfrm_state_get_afinfo(family);
278 if (unlikely(afinfo == NULL))
279 return NULL;
280
281	switch (proto) {
282 case IPPROTO_COMP:
283 type = afinfo->type_comp;
284 break;
285 case IPPROTO_AH:
286 type = afinfo->type_ah;
287 break;
288 case IPPROTO_ESP:
289 type = afinfo->type_esp;
290 break;
291 case IPPROTO_IPIP:
292 type = afinfo->type_ipip;
293 break;
294 case IPPROTO_DSTOPTS:
295 type = afinfo->type_dstopts;
296 break;
297 case IPPROTO_ROUTING:
298 type = afinfo->type_routing;
299 break;
300 case IPPROTO_IPV6:
301 type = afinfo->type_ipip6;
302 break;
303 default:
304 break;
305 }
306
307	if (unlikely(type && !try_module_get(type->owner)))
308 type = NULL;
309
310 rcu_read_unlock();
311
312 if (!type && !modload_attempted) {
313 request_module("xfrm-type-%d-%d", family, proto);
314 modload_attempted = 1;
315 goto retry;
316 }
317
318 return type;
319}
320
321static void xfrm_put_type(const struct xfrm_type *type)
322{
323 module_put(type->owner);
324}
325
326int xfrm_register_type_offload(const struct xfrm_type_offload *type,
327 unsigned short family)
328{
329 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
330	int err = 0;
331
332 if (unlikely(afinfo == NULL))
333 return -EAFNOSUPPORT;
334
335	switch (type->proto) {
336 case IPPROTO_ESP:
337 WARN_ON(afinfo->type_offload_esp);
338 afinfo->type_offload_esp = type;
339 break;
340 default:
341 WARN_ON(1);
342 err = -EPROTONOSUPPORT;
343 break;
344 }
345
346	rcu_read_unlock();
347 return err;
348}
349EXPORT_SYMBOL(xfrm_register_type_offload);
350
351void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
352 unsigned short family)
353{
354 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
355
356 if (unlikely(afinfo == NULL))
357		return;
358
359	switch (type->proto) {
360 case IPPROTO_ESP:
361 WARN_ON(afinfo->type_offload_esp != type);
362 afinfo->type_offload_esp = NULL;
363 break;
364 default:
365 WARN_ON(1);
366 break;
367 }
368	rcu_read_unlock();
369}
370EXPORT_SYMBOL(xfrm_unregister_type_offload);
371
372static const struct xfrm_type_offload *
373xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
374{
375	const struct xfrm_type_offload *type = NULL;
376	struct xfrm_state_afinfo *afinfo;
377
378retry:
379 afinfo = xfrm_state_get_afinfo(family);
380 if (unlikely(afinfo == NULL))
381 return NULL;
382
383	switch (proto) {
384 case IPPROTO_ESP:
385 type = afinfo->type_offload_esp;
386 break;
387 default:
388 break;
389 }
390
391	if ((type && !try_module_get(type->owner)))
392 type = NULL;
393
394 rcu_read_unlock();
395
396 if (!type && try_load) {
397 request_module("xfrm-offload-%d-%d", family, proto);
398 try_load = false;
399 goto retry;
400 }
401
402 return type;
403}
404
405static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
406{
407 module_put(type->owner);
408}
409
410static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
411 [XFRM_MODE_BEET] = {
412 .encap = XFRM_MODE_BEET,
413 .flags = XFRM_MODE_FLAG_TUNNEL,
414 .family = AF_INET,
415 },
416 [XFRM_MODE_TRANSPORT] = {
417 .encap = XFRM_MODE_TRANSPORT,
418 .family = AF_INET,
419 },
420 [XFRM_MODE_TUNNEL] = {
421 .encap = XFRM_MODE_TUNNEL,
422 .flags = XFRM_MODE_FLAG_TUNNEL,
423 .family = AF_INET,
424 },
425};
426
427static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
428 [XFRM_MODE_BEET] = {
429 .encap = XFRM_MODE_BEET,
430 .flags = XFRM_MODE_FLAG_TUNNEL,
431 .family = AF_INET6,
432 },
433 [XFRM_MODE_ROUTEOPTIMIZATION] = {
434 .encap = XFRM_MODE_ROUTEOPTIMIZATION,
435 .family = AF_INET6,
436 },
437 [XFRM_MODE_TRANSPORT] = {
438 .encap = XFRM_MODE_TRANSPORT,
439 .family = AF_INET6,
440 },
441 [XFRM_MODE_TUNNEL] = {
442 .encap = XFRM_MODE_TUNNEL,
443 .flags = XFRM_MODE_FLAG_TUNNEL,
444 .family = AF_INET6,
445 },
446};
447
448static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
449{
450	const struct xfrm_mode *mode;
451
452 if (unlikely(encap >= XFRM_MODE_MAX))
453 return NULL;
454
455	switch (family) {
456 case AF_INET:
457 mode = &xfrm4_mode_map[encap];
458 if (mode->family == family)
459 return mode;
460 break;
461 case AF_INET6:
462 mode = &xfrm6_mode_map[encap];
463 if (mode->family == family)
464 return mode;
465 break;
466 default:
467 break;
468	}
469
470	return NULL;
471}
472
473void xfrm_state_free(struct xfrm_state *x)
474{
475 kmem_cache_free(xfrm_state_cache, x);
476}
477EXPORT_SYMBOL(xfrm_state_free);
478
479static void ___xfrm_state_destroy(struct xfrm_state *x)
480{
481	hrtimer_cancel(&x->mtimer);
482	del_timer_sync(&x->rtimer);
483 kfree(x->aead);
484 kfree(x->aalg);
485 kfree(x->ealg);
486 kfree(x->calg);
487 kfree(x->encap);
488 kfree(x->coaddr);
489 kfree(x->replay_esn);
490 kfree(x->preplay_esn);
491	if (x->type_offload)
492 xfrm_put_type_offload(x->type_offload);
493 if (x->type) {
494 x->type->destructor(x);
495 xfrm_put_type(x->type);
496 }
497	if (x->xfrag.page)
498 put_page(x->xfrag.page);
499	xfrm_dev_state_free(x);
500 security_xfrm_state_free(x);
501 xfrm_state_free(x);
502}
503
504static void xfrm_state_gc_task(struct work_struct *work)
505{
506 struct xfrm_state *x;
507 struct hlist_node *tmp;
508 struct hlist_head gc_list;
509
510 spin_lock_bh(&xfrm_state_gc_lock);
511 hlist_move_list(&xfrm_state_gc_list, &gc_list);
512 spin_unlock_bh(&xfrm_state_gc_lock);
513
514 synchronize_rcu();
515
516 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
517		___xfrm_state_destroy(x);
518}
519
520static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
521{
522	struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
523 enum hrtimer_restart ret = HRTIMER_NORESTART;
524	time64_t now = ktime_get_real_seconds();
525 time64_t next = TIME64_MAX;
526 int warn = 0;
527 int err = 0;
528
529 spin_lock(&x->lock);
530 if (x->km.state == XFRM_STATE_DEAD)
531 goto out;
532 if (x->km.state == XFRM_STATE_EXPIRED)
533 goto expired;
534 if (x->lft.hard_add_expires_seconds) {
535 long tmo = x->lft.hard_add_expires_seconds +
536 x->curlft.add_time - now;
537 if (tmo <= 0) {
538 if (x->xflags & XFRM_SOFT_EXPIRE) {
539 /* enter hard expire without soft expire first?!
540 * setting a new date could trigger this.
541				 * workaround: fix x->curlft.add_time below:
542 */
543 x->curlft.add_time = now - x->saved_tmo - 1;
544 tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
545 } else
546 goto expired;
547 }
548 if (tmo < next)
549 next = tmo;
550 }
551 if (x->lft.hard_use_expires_seconds) {
552 long tmo = x->lft.hard_use_expires_seconds +
553 (x->curlft.use_time ? : now) - now;
554 if (tmo <= 0)
555 goto expired;
556 if (tmo < next)
557 next = tmo;
558 }
559 if (x->km.dying)
560 goto resched;
561 if (x->lft.soft_add_expires_seconds) {
562 long tmo = x->lft.soft_add_expires_seconds +
563 x->curlft.add_time - now;
564 if (tmo <= 0) {
565 warn = 1;
566 x->xflags &= ~XFRM_SOFT_EXPIRE;
567 } else if (tmo < next) {
568 next = tmo;
569 x->xflags |= XFRM_SOFT_EXPIRE;
570 x->saved_tmo = tmo;
571 }
572 }
573 if (x->lft.soft_use_expires_seconds) {
574 long tmo = x->lft.soft_use_expires_seconds +
575 (x->curlft.use_time ? : now) - now;
576 if (tmo <= 0)
577 warn = 1;
578 else if (tmo < next)
579 next = tmo;
580 }
581
582 x->km.dying = warn;
583 if (warn)
584 km_state_expired(x, 0, 0);
585resched:
586 if (next != TIME64_MAX) {
587		hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
588 ret = HRTIMER_RESTART;
589	}
590
591 goto out;
592
593expired:
594 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
595 x->km.state = XFRM_STATE_EXPIRED;
596
597 err = __xfrm_state_delete(x);
598 if (!err)
599 km_state_expired(x, 1, 0);
600
601 xfrm_audit_state_delete(x, err ? 0 : 1, true);
602
603out:
604 spin_unlock(&x->lock);
605	return ret;
606}
607
608static void xfrm_replay_timer_handler(struct timer_list *t);
609
610struct xfrm_state *xfrm_state_alloc(struct net *net)
611{
612 struct xfrm_state *x;
613
614	x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC);
615
616 if (x) {
617 write_pnet(&x->xs_net, net);
618 refcount_set(&x->refcnt, 1);
619 atomic_set(&x->tunnel_users, 0);
620 INIT_LIST_HEAD(&x->km.all);
621 INIT_HLIST_NODE(&x->bydst);
622 INIT_HLIST_NODE(&x->bysrc);
623 INIT_HLIST_NODE(&x->byspi);
624		hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
625 x->mtimer.function = xfrm_timer_handler;
626		timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
627 x->curlft.add_time = ktime_get_real_seconds();
628 x->lft.soft_byte_limit = XFRM_INF;
629 x->lft.soft_packet_limit = XFRM_INF;
630 x->lft.hard_byte_limit = XFRM_INF;
631 x->lft.hard_packet_limit = XFRM_INF;
632 x->replay_maxage = 0;
633 x->replay_maxdiff = 0;
634		spin_lock_init(&x->lock);
635 }
636 return x;
637}
638EXPORT_SYMBOL(xfrm_state_alloc);
639
640void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
641{
642 WARN_ON(x->km.state != XFRM_STATE_DEAD);
643
644	if (sync) {
645 synchronize_rcu();
646 ___xfrm_state_destroy(x);
647 } else {
648 spin_lock_bh(&xfrm_state_gc_lock);
649 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
650 spin_unlock_bh(&xfrm_state_gc_lock);
651 schedule_work(&xfrm_state_gc_work);
652 }
653}
654EXPORT_SYMBOL(__xfrm_state_destroy);
655
656int __xfrm_state_delete(struct xfrm_state *x)
657{
658 struct net *net = xs_net(x);
659 int err = -ESRCH;
660
661 if (x->km.state != XFRM_STATE_DEAD) {
662 x->km.state = XFRM_STATE_DEAD;
663 spin_lock(&net->xfrm.xfrm_state_lock);
664 list_del(&x->km.all);
665 hlist_del_rcu(&x->bydst);
666 hlist_del_rcu(&x->bysrc);
667 if (x->id.spi)
668 hlist_del_rcu(&x->byspi);
669 net->xfrm.state_num--;
670 spin_unlock(&net->xfrm.xfrm_state_lock);
671
672		if (x->encap_sk)
673 sock_put(rcu_dereference_raw(x->encap_sk));
674
675		xfrm_dev_state_delete(x);
676
677 /* All xfrm_state objects are created by xfrm_state_alloc.
678 * The xfrm_state_alloc call gives a reference, and that
679 * is what we are dropping here.
680 */
681 xfrm_state_put(x);
682 err = 0;
683 }
684
685 return err;
686}
687EXPORT_SYMBOL(__xfrm_state_delete);
688
689int xfrm_state_delete(struct xfrm_state *x)
690{
691 int err;
692
693 spin_lock_bh(&x->lock);
694 err = __xfrm_state_delete(x);
695 spin_unlock_bh(&x->lock);
696
697 return err;
698}
699EXPORT_SYMBOL(xfrm_state_delete);
700
701#ifdef CONFIG_SECURITY_NETWORK_XFRM
702static inline int
703xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
704{
705 int i, err = 0;
706
707 for (i = 0; i <= net->xfrm.state_hmask; i++) {
708 struct xfrm_state *x;
709
710 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
711 if (xfrm_id_proto_match(x->id.proto, proto) &&
712 (err = security_xfrm_state_delete(x)) != 0) {
713 xfrm_audit_state_delete(x, 0, task_valid);
714 return err;
715 }
716 }
717 }
718
719 return err;
720}
721
722static inline int
723xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
724{
725 int i, err = 0;
726
727 for (i = 0; i <= net->xfrm.state_hmask; i++) {
728 struct xfrm_state *x;
729 struct xfrm_state_offload *xso;
730
731 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
732 xso = &x->xso;
733
734 if (xso->dev == dev &&
735 (err = security_xfrm_state_delete(x)) != 0) {
736 xfrm_audit_state_delete(x, 0, task_valid);
737 return err;
738 }
739 }
740 }
741
742 return err;
743}
744#else
745static inline int
746xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
747{
748 return 0;
749}
750
751static inline int
752xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
753{
754 return 0;
755}
756#endif
757
758int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
759{
760 int i, err = 0, cnt = 0;
761
762 spin_lock_bh(&net->xfrm.xfrm_state_lock);
763 err = xfrm_state_flush_secctx_check(net, proto, task_valid);
764 if (err)
765 goto out;
766
767 err = -ESRCH;
768 for (i = 0; i <= net->xfrm.state_hmask; i++) {
769 struct xfrm_state *x;
770restart:
771 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
772 if (!xfrm_state_kern(x) &&
773 xfrm_id_proto_match(x->id.proto, proto)) {
774 xfrm_state_hold(x);
775 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
776
777 err = xfrm_state_delete(x);
778 xfrm_audit_state_delete(x, err ? 0 : 1,
779 task_valid);
780				if (sync)
781 xfrm_state_put_sync(x);
782 else
783 xfrm_state_put(x);
784				if (!err)
785 cnt++;
786
787 spin_lock_bh(&net->xfrm.xfrm_state_lock);
788 goto restart;
789 }
790 }
791 }
792out:
793 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
794 if (cnt)
795 err = 0;
796
797 return err;
798}
799EXPORT_SYMBOL(xfrm_state_flush);
800
801int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
802{
803 int i, err = 0, cnt = 0;
804
805 spin_lock_bh(&net->xfrm.xfrm_state_lock);
806 err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
807 if (err)
808 goto out;
809
810 err = -ESRCH;
811 for (i = 0; i <= net->xfrm.state_hmask; i++) {
812 struct xfrm_state *x;
813 struct xfrm_state_offload *xso;
814restart:
815 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
816 xso = &x->xso;
817
818 if (!xfrm_state_kern(x) && xso->dev == dev) {
819 xfrm_state_hold(x);
820 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
821
822 err = xfrm_state_delete(x);
823 xfrm_audit_state_delete(x, err ? 0 : 1,
824 task_valid);
825 xfrm_state_put(x);
826 if (!err)
827 cnt++;
828
829 spin_lock_bh(&net->xfrm.xfrm_state_lock);
830 goto restart;
831 }
832 }
833 }
834 if (cnt)
835 err = 0;
836
837out:
838 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
839 return err;
840}
841EXPORT_SYMBOL(xfrm_dev_state_flush);
842
843void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
844{
845 spin_lock_bh(&net->xfrm.xfrm_state_lock);
846 si->sadcnt = net->xfrm.state_num;
847	si->sadhcnt = net->xfrm.state_hmask + 1;
848	si->sadhmcnt = xfrm_state_hashmax;
849 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
850}
851EXPORT_SYMBOL(xfrm_sad_getinfo);
852
853static void
854__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
855{
856 const struct flowi4 *fl4 = &fl->u.ip4;
857
858 sel->daddr.a4 = fl4->daddr;
859 sel->saddr.a4 = fl4->saddr;
860 sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
861 sel->dport_mask = htons(0xffff);
862 sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
863 sel->sport_mask = htons(0xffff);
864 sel->family = AF_INET;
865 sel->prefixlen_d = 32;
866 sel->prefixlen_s = 32;
867 sel->proto = fl4->flowi4_proto;
868 sel->ifindex = fl4->flowi4_oif;
869}
870
871static void
872__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
873{
874 const struct flowi6 *fl6 = &fl->u.ip6;
875
876 /* Initialize temporary selector matching only to current session. */
877 *(struct in6_addr *)&sel->daddr = fl6->daddr;
878 *(struct in6_addr *)&sel->saddr = fl6->saddr;
879 sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
880 sel->dport_mask = htons(0xffff);
881 sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
882 sel->sport_mask = htons(0xffff);
883 sel->family = AF_INET6;
884 sel->prefixlen_d = 128;
885 sel->prefixlen_s = 128;
886 sel->proto = fl6->flowi6_proto;
887 sel->ifindex = fl6->flowi6_oif;
888}
889
890static void
891xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
892 const struct xfrm_tmpl *tmpl,
893 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
894 unsigned short family)
895{
896	switch (family) {
897 case AF_INET:
898 __xfrm4_init_tempsel(&x->sel, fl);
899 break;
900 case AF_INET6:
901 __xfrm6_init_tempsel(&x->sel, fl);
902 break;
903	}
904
905 x->id = tmpl->id;
906
907 switch (tmpl->encap_family) {
908 case AF_INET:
909 if (x->id.daddr.a4 == 0)
910 x->id.daddr.a4 = daddr->a4;
911 x->props.saddr = tmpl->saddr;
912 if (x->props.saddr.a4 == 0)
913 x->props.saddr.a4 = saddr->a4;
914 break;
915 case AF_INET6:
916 if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
917 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
918 memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
919 if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
920 memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
921 break;
922 }
923
924 x->props.mode = tmpl->mode;
925 x->props.reqid = tmpl->reqid;
926 x->props.family = tmpl->encap_family;
927}
928
929static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
930 const xfrm_address_t *daddr,
931 __be32 spi, u8 proto,
932 unsigned short family)
933{
934 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
935 struct xfrm_state *x;
936
937 hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
938 if (x->props.family != family ||
939 x->id.spi != spi ||
940 x->id.proto != proto ||
941 !xfrm_addr_equal(&x->id.daddr, daddr, family))
942 continue;
943
944 if ((mark & x->mark.m) != x->mark.v)
945 continue;
946 if (!xfrm_state_hold_rcu(x))
947 continue;
948 return x;
949 }
950
951 return NULL;
952}
953
954static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
955 const xfrm_address_t *daddr,
956 const xfrm_address_t *saddr,
957 u8 proto, unsigned short family)
958{
959 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
960 struct xfrm_state *x;
961
962 hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
963 if (x->props.family != family ||
964 x->id.proto != proto ||
965 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
966 !xfrm_addr_equal(&x->props.saddr, saddr, family))
967 continue;
968
969 if ((mark & x->mark.m) != x->mark.v)
970 continue;
971 if (!xfrm_state_hold_rcu(x))
972 continue;
973 return x;
974 }
975
976 return NULL;
977}
978
979static inline struct xfrm_state *
980__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
981{
982 struct net *net = xs_net(x);
983 u32 mark = x->mark.v & x->mark.m;
984
985 if (use_spi)
986 return __xfrm_state_lookup(net, mark, &x->id.daddr,
987 x->id.spi, x->id.proto, family);
988 else
989 return __xfrm_state_lookup_byaddr(net, mark,
990 &x->id.daddr,
991 &x->props.saddr,
992 x->id.proto, family);
993}
994
995static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
996{
997 if (have_hash_collision &&
998 (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
999 net->xfrm.state_num > net->xfrm.state_hmask)
1000 schedule_work(&net->xfrm.state_hash_work);
1001}
1002
1003static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
1004 const struct flowi *fl, unsigned short family,
1005 struct xfrm_state **best, int *acq_in_progress,
1006 int *error)
1007{
1008 /* Resolution logic:
1009 * 1. There is a valid state with matching selector. Done.
1010 * 2. Valid state with inappropriate selector. Skip.
1011 *
1012 * Entering area of "sysdeps".
1013 *
1014 * 3. If state is not valid, selector is temporary, it selects
1015 * only session which triggered previous resolution. Key
1016 * manager will do something to install a state with proper
1017 * selector.
1018 */
1019 if (x->km.state == XFRM_STATE_VALID) {
1020 if ((x->sel.family &&
1021		     (x->sel.family != family ||
1022 !xfrm_selector_match(&x->sel, fl, family))) ||
1023		    !security_xfrm_state_pol_flow_match(x, pol, fl))
1024 return;
1025
1026 if (!*best ||
1027 (*best)->km.dying > x->km.dying ||
1028 ((*best)->km.dying == x->km.dying &&
1029 (*best)->curlft.add_time < x->curlft.add_time))
1030 *best = x;
1031 } else if (x->km.state == XFRM_STATE_ACQ) {
1032 *acq_in_progress = 1;
1033 } else if (x->km.state == XFRM_STATE_ERROR ||
1034 x->km.state == XFRM_STATE_EXPIRED) {
1035		if ((!x->sel.family ||
1036 (x->sel.family == family &&
1037 xfrm_selector_match(&x->sel, fl, family))) &&
1038		    security_xfrm_state_pol_flow_match(x, pol, fl))
1039 *error = -ESRCH;
1040 }
1041}
1042
1043struct xfrm_state *
1044xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1045 const struct flowi *fl, struct xfrm_tmpl *tmpl,
1046 struct xfrm_policy *pol, int *err,
1047 unsigned short family, u32 if_id)
1048{
1049 static xfrm_address_t saddr_wildcard = { };
1050 struct net *net = xp_net(pol);
1051 unsigned int h, h_wildcard;
1052 struct xfrm_state *x, *x0, *to_put;
1053 int acquire_in_progress = 0;
1054 int error = 0;
1055 struct xfrm_state *best = NULL;
1056 u32 mark = pol->mark.v & pol->mark.m;
1057 unsigned short encap_family = tmpl->encap_family;
1058 unsigned int sequence;
1059 struct km_event c;
1060
1061 to_put = NULL;
1062
1063	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
1064
1065 rcu_read_lock();
1066 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
1067 hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
1068 if (x->props.family == encap_family &&
1069 x->props.reqid == tmpl->reqid &&
1070 (mark & x->mark.m) == x->mark.v &&
1071 x->if_id == if_id &&
1072 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1073 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
1074 tmpl->mode == x->props.mode &&
1075 tmpl->id.proto == x->id.proto &&
1076 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1077			xfrm_state_look_at(pol, x, fl, family,
1078					   &best, &acquire_in_progress, &error);
1079 }
1080 if (best || acquire_in_progress)
1081 goto found;
1082
1083 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
1084 hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
1085 if (x->props.family == encap_family &&
1086 x->props.reqid == tmpl->reqid &&
1087 (mark & x->mark.m) == x->mark.v &&
1088 x->if_id == if_id &&
1089 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1090 xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
1091 tmpl->mode == x->props.mode &&
1092 tmpl->id.proto == x->id.proto &&
1093 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1094			xfrm_state_look_at(pol, x, fl, family,
1095					   &best, &acquire_in_progress, &error);
1096 }
1097
1098found:
1099 x = best;
1100 if (!x && !error && !acquire_in_progress) {
1101 if (tmpl->id.spi &&
1102 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
1103 tmpl->id.proto, encap_family)) != NULL) {
1104 to_put = x0;
1105 error = -EEXIST;
1106 goto out;
1107 }
1108
1109 c.net = net;
1110 /* If the KMs have no listeners (yet...), avoid allocating an SA
1111 * for each and every packet - garbage collection might not
1112 * handle the flood.
1113 */
1114 if (!km_is_alive(&c)) {
1115 error = -ESRCH;
1116 goto out;
1117 }
1118
1119 x = xfrm_state_alloc(net);
1120 if (x == NULL) {
1121 error = -ENOMEM;
1122 goto out;
1123 }
1124 /* Initialize temporary state matching only
1125 * to current session. */
1126 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
1127 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
1128 x->if_id = if_id;
1129
1130 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
1131 if (error) {
1132 x->km.state = XFRM_STATE_DEAD;
1133 to_put = x;
1134 x = NULL;
1135 goto out;
1136 }
1137
1138 if (km_query(x, tmpl, pol) == 0) {
1139 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1140 x->km.state = XFRM_STATE_ACQ;
1141 list_add(&x->km.all, &net->xfrm.state_all);
1142 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1143 h = xfrm_src_hash(net, daddr, saddr, encap_family);
1144 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1145 if (x->id.spi) {
1146 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
1147 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1148 }
1149 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1150			hrtimer_start(&x->mtimer,
1151 ktime_set(net->xfrm.sysctl_acq_expires, 0),
1152 HRTIMER_MODE_REL_SOFT);
1153			net->xfrm.state_num++;
1154 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1155 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1156 } else {
1157 x->km.state = XFRM_STATE_DEAD;
1158 to_put = x;
1159 x = NULL;
1160 error = -ESRCH;
1161 }
1162 }
1163out:
1164 if (x) {
1165 if (!xfrm_state_hold_rcu(x)) {
1166 *err = -EAGAIN;
1167 x = NULL;
1168 }
1169 } else {
1170 *err = acquire_in_progress ? -EAGAIN : error;
1171 }
1172 rcu_read_unlock();
1173 if (to_put)
1174 xfrm_state_put(to_put);
1175
1176	if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
1177		*err = -EAGAIN;
1178 if (x) {
1179 xfrm_state_put(x);
1180 x = NULL;
1181 }
1182 }
1183
1184 return x;
1185}
1186
1187struct xfrm_state *
1188xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1189 xfrm_address_t *daddr, xfrm_address_t *saddr,
1190 unsigned short family, u8 mode, u8 proto, u32 reqid)
1191{
1192 unsigned int h;
1193 struct xfrm_state *rx = NULL, *x = NULL;
1194
1195 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1196 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1197 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1198 if (x->props.family == family &&
1199 x->props.reqid == reqid &&
1200 (mark & x->mark.m) == x->mark.v &&
1201 x->if_id == if_id &&
1202 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1203 xfrm_state_addr_check(x, daddr, saddr, family) &&
1204 mode == x->props.mode &&
1205 proto == x->id.proto &&
1206 x->km.state == XFRM_STATE_VALID) {
1207 rx = x;
1208 break;
1209 }
1210 }
1211
1212 if (rx)
1213 xfrm_state_hold(rx);
1214 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1215
1216
1217 return rx;
1218}
1219EXPORT_SYMBOL(xfrm_stateonly_find);
1220
1221struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1222 unsigned short family)
1223{
1224 struct xfrm_state *x;
1225 struct xfrm_state_walk *w;
1226
1227 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1228 list_for_each_entry(w, &net->xfrm.state_all, all) {
1229 x = container_of(w, struct xfrm_state, km);
1230 if (x->props.family != family ||
1231 x->id.spi != spi)
1232 continue;
1233
1234 xfrm_state_hold(x);
1235 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1236 return x;
1237 }
1238 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1239 return NULL;
1240}
1241EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1242
1243static void __xfrm_state_insert(struct xfrm_state *x)
1244{
1245 struct net *net = xs_net(x);
1246 unsigned int h;
1247
1248 list_add(&x->km.all, &net->xfrm.state_all);
1249
1250 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1251 x->props.reqid, x->props.family);
1252 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1253
1254 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1255 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1256
1257 if (x->id.spi) {
1258 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1259 x->props.family);
1260
1261 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1262 }
1263
1264	hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
1265	if (x->replay_maxage)
1266 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1267
1268 net->xfrm.state_num++;
1269
1270 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1271}
1272
1273/* net->xfrm.xfrm_state_lock is held */
1274static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1275{
1276 struct net *net = xs_net(xnew);
1277 unsigned short family = xnew->props.family;
1278 u32 reqid = xnew->props.reqid;
1279 struct xfrm_state *x;
1280 unsigned int h;
1281 u32 mark = xnew->mark.v & xnew->mark.m;
1282 u32 if_id = xnew->if_id;
1283
1284 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1285 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1286 if (x->props.family == family &&
1287 x->props.reqid == reqid &&
1288 x->if_id == if_id &&
1289 (mark & x->mark.m) == x->mark.v &&
1290 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1291 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1292 x->genid++;
1293 }
1294}
1295
1296void xfrm_state_insert(struct xfrm_state *x)
1297{
1298 struct net *net = xs_net(x);
1299
1300 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1301 __xfrm_state_bump_genids(x);
1302 __xfrm_state_insert(x);
1303 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1304}
1305EXPORT_SYMBOL(xfrm_state_insert);
1306
1307/* net->xfrm.xfrm_state_lock is held */
1308static struct xfrm_state *__find_acq_core(struct net *net,
1309 const struct xfrm_mark *m,
1310 unsigned short family, u8 mode,
1311 u32 reqid, u32 if_id, u8 proto,
1312 const xfrm_address_t *daddr,
1313 const xfrm_address_t *saddr,
1314 int create)
1315{
1316 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1317 struct xfrm_state *x;
1318 u32 mark = m->v & m->m;
1319
1320 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1321 if (x->props.reqid != reqid ||
1322 x->props.mode != mode ||
1323 x->props.family != family ||
1324 x->km.state != XFRM_STATE_ACQ ||
1325 x->id.spi != 0 ||
1326 x->id.proto != proto ||
1327 (mark & x->mark.m) != x->mark.v ||
1328 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1329 !xfrm_addr_equal(&x->props.saddr, saddr, family))
1330 continue;
1331
1332 xfrm_state_hold(x);
1333 return x;
1334 }
1335
1336 if (!create)
1337 return NULL;
1338
1339 x = xfrm_state_alloc(net);
1340 if (likely(x)) {
1341 switch (family) {
1342 case AF_INET:
1343 x->sel.daddr.a4 = daddr->a4;
1344 x->sel.saddr.a4 = saddr->a4;
1345 x->sel.prefixlen_d = 32;
1346 x->sel.prefixlen_s = 32;
1347 x->props.saddr.a4 = saddr->a4;
1348 x->id.daddr.a4 = daddr->a4;
1349 break;
1350
1351 case AF_INET6:
1352 x->sel.daddr.in6 = daddr->in6;
1353 x->sel.saddr.in6 = saddr->in6;
1354 x->sel.prefixlen_d = 128;
1355 x->sel.prefixlen_s = 128;
1356 x->props.saddr.in6 = saddr->in6;
1357 x->id.daddr.in6 = daddr->in6;
1358 break;
1359 }
1360
1361 x->km.state = XFRM_STATE_ACQ;
1362 x->id.proto = proto;
1363 x->props.family = family;
1364 x->props.mode = mode;
1365 x->props.reqid = reqid;
1366 x->if_id = if_id;
1367 x->mark.v = m->v;
1368 x->mark.m = m->m;
1369 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1370 xfrm_state_hold(x);
1371		hrtimer_start(&x->mtimer,
1372 ktime_set(net->xfrm.sysctl_acq_expires, 0),
1373 HRTIMER_MODE_REL_SOFT);
1374		list_add(&x->km.all, &net->xfrm.state_all);
1375 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1376 h = xfrm_src_hash(net, daddr, saddr, family);
1377 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1378
1379 net->xfrm.state_num++;
1380
1381 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1382 }
1383
1384 return x;
1385}
1386
1387static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1388
1389int xfrm_state_add(struct xfrm_state *x)
1390{
1391 struct net *net = xs_net(x);
1392 struct xfrm_state *x1, *to_put;
1393 int family;
1394 int err;
1395 u32 mark = x->mark.v & x->mark.m;
1396 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1397
1398 family = x->props.family;
1399
1400 to_put = NULL;
1401
1402 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1403
1404 x1 = __xfrm_state_locate(x, use_spi, family);
1405 if (x1) {
1406 to_put = x1;
1407 x1 = NULL;
1408 err = -EEXIST;
1409 goto out;
1410 }
1411
1412 if (use_spi && x->km.seq) {
1413 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1414 if (x1 && ((x1->id.proto != x->id.proto) ||
1415 !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1416 to_put = x1;
1417 x1 = NULL;
1418 }
1419 }
1420
1421 if (use_spi && !x1)
1422 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1423 x->props.reqid, x->if_id, x->id.proto,
1424 &x->id.daddr, &x->props.saddr, 0);
1425
1426 __xfrm_state_bump_genids(x);
1427 __xfrm_state_insert(x);
1428 err = 0;
1429
1430out:
1431 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1432
1433 if (x1) {
1434 xfrm_state_delete(x1);
1435 xfrm_state_put(x1);
1436 }
1437
1438 if (to_put)
1439 xfrm_state_put(to_put);
1440
1441 return err;
1442}
1443EXPORT_SYMBOL(xfrm_state_add);
1444
1445#ifdef CONFIG_XFRM_MIGRATE
1446static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
1447{
1448 struct xfrm_user_sec_ctx *uctx;
1449 int size = sizeof(*uctx) + security->ctx_len;
1450 int err;
1451
1452 uctx = kmalloc(size, GFP_KERNEL);
1453 if (!uctx)
1454 return -ENOMEM;
1455
1456 uctx->exttype = XFRMA_SEC_CTX;
1457 uctx->len = size;
1458 uctx->ctx_doi = security->ctx_doi;
1459 uctx->ctx_alg = security->ctx_alg;
1460 uctx->ctx_len = security->ctx_len;
1461 memcpy(uctx + 1, security->ctx_str, security->ctx_len);
1462 err = security_xfrm_state_alloc(x, uctx);
1463 kfree(uctx);
1464 if (err)
1465 return err;
1466
1467 return 0;
1468}
1469
1470static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
1471 struct xfrm_encap_tmpl *encap)
1472{
1473 struct net *net = xs_net(orig);
1474 struct xfrm_state *x = xfrm_state_alloc(net);
1475 if (!x)
1476 goto out;
1477
1478 memcpy(&x->id, &orig->id, sizeof(x->id));
1479 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1480 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1481 x->props.mode = orig->props.mode;
1482 x->props.replay_window = orig->props.replay_window;
1483 x->props.reqid = orig->props.reqid;
1484 x->props.family = orig->props.family;
1485 x->props.saddr = orig->props.saddr;
1486
1487 if (orig->aalg) {
1488 x->aalg = xfrm_algo_auth_clone(orig->aalg);
1489 if (!x->aalg)
1490 goto error;
1491 }
1492 x->props.aalgo = orig->props.aalgo;
1493
1494 if (orig->aead) {
1495 x->aead = xfrm_algo_aead_clone(orig->aead);
1496 x->geniv = orig->geniv;
1497 if (!x->aead)
1498 goto error;
1499 }
1500 if (orig->ealg) {
1501 x->ealg = xfrm_algo_clone(orig->ealg);
1502 if (!x->ealg)
1503 goto error;
1504 }
1505 x->props.ealgo = orig->props.ealgo;
1506
1507 if (orig->calg) {
1508 x->calg = xfrm_algo_clone(orig->calg);
1509 if (!x->calg)
1510 goto error;
1511 }
1512 x->props.calgo = orig->props.calgo;
1513
1514 if (encap || orig->encap) {
1515 if (encap)
1516 x->encap = kmemdup(encap, sizeof(*x->encap),
1517 GFP_KERNEL);
1518 else
1519 x->encap = kmemdup(orig->encap, sizeof(*x->encap),
1520 GFP_KERNEL);
1521
1522 if (!x->encap)
1523 goto error;
1524 }
1525
1526	if (orig->security)
1527 if (clone_security(x, orig->security))
1528 goto error;
1529
1530	if (orig->coaddr) {
1531 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1532 GFP_KERNEL);
1533 if (!x->coaddr)
1534 goto error;
1535 }
1536
1537 if (orig->replay_esn) {
1538 if (xfrm_replay_clone(x, orig))
1539 goto error;
1540 }
1541
1542 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1543	memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
1544
1545	x->props.flags = orig->props.flags;
1546 x->props.extra_flags = orig->props.extra_flags;
1547
1548 x->if_id = orig->if_id;
1549 x->tfcpad = orig->tfcpad;
1550 x->replay_maxdiff = orig->replay_maxdiff;
1551 x->replay_maxage = orig->replay_maxage;
1552	memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
1553	x->km.state = orig->km.state;
1554 x->km.seq = orig->km.seq;
1555 x->replay = orig->replay;
1556 x->preplay = orig->preplay;
1557	x->mapping_maxage = orig->mapping_maxage;
1558 x->new_mapping = 0;
1559 x->new_mapping_sport = 0;
1560
1561 return x;
1562
1563 error:
1564 xfrm_state_put(x);
1565out:
1566 return NULL;
1567}
1568
1569struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1570 u32 if_id)
1571{
1572 unsigned int h;
1573 struct xfrm_state *x = NULL;
1574
1575 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1576
1577 if (m->reqid) {
1578 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
1579 m->reqid, m->old_family);
1580 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1581 if (x->props.mode != m->mode ||
1582 x->id.proto != m->proto)
1583 continue;
1584 if (m->reqid && x->props.reqid != m->reqid)
1585 continue;
1586			if (if_id != 0 && x->if_id != if_id)
1587 continue;
1588			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1589 m->old_family) ||
1590 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1591 m->old_family))
1592 continue;
1593 xfrm_state_hold(x);
1594 break;
1595 }
1596 } else {
1597 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
1598 m->old_family);
1599 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
1600 if (x->props.mode != m->mode ||
1601 x->id.proto != m->proto)
1602 continue;
1603			if (if_id != 0 && x->if_id != if_id)
1604 continue;
1605			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1606 m->old_family) ||
1607 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1608 m->old_family))
1609 continue;
1610 xfrm_state_hold(x);
1611 break;
1612 }
1613 }
1614
1615 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1616
1617 return x;
1618}
1619EXPORT_SYMBOL(xfrm_migrate_state_find);
1620
1621struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1622 struct xfrm_migrate *m,
1623 struct xfrm_encap_tmpl *encap)
1624{
1625 struct xfrm_state *xc;
1626
1627 xc = xfrm_state_clone(x, encap);
1628 if (!xc)
1629 return NULL;
1630
1631	xc->props.family = m->new_family;
1632
1633 if (xfrm_init_state(xc) < 0)
1634 goto error;
1635
1636	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1637 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1638
1639 /* add state */
1640 if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
1641		/* care is needed when the destination address of the
1642		   state is to be updated, as it is part of the triplet */
1643 xfrm_state_insert(xc);
1644 } else {
1645 if (xfrm_state_add(xc) < 0)
1646 goto error;
1647 }
1648
1649 return xc;
1650error:
1651 xfrm_state_put(xc);
1652 return NULL;
1653}
1654EXPORT_SYMBOL(xfrm_state_migrate);
1655#endif
1656
1657int xfrm_state_update(struct xfrm_state *x)
1658{
1659 struct xfrm_state *x1, *to_put;
1660 int err;
1661 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1662 struct net *net = xs_net(x);
1663
1664 to_put = NULL;
1665
1666 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1667 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1668
1669 err = -ESRCH;
1670 if (!x1)
1671 goto out;
1672
1673 if (xfrm_state_kern(x1)) {
1674 to_put = x1;
1675 err = -EEXIST;
1676 goto out;
1677 }
1678
1679 if (x1->km.state == XFRM_STATE_ACQ) {
1680 __xfrm_state_insert(x);
1681 x = NULL;
1682 }
1683 err = 0;
1684
1685out:
1686 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1687
1688 if (to_put)
1689 xfrm_state_put(to_put);
1690
1691 if (err)
1692 return err;
1693
1694 if (!x) {
1695 xfrm_state_delete(x1);
1696 xfrm_state_put(x1);
1697 return 0;
1698 }
1699
1700 err = -EINVAL;
1701 spin_lock_bh(&x1->lock);
1702 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1703 if (x->encap && x1->encap &&
1704 x->encap->encap_type == x1->encap->encap_type)
1705 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1706 else if (x->encap || x1->encap)
1707 goto fail;
1708
1709 if (x->coaddr && x1->coaddr) {
1710 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1711 }
1712 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1713 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1714 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1715 x1->km.dying = 0;
1716
1717		hrtimer_start(&x1->mtimer, ktime_set(1, 0),
1718 HRTIMER_MODE_REL_SOFT);
1719		if (x1->curlft.use_time)
1720 xfrm_state_check_expire(x1);
1721
1722 if (x->props.smark.m || x->props.smark.v || x->if_id) {
1723 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1724
1725 if (x->props.smark.m || x->props.smark.v)
1726 x1->props.smark = x->props.smark;
1727
1728 if (x->if_id)
1729 x1->if_id = x->if_id;
1730
1731 __xfrm_state_bump_genids(x1);
1732 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1733 }
1734
1735 err = 0;
1736 x->km.state = XFRM_STATE_DEAD;
1737 __xfrm_state_put(x);
1738 }
1739
1740fail:
1741 spin_unlock_bh(&x1->lock);
1742
1743 xfrm_state_put(x1);
1744
1745 return err;
1746}
1747EXPORT_SYMBOL(xfrm_state_update);
1748
1749int xfrm_state_check_expire(struct xfrm_state *x)
1750{
1751 if (!x->curlft.use_time)
1752 x->curlft.use_time = ktime_get_real_seconds();
1753
1754 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1755 x->curlft.packets >= x->lft.hard_packet_limit) {
1756 x->km.state = XFRM_STATE_EXPIRED;
1757		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
1758		return -EINVAL;
1759 }
1760
1761 if (!x->km.dying &&
1762 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1763 x->curlft.packets >= x->lft.soft_packet_limit)) {
1764 x->km.dying = 1;
1765 km_state_expired(x, 0, 0);
1766 }
1767 return 0;
1768}
1769EXPORT_SYMBOL(xfrm_state_check_expire);
1770
1771struct xfrm_state *
1772xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
1773 u8 proto, unsigned short family)
1774{
1775 struct xfrm_state *x;
1776
1777 rcu_read_lock();
1778 x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1779 rcu_read_unlock();
1780 return x;
1781}
1782EXPORT_SYMBOL(xfrm_state_lookup);
1783
1784struct xfrm_state *
1785xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1786 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1787 u8 proto, unsigned short family)
1788{
1789 struct xfrm_state *x;
1790
1791 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1792 x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1793 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1794 return x;
1795}
1796EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1797
1798struct xfrm_state *
1799xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
1800 u32 if_id, u8 proto, const xfrm_address_t *daddr,
1801 const xfrm_address_t *saddr, int create, unsigned short family)
1802{
1803 struct xfrm_state *x;
1804
1805 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1806 x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
1807 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1808
1809 return x;
1810}
1811EXPORT_SYMBOL(xfrm_find_acq);
1812
1813#ifdef CONFIG_XFRM_SUB_POLICY
1814#if IS_ENABLED(CONFIG_IPV6)
1815/* distribution counting sort function for xfrm_state and xfrm_tmpl */
1816static void
1817__xfrm6_sort(void **dst, void **src, int n,
1818 int (*cmp)(const void *p), int maxclass)
1819{
1820 int count[XFRM_MAX_DEPTH] = { };
1821 int class[XFRM_MAX_DEPTH];
1822 int i;
1823
1824 for (i = 0; i < n; i++) {
1825 int c = cmp(src[i]);
1826
1827 class[i] = c;
1828 count[c]++;
1829 }
1830
1831 for (i = 2; i < maxclass; i++)
1832 count[i] += count[i - 1];
1833
1834 for (i = 0; i < n; i++) {
1835 dst[count[class[i] - 1]++] = src[i];
1836 src[i] = NULL;
1837 }
1838}
1839
1840/* Rule for xfrm_state:
1841 *
1842 * rule 1: select IPsec transport except AH
1843 * rule 2: select MIPv6 RO or inbound trigger
1844 * rule 3: select IPsec transport AH
1845 * rule 4: select IPsec tunnel
1846 * rule 5: others
1847 */
1848static int __xfrm6_state_sort_cmp(const void *p)
1849{
1850 const struct xfrm_state *v = p;
1851
1852 switch (v->props.mode) {
1853 case XFRM_MODE_TRANSPORT:
1854 if (v->id.proto != IPPROTO_AH)
1855 return 1;
1856 else
1857 return 3;
1858#if IS_ENABLED(CONFIG_IPV6_MIP6)
1859 case XFRM_MODE_ROUTEOPTIMIZATION:
1860 case XFRM_MODE_IN_TRIGGER:
1861 return 2;
1862#endif
1863 case XFRM_MODE_TUNNEL:
1864 case XFRM_MODE_BEET:
1865 return 4;
1866 }
1867 return 5;
1868}
1869
1870/* Rule for xfrm_tmpl:
1871 *
1872 * rule 1: select IPsec transport
1873 * rule 2: select MIPv6 RO or inbound trigger
1874 * rule 3: select IPsec tunnel
1875 * rule 4: others
1876 */
1877static int __xfrm6_tmpl_sort_cmp(const void *p)
1878{
1879 const struct xfrm_tmpl *v = p;
1880
1881 switch (v->mode) {
1882 case XFRM_MODE_TRANSPORT:
1883 return 1;
1884#if IS_ENABLED(CONFIG_IPV6_MIP6)
1885 case XFRM_MODE_ROUTEOPTIMIZATION:
1886 case XFRM_MODE_IN_TRIGGER:
1887 return 2;
1888#endif
1889 case XFRM_MODE_TUNNEL:
1890 case XFRM_MODE_BEET:
1891 return 3;
1892 }
1893 return 4;
1894}
1895#else
1896static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; }
1897static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; }
1898
1899static inline void
1900__xfrm6_sort(void **dst, void **src, int n,
1901 int (*cmp)(const void *p), int maxclass)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001902{
1903 int i;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001904
David Brazdil0f672f62019-12-10 10:32:29 +00001905 for (i = 0; i < n; i++)
1906 dst[i] = src[i];
1907}
1908#endif /* CONFIG_IPV6 */
1909
1910void
1911xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1912 unsigned short family)
1913{
1914 int i;
1915
1916 if (family == AF_INET6)
1917 __xfrm6_sort((void **)dst, (void **)src, n,
1918 __xfrm6_tmpl_sort_cmp, 5);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001919 else
1920 for (i = 0; i < n; i++)
1921 dst[i] = src[i];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001922}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001923
David Brazdil0f672f62019-12-10 10:32:29 +00001924void
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001925xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1926 unsigned short family)
1927{
1928 int i;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001929
David Brazdil0f672f62019-12-10 10:32:29 +00001930 if (family == AF_INET6)
1931 __xfrm6_sort((void **)dst, (void **)src, n,
1932 __xfrm6_state_sort_cmp, 6);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001933 else
1934 for (i = 0; i < n; i++)
1935 dst[i] = src[i];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001936}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001937#endif
1938
1939/* Crude, but building a proper resolution list is not worth it: just scan the whole bydst hash table for a matching larval (ACQUIRE) state. */
1940
1941static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1942{
1943 int i;
1944
1945 for (i = 0; i <= net->xfrm.state_hmask; i++) {
1946 struct xfrm_state *x;
1947
1948 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1949 if (x->km.seq == seq &&
1950 (mark & x->mark.m) == x->mark.v &&
1951 x->km.state == XFRM_STATE_ACQ) {
1952 xfrm_state_hold(x);
1953 return x;
1954 }
1955 }
1956 }
1957 return NULL;
1958}
1959
1960struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1961{
1962 struct xfrm_state *x;
1963
1964 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1965 x = __xfrm_find_acq_byseq(net, mark, seq);
1966 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1967 return x;
1968}
1969EXPORT_SYMBOL(xfrm_find_acq_byseq);
1970
1971u32 xfrm_get_acqseq(void)
1972{
1973 u32 res;
1974 static atomic_t acqseq;
1975
1976 do {
1977 res = atomic_inc_return(&acqseq);
1978 } while (!res);
1979
1980 return res;
1981}
1982EXPORT_SYMBOL(xfrm_get_acqseq);
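/*
 * The do/while above exists only to skip the value 0: once the 32-bit
 * counter wraps, atomic_inc_return() can yield 0, which would be
 * indistinguishable from "no acquire sequence", so the loop retries
 * until the result is non-zero. A caller building an ACQUIRE might use
 * it roughly as (sketch only):
 *
 *	x->km.seq = xfrm_get_acqseq();
 */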
1983
1984int verify_spi_info(u8 proto, u32 min, u32 max)
1985{
1986 switch (proto) {
1987 case IPPROTO_AH:
1988 case IPPROTO_ESP:
1989 break;
1990
1991 case IPPROTO_COMP:
1992		/* IPCOMP SPIs (CPIs) are only 16 bits. */
1993 if (max >= 0x10000)
1994 return -EINVAL;
1995 break;
1996
1997 default:
1998 return -EINVAL;
1999 }
2000
2001 if (min > max)
2002 return -EINVAL;
2003
2004 return 0;
2005}
2006EXPORT_SYMBOL(verify_spi_info);
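/*
 * Small illustration (hypothetical values): AH and ESP accept any 32-bit
 * SPI range, while IPCOMP "SPIs" are really 16-bit CPIs, so
 *
 *	verify_spi_info(IPPROTO_ESP,  0x100, 0xffffffff)  ->  0
 *	verify_spi_info(IPPROTO_COMP, 0x100, 0x20000)     -> -EINVAL
 *	verify_spi_info(IPPROTO_AH,   0x200, 0x100)       -> -EINVAL (min > max)
 */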
2007
2008int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
2009{
2010 struct net *net = xs_net(x);
2011 unsigned int h;
2012 struct xfrm_state *x0;
2013 int err = -ENOENT;
2014 __be32 minspi = htonl(low);
2015 __be32 maxspi = htonl(high);
Olivier Deprez0e641232021-09-23 10:07:05 +02002016 __be32 newspi = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002017 u32 mark = x->mark.v & x->mark.m;
2018
2019 spin_lock_bh(&x->lock);
2020 if (x->km.state == XFRM_STATE_DEAD)
2021 goto unlock;
2022
2023 err = 0;
2024 if (x->id.spi)
2025 goto unlock;
2026
2027 err = -ENOENT;
2028
2029 if (minspi == maxspi) {
2030 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
2031 if (x0) {
2032 xfrm_state_put(x0);
2033 goto unlock;
2034 }
Olivier Deprez0e641232021-09-23 10:07:05 +02002035 newspi = minspi;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002036 } else {
2037 u32 spi = 0;
2038 for (h = 0; h < high-low+1; h++) {
2039 spi = low + prandom_u32()%(high-low+1);
2040 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
2041 if (x0 == NULL) {
Olivier Deprez0e641232021-09-23 10:07:05 +02002042 newspi = htonl(spi);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002043 break;
2044 }
2045 xfrm_state_put(x0);
2046 }
2047 }
Olivier Deprez0e641232021-09-23 10:07:05 +02002048 if (newspi) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002049 spin_lock_bh(&net->xfrm.xfrm_state_lock);
Olivier Deprez0e641232021-09-23 10:07:05 +02002050 x->id.spi = newspi;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002051 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
2052 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
2053 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2054
2055 err = 0;
2056 }
2057
2058unlock:
2059 spin_unlock_bh(&x->lock);
2060
2061 return err;
2062}
2063EXPORT_SYMBOL(xfrm_alloc_spi);
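/*
 * Usage sketch (the range is made up, not taken from a real caller): low
 * and high are host-order bounds; low == high requests that exact SPI,
 * any wider range picks a random unused value within it:
 *
 *	err = xfrm_alloc_spi(x, 0x00000100, 0x0fffffff);
 *
 * On success x->id.spi holds the chosen SPI (already htonl()-converted)
 * and the state has been linked into the byspi hash; -ENOENT means no
 * free SPI was found in the range (or the state was already dead).
 */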
2064
2065static bool __xfrm_state_filter_match(struct xfrm_state *x,
2066 struct xfrm_address_filter *filter)
2067{
2068 if (filter) {
2069 if ((filter->family == AF_INET ||
2070 filter->family == AF_INET6) &&
2071 x->props.family != filter->family)
2072 return false;
2073
2074 return addr_match(&x->props.saddr, &filter->saddr,
2075 filter->splen) &&
2076 addr_match(&x->id.daddr, &filter->daddr,
2077 filter->dplen);
2078 }
2079 return true;
2080}
2081
2082int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
2083 int (*func)(struct xfrm_state *, int, void*),
2084 void *data)
2085{
2086 struct xfrm_state *state;
2087 struct xfrm_state_walk *x;
2088 int err = 0;
2089
2090 if (walk->seq != 0 && list_empty(&walk->all))
2091 return 0;
2092
2093 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2094 if (list_empty(&walk->all))
2095 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
2096 else
2097 x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
2098 list_for_each_entry_from(x, &net->xfrm.state_all, all) {
2099 if (x->state == XFRM_STATE_DEAD)
2100 continue;
2101 state = container_of(x, struct xfrm_state, km);
2102 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
2103 continue;
2104 if (!__xfrm_state_filter_match(state, walk->filter))
2105 continue;
2106 err = func(state, walk->seq, data);
2107 if (err) {
2108 list_move_tail(&walk->all, &x->all);
2109 goto out;
2110 }
2111 walk->seq++;
2112 }
2113 if (walk->seq == 0) {
2114 err = -ENOENT;
2115 goto out;
2116 }
2117 list_del_init(&walk->all);
2118out:
2119 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2120 return err;
2121}
2122EXPORT_SYMBOL(xfrm_state_walk);
2123
2124void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
2125 struct xfrm_address_filter *filter)
2126{
2127 INIT_LIST_HEAD(&walk->all);
2128 walk->proto = proto;
2129 walk->state = XFRM_STATE_DEAD;
2130 walk->seq = 0;
2131 walk->filter = filter;
2132}
2133EXPORT_SYMBOL(xfrm_state_walk_init);
2134
2135void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
2136{
2137 kfree(walk->filter);
2138
2139 if (list_empty(&walk->all))
2140 return;
2141
2142 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2143 list_del(&walk->all);
2144 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2145}
2146EXPORT_SYMBOL(xfrm_state_walk_done);
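/*
 * Putting the three walker helpers together, a dump loop looks roughly
 * like this (sketch only; dump_one and data are made-up names):
 *
 *	struct xfrm_state_walk walk;
 *
 *	xfrm_state_walk_init(&walk, IPPROTO_ESP, NULL);
 *	err = xfrm_state_walk(net, &walk, dump_one, data);
 *	xfrm_state_walk_done(&walk, net);
 *
 * If the callback returns non-zero (say, a netlink skb filled up), the
 * walker keeps its position on net->xfrm.state_all and the next
 * xfrm_state_walk() call retries from the entry whose callback bailed
 * out.
 */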
2147
2148static void xfrm_replay_timer_handler(struct timer_list *t)
2149{
2150 struct xfrm_state *x = from_timer(x, t, rtimer);
2151
2152 spin_lock(&x->lock);
2153
2154 if (x->km.state == XFRM_STATE_VALID) {
2155 if (xfrm_aevent_is_on(xs_net(x)))
2156 x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
2157 else
2158 x->xflags |= XFRM_TIME_DEFER;
2159 }
2160
2161 spin_unlock(&x->lock);
2162}
2163
2164static LIST_HEAD(xfrm_km_list);
2165
2166void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2167{
2168 struct xfrm_mgr *km;
2169
2170 rcu_read_lock();
2171 list_for_each_entry_rcu(km, &xfrm_km_list, list)
2172 if (km->notify_policy)
2173 km->notify_policy(xp, dir, c);
2174 rcu_read_unlock();
2175}
2176
2177void km_state_notify(struct xfrm_state *x, const struct km_event *c)
2178{
2179 struct xfrm_mgr *km;
2180 rcu_read_lock();
2181 list_for_each_entry_rcu(km, &xfrm_km_list, list)
2182 if (km->notify)
2183 km->notify(x, c);
2184 rcu_read_unlock();
2185}
2186
2187EXPORT_SYMBOL(km_policy_notify);
2188EXPORT_SYMBOL(km_state_notify);
2189
2190void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
2191{
2192 struct km_event c;
2193
2194 c.data.hard = hard;
2195 c.portid = portid;
2196 c.event = XFRM_MSG_EXPIRE;
2197 km_state_notify(x, &c);
2198}
2199
2200EXPORT_SYMBOL(km_state_expired);
2201/*
2202 * Send to all registered key managers regardless of individual
2203 * failures; one success is enough.
2204 */
2205int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
2206{
2207 int err = -EINVAL, acqret;
2208 struct xfrm_mgr *km;
2209
2210 rcu_read_lock();
2211 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2212 acqret = km->acquire(x, t, pol);
2213 if (!acqret)
2214 err = acqret;
2215 }
2216 rcu_read_unlock();
2217 return err;
2218}
2219EXPORT_SYMBOL(km_query);
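/*
 * Note on the error handling above: err starts out as -EINVAL and is
 * overwritten with 0 as soon as any manager's ->acquire() succeeds, so
 * the query counts as delivered once at least one registered key manager
 * accepted it, exactly as the comment before km_query() says.
 */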
2220
Olivier Deprez157378f2022-04-04 15:47:50 +02002221static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002222{
2223 int err = -EINVAL;
2224 struct xfrm_mgr *km;
2225
2226 rcu_read_lock();
2227 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2228 if (km->new_mapping)
2229 err = km->new_mapping(x, ipaddr, sport);
2230 if (!err)
2231 break;
2232 }
2233 rcu_read_unlock();
2234 return err;
2235}
Olivier Deprez157378f2022-04-04 15:47:50 +02002236
2237int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
2238{
2239 int ret = 0;
2240
2241 if (x->mapping_maxage) {
2242 if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage ||
2243 x->new_mapping_sport != sport) {
2244 x->new_mapping_sport = sport;
2245 x->new_mapping = jiffies / HZ;
2246 ret = __km_new_mapping(x, ipaddr, sport);
2247 }
2248 } else {
2249 ret = __km_new_mapping(x, ipaddr, sport);
2250 }
2251
2252 return ret;
2253}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002254EXPORT_SYMBOL(km_new_mapping);
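/*
 * The wrapper above rate-limits NAT mapping notifications: when
 * x->mapping_maxage is set, a new mapping event is only generated if the
 * source port changed or the previous event is older than mapping_maxage
 * seconds (tracked in whole seconds via jiffies / HZ); with
 * mapping_maxage == 0 every mapping change is reported immediately.
 */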
2255
2256void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
2257{
2258 struct km_event c;
2259
2260 c.data.hard = hard;
2261 c.portid = portid;
2262 c.event = XFRM_MSG_POLEXPIRE;
2263 km_policy_notify(pol, dir, &c);
2264}
2265EXPORT_SYMBOL(km_policy_expired);
2266
2267#ifdef CONFIG_XFRM_MIGRATE
2268int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2269 const struct xfrm_migrate *m, int num_migrate,
2270 const struct xfrm_kmaddress *k,
2271 const struct xfrm_encap_tmpl *encap)
2272{
2273 int err = -EINVAL;
2274 int ret;
2275 struct xfrm_mgr *km;
2276
2277 rcu_read_lock();
2278 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2279 if (km->migrate) {
2280 ret = km->migrate(sel, dir, type, m, num_migrate, k,
2281 encap);
2282 if (!ret)
2283 err = ret;
2284 }
2285 }
2286 rcu_read_unlock();
2287 return err;
2288}
2289EXPORT_SYMBOL(km_migrate);
2290#endif
2291
2292int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
2293{
2294 int err = -EINVAL;
2295 int ret;
2296 struct xfrm_mgr *km;
2297
2298 rcu_read_lock();
2299 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2300 if (km->report) {
2301 ret = km->report(net, proto, sel, addr);
2302 if (!ret)
2303 err = ret;
2304 }
2305 }
2306 rcu_read_unlock();
2307 return err;
2308}
2309EXPORT_SYMBOL(km_report);
2310
David Brazdil0f672f62019-12-10 10:32:29 +00002311static bool km_is_alive(const struct km_event *c)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002312{
2313 struct xfrm_mgr *km;
2314 bool is_alive = false;
2315
2316 rcu_read_lock();
2317 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2318 if (km->is_alive && km->is_alive(c)) {
2319 is_alive = true;
2320 break;
2321 }
2322 }
2323 rcu_read_unlock();
2324
2325 return is_alive;
2326}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002327
Olivier Deprez157378f2022-04-04 15:47:50 +02002328#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2329static DEFINE_SPINLOCK(xfrm_translator_lock);
2330static struct xfrm_translator __rcu *xfrm_translator;
2331
2332struct xfrm_translator *xfrm_get_translator(void)
2333{
2334 struct xfrm_translator *xtr;
2335
2336 rcu_read_lock();
2337 xtr = rcu_dereference(xfrm_translator);
2338 if (unlikely(!xtr))
2339 goto out;
2340 if (!try_module_get(xtr->owner))
2341 xtr = NULL;
2342out:
2343 rcu_read_unlock();
2344 return xtr;
2345}
2346EXPORT_SYMBOL_GPL(xfrm_get_translator);
2347
2348void xfrm_put_translator(struct xfrm_translator *xtr)
2349{
2350 module_put(xtr->owner);
2351}
2352EXPORT_SYMBOL_GPL(xfrm_put_translator);
2353
2354int xfrm_register_translator(struct xfrm_translator *xtr)
2355{
2356 int err = 0;
2357
2358 spin_lock_bh(&xfrm_translator_lock);
2359 if (unlikely(xfrm_translator != NULL))
2360 err = -EEXIST;
2361 else
2362 rcu_assign_pointer(xfrm_translator, xtr);
2363 spin_unlock_bh(&xfrm_translator_lock);
2364
2365 return err;
2366}
2367EXPORT_SYMBOL_GPL(xfrm_register_translator);
2368
2369int xfrm_unregister_translator(struct xfrm_translator *xtr)
2370{
2371 int err = 0;
2372
2373 spin_lock_bh(&xfrm_translator_lock);
2374 if (likely(xfrm_translator != NULL)) {
2375 if (rcu_access_pointer(xfrm_translator) != xtr)
2376 err = -EINVAL;
2377 else
2378 RCU_INIT_POINTER(xfrm_translator, NULL);
2379 }
2380 spin_unlock_bh(&xfrm_translator_lock);
2381 synchronize_rcu();
2382
2383 return err;
2384}
2385EXPORT_SYMBOL_GPL(xfrm_unregister_translator);
2386#endif
2387
2388int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002389{
2390 int err;
2391 u8 *data;
2392 struct xfrm_mgr *km;
2393 struct xfrm_policy *pol = NULL;
2394
Olivier Deprez157378f2022-04-04 15:47:50 +02002395 if (sockptr_is_null(optval) && !optlen) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002396 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2397 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2398 __sk_dst_reset(sk);
2399 return 0;
2400 }
2401
2402 if (optlen <= 0 || optlen > PAGE_SIZE)
2403 return -EMSGSIZE;
2404
Olivier Deprez157378f2022-04-04 15:47:50 +02002405 data = memdup_sockptr(optval, optlen);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002406 if (IS_ERR(data))
2407 return PTR_ERR(data);
2408
Olivier Deprez157378f2022-04-04 15:47:50 +02002409 if (in_compat_syscall()) {
2410 struct xfrm_translator *xtr = xfrm_get_translator();
2411
2412 if (!xtr) {
2413 kfree(data);
2414 return -EOPNOTSUPP;
2415 }
2416
2417 err = xtr->xlate_user_policy_sockptr(&data, optlen);
2418 xfrm_put_translator(xtr);
2419 if (err) {
2420 kfree(data);
2421 return err;
2422 }
2423 }
2424
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002425 err = -EINVAL;
2426 rcu_read_lock();
2427 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2428 pol = km->compile_policy(sk, optname, data,
2429 optlen, &err);
2430 if (err >= 0)
2431 break;
2432 }
2433 rcu_read_unlock();
2434
2435 if (err >= 0) {
2436 xfrm_sk_policy_insert(sk, err, pol);
2437 xfrm_pol_put(pol);
2438 __sk_dst_reset(sk);
2439 err = 0;
2440 }
2441
2442 kfree(data);
2443 return err;
2444}
2445EXPORT_SYMBOL(xfrm_user_policy);
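/*
 * Behavioural summary of xfrm_user_policy() (typically reached through
 * the IP_XFRM_POLICY / IPV6_XFRM_POLICY setsockopts): a NULL buffer of
 * zero length clears both per-socket policies and resets the cached
 * route; otherwise the buffer is copied in, run through the compat
 * translator when the caller is a 32-bit task, and offered to each
 * registered key manager until one compiles it into a struct
 * xfrm_policy, which is then installed for the direction that manager
 * returned.
 */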
2446
2447static DEFINE_SPINLOCK(xfrm_km_lock);
2448
2449int xfrm_register_km(struct xfrm_mgr *km)
2450{
2451 spin_lock_bh(&xfrm_km_lock);
2452 list_add_tail_rcu(&km->list, &xfrm_km_list);
2453 spin_unlock_bh(&xfrm_km_lock);
2454 return 0;
2455}
2456EXPORT_SYMBOL(xfrm_register_km);
2457
2458int xfrm_unregister_km(struct xfrm_mgr *km)
2459{
2460 spin_lock_bh(&xfrm_km_lock);
2461 list_del_rcu(&km->list);
2462 spin_unlock_bh(&xfrm_km_lock);
2463 synchronize_rcu();
2464 return 0;
2465}
2466EXPORT_SYMBOL(xfrm_unregister_km);
2467
2468int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
2469{
2470 int err = 0;
2471
2472 if (WARN_ON(afinfo->family >= NPROTO))
2473 return -EAFNOSUPPORT;
2474
2475 spin_lock_bh(&xfrm_state_afinfo_lock);
2476 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
2477 err = -EEXIST;
2478 else
2479 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
2480 spin_unlock_bh(&xfrm_state_afinfo_lock);
2481 return err;
2482}
2483EXPORT_SYMBOL(xfrm_state_register_afinfo);
2484
2485int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
2486{
2487 int err = 0, family = afinfo->family;
2488
2489 if (WARN_ON(family >= NPROTO))
2490 return -EAFNOSUPPORT;
2491
2492 spin_lock_bh(&xfrm_state_afinfo_lock);
2493 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
2494 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
2495 err = -EINVAL;
2496 else
2497 RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
2498 }
2499 spin_unlock_bh(&xfrm_state_afinfo_lock);
2500 synchronize_rcu();
2501 return err;
2502}
2503EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
2504
2505struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
2506{
2507 if (unlikely(family >= NPROTO))
2508 return NULL;
2509
2510 return rcu_dereference(xfrm_state_afinfo[family]);
2511}
David Brazdil0f672f62019-12-10 10:32:29 +00002512EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002513
2514struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
2515{
2516 struct xfrm_state_afinfo *afinfo;
2517 if (unlikely(family >= NPROTO))
2518 return NULL;
2519 rcu_read_lock();
2520 afinfo = rcu_dereference(xfrm_state_afinfo[family]);
2521 if (unlikely(!afinfo))
2522 rcu_read_unlock();
2523 return afinfo;
2524}
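/*
 * Locking caveat worth spelling out: xfrm_state_get_afinfo() returns
 * with rcu_read_lock() still held whenever it finds an afinfo entry;
 * only the failure path above unlocks before returning NULL, so the
 * caller must drop the RCU read lock itself once it is done with the
 * afinfo.
 */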
2525
2526void xfrm_flush_gc(void)
2527{
2528 flush_work(&xfrm_state_gc_work);
2529}
2530EXPORT_SYMBOL(xfrm_flush_gc);
2531
2532/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
2533void xfrm_state_delete_tunnel(struct xfrm_state *x)
2534{
2535 if (x->tunnel) {
2536 struct xfrm_state *t = x->tunnel;
2537
2538 if (atomic_read(&t->tunnel_users) == 2)
2539 xfrm_state_delete(t);
2540 atomic_dec(&t->tunnel_users);
David Brazdil0f672f62019-12-10 10:32:29 +00002541 xfrm_state_put_sync(t);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002542 x->tunnel = NULL;
2543 }
2544}
2545EXPORT_SYMBOL(xfrm_state_delete_tunnel);
2546
Olivier Deprez157378f2022-04-04 15:47:50 +02002547u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002548{
2549 const struct xfrm_type *type = READ_ONCE(x->type);
David Brazdil0f672f62019-12-10 10:32:29 +00002550 struct crypto_aead *aead;
2551 u32 blksize, net_adj = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002552
David Brazdil0f672f62019-12-10 10:32:29 +00002553 if (x->km.state != XFRM_STATE_VALID ||
2554 !type || type->proto != IPPROTO_ESP)
2555 return mtu - x->props.header_len;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002556
David Brazdil0f672f62019-12-10 10:32:29 +00002557 aead = x->data;
2558 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
2559
2560 switch (x->props.mode) {
2561 case XFRM_MODE_TRANSPORT:
2562 case XFRM_MODE_BEET:
2563 if (x->props.family == AF_INET)
2564 net_adj = sizeof(struct iphdr);
2565 else if (x->props.family == AF_INET6)
2566 net_adj = sizeof(struct ipv6hdr);
2567 break;
2568 case XFRM_MODE_TUNNEL:
2569 break;
2570 default:
2571 WARN_ON_ONCE(1);
2572 break;
2573 }
2574
2575 return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
2576 net_adj) & ~(blksize - 1)) + net_adj - 2;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002577}
Olivier Deprez157378f2022-04-04 15:47:50 +02002578EXPORT_SYMBOL_GPL(xfrm_state_mtu);
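/*
 * Worked example with made-up but plausible numbers: a valid ESP state
 * in tunnel mode (net_adj = 0) with props.header_len = 44, a 12 byte
 * ICV and a 16 byte cipher block size turns a 1500 byte MTU into
 *
 *	((1500 - 44 - 12 - 0) & ~15) + 0 - 2 = 1438
 *
 * The payload is rounded down to the cipher block size and two further
 * bytes are kept for the ESP trailer's pad-length and next-header
 * fields. Non-ESP states (and states that are not VALID) simply get
 * mtu - props.header_len.
 */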
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002579
2580int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
2581{
David Brazdil0f672f62019-12-10 10:32:29 +00002582 const struct xfrm_mode *inner_mode;
2583 const struct xfrm_mode *outer_mode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002584 int family = x->props.family;
2585 int err;
2586
David Brazdil0f672f62019-12-10 10:32:29 +00002587 if (family == AF_INET &&
2588 xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)
2589 x->props.flags |= XFRM_STATE_NOPMTUDISC;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002590
2591 err = -EPROTONOSUPPORT;
2592
2593 if (x->sel.family != AF_UNSPEC) {
2594 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2595 if (inner_mode == NULL)
2596 goto error;
2597
2598 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
David Brazdil0f672f62019-12-10 10:32:29 +00002599 family != x->sel.family)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002600 goto error;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002601
David Brazdil0f672f62019-12-10 10:32:29 +00002602 x->inner_mode = *inner_mode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002603 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00002604 const struct xfrm_mode *inner_mode_iaf;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002605 int iafamily = AF_INET;
2606
2607 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
2608 if (inner_mode == NULL)
2609 goto error;
2610
David Brazdil0f672f62019-12-10 10:32:29 +00002611 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002612 goto error;
David Brazdil0f672f62019-12-10 10:32:29 +00002613
2614 x->inner_mode = *inner_mode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002615
2616 if (x->props.family == AF_INET)
2617 iafamily = AF_INET6;
2618
2619 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2620 if (inner_mode_iaf) {
2621 if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
David Brazdil0f672f62019-12-10 10:32:29 +00002622 x->inner_mode_iaf = *inner_mode_iaf;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002623 }
2624 }
2625
2626 x->type = xfrm_get_type(x->id.proto, family);
2627 if (x->type == NULL)
2628 goto error;
2629
2630 x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
2631
2632 err = x->type->init_state(x);
2633 if (err)
2634 goto error;
2635
David Brazdil0f672f62019-12-10 10:32:29 +00002636 outer_mode = xfrm_get_mode(x->props.mode, family);
2637 if (!outer_mode) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002638 err = -EPROTONOSUPPORT;
2639 goto error;
2640 }
2641
David Brazdil0f672f62019-12-10 10:32:29 +00002642 x->outer_mode = *outer_mode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002643 if (init_replay) {
2644 err = xfrm_init_replay(x);
2645 if (err)
2646 goto error;
2647 }
2648
2649error:
2650 return err;
2651}
2652
2653EXPORT_SYMBOL(__xfrm_init_state);
2654
2655int xfrm_init_state(struct xfrm_state *x)
2656{
2657 int err;
2658
2659 err = __xfrm_init_state(x, true, false);
2660 if (!err)
2661 x->km.state = XFRM_STATE_VALID;
2662
2663 return err;
2664}
2665
2666EXPORT_SYMBOL(xfrm_init_state);
2667
2668int __net_init xfrm_state_init(struct net *net)
2669{
2670 unsigned int sz;
2671
2672 if (net_eq(net, &init_net))
2673 xfrm_state_cache = KMEM_CACHE(xfrm_state,
2674 SLAB_HWCACHE_ALIGN | SLAB_PANIC);
2675
2676 INIT_LIST_HEAD(&net->xfrm.state_all);
2677
2678 sz = sizeof(struct hlist_head) * 8;
2679
2680 net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2681 if (!net->xfrm.state_bydst)
2682 goto out_bydst;
2683 net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2684 if (!net->xfrm.state_bysrc)
2685 goto out_bysrc;
2686 net->xfrm.state_byspi = xfrm_hash_alloc(sz);
2687 if (!net->xfrm.state_byspi)
2688 goto out_byspi;
2689 net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2690
2691 net->xfrm.state_num = 0;
2692 INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
2693 spin_lock_init(&net->xfrm.xfrm_state_lock);
Olivier Deprez0e641232021-09-23 10:07:05 +02002694 seqcount_init(&net->xfrm.xfrm_state_hash_generation);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002695 return 0;
2696
2697out_byspi:
2698 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2699out_bysrc:
2700 xfrm_hash_free(net->xfrm.state_bydst, sz);
2701out_bydst:
2702 return -ENOMEM;
2703}
2704
2705void xfrm_state_fini(struct net *net)
2706{
2707 unsigned int sz;
2708
2709 flush_work(&net->xfrm.state_hash_work);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002710 flush_work(&xfrm_state_gc_work);
David Brazdil0f672f62019-12-10 10:32:29 +00002711 xfrm_state_flush(net, 0, false, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002712
2713 WARN_ON(!list_empty(&net->xfrm.state_all));
2714
2715 sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
2716 WARN_ON(!hlist_empty(net->xfrm.state_byspi));
2717 xfrm_hash_free(net->xfrm.state_byspi, sz);
2718 WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
2719 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2720 WARN_ON(!hlist_empty(net->xfrm.state_bydst));
2721 xfrm_hash_free(net->xfrm.state_bydst, sz);
2722}
2723
2724#ifdef CONFIG_AUDITSYSCALL
2725static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2726 struct audit_buffer *audit_buf)
2727{
2728 struct xfrm_sec_ctx *ctx = x->security;
2729 u32 spi = ntohl(x->id.spi);
2730
2731 if (ctx)
2732 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2733 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2734
2735 switch (x->props.family) {
2736 case AF_INET:
2737 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2738 &x->props.saddr.a4, &x->id.daddr.a4);
2739 break;
2740 case AF_INET6:
2741 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2742 x->props.saddr.a6, x->id.daddr.a6);
2743 break;
2744 }
2745
2746 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2747}
2748
2749static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2750 struct audit_buffer *audit_buf)
2751{
2752 const struct iphdr *iph4;
2753 const struct ipv6hdr *iph6;
2754
2755 switch (family) {
2756 case AF_INET:
2757 iph4 = ip_hdr(skb);
2758 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2759 &iph4->saddr, &iph4->daddr);
2760 break;
2761 case AF_INET6:
2762 iph6 = ipv6_hdr(skb);
2763 audit_log_format(audit_buf,
2764 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2765 &iph6->saddr, &iph6->daddr,
2766 iph6->flow_lbl[0] & 0x0f,
2767 iph6->flow_lbl[1],
2768 iph6->flow_lbl[2]);
2769 break;
2770 }
2771}
2772
2773void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
2774{
2775 struct audit_buffer *audit_buf;
2776
2777 audit_buf = xfrm_audit_start("SAD-add");
2778 if (audit_buf == NULL)
2779 return;
2780 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2781 xfrm_audit_helper_sainfo(x, audit_buf);
2782 audit_log_format(audit_buf, " res=%u", result);
2783 audit_log_end(audit_buf);
2784}
2785EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2786
2787void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
2788{
2789 struct audit_buffer *audit_buf;
2790
2791 audit_buf = xfrm_audit_start("SAD-delete");
2792 if (audit_buf == NULL)
2793 return;
2794 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2795 xfrm_audit_helper_sainfo(x, audit_buf);
2796 audit_log_format(audit_buf, " res=%u", result);
2797 audit_log_end(audit_buf);
2798}
2799EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2800
2801void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2802 struct sk_buff *skb)
2803{
2804 struct audit_buffer *audit_buf;
2805 u32 spi;
2806
2807 audit_buf = xfrm_audit_start("SA-replay-overflow");
2808 if (audit_buf == NULL)
2809 return;
2810 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2811 /* don't record the sequence number because it's inherent in this kind
2812 * of audit message */
2813 spi = ntohl(x->id.spi);
2814 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2815 audit_log_end(audit_buf);
2816}
2817EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2818
2819void xfrm_audit_state_replay(struct xfrm_state *x,
2820 struct sk_buff *skb, __be32 net_seq)
2821{
2822 struct audit_buffer *audit_buf;
2823 u32 spi;
2824
2825 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2826 if (audit_buf == NULL)
2827 return;
2828 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2829 spi = ntohl(x->id.spi);
2830 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2831 spi, spi, ntohl(net_seq));
2832 audit_log_end(audit_buf);
2833}
2834EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
2835
2836void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2837{
2838 struct audit_buffer *audit_buf;
2839
2840 audit_buf = xfrm_audit_start("SA-notfound");
2841 if (audit_buf == NULL)
2842 return;
2843 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2844 audit_log_end(audit_buf);
2845}
2846EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2847
2848void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2849 __be32 net_spi, __be32 net_seq)
2850{
2851 struct audit_buffer *audit_buf;
2852 u32 spi;
2853
2854 audit_buf = xfrm_audit_start("SA-notfound");
2855 if (audit_buf == NULL)
2856 return;
2857 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2858 spi = ntohl(net_spi);
2859 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2860 spi, spi, ntohl(net_seq));
2861 audit_log_end(audit_buf);
2862}
2863EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2864
2865void xfrm_audit_state_icvfail(struct xfrm_state *x,
2866 struct sk_buff *skb, u8 proto)
2867{
2868 struct audit_buffer *audit_buf;
2869 __be32 net_spi;
2870 __be32 net_seq;
2871
2872 audit_buf = xfrm_audit_start("SA-icv-failure");
2873 if (audit_buf == NULL)
2874 return;
2875 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2876 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2877 u32 spi = ntohl(net_spi);
2878 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2879 spi, spi, ntohl(net_seq));
2880 }
2881 audit_log_end(audit_buf);
2882}
2883EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2884#endif /* CONFIG_AUDITSYSCALL */