// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

static const struct nla_policy rtm_nh_policy[NHA_MAX + 1] = {
	[NHA_UNSPEC]		= { .strict_start_type = NHA_UNSPEC + 1 },
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
};

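/* Hash a device ifindex into one of NH_DEV_HASHSIZE buckets by folding the
 * value down to NH_DEV_HASHBITS bits.
 */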
static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

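/* Free a group nexthop: drop the reference held on each member nexthop and
 * release both the published group and its spare copy. Called from the RCU
 * free path, so no readers can see the group any more.
 */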
static void nexthop_free_mpath(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_mpath(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	size_t sz = offsetof(struct nexthop, nh_grp)
		    + sizeof(struct nh_group)
		    + sizeof(struct nh_grp_entry) * num_nh;
	struct nh_group *nhg;

	nhg = kzalloc(sz, GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

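/* Dump the group type and the id/weight array for a group nexthop. The
 * kernel stores each entry's weight as (user weight + 1), so the on-wire
 * value is restored by subtracting one here.
 */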
static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->mpath)
		group_type = NEXTHOP_GRP_TYPE_MPATH;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

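/* Fill an RTM_*NEXTHOP message for one nexthop: the common nhmsg header and
 * NHA_ID, then either the group attributes or the blackhole/device/gateway/
 * encap attributes of a single nexthop.
 */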
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nla_put_nh_group(skb, nhg))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_u32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;

	return nla_total_size(sz) +
	       nla_total_size(2); /* NHA_GROUP_TYPE */
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* nested multipath (group within a group) is not
		 * supported
		 */
		if (nhg->mpath) {
			NL_SET_ERR_MSG(extack,
				       "Multipath group can not be a nexthop within a group");
			return false;
		}
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
	}

	return true;
}

static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
			       struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	struct nexthop_grp *nhg;
	unsigned int i, j;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd1 || nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nhg[i].weight > 254) {
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, extack))
			return -EINVAL;
	}
	for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

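/* A gateway nexthop is considered usable when its neighbour entry is in a
 * NUD_VALID state; if no neighbour entry exists yet it is assumed reachable.
 */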
static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}

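/* Hash-threshold multipath selection: walk the group entries and return the
 * first one whose upper bound covers the flow hash and whose neighbour is
 * usable; fall back to the first in-range entry if none has a usable
 * neighbour.
 */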
struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nexthop *rc = NULL;
	struct nh_group *nhg;
	int i;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		if (hash > atomic_read(&nhge->upper_bound))
			continue;

		/* nexthops always check if it is good and does
		 * not rely on a sysctl for this behavior
		 */
		nhi = rcu_dereference(nhge->nh->nh_info);
		switch (nhi->family) {
		case AF_INET:
			if (ipv4_good_nh(&nhi->fib_nh))
				return nhge->nh;
			break;
		case AF_INET6:
			if (ipv6_good_nh(&nhi->fib6_nh))
				return nhge->nh;
			break;
		}

		if (!rc)
			rc = nhge->nh;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nexthop *nh, u8 scope,
			       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		nhg = rtnl_dereference(nh->nh_grp);
		/* all nexthops in a group have the same scope */
		err = nexthop_check_scope(nhg->nh_entries[0].nh, scope, extack);
	} else {
		err = nexthop_check_scope(nh, scope, extack);
	}
out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

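/* Recompute hash-threshold upper bounds: each entry gets a share of the
 * [0, 2^31) hash space proportional to its weight, stored as a cumulative
 * upper bound that nexthop_select_path() compares the flow hash against.
 */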
static void nh_group_rebalance(struct nh_group *nhg)
{
	int total = 0;
	int w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->upper_bound, upper_bound);
	}
}

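/* Remove one member from a nexthop group without modifying the published
 * array: the surviving entries are copied into the group's spare array,
 * rebalanced, and then published via rcu_assign_pointer(). If the entry is
 * the last one, the parent group nexthop itself is removed instead.
 */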
static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
				struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhges, *new_nhges;
	struct nexthop *nhp = nhge->nh_parent;
	struct nexthop *nh = nhge->nh;
	struct nh_group *nhg, *newg;
	int i, j;

	WARN_ON(!nh);

	nhg = rtnl_dereference(nhp->nh_grp);
	newg = nhg->spare;

	/* last entry, keep it visible and remove the parent */
	if (nhg->num_nh == 1) {
		remove_nexthop(net, nhp, nlinfo);
		return;
	}

	newg->has_v4 = nhg->has_v4;
	newg->mpath = nhg->mpath;
	newg->num_nh = nhg->num_nh;

	/* copy old entries to new except the one getting removed */
	nhges = nhg->nh_entries;
	new_nhges = newg->nh_entries;
	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
		/* current nexthop getting removed */
		if (nhg->nh_entries[i].nh == nh) {
			newg->num_nh--;
			continue;
		}

		list_del(&nhges[i].nh_list);
		new_nhges[j].nh_parent = nhges[i].nh_parent;
		new_nhges[j].nh = nhges[i].nh;
		new_nhges[j].weight = nhges[i].weight;
		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
		j++;
	}

	nh_group_rebalance(newg);
	rcu_assign_pointer(nhp->nh_grp, newg);

	list_del(&nhge->nh_list);
	nexthop_put(nhge->nh);

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
}

static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
		remove_nh_grp_entry(net, nhge, nlinfo);

	/* make sure all see the newly published array before releasing rtnl */
	synchronize_net();
}

static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del_init(&nhge->nh_list);
	}
}

/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i);
	}
}

static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}

/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);
}

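/* Replace a group nexthop in place by swapping the nh_grp payloads of the
 * old and new nexthops; the existing id keeps its rb-tree node and the
 * displaced group is freed when the caller releases the temporary 'new'
 * nexthop.
 */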
static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new,
			       struct netlink_ext_ack *extack)
{
	struct nh_group *oldg, *newg;
	int i;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}

static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	struct nh_info *oldi, *newi;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	return 0;
}

static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}

/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}

static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}

/* called with rtnl_lock held */
static int insert_nexthop(struct net *net, struct nexthop *new_nh,
			  struct nh_config *cfg, struct netlink_ext_ack *extack)
{
	struct rb_node **pp, *parent = NULL, *next;
	struct rb_root *root = &net->nexthop.rb_root;
	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
	bool create = !!(cfg->nlflags & NLM_F_CREATE);
	u32 new_id = new_nh->id;
	int replace_notify = 0;
	int rc = -EEXIST;

	pp = &root->rb_node;
	while (1) {
		struct nexthop *nh;

		next = rtnl_dereference(*pp);
		if (!next)
			break;

		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (new_id < nh->id) {
			pp = &next->rb_left;
		} else if (new_id > nh->id) {
			pp = &next->rb_right;
		} else if (replace) {
			rc = replace_nexthop(net, nh, new_nh, extack);
			if (!rc) {
				new_nh = nh; /* send notification with old nh */
				replace_notify = 1;
			}
			goto out;
		} else {
			/* id already exists and not a replace */
			goto out;
		}
	}

	if (replace && !create) {
		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
		rc = -ENOENT;
		goto out;
	}

	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
	rb_insert_color(&new_nh->rb_node, root);
	rc = 0;
out:
	if (!rc) {
		nh_base_seq_inc(net);
		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
		if (replace_notify)
			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
	}

	return rc;
}

/* rtnl */
/* remove all nexthops tied to a device being deleted */
static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev != dev)
			continue;

		if (nhi->reject_nh &&
		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
			continue;

		remove_nexthop(net, nhi->nh_parent, NULL);
	}
}

/* rtnl; called when net namespace is deleted */
static void flush_all_nexthops(struct net *net)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	struct nexthop *nh;

	while ((node = rb_first(root))) {
		nh = rb_entry(node, struct nexthop, rb_node);
		remove_nexthop(net, nh, NULL);
		cond_resched();
	}
}

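/* Build a group nexthop from the NHA_GROUP entry array: take a reference on
 * each member nexthop, record its weight, and allocate a spare nh_group of
 * the same size so later removals can publish a new array without touching
 * the live one.
 */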
static struct nexthop *nexthop_create_group(struct net *net,
					    struct nh_config *cfg)
{
	struct nlattr *grps_attr = cfg->nh_grp;
	struct nexthop_grp *entry = nla_data(grps_attr);
	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
	struct nh_group *nhg;
	struct nexthop *nh;
	int i;

	if (WARN_ON(!num_nh))
		return ERR_PTR(-EINVAL);

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nh->is_group = 1;

	nhg = nexthop_grp_alloc(num_nh);
	if (!nhg) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	/* spare group used for removals */
	nhg->spare = nexthop_grp_alloc(num_nh);
	if (!nhg->spare) {
		kfree(nhg);
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}
	nhg->spare->spare = nhg;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nexthop *nhe;
		struct nh_info *nhi;

		nhe = nexthop_find_by_id(net, entry[i].id);
		if (!nexthop_get(nhe))
			goto out_no_nh;

		nhi = rtnl_dereference(nhe->nh_info);
		if (nhi->family == AF_INET)
			nhg->has_v4 = true;

		nhg->nh_entries[i].nh = nhe;
		nhg->nh_entries[i].weight = entry[i].weight + 1;
		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
		nhg->nh_entries[i].nh_parent = nh;
	}

	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
		nhg->mpath = 1;
		nh_group_rebalance(nhg);
	}

	rcu_assign_pointer(nh->nh_grp, nhg);

	return nh;

out_no_nh:
	for (i--; i >= 0; --i) {
		list_del(&nhg->nh_entries[i].nh_list);
		nexthop_put(nhg->nh_entries[i].nh);
	}

	kfree(nhg->spare);
	kfree(nhg);
	kfree(nh);

	return ERR_PTR(-ENOENT);
}

static int nh_create_ipv4(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib_nh *fib_nh = &nhi->fib_nh;
	struct fib_config fib_cfg = {
		.fc_oif   = cfg->nh_ifindex,
		.fc_gw4   = cfg->gw.ipv4,
		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	u32 tb_id = l3mdev_fib_table(cfg->dev);
	int err;

	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
	if (err) {
		fib_nh_release(net, fib_nh);
		goto out;
	}

	/* sets nh_dev if successful */
	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  fib_nh->fib_nh_scope);
	} else {
		fib_nh_release(net, fib_nh);
	}
out:
	return err;
}

static int nh_create_ipv6(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
	struct fib6_config fib6_cfg = {
		.fc_table = l3mdev_fib_table(cfg->dev),
		.fc_ifindex = cfg->nh_ifindex,
		.fc_gateway = cfg->gw.ipv6,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	int err;

	if (!ipv6_addr_any(&cfg->gw.ipv6))
		fib6_cfg.fc_flags |= RTF_GATEWAY;

	/* sets nh_dev if successful */
	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
				      extack);
	if (err)
		ipv6_stub->fib6_nh_release(fib6_nh);
	else
		nh->nh_flags = fib6_nh->fib_nh_flags;

	return err;
}

static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
				      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	struct nexthop *nh;
	int err = 0;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
	if (!nhi) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	nh->nh_flags = cfg->nh_flags;
	nh->net = net;

	nhi->nh_parent = nh;
	nhi->family = cfg->nh_family;
	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;

	if (cfg->nh_blackhole) {
		nhi->reject_nh = 1;
		cfg->nh_ifindex = net->loopback_dev->ifindex;
	}

	switch (cfg->nh_family) {
	case AF_INET:
		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
		break;
	case AF_INET6:
		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
		break;
	}

	if (err) {
		kfree(nhi);
		kfree(nh);
		return ERR_PTR(err);
	}

	/* add the entry to the device based hash */
	nexthop_devhash_add(net, nhi);

	rcu_assign_pointer(nh->nh_info, nhi);

	return nh;
}

/* called with rtnl lock held */
static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct nexthop *nh;
	int err;

	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
		return ERR_PTR(-EINVAL);
	}

	if (!cfg->nh_id) {
		cfg->nh_id = nh_find_unused_id(net);
		if (!cfg->nh_id) {
			NL_SET_ERR_MSG(extack, "No unused id");
			return ERR_PTR(-EINVAL);
		}
	}

	if (cfg->nh_grp)
		nh = nexthop_create_group(net, cfg);
	else
		nh = nexthop_create(net, cfg, extack);

	if (IS_ERR(nh))
		return nh;

	refcount_set(&nh->refcnt, 1);
	nh->id = cfg->nh_id;
	nh->protocol = cfg->nh_protocol;
	nh->net = net;

	err = insert_nexthop(net, nh, cfg, extack);
	if (err) {
		__remove_nexthop(net, nh, NULL);
		nexthop_put(nh);
		nh = ERR_PTR(err);
	}

	return nh;
}

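/* Parse and validate an RTM_NEWNEXTHOP request into nh_config: the ancillary
 * nhmsg header plus the NHA_* attributes, rejecting combinations that are
 * not allowed (e.g. a group with other attributes, or a blackhole with a
 * gateway or device).
 */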
static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
			    struct nlmsghdr *nlh, struct nh_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[NHA_MAX + 1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (nhm->resvd || nhm->nh_scope) {
		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
		goto out;
	}
	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
		goto out;
	}

	switch (nhm->nh_family) {
	case AF_INET:
	case AF_INET6:
		break;
	case AF_UNSPEC:
		if (tb[NHA_GROUP])
			break;
		/* fallthrough */
	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	if (tb[NHA_GROUPS] || tb[NHA_MASTER]) {
		NL_SET_ERR_MSG(extack, "Invalid attributes in request");
		goto out;
	}

	memset(cfg, 0, sizeof(*cfg));
	cfg->nlflags = nlh->nlmsg_flags;
	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->nlinfo.nlh = nlh;
	cfg->nlinfo.nl_net = net;

	cfg->nh_family = nhm->nh_family;
	cfg->nh_protocol = nhm->nh_protocol;
	cfg->nh_flags = nhm->nh_flags;

	if (tb[NHA_ID])
		cfg->nh_id = nla_get_u32(tb[NHA_ID]);

	if (tb[NHA_GROUP]) {
		if (nhm->nh_family != AF_UNSPEC) {
			NL_SET_ERR_MSG(extack, "Invalid family for group");
			goto out;
		}
		cfg->nh_grp = tb[NHA_GROUP];

		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
		if (tb[NHA_GROUP_TYPE])
			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);

		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid group type");
			goto out;
		}
		err = nh_check_attr_group(net, tb, extack);

		/* no other attributes should be set */
		goto out;
	}

	if (tb[NHA_BLACKHOLE]) {
		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway or oif");
			goto out;
		}

		cfg->nh_blackhole = 1;
		err = 0;
		goto out;
	}

	if (!tb[NHA_OIF]) {
		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole nexthops");
		goto out;
	}

	cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
	if (cfg->nh_ifindex)
		cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);

	if (!cfg->dev) {
		NL_SET_ERR_MSG(extack, "Invalid device index");
		goto out;
	} else if (!(cfg->dev->flags & IFF_UP)) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	} else if (!netif_carrier_ok(cfg->dev)) {
		NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
		err = -ENETDOWN;
		goto out;
	}

	err = -EINVAL;
	if (tb[NHA_GATEWAY]) {
		struct nlattr *gwa = tb[NHA_GATEWAY];

		switch (cfg->nh_family) {
		case AF_INET:
			if (nla_len(gwa) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv4 = nla_get_be32(gwa);
			break;
		case AF_INET6:
			if (nla_len(gwa) != sizeof(struct in6_addr)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
			break;
		default:
			NL_SET_ERR_MSG(extack,
				       "Unknown address family for gateway");
			goto out;
		}
	} else {
		/* device only nexthop (no gateway) */
		if (cfg->nh_flags & RTNH_F_ONLINK) {
			NL_SET_ERR_MSG(extack,
				       "ONLINK flag can not be set for nexthop without a gateway");
			goto out;
		}
	}

	if (tb[NHA_ENCAP]) {
		cfg->nh_encap = tb[NHA_ENCAP];

		if (!tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
			goto out;
		}

		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
		if (err < 0)
			goto out;

	} else if (tb[NHA_ENCAP_TYPE]) {
		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
		goto out;
	}

	err = 0;
out:
	return err;
}

/* rtnl */
static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nh_config cfg;
	struct nexthop *nh;
	int err;

	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
	if (!err) {
		nh = nexthop_add(net, &cfg, extack);
		if (IS_ERR(nh))
			err = PTR_ERR(nh);
	}

	return err;
}

static int nh_valid_get_del_req(struct nlmsghdr *nlh, u32 *id,
				struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[NHA_MAX + 1];
	int err, i;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	for (i = 0; i < __NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NHA_ID:
			break;
		default:
			NL_SET_ERR_MSG_ATTR(extack, tb[i],
					    "Unexpected attribute in request");
			goto out;
		}
	}
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header");
		goto out;
	}

	if (!tb[NHA_ID]) {
		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
		goto out;
	}

	*id = nla_get_u32(tb[NHA_ID]);
	if (!(*id))
		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
	else
		err = 0;
out:
	return err;
}

/* rtnl */
static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nl_info nlinfo = {
		.nlh = nlh,
		.nl_net = net,
		.portid = NETLINK_CB(skb).portid,
	};
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return -ENOENT;

	remove_nexthop(net, nh, &nlinfo);

	return 0;
}

/* rtnl */
static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *skb = NULL;
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	err = -ENOBUFS;
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto out;

	err = -ENOENT;
	nh = nexthop_find_by_id(net, id);
	if (!nh)
		goto errout_free;

	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	return err;
errout_free:
	kfree_skb(skb);
	goto out;
}

static bool nh_dump_filtered(struct nexthop *nh, int dev_idx, int master_idx,
			     bool group_filter, u8 family)
{
	const struct net_device *dev;
	const struct nh_info *nhi;

	if (group_filter && !nh->is_group)
		return true;

	if (!dev_idx && !master_idx && !family)
		return false;

	if (nh->is_group)
		return true;

	nhi = rtnl_dereference(nh->nh_info);
	if (family && nhi->family != family)
		return true;

	dev = nhi->fib_nhc.nhc_dev;
	if (dev_idx && (!dev || dev->ifindex != dev_idx))
		return true;

	if (master_idx) {
		struct net_device *master;

		if (!dev)
			return true;

		master = netdev_master_upper_dev_get((struct net_device *)dev);
		if (!master || master->ifindex != master_idx)
			return true;
	}

	return false;
}

static int nh_valid_dump_req(const struct nlmsghdr *nlh, int *dev_idx,
			     int *master_idx, bool *group_filter,
			     struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NHA_MAX + 1];
	struct nhmsg *nhm;
	int err, i;
	u32 idx;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  NULL);
	if (err < 0)
		return err;

	for (i = 0; i <= NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NHA_OIF:
			idx = nla_get_u32(tb[i]);
			if (idx > INT_MAX) {
				NL_SET_ERR_MSG(extack, "Invalid device index");
				return -EINVAL;
			}
			*dev_idx = idx;
			break;
		case NHA_MASTER:
			idx = nla_get_u32(tb[i]);
			if (idx > INT_MAX) {
				NL_SET_ERR_MSG(extack, "Invalid master device index");
				return -EINVAL;
			}
			*master_idx = idx;
			break;
		case NHA_GROUPS:
			*group_filter = true;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	nhm = nlmsg_data(nlh);
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
		return -EINVAL;
	}

	return 0;
}

/* rtnl */
static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	int dev_filter_idx = 0, master_idx = 0;
	struct net *net = sock_net(skb->sk);
	struct rb_root *root = &net->nexthop.rb_root;
	bool group_filter = false;
	struct rb_node *node;
	int idx = 0, s_idx;
	int err;

	err = nh_valid_dump_req(cb->nlh, &dev_filter_idx, &master_idx,
				&group_filter, cb);
	if (err < 0)
		return err;

	s_idx = cb->args[0];
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		if (idx < s_idx)
			goto cont;

		nh = rb_entry(node, struct nexthop, rb_node);
		if (nh_dump_filtered(nh, dev_filter_idx, master_idx,
				     group_filter, nhm->nh_family))
			goto cont;

		err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
				   NETLINK_CB(cb->skb).portid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err < 0) {
			if (likely(skb->len))
				goto out;

			goto out_err;
		}
cont:
		idx++;
	}

out:
	err = skb->len;
out_err:
	cb->args[0] = idx;
	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	return err;
}

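/* Device MTU changed: walk the nexthops hashed on this device and update
 * cached MTU state for each IPv4 nexthop that uses it.
 */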
static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

/* rtnl */
static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGE:
		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

static void __net_exit nexthop_net_exit(struct net *net)
{
	rtnl_lock();
	flush_all_nexthops(net);
	rtnl_unlock();
	kfree(net->nexthop.devhash);
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	return 0;
}
subsys_initcall(nexthop_init);