blob: 6dec8e9b34511aedcf0b0558e694a6a91fc32ab6 [file] [log] [blame]
/*
2 * Bridge multicast support.
3 *
4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/err.h>
14#include <linux/export.h>
15#include <linux/if_ether.h>
16#include <linux/igmp.h>
17#include <linux/jhash.h>
18#include <linux/kernel.h>
19#include <linux/log2.h>
20#include <linux/netdevice.h>
21#include <linux/netfilter_bridge.h>
22#include <linux/random.h>
23#include <linux/rculist.h>
24#include <linux/skbuff.h>
25#include <linux/slab.h>
26#include <linux/timer.h>
27#include <linux/inetdevice.h>
28#include <linux/mroute.h>
29#include <net/ip.h>
30#include <net/switchdev.h>
31#if IS_ENABLED(CONFIG_IPV6)
32#include <net/ipv6.h>
33#include <net/mld.h>
34#include <net/ip6_checksum.h>
35#include <net/addrconf.h>
36#endif
37
38#include "br_private.h"
39
40static void br_multicast_start_querier(struct net_bridge *br,
41 struct bridge_mcast_own_query *query);
42static void br_multicast_add_router(struct net_bridge *br,
43 struct net_bridge_port *port);
44static void br_ip4_multicast_leave_group(struct net_bridge *br,
45 struct net_bridge_port *port,
46 __be32 group,
47 __u16 vid,
48 const unsigned char *src);
49
50static void __del_port_router(struct net_bridge_port *p);
51#if IS_ENABLED(CONFIG_IPV6)
52static void br_ip6_multicast_leave_group(struct net_bridge *br,
53 struct net_bridge_port *port,
54 const struct in6_addr *group,
55 __u16 vid, const unsigned char *src);
56#endif
57unsigned int br_mdb_rehash_seq;
58
59static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
60{
61 if (a->proto != b->proto)
62 return 0;
63 if (a->vid != b->vid)
64 return 0;
65 switch (a->proto) {
66 case htons(ETH_P_IP):
67 return a->u.ip4 == b->u.ip4;
68#if IS_ENABLED(CONFIG_IPV6)
69 case htons(ETH_P_IPV6):
70 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
71#endif
72 }
73 return 0;
74}
75
76static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
77 __u16 vid)
78{
79 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
80}
81
82#if IS_ENABLED(CONFIG_IPV6)
83static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
84 const struct in6_addr *ip,
85 __u16 vid)
86{
87 return jhash_2words(ipv6_addr_hash(ip), vid,
88 mdb->secret) & (mdb->max - 1);
89}
90#endif
91
92static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
93 struct br_ip *ip)
94{
95 switch (ip->proto) {
96 case htons(ETH_P_IP):
97 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
98#if IS_ENABLED(CONFIG_IPV6)
99 case htons(ETH_P_IPV6):
100 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
101#endif
102 }
103 return 0;
104}
105
/* Walk one hash chain of the current table version looking for @dst.
 * Caller must be in an RCU read-side section or hold multicast_lock.
 * Returns the matching entry or NULL.
 */
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}
118
119struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
120 struct br_ip *dst)
121{
122 if (!mdb)
123 return NULL;
124
125 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
126}
127
128static struct net_bridge_mdb_entry *br_mdb_ip4_get(
129 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
130{
131 struct br_ip br_dst;
132
133 br_dst.u.ip4 = dst;
134 br_dst.proto = htons(ETH_P_IP);
135 br_dst.vid = vid;
136
137 return br_mdb_ip_get(mdb, &br_dst);
138}
139
140#if IS_ENABLED(CONFIG_IPV6)
141static struct net_bridge_mdb_entry *br_mdb_ip6_get(
142 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
143 __u16 vid)
144{
145 struct br_ip br_dst;
146
147 br_dst.u.ip6 = *dst;
148 br_dst.proto = htons(ETH_P_IPV6);
149 br_dst.vid = vid;
150
151 return br_mdb_ip_get(mdb, &br_dst);
152}
153#endif
154
155struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
156 struct sk_buff *skb, u16 vid)
157{
158 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
159 struct br_ip ip;
160
161 if (br->multicast_disabled)
162 return NULL;
163
164 if (BR_INPUT_SKB_CB(skb)->igmp)
165 return NULL;
166
167 ip.proto = skb->protocol;
168 ip.vid = vid;
169
170 switch (skb->protocol) {
171 case htons(ETH_P_IP):
172 ip.u.ip4 = ip_hdr(skb)->daddr;
173 break;
174#if IS_ENABLED(CONFIG_IPV6)
175 case htons(ETH_P_IPV6):
176 ip.u.ip6 = ipv6_hdr(skb)->daddr;
177 break;
178#endif
179 default:
180 return NULL;
181 }
182
183 return br_mdb_ip_get(mdb, &ip);
184}
185
186static void br_mdb_free(struct rcu_head *head)
187{
188 struct net_bridge_mdb_htable *mdb =
189 container_of(head, struct net_bridge_mdb_htable, rcu);
190 struct net_bridge_mdb_htable *old = mdb->old;
191
192 mdb->old = NULL;
193 kfree(old->mhash);
194 kfree(old);
195}
196
/* Link every entry of @old into @new (entries are shared - each mdb
 * entry carries one hlist node per table version, so old readers keep
 * working).  When @elasticity is non-zero, additionally verify that no
 * chain in the new table exceeds it; returns -EINVAL in that case so
 * the caller can react, 0 otherwise.
 */
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	/* Insert all existing entries into the new table's buckets via
	 * the new version's hlist node, leaving the old links intact
	 * for concurrent RCU readers.
	 */
	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	/* Measure the longest chain in the new table. */
	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}
225
226void br_multicast_free_pg(struct rcu_head *head)
227{
228 struct net_bridge_port_group *p =
229 container_of(head, struct net_bridge_port_group, rcu);
230
231 kfree(p);
232}
233
234static void br_multicast_free_group(struct rcu_head *head)
235{
236 struct net_bridge_mdb_entry *mp =
237 container_of(head, struct net_bridge_mdb_entry, rcu);
238
239 kfree(mp);
240}
241
/* Group membership timer for an mdb entry: drop the host-join state,
 * notify userspace, and free the entry once no ports remain subscribed.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	/* Bail out if the bridge is down or the timer was re-armed
	 * after firing (a fresh report arrived meanwhile).
	 */
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->host_joined = false;
	br_mdb_notify(br->dev, NULL, &mp->addr, RTM_DELMDB, 0);

	/* Keep the entry while ports are still subscribed. */
	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	/* Free after the RCU grace period; readers may still hold it. */
	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}
268
/* Unlink port group @pg from its mdb entry's port list, stop its timer
 * and free it via RCU.  Caller holds br->multicast_lock.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	/* Find @pg on the entry's singly-linked port list and splice
	 * it out RCU-safely.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		/* Last subscriber gone and no host join: expire the
		 * group entry immediately via its timer.
		 */
		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* @pg must be on the list; reaching here indicates a bug. */
	WARN_ON(1);
}
305
/* Per-port group membership timer: remove the port group unless it was
 * re-armed in the meantime, already unlinked, or marked permanent.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}
321
/* Replace *mdbp with a table of @max buckets, copying all entries
 * over.  A fresh hash secret is chosen for the first table or when
 * @elasticity is set (over-long chains - possible hash collision
 * attack).  Returns 0 or -errno; on failure the old table stays in
 * place.  Called with multicast_lock held.
 */
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kcalloc(max, sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	/* Flip the hlist version so entries can be linked into both
	 * tables while RCU readers of the old one drain.
	 */
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	/* Old table is freed after the grace period (br_mdb_free). */
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}
368
/* Build an IGMP query addressed to 224.0.0.1 (all hosts), framed as
 * IGMPv2 or IGMPv3 per br->multicast_igmp_version.  A zero @group
 * yields a general query, otherwise a group-specific one.  *igmp_type
 * is set for stats accounting.  Returns NULL on allocation failure.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct igmpv3_query *ihv3;
	size_t igmp_hdr_size;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3)
		igmp_hdr_size = sizeof(*ihv3);
	/* The extra 4 bytes hold the Router Alert IP option. */
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 igmp_hdr_size + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	/* Ethernet header; 01:00:5e:00:00:01 is the multicast MAC
	 * mapping of 224.0.0.1.
	 */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	/* IPv4 header: ihl = 6 words = 24 bytes, i.e. the 20-byte base
	 * header plus the 4-byte Router Alert option written below.
	 */
	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* IPv4 header incl. Router Alert option */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* Max response time: the short last-member interval for
		 * group-specific queries, the normal query response
		 * interval for general queries.
		 */
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = 0;
		ihv3->resv = 0;
		ihv3->suppress = 0;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
		break;
	}

	skb_put(skb, igmp_hdr_size);
	/* Strip the Ethernet header again; callers push it on xmit. */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
461
462#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query (MLDv1 or MLDv2 per br->multicast_mld_version)
 * addressed to ff02::1.  An unspecified @grp yields a general query.
 * Clears br->has_ipv6_addr and returns NULL when no usable source
 * address exists on the bridge; also returns NULL on alloc failure.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct mld2_query *mld2q;
	unsigned long interval;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	size_t mld_hdr_size;
	struct sk_buff *skb;
	struct ethhdr *eth;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2)
		mld_hdr_size = sizeof(*mld2q);
	/* The extra 8 bytes hold the hop-by-hop option header below. */
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + mld_hdr_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);	/* version 6 */
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		/* No source address available: remember that so the
		 * caller can suppress further IPv6 queries.
		 */
		kfree_skb(skb);
		br->has_ipv6_addr = 0;
		return NULL;
	}

	br->has_ipv6_addr = 1;
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6: general queries use the normal response interval,
	 * group-specific ones the short last-member interval.
	 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *grp;
		mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  sizeof(*mldq), IPPROTO_ICMPV6,
						  csum_partial(mldq,
							       sizeof(*mldq),
							       0));
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = 0;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = 0;
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *grp;
		mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						     sizeof(*mld2q),
						     IPPROTO_ICMPV6,
						     csum_partial(mld2q,
								  sizeof(*mld2q),
								  0));
		break;
	}
	skb_put(skb, mld_hdr_size);

	/* Strip the Ethernet header again; callers push it on xmit. */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
574#endif
575
576static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
577 struct br_ip *addr,
578 u8 *igmp_type)
579{
580 switch (addr->proto) {
581 case htons(ETH_P_IP):
582 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
583#if IS_ENABLED(CONFIG_IPV6)
584 case htons(ETH_P_IPV6):
585 return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
586 igmp_type);
587#endif
588 }
589 return NULL;
590}
591
/* Look up @group in bucket @hash, growing or re-seeding the table on
 * the way.  Returns the existing entry, NULL when absent (caller may
 * allocate one), or an ERR_PTR:
 *   -EAGAIN        table was rehashed; caller must recompute the hash
 *   -E2BIG/-EEXIST snooping was disabled due to table pressure
 * Called with multicast_lock held.
 */
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	/* Chain too long: request a re-seed (new hash secret) below. */
	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	/* Table full: try to double it, up to the configured maximum. */
	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		/* A previous rehash has not been RCU-freed yet; cannot
		 * start another one.
		 */
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		/* Tell the caller to retry against the new table. */
		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}
665
/* Find or create the mdb entry for @group, allocating the hash table
 * on first use and retrying once after an -EAGAIN rehash.  Returns the
 * entry or an ERR_PTR.  Called with multicast_lock held.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct net_bridge_port *p,
						    struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, p, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		/* mp == NULL: not found, allocate a new entry below. */
		break;

	case -EAGAIN:
rehash:
		/* Table was (re)built; recompute the bucket against the
		 * new table before inserting.
		 */
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		/* Existing entry, or a fatal ERR_PTR - return as-is. */
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}
713
/* Allocate and initialise a port group for @group on @port, linking it
 * into the port's mglist.  @next becomes the tail of the mdb entry's
 * port list; the caller publishes the new node with rcu_assign_pointer.
 * A NULL @src stores an all-ones eth_addr, which acts as a wildcard in
 * br_port_group_equal().  Returns NULL on allocation failure.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		memset(p->eth_addr, 0xff, ETH_ALEN);

	return p;
}
741
742static bool br_port_group_equal(struct net_bridge_port_group *p,
743 struct net_bridge_port *port,
744 const unsigned char *src)
745{
746 if (p->port != port)
747 return false;
748
749 if (!(port->flags & BR_MULTICAST_TO_UNICAST))
750 return true;
751
752 return ether_addr_equal(src, p->eth_addr);
753}
754
/* Record a membership report for @group: create the mdb entry and/or
 * port group as needed and refresh the membership timer.  A NULL @port
 * means the bridge device itself joined.  Returns 0 on success.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		/* Host join on the bridge device itself. */
		if (!mp->host_joined) {
			mp->host_joined = true;
			br_mdb_notify(br->dev, NULL, &mp->addr, RTM_NEWMDB, 0);
		}
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	/* The port list is kept sorted by descending port pointer;
	 * stop at an existing matching entry or the insertion point.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src);
	if (unlikely(!p))
		goto err;
	/* NOTE(review): on this failure path "err" still holds
	 * PTR_ERR() of a valid mp pointer - non-zero, but not a real
	 * errno; consider setting -ENOMEM explicitly.
	 */
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	/* Success paths fall through the "err" label with err == 0. */
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}
809
810static int br_ip4_multicast_add_group(struct net_bridge *br,
811 struct net_bridge_port *port,
812 __be32 group,
813 __u16 vid,
814 const unsigned char *src)
815{
816 struct br_ip br_group;
817
818 if (ipv4_is_local_multicast(group))
819 return 0;
820
821 br_group.u.ip4 = group;
822 br_group.proto = htons(ETH_P_IP);
823 br_group.vid = vid;
824
825 return br_multicast_add_group(br, port, &br_group, src);
826}
827
828#if IS_ENABLED(CONFIG_IPV6)
829static int br_ip6_multicast_add_group(struct net_bridge *br,
830 struct net_bridge_port *port,
831 const struct in6_addr *group,
832 __u16 vid,
833 const unsigned char *src)
834{
835 struct br_ip br_group;
836
837 if (ipv6_addr_is_ll_all_nodes(group))
838 return 0;
839
840 br_group.u.ip6 = *group;
841 br_group.proto = htons(ETH_P_IPV6);
842 br_group.vid = vid;
843
844 return br_multicast_add_group(br, port, &br_group, src);
845}
846#endif
847
/* Per-port multicast router timer: remove the port from the router
 * list when its advertised presence lapses.  Disabled and permanent
 * modes are not timer-driven and are left alone.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
		from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}
864
865static void br_mc_router_state_change(struct net_bridge *p,
866 bool is_mc_router)
867{
868 struct switchdev_attr attr = {
869 .orig_dev = p->dev,
870 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
871 .flags = SWITCHDEV_F_DEFER,
872 .u.mrouter = is_mc_router,
873 };
874
875 switchdev_port_attr_set(p->dev, &attr);
876}
877
/* Bridge-level multicast router timer: notify switchdev that the
 * bridge is no longer a multicast router once the timer lapses.
 * Disabled and permanent modes are not timer-driven.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}
892
893static void br_multicast_querier_expired(struct net_bridge *br,
894 struct bridge_mcast_own_query *query)
895{
896 spin_lock(&br->multicast_lock);
897 if (!netif_running(br->dev) || br->multicast_disabled)
898 goto out;
899
900 br_multicast_start_querier(br, query);
901
902out:
903 spin_unlock(&br->multicast_lock);
904}
905
906static void br_ip4_multicast_querier_expired(struct timer_list *t)
907{
908 struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
909
910 br_multicast_querier_expired(br, &br->ip4_own_query);
911}
912
913#if IS_ENABLED(CONFIG_IPV6)
914static void br_ip6_multicast_querier_expired(struct timer_list *t)
915{
916 struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
917
918 br_multicast_querier_expired(br, &br->ip6_own_query);
919}
920#endif
921
/* Record the source address we used when acting as querier, so later
 * foreign queries can be compared against it in querier election.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
933
/* Build and emit one query.  With a @port the frame is transmitted out
 * of that port through the bridge output netfilter hook; without one
 * the bridge device itself is the querier and the frame is fed into
 * the local receive path instead.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
959
/* Send a general query for the address family selected by @own_query,
 * unless another querier is currently active (its timer still runs),
 * then re-arm the own-query timer.  The shorter startup interval is
 * used until multicast_startup_query_count queries have been sent.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	/* Zero group address = general query. */
	memset(&br_group.u, 0, sizeof(br_group.u));

	/* Without CONFIG_IPV6 the else branch below does not exist, so
	 * other_query stays NULL for anything that is not IPv4.
	 */
	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
996
/* Per-port own-query timer: send the next query (counting startup
 * queries) unless the port cannot forward (disabled/blocking).
 */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}
1016
1017static void br_ip4_multicast_port_query_expired(struct timer_list *t)
1018{
1019 struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
1020
1021 br_multicast_port_query_expired(port, &port->ip4_own_query);
1022}
1023
1024#if IS_ENABLED(CONFIG_IPV6)
1025static void br_ip6_multicast_port_query_expired(struct timer_list *t)
1026{
1027 struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
1028
1029 br_multicast_port_query_expired(port, &port->ip6_own_query);
1030}
1031#endif
1032
1033static void br_mc_disabled_update(struct net_device *dev, bool value)
1034{
1035 struct switchdev_attr attr = {
1036 .orig_dev = dev,
1037 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1038 .flags = SWITCHDEV_F_DEFER,
1039 .u.mc_disabled = value,
1040 };
1041
1042 switchdev_port_attr_set(dev, &attr);
1043}
1044
/* Initialise per-port multicast state: router mode, timers, switchdev
 * sync and per-cpu stats.  Returns -ENOMEM if stats allocation fails.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	br_mc_disabled_update(port->dev, port->br->multicast_disabled);

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
1065
/* Tear down per-port multicast state when the port leaves the bridge:
 * flush all remaining port groups, stop the router timer and free the
 * per-cpu stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}
1080
/* Restart an own-query sequence: reset the startup counter and arm the
 * timer to fire immediately.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	/* Only rearm if we stopped a pending timer or it was idle;
	 * a negative try_to_del_timer_sync() means the handler is
	 * currently running on another CPU.
	 */
	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
1089
/* Kick the own-query machinery for @port and re-add it to the router
 * list when configured as a permanent router port.  Caller holds
 * multicast_lock; no-op while snooping is off or the bridge is down.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (br->multicast_disabled || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}
1105
1106void br_multicast_enable_port(struct net_bridge_port *port)
1107{
1108 struct net_bridge *br = port->br;
1109
1110 spin_lock(&br->multicast_lock);
1111 __br_multicast_enable_port(port);
1112 spin_unlock(&br->multicast_lock);
1113}
1114
/* Stop multicast processing on @port: flush its non-permanent groups,
 * drop it from the router list and stop all its timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
1135
1136static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1137 struct net_bridge_port *port,
1138 struct sk_buff *skb,
1139 u16 vid)
1140{
1141 const unsigned char *src;
1142 struct igmpv3_report *ih;
1143 struct igmpv3_grec *grec;
1144 int i;
1145 int len;
1146 int num;
1147 int type;
1148 int err = 0;
1149 __be32 group;
1150
1151 ih = igmpv3_report_hdr(skb);
1152 num = ntohs(ih->ngrec);
1153 len = skb_transport_offset(skb) + sizeof(*ih);
1154
1155 for (i = 0; i < num; i++) {
1156 len += sizeof(*grec);
1157 if (!pskb_may_pull(skb, len))
1158 return -EINVAL;
1159
1160 grec = (void *)(skb->data + len - sizeof(*grec));
1161 group = grec->grec_mca;
1162 type = grec->grec_type;
1163
1164 len += ntohs(grec->grec_nsrcs) * 4;
1165 if (!pskb_may_pull(skb, len))
1166 return -EINVAL;
1167
1168 /* We treat this as an IGMPv2 report for now. */
1169 switch (type) {
1170 case IGMPV3_MODE_IS_INCLUDE:
1171 case IGMPV3_MODE_IS_EXCLUDE:
1172 case IGMPV3_CHANGE_TO_INCLUDE:
1173 case IGMPV3_CHANGE_TO_EXCLUDE:
1174 case IGMPV3_ALLOW_NEW_SOURCES:
1175 case IGMPV3_BLOCK_OLD_SOURCES:
1176 break;
1177
1178 default:
1179 continue;
1180 }
1181
1182 src = eth_hdr(skb)->h_source;
1183 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
1184 type == IGMPV3_MODE_IS_INCLUDE) &&
1185 ntohs(grec->grec_nsrcs) == 0) {
1186 br_ip4_multicast_leave_group(br, port, group, vid, src);
1187 } else {
1188 err = br_ip4_multicast_add_group(br, port, group, vid,
1189 src);
1190 if (err)
1191 break;
1192 }
1193 }
1194
1195 return err;
1196}
1197
1198#if IS_ENABLED(CONFIG_IPV6)
1199static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1200 struct net_bridge_port *port,
1201 struct sk_buff *skb,
1202 u16 vid)
1203{
1204 const unsigned char *src;
1205 struct icmp6hdr *icmp6h;
1206 struct mld2_grec *grec;
1207 int i;
1208 int len;
1209 int num;
1210 int err = 0;
1211
1212 if (!pskb_may_pull(skb, sizeof(*icmp6h)))
1213 return -EINVAL;
1214
1215 icmp6h = icmp6_hdr(skb);
1216 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1217 len = skb_transport_offset(skb) + sizeof(*icmp6h);
1218
1219 for (i = 0; i < num; i++) {
1220 __be16 *nsrcs, _nsrcs;
1221
1222 nsrcs = skb_header_pointer(skb,
1223 len + offsetof(struct mld2_grec,
1224 grec_nsrcs),
1225 sizeof(_nsrcs), &_nsrcs);
1226 if (!nsrcs)
1227 return -EINVAL;
1228
1229 if (!pskb_may_pull(skb,
1230 len + sizeof(*grec) +
1231 sizeof(struct in6_addr) * ntohs(*nsrcs)))
1232 return -EINVAL;
1233
1234 grec = (struct mld2_grec *)(skb->data + len);
1235 len += sizeof(*grec) +
1236 sizeof(struct in6_addr) * ntohs(*nsrcs);
1237
1238 /* We treat these as MLDv1 reports for now. */
1239 switch (grec->grec_type) {
1240 case MLD2_MODE_IS_INCLUDE:
1241 case MLD2_MODE_IS_EXCLUDE:
1242 case MLD2_CHANGE_TO_INCLUDE:
1243 case MLD2_CHANGE_TO_EXCLUDE:
1244 case MLD2_ALLOW_NEW_SOURCES:
1245 case MLD2_BLOCK_OLD_SOURCES:
1246 break;
1247
1248 default:
1249 continue;
1250 }
1251
1252 src = eth_hdr(skb)->h_source;
1253 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1254 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1255 ntohs(*nsrcs) == 0) {
1256 br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1257 vid, src);
1258 } else {
1259 err = br_ip6_multicast_add_group(br, port,
1260 &grec->grec_mca, vid,
1261 src);
1262 if (err)
1263 break;
1264 }
1265 }
1266
1267 return err;
1268}
1269#endif
1270
1271static bool br_ip4_multicast_select_querier(struct net_bridge *br,
1272 struct net_bridge_port *port,
1273 __be32 saddr)
1274{
1275 if (!timer_pending(&br->ip4_own_query.timer) &&
1276 !timer_pending(&br->ip4_other_query.timer))
1277 goto update;
1278
1279 if (!br->ip4_querier.addr.u.ip4)
1280 goto update;
1281
1282 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
1283 goto update;
1284
1285 return false;
1286
1287update:
1288 br->ip4_querier.addr.u.ip4 = saddr;
1289
1290 /* update protected by general multicast_lock by caller */
1291 rcu_assign_pointer(br->ip4_querier.port, port);
1292
1293 return true;
1294}
1295
1296#if IS_ENABLED(CONFIG_IPV6)
1297static bool br_ip6_multicast_select_querier(struct net_bridge *br,
1298 struct net_bridge_port *port,
1299 struct in6_addr *saddr)
1300{
1301 if (!timer_pending(&br->ip6_own_query.timer) &&
1302 !timer_pending(&br->ip6_other_query.timer))
1303 goto update;
1304
1305 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
1306 goto update;
1307
1308 return false;
1309
1310update:
1311 br->ip6_querier.addr.u.ip6 = *saddr;
1312
1313 /* update protected by general multicast_lock by caller */
1314 rcu_assign_pointer(br->ip6_querier.port, port);
1315
1316 return true;
1317}
1318#endif
1319
1320static bool br_multicast_select_querier(struct net_bridge *br,
1321 struct net_bridge_port *port,
1322 struct br_ip *saddr)
1323{
1324 switch (saddr->proto) {
1325 case htons(ETH_P_IP):
1326 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1327#if IS_ENABLED(CONFIG_IPV6)
1328 case htons(ETH_P_IPV6):
1329 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
1330#endif
1331 }
1332
1333 return false;
1334}
1335
/* (Re)arm the "another querier is active" timer.  The delay_time grace
 * period is only primed when the timer was not already running, so an
 * election window in progress is not extended.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
1346
/* Tell switchdev drivers whether @p is acting as a multicast router so
 * offloading hardware can mirror the state.  SWITCHDEV_F_DEFER makes
 * the attribute set asynchronous, permitting calls from atomic context.
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}
1359
/*
 * Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* Already on the router list: nothing to do. */
	if (!hlist_unhashed(&port->rlist))
		return;

	/* Find the insertion point that keeps descending pointer order. */
	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	/* Notify userspace and offloading drivers of the new mrouter port. */
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}
1387
/* Note that a multicast router was heard on @port -- or on the bridge
 * itself when @port is NULL -- and refresh the corresponding expiry
 * timer.  Statically configured router modes (disabled/permanent) are
 * left untouched.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			/* Only announce a state change when the timer was
			 * idle, i.e. we were not already a router.
			 */
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
1412
/* Handle an incoming general query: run querier election and, when the
 * sender wins, refresh the other-querier timer and possibly mark the
 * arrival port as a router port.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);

	/* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
	 * the arrival port for IGMP Queries where the source address
	 * is 0.0.0.0 should not be added to router port list.
	 */
	if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
	    saddr->proto == htons(ETH_P_IPV6))
		br_multicast_mark_router(br, port);
}
1432
/* Process an IGMP query received on @port (NULL when it originated from
 * the bridge device itself).  General queries take part in querier
 * election; group-specific queries shorten the membership timers of the
 * queried group so silent members age out faster.
 */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	/* Packet length discriminates IGMPv1/v2 (8 bytes) from IGMPv3. */
	if (skb->len == offset + sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* Code 0 is an IGMPv1-style query: use a 10s
			 * response time and treat it as a general query.
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;	/* source-specific queries are ignored */

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* General query: only relevant to querier election. */
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* Shorten the host and per-port membership timers -- never
	 * extend a timer that would already fire sooner.
	 */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1508
1509#if IS_ENABLED(CONFIG_IPV6)
1510static int br_ip6_multicast_query(struct net_bridge *br,
1511 struct net_bridge_port *port,
1512 struct sk_buff *skb,
1513 u16 vid)
1514{
1515 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1516 struct mld_msg *mld;
1517 struct net_bridge_mdb_entry *mp;
1518 struct mld2_query *mld2q;
1519 struct net_bridge_port_group *p;
1520 struct net_bridge_port_group __rcu **pp;
1521 struct br_ip saddr;
1522 unsigned long max_delay;
1523 unsigned long now = jiffies;
1524 unsigned int offset = skb_transport_offset(skb);
1525 const struct in6_addr *group = NULL;
1526 bool is_general_query;
1527 int err = 0;
1528
1529 spin_lock(&br->multicast_lock);
1530 if (!netif_running(br->dev) ||
1531 (port && port->state == BR_STATE_DISABLED))
1532 goto out;
1533
1534 if (skb->len == offset + sizeof(*mld)) {
1535 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
1536 err = -EINVAL;
1537 goto out;
1538 }
1539 mld = (struct mld_msg *) icmp6_hdr(skb);
1540 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1541 if (max_delay)
1542 group = &mld->mld_mca;
1543 } else {
1544 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
1545 err = -EINVAL;
1546 goto out;
1547 }
1548 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1549 if (!mld2q->mld2q_nsrcs)
1550 group = &mld2q->mld2q_mca;
1551
1552 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
1553 }
1554
1555 is_general_query = group && ipv6_addr_any(group);
1556
1557 if (is_general_query) {
1558 saddr.proto = htons(ETH_P_IPV6);
1559 saddr.u.ip6 = ip6h->saddr;
1560
1561 br_multicast_query_received(br, port, &br->ip6_other_query,
1562 &saddr, max_delay);
1563 goto out;
1564 } else if (!group) {
1565 goto out;
1566 }
1567
1568 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
1569 if (!mp)
1570 goto out;
1571
1572 max_delay *= br->multicast_last_member_count;
1573 if (mp->host_joined &&
1574 (timer_pending(&mp->timer) ?
1575 time_after(mp->timer.expires, now + max_delay) :
1576 try_to_del_timer_sync(&mp->timer) >= 0))
1577 mod_timer(&mp->timer, now + max_delay);
1578
1579 for (pp = &mp->ports;
1580 (p = mlock_dereference(*pp, br)) != NULL;
1581 pp = &p->next) {
1582 if (timer_pending(&p->timer) ?
1583 time_after(p->timer.expires, now + max_delay) :
1584 try_to_del_timer_sync(&p->timer) >= 0)
1585 mod_timer(&p->timer, now + max_delay);
1586 }
1587
1588out:
1589 spin_unlock(&br->multicast_lock);
1590 return err;
1591}
1592#endif
1593
/* Common IGMP/MLD leave handling for @group.
 *
 * With fast-leave enabled on @port the matching port group entry is
 * deleted immediately.  Otherwise the relevant membership timers are
 * lowered to last_member_count * last_member_interval -- and, when we
 * are the querier, a group-specific query is sent -- so the entry
 * expires quickly unless another listener reports in.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* Fast leave: unlink and free the entry right away. */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			/* Last member gone: let the mdb entry expire now. */
			if (!mp->ports && !mp->host_joined &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	/* Defer to a foreign querier while one is active. */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		/* We are the querier: probe for remaining listeners. */
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
		       br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
	       br->multicast_last_member_interval;

	if (!port) {
		/* Leave came from the bridge itself: shorten (never
		 * extend) the host-joined timer.
		 */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* Shorten the timer of the matching port group entry. */
	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}
1702
1703static void br_ip4_multicast_leave_group(struct net_bridge *br,
1704 struct net_bridge_port *port,
1705 __be32 group,
1706 __u16 vid,
1707 const unsigned char *src)
1708{
1709 struct br_ip br_group;
1710 struct bridge_mcast_own_query *own_query;
1711
1712 if (ipv4_is_local_multicast(group))
1713 return;
1714
1715 own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1716
1717 br_group.u.ip4 = group;
1718 br_group.proto = htons(ETH_P_IP);
1719 br_group.vid = vid;
1720
1721 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1722 own_query, src);
1723}
1724
1725#if IS_ENABLED(CONFIG_IPV6)
1726static void br_ip6_multicast_leave_group(struct net_bridge *br,
1727 struct net_bridge_port *port,
1728 const struct in6_addr *group,
1729 __u16 vid,
1730 const unsigned char *src)
1731{
1732 struct br_ip br_group;
1733 struct bridge_mcast_own_query *own_query;
1734
1735 if (ipv6_addr_is_ll_all_nodes(group))
1736 return;
1737
1738 own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1739
1740 br_group.u.ip6 = *group;
1741 br_group.proto = htons(ETH_P_IPV6);
1742 br_group.vid = vid;
1743
1744 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
1745 own_query, src);
1746}
1747#endif
1748
1749static void br_multicast_err_count(const struct net_bridge *br,
1750 const struct net_bridge_port *p,
1751 __be16 proto)
1752{
1753 struct bridge_mcast_stats __percpu *stats;
1754 struct bridge_mcast_stats *pstats;
1755
1756 if (!br->multicast_stats_enabled)
1757 return;
1758
1759 if (p)
1760 stats = p->mcast_stats;
1761 else
1762 stats = br->mcast_stats;
1763 if (WARN_ON(!stats))
1764 return;
1765
1766 pstats = this_cpu_ptr(stats);
1767
1768 u64_stats_update_begin(&pstats->syncp);
1769 switch (proto) {
1770 case htons(ETH_P_IP):
1771 pstats->mstats.igmp_parse_errors++;
1772 break;
1773#if IS_ENABLED(CONFIG_IPV6)
1774 case htons(ETH_P_IPV6):
1775 pstats->mstats.mld_parse_errors++;
1776 break;
1777#endif
1778 }
1779 u64_stats_update_end(&pstats->syncp);
1780}
1781
1782static void br_multicast_pim(struct net_bridge *br,
1783 struct net_bridge_port *port,
1784 const struct sk_buff *skb)
1785{
1786 unsigned int offset = skb_transport_offset(skb);
1787 struct pimhdr *pimhdr, _pimhdr;
1788
1789 pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1790 if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1791 pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1792 return;
1793
1794 br_multicast_mark_router(br, port);
1795}
1796
/* Validate and dispatch an IGMP packet arriving on @port (NULL for the
 * bridge device).  ip_mc_check_igmp() returning -ENOMSG means "valid
 * IP multicast but not IGMP": such traffic is forwarded only to
 * mrouter ports unless it is link-local (224.0.0.x), and PIM hellos
 * mark the arrival port as a router port.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		}
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Membership reports go to mrouter ports only. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	/* ip_mc_check_igmp() may have handed back a trimmed clone. */
	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1851
#if IS_ENABLED(CONFIG_IPV6)
/* Validate and dispatch an MLD packet arriving on @port (NULL for the
 * bridge device).  ipv6_mc_check_mld() returning -ENOMSG means "IPv6
 * multicast but not MLD": such traffic is forwarded only to mrouter
 * ports unless addressed to the link-local all-nodes group.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* Membership reports go to mrouter ports only. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	/* ipv6_mc_check_mld() may have handed back a trimmed clone. */
	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif
1905
1906int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1907 struct sk_buff *skb, u16 vid)
1908{
1909 int ret = 0;
1910
1911 BR_INPUT_SKB_CB(skb)->igmp = 0;
1912 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1913
1914 if (br->multicast_disabled)
1915 return 0;
1916
1917 switch (skb->protocol) {
1918 case htons(ETH_P_IP):
1919 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1920 break;
1921#if IS_ENABLED(CONFIG_IPV6)
1922 case htons(ETH_P_IPV6):
1923 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
1924 break;
1925#endif
1926 }
1927
1928 return ret;
1929}
1930
/* Own-query timer expiry: advance the startup query burst counter,
 * drop the cached querier port and send the next query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}
1943
/* Timer callback for the bridge's own IGMP query timer. */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}
1950
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback for the bridge's own MLD query timer. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif
1959
/* One-time initialisation of the bridge's multicast state: MDB hashing
 * defaults, router/querier modes, protocol intervals and all
 * bridge-level timers.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* Interval defaults; the values follow the usual IGMP timings
	 * (125s query interval, 255s other-querier-present, 260s
	 * membership interval).
	 */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}
2002
2003static void __br_multicast_open(struct net_bridge *br,
2004 struct bridge_mcast_own_query *query)
2005{
2006 query->startup_sent = 0;
2007
2008 if (br->multicast_disabled)
2009 return;
2010
2011 mod_timer(&query->timer, jiffies);
2012}
2013
/* Kick off the bridge's own IGMP (and MLD) query state machines. */
void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}
2021
/* Synchronously stop all bridge-level multicast timers, waiting for
 * any running timer handler to finish.
 */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}
2032
/* Tear down the MDB on bridge destruction: stop every entry's timer,
 * free the entries via RCU, then free the hash table itself.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	/* A table from a pending rehash must be released before we can
	 * free the current one; wait for the RCU callback to run.
	 */
	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}
2070
/* Configure the bridge device's own multicast-router mode.  Returns 0
 * on success or -EINVAL for values without bridge-level meaning (e.g.
 * MDB_RTR_TYPE_TEMP, which is port-only).
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* Only signal a state change when actually leaving a
		 * different mode.
		 */
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}
2097
/* Unlink @p from the router port list (no-op if absent) and notify
 * userspace and switchdev of the change.
 */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
2110
/* Configure a port's multicast-router mode.  Re-setting the current
 * mode only refreshes the temporary-router timer.  Returns 0 on
 * success or -EINVAL for unknown values.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		/* Permanent router: on the list, no expiry timer. */
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
2154
/* (Re)start the own-query machinery for @query (IPv4 or IPv6 instance)
 * and enable the per-port own queries on every active port.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}
2175
/* Enable (@val != 0) or disable multicast snooping on the bridge.
 * Enabling rehashes the MDB and restarts the queriers on all ports;
 * a still-pending old hash table makes the toggle fail with -EEXIST
 * and rolls the flag back.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_port *port;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;	/* no change */

	br_mc_disabled_update(br->dev, !val);
	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			/* Restore the pre-toggle state of the flag. */
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}
2218
2219bool br_multicast_enabled(const struct net_device *dev)
2220{
2221 struct net_bridge *br = netdev_priv(dev);
2222
2223 return !br->multicast_disabled;
2224}
2225EXPORT_SYMBOL_GPL(br_multicast_enabled);
2226
/* Report (under the multicast lock) whether the bridge device @dev is
 * currently acting as a multicast router.  Exported for other kernel
 * users.
 */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(br);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);
2238
/* Enable/disable the bridge acting as IGMP/MLD querier.  When
 * enabling, the other-querier grace period (delay_time) is primed with
 * one query_response_interval and our own queriers are started.
 * Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
2272
/* Change the maximum MDB hash table size.  @val must be a power of two
 * and no smaller than the current table's size; an existing table is
 * rehashed immediately.  Returns 0 on success, -EINVAL on a bad value,
 * -EEXIST while an old table from a previous rehash is still pending
 * (the old maximum is restored), or the rehash error.
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}
2311
2312int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2313{
2314 /* Currently we support only version 2 and 3 */
2315 switch (val) {
2316 case 2:
2317 case 3:
2318 break;
2319 default:
2320 return -EINVAL;
2321 }
2322
2323 spin_lock_bh(&br->multicast_lock);
2324 br->multicast_igmp_version = val;
2325 spin_unlock_bh(&br->multicast_lock);
2326
2327 return 0;
2328}
2329
2330#if IS_ENABLED(CONFIG_IPV6)
2331int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
2332{
2333 /* Currently we support version 1 and 2 */
2334 switch (val) {
2335 case 1:
2336 case 2:
2337 break;
2338 default:
2339 return -EINVAL;
2340 }
2341
2342 spin_lock_bh(&br->multicast_lock);
2343 br->multicast_mld_version = val;
2344 spin_unlock_bh(&br->multicast_lock);
2345
2346 return 0;
2347}
2348#endif
2349
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !br_port_exists(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* Skip the port we are adjacent to. */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			/* GFP_ATOMIC: we are inside an RCU read-side
			 * critical section.
			 */
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2406
2407/**
2408 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2409 * @dev: The bridge port providing the bridge on which to check for a querier
2410 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2411 *
2412 * Checks whether the given interface has a bridge on top and if so returns
2413 * true if a valid querier exists anywhere on the bridged link layer.
2414 * Otherwise returns false.
2415 */
2416bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2417{
2418 struct net_bridge *br;
2419 struct net_bridge_port *port;
2420 struct ethhdr eth;
2421 bool ret = false;
2422
2423 rcu_read_lock();
2424 if (!br_port_exists(dev))
2425 goto unlock;
2426
2427 port = br_port_get_rcu(dev);
2428 if (!port || !port->br)
2429 goto unlock;
2430
2431 br = port->br;
2432
2433 memset(&eth, 0, sizeof(eth));
2434 eth.h_proto = htons(proto);
2435
2436 ret = br_multicast_querier_exists(br, &eth);
2437
2438unlock:
2439 rcu_read_unlock();
2440 return ret;
2441}
2442EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
2443
2444/**
2445 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2446 * @dev: The bridge port adjacent to which to check for a querier
2447 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2448 *
2449 * Checks whether the given interface has a bridge on top and if so returns
2450 * true if a selected querier is behind one of the other ports of this
2451 * bridge. Otherwise returns false.
2452 */
2453bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2454{
2455 struct net_bridge *br;
2456 struct net_bridge_port *port;
2457 bool ret = false;
2458
2459 rcu_read_lock();
2460 if (!br_port_exists(dev))
2461 goto unlock;
2462
2463 port = br_port_get_rcu(dev);
2464 if (!port || !port->br)
2465 goto unlock;
2466
2467 br = port->br;
2468
2469 switch (proto) {
2470 case ETH_P_IP:
2471 if (!timer_pending(&br->ip4_other_query.timer) ||
2472 rcu_dereference(br->ip4_querier.port) == port)
2473 goto unlock;
2474 break;
2475#if IS_ENABLED(CONFIG_IPV6)
2476 case ETH_P_IPV6:
2477 if (!timer_pending(&br->ip6_other_query.timer) ||
2478 rcu_dereference(br->ip6_querier.port) == port)
2479 goto unlock;
2480 break;
2481#endif
2482 default:
2483 goto unlock;
2484 }
2485
2486 ret = true;
2487unlock:
2488 rcu_read_unlock();
2489 return ret;
2490}
2491EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
2492
/* Account one IGMP/MLD packet of @type going in direction @dir (RX/TX)
 * into the given per-cpu statistics block.  The transport payload length
 * is used to distinguish query versions that share a message type.
 * Runs under the u64_stats seqcount so readers see consistent counters.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* IGMP payload length = total IP length - IP header length */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* v1/v2 and v3 queries share the same type; a query
			 * longer than a plain igmphdr is counted as v3.
			 */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* a zero max-response code marks a v1 query */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MLD payload length = IPv6 payload (incl. extension
		 * headers) minus everything before the transport header.
		 */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* an MLD query longer than a mld_msg is MLDv2 */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
2563
2564void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2565 const struct sk_buff *skb, u8 type, u8 dir)
2566{
2567 struct bridge_mcast_stats __percpu *stats;
2568
2569 /* if multicast_disabled is true then igmp type can't be set */
2570 if (!type || !br->multicast_stats_enabled)
2571 return;
2572
2573 if (p)
2574 stats = p->mcast_stats;
2575 else
2576 stats = br->mcast_stats;
2577 if (WARN_ON(!stats))
2578 return;
2579
2580 br_mcast_stats_add(stats, skb, type, dir);
2581}
2582
2583int br_multicast_init_stats(struct net_bridge *br)
2584{
2585 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2586 if (!br->mcast_stats)
2587 return -ENOMEM;
2588
2589 return 0;
2590}
2591
/* Release the bridge-wide per-cpu multicast statistics allocated by
 * br_multicast_init_stats().
 */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
2596
2597static void mcast_stats_add_dir(u64 *dst, u64 *src)
2598{
2599 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2600 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
2601}
2602
2603void br_multicast_get_stats(const struct net_bridge *br,
2604 const struct net_bridge_port *p,
2605 struct br_mcast_stats *dest)
2606{
2607 struct bridge_mcast_stats __percpu *stats;
2608 struct br_mcast_stats tdst;
2609 int i;
2610
2611 memset(dest, 0, sizeof(*dest));
2612 if (p)
2613 stats = p->mcast_stats;
2614 else
2615 stats = br->mcast_stats;
2616 if (WARN_ON(!stats))
2617 return;
2618
2619 memset(&tdst, 0, sizeof(tdst));
2620 for_each_possible_cpu(i) {
2621 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2622 struct br_mcast_stats temp;
2623 unsigned int start;
2624
2625 do {
2626 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2627 memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2628 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2629
2630 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
2631 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
2632 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2633 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2634 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2635 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2636 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2637 tdst.igmp_parse_errors += temp.igmp_parse_errors;
2638
2639 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
2640 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2641 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2642 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2643 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2644 tdst.mld_parse_errors += temp.mld_parse_errors;
2645 }
2646 memcpy(dest, &tdst, sizeof(*dest));
2647}