// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3:	Internet Group Management Protocol  [IGMP]
 *
 *	This code implements the IGMP protocol as defined in RFC1112. There has
 *	been a further revision of this protocol since which is now supported.
 *
 *	If you have trouble with this module be careful what gcc you have used,
 *	the older version didn't come out right using gcc 2.5.8, the newer one
 *	seems to fall out with gcc 2.6.2.
 *
 *	Authors:
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Fixes:
 *
 *		Alan Cox	:	Added lots of __inline__ to optimise
 *					the memory usage of all the tiny little
 *					functions.
 *		Alan Cox	:	Dumped the header building experiment.
 *		Alan Cox	:	Minor tweaks ready for multicast routing
 *					and extended IGMP protocol.
 *		Alan Cox	:	Removed a load of inline directives. Gcc 2.5.8
 *					writes utterly bogus code otherwise (sigh)
 *					fixed IGMP loopback to behave in the manner
 *					desired by mrouted, fixed the fact it has been
 *					broken since 1.3.6 and cleaned up a few minor
 *					points.
 *
 *		Chih-Jen Chang	:	Tried to revise IGMP to Version 2
 *		Tsu-Sheng Tsao		E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
 *					The enhancements are mainly based on Steve Deering's
 *					ipmulti-3.5 source code.
 *		Chih-Jen Chang	:	Added the igmp_get_mrouter_info and
 *		Tsu-Sheng Tsao		igmp_set_mrouter_info to keep track of
 *					the mrouted version on that device.
 *		Chih-Jen Chang	:	Added the max_resp_time parameter to
 *		Tsu-Sheng Tsao		igmp_heard_query(). Using this parameter
 *					to identify the multicast router version
 *					and do what the IGMP version 2 specified.
 *		Chih-Jen Chang	:	Added a timer to revert to IGMP V2 router
 *		Tsu-Sheng Tsao		if the specified time expired.
 *		Alan Cox	:	Stop IGMP from 0.0.0.0 being accepted.
 *		Alan Cox	:	Use GFP_ATOMIC in the right places.
 *		Christian Daudt :	igmp timer wasn't set for local group
 *					memberships but was being deleted,
 *					which caused a "del_timer() called
 *					from %p with timer not initialized\n"
 *					message (960131).
 *		Christian Daudt :	removed del_timer from
 *					igmp_timer_expire function (960205).
 *		Christian Daudt :	igmp_heard_report now only calls
 *					igmp_timer_expire if tm->running is
 *					true (960216).
 *		Malcolm Beattie :	ttl comparison wrong in igmp_rcv made
 *					igmp_heard_query never trigger. Expiry
 *					miscalculation fixed in igmp_heard_query
 *					and random() made to return unsigned to
 *					prevent negative expiry times.
 *		Alexey Kuznetsov:	Wrong group leaving behaviour, backport
 *					fix from pending 2.1.x patches.
 *		Alan Cox:		Forget to enable FDDI support earlier.
 *		Alexey Kuznetsov:	Fixed leaving groups on device down.
 *		Alexey Kuznetsov:	Accordance to igmp-v2-06 draft.
 *		David L Stevens:	IGMPv3 support, with help from
 *					Vinay Kulkarni
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>
#include <linux/pkt_sched.h>
#include <linux/byteorder/generic.h>

#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/inet_common.h>
#include <linux/netfilter_ipv4.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif

#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */

#define IGMP_V2_UNSOLICITED_REPORT_INTERVAL	(10*HZ)
#define IGMP_V3_UNSOLICITED_REPORT_INTERVAL	(1*HZ)
#define IGMP_QUERY_INTERVAL			(125*HZ)
#define IGMP_QUERY_RESPONSE_INTERVAL		(10*HZ)

#define IGMP_INITIAL_REPORT_DELAY		(1)

/* IGMP_INITIAL_REPORT_DELAY is not from the IGMP specs!
 * The IGMP specs require reporting membership immediately after
 * joining a group, but we delay the first report by a
 * small interval. It seems more natural and still does not
 * contradict the specs provided this delay is small enough.
 */

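/* Host compatibility with older queriers (RFC 2236/RFC 3376): behave
 * as an IGMPv1/v2 host while a querier of that version has been heard
 * recently (mr_v1_seen/mr_v2_seen hold fallback deadlines in jiffies),
 * or while the force_igmp_version setting pins the version.
 */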
#define IGMP_V1_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
	 ((in_dev)->mr_v1_seen && \
	  time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
	 ((in_dev)->mr_v2_seen && \
	  time_before(jiffies, (in_dev)->mr_v2_seen)))

static int unsolicited_report_interval(struct in_device *in_dev)
{
	int interval_ms, interval_jiffies;

	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		interval_ms = IN_DEV_CONF_GET(
			in_dev,
			IGMPV2_UNSOLICITED_REPORT_INTERVAL);
	else /* v3 */
		interval_ms = IN_DEV_CONF_GET(
			in_dev,
			IGMPV3_UNSOLICITED_REPORT_INTERVAL);

	interval_jiffies = msecs_to_jiffies(interval_ms);

	/* _timer functions can't handle a delay of 0 jiffies so ensure
	 * we always return a positive value.
	 */
	if (interval_jiffies <= 0)
		interval_jiffies = 1;
	return interval_jiffies;
}

static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
			      gfp_t gfp);
static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
#endif
static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta);

static void ip_ma_put(struct ip_mc_list *im)
{
	if (refcount_dec_and_test(&im->refcnt)) {
		in_dev_put(im->interface);
		kfree_rcu(im, rcu);
	}
}

#define for_each_pmc_rcu(in_dev, pmc)				\
	for (pmc = rcu_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next_rcu))

#define for_each_pmc_rtnl(in_dev, pmc)				\
	for (pmc = rtnl_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rtnl_dereference(pmc->next_rcu))

static void ip_sf_list_clear_all(struct ip_sf_list *psf)
{
	struct ip_sf_list *next;

	while (psf) {
		next = psf->sf_next;
		kfree(psf);
		psf = next;
	}
}

#ifdef CONFIG_IP_MULTICAST

/*
 *	Timer management
 */

static void igmp_stop_timer(struct ip_mc_list *im)
{
	spin_lock_bh(&im->lock);
	if (del_timer(&im->timer))
		refcount_dec(&im->refcnt);
	im->tm_running = 0;
	im->reporter = 0;
	im->unsolicit_count = 0;
	spin_unlock_bh(&im->lock);
}

/* Must be called with im->lock held */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
	int tv = prandom_u32() % max_delay;

	im->tm_running = 1;
	if (!mod_timer(&im->timer, jiffies+tv+2))
		refcount_inc(&im->refcnt);
}

static void igmp_gq_start_timer(struct in_device *in_dev)
{
	int tv = prandom_u32() % in_dev->mr_maxdelay;
	unsigned long exp = jiffies + tv + 2;

	if (in_dev->mr_gq_running &&
	    time_after_eq(exp, (in_dev->mr_gq_timer).expires))
		return;

	in_dev->mr_gq_running = 1;
	if (!mod_timer(&in_dev->mr_gq_timer, exp))
		in_dev_hold(in_dev);
}

static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
	int tv = prandom_u32() % delay;

	if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
		in_dev_hold(in_dev);
}

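/* Rearm a membership timer without pushing an already-pending expiry
 * further out: if the running timer would already fire within
 * max_delay, it is re-added unchanged.  As in igmp_start_timer(), a
 * pending timer holds one reference on @im.
 */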
static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
	spin_lock_bh(&im->lock);
	im->unsolicit_count = 0;
	if (del_timer(&im->timer)) {
		if ((long)(im->timer.expires-jiffies) < max_delay) {
			add_timer(&im->timer);
			im->tm_running = 1;
			spin_unlock_bh(&im->lock);
			return;
		}
		refcount_dec(&im->refcnt);
	}
	igmp_start_timer(im, max_delay);
	spin_unlock_bh(&im->lock);
}


/*
 *	Send an IGMP report.
 */

#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)


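/* Decide whether source @psf of group @pmc belongs in a record of the
 * given IGMPv3 @type.  @gdeleted/@sdeleted indicate that the group or
 * source comes from a "tomb" (deleted) list rather than a live one.
 */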
static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
		 int gdeleted, int sdeleted)
{
	switch (type) {
	case IGMPV3_MODE_IS_INCLUDE:
	case IGMPV3_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (!(pmc->gsquery && !psf->sf_gsresp)) {
			if (pmc->sfmode == MCAST_INCLUDE)
				return 1;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == IGMPV3_MODE_IS_INCLUDE;
			return pmc->sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return 0;
	case IGMPV3_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return 0;
		return pmc->sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case IGMPV3_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return 0;
		return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
	case IGMPV3_BLOCK_OLD_SOURCES:
		if (pmc->sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return 0;
}

static int
igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip_sf_list *psf;
	int scount = 0;

	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}

/* source address selection per RFC 3376 section 4.2.13 */
static __be32 igmpv3_get_srcaddr(struct net_device *dev,
				 const struct flowi4 *fl4)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return htonl(INADDR_ANY);

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (fl4->saddr == ifa->ifa_local)
			return fl4->saddr;
	}

	return htonl(INADDR_ANY);
}

static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
{
	struct sk_buff *skb;
	struct rtable *rt;
	struct iphdr *pip;
	struct igmpv3_report *pig;
	struct net *net = dev_net(dev);
	struct flowi4 fl4;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	unsigned int size = mtu;

	while (1) {
		skb = alloc_skb(size + hlen + tlen,
				GFP_ATOMIC | __GFP_NOWARN);
		if (skb)
			break;
		size >>= 1;
		if (size < 256)
			return NULL;
	}
	skb->priority = TC_PRIO_CONTROL;

	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt)) {
		kfree_skb(skb);
		return NULL;
	}

	skb_dst_set(skb, &rt->dst);
	skb->dev = dev;

	skb_reserve(skb, hlen);
	skb_tailroom_reserve(skb, mtu, tlen);

	skb_reset_network_header(skb);
	pip = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	pip->version  = 4;
	pip->ihl      = (sizeof(struct iphdr)+4)>>2;
	pip->tos      = 0xc0;
	pip->frag_off = htons(IP_DF);
	pip->ttl      = 1;
	pip->daddr    = fl4.daddr;

	rcu_read_lock();
	pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
	rcu_read_unlock();

	pip->protocol = IPPROTO_IGMP;
	pip->tot_len  = 0;	/* filled in later */
	ip_select_ident(net, skb, NULL);
	((u8 *)&pip[1])[0] = IPOPT_RA;
	((u8 *)&pip[1])[1] = 4;
	((u8 *)&pip[1])[2] = 0;
	((u8 *)&pip[1])[3] = 0;

	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
	skb_put(skb, sizeof(*pig));
	pig = igmpv3_report_hdr(skb);
	pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
	pig->resv1 = 0;
	pig->csum = 0;
	pig->resv2 = 0;
	pig->ngrec = 0;
	return skb;
}

static int igmpv3_sendpack(struct sk_buff *skb)
{
	struct igmphdr *pig = igmp_hdr(skb);
	const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);

	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);

	return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
}

static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
{
	return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
}

static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, struct igmpv3_grec **ppgr, unsigned int mtu)
{
	struct net_device *dev = pmc->interface->dev;
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr;

	if (!skb) {
		skb = igmpv3_newpack(dev, mtu);
		if (!skb)
			return NULL;
	}
	pgr = skb_put(skb, sizeof(struct igmpv3_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->multiaddr;
	pih = igmpv3_report_hdr(skb);
	pih->ngrec = htons(ntohs(pih->ngrec)+1);
	*ppgr = pgr;
	return skb;
}

#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)

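/* Append one group record of @type for @pmc, with the sources that
 * pass is_in().  Full packets are sent and a fresh one is allocated as
 * needed (NULL is returned if allocation fails); tomb sources whose
 * retransmission count (sf_crcount) hits zero are freed along the way.
 */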
static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, int gdeleted, int sdeleted)
{
	struct net_device *dev = pmc->interface->dev;
	struct net *net = dev_net(dev);
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr = NULL;
	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;
	unsigned int mtu;

	if (pmc->multiaddr == IGMP_ALL_HOSTS)
		return skb;
	if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
		return skb;

	mtu = READ_ONCE(dev->mtu);
	if (mtu < IPV4_MIN_MTU)
		return skb;

	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
		  type == IGMPV3_MODE_IS_EXCLUDE;
	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
		   type == IGMPV3_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->tomb : &pmc->sources;

	if (!*psf_list)
		goto empty_source;

	pih = skb ? igmpv3_report_hdr(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pih && pih->ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = *psf_list; psf; psf = psf_next) {
		__be32 *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* Based on RFC3376 5.1. Should not send source-list change
		 * records when there is a filter mode change.
		 */
		if (((gdeleted && pmc->sfmode == MCAST_EXCLUDE) ||
		     (!gdeleted && pmc->crcount)) &&
		    (type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount)
			goto decrease_sf_crcount;

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(__be32) +
		    first*sizeof(struct igmpv3_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = skb_put(skb, sizeof(__be32));
		*psrc = psf->sf_inaddr;
		scount++; stotal++;
		if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
			psf->sf_crcount--;
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == IGMPV3_ALLOW_NEW_SOURCES ||
		    type == IGMPV3_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->crcount || isquery) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
				igmpv3_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->gsquery = 0;	/* clear query state on report */
	return skb;
}

static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
	struct sk_buff *skb = NULL;
	struct net *net = dev_net(in_dev->dev);
	int type;

	if (!pmc) {
		rcu_read_lock();
		for_each_pmc_rcu(in_dev, pmc) {
			if (pmc->multiaddr == IGMP_ALL_HOSTS)
				continue;
			if (ipv4_is_local_multicast(pmc->multiaddr) &&
			    !net->ipv4.sysctl_igmp_llm_reports)
				continue;
			spin_lock_bh(&pmc->lock);
			if (pmc->sfcount[MCAST_EXCLUDE])
				type = IGMPV3_MODE_IS_EXCLUDE;
			else
				type = IGMPV3_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->lock);
		}
		rcu_read_unlock();
	} else {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE])
			type = IGMPV3_MODE_IS_EXCLUDE;
		else
			type = IGMPV3_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->lock);
	}
	if (!skb)
		return 0;
	return igmpv3_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 */
static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
{
	struct ip_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf = *ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				psf_prev->sf_next = psf->sf_next;
			else
				*ppsf = psf->sf_next;
			kfree(psf);
		} else
			psf_prev = psf;
	}
}

static void kfree_pmc(struct ip_mc_list *pmc)
{
	ip_sf_list_clear_all(pmc->sources);
	ip_sf_list_clear_all(pmc->tomb);
	kfree(pmc);
}

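/* Emit the pending IGMPv3 change report: TO_IN/BLOCK records for
 * groups on the interface's tomb list, then ALLOW/BLOCK and
 * filter-mode-change records for live groups.  Fully retransmitted
 * tomb entries are released here.
 */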
static void igmpv3_send_cr(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	rcu_read_lock();
	spin_lock_bh(&in_dev->mc_tomb_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc_next) {
		pmc_next = pmc->next;
		if (pmc->sfmode == MCAST_INCLUDE) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1);
		}
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE) {
				type = IGMPV3_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0);
			}
			pmc->crcount--;
			if (pmc->crcount == 0) {
				igmpv3_clear_zeros(&pmc->tomb);
				igmpv3_clear_zeros(&pmc->sources);
			}
		}
		if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				in_dev->mc_tomb = pmc_next;
			in_dev_put(pmc->interface);
			kfree_pmc(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	/* change recs */
	for_each_pmc_rcu(in_dev, pmc) {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_ALLOW_NEW_SOURCES;
		} else {
			type = IGMPV3_ALLOW_NEW_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */

		/* filter mode changes */
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE)
				type = IGMPV3_CHANGE_TO_EXCLUDE;
			else
				type = IGMPV3_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			pmc->crcount--;
		}
		spin_unlock_bh(&pmc->lock);
	}
	rcu_read_unlock();

	if (!skb)
		return;
	(void) igmpv3_sendpack(skb);
}

static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
	int type)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;
	struct net_device *dev = in_dev->dev;
	struct net *net = dev_net(dev);
	__be32 group = pmc ? pmc->multiaddr : 0;
	struct flowi4 fl4;
	__be32 dst;
	int hlen, tlen;

	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
		return igmpv3_send_report(in_dev, pmc);

	if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
		return 0;

	if (type == IGMP_HOST_LEAVE_MESSAGE)
		dst = IGMP_ALL_ROUTER;
	else
		dst = group;

	rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt))
		return -1;

	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
	if (!skb) {
		ip_rt_put(rt);
		return -1;
	}
	skb->priority = TC_PRIO_CONTROL;

	skb_dst_set(skb, &rt->dst);

	skb_reserve(skb, hlen);

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	iph->version  = 4;
	iph->ihl      = (sizeof(struct iphdr)+4)>>2;
	iph->tos      = 0xc0;
	iph->frag_off = htons(IP_DF);
	iph->ttl      = 1;
	iph->daddr    = dst;
	iph->saddr    = fl4.saddr;
	iph->protocol = IPPROTO_IGMP;
	ip_select_ident(net, skb, NULL);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;

	ih = skb_put(skb, sizeof(struct igmphdr));
	ih->type = type;
	ih->code = 0;
	ih->csum = 0;
	ih->group = group;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return ip_local_out(net, skb->sk, skb);
}

static void igmp_gq_timer_expire(struct timer_list *t)
{
	struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer);

	in_dev->mr_gq_running = 0;
	igmpv3_send_report(in_dev, NULL);
	in_dev_put(in_dev);
}

static void igmp_ifc_timer_expire(struct timer_list *t)
{
	struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
	u32 mr_ifc_count;

	igmpv3_send_cr(in_dev);
restart:
	mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);

	if (mr_ifc_count) {
		if (cmpxchg(&in_dev->mr_ifc_count,
			    mr_ifc_count,
			    mr_ifc_count - 1) != mr_ifc_count)
			goto restart;
		igmp_ifc_start_timer(in_dev,
				     unsolicited_report_interval(in_dev));
	}
	in_dev_put(in_dev);
}

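/* Schedule an interface change report (IGMPv3 only): queue qrv
 * retransmissions of the pending change records and fire the first
 * one almost immediately.
 */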
static void igmp_ifc_event(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);

	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		return;
	WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
	igmp_ifc_start_timer(in_dev, 1);
}


static void igmp_timer_expire(struct timer_list *t)
{
	struct ip_mc_list *im = from_timer(im, t, timer);
	struct in_device *in_dev = im->interface;

	spin_lock(&im->lock);
	im->tm_running = 0;

	if (im->unsolicit_count && --im->unsolicit_count)
		igmp_start_timer(im, unsolicited_report_interval(in_dev));

	im->reporter = 1;
	spin_unlock(&im->lock);

	if (IGMP_V1_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
	else if (IGMP_V2_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
	else
		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);

	ip_ma_put(im);
}

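/* Helpers for group-and-source-specific queries: mark the sources that
 * must be answered for and return 0 when the response would be empty
 * (e.g. all queried sources are excluded), so the caller can avoid
 * scheduling a needless report.
 */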
/* mark EXCLUDE-mode sources */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (srcs[i] == psf->sf_inaddr) {
				scount++;
				break;
			}
		}
	}
	pmc->gsquery = 0;
	if (scount == nsrcs)	/* all sources excluded */
		return 0;
	return 1;
}

static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	if (pmc->sfmode == MCAST_EXCLUDE)
		return igmp_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */
	scount = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++)
			if (srcs[i] == psf->sf_inaddr) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
	}
	if (!scount) {
		pmc->gsquery = 0;
		return 0;
	}
	pmc->gsquery = 1;
	return 1;
}

/* return true if packet was dropped */
static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
{
	struct ip_mc_list *im;
	struct net *net = dev_net(in_dev->dev);

	/* Timers are only set for non-local groups */

	if (group == IGMP_ALL_HOSTS)
		return false;
	if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
		return false;

	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		if (im->multiaddr == group) {
			igmp_stop_timer(im);
			break;
		}
	}
	rcu_read_unlock();
	return false;
}

/* return true if packet was dropped */
static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
	int len)
{
	struct igmphdr *ih = igmp_hdr(skb);
	struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
	struct ip_mc_list *im;
	__be32 group = ih->group;
	int max_delay;
	int mark = 0;
	struct net *net = dev_net(in_dev->dev);

	if (len == 8) {
		if (ih->code == 0) {
			/* Alas, an old v1 router is present here. */

			max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
			in_dev->mr_v1_seen = jiffies +
				(in_dev->mr_qrv * in_dev->mr_qi) +
				in_dev->mr_qri;
			group = 0;
		} else {
			/* v2 router present */
			max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
			in_dev->mr_v2_seen = jiffies +
				(in_dev->mr_qrv * in_dev->mr_qi) +
				in_dev->mr_qri;
		}
		/* cancel the interface change timer */
		WRITE_ONCE(in_dev->mr_ifc_count, 0);
		if (del_timer(&in_dev->mr_ifc_timer))
			__in_dev_put(in_dev);
		/* clear deleted report items */
		igmpv3_clear_delrec(in_dev);
	} else if (len < 12) {
		return true;	/* ignore bogus packet; freed by caller */
	} else if (IGMP_V1_SEEN(in_dev)) {
		/* This is a v3 query with v1 queriers present */
		max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
		group = 0;
	} else if (IGMP_V2_SEEN(in_dev)) {
		/* this is a v3 query with v2 queriers present;
		 * Interpretation of the max_delay code is problematic here.
		 * A real v2 host would use ih_code directly, while v3 has a
		 * different encoding. We use the v3 encoding as more likely
		 * to be intended in a v3 query.
		 */
		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
	} else { /* v3 */
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
			return true;

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs) {
			if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
					   + ntohs(ih3->nsrcs)*sizeof(__be32)))
				return true;
			ih3 = igmpv3_query_hdr(skb);
		}

		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
		in_dev->mr_maxdelay = max_delay;

		/* RFC3376, 4.1.6. QRV and 4.1.7. QQIC, when the most recently
		 * received value was zero, use the default or statically
		 * configured value.
		 */
		in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv;
		in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;

		/* RFC3376, 8.3. Query Response Interval:
		 * The number of seconds represented by the [Query Response
		 * Interval] must be less than the [Query Interval].
		 */
		if (in_dev->mr_qri >= in_dev->mr_qi)
			in_dev->mr_qri = (in_dev->mr_qi/HZ - 1)*HZ;

		if (!group) { /* general query */
			if (ih3->nsrcs)
				return true;	/* no sources allowed */
			igmp_gq_start_timer(in_dev);
			return false;
		}
		/* mark sources to include, if group & source-specific */
		mark = ih3->nsrcs != 0;
	}

	/*
	 * - Start the timers in all of our membership records
	 *   that the query applies to for the interface on
	 *   which the query arrived excl. those that belong
	 *   to a "local" group (224.0.0.X)
	 * - For timers already running check if they need to
	 *   be reset.
	 * - Use the igmp->igmp_code field as the maximum
	 *   delay possible
	 */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		int changed;

		if (group && group != im->multiaddr)
			continue;
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !net->ipv4.sysctl_igmp_llm_reports)
			continue;
		spin_lock_bh(&im->lock);
		if (im->tm_running)
			im->gsquery = im->gsquery && mark;
		else
			im->gsquery = mark;
		changed = !im->gsquery ||
			igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
		spin_unlock_bh(&im->lock);
		if (changed)
			igmp_mod_timer(im, max_delay);
	}
	rcu_read_unlock();
	return false;
}

/* called in rcu_read_lock() section */
int igmp_rcv(struct sk_buff *skb)
{
	/* This basically follows the spec line by line -- see RFC1112 */
	struct igmphdr *ih;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	int len = skb->len;
	bool dropped = true;

	if (netif_is_l3_master(dev)) {
		dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif);
		if (!dev)
			goto drop;
	}

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
		goto drop;

	if (skb_checksum_simple_validate(skb))
		goto drop;

	ih = igmp_hdr(skb);
	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		dropped = igmp_heard_query(in_dev, skb, len);
		break;
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Is it our report looped back? */
		if (rt_is_output_route(skb_rtable(skb)))
			break;
		/* don't rely on MC router hearing unicast reports */
		if (skb->pkt_type == PACKET_MULTICAST ||
		    skb->pkt_type == PACKET_BROADCAST)
			dropped = igmp_heard_report(in_dev, ih->group);
		break;
	case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
		return pim_rcv_v1(skb);
#endif
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
	case IGMP_DVMRP:
	case IGMP_TRACE:
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_MTRACE:
	case IGMP_MTRACE_RESP:
		break;
	default:
		break;
	}

drop:
	if (dropped)
		kfree_skb(skb);
	else
		consume_skb(skb);
	return 0;
}

#endif


/*
 *	Add a filter to a device
 */

static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
	   We will get multicast token leakage, when IFF_MULTICAST
	   is changed. This check should be done in ndo_set_rx_mode
	   routine. Something sort of:
	   if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
	   --ANK
	   */
	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_add(dev, buf);
}

/*
 *	Remove a filter from a device
 */

static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_del(dev, buf);
}

#ifdef CONFIG_IP_MULTICAST
/*
 * deleted ip_mc_list manipulation
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
			      gfp_t gfp)
{
	struct ip_mc_list *pmc;
	struct net *net = dev_net(in_dev->dev);

	/* this is an "ip_mc_list" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), gfp);
	if (!pmc)
		return;
	spin_lock_init(&pmc->lock);
	spin_lock_bh(&im->lock);
	pmc->interface = im->interface;
	in_dev_hold(in_dev);
	pmc->multiaddr = im->multiaddr;
	pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
	pmc->sfmode = im->sfmode;
	if (pmc->sfmode == MCAST_INCLUDE) {
		struct ip_sf_list *psf;

		pmc->tomb = im->tomb;
		pmc->sources = im->sources;
		im->tomb = im->sources = NULL;
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->crcount;
	}
	spin_unlock_bh(&im->lock);

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc->next = in_dev->mc_tomb;
	in_dev->mc_tomb = pmc;
	spin_unlock_bh(&in_dev->mc_tomb_lock);
}

/*
 * restore ip_mc_list deleted records
 */
static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
	struct ip_mc_list *pmc, *pmc_prev;
	struct ip_sf_list *psf;
	struct net *net = dev_net(in_dev->dev);
	__be32 multiaddr = im->multiaddr;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc_prev = NULL;
	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc->next) {
		if (pmc->multiaddr == multiaddr)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			in_dev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	spin_lock_bh(&im->lock);
	if (pmc) {
		im->interface = pmc->interface;
		if (im->sfmode == MCAST_INCLUDE) {
			swap(im->tomb, pmc->tomb);
			swap(im->sources, pmc->sources);
			for (psf = im->sources; psf; psf = psf->sf_next)
				psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
		} else {
			im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
		}
		in_dev_put(pmc->interface);
		kfree_pmc(pmc);
	}
	spin_unlock_bh(&im->lock);
}

/*
 * flush ip_mc_list deleted records
 */
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *nextpmc;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc = in_dev->mc_tomb;
	in_dev->mc_tomb = NULL;
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip_mc_clear_src(pmc);
		in_dev_put(pmc->interface);
		kfree_pmc(pmc);
	}
	/* clear dead sources, too */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		struct ip_sf_list *psf;

		spin_lock_bh(&pmc->lock);
		psf = pmc->tomb;
		pmc->tomb = NULL;
		spin_unlock_bh(&pmc->lock);
		ip_sf_list_clear_all(psf);
	}
	rcu_read_unlock();
}
#endif

static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
	int reporter;
#endif

	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
		return;

	reporter = im->reporter;
	igmp_stop_timer(im);

	if (!in_dev->dead) {
		if (IGMP_V1_SEEN(in_dev))
			return;
		if (IGMP_V2_SEEN(in_dev)) {
			if (reporter)
				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
			return;
		}
		/* IGMPv3 */
		igmpv3_add_delrec(in_dev, im, gfp);

		igmp_ifc_event(in_dev);
	}
#endif
}

static void igmp_group_dropped(struct ip_mc_list *im)
{
	__igmp_group_dropped(im, GFP_KERNEL);
}

static void igmp_group_added(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	struct net *net = dev_net(in_dev->dev);
#endif

	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
		return;

	if (in_dev->dead)
		return;

	im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
		spin_lock_bh(&im->lock);
		igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
		spin_unlock_bh(&im->lock);
		return;
	}
	/* else, v3 */

	/* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should
	 * not send filter-mode change record as the mode should be from
	 * IN() to IN(A).
	 */
	if (im->sfmode == MCAST_EXCLUDE)
		im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;

	igmp_ifc_event(in_dev);
#endif
}


/*
 *	Multicast list managers
 */

static u32 ip_mc_hash(const struct ip_mc_list *im)
{
	return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
}

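/* Groups are kept on a plain linked list; a hash table for lookups is
 * only built lazily once a device joins four or more groups.
 */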
static void ip_mc_hash_add(struct in_device *in_dev,
			   struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash;
	u32 hash;

	mc_hash = rtnl_dereference(in_dev->mc_hash);
	if (mc_hash) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		rcu_assign_pointer(mc_hash[hash], im);
		return;
	}

	/* do not use a hash table for small number of items */
	if (in_dev->mc_count < 4)
		return;

	mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
			  GFP_KERNEL);
	if (!mc_hash)
		return;

	for_each_pmc_rtnl(in_dev, im) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		RCU_INIT_POINTER(mc_hash[hash], im);
	}

	rcu_assign_pointer(in_dev->mc_hash, mc_hash);
}

static void ip_mc_hash_remove(struct in_device *in_dev,
			      struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
	struct ip_mc_list *aux;

	if (!mc_hash)
		return;
	mc_hash += ip_mc_hash(im);
	while ((aux = rtnl_dereference(*mc_hash)) != im)
		mc_hash = &aux->next_hash;
	*mc_hash = im->next_hash;
}


/*
 *	A socket has joined a multicast group on device dev.
 */
static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
				unsigned int mode, gfp_t gfp)
{
	struct ip_mc_list *im;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == addr) {
			im->users++;
			ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
			goto out;
		}
	}

	im = kzalloc(sizeof(*im), gfp);
	if (!im)
		goto out;

	im->users = 1;
	im->interface = in_dev;
	in_dev_hold(in_dev);
	im->multiaddr = addr;
	/* initial mode is (EX, empty) */
	im->sfmode = mode;
	im->sfcount[mode] = 1;
	refcount_set(&im->refcnt, 1);
	spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
	timer_setup(&im->timer, igmp_timer_expire, 0);
#endif

	im->next_rcu = in_dev->mc_list;
	in_dev->mc_count++;
	rcu_assign_pointer(in_dev->mc_list, im);

	ip_mc_hash_add(in_dev, im);

#ifdef CONFIG_IP_MULTICAST
	igmpv3_del_delrec(in_dev, im);
#endif
	igmp_group_added(im);
	if (!in_dev->dead)
		ip_rt_multicast_event(in_dev);
out:
	return;
}

void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	____ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE, gfp);
}
EXPORT_SYMBOL(__ip_mc_inc_group);

void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
	__ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
}
EXPORT_SYMBOL(ip_mc_inc_group);

static int ip_mc_check_iphdr(struct sk_buff *skb)
{
	const struct iphdr *iph;
	unsigned int len;
	unsigned int offset = skb_network_offset(skb) + sizeof(*iph);

	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph))
		return -EINVAL;

	offset += ip_hdrlen(skb) - sizeof(*iph);

	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	len = skb_network_offset(skb) + ntohs(iph->tot_len);
	if (skb->len < len || len < offset)
		return -EINVAL;

	skb_set_transport_header(skb, offset);

	return 0;
}

static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb);

	len += sizeof(struct igmpv3_report);

	return ip_mc_may_pull(skb, len) ? 0 : -EINVAL;
}

static int ip_mc_check_igmp_query(struct sk_buff *skb)
{
	unsigned int transport_len = ip_transport_len(skb);
	unsigned int len;

	/* IGMPv{1,2}? */
	if (transport_len != sizeof(struct igmphdr)) {
		/* or IGMPv3? */
		if (transport_len < sizeof(struct igmpv3_query))
			return -EINVAL;

		len = skb_transport_offset(skb) + sizeof(struct igmpv3_query);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;
	}

	/* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
	 * all-systems destination addresses (224.0.0.1) for general queries
	 */
	if (!igmp_hdr(skb)->group &&
	    ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP))
		return -EINVAL;

	return 0;
}

static int ip_mc_check_igmp_msg(struct sk_buff *skb)
{
	switch (igmp_hdr(skb)->type) {
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		return 0;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		return ip_mc_check_igmp_reportv3(skb);
	case IGMP_HOST_MEMBERSHIP_QUERY:
		return ip_mc_check_igmp_query(skb);
	default:
		return -ENOMSG;
	}
}

static inline __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
{
	return skb_checksum_simple_validate(skb);
}

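/* Validate the IGMP checksum over the transport payload only; frames
 * may carry padding beyond tot_len, which skb_checksum_trimmed() works
 * around (possibly on a clone, which is freed again here since
 * validation is the only side effect needed).
 */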
static int ip_mc_check_igmp_csum(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
	unsigned int transport_len = ip_transport_len(skb);
	struct sk_buff *skb_chk;

	if (!ip_mc_may_pull(skb, len))
		return -EINVAL;

	skb_chk = skb_checksum_trimmed(skb, transport_len,
				       ip_mc_validate_checksum);
	if (!skb_chk)
		return -EINVAL;

	if (skb_chk != skb)
		kfree_skb(skb_chk);

	return 0;
}

/**
 * ip_mc_check_igmp - checks whether this is a sane IGMP packet
 * @skb: the skb to validate
 *
 * Checks whether an IPv4 packet is a valid IGMP packet. If so, sets
 * the skb transport header accordingly and returns zero.
 *
 * -EINVAL: A broken packet was detected, i.e. it violates some internet
 *  standard
 * -ENOMSG: IP header validation succeeded but it is not an IGMP packet.
 * -ENOMEM: A memory allocation failure happened.
 *
 * Caller needs to set the skb network header beforehand.
 */
int ip_mc_check_igmp(struct sk_buff *skb)
{
	int ret = ip_mc_check_iphdr(skb);

	if (ret < 0)
		return ret;

	if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
		return -ENOMSG;

	ret = ip_mc_check_igmp_csum(skb);
	if (ret < 0)
		return ret;

	return ip_mc_check_igmp_msg(skb);
}
EXPORT_SYMBOL(ip_mc_check_igmp);

/*
 *	Resend IGMP JOIN report; used by netdev notifier.
 */
static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
	struct ip_mc_list *im;
	int type;
	struct net *net = dev_net(in_dev->dev);

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		if (ipv4_is_local_multicast(im->multiaddr) &&
		    !net->ipv4.sysctl_igmp_llm_reports)
			continue;

		/* a failover is happening and switches
		 * must be notified immediately
		 */
		if (IGMP_V1_SEEN(in_dev))
			type = IGMP_HOST_MEMBERSHIP_REPORT;
		else if (IGMP_V2_SEEN(in_dev))
			type = IGMPV2_HOST_MEMBERSHIP_REPORT;
		else
			type = IGMPV3_HOST_MEMBERSHIP_REPORT;
		igmp_send_report(in_dev, im, type);
	}
#endif
}

/*
 *	A socket has left a multicast group on device dev
 */

void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
	struct ip_mc_list *i;
	struct ip_mc_list __rcu **ip;

	ASSERT_RTNL();

	for (ip = &in_dev->mc_list;
	     (i = rtnl_dereference(*ip)) != NULL;
	     ip = &i->next_rcu) {
		if (i->multiaddr == addr) {
			if (--i->users == 0) {
				ip_mc_hash_remove(in_dev, i);
				*ip = i->next_rcu;
				in_dev->mc_count--;
				__igmp_group_dropped(i, gfp);
				ip_mc_clear_src(i);

				if (!in_dev->dead)
					ip_rt_multicast_event(in_dev);

				ip_ma_put(i);
				return;
			}
			break;
		}
	}
}
EXPORT_SYMBOL(__ip_mc_dec_group);

/* Device changing type */

void ip_mc_unmap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);
}

void ip_mc_remap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}

/* Device going down */

void ip_mc_down(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);

#ifdef CONFIG_IP_MULTICAST
	WRITE_ONCE(in_dev->mr_ifc_count, 0);
	if (del_timer(&in_dev->mr_ifc_timer))
		__in_dev_put(in_dev);
	in_dev->mr_gq_running = 0;
	if (del_timer(&in_dev->mr_gq_timer))
		__in_dev_put(in_dev);
#endif

	ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}

#ifdef CONFIG_IP_MULTICAST
static void ip_mc_reset(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);

	in_dev->mr_qi = IGMP_QUERY_INTERVAL;
	in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
	in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
}
#else
static void ip_mc_reset(struct in_device *in_dev)
{
}
#endif

void ip_mc_init_dev(struct in_device *in_dev)
{
	ASSERT_RTNL();

#ifdef CONFIG_IP_MULTICAST
	timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
	timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
#endif
	ip_mc_reset(in_dev);

	spin_lock_init(&in_dev->mc_tomb_lock);
}

/* Device going up */

void ip_mc_up(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	ip_mc_reset(in_dev);
	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);

	for_each_pmc_rtnl(in_dev, pmc) {
#ifdef CONFIG_IP_MULTICAST
		igmpv3_del_delrec(in_dev, pmc);
#endif
		igmp_group_added(pmc);
	}
}

/*
 *	Device is about to be destroyed: clean up.
 */

void ip_mc_destroy_dev(struct in_device *in_dev)
{
	struct ip_mc_list *i;

	ASSERT_RTNL();

	/* Deactivate timers */
	ip_mc_down(in_dev);
#ifdef CONFIG_IP_MULTICAST
	igmpv3_clear_delrec(in_dev);
#endif

	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
		in_dev->mc_list = i->next_rcu;
		in_dev->mc_count--;
		ip_mc_clear_src(i);
		ip_ma_put(i);
	}
}

/* RTNL is locked */
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
	struct net_device *dev = NULL;
	struct in_device *idev = NULL;

	if (imr->imr_ifindex) {
		idev = inetdev_by_index(net, imr->imr_ifindex);
		return idev;
	}
	if (imr->imr_address.s_addr) {
		dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
		if (!dev)
			return NULL;
	}

	if (!dev) {
		struct rtable *rt = ip_route_output(net,
						    imr->imr_multiaddr.s_addr,
						    0, 0, 0);
		if (!IS_ERR(rt)) {
			dev = rt->dst.dev;
			ip_rt_put(rt);
		}
	}
	if (dev) {
		imr->imr_ifindex = dev->ifindex;
		idev = __in_dev_get_rtnl(dev);
	}
	return idev;
}

/*
 *	Join a socket to a group
 */

static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (psf->sf_count[sfmode] == 0) {
		ip_rt_multicast_event(pmc->interface);
	}
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct in_device *in_dev = pmc->interface;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
		if (psf->sf_oldin &&
		    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
			psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
			psf->sf_next = pmc->tomb;
			pmc->tomb = psf;
			rv = 1;
		} else
#endif
			kfree(psf);
	}
	return rv;
}

1899#ifndef CONFIG_IP_MULTICAST
1900#define igmp_ifc_event(x) do { } while (0)
1901#endif
1902
static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int changerec = 0;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	if (!delta) {
		err = -EINVAL;
		if (!pmc->sfcount[sfmode])
			goto out_unlock;
		pmc->sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->sfmode == MCAST_EXCLUDE &&
	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(in_dev->dev);
#endif

		/* filter mode change */
		pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(pmc->interface);
	} else if (sf_setstate(pmc) || changerec) {
		igmp_ifc_event(pmc->interface);
#endif
	}
out_unlock:
	spin_unlock_bh(&pmc->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;
		psf->sf_inaddr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->sources = psf;
	}
	psf->sf_count[sfmode]++;
	if (psf->sf_count[sfmode] == 1) {
		ip_rt_multicast_event(pmc->interface);
	}
	return 0;
}

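/*
 * sf_markstate() records, per source, whether it was "in" the filter
 * before a change; sf_setstate() compares against that snapshot
 * afterwards, (re)arming per-source retransmit counts and keeping
 * "delete" records on pmc->tomb for sources that became inactive.
 */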
#ifdef CONFIG_IP_MULTICAST
static void sf_markstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];

	for (psf = pmc->sources; psf; psf = psf->sf_next)
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}

static int sf_setstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *dpsf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
	int qrv = pmc->interface->mr_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip_sf_list *prev = NULL;

				for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
					if (dpsf->sf_inaddr == psf->sf_inaddr)
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {

			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
				if (dpsf->sf_inaddr == psf->sf_inaddr)
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->lock held by callers */
				dpsf->sf_next = pmc->tomb;
				pmc->tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
#endif

/*
 * Add multicast source filter list to the interface list
 */
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int isexclude;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();

#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	isexclude = pmc->sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			pmc->sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		struct net *net = dev_net(pmc->interface->dev);
		in_dev = pmc->interface;
#endif

		/* filter mode change */
		if (pmc->sfcount[MCAST_EXCLUDE])
			pmc->sfmode = MCAST_EXCLUDE;
		else if (pmc->sfcount[MCAST_INCLUDE])
			pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		/* else no filters; keep old mode for reports */

		pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(in_dev);
	} else if (sf_setstate(pmc)) {
		igmp_ifc_event(in_dev);
#endif
	}
	spin_unlock_bh(&pmc->lock);
	return err;
}

static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
	struct ip_sf_list *tomb, *sources;

	spin_lock_bh(&pmc->lock);
	tomb = pmc->tomb;
	pmc->tomb = NULL;
	sources = pmc->sources;
	pmc->sources = NULL;
	pmc->sfmode = MCAST_EXCLUDE;
	pmc->sfcount[MCAST_INCLUDE] = 0;
	pmc->sfcount[MCAST_EXCLUDE] = 1;
	spin_unlock_bh(&pmc->lock);

	ip_sf_list_clear_all(tomb);
	ip_sf_list_clear_all(sources);
}

/* Join a multicast group
 */
static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
			      unsigned int mode)
{
	__be32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;
	int err;

	ASSERT_RTNL();

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	err = -EADDRINUSE;
	ifindex = imr->imr_ifindex;
	for_each_pmc_rtnl(inet, i) {
		if (i->multi.imr_multiaddr.s_addr == addr &&
		    i->multi.imr_ifindex == ifindex)
			goto done;
		count++;
	}
	err = -ENOBUFS;
	if (count >= net->ipv4.sysctl_igmp_max_memberships)
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
	if (!iml)
		goto done;

	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next_rcu = inet->mc_list;
	iml->sflist = NULL;
	iml->sfmode = mode;
	rcu_assign_pointer(inet->mc_list, iml);
	____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
	err = 0;
done:
	return err;
}

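/*
 * Sketch of the usual userspace entry path (reached via
 * setsockopt(); the device name and group below are arbitrary):
 *
 *	struct ip_mreqn mreq = {
 *		.imr_multiaddr.s_addr	= inet_addr("239.1.1.1"),
 *		.imr_ifindex		= if_nametoindex("eth0"),
 *	};
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */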
/* Join ASM (Any-Source Multicast) group
 */
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
	return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ip_mc_join_group);

/* Join SSM (Source-Specific Multicast) group
 */
int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
			 unsigned int mode)
{
	return __ip_mc_join_group(sk, imr, mode);
}

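/*
 * Release the interface-level source-filter state that backs a socket
 * membership (iml) before the membership itself is torn down.
 */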
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
			   struct in_device *in_dev)
{
	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
	int err;

	if (!psf) {
		/* any-source empty exclude case */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, 0, NULL, 0);
	}
	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, psf->sl_count, psf->sl_addr, 0);
	RCU_INIT_POINTER(iml->sflist, NULL);
	/* decrease mem now to avoid the memleak warning */
	atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
	kfree_rcu(psf, rcu);
	return err;
}

int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct ip_mc_socklist __rcu **imlp;
	struct in_device *in_dev;
	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

	ASSERT_RTNL();

	in_dev = ip_mc_find_dev(net, imr);
	if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
		ret = -ENODEV;
		goto out;
	}
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list;
	     (iml = rtnl_dereference(*imlp)) != NULL;
	     imlp = &iml->next_rcu) {
		if (iml->multi.imr_multiaddr.s_addr != group)
			continue;
		if (ifindex) {
			if (iml->multi.imr_ifindex != ifindex)
				continue;
		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
			   iml->multi.imr_address.s_addr)
			continue;

		(void) ip_mc_leave_src(sk, iml, in_dev);

		*imlp = iml->next_rcu;

		if (in_dev)
			ip_mc_dec_group(in_dev, group);

		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
		return 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);

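/*
 * Add (add != 0) or delete a single source address in a socket's
 * per-group filter, in mode omode (MCAST_INCLUDE/MCAST_EXCLUDE), and
 * mirror the change into the interface-level filter.
 */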
int ip_mc_source(int add, int omode, struct sock *sk, struct
	ip_mreq_source *mreqs, int ifindex)
{
	int err;
	struct ip_mreqn imr;
	__be32 addr = mreqs->imr_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	ASSERT_RTNL();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
			NULL, 0);
		pmc->sfmode = omode;
	}

	psl = rtnl_dereference(pmc->sflist);
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
				sizeof(__be32));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
			&mreqs->imr_sourceaddr, 1);

		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= net->ipv4.sysctl_igmp_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip_sf_socklist *newpsl;
		int count = IP_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			/* decrease mem now to avoid the memleak warning */
			atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
			kfree_rcu(psl, rcu);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
			sizeof(__be32));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already present => error */
		goto done;
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = mreqs->imr_sourceaddr;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
		&mreqs->imr_sourceaddr, 1);
done:
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}

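/*
 * Replace a socket's entire source-filter state for one group in a
 * single operation (the IP_MSFILTER/MCAST_MSFILTER set path).
 */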
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
{
	int err = 0;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;
	if (msf->imsf_fmode != MCAST_INCLUDE &&
	    msf->imsf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	ASSERT_RTNL();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (msf->imsf_numsrc) {
		newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
		memcpy(newpsl->sl_addr, msf->imsf_slist,
		       msf->imsf_numsrc * sizeof(msf->imsf_slist[0]));
		err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
			msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
				     msf->imsf_fmode, 0, NULL, 0);
	}
	psl = rtnl_dereference(pmc->sflist);
	if (psl) {
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
		kfree_rcu(psl, rcu);
	} else
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			0, NULL, 0);
	rcu_assign_pointer(pmc->sflist, newpsl);
	pmc->sfmode = msf->imsf_fmode;
	err = 0;
done:
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}

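/*
 * Report a socket's current filter mode and source list for one group
 * back to userspace (the IP_MSFILTER get path).
 */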
int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
	struct ip_msfilter __user *optval, int __user *optlen)
{
	int err, len, count, copycount;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = 0;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	msf->imsf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	if (!psl) {
		len = 0;
		count = 0;
	} else {
		count = psl->sl_count;
	}
	copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
	len = copycount * sizeof(psl->sl_addr[0]);
	msf->imsf_numsrc = count;
	if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) {
		return -EFAULT;
	}
	if (len &&
	    copy_to_user(&optval->imsf_slist[0], psl->sl_addr, len))
		return -EFAULT;
	return 0;
done:
	return err;
}

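/*
 * Protocol-independent variant of the above, using struct group_filter
 * (the MCAST_MSFILTER get path).
 */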
int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	struct sockaddr_in *psin;
	__be32 addr;
	struct ip_mc_socklist *pmc;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;

	ASSERT_RTNL();

	psin = (struct sockaddr_in *)&gsf->gf_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	addr = psin->sin_addr.s_addr;
	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == addr &&
		    pmc->multi.imr_ifindex == gsf->gf_interface)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	count = psl ? psl->sl_count : 0;
	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	for (i = 0; i < copycount; i++) {
		struct sockaddr_storage ss;

		psin = (struct sockaddr_in *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin->sin_family = AF_INET;
		psin->sin_addr.s_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	return err;
}

/*
 * check if a multicast source filter allows delivery for a given <src,dst,intf>
 */
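/*
 * With no matching membership the verdict is inet->mc_all; in INCLUDE
 * mode the source must be listed to pass, in EXCLUDE mode it must not.
 */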
int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr,
		   int dif, int sdif)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *pmc;
	struct ip_sf_socklist *psl;
	int i;
	int ret;

	ret = 1;
	if (!ipv4_is_multicast(loc_addr))
		goto out;

	rcu_read_lock();
	for_each_pmc_rcu(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
		    (pmc->multi.imr_ifindex == dif ||
		     (sdif && pmc->multi.imr_ifindex == sdif)))
			break;
	}
	ret = inet->mc_all;
	if (!pmc)
		goto unlock;
	psl = rcu_dereference(pmc->sflist);
	ret = (pmc->sfmode == MCAST_EXCLUDE);
	if (!psl)
		goto unlock;

	for (i = 0; i < psl->sl_count; i++) {
		if (psl->sl_addr[i] == rmt_addr)
			break;
	}
	ret = 0;
	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
		goto unlock;
	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
		goto unlock;
	ret = 1;
unlock:
	rcu_read_unlock();
out:
	return ret;
}

/*
 * A socket is closing: drop all of its multicast memberships.
 */

void ip_mc_drop_socket(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct net *net = sock_net(sk);

	if (!inet->mc_list)
		return;

	rtnl_lock();
	while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
		struct in_device *in_dev;

		inet->mc_list = iml->next_rcu;
		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		(void) ip_mc_leave_src(sk, iml, in_dev);
		if (in_dev)
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
	}
	rtnl_unlock();
}

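/*
 * Fast-path receive check: returns nonzero if traffic from src_addr to
 * group mc_addr should be delivered on this interface.  IGMP itself is
 * always accepted so that membership management keeps working.
 */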
/* called with rcu_read_lock() */
int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u8 proto)
{
	struct ip_mc_list *im;
	struct ip_mc_list __rcu **mc_hash;
	struct ip_sf_list *psf;
	int rv = 0;

	mc_hash = rcu_dereference(in_dev->mc_hash);
	if (mc_hash) {
		u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);

		for (im = rcu_dereference(mc_hash[hash]);
		     im != NULL;
		     im = rcu_dereference(im->next_hash)) {
			if (im->multiaddr == mc_addr)
				break;
		}
	} else {
		for_each_pmc_rcu(in_dev, im) {
			if (im->multiaddr == mc_addr)
				break;
		}
	}
	if (im && proto == IPPROTO_IGMP) {
		rv = 1;
	} else if (im) {
		if (src_addr) {
			spin_lock_bh(&im->lock);
			for (psf = im->sources; psf; psf = psf->sf_next) {
				if (psf->sf_inaddr == src_addr)
					break;
			}
			if (psf)
				rv = psf->sf_count[MCAST_INCLUDE] ||
					psf->sf_count[MCAST_EXCLUDE] !=
					im->sfcount[MCAST_EXCLUDE];
			else
				rv = im->sfcount[MCAST_EXCLUDE] != 0;
			spin_unlock_bh(&im->lock);
		} else
			rv = 1; /* unspecified source; tentatively allow */
	}
	return rv;
}

#if defined(CONFIG_PROC_FS)
struct igmp_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *in_dev;
};

#define	igmp_mc_seq_private(seq)	((struct igmp_mc_iter_state *)(seq)->private)

static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_mc_list *im = NULL;
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *in_dev;

		in_dev = __in_dev_get_rcu(state->dev);
		if (!in_dev)
			continue;
		im = rcu_dereference(in_dev->mc_list);
		if (im) {
			state->in_dev = in_dev;
			break;
		}
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	im = rcu_dereference(im->next_rcu);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->in_dev = NULL;
			break;
		}
		state->in_dev = __in_dev_get_rcu(state->dev);
		if (!state->in_dev)
			continue;
		im = rcu_dereference(state->in_dev->mc_list);
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_mc_list *im = igmp_mc_get_first(seq);
	if (im)
		while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_mc_list *im;
	if (v == SEQ_START_TOKEN)
		im = igmp_mc_get_first(seq);
	else
		im = igmp_mc_get_next(seq, v);
	++*pos;
	return im;
}

static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

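/*
 * Emit one /proc/net/igmp record: a per-device header line (index,
 * name, group count, querier version), then one line per group with
 * the address in hex, user count, timer state and reporter flag.
 */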
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "Idx\tDevice    : Count Querier\tGroup    Users Timer\tReporter\n");
	else {
		struct ip_mc_list *im = (struct ip_mc_list *)v;
		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
		char *querier;
		long delta;

#ifdef CONFIG_IP_MULTICAST
		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
			  "V3";
#else
		querier = "NONE";
#endif

		if (rcu_access_pointer(state->in_dev->mc_list) == im) {
			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
		}

		delta = im->timer.expires - jiffies;
		seq_printf(seq,
			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
			   im->multiaddr, im->users,
			   im->tm_running,
			   im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
			   im->reporter);
	}
	return 0;
}

static const struct seq_operations igmp_mc_seq_ops = {
	.start	=	igmp_mc_seq_start,
	.next	=	igmp_mc_seq_next,
	.stop	=	igmp_mc_seq_stop,
	.show	=	igmp_mc_seq_show,
};

struct igmp_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *idev;
	struct ip_mc_list *im;
};

#define igmp_mcf_seq_private(seq)	((struct igmp_mcf_iter_state *)(seq)->private)

static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_sf_list *psf = NULL;
	struct ip_mc_list *im = NULL;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;
		idev = __in_dev_get_rcu(state->dev);
		if (unlikely(!idev))
			continue;
		im = rcu_dereference(idev->mc_list);
		if (likely(im)) {
			spin_lock_bh(&im->lock);
			psf = im->sources;
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->lock);
		}
	}
	return psf;
}

static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->lock);
		state->im = state->im->next;
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in_dev_get_rcu(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->lock);
		psf = state->im->sources;
	}
out:
	return psf;
}

static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_sf_list *psf = igmp_mcf_get_first(seq);
	if (psf)
		while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_sf_list *psf;
	if (v == SEQ_START_TOKEN)
		psf = igmp_mcf_get_first(seq);
	else
		psf = igmp_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
	if (likely(state->im)) {
		spin_unlock_bh(&state->im->lock);
		state->im = NULL;
	}
	state->idev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

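/*
 * Emit one /proc/net/mcfilter record: device index and name, group and
 * source address (hex, host byte order) and the INCLUDE/EXCLUDE
 * reference counts for that source.
 */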
static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip_sf_list *psf = (struct ip_sf_list *)v;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device        MCA        SRC    INC    EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s 0x%08x "
			   "0x%08x %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   ntohl(state->im->multiaddr),
			   ntohl(psf->sf_inaddr),
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp_mcf_seq_ops = {
	.start	=	igmp_mcf_seq_start,
	.next	=	igmp_mcf_seq_next,
	.stop	=	igmp_mcf_seq_stop,
	.show	=	igmp_mcf_seq_show,
};

static int __net_init igmp_net_init(struct net *net)
{
	struct proc_dir_entry *pde;
	int err = -ENOMEM;

	pde = proc_create_net("igmp", 0444, net->proc_net, &igmp_mc_seq_ops,
			sizeof(struct igmp_mc_iter_state));
	if (!pde)
		goto out_igmp;
	pde = proc_create_net("mcfilter", 0444, net->proc_net,
			&igmp_mcf_seq_ops, sizeof(struct igmp_mcf_iter_state));
	if (!pde)
		goto out_mcfilter;
	err = inet_ctl_sock_create(&net->ipv4.mc_autojoin_sk, AF_INET,
				   SOCK_DGRAM, 0, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP autojoin socket (err %d)\n",
		       err);
		goto out_sock;
	}

	return 0;

out_sock:
	remove_proc_entry("mcfilter", net->proc_net);
out_mcfilter:
	remove_proc_entry("igmp", net->proc_net);
out_igmp:
	/* propagate the real error rather than flattening it to -ENOMEM */
	return err;
}

static void __net_exit igmp_net_exit(struct net *net)
{
	remove_proc_entry("mcfilter", net->proc_net);
	remove_proc_entry("igmp", net->proc_net);
	inet_ctl_sock_destroy(net->ipv4.mc_autojoin_sk);
}

static struct pernet_operations igmp_net_ops = {
	.init = igmp_net_init,
	.exit = igmp_net_exit,
};
#endif

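/*
 * NETDEV_RESEND_IGMP asks us to re-announce every membership on the
 * device, e.g. after a bonding failover.
 */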
static int igmp_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev;

	switch (event) {
	case NETDEV_RESEND_IGMP:
		in_dev = __in_dev_get_rtnl(dev);
		if (in_dev)
			ip_mc_rejoin_groups(in_dev);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block igmp_notifier = {
	.notifier_call = igmp_netdev_event,
};

int __init igmp_mc_init(void)
{
#if defined(CONFIG_PROC_FS)
	int err;

	err = register_pernet_subsys(&igmp_net_ops);
	if (err)
		return err;
	err = register_netdevice_notifier(&igmp_notifier);
	if (err)
		goto reg_notif_fail;
	return 0;

reg_notif_fail:
	unregister_pernet_subsys(&igmp_net_ops);
	return err;
#else
	return register_netdevice_notifier(&igmp_notifier);
#endif
}