1/*
2 *	Linux NET3:	Internet Group Management Protocol  [IGMP]
3 *
4 *	This code implements the IGMP protocol as defined in RFC1112. There has
5 *	been a further revision of this protocol since which is now supported.
6 *
7 *	If you have trouble with this module be careful what gcc you have used,
8 *	the older version didn't come out right using gcc 2.5.8, the newer one
9 *	seems to fall out with gcc 2.6.2.
10 *
11 *	Authors:
12 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
13 *
14 *	This program is free software; you can redistribute it and/or
15 *	modify it under the terms of the GNU General Public License
16 *	as published by the Free Software Foundation; either version
17 *	2 of the License, or (at your option) any later version.
18 *
19 *	Fixes:
20 *
21 *		Alan Cox	:	Added lots of __inline__ to optimise
22 *					the memory usage of all the tiny little
23 *					functions.
24 *		Alan Cox	:	Dumped the header building experiment.
25 *		Alan Cox	:	Minor tweaks ready for multicast routing
26 *					and extended IGMP protocol.
27 *		Alan Cox	:	Removed a load of inline directives. Gcc 2.5.8
28 *					writes utterly bogus code otherwise (sigh)
29 *					fixed IGMP loopback to behave in the manner
30 *					desired by mrouted, fixed the fact it has been
31 *					broken since 1.3.6 and cleaned up a few minor
32 *					points.
33 *
34 *		Chih-Jen Chang	:	Tried to revise IGMP to Version 2
35 *		Tsu-Sheng Tsao		E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
36 *					The enhancements are mainly based on Steve Deering's
37 * 					ipmulti-3.5 source code.
38 *		Chih-Jen Chang	:	Added the igmp_get_mrouter_info and
39 *		Tsu-Sheng Tsao		igmp_set_mrouter_info to keep track of
40 *					the mrouted version on that device.
41 *		Chih-Jen Chang	:	Added the max_resp_time parameter to
42 *		Tsu-Sheng Tsao		igmp_heard_query(). Using this parameter
43 *					to identify the multicast router version
44 *					and do what the IGMP version 2 specified.
45 *		Chih-Jen Chang	:	Added a timer to revert to IGMP V2 router
46 *		Tsu-Sheng Tsao		if the specified time expired.
47 *		Alan Cox	:	Stop IGMP from 0.0.0.0 being accepted.
48 *		Alan Cox	:	Use GFP_ATOMIC in the right places.
49 *		Christian Daudt :	igmp timer wasn't set for local group
50 *					memberships but was being deleted,
51 *					which caused a "del_timer() called
52 *					from %p with timer not initialized\n"
53 *					message (960131).
54 *		Christian Daudt :	removed del_timer from
55 *					igmp_timer_expire function (960205).
56 *             Christian Daudt :       igmp_heard_report now only calls
57 *                                     igmp_timer_expire if tm->running is
58 *                                     true (960216).
59 *		Malcolm Beattie :	ttl comparison wrong in igmp_rcv made
60 *					igmp_heard_query never trigger. Expiry
61 *					miscalculation fixed in igmp_heard_query
62 *					and random() made to return unsigned to
63 *					prevent negative expiry times.
64 *		Alexey Kuznetsov:	Wrong group leaving behaviour, backport
65 *					fix from pending 2.1.x patches.
66 *		Alan Cox:		Forget to enable FDDI support earlier.
67 *		Alexey Kuznetsov:	Fixed leaving groups on device down.
68 *		Alexey Kuznetsov:	Accordance to igmp-v2-06 draft.
69 *		David L Stevens:	IGMPv3 support, with help from
70 *					Vinay Kulkarni
71 */
72
73#include <linux/module.h>
74#include <linux/slab.h>
75#include <asm/uaccess.h>
76#include <asm/system.h>
77#include <linux/types.h>
78#include <linux/kernel.h>
79#include <linux/jiffies.h>
80#include <linux/string.h>
81#include <linux/socket.h>
82#include <linux/sockios.h>
83#include <linux/in.h>
84#include <linux/inet.h>
85#include <linux/netdevice.h>
86#include <linux/skbuff.h>
87#include <linux/inetdevice.h>
88#include <linux/igmp.h>
89#include <linux/if_arp.h>
90#include <linux/rtnetlink.h>
91#include <linux/times.h>
92
93#include <net/net_namespace.h>
94#include <net/arp.h>
95#include <net/ip.h>
96#include <net/protocol.h>
97#include <net/route.h>
98#include <net/sock.h>
99#include <net/checksum.h>
100#include <linux/netfilter_ipv4.h>
101#ifdef CONFIG_IP_MROUTE
102#include <linux/mroute.h>
103#endif
104#ifdef CONFIG_PROC_FS
105#include <linux/proc_fs.h>
106#include <linux/seq_file.h>
107#endif
108
/* Per-socket limits: maximum multicast group memberships per socket and
 * maximum source-filter entries per membership.
 */
#define IP_MAX_MEMBERSHIPS	20
#define IP_MAX_MSF		10

#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */

#define IGMP_V1_Router_Present_Timeout		(400*HZ)
#define IGMP_V2_Router_Present_Timeout		(400*HZ)
#define IGMP_Unsolicited_Report_Interval	(10*HZ)
#define IGMP_Query_Response_Interval		(10*HZ)
#define IGMP_Unsolicited_Report_Count		2


#define IGMP_Initial_Report_Delay		(1)

/* IGMP_Initial_Report_Delay is not from IGMP specs!
 * IGMP specs require to report membership immediately after
 * joining a group, but we delay the first report by a
 * small interval. It seems more natural and still does not
 * contradict to specs provided this delay is small enough.
 */

/* True when the host must behave as an IGMPv1 (resp. v2) host on this
 * interface: either the version is forced via the force_igmp_version
 * sysctl (net-wide or per-device), or a v1/v2 querier has been heard
 * recently (mr_v1_seen / mr_v2_seen hold the expiry jiffies).
 */
#define IGMP_V1_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
	 ((in_dev)->mr_v1_seen && \
	  time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
	 ((in_dev)->mr_v2_seen && \
	  time_before(jiffies, (in_dev)->mr_v2_seen)))

/* Forward declarations for the IGMPv3 deleted-record bookkeeping and
 * source-filter state helpers defined later in this file.
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
#endif
static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta);
151
152static void ip_ma_put(struct ip_mc_list *im)
153{
154	if (atomic_dec_and_test(&im->refcnt)) {
155		in_dev_put(im->interface);
156		kfree(im);
157	}
158}
159
160#ifdef CONFIG_IP_MULTICAST
161
162/*
163 *	Timer management
164 */
165
/*
 * Cancel a group's pending report timer and clear its reporting state.
 * An armed timer holds a reference on "im" (see igmp_start_timer()),
 * so a successful del_timer() must drop that reference here.
 */
static __inline__ void igmp_stop_timer(struct ip_mc_list *im)
{
	spin_lock_bh(&im->lock);
	/* del_timer() returns nonzero only when the timer was still
	 * pending, i.e. the reference taken when it was armed has not
	 * been consumed by the expiry handler.
	 */
	if (del_timer(&im->timer))
		atomic_dec(&im->refcnt);
	im->tm_running = 0;
	im->reporter = 0;
	im->unsolicit_count = 0;
	spin_unlock_bh(&im->lock);
}
176
177/* It must be called with locked im->lock */
178static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
179{
180	int tv = net_random() % max_delay;
181
182	im->tm_running = 1;
183	if (!mod_timer(&im->timer, jiffies+tv+2))
184		atomic_inc(&im->refcnt);
185}
186
187static void igmp_gq_start_timer(struct in_device *in_dev)
188{
189	int tv = net_random() % in_dev->mr_maxdelay;
190
191	in_dev->mr_gq_running = 1;
192	if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
193		in_dev_hold(in_dev);
194}
195
196static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
197{
198	int tv = net_random() % delay;
199
200	if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
201		in_dev_hold(in_dev);
202}
203
/* Ensure the group's report timer fires no later than max_delay from
 * now. If a pending timer would already fire sooner, it is kept as-is.
 * Reference counting mirrors igmp_start_timer()/igmp_stop_timer():
 * a pending timer owns exactly one reference on "im".
 */
static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
	spin_lock_bh(&im->lock);
	im->unsolicit_count = 0;
	if (del_timer(&im->timer)) {
		if ((long)(im->timer.expires-jiffies) < max_delay) {
			/* existing expiry is sooner: re-add unchanged,
			 * keeping the reference the pending timer held */
			add_timer(&im->timer);
			im->tm_running = 1;
			spin_unlock_bh(&im->lock);
			return;
		}
		/* timer cancelled: drop its reference before re-arming */
		atomic_dec(&im->refcnt);
	}
	igmp_start_timer(im, max_delay);
	spin_unlock_bh(&im->lock);
}
220
221
222/*
223 *	Send an IGMP report.
224 */
225
226#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)
227
228
/* Decide whether source "psf" belongs in a group record of the given
 * IGMPv3 record type for group "pmc". "gdeleted"/"sdeleted" flag that
 * the record is being built from the deleted-group (tomb) and/or
 * deleted-source lists. Returns 1 if the source must be listed.
 */
static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
	int gdeleted, int sdeleted)
{
	switch (type) {
	case IGMPV3_MODE_IS_INCLUDE:
	case IGMPV3_MODE_IS_EXCLUDE:
		/* current-state records never report deleted entries */
		if (gdeleted || sdeleted)
			return 0;
		/* skip sources not marked for this group-and-source
		 * specific query response (see igmp_marksources()) */
		if (!(pmc->gsquery && !psf->sf_gsresp)) {
			if (pmc->sfmode == MCAST_INCLUDE)
				return 1;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == IGMPV3_MODE_IS_INCLUDE;
			return pmc->sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return 0;
	case IGMPV3_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		/* only sources excluded by every listener are listed */
		if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return 0;
		return pmc->sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case IGMPV3_ALLOW_NEW_SOURCES:
		/* only sources whose change is still being retransmitted */
		if (gdeleted || !psf->sf_crcount)
			return 0;
		return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
	case IGMPV3_BLOCK_OLD_SOURCES:
		if (pmc->sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return 0;
}
272
273static int
274igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
275{
276	struct ip_sf_list *psf;
277	int scount = 0;
278
279	for (psf=pmc->sources; psf; psf=psf->sf_next) {
280		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
281			continue;
282		scount++;
283	}
284	return scount;
285}
286
/*
 * Allocate and prepare an skb for an IGMPv3 report: route to the
 * all-IGMPv3-capable-routers address, build the IP header (TTL 1,
 * TOS 0xc0, DF, router-alert option) and a zeroed igmpv3_report
 * header. Returns NULL if allocation or routing fails, or if the
 * route has no usable local source address.
 */
static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
{
	struct sk_buff *skb;
	struct rtable *rt;
	struct iphdr *pip;
	struct igmpv3_report *pig;
	struct net *net = dev_net(dev);

	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	{
		struct flowi fl = { .oif = dev->ifindex,
				    .nl_u = { .ip4_u = {
				    .daddr = IGMPV3_ALL_MCR } },
				    .proto = IPPROTO_IGMP };
		if (ip_route_output_key(net, &rt, &fl)) {
			kfree_skb(skb);
			return NULL;
		}
	}
	/* no local source address on this route: cannot send a report */
	if (rt->rt_src == 0) {
		kfree_skb(skb);
		ip_rt_put(rt);
		return NULL;
	}

	skb_dst_set(skb, &rt->dst);
	skb->dev = dev;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));

	skb_reset_network_header(skb);
	pip = ip_hdr(skb);
	/* IP header plus 4 bytes for the router-alert option */
	skb_put(skb, sizeof(struct iphdr) + 4);

	pip->version  = 4;
	pip->ihl      = (sizeof(struct iphdr)+4)>>2;
	pip->tos      = 0xc0;
	pip->frag_off = htons(IP_DF);
	pip->ttl      = 1;
	pip->daddr    = rt->rt_dst;
	pip->saddr    = rt->rt_src;
	pip->protocol = IPPROTO_IGMP;
	pip->tot_len  = 0;	/* filled in later */
	ip_select_ident(pip, &rt->dst, NULL);
	/* router-alert option occupies the 4 bytes after the header */
	((u8*)&pip[1])[0] = IPOPT_RA;
	((u8*)&pip[1])[1] = 4;
	((u8*)&pip[1])[2] = 0;
	((u8*)&pip[1])[3] = 0;

	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
	skb_put(skb, sizeof(*pig));
	pig = igmpv3_report_hdr(skb);
	pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
	pig->resv1 = 0;
	pig->csum = 0;	/* computed at send time in igmpv3_sendpack() */
	pig->resv2 = 0;
	pig->ngrec = 0;
	return skb;
}
349
350static int igmpv3_sendpack(struct sk_buff *skb)
351{
352	struct igmphdr *pig = igmp_hdr(skb);
353	const int igmplen = skb->tail - skb->transport_header;
354
355	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
356
357	return ip_local_out(skb);
358}
359
360static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
361{
362	return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
363}
364
365static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
366	int type, struct igmpv3_grec **ppgr)
367{
368	struct net_device *dev = pmc->interface->dev;
369	struct igmpv3_report *pih;
370	struct igmpv3_grec *pgr;
371
372	if (!skb)
373		skb = igmpv3_newpack(dev, dev->mtu);
374	if (!skb)
375		return NULL;
376	pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
377	pgr->grec_type = type;
378	pgr->grec_auxwords = 0;
379	pgr->grec_nsrcs = 0;
380	pgr->grec_mca = pmc->multiaddr;
381	pih = igmpv3_report_hdr(skb);
382	pih->ngrec = htons(ntohs(pih->ngrec)+1);
383	*ppgr = pgr;
384	return skb;
385}
386
/* Payload bytes still available in "skb": room up to the device MTU
 * once a device is attached, raw tailroom otherwise, 0 for NULL.
 */
#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
	skb_tailroom(skb)) : 0)

/*
 * Append one group record of the given type for "pmc" to the report
 * being built in "skb" (NULL means "allocate a packet on demand").
 * When sources do not fit, the current packet is flushed with
 * igmpv3_sendpack() and the record continues in a fresh one, except
 * for EX/TO_EX records which are truncated instead (they list excluded
 * sources and a partial list is still valid). For ALLOW/BLOCK records
 * the per-source retransmit counters are decremented and exhausted
 * sources on deleted lists are unlinked and freed. Returns the
 * (possibly new) skb, or NULL on allocation failure.
 */
static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, int gdeleted, int sdeleted)
{
	struct net_device *dev = pmc->interface->dev;
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr = NULL;
	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;

	/* 224.0.0.1 membership is never reported */
	if (pmc->multiaddr == IGMP_ALL_HOSTS)
		return skb;

	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
		  type == IGMPV3_MODE_IS_EXCLUDE;
	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
		    type == IGMPV3_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->tomb : &pmc->sources;

	if (!*psf_list)
		goto empty_source;

	pih = skb ? igmpv3_report_hdr(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pih && pih->ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, dev->mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf=*psf_list; psf; psf=psf_next) {
		__be32 *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		/* need room for the source plus, if this record has not
		 * started yet, its record header */
		if (AVAILABLE(skb) < sizeof(__be32) +
		    first*sizeof(struct igmpv3_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, dev->mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = (__be32 *)skb_put(skb, sizeof(__be32));
		*psrc = psf->sf_inaddr;
		scount++; stotal++;
		if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
			psf->sf_crcount--;
			/* deleted source fully retransmitted: unlink & free */
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		/* ALLOW/BLOCK records with no sources are not sent */
		if (type == IGMPV3_ALLOW_NEW_SOURCES ||
		    type == IGMPV3_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->crcount || isquery) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb)<sizeof(struct igmpv3_grec)) {
				igmpv3_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->gsquery = 0;	/* clear query state on report */
	return skb;
}
498
/*
 * Send an IGMPv3 current-state report, either for the single group
 * "pmc" or (when pmc is NULL) for every group on the interface.
 * Groups in EXCLUDE mode get MODE_IS_EXCLUDE records, others
 * MODE_IS_INCLUDE. Returns 0 when nothing needed sending, otherwise
 * the result of igmpv3_sendpack().
 */
static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
	struct sk_buff *skb = NULL;
	int type;

	/* NOTE(review): vendor kill-switch below is disabled by #if 0 */
#if 0
	/* foxconn added start, zacker, 11/19/2010 */
	/* not to let kernel send igmp packets */
#ifdef IGMP_PROXY
	return 0;
#endif
	/* foxconn added end, zacker, 11/19/2010 */
#endif

	if (!pmc) {
		read_lock(&in_dev->mc_list_lock);
		for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
			if (pmc->multiaddr == IGMP_ALL_HOSTS)
				continue;
			spin_lock_bh(&pmc->lock);
			if (pmc->sfcount[MCAST_EXCLUDE])
				type = IGMPV3_MODE_IS_EXCLUDE;
			else
				type = IGMPV3_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->lock);
		}
		read_unlock(&in_dev->mc_list_lock);
	} else {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE])
			type = IGMPV3_MODE_IS_EXCLUDE;
		else
			type = IGMPV3_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->lock);
	}
	if (!skb)
		return 0;
	return igmpv3_sendpack(skb);
}
540
541/*
542 * remove zero-count source records from a source filter list
543 */
544static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
545{
546	struct ip_sf_list *psf_prev, *psf_next, *psf;
547
548	psf_prev = NULL;
549	for (psf=*ppsf; psf; psf = psf_next) {
550		psf_next = psf->sf_next;
551		if (psf->sf_crcount == 0) {
552			if (psf_prev)
553				psf_prev->sf_next = psf->sf_next;
554			else
555				*ppsf = psf->sf_next;
556			kfree(psf);
557		} else
558			psf_prev = psf;
559	}
560}
561
/*
 * Build and send the interface's IGMPv3 state-change report(s): first
 * BLOCK/TO_IN records for deleted groups on the tomb list (pruning
 * entries whose retransmit counts have run out), then ALLOW/BLOCK
 * source deltas and TO_IN/TO_EX filter-mode changes for live groups.
 * Called from the interface-change timer (igmp_ifc_timer_expire()).
 */
static void igmpv3_send_cr(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	/* NOTE(review): vendor kill-switch below is disabled by #if 0 */
#if 0
	/* foxconn added start, zacker, 11/19/2010 */
	/* not to let kernel send igmp packets */
#ifdef IGMP_PROXY
	return;
#endif
	/* foxconn added end, zacker, 11/19/2010 */
#endif

	read_lock(&in_dev->mc_list_lock);
	spin_lock_bh(&in_dev->mc_tomb_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc=in_dev->mc_tomb; pmc; pmc=pmc_next) {
		pmc_next = pmc->next;
		if (pmc->sfmode == MCAST_INCLUDE) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1);
		}
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE) {
				type = IGMPV3_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0);
			}
			pmc->crcount--;
			if (pmc->crcount == 0) {
				igmpv3_clear_zeros(&pmc->tomb);
				igmpv3_clear_zeros(&pmc->sources);
			}
		}
		/* fully reported with no residual state: drop the tomb
		 * entry and its device reference */
		if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				in_dev->mc_tomb = pmc_next;
			in_dev_put(pmc->interface);
			kfree(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	/* change recs */
	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_ALLOW_NEW_SOURCES;
		} else {
			type = IGMPV3_ALLOW_NEW_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */

		/* filter mode changes */
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE)
				type = IGMPV3_CHANGE_TO_EXCLUDE;
			else
				type = IGMPV3_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			pmc->crcount--;
		}
		spin_unlock_bh(&pmc->lock);
	}
	read_unlock(&in_dev->mc_list_lock);

	if (!skb)
		return;
	(void) igmpv3_sendpack(skb);
}
643
/*
 * Build and send a single v1/v2 IGMP message of the given type for
 * group "pmc" (pmc may be NULL, giving group 0). IGMPv3 report
 * requests are delegated to igmpv3_send_report(). Leave messages are
 * addressed to all-routers (224.0.0.2); everything else goes to the
 * group itself. Returns the IP output result, or -1 on routing or
 * allocation failure.
 */
static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
	int type)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;
	struct net_device *dev = in_dev->dev;
	struct net *net = dev_net(dev);
	__be32	group = pmc ? pmc->multiaddr : 0;
	__be32	dst;

	/* NOTE(review): vendor kill-switch below is disabled by #if 0 */
#if 0
	/* foxconn added start, zacker, 11/19/2010 */
	/* not to let kernel send igmp packets */
#ifdef IGMP_PROXY
	return 0;
#endif
	/* foxconn added end, zacker, 11/19/2010 */
#endif

	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
		return igmpv3_send_report(in_dev, pmc);
	else if (type == IGMP_HOST_LEAVE_MESSAGE)
		dst = IGMP_ALL_ROUTER;
	else
		dst = group;

	{
		struct flowi fl = { .oif = dev->ifindex,
				    .nl_u = { .ip4_u = { .daddr = dst } },
				    .proto = IPPROTO_IGMP };
		if (ip_route_output_key(net, &rt, &fl))
			return -1;
	}
	/* no usable local source address on this route */
	if (rt->rt_src == 0) {
		ip_rt_put(rt);
		return -1;
	}

	skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		ip_rt_put(rt);
		return -1;
	}

	skb_dst_set(skb, &rt->dst);

	skb_reserve(skb, LL_RESERVED_SPACE(dev));

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	/* IP header plus 4 bytes for the router-alert option */
	skb_put(skb, sizeof(struct iphdr) + 4);

	iph->version  = 4;
	iph->ihl      = (sizeof(struct iphdr)+4)>>2;
	iph->tos      = 0xc0;
	iph->frag_off = htons(IP_DF);
	iph->ttl      = 1;
	iph->daddr    = dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = IPPROTO_IGMP;
	ip_select_ident(iph, &rt->dst, NULL);
	((u8*)&iph[1])[0] = IPOPT_RA;
	((u8*)&iph[1])[1] = 4;
	((u8*)&iph[1])[2] = 0;
	((u8*)&iph[1])[3] = 0;

	ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	ih->type = type;
	ih->code = 0;
	ih->csum = 0;
	ih->group = group;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return ip_local_out(skb);
}
721
/* General-query response timer: send a full current-state report for
 * the interface, then drop the in_device reference taken when the
 * timer was armed in igmp_gq_start_timer().
 */
static void igmp_gq_timer_expire(unsigned long data)
{
	struct in_device *in_dev = (struct in_device *)data;

	in_dev->mr_gq_running = 0;
	igmpv3_send_report(in_dev, NULL);
	__in_dev_put(in_dev);
}
730
/* Interface-change timer: transmit one round of IGMPv3 state-change
 * reports and, while retransmissions remain (mr_ifc_count), re-arm for
 * the next round. Drops the reference the expired timer held; a
 * re-arm takes a fresh one via igmp_ifc_start_timer().
 */
static void igmp_ifc_timer_expire(unsigned long data)
{
	struct in_device *in_dev = (struct in_device *)data;

	igmpv3_send_cr(in_dev);
	if (in_dev->mr_ifc_count) {
		in_dev->mr_ifc_count--;
		igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
	}
	__in_dev_put(in_dev);
}
742
743static void igmp_ifc_event(struct in_device *in_dev)
744{
745	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
746		return;
747	in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
748		IGMP_Unsolicited_Report_Count;
749	igmp_ifc_start_timer(in_dev, 1);
750}
751
752
/* Per-group report timer: send a membership report using the highest
 * IGMP version permitted by the queriers seen on the interface, and
 * re-arm while unsolicited reports remain to be sent. Releases the
 * reference the expired timer held on "im" via ip_ma_put().
 */
static void igmp_timer_expire(unsigned long data)
{
	struct ip_mc_list *im=(struct ip_mc_list *)data;
	struct in_device *in_dev = im->interface;

	spin_lock(&im->lock);
	im->tm_running = 0;

	if (im->unsolicit_count) {
		im->unsolicit_count--;
		igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
	}
	/* remember that we were the last reporter for this group */
	im->reporter = 1;
	spin_unlock(&im->lock);

	if (IGMP_V1_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
	else if (IGMP_V2_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
	else
		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);

	ip_ma_put(im);
}
777
/* mark EXCLUDE-mode sources */
/* For a group-and-source-specific query on an EXCLUDE-mode group:
 * count how many of the queried sources are excluded by every
 * listener. Clears pmc->gsquery so any response reports full current
 * state. Returns 0 when all queried sources are excluded (no response
 * needed), 1 otherwise.
 */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++) {
			/* skip inactive filters */
			if (pmc->sfcount[MCAST_INCLUDE] ||
			    pmc->sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				continue;
			if (srcs[i] == psf->sf_inaddr) {
				scount++;
				break;
			}
		}
	}
	pmc->gsquery = 0;
	if (scount == nsrcs)	/* all sources excluded */
		return 0;
	return 1;
}
805
/* Mark the sources listed in a group-and-source-specific query so the
 * response (built via is_in()) covers only them. EXCLUDE-mode groups
 * are handled by igmp_xmarksources(). Sets pmc->gsquery when a
 * source-restricted response is pending. Returns 1 if a response is
 * needed, 0 if none of the queried sources are active for this group.
 */
static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	if (pmc->sfmode == MCAST_EXCLUDE)
		return igmp_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */
	scount = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++)
			if (srcs[i] == psf->sf_inaddr) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
	}
	if (!scount) {
		pmc->gsquery = 0;
		return 0;
	}
	pmc->gsquery = 1;
	return 1;
}
833
834static void igmp_heard_report(struct in_device *in_dev, __be32 group)
835{
836	struct ip_mc_list *im;
837
838	/* Timers are only set for non-local groups */
839
840	if (group == IGMP_ALL_HOSTS)
841		return;
842
843	read_lock(&in_dev->mc_list_lock);
844	for (im=in_dev->mc_list; im!=NULL; im=im->next) {
845		if (im->multiaddr == group) {
846			igmp_stop_timer(im);
847			break;
848		}
849	}
850	read_unlock(&in_dev->mc_list_lock);
851}
852
853static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
854	int len)
855{
856	struct igmphdr 		*ih = igmp_hdr(skb);
857	struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
858	struct ip_mc_list	*im;
859	__be32			group = ih->group;
860	int			max_delay;
861	int			mark = 0;
862
863
864	if (len == 8) {
865		if (ih->code == 0) {
866			/* Alas, old v1 router presents here. */
867
868			max_delay = IGMP_Query_Response_Interval;
869			in_dev->mr_v1_seen = jiffies +
870				IGMP_V1_Router_Present_Timeout;
871			group = 0;
872		} else {
873			/* v2 router present */
874			max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
875			in_dev->mr_v2_seen = jiffies +
876				IGMP_V2_Router_Present_Timeout;
877		}
878		/* cancel the interface change timer */
879		in_dev->mr_ifc_count = 0;
880		if (del_timer(&in_dev->mr_ifc_timer))
881			__in_dev_put(in_dev);
882		/* clear deleted report items */
883		igmpv3_clear_delrec(in_dev);
884	} else if (len < 12) {
885		return;	/* ignore bogus packet; freed by caller */
886	} else if (IGMP_V1_SEEN(in_dev)) {
887		/* This is a v3 query with v1 queriers present */
888		max_delay = IGMP_Query_Response_Interval;
889		group = 0;
890	} else if (IGMP_V2_SEEN(in_dev)) {
891		/* this is a v3 query with v2 queriers present;
892		 * Interpretation of the max_delay code is problematic here.
893		 * A real v2 host would use ih_code directly, while v3 has a
894		 * different encoding. We use the v3 encoding as more likely
895		 * to be intended in a v3 query.
896		 */
897		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
898	} else { /* v3 */
899		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
900			return;
901
902		ih3 = igmpv3_query_hdr(skb);
903		if (ih3->nsrcs) {
904			if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
905					   + ntohs(ih3->nsrcs)*sizeof(__be32)))
906				return;
907			ih3 = igmpv3_query_hdr(skb);
908		}
909
910		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
911		if (!max_delay)
912			max_delay = 1;	/* can't mod w/ 0 */
913		in_dev->mr_maxdelay = max_delay;
914		if (ih3->qrv)
915			in_dev->mr_qrv = ih3->qrv;
916		if (!group) { /* general query */
917			if (ih3->nsrcs)
918				return;	/* no sources allowed */
919			igmp_gq_start_timer(in_dev);
920			return;
921		}
922		/* mark sources to include, if group & source-specific */
923		mark = ih3->nsrcs != 0;
924	}
925
926	/*
927	 * - Start the timers in all of our membership records
928	 *   that the query applies to for the interface on
929	 *   which the query arrived excl. those that belong
930	 *   to a "local" group (224.0.0.X)
931	 * - For timers already running check if they need to
932	 *   be reset.
933	 * - Use the igmp->igmp_code field as the maximum
934	 *   delay possible
935	 */
936	read_lock(&in_dev->mc_list_lock);
937	for (im=in_dev->mc_list; im!=NULL; im=im->next) {
938		int changed;
939
940		if (group && group != im->multiaddr)
941			continue;
942		if (im->multiaddr == IGMP_ALL_HOSTS)
943			continue;
944		spin_lock_bh(&im->lock);
945		if (im->tm_running)
946			im->gsquery = im->gsquery && mark;
947		else
948			im->gsquery = mark;
949		changed = !im->gsquery ||
950			igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
951		spin_unlock_bh(&im->lock);
952		if (changed)
953			igmp_mod_timer(im, max_delay);
954	}
955	read_unlock(&in_dev->mc_list_lock);
956}
957
/* called in rcu_read_lock() section */
/*
 * Main IGMP input handler: validate length and checksum, then dispatch
 * on message type. Queries go to igmp_heard_query(); v1/v2 reports
 * (if not our own looped-back ones, and received via multicast or
 * broadcast) suppress our pending reports via igmp_heard_report().
 * Always consumes the skb (except when PIMv1 takes it over).
 */
int igmp_rcv(struct sk_buff *skb)
{
	/* This basically follows the spec line by line -- see RFC1112 */
	struct igmphdr *ih;
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	int len = skb->len;

	if (in_dev == NULL)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
		goto drop;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = 0;
		if (__skb_checksum_complete(skb))
			goto drop;
	}

	ih = igmp_hdr(skb);
	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		igmp_heard_query(in_dev, skb, len);
		break;
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Is it our report looped back? */
		if (skb_rtable(skb)->fl.iif == 0)
			break;
		/* don't rely on MC router hearing unicast reports */
		if (skb->pkt_type == PACKET_MULTICAST ||
		    skb->pkt_type == PACKET_BROADCAST)
			igmp_heard_report(in_dev, ih->group);
		break;
	case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
		return pim_rcv_v1(skb);
#endif
		/* without PIMSM_V1, IGMP_PIM intentionally falls through
		 * to the ignored types below */
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
	case IGMP_DVMRP:
	case IGMP_TRACE:
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_MTRACE:
	case IGMP_MTRACE_RESP:
		break;
	default:
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
1017
1018#endif
1019
1020
1021/*
1022 *	Add a filter to a device
1023 */
1024
/* Map the IP multicast address to its link-layer multicast address
 * and install it in the device's hardware filter (no-op if the
 * device's ARP type has no multicast mapping).
 */
static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
	   We will get multicast token leakage, when IFF_MULTICAST
	   is changed. This check should be done in dev->set_multicast_list
	   routine. Something sort of:
	   if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
	   --ANK
	   */
	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_add(dev, buf);
}
1040
1041/*
1042 *	Remove a filter from a device
1043 */
1044
1045static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
1046{
1047	char buf[MAX_ADDR_LEN];
1048	struct net_device *dev = in_dev->dev;
1049
1050	if (arp_mc_map(addr, buf, dev, 0) == 0)
1051		dev_mc_del(dev, buf);
1052}
1053
1054#ifdef CONFIG_IP_MULTICAST
1055/*
1056 * deleted ip_mc_list manipulation
1057 */
/* Record a just-deleted group on the interface's tomb list so IGMPv3
 * change reports (TO_IN / BLOCK) can still be retransmitted for it.
 * For INCLUDE-mode groups, ownership of the source lists is moved from
 * "im" to the new tomb entry. Allocation failure silently skips the
 * record (the deletion simply goes unreported).
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
	struct ip_mc_list *pmc;

	/* this is an "ip_mc_list" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
	if (!pmc)
		return;
	spin_lock_bh(&im->lock);
	pmc->interface = im->interface;
	/* tomb entry keeps the in_device pinned until it is reported out */
	in_dev_hold(in_dev);
	pmc->multiaddr = im->multiaddr;
	pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	pmc->sfmode = im->sfmode;
	if (pmc->sfmode == MCAST_INCLUDE) {
		struct ip_sf_list *psf;

		/* steal the source lists; each source will be BLOCKed
		 * crcount more times */
		pmc->tomb = im->tomb;
		pmc->sources = im->sources;
		im->tomb = im->sources = NULL;
		for (psf=pmc->sources; psf; psf=psf->sf_next)
			psf->sf_crcount = pmc->crcount;
	}
	spin_unlock_bh(&im->lock);

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc->next = in_dev->mc_tomb;
	in_dev->mc_tomb = pmc;
	spin_unlock_bh(&in_dev->mc_tomb_lock);
}
1094
/* Remove and free the tomb entry for "multiaddr" if one exists:
 * unlink it under mc_tomb_lock, then free its deleted-source list,
 * drop its device reference and free the entry outside the lock.
 */
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
{
	struct ip_mc_list *pmc, *pmc_prev;
	struct ip_sf_list *psf, *psf_next;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc_prev = NULL;
	for (pmc=in_dev->mc_tomb; pmc; pmc=pmc->next) {
		if (pmc->multiaddr == multiaddr)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			in_dev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);
	if (pmc) {
		for (psf=pmc->tomb; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
		in_dev_put(pmc->interface);
		kfree(pmc);
	}
}
1123
/* Throw away all pending IGMPv3 delete state on the interface: the whole
 * tomb list plus every live group's dead-source list.
 */
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *nextpmc;

	/* detach the entire tomb list under the lock, free it outside */
	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc = in_dev->mc_tomb;
	in_dev->mc_tomb = NULL;
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip_mc_clear_src(pmc);
		in_dev_put(pmc->interface);
		kfree(pmc);
	}
	/* clear dead sources, too */
	read_lock(&in_dev->mc_list_lock);
	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
		struct ip_sf_list *psf, *psf_next;

		/* detach under the group lock, free after dropping it */
		spin_lock_bh(&pmc->lock);
		psf = pmc->tomb;
		pmc->tomb = NULL;
		spin_unlock_bh(&pmc->lock);
		for (; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	read_unlock(&in_dev->mc_list_lock);
}
1155#endif
1156
/* A group lost its last local user (or its device is going away): remove
 * the link-layer filter, stop the report timer, and notify routers in a
 * way appropriate to the IGMP version seen on the link.
 */
static void igmp_group_dropped(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	int reporter;
#endif

	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	/* membership in 224.0.0.1 is implicit and never reported */
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;

	reporter = im->reporter;
	igmp_stop_timer(im);

	if (!in_dev->dead) {
		if (IGMP_V1_SEEN(in_dev))
			goto done;	/* IGMPv1 has no leave message */
		if (IGMP_V2_SEEN(in_dev)) {
			/* the leave is only sent if we were the reporter */
			if (reporter)
				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
			goto done;
		}
		/* IGMPv3 */
		igmpv3_add_delrec(in_dev, im);

		igmp_ifc_event(in_dev);
	}
done:
#endif
	ip_mc_clear_src(im);
}
1193
/* A group gained its first local user: install the link-layer filter and
 * announce the membership according to the querier version on the link.
 */
static void igmp_group_added(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;

	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	/* membership in 224.0.0.1 is implicit and never reported */
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;

	if (in_dev->dead)
		return;
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
		/* v1/v2: schedule a delayed unsolicited membership report */
		spin_lock_bh(&im->lock);
		igmp_start_timer(im, IGMP_Initial_Report_Delay);
		spin_unlock_bh(&im->lock);
		return;
	}
	/* else, v3 */

	/* arm the state-change retransmission count and kick the
	 * interface-change report machinery
	 */
	im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	igmp_ifc_event(in_dev);
#endif
}
1222
1223
1224/*
1225 *	Multicast list managers
1226 */
1227
1228
1229/*
1230 *	A socket has joined a multicast group on device dev.
1231 */
1232
void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
	struct ip_mc_list *im;

	ASSERT_RTNL();

	/* already a member on this interface: just take another reference */
	for (im=in_dev->mc_list; im; im=im->next) {
		if (im->multiaddr == addr) {
			im->users++;
			ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
			goto out;
		}
	}

	im = kmalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	im->users = 1;
	im->interface = in_dev;
	in_dev_hold(in_dev);	/* the group record pins the in_device */
	im->multiaddr = addr;
	/* initial mode is (EX, empty) */
	im->sfmode = MCAST_EXCLUDE;
	im->sfcount[MCAST_INCLUDE] = 0;
	im->sfcount[MCAST_EXCLUDE] = 1;
	im->sources = NULL;
	im->tomb = NULL;
	im->crcount = 0;
	atomic_set(&im->refcnt, 1);
	spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
	im->tm_running = 0;
	setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
	im->unsolicit_count = IGMP_Unsolicited_Report_Count;
	im->reporter = 0;
	im->gsquery = 0;
#endif
	im->loaded = 0;
	/* publish the new entry at the head of the interface list */
	write_lock_bh(&in_dev->mc_list_lock);
	im->next = in_dev->mc_list;
	in_dev->mc_list = im;
	in_dev->mc_count++;
	write_unlock_bh(&in_dev->mc_list_lock);
#ifdef CONFIG_IP_MULTICAST
	/* cancel any pending IGMPv3 delete record for this address */
	igmpv3_del_delrec(in_dev, im->multiaddr);
#endif
	igmp_group_added(im);
	if (!in_dev->dead)
		ip_rt_multicast_event(in_dev);
out:
	return;
}
1286EXPORT_SYMBOL(ip_mc_inc_group);
1287
1288/*
1289 *	Resend IGMP JOIN report; used for bonding.
1290 */
void ip_mc_rejoin_group(struct ip_mc_list *im)
{
#ifdef CONFIG_IP_MULTICAST
	struct in_device *in_dev = im->interface;

	/* membership in 224.0.0.1 is implicit and never reported */
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;

	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
		/* v1/v2: just schedule another unsolicited report */
		igmp_mod_timer(im, IGMP_Initial_Report_Delay);
		return;
	}
	/* else, v3: restart state-change report retransmissions */
	im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	igmp_ifc_event(in_dev);
#endif
}
1309EXPORT_SYMBOL(ip_mc_rejoin_group);
1310
1311/*
1312 *	A socket has left a multicast group on device dev
1313 */
1314
void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
{
	struct ip_mc_list *i, **ip;

	ASSERT_RTNL();

	for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
		if (i->multiaddr == addr) {
			/* last user: unlink the entry and tear it down */
			if (--i->users == 0) {
				write_lock_bh(&in_dev->mc_list_lock);
				*ip = i->next;
				in_dev->mc_count--;
				write_unlock_bh(&in_dev->mc_list_lock);
				igmp_group_dropped(i);

				if (!in_dev->dead)
					ip_rt_multicast_event(in_dev);

				ip_ma_put(i);	/* drop the list's reference */
				return;
			}
			break;
		}
	}
}
1340EXPORT_SYMBOL(ip_mc_dec_group);
1341
1342/* Device changing type */
1343
1344void ip_mc_unmap(struct in_device *in_dev)
1345{
1346	struct ip_mc_list *i;
1347
1348	ASSERT_RTNL();
1349
1350	for (i = in_dev->mc_list; i; i = i->next)
1351		igmp_group_dropped(i);
1352}
1353
1354void ip_mc_remap(struct in_device *in_dev)
1355{
1356	struct ip_mc_list *i;
1357
1358	ASSERT_RTNL();
1359
1360	for (i = in_dev->mc_list; i; i = i->next)
1361		igmp_group_added(i);
1362}
1363
1364/* Device going down */
1365
void ip_mc_down(struct in_device *in_dev)
{
	struct ip_mc_list *i;

	ASSERT_RTNL();

	/* withdraw every group; the records themselves stay on the list */
	for (i=in_dev->mc_list; i; i=i->next)
		igmp_group_dropped(i);

#ifdef CONFIG_IP_MULTICAST
	/* stop the interface timers; a successfully deleted pending timer
	 * presumably held a device reference that must be released here
	 */
	in_dev->mr_ifc_count = 0;
	if (del_timer(&in_dev->mr_ifc_timer))
		__in_dev_put(in_dev);
	in_dev->mr_gq_running = 0;
	if (del_timer(&in_dev->mr_gq_timer))
		__in_dev_put(in_dev);
	igmpv3_clear_delrec(in_dev);
#endif

	ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}
1387
/* Initialise the multicast-related fields of a new in_device. */
void ip_mc_init_dev(struct in_device *in_dev)
{
	ASSERT_RTNL();

	in_dev->mc_tomb = NULL;
#ifdef CONFIG_IP_MULTICAST
	in_dev->mr_gq_running = 0;
	/* general-query response timer */
	setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
			(unsigned long)in_dev);
	in_dev->mr_ifc_count = 0;
	in_dev->mc_count     = 0;
	/* interface-change (state-change report) timer */
	setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
			(unsigned long)in_dev);
	in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
#endif

	rwlock_init(&in_dev->mc_list_lock);
	spin_lock_init(&in_dev->mc_tomb_lock);
}
1407
1408/* Device going up */
1409
1410void ip_mc_up(struct in_device *in_dev)
1411{
1412	struct ip_mc_list *i;
1413
1414	ASSERT_RTNL();
1415
1416	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1417
1418	for (i=in_dev->mc_list; i; i=i->next)
1419		igmp_group_added(i);
1420}
1421
1422/*
1423 *	Device is about to be destroyed: clean up.
1424 */
1425
void ip_mc_destroy_dev(struct in_device *in_dev)
{
	struct ip_mc_list *i;

	ASSERT_RTNL();

	/* Deactivate timers */
	ip_mc_down(in_dev);

	/* pop entries one at a time, releasing the list lock around the
	 * teardown calls and re-taking it to fetch the next entry
	 */
	write_lock_bh(&in_dev->mc_list_lock);
	while ((i = in_dev->mc_list) != NULL) {
		in_dev->mc_list = i->next;
		in_dev->mc_count--;
		write_unlock_bh(&in_dev->mc_list_lock);
		igmp_group_dropped(i);
		ip_ma_put(i);

		write_lock_bh(&in_dev->mc_list_lock);
	}
	write_unlock_bh(&in_dev->mc_list_lock);
}
1447
/* Resolve which in_device an (address, ifindex) request in @imr refers to.
 * Precedence: explicit ifindex, then the given local interface address,
 * then a route lookup on the multicast address itself; in the latter two
 * cases imr->imr_ifindex is filled in.  The returned in_device carries no
 * reference — the refs taken by inetdev_by_index()/ip_dev_find() are
 * dropped again — so callers depend on holding RTNL (visible callers in
 * this file all do) to keep it alive.
 */
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = imr->imr_multiaddr.s_addr } } };
	struct rtable *rt;
	struct net_device *dev = NULL;
	struct in_device *idev = NULL;

	if (imr->imr_ifindex) {
		idev = inetdev_by_index(net, imr->imr_ifindex);
		if (idev)
			__in_dev_put(idev);	/* return unreferenced */
		return idev;
	}
	if (imr->imr_address.s_addr) {
		dev = ip_dev_find(net, imr->imr_address.s_addr);
		if (!dev)
			return NULL;
		dev_put(dev);
	}

	/* no interface address given: route towards the group address */
	if (!dev && !ip_route_output_key(net, &rt, &fl)) {
		dev = rt->dst.dev;
		ip_rt_put(rt);
	}
	if (dev) {
		imr->imr_ifindex = dev->ifindex;
		idev = __in_dev_get_rtnl(dev);
	}
	return idev;
}
1479
1480/*
1481 *	Join a socket to a group
1482 */
/* Per-socket cap on multicast group memberships (see ip_mc_join_group()). */
int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
/* Per-socket cap on source-filter list length (see ip_mc_source()). */
int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
1485
1486
/* Drop one @sfmode reference on source *psfsrc in the group's interface
 * filter.  Returns a negative errno if the source is missing, 1 if the
 * source was moved to the tomb list (so an IGMPv3 change record is
 * needed), otherwise 0.
 */
static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong =>  bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (psf->sf_count[sfmode] == 0) {
		/* last reference in this mode: let routing re-evaluate */
		ip_rt_multicast_event(pmc->interface);
	}
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct in_device *in_dev = pmc->interface;
#endif

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
		/* a previously-active source on a v3 network moves to the
		 * tomb list so change reports can still mention it
		 */
		if (psf->sf_oldin &&
		    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
			psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
				IGMP_Unsolicited_Report_Count;
			psf->sf_next = pmc->tomb;
			pmc->tomb = psf;
			rv = 1;
		} else
#endif
			kfree(psf);
	}
	return rv;
}
1531
#ifndef CONFIG_IP_MULTICAST
/* Without router-report support, interface-change events are a no-op. */
#define igmp_ifc_event(x)	do { } while (0)
#endif
1535
/* Remove @sfcount sources from @pmca's interface filter in mode @sfmode.
 * When @delta is zero this is a full-state change that also drops one
 * reference on the group's mode count, possibly flipping it to INCLUDE.
 */
static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int	changerec = 0;
	int	i, err;

	if (!in_dev)
		return -ENODEV;
	/* locate the interface's record for this group */
	read_lock(&in_dev->mc_list_lock);
	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		read_unlock(&in_dev->mc_list_lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	read_unlock(&in_dev->mc_list_lock);
#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	if (!delta) {
		err = -EINVAL;
		if (!pmc->sfcount[sfmode])
			goto out_unlock;
		pmc->sfcount[sfmode]--;
	}
	err = 0;
	for (i=0; i<sfcount; i++) {
		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;	/* source moved to the tomb list */
		if (!err && rv < 0)
			err = rv;	/* remember the first failure */
	}
	if (pmc->sfmode == MCAST_EXCLUDE &&
	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
#endif

		/* filter mode change */
		pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		/* restart change-report retransmissions for the new mode */
		pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
			IGMP_Unsolicited_Report_Count;
		in_dev->mr_ifc_count = pmc->crcount;
		for (psf=pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(pmc->interface);
	} else if (sf_setstate(pmc) || changerec) {
		igmp_ifc_event(pmc->interface);
#endif
	}
out_unlock:
	spin_unlock_bh(&pmc->lock);
	return err;
}
1598
1599/*
1600 * Add multicast single-source filter to the interface list
1601 */
1602static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
1603	__be32 *psfsrc, int delta)
1604{
1605	struct ip_sf_list *psf, *psf_prev;
1606
1607	psf_prev = NULL;
1608	for (psf=pmc->sources; psf; psf=psf->sf_next) {
1609		if (psf->sf_inaddr == *psfsrc)
1610			break;
1611		psf_prev = psf;
1612	}
1613	if (!psf) {
1614		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
1615		if (!psf)
1616			return -ENOBUFS;
1617		psf->sf_inaddr = *psfsrc;
1618		if (psf_prev) {
1619			psf_prev->sf_next = psf;
1620		} else
1621			pmc->sources = psf;
1622	}
1623	psf->sf_count[sfmode]++;
1624	if (psf->sf_count[sfmode] == 1) {
1625		ip_rt_multicast_event(pmc->interface);
1626	}
1627	return 0;
1628}
1629
1630#ifdef CONFIG_IP_MULTICAST
/* Snapshot each source's active/inactive state (sf_oldin) before a filter
 * update, so sf_setstate() can detect transitions afterwards.
 */
static void sf_markstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];

	for (psf=pmc->sources; psf; psf=psf->sf_next)
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			/* EXCLUDE mode: a source counts as "old in" when
			 * every excluder lists it and no-one includes it
			 */
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}
1644
/* Compare each source's current state against the sf_oldin snapshot taken
 * by sf_markstate() and maintain per-source "delete" records on the tomb
 * list accordingly.  Returns the number of sources whose state changed.
 */
static int sf_setstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *dpsf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
	int qrv = pmc->interface->mr_qrv;
	int new_in, rv;

	rv = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		/* recompute the source's state under the updated filter */
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip_sf_list *prev = NULL;

				/* source became active: retire any pending
				 * delete record for it
				 */
				for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) {
					if (dpsf->sf_inaddr == psf->sf_inaddr)
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {

			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next)
				if (dpsf->sf_inaddr == psf->sf_inaddr)
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->lock held by callers */
				dpsf->sf_next = pmc->tomb;
				pmc->tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
1703#endif
1704
1705/*
1706 * Add multicast source filter list to the interface list
1707 */
1708static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1709			 int sfcount, __be32 *psfsrc, int delta)
1710{
1711	struct ip_mc_list *pmc;
1712	int	isexclude;
1713	int	i, err;
1714
1715	if (!in_dev)
1716		return -ENODEV;
1717	read_lock(&in_dev->mc_list_lock);
1718	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
1719		if (*pmca == pmc->multiaddr)
1720			break;
1721	}
1722	if (!pmc) {
1723		/* MCA not found?? bug */
1724		read_unlock(&in_dev->mc_list_lock);
1725		return -ESRCH;
1726	}
1727	spin_lock_bh(&pmc->lock);
1728	read_unlock(&in_dev->mc_list_lock);
1729
1730#ifdef CONFIG_IP_MULTICAST
1731	sf_markstate(pmc);
1732#endif
1733	isexclude = pmc->sfmode == MCAST_EXCLUDE;
1734	if (!delta)
1735		pmc->sfcount[sfmode]++;
1736	err = 0;
1737	for (i=0; i<sfcount; i++) {
1738		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
1739		if (err)
1740			break;
1741	}
1742	if (err) {
1743		int j;
1744
1745		pmc->sfcount[sfmode]--;
1746		for (j=0; j<i; j++)
1747			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
1748	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
1749#ifdef CONFIG_IP_MULTICAST
1750		struct ip_sf_list *psf;
1751		in_dev = pmc->interface;
1752#endif
1753
1754		/* filter mode change */
1755		if (pmc->sfcount[MCAST_EXCLUDE])
1756			pmc->sfmode = MCAST_EXCLUDE;
1757		else if (pmc->sfcount[MCAST_INCLUDE])
1758			pmc->sfmode = MCAST_INCLUDE;
1759#ifdef CONFIG_IP_MULTICAST
1760		/* else no filters; keep old mode for reports */
1761
1762		pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
1763			IGMP_Unsolicited_Report_Count;
1764		in_dev->mr_ifc_count = pmc->crcount;
1765		for (psf=pmc->sources; psf; psf = psf->sf_next)
1766			psf->sf_crcount = 0;
1767		igmp_ifc_event(in_dev);
1768	} else if (sf_setstate(pmc)) {
1769		igmp_ifc_event(in_dev);
1770#endif
1771	}
1772	spin_unlock_bh(&pmc->lock);
1773	return err;
1774}
1775
1776static void ip_mc_clear_src(struct ip_mc_list *pmc)
1777{
1778	struct ip_sf_list *psf, *nextpsf;
1779
1780	for (psf=pmc->tomb; psf; psf=nextpsf) {
1781		nextpsf = psf->sf_next;
1782		kfree(psf);
1783	}
1784	pmc->tomb = NULL;
1785	for (psf=pmc->sources; psf; psf=nextpsf) {
1786		nextpsf = psf->sf_next;
1787		kfree(psf);
1788	}
1789	pmc->sources = NULL;
1790	pmc->sfmode = MCAST_EXCLUDE;
1791	pmc->sfcount[MCAST_INCLUDE] = 0;
1792	pmc->sfcount[MCAST_EXCLUDE] = 1;
1793}
1794
1795
1796/*
1797 * Join a multicast group
1798 */
int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
{
	int err;
	__be32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml = NULL, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		iml = NULL;
		err = -ENODEV;
		goto done;
	}

	err = -EADDRINUSE;
	ifindex = imr->imr_ifindex;
	/* refuse a duplicate join; count memberships along the way */
	for (i = inet->mc_list; i; i = i->next) {
		if (i->multi.imr_multiaddr.s_addr == addr &&
		    i->multi.imr_ifindex == ifindex)
			goto done;
		count++;
	}
	err = -ENOBUFS;
	if (count >= sysctl_igmp_max_memberships)
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
	if (iml == NULL)
		goto done;

	/* new memberships start in (EXCLUDE, empty) mode */
	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next = inet->mc_list;
	iml->sflist = NULL;
	iml->sfmode = MCAST_EXCLUDE;
	rcu_assign_pointer(inet->mc_list, iml);
	ip_mc_inc_group(in_dev, addr);
	err = 0;
done:
	rtnl_unlock();
	return err;
}
1849EXPORT_SYMBOL(ip_mc_join_group);
1850
1851static void ip_sf_socklist_reclaim(struct rcu_head *rp)
1852{
1853	struct ip_sf_socklist *psf;
1854
1855	psf = container_of(rp, struct ip_sf_socklist, rcu);
1856	/* sk_omem_alloc should have been decreased by the caller*/
1857	kfree(psf);
1858}
1859
/* Remove the interface-filter contribution of one socket membership and
 * free its source list via RCU.  A NULL sflist means the membership was a
 * plain any-source (EXCLUDE, empty) join.
 */
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
			   struct in_device *in_dev)
{
	struct ip_sf_socklist *psf = iml->sflist;
	int err;

	if (psf == NULL) {
		/* any-source empty exclude case */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, 0, NULL, 0);
	}
	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, psf->sl_count, psf->sl_addr, 0);
	rcu_assign_pointer(iml->sflist, NULL);
	/* decrease mem now to avoid the memleak warning */
	atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
	call_rcu(&psf->rcu, ip_sf_socklist_reclaim);
	return err;
}
1879
1880
1881static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1882{
1883	struct ip_mc_socklist *iml;
1884
1885	iml = container_of(rp, struct ip_mc_socklist, rcu);
1886	/* sk_omem_alloc should have been decreased by the caller*/
1887	kfree(iml);
1888}
1889
1890
1891/*
1892 *	Ask a socket to leave a group.
1893 */
1894
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml, **imlp;
	struct in_device *in_dev;
	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

	rtnl_lock();
	in_dev = ip_mc_find_dev(net, imr);
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
		if (iml->multi.imr_multiaddr.s_addr != group)
			continue;
		if (ifindex) {
			/* an explicit ifindex must match exactly */
			if (iml->multi.imr_ifindex != ifindex)
				continue;
		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
				iml->multi.imr_address.s_addr)
			continue;

		/* tear down the source filter first, then the membership */
		(void) ip_mc_leave_src(sk, iml, in_dev);

		rcu_assign_pointer(*imlp, iml->next);

		if (in_dev)
			ip_mc_dec_group(in_dev, group);
		rtnl_unlock();
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
		return 0;
	}
	if (!in_dev)
		ret = -ENODEV;
	rtnl_unlock();
	return ret;
}
1935
/* Add (add != 0) or delete one source address in the socket's per-group
 * source filter, in filter mode @omode, and mirror the change into the
 * interface filter.  Deleting the last source of an INCLUDE filter is
 * treated as a full leave of the group.
 */
int ip_mc_source(int add, int omode, struct sock *sk, struct
	ip_mreq_source *mreqs, int ifindex)
{
	int err;
	struct ip_mreqn imr;
	__be32 addr = mreqs->imr_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	/* find the socket membership this filter change applies to */
	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
			NULL, 0);
		pmc->sfmode = omode;
	}

	psl = pmc->sflist;
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		/* locate the source in the socket's list */
		rv = !0;
		for (i=0; i<psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
				sizeof(__be32));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
			&mreqs->imr_sourceaddr, 1);

		/* close the hole left by the deleted slot */
		for (j=i+1; j<psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_igmp_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		/* grow the list by IP_SFBLOCK entries; the old list is
		 * copied over and released via RCU
		 */
		struct ip_sf_socklist *newpsl;
		int count = IP_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP_SFBLOCK;
		if (psl) {
			for (i=0; i<psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			/* decrease mem now to avoid the memleak warning */
			atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
			call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i=0; i<psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
			sizeof(__be32));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already there is an error */
		goto done;
	/* open a hole at slot i and insert the new source */
	for (j=psl->sl_count-1; j>=i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = mreqs->imr_sourceaddr;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
		&mreqs->imr_sourceaddr, 1);
done:
	rtnl_unlock();
	if (leavegroup)
		return ip_mc_leave_group(sk, &imr);
	return err;
}
2072
/* Replace the socket's entire source filter for one group (mode plus
 * source list) and update the interface filter to match.  An INCLUDE
 * filter with no sources is equivalent to leaving the group.
 */
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
{
	int err = 0;
	struct ip_mreqn	imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;
	if (msf->imsf_fmode != MCAST_INCLUDE &&
	    msf->imsf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (msf->imsf_numsrc) {
		newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
							   GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
		memcpy(newpsl->sl_addr, msf->imsf_slist,
			msf->imsf_numsrc * sizeof(msf->imsf_slist[0]));
		/* install the new sources in the interface filter first */
		err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
			msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		/* no sources: just record the new mode */
		newpsl = NULL;
		(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
				     msf->imsf_fmode, 0, NULL, 0);
	}
	psl = pmc->sflist;
	if (psl) {
		/* retire the old filter's interface-level references */
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
		call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
	} else
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			0, NULL, 0);
	rcu_assign_pointer(pmc->sflist, newpsl);
	pmc->sfmode = msf->imsf_fmode;
	err = 0;
done:
	rtnl_unlock();
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}
2158
/* Copy the socket's current source filter for the group in @msf back to
 * user space: filter mode, total source count, and at most the caller's
 * imsf_numsrc addresses.
 */
int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
	struct ip_msfilter __user *optval, int __user *optlen)
{
	int err, len, count, copycount;
	struct ip_mreqn	imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = 0;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	msf->imsf_fmode = pmc->sfmode;
	psl = pmc->sflist;
	rtnl_unlock();
	if (!psl) {
		len = 0;
		count = 0;
	} else {
		count = psl->sl_count;
	}
	/* never copy more sources than the caller has room for */
	copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
	len = copycount * sizeof(psl->sl_addr[0]);
	msf->imsf_numsrc = count;
	if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) {
		return -EFAULT;
	}
	/* len != 0 implies psl != NULL, so the dereference is safe */
	if (len &&
	    copy_to_user(&optval->imsf_slist[0], psl->sl_addr, len))
		return -EFAULT;
	return 0;
done:
	rtnl_unlock();
	return err;
}
2218
/* Like ip_mc_msfget() but using the protocol-independent group_filter
 * layout: each source is copied out as a zeroed sockaddr_storage.
 */
int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	struct sockaddr_in *psin;
	__be32 addr;
	struct ip_mc_socklist *pmc;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;

	psin = (struct sockaddr_in *)&gsf->gf_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	addr = psin->sin_addr.s_addr;
	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	err = -EADDRNOTAVAIL;

	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
		if (pmc->multi.imr_multiaddr.s_addr == addr &&
		    pmc->multi.imr_ifindex == gsf->gf_interface)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = pmc->sflist;
	rtnl_unlock();
	count = psl ? psl->sl_count : 0;
	/* cap the copy at the space the caller provided */
	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	for (i=0; i<copycount; i++) {
		/* copycount > 0 implies psl != NULL */
		struct sockaddr_storage ss;

		psin = (struct sockaddr_in *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin->sin_family = AF_INET;
		psin->sin_addr.s_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	rtnl_unlock();
	return err;
}
2272
2273/*
2274 * check if a multicast source filter allows delivery for a given <src,dst,intf>
2275 */
int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *pmc;
	struct ip_sf_socklist *psl;
	int i;
	int ret;

	/* non-multicast destinations are always allowed */
	ret = 1;
	if (!ipv4_is_multicast(loc_addr))
		goto out;

	rcu_read_lock();
	for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
		    pmc->multi.imr_ifindex == dif)
			break;
	}
	/* not joined on this interface: mc_all decides */
	ret = inet->mc_all;
	if (!pmc)
		goto unlock;
	psl = pmc->sflist;
	/* no source list: EXCLUDE mode (any-source) accepts everything */
	ret = (pmc->sfmode == MCAST_EXCLUDE);
	if (!psl)
		goto unlock;

	for (i=0; i<psl->sl_count; i++) {
		if (psl->sl_addr[i] == rmt_addr)
			break;
	}
	ret = 0;
	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
		goto unlock;	/* source not in the include list */
	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
		goto unlock;	/* source explicitly excluded */
	ret = 1;
unlock:
	rcu_read_unlock();
out:
	return ret;
}
2317
2318/*
2319 *	A socket is closing.
2320 */
2321
void ip_mc_drop_socket(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct net *net = sock_net(sk);

	/* Fast path: this socket never joined a group. */
	if (inet->mc_list == NULL)
		return;

	rtnl_lock();
	/* For each membership: unlink it (RCU-visibly), drop any source
	 * filters, leave the group on the device, and free the entry only
	 * after a grace period since lockless readers may still walk it. */
	while ((iml = inet->mc_list) != NULL) {
		struct in_device *in_dev;
		rcu_assign_pointer(inet->mc_list, iml->next);

		/* inetdev_by_index() takes a reference; may be NULL if the
		 * device is already gone. */
		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		(void) ip_mc_leave_src(sk, iml, in_dev);
		if (in_dev != NULL) {
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
			in_dev_put(in_dev);
		}
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
	}
	rtnl_unlock();
}
2348
/*
 * Decide whether a packet for multicast group @mc_addr from @src_addr may
 * be delivered on @in_dev, based on the device's merged per-group source
 * filter state.  IGMP packets are always accepted for joined groups.
 * Returns non-zero to allow delivery.
 */
int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
{
	struct ip_mc_list *im;
	struct ip_sf_list *psf;
	int rv = 0;

	read_lock(&in_dev->mc_list_lock);
	/* Is the group joined on this device at all? */
	for (im=in_dev->mc_list; im; im=im->next) {
		if (im->multiaddr == mc_addr)
			break;
	}
	if (im && proto == IPPROTO_IGMP) {
		rv = 1;
	} else if (im) {
		if (src_addr) {
			for (psf=im->sources; psf; psf=psf->sf_next) {
				if (psf->sf_inaddr == src_addr)
					break;
			}
			if (psf)
				/* Allow if some member INCLUDEs this source,
				 * or if not every EXCLUDE-mode member
				 * excludes it. */
				rv = psf->sf_count[MCAST_INCLUDE] ||
					psf->sf_count[MCAST_EXCLUDE] !=
					im->sfcount[MCAST_EXCLUDE];
			else
				/* Unlisted source: allowed only when some
				 * member is in EXCLUDE mode. */
				rv = im->sfcount[MCAST_EXCLUDE] != 0;
		} else
			rv = 1; /* unspecified source; tentatively allow */
	}
	read_unlock(&in_dev->mc_list_lock);
	return rv;
}
2380
2381#if defined(CONFIG_PROC_FS)
/* Iterator state for /proc/net/igmp: the device currently being walked
 * and its in_device.  The in_device's mc_list_lock is read-held between
 * ->start/->next and ->stop while in_dev is non-NULL. */
struct igmp_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *in_dev;
};

/* Access the iterator state embedded in the seq_file. */
#define	igmp_mc_seq_private(seq)	((struct igmp_mc_iter_state *)(seq)->private)
2389
/*
 * Position the iterator at the first multicast group in the namespace.
 * Caller holds the RCU read lock.  On success, returns the group with
 * the owning in_device's mc_list_lock read-held (released later by
 * igmp_mc_get_next() or igmp_mc_seq_stop()); state->in_dev records the
 * locked device.  Returns NULL (with no lock held) if no device has any
 * groups.
 */
static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_mc_list *im = NULL;
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *in_dev;

		in_dev = __in_dev_get_rcu(state->dev);
		if (!in_dev)
			continue;
		read_lock(&in_dev->mc_list_lock);
		im = in_dev->mc_list;
		if (im) {
			/* Keep the lock held across the caller's use. */
			state->in_dev = in_dev;
			break;
		}
		read_unlock(&in_dev->mc_list_lock);
	}
	return im;
}
2413
/*
 * Advance the iterator to the next multicast group, moving to the next
 * device when the current device's list is exhausted.  Drops the old
 * device's mc_list_lock and acquires the new one; on end of iteration
 * returns NULL with state->in_dev cleared and no lock held.  Caller
 * holds the RCU read lock.
 */
static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
	im = im->next;
	while (!im) {
		/* Done with this device's list: release its lock. */
		if (likely(state->in_dev != NULL))
			read_unlock(&state->in_dev->mc_list_lock);

		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->in_dev = NULL;
			break;
		}
		state->in_dev = __in_dev_get_rcu(state->dev);
		if (!state->in_dev)
			continue;
		read_lock(&state->in_dev->mc_list_lock);
		im = state->in_dev->mc_list;
	}
	return im;
}
2435
2436static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
2437{
2438	struct ip_mc_list *im = igmp_mc_get_first(seq);
2439	if (im)
2440		while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
2441			--pos;
2442	return pos ? NULL : im;
2443}
2444
2445static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
2446	__acquires(rcu)
2447{
2448	rcu_read_lock();
2449	return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2450}
2451
2452static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2453{
2454	struct ip_mc_list *im;
2455	if (v == SEQ_START_TOKEN)
2456		im = igmp_mc_get_first(seq);
2457	else
2458		im = igmp_mc_get_next(seq, v);
2459	++*pos;
2460	return im;
2461}
2462
/*
 * seq_file ->stop: release the mc_list_lock still held from the last
 * ->start/->next (if iteration ended mid-device), clear the cursor, and
 * drop the RCU read lock taken in ->start.
 */
static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
	if (likely(state->in_dev != NULL)) {
		read_unlock(&state->in_dev->mc_list_lock);
		state->in_dev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}
2474
/*
 * seq_file ->show: print the column header for the start token;
 * otherwise print one group line, preceded by a per-device summary line
 * when the group is the first entry on its device's list.
 */
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "Idx\tDevice    : Count Querier\tGroup    Users Timer\tReporter\n");
	else {
		struct ip_mc_list *im = (struct ip_mc_list *)v;
		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
		char   *querier;
#ifdef CONFIG_IP_MULTICAST
		/* Report the lowest IGMP version recently seen on the link. */
		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
			  "V3";
#else
		querier = "NONE";
#endif

		/* First group of this device: emit the device summary. */
		if (state->in_dev->mc_list == im) {
			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
		}

		/* Group address (hex), refcount, timer state and remaining
		 * time in clock ticks, and whether we were last reporter. */
		seq_printf(seq,
			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
			   im->multiaddr, im->users,
			   im->tm_running, im->tm_running ?
			   jiffies_to_clock_t(im->timer.expires-jiffies) : 0,
			   im->reporter);
	}
	return 0;
}
2506
/* Iterator callbacks backing /proc/net/igmp. */
static const struct seq_operations igmp_mc_seq_ops = {
	.start	=	igmp_mc_seq_start,
	.next	=	igmp_mc_seq_next,
	.stop	=	igmp_mc_seq_stop,
	.show	=	igmp_mc_seq_show,
};
2513
/* ->open for /proc/net/igmp: per-net seq_file with our iterator state. */
static int igmp_mc_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp_mc_seq_ops,
			sizeof(struct igmp_mc_iter_state));
}
2519
/* File operations for /proc/net/igmp. */
static const struct file_operations igmp_mc_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	igmp_mc_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
};
2527
/* Iterator state for /proc/net/mcfilter: current device, its in_device
 * (mc_list_lock read-held while idev != NULL) and the current group
 * (im->lock held bh-disabled while im != NULL). */
struct igmp_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *idev;
	struct ip_mc_list *im;
};

/* Access the iterator state embedded in the seq_file. */
#define igmp_mcf_seq_private(seq)	((struct igmp_mcf_iter_state *)(seq)->private)
2536
/*
 * Position the mcfilter iterator at the first source-filter entry in the
 * namespace.  Caller holds the RCU read lock.  On success, returns the
 * entry with both the in_device's mc_list_lock read-held and the owning
 * group's spinlock held (bh-disabled); both are released later by
 * igmp_mcf_get_next() or igmp_mcf_seq_stop().  Returns NULL with no
 * locks held when nothing is found.
 */
static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_sf_list *psf = NULL;
	struct ip_mc_list *im = NULL;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;
		idev = __in_dev_get_rcu(state->dev);
		if (unlikely(idev == NULL))
			continue;
		read_lock(&idev->mc_list_lock);
		im = idev->mc_list;
		if (likely(im != NULL)) {
			/* Only the first group per device is inspected here;
			 * igmp_mcf_get_next() walks the rest. */
			spin_lock_bh(&im->lock);
			psf = im->sources;
			if (likely(psf != NULL)) {
				/* Keep both locks held for the caller. */
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->lock);
		}
		read_unlock(&idev->mc_list_lock);
	}
	return psf;
}
2567
/*
 * Advance the mcfilter iterator to the next source entry, moving through
 * the current group's source list, then the device's group list, then
 * the namespace's device list.  Locks are handed off as the cursor
 * moves (group spinlock, then device rwlock); at end of iteration all
 * locks are dropped and NULL is returned.  Caller holds the RCU read
 * lock.
 */
static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		/* Current group exhausted: drop its lock, try the next one. */
		spin_unlock_bh(&state->im->lock);
		state->im = state->im->next;
		while (!state->im) {
			/* Current device exhausted: drop its list lock. */
			if (likely(state->idev != NULL))
				read_unlock(&state->idev->mc_list_lock);

			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in_dev_get_rcu(state->dev);
			if (!state->idev)
				continue;
			read_lock(&state->idev->mc_list_lock);
			state->im = state->idev->mc_list;
		}
		/* Defensive: the inner loop exits via goto when devices run
		 * out, so state->im appears to be non-NULL here. */
		if (!state->im)
			break;
		spin_lock_bh(&state->im->lock);
		psf = state->im->sources;
	}
out:
	return psf;
}
2599
2600static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
2601{
2602	struct ip_sf_list *psf = igmp_mcf_get_first(seq);
2603	if (psf)
2604		while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
2605			--pos;
2606	return pos ? NULL : psf;
2607}
2608
2609static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2610	__acquires(rcu)
2611{
2612	rcu_read_lock();
2613	return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2614}
2615
2616static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2617{
2618	struct ip_sf_list *psf;
2619	if (v == SEQ_START_TOKEN)
2620		psf = igmp_mcf_get_first(seq);
2621	else
2622		psf = igmp_mcf_get_next(seq, v);
2623	++*pos;
2624	return psf;
2625}
2626
/*
 * seq_file ->stop: release any locks still held from the last
 * ->start/->next — the current group's spinlock first, then the
 * device's mc_list_lock — clear the cursor, and drop the RCU read lock
 * taken in ->start.
 */
static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
	if (likely(state->im != NULL)) {
		spin_unlock_bh(&state->im->lock);
		state->im = NULL;
	}
	if (likely(state->idev != NULL)) {
		read_unlock(&state->idev->mc_list_lock);
		state->idev = NULL;
	}
	state->dev = NULL;
	rcu_read_unlock();
}
2642
/*
 * seq_file ->show: print the column header for the start token;
 * otherwise print one source-filter entry (device, group, source, and
 * INCLUDE/EXCLUDE reference counts).
 */
static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip_sf_list *psf = (struct ip_sf_list *)v;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%3s %6s "
			   "%10s %10s %6s %6s\n", "Idx",
			   "Device", "MCA",
			   "SRC", "INC", "EXC");
	} else {
		/* Addresses are stored in network byte order; convert to
		 * host order for the hex display. */
		seq_printf(seq,
			   "%3d %6.6s 0x%08x "
			   "0x%08x %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   ntohl(state->im->multiaddr),
			   ntohl(psf->sf_inaddr),
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}
2666
/* Iterator callbacks backing /proc/net/mcfilter. */
static const struct seq_operations igmp_mcf_seq_ops = {
	.start	=	igmp_mcf_seq_start,
	.next	=	igmp_mcf_seq_next,
	.stop	=	igmp_mcf_seq_stop,
	.show	=	igmp_mcf_seq_show,
};
2673
/* ->open for /proc/net/mcfilter: per-net seq_file with our state. */
static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp_mcf_seq_ops,
			sizeof(struct igmp_mcf_iter_state));
}
2679
/* File operations for /proc/net/mcfilter. */
static const struct file_operations igmp_mcf_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	igmp_mcf_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
};
2687
2688static int __net_init igmp_net_init(struct net *net)
2689{
2690	struct proc_dir_entry *pde;
2691
2692	pde = proc_net_fops_create(net, "igmp", S_IRUGO, &igmp_mc_seq_fops);
2693	if (!pde)
2694		goto out_igmp;
2695	pde = proc_net_fops_create(net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops);
2696	if (!pde)
2697		goto out_mcfilter;
2698	return 0;
2699
2700out_mcfilter:
2701	proc_net_remove(net, "igmp");
2702out_igmp:
2703	return -ENOMEM;
2704}
2705
/* Per-namespace teardown: remove both proc entries (reverse of init). */
static void __net_exit igmp_net_exit(struct net *net)
{
	proc_net_remove(net, "mcfilter");
	proc_net_remove(net, "igmp");
}
2711
/* Hooks run for every network namespace created or destroyed. */
static struct pernet_operations igmp_net_ops = {
	.init = igmp_net_init,
	.exit = igmp_net_exit,
};
2716
/* Boot-time registration of the per-namespace proc entries above. */
int __init igmp_mc_proc_init(void)
{
	return register_pernet_subsys(&igmp_net_ops);
}
2721#endif
2722