• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/net/bridge/
1/*
2 * Bridge multicast support.
3 *
4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/err.h>
14#include <linux/if_ether.h>
15#include <linux/igmp.h>
16#include <linux/jhash.h>
17#include <linux/kernel.h>
18#include <linux/log2.h>
19#include <linux/netdevice.h>
20#include <linux/netfilter_bridge.h>
21#include <linux/random.h>
22#include <linux/rculist.h>
23#include <linux/skbuff.h>
24#include <linux/slab.h>
25#include <linux/timer.h>
26#include <net/ip.h>
27#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28#include <net/ipv6.h>
29#include <net/mld.h>
30#include <net/addrconf.h>
31#include <net/ip6_checksum.h>
32#endif
33
34#include "br_private.h"
35
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* Return 1 if @addr is a multicast address of at most link-local scope. */
static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
{
	return ipv6_addr_is_multicast(addr) &&
	       IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL;
}
#endif
45
46static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
47{
48	if (a->proto != b->proto)
49		return 0;
50	switch (a->proto) {
51	case htons(ETH_P_IP):
52		return a->u.ip4 == b->u.ip4;
53#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
54	case htons(ETH_P_IPV6):
55		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
56#endif
57	}
58	return 0;
59}
60
61static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
62{
63	return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
64}
65
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* Hash an IPv6 group address into the mdb bucket range [0, max). */
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip)
{
	u32 h = jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret);

	return h & (mdb->max - 1);
}
#endif
73
74static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
75			     struct br_ip *ip)
76{
77	switch (ip->proto) {
78	case htons(ETH_P_IP):
79		return __br_ip4_hash(mdb, ip->u.ip4);
80#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
81	case htons(ETH_P_IPV6):
82		return __br_ip6_hash(mdb, &ip->u.ip6);
83#endif
84	}
85	return 0;
86}
87
/*
 * Walk one hash chain of the current table version looking for @dst.
 * Caller must be in an RCU read-side section or hold br->multicast_lock.
 */
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;

	hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}
101
102static struct net_bridge_mdb_entry *br_mdb_ip_get(
103	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
104{
105	if (!mdb)
106		return NULL;
107
108	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
109}
110
111static struct net_bridge_mdb_entry *br_mdb_ip4_get(
112	struct net_bridge_mdb_htable *mdb, __be32 dst)
113{
114	struct br_ip br_dst;
115
116	br_dst.u.ip4 = dst;
117	br_dst.proto = htons(ETH_P_IP);
118
119	return br_mdb_ip_get(mdb, &br_dst);
120}
121
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* Convenience wrapper: look up an IPv6 group address. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
{
	struct br_ip br_dst;

	br_dst.proto = htons(ETH_P_IPV6);
	ipv6_addr_copy(&br_dst.u.ip6, dst);

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif
134
/*
 * Forwarding-path lookup: map an incoming skb's destination address to
 * an mdb entry.  Returns NULL (caller floods) when snooping is
 * disabled, when the packet itself is IGMP/MLD control traffic, or for
 * non-IP protocols.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb)
{
	struct net_bridge_mdb_htable *mdb = br->mdb;
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	/* Marked earlier by the snooping hook; control frames are flooded. */
	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case htons(ETH_P_IPV6):
		ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}
164
/*
 * RCU callback run after a rehash grace period: the new table's ->old
 * still points at the superseded table; free it now that no reader can
 * hold a reference.
 */
static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}
175
/*
 * Re-link every entry of @old into @new using @new's version of the
 * per-entry hlist nodes (each entry carries two nodes, so it can sit on
 * both tables across the RCU grace period).  When @elasticity is
 * non-zero, verify afterwards that no chain in @new exceeds it;
 * -EINVAL tells the caller to retry with a fresh hash secret.
 */
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	/* Measure the longest chain produced by the new secret/size. */
	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}
205
206static void br_multicast_free_pg(struct rcu_head *head)
207{
208	struct net_bridge_port_group *p =
209		container_of(head, struct net_bridge_port_group, rcu);
210
211	kfree(p);
212}
213
214static void br_multicast_free_group(struct rcu_head *head)
215{
216	struct net_bridge_mdb_entry *mp =
217		container_of(head, struct net_bridge_mdb_entry, rcu);
218
219	kfree(mp);
220}
221
/*
 * Membership timer for the bridge device's own membership in a group:
 * drop the bridge from the group and, when no ports remain either,
 * unlink the entry from the mdb and free it after a grace period.
 */
static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	/* timer_pending() means someone re-armed us after expiry: bail. */
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	/* Ports still joined: keep the entry alive. */
	if (mp->ports)
		goto out;

	mdb = br->mdb;
	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	del_timer(&mp->query_timer);
	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}
247
/*
 * Unlink @pg from its group's port list and from the port's mglist,
 * then free it after an RCU grace period.  If the group is now empty
 * (no ports, bridge itself not a member), fire the group timer
 * immediately so the entry is reclaimed.
 * Called with br->multicast_lock held.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb = br->mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group **pp;

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		del_timer(&p->query_timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* @pg was not on its own group's list: internal inconsistency. */
	WARN_ON(1);
}
279
/*
 * Membership timer for one port's membership in one group: remove the
 * port group when the timer genuinely expired (not re-armed, still
 * hashed on the port's mglist).
 */
static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist))
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}
295
/*
 * Allocate a hash table of @max buckets and publish it via RCU,
 * migrating the old entries across when a previous table exists.
 * A non-zero @elasticity forces a fresh hash secret and a chain-length
 * re-check; on any failure the previous table remains in place.
 * Called with br->multicast_lock held.
 */
static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = *mdbp;
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	/* Flip the hlist-node version so entries can live on both tables. */
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	/* Free the old table once all RCU readers have moved on. */
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}
341
/*
 * Hand-build an IGMP membership query skb: Ethernet header to the
 * all-hosts multicast MAC, an IPv4 header carrying the Router Alert
 * option (hence ihl = 6, i.e. 24 bytes), and an IGMP query for @group
 * (0 means a general query).  Returns NULL on allocation failure.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Destination 01:00:5e:00:00:01 = 224.0.0.1 (all hosts). */
	memcpy(eth->h_source, br->dev->dev_addr, 6);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;		/* 20-byte header + 4-byte Router Alert */
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	/* Router Alert option immediately after the fixed header. */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* ihl * 4 bytes of IP header */

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	/* Group-specific queries get the shorter last-member interval. */
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	/* Leave skb->data at the network header, as after normal RX. */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
406
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Hand-build an MLD membership query skb: Ethernet header, IPv6 header
 * with an 8-byte Hop-by-Hop extension carrying the Router Alert option,
 * and an MLD query for @group (the unspecified address means a general
 * query).  Returns NULL on allocation failure.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    struct in6_addr *group)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	ipv6_eth_mc_map(group, eth->h_dest);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* Version 6, zero traffic class / flow label. */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
	/* ff02::1, the all-nodes link-local group. */
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD0;		/* Pad0 */
	hopopt[7] = IPV6_TLV_PAD0;		/* Pad0 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	/* Group-specific queries get the shorter last-member interval. */
	interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
					  br->multicast_query_response_interval;

	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	ipv6_addr_copy(&mldq->mld_mca, group);

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	/* Leave skb->data at the network header, as after normal RX. */
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
484
485static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
486						struct br_ip *addr)
487{
488	switch (addr->proto) {
489	case htons(ETH_P_IP):
490		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
491#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
492	case htons(ETH_P_IPV6):
493		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
494#endif
495	}
496	return NULL;
497}
498
/*
 * Send one group-specific query up the bridge's own stack and re-arm
 * the query timer until multicast_last_member_count queries have gone
 * out.  Allocation failure still advances the retry counter.
 */
static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, &mp->addr);
	if (!skb)
		goto timer;

	netif_rx(skb);

timer:
	if (++mp->queries_sent < br->multicast_last_member_count)
		mod_timer(&mp->query_timer,
			  jiffies + br->multicast_last_member_interval);
}
515
/*
 * Query retransmit timer for the bridge's own membership: keep sending
 * group-specific queries while the membership is live and the retry
 * budget is not exhausted.
 */
static void br_multicast_group_query_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !mp->mglist ||
	    mp->queries_sent >= br->multicast_last_member_count)
		goto out;

	br_multicast_send_group_query(mp);

out:
	spin_unlock(&br->multicast_lock);
}
531
/*
 * Send one group-specific query out of @pg's port and re-arm the query
 * timer until multicast_last_member_count queries have gone out.
 * Allocation failure still advances the retry counter.
 */
static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
{
	struct net_bridge_port *port = pg->port;
	struct net_bridge *br = port->br;
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, &pg->addr);
	if (!skb)
		goto timer;

	br_deliver(port, skb);

timer:
	if (++pg->queries_sent < br->multicast_last_member_count)
		mod_timer(&pg->query_timer,
			  jiffies + br->multicast_last_member_interval);
}
549
/*
 * Query retransmit timer for a port's membership: keep sending
 * group-specific queries while the port group is still linked and the
 * retry budget is not exhausted.
 */
static void br_multicast_port_group_query_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge_port *port = pg->port;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    pg->queries_sent >= br->multicast_last_member_count)
		goto out;

	br_multicast_send_port_group_query(pg);

out:
	spin_unlock(&br->multicast_lock);
}
566
/*
 * Find @group in its bucket, or decide what must happen before a new
 * entry may be inserted.  Returns:
 *   entry   - already present;
 *   NULL    - absent and the table is healthy, caller may allocate;
 *   ERR_PTR - -EAGAIN after a successful grow/rehash (caller must
 *             reload br->mdb and retry), or a real error (-E2BIG,
 *             -EEXIST, rehash failure), possibly having disabled
 *             snooping entirely.
 * Called with br->multicast_lock held.
 */
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb = br->mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;
	unsigned count = 0;
	unsigned max;
	int elasticity;
	int err;

	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	/* Overlong chain: request a rehash with a fresh secret. */
	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max >= br->hash_max)) {
			br_warn(br, "Multicast hash table maximum "
				"reached, disabling snooping: %s, %d\n",
				port ? port->dev->name : br->dev->name, max);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		/* A previous rehash has not finished its grace period yet. */
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}
639
/*
 * Return the mdb entry for @group, creating it — and the hash table on
 * first use — when needed.  Returns NULL on allocation failure or an
 * ERR_PTR propagated from br_multicast_get_group().
 * Called with br->multicast_lock held.
 */
static struct net_bridge_mdb_entry *br_multicast_new_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb = br->mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;

	if (!mdb) {
		if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
			return NULL;
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
		/* Table was rebuilt: reload it and recompute the bucket. */
rehash:
		mdb = br->mdb;
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		goto out;

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);
	setup_timer(&mp->query_timer, br_multicast_group_query_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}
687
/*
 * Record a membership report for @group heard on @port (NULL means the
 * bridge device itself joined) and (re)arm the matching membership
 * timer.  Per-group port lists are kept sorted by descending pointer
 * value, matching the insertion scan below.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (unlikely(IS_ERR(mp) || !mp))
		goto err;

	if (!port) {
		/* The bridge itself is a member. */
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	err = -ENOMEM;
	if (unlikely(!p))
		goto err;

	p->addr = *group;
	p->port = port;
	p->next = *pp;
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
		    (unsigned long)p);

	rcu_assign_pointer(*pp, p);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}
746
747static int br_ip4_multicast_add_group(struct net_bridge *br,
748				      struct net_bridge_port *port,
749				      __be32 group)
750{
751	struct br_ip br_group;
752
753	if (ipv4_is_local_multicast(group))
754		return 0;
755
756	br_group.u.ip4 = group;
757	br_group.proto = htons(ETH_P_IP);
758
759	return br_multicast_add_group(br, port, &br_group);
760}
761
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Record an IPv6 join; link-local-scope groups are not snooped.
 *
 * Fix: the br_ip must be tagged htons(ETH_P_IPV6), not ETH_P_IP.
 * With the old tag the entry was hashed and compared as a (garbage)
 * IPv4 address, so MLD-learned groups could never match forwarding
 * lookups done with ETH_P_IPV6 (see br_mdb_get/br_ip_equal above).
 */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group)
{
	struct br_ip br_group;

	if (ipv6_is_local_multicast(group))
		return 0;

	ipv6_addr_copy(&br_group.u.ip6, group);
	br_group.proto = htons(ETH_P_IPV6);

	return br_multicast_add_group(br, port, &br_group);
}
#endif
778
/*
 * Router-presence timer for a port in auto mode (multicast_router == 1):
 * when it genuinely expires, drop the port from the router list.
 */
static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);

out:
	spin_unlock(&br->multicast_lock);
}
795
/*
 * Intentionally empty: for the bridge itself only the pending state of
 * br->multicast_router_timer matters; no teardown is needed on expiry.
 */
static void br_multicast_local_router_expired(unsigned long data)
{
}
799
800static void __br_multicast_send_query(struct net_bridge *br,
801				      struct net_bridge_port *port,
802				      struct br_ip *ip)
803{
804	struct sk_buff *skb;
805
806	skb = br_multicast_alloc_query(br, ip);
807	if (!skb)
808		return;
809
810	if (port) {
811		__skb_push(skb, sizeof(struct ethhdr));
812		skb->dev = port->dev;
813		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
814			dev_queue_xmit);
815	} else
816		netif_rx(skb);
817}
818
/*
 * Send a general query per enabled family and re-arm the periodic
 * query timer.  Suppressed while another querier is active (querier
 * timer pending).  The first multicast_startup_query_count queries use
 * the shorter startup interval.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port, u32 sent)
{
	unsigned long time;
	struct br_ip br_group;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    timer_pending(&br->multicast_querier_timer))
		return;

	/* All-zero address = general query. */
	memset(&br_group.u, 0, sizeof(br_group.u));

	br_group.proto = htons(ETH_P_IP);
	__br_multicast_send_query(br, port, &br_group);

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	br_group.proto = htons(ETH_P_IPV6);
	__br_multicast_send_query(br, port, &br_group);
#endif

	time = jiffies;
	time += sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(port ? &port->multicast_query_timer :
			 &br->multicast_query_timer, time);
}
846
/*
 * Periodic query timer for one port: send the next general query
 * unless the port cannot forward (disabled/blocking), counting the
 * startup queries so the interval can be chosen accordingly.
 */
static void br_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (port->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		port->multicast_startup_queries_sent++;

	br_multicast_send_query(port->br, port,
				port->multicast_startup_queries_sent);

out:
	spin_unlock(&br->multicast_lock);
}
867
/*
 * Per-port multicast init: default to automatic router discovery
 * (mode 1) and set up, but do not start, the port's timers.
 */
void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->multicast_query_timer,
		    br_multicast_port_query_expired, (unsigned long)port);
}
877
/* Per-port multicast teardown: make sure the router timer is stopped. */
void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}
882
/*
 * Restart a port's startup-query sequence: reset the counter and fire
 * the query timer immediately.  The try_to_del/del_timer pair makes
 * the re-arm safe whether or not the timer was already queued.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	port->multicast_startup_queries_sent = 0;

	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
	    del_timer(&port->multicast_query_timer))
		mod_timer(&port->multicast_query_timer, jiffies);
}
891
/*
 * Called when a port comes up: kick off its query sequence unless
 * snooping is disabled or the bridge itself is down.
 */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	__br_multicast_enable_port(port);

out:
	spin_unlock(&br->multicast_lock);
}
905
/*
 * Called when a port goes down: drop all of its group memberships,
 * remove it from the router list, and stop its timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *p, *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->multicast_query_timer);
	spin_unlock(&br->multicast_lock);
}
922
/*
 * Parse an IGMPv3 report, treating each recognised group record as an
 * IGMPv2 join of its group.  Every record header and its source list
 * are bounds-checked with pskb_may_pull() before being read.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	if (!pskb_may_pull(skb, sizeof(*ih)))
		return -EINVAL;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		/* Skip over the record's source address list. */
		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip4_multicast_add_group(br, port, group);
		if (err)
			break;
	}

	return err;
}
977
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Parse an MLDv2 report, treating each recognised group record as an
 * MLDv1 join of its group (mirrors the IGMPv3 parser above).
 *
 * Fixes vs. the previous version:
 *  - the source count was read at offsetof(struct mld2_grec, grec_mca)
 *    instead of grec_nsrcs, i.e. from the group address bytes;
 *  - grec_nsrcs is big-endian on the wire and must go through ntohs()
 *    before sizing/advancing over the source list, exactly as the
 *    IGMPv3 parser does with ntohs(grec->grec_nsrcs);
 *  - the loop stopped after the first *successful* record
 *    ("if (!err) break;"); it must stop on failure instead.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	/* Number of group records lives in the second 16-bit data word. */
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) + sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca);
		if (err)
			break;
	}

	return err;
}
#endif
1037
/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
/*
 * Insert @port into br->router_list, keeping the list ordered by
 * descending pointer value.  Called with br->multicast_lock held;
 * readers traverse under RCU.
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *n, *slot = NULL;

	hlist_for_each_entry(p, n, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = n;
	}

	if (slot)
		hlist_add_after_rcu(slot, &port->rlist);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
}
1060
/*
 * Note that a multicast router was heard behind @port (NULL = the
 * bridge itself): in automatic mode (multicast_router == 1), add the
 * port to the router list if needed and refresh the presence timer.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	/* Modes 0 (never) and 2 (always) need no timer bookkeeping. */
	if (port->multicast_router != 1)
		return;

	if (!hlist_unhashed(&port->rlist))
		goto timer;

	br_multicast_add_router(br, port);

timer:
	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
1085
/*
 * A query was received on @port.  A non-zero source address marks a
 * real elected querier: refresh the querier timer.  Queries with an
 * unspecified source only mark the router while no querier is active.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					int saddr)
{
	if (saddr)
		mod_timer(&br->multicast_querier_timer,
			  jiffies + br->multicast_querier_interval);
	else if (timer_pending(&br->multicast_querier_timer))
		return;

	br_multicast_mark_router(br, port);
}
1098
1099static int br_ip4_multicast_query(struct net_bridge *br,
1100				  struct net_bridge_port *port,
1101				  struct sk_buff *skb)
1102{
1103	struct iphdr *iph = ip_hdr(skb);
1104	struct igmphdr *ih = igmp_hdr(skb);
1105	struct net_bridge_mdb_entry *mp;
1106	struct igmpv3_query *ih3;
1107	struct net_bridge_port_group *p;
1108	struct net_bridge_port_group **pp;
1109	unsigned long max_delay;
1110	unsigned long now = jiffies;
1111	__be32 group;
1112	int err = 0;
1113
1114	spin_lock(&br->multicast_lock);
1115	if (!netif_running(br->dev) ||
1116	    (port && port->state == BR_STATE_DISABLED))
1117		goto out;
1118
1119	br_multicast_query_received(br, port, !!iph->saddr);
1120
1121	group = ih->group;
1122
1123	if (skb->len == sizeof(*ih)) {
1124		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
1125
1126		if (!max_delay) {
1127			max_delay = 10 * HZ;
1128			group = 0;
1129		}
1130	} else {
1131		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
1132			err = -EINVAL;
1133			goto out;
1134		}
1135
1136		ih3 = igmpv3_query_hdr(skb);
1137		if (ih3->nsrcs)
1138			goto out;
1139
1140		max_delay = ih3->code ?
1141			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1142	}
1143
1144	if (!group)
1145		goto out;
1146
1147	mp = br_mdb_ip4_get(br->mdb, group);
1148	if (!mp)
1149		goto out;
1150
1151	max_delay *= br->multicast_last_member_count;
1152
1153	if (mp->mglist &&
1154	    (timer_pending(&mp->timer) ?
1155	     time_after(mp->timer.expires, now + max_delay) :
1156	     try_to_del_timer_sync(&mp->timer) >= 0))
1157		mod_timer(&mp->timer, now + max_delay);
1158
1159	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
1160		if (timer_pending(&p->timer) ?
1161		    time_after(p->timer.expires, now + max_delay) :
1162		    try_to_del_timer_sync(&p->timer) >= 0)
1163			mod_timer(&mp->timer, now + max_delay);
1164	}
1165
1166out:
1167	spin_unlock(&br->multicast_lock);
1168	return err;
1169}
1170
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Handle a received MLD query: note the querier/router, then shorten
 * the membership timers for the queried group (IPv6 mirror of
 * br_ip4_multicast_query above).
 *
 * Fixes:
 *  - inside the port loop the *port* timer (p->timer) must be re-armed,
 *    not the group timer (mp->timer);
 *  - mld_maxdelay is a big-endian wire field, so convert it with
 *    ntohs() rather than htons() (same byte swap, correct semantics).
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p, **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	struct in6_addr *group = NULL;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));

	if (skb->len == sizeof(*mld)) {
		/* MLDv1 query. */
		if (!pskb_may_pull(skb, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else if (skb->len >= sizeof(*mld2q)) {
		/* MLDv2 query; source-specific queries keep group == NULL. */
		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
	}

	if (!group)
		goto out;

	mp = br_mdb_ip6_get(br->mdb, group);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif
1239
/* Handle an IGMP/MLD leave for @group arriving on @port (or on the bridge
 * itself when @port is NULL): shorten the membership timers so the entry
 * expires after the "last member" query sequence goes unanswered, instead
 * of waiting out the full membership interval.
 */
static void br_multicast_leave_group(struct net_bridge *br,
				     struct net_bridge_port *port,
				     struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	/* If another querier is active on the segment
	 * (multicast_querier_timer pending), it owns the leave handling;
	 * do nothing here.
	 */
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&br->multicast_querier_timer))
		goto out;

	mdb = br->mdb;
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	now = jiffies;
	/* New expiry: last_member_count unanswered queries' worth. */
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* Leave originated from the bridge device itself. */
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			/* Only ever shorten the timer, never extend it. */
			mod_timer(&mp->timer, time);

			/* Restart the group-specific query sequence now. */
			mp->queries_sent = 0;
			mod_timer(&mp->query_timer, now);
		}

		goto out;
	}

	/* Leave from a bridge port: same treatment for that port's entry. */
	for (p = mp->ports; p; p = p->next) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);

			p->queries_sent = 0;
			mod_timer(&p->query_timer, now);
		}

		break;
	}

out:
	spin_unlock(&br->multicast_lock);
}
1299
1300static void br_ip4_multicast_leave_group(struct net_bridge *br,
1301					 struct net_bridge_port *port,
1302					 __be32 group)
1303{
1304	struct br_ip br_group;
1305
1306	if (ipv4_is_local_multicast(group))
1307		return;
1308
1309	br_group.u.ip4 = group;
1310	br_group.proto = htons(ETH_P_IP);
1311
1312	br_multicast_leave_group(br, port, &br_group);
1313}
1314
1315#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1316static void br_ip6_multicast_leave_group(struct net_bridge *br,
1317					 struct net_bridge_port *port,
1318					 const struct in6_addr *group)
1319{
1320	struct br_ip br_group;
1321
1322	if (ipv6_is_local_multicast(group))
1323		return;
1324
1325	ipv6_addr_copy(&br_group.u.ip6, group);
1326	br_group.proto = htons(ETH_P_IPV6);
1327
1328	br_multicast_leave_group(br, port, &br_group);
1329}
1330#endif
1331
/* Parse a bridged IPv4 frame and, if it is a valid IGMP message, dispatch
 * it to the appropriate snooping handler.  Works on a clone (skb2) when
 * the frame carries trailing padding, so the original skb that continues
 * through the bridge is never trimmed.
 *
 * Returns 0 for non-IGMP traffic or handled messages, -EINVAL for
 * malformed packets, -ENOMEM on clone failure.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2 = skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	unsigned len;
	unsigned offset;
	int err;

	/* We treat OOM as packet loss for now. */
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->ihl < 5 || iph->version != 4)
		return -EINVAL;

	/* Pull the full header including IP options. */
	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
		return -EINVAL;

	/* pskb_may_pull may have reallocated; re-read the header pointer. */
	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	if (iph->protocol != IPPROTO_IGMP)
		return 0;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < ip_hdrlen(skb))
		return -EINVAL;

	/* Frame has trailing padding: trim a clone, keep skb intact. */
	if (skb->len > len) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			return -ENOMEM;

		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto err_out;
	}

	len -= ip_hdrlen(skb2);
	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);

	err = -EINVAL;
	if (!pskb_may_pull(skb2, sizeof(*ih)))
		goto out;

	/* Verify the IGMP checksum over the trimmed payload. */
	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb2->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb2->csum = 0;
		if (skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	/* Mark the original skb so forwarding knows this was IGMP. */
	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb2);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Reports are only forwarded toward multicast routers. */
		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb2);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb2);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group);
		break;
	}

out:
	/* Undo the pull: when skb2 == skb the caller still needs the
	 * original offsets.
	 */
	__skb_push(skb2, offset);
err_out:
	if (skb2 != skb)
		kfree_skb(skb2);
	return err;
}
1426
1427#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* Parse a bridged IPv6 frame and, if it is a valid MLD message, dispatch
 * it to the appropriate snooping handler.  Mirrors br_multicast_ipv4_rcv:
 * works on a clone so the frame continuing through the bridge is never
 * modified.
 *
 * Returns 0 for non-MLD traffic or handled messages, -EINVAL for
 * malformed packets, -ENOMEM on clone failure.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2 = skb;
	struct ipv6hdr *ip6h;
	struct icmp6hdr *icmp6h;
	u8 nexthdr;
	unsigned len;
	int offset;
	int err;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -EINVAL;

	ip6h = ipv6_hdr(skb);

	/*
	 * We're interested in MLD messages only.
	 *  - Version is 6
	 *  - MLD has always Router Alert hop-by-hop option
	 *  - But we do not support jumbrograms.
	 */
	if (ip6h->version != 6 ||
	    ip6h->nexthdr != IPPROTO_HOPOPTS ||
	    ip6h->payload_len == 0)
		return 0;

	len = ntohs(ip6h->payload_len);
	if (skb->len < len)
		return -EINVAL;

	/* Walk the extension-header chain to find the transport header. */
	nexthdr = ip6h->nexthdr;
	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);

	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
		return 0;

	/* Okay, we found ICMPv6 header */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return -ENOMEM;

	/* len becomes the ICMPv6 payload length past the ext headers. */
	len -= offset - skb_network_offset(skb2);

	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);

	err = -EINVAL;
	if (!pskb_may_pull(skb2, sizeof(*icmp6h)))
		goto out;

	icmp6h = icmp6_hdr(skb2);

	/* Filter before doing any checksum work: MLD types only. */
	switch (icmp6h->icmp6_type) {
	case ICMPV6_MGM_QUERY:
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MLD2_REPORT:
		break;
	default:
		err = 0;
		goto out;
	}

	/* Okay, we found MLD message. Check further. */
	if (skb2->len > len) {
		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto out;
	}

	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb2->csum))
			break;
		/*FALLTHROUGH*/
	case CHECKSUM_NONE:
		skb2->csum = 0;
		if (skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	/* Mark the original skb so forwarding knows this was MLD. */
	BR_INPUT_SKB_CB(skb)->igmp = 1;

	switch (icmp6h->icmp6_type) {
	case ICMPV6_MGM_REPORT:
	    {
		struct mld_msg *mld = (struct mld_msg *)icmp6h;
		/* Reports are only forwarded toward multicast routers. */
		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
		break;
	    }
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb2);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb2);
		break;
	case ICMPV6_MGM_REDUCTION:
	    {
		struct mld_msg *mld = (struct mld_msg *)icmp6h;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
	    }
	}

out:
	/* Undo the pull before the clone is freed / skb is returned. */
	__skb_push(skb2, offset);
	if (skb2 != skb)
		kfree_skb(skb2);
	return err;
}
1542#endif
1543
1544int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1545		     struct sk_buff *skb)
1546{
1547	BR_INPUT_SKB_CB(skb)->igmp = 0;
1548	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1549
1550	if (br->multicast_disabled)
1551		return 0;
1552
1553	switch (skb->protocol) {
1554	case htons(ETH_P_IP):
1555		return br_multicast_ipv4_rcv(br, port, skb);
1556#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1557	case htons(ETH_P_IPV6):
1558		return br_multicast_ipv6_rcv(br, port, skb);
1559#endif
1560	}
1561
1562	return 0;
1563}
1564
1565static void br_multicast_query_expired(unsigned long data)
1566{
1567	struct net_bridge *br = (void *)data;
1568
1569	spin_lock(&br->multicast_lock);
1570	if (br->multicast_startup_queries_sent <
1571	    br->multicast_startup_query_count)
1572		br->multicast_startup_queries_sent++;
1573
1574	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
1575
1576	spin_unlock(&br->multicast_lock);
1577}
1578
/* Initialize per-bridge multicast snooping state: hash-table tuning,
 * protocol timing defaults, the lock and the three bridge-level timers.
 * Called once when the bridge device is created.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	/* 1 = automatic router discovery (see br_multicast_set_router). */
	br->multicast_router = 1;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* Timing values in jiffies; they match the common IGMP defaults
	 * (query interval 125s, querier timeout 255s, membership 260s).
	 */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	/* NOTE(review): the querier timer reuses the router-expired
	 * callback; elsewhere only timer_pending() on it is checked, so
	 * expiry apparently needs no action — confirm intent.
	 */
	setup_timer(&br->multicast_querier_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
		    (unsigned long)br);
}
1603
1604void br_multicast_open(struct net_bridge *br)
1605{
1606	br->multicast_startup_queries_sent = 0;
1607
1608	if (br->multicast_disabled)
1609		return;
1610
1611	mod_timer(&br->multicast_query_timer, jiffies);
1612}
1613
/* Tear down all multicast snooping state when the bridge goes down:
 * stop the bridge-level timers, then free every mdb entry and the hash
 * table itself via RCU so concurrent readers stay safe.
 */
void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p, *n;
	u32 ver;
	int i;

	/* Timers must be fully quiesced before the table is dismantled. */
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->multicast_querier_timer);
	del_timer_sync(&br->multicast_query_timer);

	spin_lock_bh(&br->multicast_lock);
	mdb = br->mdb;
	if (!mdb)
		goto out;

	/* Detach the table first so no new entries can be added. */
	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			del_timer(&mp->query_timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	/* A rehash may still be in flight; wait for its RCU callback to
	 * clear mdb->old before freeing the table.
	 */
	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	/* Self-reference makes br_mdb_free release this table too. */
	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}
1656
1657int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1658{
1659	int err = -ENOENT;
1660
1661	spin_lock_bh(&br->multicast_lock);
1662	if (!netif_running(br->dev))
1663		goto unlock;
1664
1665	switch (val) {
1666	case 0:
1667	case 2:
1668		del_timer(&br->multicast_router_timer);
1669		/* fall through */
1670	case 1:
1671		br->multicast_router = val;
1672		err = 0;
1673		break;
1674
1675	default:
1676		err = -EINVAL;
1677		break;
1678	}
1679
1680unlock:
1681	spin_unlock_bh(&br->multicast_lock);
1682
1683	return err;
1684}
1685
1686int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
1687{
1688	struct net_bridge *br = p->br;
1689	int err = -ENOENT;
1690
1691	spin_lock(&br->multicast_lock);
1692	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
1693		goto unlock;
1694
1695	switch (val) {
1696	case 0:
1697	case 1:
1698	case 2:
1699		p->multicast_router = val;
1700		err = 0;
1701
1702		if (val < 2 && !hlist_unhashed(&p->rlist))
1703			hlist_del_init_rcu(&p->rlist);
1704
1705		if (val == 1)
1706			break;
1707
1708		del_timer(&p->multicast_router_timer);
1709
1710		if (val == 0)
1711			break;
1712
1713		br_multicast_add_router(br, p);
1714		break;
1715
1716	default:
1717		err = -EINVAL;
1718		break;
1719	}
1720
1721unlock:
1722	spin_unlock(&br->multicast_lock);
1723
1724	return err;
1725}
1726
/* Sysfs hook: enable (@val != 0) or disable multicast snooping.
 * Enabling rehashes the mdb (if one exists) and restarts querying on
 * every active port.  Returns 0, or -EEXIST / the rehash error with the
 * disabled flag rolled back.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	if (br->mdb) {
		/* A rehash is already in flight: refuse and roll back. */
		if (br->mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		/* Same size; rehash resets elasticity-related state. */
		err = br_mdb_rehash(&br->mdb, br->mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		__br_multicast_enable_port(port);
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
1771
/* Sysfs hook: set the maximum mdb hash size to @val (must be a power of
 * two, and not smaller than the current entry count) and rehash the
 * existing table to the new size.  Returns 0, -EINVAL for a bad value,
 * -ENOENT when the device is down, or -EEXIST / the rehash error with
 * hash_max rolled back.
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;
	u32 old;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = -EINVAL;
	/* The hash masks with (max - 1), so max must be a power of two. */
	if (!is_power_of_2(val))
		goto unlock;
	if (br->mdb && val < br->mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (br->mdb) {
		/* A rehash is already in flight: refuse and roll back. */
		if (br->mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
1811