/*
 * Copyright (c) 2012, 2015 The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/etherdevice.h>

#include "mc_osdep.h"
#include "mc_api.h"
#include "mc_snooping.h"
#include "mc_netlink.h"

static struct sock *mc_nl_sk = NULL;

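/*
 * Add, update, or remove a single ACL pattern in the IGMP (or, with
 * MC_SUPPORT_MLD, the MLD) rule table. An existing entry is matched by
 * group IP when one is supplied, otherwise by MAC address. Callers hold
 * mc->lock.
 */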
static void mc_acltbl_update(struct mc_struct *mc, void *param)
{
	int i;
	struct mc_param_pattern pattern;
	struct __mc_param_acl_rule *ar = (struct __mc_param_acl_rule *)param;

	memset(&pattern, 0, sizeof pattern);
	if (ar->pattern_type == MC_ACL_PATTERN_IGMP) {
		pattern.rule = ar->pattern.rule;
		memcpy(pattern.mac, ar->pattern.mac, ETH_ALEN);
		memcpy(pattern.mac_mask, ar->pattern.mac_mask, ETH_ALEN);
		/* Input is in network byte order, as converted from a string by inet_pton() */
		pattern.ip.ip4 = *((__be32 *)ar->pattern.ip);
		pattern.ip_mask.ip4_mask = *((__be32 *)ar->pattern.ip_mask);

		if (pattern.ip.ip4) {
			for (i = 0; i < mc->igmp_acl.pattern_count; i++) {
				if (mc->igmp_acl.patterns[i].ip.ip4 == pattern.ip.ip4)
					break;
			}
		} else if (!is_zero_ether_addr(pattern.mac)) {
			for (i = 0; i < mc->igmp_acl.pattern_count; i++) {
				/* Match on the MAC address only, as the MLD branch below does */
				if (!memcmp(mc->igmp_acl.patterns[i].mac, pattern.mac, ETH_ALEN))
					break;
			}
		} else {
			goto out;
		}

		if (ar->pattern.rule == MC_ACL_RULE_DISABLE) {
			if (i != mc->igmp_acl.pattern_count) {
				/* The source and destination overlap, so memmove() is required */
				if (i < mc->igmp_acl.pattern_count - 1)
					memmove(&mc->igmp_acl.patterns[i], &mc->igmp_acl.patterns[i+1],
						(sizeof pattern) * (mc->igmp_acl.pattern_count - 1 - i));
				mc->igmp_acl.pattern_count--;
			}
			MC_PRINT(KERN_INFO "%s: Del IGMP acl rule, count=%d\n", __func__, mc->igmp_acl.pattern_count);
		} else {
			if (i != mc->igmp_acl.pattern_count) {
				memcpy(&mc->igmp_acl.patterns[i], &pattern, sizeof pattern);
				MC_PRINT(KERN_INFO "%s: Update acl rule\n", __func__);
				goto out;
			}

			if (mc->igmp_acl.pattern_count == MC_ACL_RULE_MAX_COUNT) {
				MC_PRINT(KERN_INFO "%s: Add IGMP acl rule failed, table is full\n", __func__);
				goto out;
			}

			memcpy(&mc->igmp_acl.patterns[mc->igmp_acl.pattern_count], &pattern, sizeof pattern);
			mc->igmp_acl.pattern_count++;
			MC_PRINT(KERN_INFO "%s: Add IGMP acl rule, count=%d\n", __func__, mc->igmp_acl.pattern_count);
		}
	}
#ifdef MC_SUPPORT_MLD
	else { /* MLD */
		pattern.rule = ar->pattern.rule;
		memcpy(pattern.mac, ar->pattern.mac, ETH_ALEN);
		memcpy(pattern.mac_mask, ar->pattern.mac_mask, ETH_ALEN);
		/* Input is in network byte order, as converted from a string by inet_pton() */
		ipv6_addr_set(&pattern.ip.ip6,
				((struct in6_addr *)ar->pattern.ip)->s6_addr32[0],
				((struct in6_addr *)ar->pattern.ip)->s6_addr32[1],
				((struct in6_addr *)ar->pattern.ip)->s6_addr32[2],
				((struct in6_addr *)ar->pattern.ip)->s6_addr32[3]);
		ipv6_addr_set(&pattern.ip_mask.ip6_mask,
				((struct in6_addr *)ar->pattern.ip_mask)->s6_addr32[0],
				((struct in6_addr *)ar->pattern.ip_mask)->s6_addr32[1],
				((struct in6_addr *)ar->pattern.ip_mask)->s6_addr32[2],
				((struct in6_addr *)ar->pattern.ip_mask)->s6_addr32[3]);

		if (!ipv6_addr_any(&pattern.ip.ip6)) {
			for (i = 0; i < mc->mld_acl.pattern_count; i++) {
				if (!ipv6_addr_cmp(&mc->mld_acl.patterns[i].ip.ip6, &pattern.ip.ip6))
					break;
			}
		} else if (!is_zero_ether_addr(pattern.mac)) {
			for (i = 0; i < mc->mld_acl.pattern_count; i++) {
				if (!memcmp(mc->mld_acl.patterns[i].mac, pattern.mac, ETH_ALEN))
					break;
			}
		} else {
			goto out;
		}

		if (ar->pattern.rule == MC_ACL_RULE_DISABLE) {
			if (i != mc->mld_acl.pattern_count) {
				/* The source and destination overlap, so memmove() is required */
				if (i < mc->mld_acl.pattern_count - 1)
					memmove(&mc->mld_acl.patterns[i], &mc->mld_acl.patterns[i+1],
						(sizeof pattern) * (mc->mld_acl.pattern_count - 1 - i));
				mc->mld_acl.pattern_count--;
			}
			MC_PRINT(KERN_INFO "%s: Del MLD acl rule, count=%d\n", __func__, mc->mld_acl.pattern_count);
		} else {
			if (i != mc->mld_acl.pattern_count) {
				/* Update the matched entry in place, as the IGMP branch does */
				memcpy(&mc->mld_acl.patterns[i], &pattern, sizeof pattern);
				MC_PRINT(KERN_INFO "%s: Update acl rule\n", __func__);
				goto out;
			}

			if (mc->mld_acl.pattern_count == MC_ACL_RULE_MAX_COUNT) {
				MC_PRINT(KERN_INFO "%s: Add MLD acl rule failed, table is full\n", __func__);
				goto out;
			}

			memcpy(&mc->mld_acl.patterns[mc->mld_acl.pattern_count], &pattern, sizeof pattern);
			mc->mld_acl.pattern_count++;
			MC_PRINT(KERN_INFO "%s: Add MLD acl rule, count=%d\n", __func__, mc->mld_acl.pattern_count);
		}
	}
#endif
out:
	return;
}

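/* Flush the whole IGMP or MLD ACL table, selected by ar->pattern_type. Callers hold mc->lock. */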
static void mc_acltbl_flush(struct mc_struct *mc, void *param)
{
	struct __mc_param_acl_rule *ar = (struct __mc_param_acl_rule *)param;

	if (ar->pattern_type == MC_ACL_PATTERN_IGMP) {
		memset(&mc->igmp_acl, 0, sizeof(mc->igmp_acl));
		MC_PRINT(KERN_INFO "%s: Flush IGMP acl rule table.\n", __func__);
	}
#ifdef MC_SUPPORT_MLD
	else { /* MLD */
		memset(&mc->mld_acl, 0, sizeof(mc->mld_acl));
		MC_PRINT(KERN_INFO "%s: Flush MLD acl rule table.\n", __func__);
	}
#endif
}

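/*
 * Copy the ACL tables into a caller-supplied buffer. On success the number
 * of bytes written is reported; if the buffer is too small, -EAGAIN is
 * returned and bytes_needed tells the caller how large a retry must be.
 * Callers hold mc->lock.
 */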
static int mc_acltbl_fillbuf(struct mc_struct *mc, void *buf,
			     __be32 buflen, __be32 *bytes_written, __be32 *bytes_needed)
{
	struct __mc_param_acl_rule *entry = buf;
	int i, total = 0, num = 0, num_entries, ret = 0;
	struct mc_param_pattern *p = &mc->igmp_acl.patterns[0];

	num_entries = buflen / sizeof(*entry);

	for (i = 0; i < mc->igmp_acl.pattern_count; i++) {
		total++;
		if (num >= num_entries) {
			ret = -EAGAIN;
			continue;
		}

		entry->pattern_type = MC_ACL_PATTERN_IGMP;
		entry->pattern.rule = p[i].rule;
		memcpy(entry->pattern.mac, p[i].mac, ETH_ALEN);
		memcpy(entry->pattern.mac_mask, p[i].mac_mask, ETH_ALEN);
		memcpy(entry->pattern.ip, &p[i].ip.ip4, sizeof(__be32));
		/* Only an IPv4 mask is stored here; copy four bytes, not the whole field */
		memcpy(entry->pattern.ip_mask, &p[i].ip_mask.ip4_mask, sizeof(__be32));

		entry++;
		num++;
	}
	if (ret < 0)
		goto out;

#ifdef MC_SUPPORT_MLD
	p = &mc->mld_acl.patterns[0];
	for (i = 0; i < mc->mld_acl.pattern_count; i++) {
		total++;
		if (num >= num_entries) {
			ret = -EAGAIN;
			continue;
		}

		entry->pattern_type = MC_ACL_PATTERN_MLD;
		entry->pattern.rule = p[i].rule;
		memcpy(entry->pattern.mac, p[i].mac, ETH_ALEN);
		memcpy(entry->pattern.mac_mask, p[i].mac_mask, ETH_ALEN);
		memcpy(entry->pattern.ip, &p[i].ip.ip6, sizeof(entry->pattern.ip));
		memcpy(entry->pattern.ip_mask, &p[i].ip_mask.ip6_mask, sizeof(entry->pattern.ip_mask));

		entry++;
		num++;
	}
#endif
out:
	if (bytes_written)
		*bytes_written = num * sizeof(*entry);

	if (bytes_needed) {
		if (ret == -EAGAIN)
			*bytes_needed = total * sizeof(*entry);
		else
			*bytes_needed = 0;
	}
	return ret;
}

/* call with rcu_read_lock() */
static int mc_mdbtbl_fillbuf(struct mc_struct *mc, void *buf,
			     __be32 buflen, __be32 *bytes_written, __be32 *bytes_needed)
{
	unsigned long now = jiffies;
	struct __mc_mdb_entry *entry = buf;
	int i, total = 0, num = 0, num_entries, ret = 0;

	num_entries = buflen / sizeof(*entry);

	for (i = 0; i < MC_HASH_SIZE; i++) {
		struct mc_mdb_entry *mdb;
		struct hlist_node *mdbh;

		os_hlist_for_each_entry_rcu(mdb, mdbh, &mc->hash[i], hlist) {
			struct mc_port_group *pg;
			struct hlist_node *pgh;

			if (!atomic_read(&mdb->users) || hlist_empty(&mdb->pslist))
				continue;

			os_hlist_for_each_entry_rcu(pg, pgh, &mdb->pslist, pslist) {
				struct mc_fdb_group *fg;
				struct hlist_node *fgh;

				if (hlist_empty(&pg->fslist))
					continue;

				os_hlist_for_each_entry_rcu(fg, fgh, &pg->fslist, fslist) {
					total++;
					if (num >= num_entries) {
						ret = -EAGAIN;
						continue;
					}

					if (mdb->group.pro == htons(ETH_P_IP)) {
						entry->nsrcs = fg->a.nsrcs;
						entry->group.pro = mdb->group.pro;
						entry->group.u.ip4 = mdb->group.u.ip4;
						memcpy(entry->srcs, fg->a.srcs, fg->a.nsrcs * sizeof(__be32));
					}
#ifdef MC_SUPPORT_MLD
					else {
						entry->nsrcs = fg->a.nsrcs;
						entry->group.pro = mdb->group.pro;
						memcpy(entry->group.u.ip6, mdb->group.u.ip6.s6_addr, sizeof(struct in6_addr));
						memcpy(entry->srcs, fg->a.srcs, fg->a.nsrcs * sizeof(struct in6_addr));
					}
#endif
					entry->ifindex = ((struct net_bridge_port *)pg->port)->dev->ifindex;
					entry->filter_mode = fg->filter_mode;
					entry->aging = jiffies_to_msecs(now - fg->ageing_timer) / 1000;
					entry->fdb_age_out = fg->fdb_age_out;
					memcpy(entry->mac, mc_fdb_mac_get(fg), ETH_ALEN);

					entry++;
					num++;
				}
			}
		}
	}

	if (bytes_written)
		*bytes_written = num * sizeof(*entry);

	if (bytes_needed) {
		if (ret == -EAGAIN)
			*bytes_needed = total * sizeof(*entry);
		else
			*bytes_needed = 0;
	}
	return ret;
}

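/*
 * Scan a userspace-supplied table (entries of entry_size bytes, each
 * beginning with a struct __mc_group) for the entry whose group matches
 * the given MDB entry. Returns a pointer into the table, or NULL.
 */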
static void *mc_find_entry_by_mdb(struct mc_struct *mc, struct mc_mdb_entry *mdb,
				  __be32 entry_size, void *param, __be32 param_len)
{
	struct mc_ip group;
	int i, entry_cnt = param_len / entry_size;
	__u8 *entry = param;

	for (i = 0; i < entry_cnt; i++, entry += entry_size) {
		struct __mc_group *entry_group = (struct __mc_group *)entry;

		memset(&group, 0, sizeof group);
		group.pro = entry_group->pro;
		if (group.pro == htons(ETH_P_IP))
			group.u.ip4 = entry_group->u.ip4;
#ifdef MC_SUPPORT_MLD
		else
			memcpy(group.u.ip6.s6_addr, entry_group->u.ip6, sizeof(struct in6_addr));
#endif

		if (!memcmp(&group, &mdb->group, sizeof(struct mc_ip)))
			return entry;
	}
	return NULL;
}

/* call with rcu_read_lock() */
static void mc_group_list_add(struct mc_ip *pgroup, struct mc_glist_entry **ghead)
{
	struct mc_glist_entry *pge;

	pge = kmalloc(sizeof(struct mc_glist_entry), GFP_ATOMIC);
	if (!pge) {
		printk(KERN_ERR "MC: out of memory\n");
		return;
	}

	memcpy(&pge->group, pgroup, sizeof(struct mc_ip));
	pge->next = *ghead;
	*ghead = pge;
}

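/*
 * Report one changed group to the registered IPv4 (or IPv6) bridge update
 * callback, if any. Called without any locks held.
 */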
void mc_group_notify_one(struct mc_struct *mc, struct mc_ip *pgroup)
{
	struct net_device *brdev;

	brdev = mc->dev;
	if (!brdev)
		return;

	if (pgroup->pro == htons(ETH_P_IP)) {
		mc_bridge_ipv4_update_callback_t ipv4_mc_event_cb;
		ipv4_mc_event_cb = mc_bridge_ipv4_update_callback_get();
		if (!ipv4_mc_event_cb)
			return;

		MC_PRINT("Group "MC_IP4_STR" changed\n", MC_IP4_FMT((u8 *)&pgroup->u.ip4));
		ipv4_mc_event_cb(brdev, pgroup->u.ip4);
	}
#ifdef MC_SUPPORT_MLD
	else {
		mc_bridge_ipv6_update_callback_t ipv6_mc_event_cb;
		ipv6_mc_event_cb = mc_bridge_ipv6_update_callback_get();
		if (!ipv6_mc_event_cb)
			return;

		MC_PRINT("Group "MC_IP6_STR" changed\n", MC_IP6_FMT((__be16 *)&pgroup->u.ip6));
		ipv6_mc_event_cb(brdev, &pgroup->u.ip6);
	}
#endif
}

/* call with no locks held */
static void mc_group_notify(struct mc_struct *mc, struct mc_glist_entry **ghead)
{
	struct mc_glist_entry *pge;
	struct mc_glist_entry *prev;

	pge = *ghead;
	*ghead = NULL;

	while (pge) {
		mc_group_notify_one(mc, &pge->group);
		prev = pge;
		pge = pge->next;
		kfree(prev);
	}
}

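/*
 * Rewrite each group's encapsulation device list from the userspace table;
 * groups without a table entry have their list cleared. Runs under
 * rcu_read_lock(); each MDB entry is modified under its own rwlock.
 */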
static void mc_set_psw_encap(struct mc_struct *mc, void *param, __be32 param_len)
{
	int i, entry_cnt = param_len / sizeof(struct __mc_encaptbl_entry);
	struct __mc_encaptbl_entry *entry = param;

	MC_PRINT("%s: %s encap table\n", __func__, entry_cnt ? "Update" : "Clear");
	for (i = 0; i < MC_HASH_SIZE; i++) {
		struct mc_mdb_entry *mdb;
		struct hlist_node *mdbh;
		os_hlist_for_each_entry_rcu(mdb, mdbh, &mc->hash[i], hlist) {
			write_lock_bh(&mdb->rwlock);
			if (entry_cnt && ((entry = mc_find_entry_by_mdb(mc, mdb,
								sizeof(struct __mc_encaptbl_entry), param, param_len)) != NULL)) {
				mdb->encap_dev_cnt = entry->dev_cnt > MC_ENCAP_DEV_MAX ? MC_ENCAP_DEV_MAX : entry->dev_cnt;
				/* Copy no more than the clamped count to avoid overflowing encap_dev */
				memcpy(mdb->encap_dev, entry->dev, mdb->encap_dev_cnt * sizeof(struct __mc_encaptbl_dev));
			} else {
				mdb->encap_dev_cnt = 0;
				memset(mdb->encap_dev, 0, sizeof(mdb->encap_dev));
			}
			write_unlock_bh(&mdb->rwlock);
		}
	}
}

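/*
 * Rewrite each group's flood interface list from the userspace table, and
 * collect every group whose list actually changed on *ghead so the caller
 * can send notifications after dropping rcu_read_lock().
 */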
static void mc_set_psw_flood(struct mc_struct *mc, void *param, __be32 param_len, struct mc_glist_entry **ghead)
{
	int i, entry_cnt = param_len / sizeof(struct __mc_floodtbl_entry);
	struct __mc_floodtbl_entry *entry = param;
	int flood_ifcnt;
	int entry_changed;

	MC_PRINT("%s: Update flood table\n", __func__);
	for (i = 0; i < MC_HASH_SIZE; i++) {
		struct mc_mdb_entry *mdb;
		struct hlist_node *mdbh;
		os_hlist_for_each_entry_rcu(mdb, mdbh, &mc->hash[i], hlist) {
			entry_changed = 0;

			write_lock_bh(&mdb->rwlock);
			if (entry_cnt && ((entry = mc_find_entry_by_mdb(mc, mdb,
								sizeof(struct __mc_floodtbl_entry), param, param_len)) != NULL)) {
				flood_ifcnt = mdb->flood_ifcnt;
				mdb->flood_ifcnt = entry->ifcnt > MC_FLOOD_IF_MAX ? MC_FLOOD_IF_MAX : entry->ifcnt;
				/* Compare and copy no more than the clamped count to stay within flood_ifindex */
				if (flood_ifcnt != mdb->flood_ifcnt ||
					memcmp(mdb->flood_ifindex, entry->ifindex, mdb->flood_ifcnt * sizeof(__be32)))
					entry_changed = 1;
				memcpy(mdb->flood_ifindex, entry->ifindex, mdb->flood_ifcnt * sizeof(__be32));
			} else {
				if (mdb->flood_ifcnt != 0)
					entry_changed = 1;
				mdb->flood_ifcnt = 0;
				memset(mdb->flood_ifindex, 0, sizeof(mdb->flood_ifindex));
			}
			write_unlock_bh(&mdb->rwlock);

			if (entry_changed) {
				mc_group_list_add(&mdb->group, ghead);
			}
		}
	}
}

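/*
 * Netlink input handler. Every request starts with a __mcctl_msg_header
 * naming the bridge, followed by message-specific data. The skb is cloned,
 * the header is patched in place with the result, and the buffer is
 * unicast back to the sender.
 */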
static void mc_netlink_receive(struct sk_buff *__skb)
{
	struct net_device *brdev = NULL;
	struct sk_buff *skb;
	struct nlmsghdr *nlh = NULL;
	void *msgdata = NULL;
	u32 pid, msgtype;
	struct __mcctl_msg_header *msghdr;
	struct mc_struct *mc;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
	if ((skb = skb_clone(__skb, GFP_KERNEL)) == NULL)
#else
	if ((skb = skb_get(__skb)) == NULL)
#endif
		return;

	/* process the netlink message pointed to by skb->data */
	nlh = nlmsg_hdr(skb);
	pid = nlh->nlmsg_pid;
	msghdr = NLMSG_DATA(nlh);
	msghdr->status = MC_STATUS_SUCCESS;
	msgdata = MC_MSG_DATA(nlh);
	msgtype = nlh->nlmsg_type;

	do {
		brdev = dev_get_by_name(&init_net, msghdr->if_name);

		if (!brdev) {
			printk(KERN_WARNING "Not a bridge device, or device not found: %s\n", msghdr->if_name);
			msghdr->status = MC_STATUS_NOT_FOUND;
			break;
		}

		if ((mc = MC_DEV(brdev)) == NULL && msgtype != MC_MSG_SET_ENABLE) {
			printk(KERN_ERR "%s: mc module is not registered!\n", __func__);
			msghdr->status = MC_STATUS_FAILURE;
			dev_put(brdev);
			break;
		}

		switch (msgtype) {
		case MC_MSG_SET_ENABLE:
			{
				struct __mc_param_value *e = (struct __mc_param_value *)msgdata;
				if (e->val) {
					if (mc_attach(brdev) == 0)
						printk(KERN_INFO "%s: Enable bridge snooping!\n", __func__);
					else
						printk(KERN_ERR "%s: Failed to enable bridge snooping!\n", __func__);
				} else {
					mc_detach(brdev);
					printk(KERN_INFO "%s: Disable bridge snooping!\n", __func__);
				}
			}
			break;
		case MC_MSG_SET_EVENT_PID:
			{
				struct __event_info *p = msgdata;
				mc->event_pid = p->event_pid;
				MC_PRINT(KERN_INFO "%s: Set event process id %d\n", __func__, p->event_pid);
			}
			break;
		case MC_MSG_SET_DEBUG:
			{
				struct __mc_param_value *e = (struct __mc_param_value *)msgdata;
				mc->debug = e->val;
				MC_PRINT(KERN_INFO "%s: %s MC debug\n", __func__, e->val ? "Enable" : "Disable");
			}
			break;
		case MC_MSG_SET_POLICY:
			{
				struct __mc_param_value *p = (struct __mc_param_value *)msgdata;
				mc->forward_policy = p->val;
				MC_PRINT(KERN_INFO "%s: Set the forward policy %s\n", __func__,
						p->val == MC_POLICY_FLOOD ? "FLOOD" : "DROP");
			}
			break;
		case MC_MSG_SET_MEMBERSHIP_INTERVAL:
			{
				struct __mc_param_value *mi = (struct __mc_param_value *)msgdata;
				mc->membership_interval = mi->val * HZ;
				MC_PRINT(KERN_INFO "%s: Set membership interval to %ds\n", __func__, mi->val);
			}
			break;
		case MC_MSG_SET_RETAG:
			{
				struct __mc_param_retag *t = (struct __mc_param_retag *)msgdata;
				mc->enable_retag = t->enable;
				mc->dscp = t->dscp & 0xff;
				MC_PRINT(KERN_INFO "%s: %s retag, DSCP=%02x\n", __func__, t->enable ? "Enable" : "Disable", mc->dscp);
			}
			break;
		case MC_MSG_SET_ROUTER_PORT:
			{
				struct __mc_param_router_port *rp = (struct __mc_param_router_port *)msgdata;
				if (rp->type >= MC_RTPORT_MAX) {
					MC_PRINT(KERN_ERR "%s: Invalid router port type %d!\n", __func__, rp->type);
				} else {
					mc->rp.type = rp->type;
					mc->rp.ifindex = rp->ifindex;
					MC_PRINT(KERN_INFO "%s: Set router port type=%d, ifindex=%d\n", __func__, rp->type, rp->ifindex);
				}
			}
			break;
		case MC_MSG_SET_ADD_ACL_RULE:
			{
				spin_lock(&mc->lock);
				mc_acltbl_update(mc, msgdata);
				spin_unlock(&mc->lock);
			}
			break;
		case MC_MSG_SET_FLUSH_ACL_RULE:
			{
				spin_lock(&mc->lock);
				mc_acltbl_flush(mc, msgdata);
				spin_unlock(&mc->lock);
			}
			break;
		case MC_MSG_SET_CONVERT_ALL:
			{
				struct __mc_param_value *e = (struct __mc_param_value *)msgdata;
				mc->convert_all = e->val;
				MC_PRINT(KERN_INFO "%s: %s MC convert all.\n", __func__, e->val ? "Enable" : "Disable");
			}
			break;
		case MC_MSG_SET_TIMEOUT:
			{
				struct __mc_param_timeout *t = (struct __mc_param_timeout *)msgdata;
				switch (t->from) {
				case MC_TIMEOUT_FROM_GROUP_SPECIFIC_QUERIES:
					mc->timeout_gsq_enable = t->enable;
					MC_PRINT("%s: %s timeout from %s.\n", __func__,
						t->enable ? "Enable" : "Disable", "group specific queries");
					break;
				case MC_TIMEOUT_FROM_ALL_SYSTEM_QUERIES:
					mc->timeout_asq_enable = t->enable;
					MC_PRINT("%s: %s timeout from %s.\n", __func__,
							t->enable ? "Enable" : "Disable", "all system queries");
					break;
				case MC_TIMEOUT_FROM_GROUP_MEMBERSHIP_INTERVAL:
					if (mc->timeout_gmi_enable == 1 && t->enable == 0)
						mod_timer(&mc->atimer, jiffies);
					mc->timeout_gmi_enable = t->enable;
					MC_PRINT("%s: %s timeout from %s.\n", __func__,
							t->enable ? "Enable" : "Disable", "group membership interval");
					break;
				default:
					MC_PRINT("%s: Set timeout failed, invalid value %d.\n", __func__, t->from);
				}
			}
			break;
		case MC_MSG_SET_M2I3_FILTER:
			{
				struct __mc_param_value *e = (struct __mc_param_value *)msgdata;
				mc->m2i3_filter_enable = e->val;
				MC_PRINT(KERN_INFO "%s: %s IGMPv3/MLDv2 Leave Filter.\n", __func__, e->val ? "Enable" : "Disable");
			}
			break;
		case MC_MSG_SET_TBIT:
			{
				struct __mc_param_value *e = (struct __mc_param_value *)msgdata;
				mc->ignore_tbit = e->val;
				MC_PRINT(KERN_INFO "%s: %s 'ignore T-bit'.\n", __func__, e->val ? "Enable" : "Disable");
			}
			break;
		case MC_MSG_SET_LOCAL_QUERY_INTERVAL:
			{
				struct __mc_param_value *i = (struct __mc_param_value *)msgdata;
				if (i->val) {
					mc->local_query_interval = i->val * HZ;
					if (timer_pending(&mc->qtimer) ?
						time_after(mc->qtimer.expires, jiffies + mc->local_query_interval) :
							try_to_del_timer_sync(&mc->qtimer) >= 0)
						mod_timer(&mc->qtimer, jiffies + mc->local_query_interval);
				}
				MC_PRINT(KERN_INFO "%s: Set local query interval to %u\n", __func__, i->val);
			}
			break;
		case MC_MSG_SET_PSW_ENCAP:
			{
				rcu_read_lock();
				mc_set_psw_encap(mc, msgdata, msghdr->buf_len);
				rcu_read_unlock();
			}
			break;
		case MC_MSG_SET_PSW_FLOOD:
			{
				struct mc_glist_entry *ghead = NULL;
				rcu_read_lock();
				mc_set_psw_flood(mc, msgdata, msghdr->buf_len, &ghead);
				rcu_read_unlock();

				/* lock-free callback */
				mc_group_notify(mc, &ghead);
			}
			break;
		case MC_MSG_GET_ACL:
			{
				spin_lock(&mc->lock);
				if (mc_acltbl_fillbuf(mc, msgdata, msghdr->buf_len,
						&msghdr->bytes_written, &msghdr->bytes_needed))
					msghdr->status = MC_STATUS_BUFFER_OVERFLOW;
				spin_unlock(&mc->lock);
			}
			break;
		case MC_MSG_GET_MDB:
			{
				rcu_read_lock();
				if (mc_mdbtbl_fillbuf(mc, msgdata, msghdr->buf_len,
							&msghdr->bytes_written, &msghdr->bytes_needed))
					msghdr->status = MC_STATUS_BUFFER_OVERFLOW;
				rcu_read_unlock();
			}
			break;
		case MC_MSG_SET_ROUTER:
			{
				struct __mc_param_value *e = (struct __mc_param_value *)msgdata;
				mc->multicast_router = e->val;
				MC_PRINT(KERN_INFO "%s: %s multicast router.\n", __func__, e->val ? "Enable" : "Disable");
			}
			break;
		default:
			MC_PRINT("mc: Unknown message type 0x%x\n", msgtype);
			msghdr->status = MC_STATUS_INVALID_PARAMETER;
			break;
		} /* switch */
		dev_put(brdev);
	} while (0);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	NETLINK_CB(skb).portid = 0; /* from kernel */
#else
	NETLINK_CB(skb).pid = 0; /* from kernel */
#endif
	NETLINK_CB(skb).dst_group = 0; /* unicast */
	netlink_unicast(mc_nl_sk, skb, pid, MSG_DONTWAIT);
}

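/*
 * Send an unsolicited event to the userspace listener registered via
 * MC_MSG_SET_EVENT_PID. MC_EVENT_MDB_UPDATED carries no payload; the
 * recipient is expected to re-read the table with MC_MSG_GET_MDB.
 */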
void mc_netlink_event_send(struct mc_struct *mc, u32 event_type, u32 event_len, void *event_data)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int send_msg = 1;

	if (!mc || mc->event_pid == MC_INVALID_PID ||
			event_type >= MC_EVENT_MAX)
		return;

	if ((skb = nlmsg_new(event_len, gfp_any())) == NULL) {
		MC_PRINT("nlmsg_new failed, event_type=%d\n", event_type);
		return;
	}

	if ((nlh = nlmsg_put(skb, mc->event_pid, 0, event_type, event_len, 0)) == NULL) {
		MC_PRINT("nlmsg_put failed, event_type=%d\n", event_type);
		kfree_skb(skb);
		return;
	}

	switch (event_type) {
	case MC_EVENT_MDB_UPDATED:
		/* No data; recipient needs to ask for the updated mdb table */
		break;

	default:
		MC_PRINT("event type %d is not supported\n", event_type);
		send_msg = 0;
		kfree_skb(skb);
		break;
	}

	if (send_msg) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
		NETLINK_CB(skb).portid = 0; /* from kernel */
#else
		NETLINK_CB(skb).pid = 0; /* from kernel */
#endif
		NETLINK_CB(skb).dst_group = 0; /* unicast */
		netlink_unicast(mc_nl_sk, skb, mc->event_pid, MSG_DONTWAIT);
	}
}

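/*
 * Create the NETLINK_QCA_MC kernel socket, using the netlink_kernel_cfg
 * API on kernels >= 3.6 and the older six-argument API before that.
 */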
int __init mc_netlink_init(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
	struct netlink_kernel_cfg nlcfg;
	memset(&nlcfg, 0, sizeof(nlcfg));
	nlcfg.groups = 0;
	nlcfg.input = mc_netlink_receive;
	mc_nl_sk = netlink_kernel_create(&init_net,
			NETLINK_QCA_MC,
			&nlcfg);
#else
	mc_nl_sk = netlink_kernel_create(&init_net,
			NETLINK_QCA_MC,
			0,
			mc_netlink_receive,
			NULL,
			THIS_MODULE);
#endif
	if (mc_nl_sk == NULL)
		goto err;

	return 0;
err:
	printk(KERN_ERR "mc: Failed to create netlink socket\n");
	return -ENODEV;
}

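/* Release the netlink socket created by mc_netlink_init(). */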
void mc_netlink_exit(void)
{
	if (mc_nl_sk) {
		sock_release(mc_nl_sk->sk_socket);
		mc_nl_sk = NULL;
	}
}
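
/*
 * Userspace usage sketch: a minimal, hypothetical client that enables
 * snooping on a bridge. It assumes the field names of struct
 * __mcctl_msg_header (if_name, buf_len, status, ...) and the value of
 * NETLINK_QCA_MC from mc_api.h; take the real layout and any alignment
 * padding from that header before relying on this.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_QCA_MC);
 *	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct __mcctl_msg_header hdr;
 *		struct __mc_param_value val;
 *	} req = {0};
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	req.nlh.nlmsg_len = sizeof(req);
 *	req.nlh.nlmsg_type = MC_MSG_SET_ENABLE;
 *	req.nlh.nlmsg_pid = getpid();
 *	strncpy(req.hdr.if_name, "br0", sizeof(req.hdr.if_name) - 1);
 *	req.val.val = 1;                   // 1 = enable snooping
 *	send(fd, &req, sizeof(req), 0);    // kernel unicasts the result back
 *	recv(fd, &req, sizeof(req), 0);    // req.hdr.status holds the outcome
 */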