  • only in /netgear-R7800-V1.0.2.28/package/qca-mcs/src/

Lines Matching refs:mc
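
Every match below dereferences a single per-bridge snooping context, struct mc_struct. As a reading aid, here is a partial sketch of that context reconstructed purely from the fields the matched lines touch; the real definition lives in the qca-mcs headers and has more members, and the types, bucket count, and helper-struct shapes below are guesses, not the source's definitions.

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/types.h>

#define MC_HASH_SIZE 256            /* placeholder; the real bucket count is not visible here */

struct mc_querier_entry;            /* opaque: only qqic/qrv/rlist usage is visible below */

struct mc_router_port {             /* shape inferred from the rp.* matches */
	int type;                   /* MC_RTPORT_DEFAULT / MC_RTPORT_FLOOD / ... */
	struct mc_querier_entry *igmp_root_qe, *mld_root_qe;
	struct hlist_head igmp_rlist, mld_rlist;
};

struct mc_struct {
	struct net_device *dev;     /* bridge device the snooper is attached to */
	spinlock_t lock;            /* guards the group database and router state */
	u32 salt;                   /* random salt fed to mc_group_hash() */
	struct hlist_head hash[MC_HASH_SIZE]; /* group database buckets (mc->hash[i]) */
	struct mc_router_port rp;   /* querier/router-port state */
	struct timer_list qtimer;   /* local query timer  -> mc_mdb_query() */
	struct timer_list atimer;   /* ageing timer       -> mc_mdb_cleanup() */
	struct timer_list rtimer;   /* router-port timer  -> mc_router_cleanup() */
	struct timer_list evtimer;  /* event-delay timer  -> mc_event_delay() */
	unsigned long query_interval, query_response_interval;
	unsigned long last_member_interval, membership_interval;
	unsigned long querier_interval, local_query_interval;
	unsigned long ageing_query;
	int last_member_count, startup_query_count, startup_queries_sent;
	int active_group_count;
	int enable, started, multicast_router;
	int m2i3_filter_enable, timeout_gsq_enable, timeout_asq_enable, timeout_gmi_enable;
	int ignore_tbit, enable_retag, convert_all, forward_policy, dscp, event_pid;
	/* plus the igmp_acl/mld_acl pattern tables set up in mc_acl_table_init() */
};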

53 static struct sk_buff *mc_ipv4_alloc_query(struct mc_struct *mc, __be32 group, __u8 *mac, int is_v3)
60 struct net_device *dev = mc->dev;
107 ih3->code = (group ? mc->last_member_interval :
108 mc->query_response_interval) / (HZ / IGMP_TIMER_SCALE);
121 ih->code = (group ? mc->last_member_interval :
122 mc->query_response_interval) / (HZ / IGMP_TIMER_SCALE);
136 static struct sk_buff *mc_ipv6_alloc_query(struct mc_struct *mc,
146 struct net_device *dev = mc->dev;
204 interval = !ipv6_addr_any(group) ? mc->last_member_interval :
205 mc->query_response_interval;
225 interval = !ipv6_addr_any(group) ? mc->last_member_interval :
226 mc->query_response_interval;
246 static struct sk_buff *mc_alloc_query(struct mc_struct *mc,
251 return mc_ipv4_alloc_query(mc, group->u.ip4, mac, type);
254 return mc_ipv6_alloc_query(mc, &group->u.ip6, mac, type);
260 static void mc_send_query(struct mc_struct *mc, void *port,
265 if (!mc || !group)
268 skb = mc_alloc_query(mc, group, mac, type);
276 skb->dev = mc->dev;
285 static void mc_ipv4_rp_reset(struct mc_struct *mc, struct mc_router_port *rp)
302 static void mc_ipv6_rp_reset(struct mc_struct *mc, struct mc_router_port *rp)
385 fg->pg->mdb->mc->active_group_count--;
389 mod_timer(&fg->pg->mdb->mc->evtimer, jiffies + msecs_to_jiffies(MC_EVENT_DELAY_MS));
405 fg->pg->mdb->mc->active_group_count++;
407 mod_timer(&pg->mdb->mc->evtimer, jiffies + msecs_to_jiffies(MC_EVENT_DELAY_MS));
498 mc_group_notify_one(mdb->mc, &mdb->group);
504 static void mc_atimer_reset(struct mc_struct *mc)
506 struct mc_querier_entry *igmp_root_qe = mc->rp.igmp_root_qe;
508 igmp_root_qe->qqic * igmp_root_qe->qrv : mc->membership_interval;
510 struct mc_querier_entry *mld_root_qe = mc->rp.mld_root_qe;
518 if (timer_pending(&mc->atimer) ?
519 time_after(mc->atimer.expires, jiffies + expires) :
520 try_to_del_timer_sync(&mc->atimer) >= 0) {
521 mod_timer(&mc->atimer, jiffies + expires);
527 static void mc_rtimer_reset(struct mc_struct *mc)
529 struct mc_querier_entry *igmp_root_qe = mc->rp.igmp_root_qe;
531 igmp_root_qe->qqic * igmp_root_qe->qrv : mc->querier_interval;
533 struct mc_querier_entry *mld_root_qe = mc->rp.mld_root_qe;
541 if (timer_pending(&mc->rtimer) ?
542 time_after(mc->rtimer.expires, jiffies + expires) :
543 try_to_del_timer_sync(&mc->rtimer) >= 0) {
544 if (!mc->started)
546 mod_timer(&mc->rtimer, jiffies + expires);
555 struct mc_struct *mc = mdb->mc;
559 spin_lock_bh(&mc->lock);
569 spin_unlock_bh(&mc->lock);
571 mc_atimer_reset(mc);
574 static struct mc_mdb_entry *mc_mdb_create(struct mc_struct *mc,
580 if (mc->active_group_count >= MC_GROUP_MAX) {
590 mdb->mc = mc;
598 static struct mc_fdb_group *mc_fdb_group_get(struct mc_struct *mc,
605 &mc->hash[mc_group_hash(mc->salt, group->u.ip4)];
631 static struct mc_fdb_group *mc_update_fdb_group(struct mc_struct *mc,
652 spin_lock_bh(&mc->lock);
654 spin_unlock_bh(&mc->lock);
666 static struct mc_fdb_group *mc_update_mdb(struct mc_struct *mc,
674 &mc->hash[mc_group_hash(mc->salt, group->u.ip4)];
680 spin_lock_bh(&mc->lock);
683 mdb = mc_mdb_create(mc, head, group);
685 spin_unlock_bh(&mc->lock);
691 if ((fg = mc_update_fdb_group(mc, &mdb->pslist, fdb->addr.addr, now, port)))
696 spin_lock_bh(&mc->lock);
701 spin_unlock_bh(&mc->lock);
708 spin_lock_bh(&mc->lock);
713 spin_unlock_bh(&mc->lock);
726 static struct mc_fdb_group *mc_ipv4_report(struct mc_struct *mc,
742 return mc_update_mdb(mc, &mc_group, skb);
752 static struct mc_fdb_group *mc_ipv6_report(struct mc_struct *mc,
760 if (!mc->ignore_tbit && !MC_CHECK_TBIT(group)) {
774 return mc_update_mdb(mc, &mc_group, skb);
940 static void mc_leave_group(struct mc_struct *mc,
946 fg = mc_fdb_group_get(mc, group, skb);
948 spin_lock_bh(&mc->lock);
955 spin_unlock_bh(&mc->lock);
959 static void mc_ipv4_leave_group(struct mc_struct *mc,
975 mc_leave_group(mc, &br_group, skb);
979 static void mc_ipv6_leave_group(struct mc_struct *mc,
995 mc_leave_group(mc, &br_group, skb);
1165 mod_timer(&fg->pg->mdb->mc->evtimer, jiffies + msecs_to_jiffies(MC_EVENT_DELAY_MS));
1258 static int mc_ipv4_igmp3_report(struct mc_struct *mc,
1294 if (mc_find_acl_rule(&mc->igmp_acl, group, NULL,
1296 !(fg = mc_ipv4_report(mc, group, skb)) ||
1307 mc_ipv4_leave_group(mc, group, skb);
1308 if (mc->m2i3_filter_enable)
1316 spin_lock(&mc->lock);
1318 if (mc->m2i3_filter_enable && mdb->filter_mode && type == IGMPV3_CHANGE_TO_INCLUDE) {
1332 spin_unlock(&mc->lock);
1334 if (!mc->m2i3_filter_enable || !mdb->filter_mode || type == IGMPV3_MODE_IS_INCLUDE || !tmp.nsrcs) {
1351 spin_lock(&mc->lock);
1371 spin_unlock(&mc->lock);
1375 if (!mc->m2i3_filter_enable || type == IGMPV3_MODE_IS_EXCLUDE || !mdb->x.nsrcs)
1385 spin_lock(&mc->lock);
1394 spin_unlock(&mc->lock);
1401 mc_ipv4_leave_group(mc, group, skb);
1406 mc_ipv4_leave_group(mc, group, skb);
1409 spin_lock(&mc->lock);
1426 spin_unlock(&mc->lock);
1428 if (!mc->m2i3_filter_enable)
1551 static int mc_ipv6_mld2_report(struct mc_struct *mc, struct sk_buff *skb)
1589 if (mc_find_acl_rule(&mc->mld_acl, 0, (void *)&grec->grec_mca,
1591 !(fg = mc_ipv6_report(mc, &grec->grec_mca, skb)) ||
1602 mc_ipv6_leave_group(mc, &grec->grec_mca, skb);
1603 if (mc->m2i3_filter_enable)
1610 spin_lock(&mc->lock);
1612 if (mc->m2i3_filter_enable && mdb->filter_mode && grec->grec_type == MLD2_CHANGE_TO_INCLUDE) {
1625 spin_unlock(&mc->lock);
1627 if (!mc->m2i3_filter_enable || !mdb->filter_mode || grec->grec_type == MLD2_MODE_IS_INCLUDE || !tmp.nsrcs) {
1644 spin_lock(&mc->lock);
1664 spin_unlock(&mc->lock);
1668 if (!mc->m2i3_filter_enable || grec->grec_type == MLD2_MODE_IS_EXCLUDE || !mdb->x.nsrcs)
1678 spin_lock(&mc->lock);
1687 spin_unlock(&mc->lock);
1694 mc_ipv6_leave_group(mc, &grec->grec_mca, skb);
1699 mc_ipv6_leave_group(mc, &grec->grec_mca, skb);
1705 spin_lock(&mc->lock);
1721 spin_unlock(&mc->lock);
1723 if (!mc->m2i3_filter_enable)
1779 static void mc_query_cycle_start(struct mc_struct *mc,
1791 if (!mc->timeout_gsq_enable)
1793 head = &mc->hash[mc_group_hash(mc->salt, group->u.ip4)];
1805 if (!mc->timeout_asq_enable)
1809 os_hlist_for_each_entry_rcu(mdb, h, &mc->hash[i], hlist) {
1823 mc_atimer_reset(mc);
1824 mc_rtimer_reset(mc);
1827 static void mc_ipv4_query(struct mc_struct *mc, struct sk_buff *skb, void *port)
1832 struct mc_router_port *rp = &mc->rp;
1849 qqic = mc->query_interval;
1850 qrv = mc->last_member_count;
1853 max_resp_time = mc->query_response_interval;
1864 max_resp_time = ih3->code ? IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : mc->query_response_interval;
1865 qqic = ih3->qqic ? IGMPV3_QQIC(ih3->qqic) * HZ : mc->query_interval;
1866 qrv = ih3->qrv ? ih3->qrv : mc->last_member_count;
1873 spin_lock_bh(&mc->lock);
1889 mc_ipv4_rp_reset(mc, rp);
1896 spin_unlock_bh(&mc->lock);
1902 mc_query_cycle_start(mc, &mc_group, ETH_P_IP, rp->igmp_root_qe);
1904 mc_query_cycle_start(mc, NULL, ETH_P_IP, rp->igmp_root_qe);
1909 static int mc_ipv6_query(struct mc_struct *mc, struct sk_buff *skb, void *port)
1914 struct mc_router_port *rp = &mc->rp;
1932 qqic = mc->query_interval;
1933 qrv = mc->last_member_count;
1936 max_resp_time = mc->query_response_interval;
1943 max_resp_time = mld2q->mld2q_mrc ? msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))) : mc->query_response_interval;
1944 qqic = mld2q->mld2q_qqic ? IGMPV3_QQIC(mld2q->mld2q_qqic) * HZ : mc->query_interval;
1945 qrv = mld2q->mld2q_qrv ? mld2q->mld2q_qrv : mc->last_member_count;
1953 spin_lock_bh(&mc->lock);
1969 mc_ipv6_rp_reset(mc, rp);
1976 spin_unlock_bh(&mc->lock);
1982 mc_query_cycle_start(mc, &mc_group, ETH_P_IPV6, rp->mld_root_qe);
1984 mc_query_cycle_start(mc, NULL, ETH_P_IPV6, rp->mld_root_qe);
1990 static int mc_ipv4_rcv(struct mc_struct *mc, struct sk_buff *skb)
2005 if (mc_find_acl_rule(&mc->igmp_acl, iph->daddr, NULL,
2008 if (mc->rp.type == MC_RTPORT_FLOOD)
2065 mc_querier_entry_find(&mc->rp.igmp_rlist, MC_SKB_CB(skb2)->port))
2075 if (mc_ipv4_report(mc, ih->group, skb) == NULL)
2083 err = mc_ipv4_igmp3_report(mc, skb2);
2088 mc_ipv4_query(mc, skb2, MC_SKB_CB(skb)->port);
2094 mc_ipv4_leave_group(mc, ih->group, skb);
2108 static int mc_ipv6_rcv(struct mc_struct *mc, struct sk_buff *skb)
2127 if (mc_find_acl_rule(&mc->mld_acl, 0, (void *)&ip6h->daddr,
2130 if (mc->rp.type == MC_RTPORT_FLOOD)
2177 mc_querier_entry_find(&mc->rp.mld_rlist, MC_SKB_CB(skb2)->port)) {
2223 if (mc_ipv6_report(mc, &mld->mld_mca, skb) == NULL)
2231 err = mc_ipv6_mld2_report(mc, skb2);
2237 err = mc_ipv6_query(mc, skb2, MC_SKB_CB(skb)->port);
2245 mc_ipv6_leave_group(mc, &mld->mld_mca, skb);
2263 struct mc_struct *mc = MC_DEV(NULL);
2266 if (!mac || !mc)
2274 if (!mc || !mc->started)
2279 spin_lock_bh(&mc->lock);
2284 os_hlist_for_each_entry_rcu(mdb, mdbh, &mc->hash[i], hlist) {
2306 spin_unlock_bh(&mc->lock);
2323 struct mc_struct *mc;
2329 mc = MC_DEV(p->br->dev);
2331 if (!mc || event != RTM_DELLINK)
2334 if (!mc->started)
2339 spin_lock_bh(&mc->lock);
2341 os_hlist_for_each_entry_rcu(qe, h, &mc->rp.igmp_rlist, rlist) {
2348 mc_ipv4_rp_reset(mc, &mc->rp);
2351 os_hlist_for_each_entry_rcu(qe, h, &mc->rp.mld_rlist, rlist) {
2358 mc_ipv6_rp_reset(mc, &mc->rp);
2365 os_hlist_for_each_entry_rcu(mdb, mdbh, &mc->hash[i], hlist) {
2380 spin_unlock_bh(&mc->lock);
2382 mc_rtimer_reset(mc);
2388 int mc_rcv(struct mc_struct *mc, struct sk_buff *skb)
2390 if (!mc || !mc->started) {
2402 return mc_ipv4_rcv(mc, skb);
2405 return mc_ipv6_rcv(mc, skb);
2412 static void mc_rlist_flush(struct mc_struct *mc)
2417 os_hlist_for_each_entry_safe(qe, h, n, &mc->rp.igmp_rlist, rlist) {
2420 mc_ipv4_rp_reset(mc, &mc->rp);
2422 os_hlist_for_each_entry_safe(qe, h, n, &mc->rp.mld_rlist, rlist) {
2425 mc_ipv6_rp_reset(mc, &mc->rp);
2427 mc_rtimer_reset(mc);
2431 static void mc_mdb_flush(struct mc_struct *mc)
2438 os_hlist_for_each_entry_safe(mdb, h, n, &mc->hash[i], hlist) {
2446 struct mc_struct *mc;
2448 mc = rcu_dereference(_g_mcs);
2449 if (!mc)
2455 if (mc->dev != dev)
2458 return mc;
2461 static int mc_dev_register(struct mc_struct *mc)
2469 rcu_assign_pointer(_g_mcs, mc);
2473 static int mc_dev_unregister(struct mc_struct *mc)
2478 if (oldmc != mc)
2485 int mc_open(struct mc_struct *mc)
2487 struct net_bridge *br = netdev_priv(mc->dev);
2489 if (!mc) {
2490 printk(KERN_ERR "%s: mc module is not registered!\n", __func__);
2494 if (!mc->enable) {
2495 MC_PRINT(KERN_DEBUG "%s: mc open failed, feature is disabled\n", __func__);
2499 if (mc->started) {
2500 MC_PRINT(KERN_DEBUG "%s: mc function is already enabled!\n", __func__);
2504 spin_lock(&mc->lock);
2505 mc->ageing_query = jiffies;
2506 mc->startup_queries_sent = 0;
2507 mc->started = 1;
2510 mod_timer(&mc->qtimer, jiffies + br->forward_delay);
2511 mod_timer(&mc->atimer, jiffies + br->forward_delay);
2512 mod_timer(&mc->rtimer, jiffies + br->forward_delay);
2513 spin_unlock(&mc->lock);
2518 int mc_stop(struct mc_struct *mc)
2520 if (!mc) {
2521 printk(KERN_ERR "%s: mc module is not registered!\n", __func__);
2525 if (!mc->enable) {
2526 MC_PRINT(KERN_DEBUG "%s: mc stop failed, feature is disabled\n", __func__);
2530 if (!mc->started) {
2531 MC_PRINT(KERN_DEBUG "%s: mc function is already disabled!\n", __func__);
2535 spin_lock_bh(&mc->lock);
2536 mc->started = 0;
2538 mc_mdb_flush(mc);
2539 mc_rlist_flush(mc);
2541 del_timer_sync(&mc->qtimer);
2542 del_timer_sync(&mc->atimer);
2543 del_timer_sync(&mc->rtimer);
2544 del_timer_sync(&mc->evtimer);
2546 spin_unlock_bh(&mc->lock);
2554 struct mc_struct *mc = (struct mc_struct *)data;
2555 unsigned long next_timer = jiffies + mc->querier_interval;
2558 struct mc_router_port *rp = &mc->rp;
2560 spin_lock_bh(&mc->lock);
2572 mc_ipv4_rp_reset(mc, rp);
2587 mc_ipv6_rp_reset(mc, rp);
2589 spin_unlock_bh(&mc->lock);
2590 mod_timer(&mc->rtimer, round_jiffies(next_timer + HZ/4));
2592 mc_rtimer_reset(mc);
2598 struct mc_struct *mc = (struct mc_struct *)data;
2599 unsigned long next_timer = jiffies + mc->local_query_interval;
2601 if (mc->timeout_gmi_enable)
2608 os_hlist_for_each_entry_rcu(mdb, mdbh, &mc->hash[i], hlist) {
2615 if (mdb->group.pro == htons(ETH_P_IP) && mc->rp.igmp_root_qe)
2618 else if (mdb->group.pro == htons(ETH_P_IPV6) && mc->rp.mld_root_qe)
2629 mc_send_query(mc, pg->port, &mdb->group, mc_fdb_mac_get(fg), fg->filter_mode);
2635 mod_timer(&mc->qtimer, round_jiffies(next_timer + HZ/4));
2641 struct mc_struct *mc = (struct mc_struct *)data;
2642 struct mc_querier_entry *igmp_root_qe = mc->rp.igmp_root_qe;
2644 unsigned long igmp_expire_time = mc->membership_interval;
2646 struct mc_querier_entry *mld_root_qe = mc->rp.mld_root_qe;
2647 unsigned long mld_expire_time = mc->membership_interval;
2653 if (!mc->timeout_gmi_enable) {
2654 next_timer = now + mc->membership_interval;
2667 spin_lock_bh(&mc->lock);
2672 os_hlist_for_each_entry_rcu(mdb, mdbh, &mc->hash[i], hlist) {
2676 unsigned long expire_time = mc->membership_interval;
2717 __mc_send_group_uery(mc, port, group);
2722 spin_unlock_bh(&mc->lock);
2724 mod_timer(&mc->atimer, round_jiffies(next_timer + HZ/4));
2727 static void mc_acl_table_init(struct mc_struct *mc)
2741 mc->igmp_acl.patterns[0].rule = MC_ACL_RULE_SWM;
2742 mc->igmp_acl.patterns[0].ip.ip4 = htonl(INADDR_ALLHOSTS_GROUP);
2743 mc->igmp_acl.patterns[0].ip_mask.ip4_mask = htonl(INADDR_BROADCAST);
2746 mc->igmp_acl.patterns[1].rule = MC_ACL_RULE_MANAGEMENT;
2747 mc->igmp_acl.patterns[1].ip.ip4 = htonl(INADDR_UNSPEC_GROUP);
2748 mc->igmp_acl.patterns[1].ip_mask.ip4_mask = htonl(0xffff0000);
2751 mc->igmp_acl.patterns[2].rule = MC_ACL_RULE_MANAGEMENT;
2752 mc->igmp_acl.patterns[2].ip.ip4 = htonl(0xefff0000);
2753 mc->igmp_acl.patterns[2].ip_mask.ip4_mask = htonl(0xffff0000);
2756 mc->igmp_acl.patterns[3].rule = MC_ACL_RULE_NON_SNOOPING;
2757 mc->igmp_acl.patterns[3].ip.ip4 = htonl(0xeffffffa);
2758 mc->igmp_acl.patterns[3].ip_mask.ip4_mask = htonl(INADDR_BROADCAST);
2761 mc->igmp_acl.patterns[4].rule = MC_ACL_RULE_NON_SNOOPING;
2762 mc->igmp_acl.patterns[4].ip.ip4 = htonl(0xe00000fb);
2763 mc->igmp_acl.patterns[4].ip_mask.ip4_mask = htonl(INADDR_BROADCAST);
2766 mc->igmp_acl.patterns[5].rule = MC_ACL_RULE_NON_SNOOPING;
2767 mc->igmp_acl.patterns[5].ip.ip4 = htonl(0xe00000fc);
2768 mc->igmp_acl.patterns[5].ip_mask.ip4_mask = htonl(INADDR_BROADCAST);
2773 mc->igmp_acl.patterns[6].rule = MC_ACL_RULE_MULTICAST;
2774 memcpy(mc->igmp_acl.patterns[6].mac, mc_mac0, ETH_ALEN);
2775 memcpy(mc->igmp_acl.patterns[6].mac_mask, mc_mac0_mask, ETH_ALEN);
2777 mc->igmp_acl.pattern_count = 7;
2781 mc->mld_acl.patterns[0].rule = MC_ACL_RULE_SWM;
2782 ipv6_addr_set(&mc->mld_acl.patterns[0].ip.ip6, htonl(0xff010000), 0, 0, htonl(1));
2783 ipv6_addr_set(&mc->mld_acl.patterns[0].ip_mask.ip6_mask, htonl(0xffffffff),
2787 mc->mld_acl.patterns[1].rule = MC_ACL_RULE_SWM;
2788 ipv6_addr_set(&mc->mld_acl.patterns[1].ip.ip6, htonl(0xff020000), 0, 0, htonl(1));
2789 ipv6_addr_set(&mc->mld_acl.patterns[1].ip_mask.ip6_mask, htonl(0xffffffff),
2794 mc->mld_acl.patterns[2].rule = MC_ACL_RULE_MANAGEMENT;
2795 memcpy(mc->mld_acl.patterns[2].mac, mc_mac2, ETH_ALEN);
2796 memcpy(mc->mld_acl.patterns[2].mac_mask, mc_mac2_mask, ETH_ALEN);
2799 mc->mld_acl.patterns[2].rule = MC_ACL_RULE_MANAGEMENT;
2800 ipv6_addr_set(&mc->mld_acl.patterns[2].ip.ip6, htonl(0xff000000), 0, 0, 0);
2801 ipv6_addr_set(&mc->mld_acl.patterns[2].ip_mask.ip6_mask, htonl(0xfff00000), 0, 0, 0);
2806 mc->mld_acl.patterns[3].rule = MC_ACL_RULE_MULTICAST;
2807 memcpy(mc->mld_acl.patterns[3].mac, mc_mac1, ETH_ALEN);
2808 memcpy(mc->mld_acl.patterns[3].mac_mask, mc_mac1_mask, ETH_ALEN);
2810 mc->mld_acl.pattern_count = 4;
2825 struct mc_struct *mc = MC_DEV(dev);
2827 if (mc) {
2832 mc = kzalloc(sizeof(struct mc_struct), GFP_KERNEL);
2833 if (!mc) {
2838 spin_lock_init(&mc->lock);
2839 get_random_bytes(&mc->salt, sizeof(mc->salt));
2840 mc->dev = dev;
2841 mc->enable = 1;
2842 INIT_HLIST_HEAD(&mc->rp.igmp_rlist);
2844 INIT_HLIST_HEAD(&mc->rp.mld_rlist);
2847 mc->last_member_count = 2;
2848 mc->startup_query_count = 2;
2850 mc->last_member_interval = HZ;
2851 mc->query_response_interval = 10 * HZ;
2852 mc->query_interval = 125 * HZ;
2853 mc->querier_interval = 255 * HZ;
2854 mc->membership_interval = 260 * HZ;
2855 mc->local_query_interval = 125 * HZ;
2857 mc->enable_retag = 1;
2858 mc->forward_policy = MC_POLICY_DROP; /* DROP as default policy */
2859 mc->dscp = MC_DEFAULT_DSCP;
2860 mc->convert_all = 1; /* Convert all as default */
2861 mc->timeout_gsq_enable = 1; /* enable timeout from group specific query */
2862 mc->timeout_asq_enable = 0; /* disable timeout from all system query */
2863 mc->timeout_gmi_enable = 1; /* enable timeout from membership interval */
2864 mc->m2i3_filter_enable = 0; /* disable mldv2/igmpv3 leave filter */
2865 mc->ignore_tbit = 0; /* Allow IPv6 multicast groups that don't have the T-Bit enabled to be snooped */
2866 mc->multicast_router = 1; /* Enable router mode */
2867 mc->rp.type = MC_RTPORT_DEFAULT; /* If a querier exists, forward IGMP/MLD messages to the router port, else flood to all ports. */
2869 mc->event_pid = MC_INVALID_PID;
2871 mc_acl_table_init(mc);
2872 setup_timer(&mc->qtimer, mc_mdb_query,
2873 (unsigned long)mc);
2874 setup_timer(&mc->atimer, mc_mdb_cleanup,
2875 (unsigned long)mc);
2876 setup_timer(&mc->rtimer, mc_router_cleanup,
2877 (unsigned long)mc);
2878 setup_timer(&mc->evtimer, mc_event_delay,
2879 (unsigned long)mc);
2881 if (mc_dev_register(mc) < 0) {
2882 kfree(mc);
2886 mc_open(mc);
2893 struct mc_struct *mc = MC_DEV(dev);
2895 if (!mc)
2898 mc_stop(mc);
2899 mc_dev_unregister(mc);
2900 kfree(mc);
2910 struct mc_struct *mc;
2917 mc = MC_DEV(NULL);
2918 if (!mc)
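
One idiom in the listing deserves a note: the conditional timer re-arm at file lines 518-521 (mc_atimer_reset) and 541-546 (mc_rtimer_reset). Below is a standalone sketch of the same construct with the reasoning spelled out; the helper name is illustrative, not from the source.

#include <linux/jiffies.h>
#include <linux/timer.h>

/*
 * Re-arm @t to fire at jiffies + @delay, but only when that is safe:
 *  - if the timer is pending, only pull the deadline in (time_after()),
 *    never push an already-nearer expiry further out;
 *  - if it is not pending, try_to_del_timer_sync() >= 0 confirms the
 *    callback is not currently running on another CPU, so mod_timer()
 *    cannot race with a concurrent expiry.
 * The same construct also appears in mainline net/bridge/br_multicast.c
 * for its group timers.
 */
static void mc_timer_shorten(struct timer_list *t, unsigned long delay)
{
	if (timer_pending(t) ?
	    time_after(t->expires, jiffies + delay) :
	    try_to_del_timer_sync(t) >= 0)
		mod_timer(t, jiffies + delay);
}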