Lines matching refs: mp

231 struct net_bridge_mdb_entry *mp,
247 ifindex = mp->br->dev->ifindex;
248 mtimer = &mp->timer;
253 e.vid = mp->addr.vid;
254 if (mp->addr.proto == htons(ETH_P_IP)) {
255 e.addr.u.ip4 = mp->addr.dst.ip4;
257 } else if (mp->addr.proto == htons(ETH_P_IPV6)) {
258 e.addr.u.ip6 = mp->addr.dst.ip6;
261 ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
264 e.addr.proto = mp->addr.proto;
276 switch (mp->addr.proto) {
278 dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
279 if (mp->addr.src.ip4) {
281 mp->addr.src.ip4))
288 dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
289 if (!ipv6_addr_any(&mp->addr.src.ip6)) {
291 &mp->addr.src.ip6))
298 ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
323 struct net_bridge_mdb_entry *mp;
330 hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
343 if (!s_pidx && mp->host_joined) {
344 err = __mdb_fill_info(skb, mp, NULL);
351 for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
358 err = __mdb_fill_info(skb, mp, p);
415 struct net_bridge_mdb_entry *mp,
438 if (__mdb_fill_info(skb, mp, pg))
521 struct net_bridge_mdb_entry *mp,
529 br_switchdev_mdb_notify(dev, mp, pg, type);
535 err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
722 struct net_bridge_mdb_entry *mp,
737 br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);
743 struct net_bridge_mdb_entry *mp,
752 for (pp = &mp->ports;
760 return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
776 br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
935 struct net_bridge_mdb_entry *mp,
958 br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);
967 struct net_bridge_mdb_entry *mp,
977 for (pp = &mp->ports;
985 return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
1007 br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
1027 struct net_bridge_mdb_entry *mp;
1037 mp = br_multicast_new_group(br, &group);
1038 if (IS_ERR(mp))
1039 return PTR_ERR(mp);
1043 if (mp->host_joined) {
1048 br_multicast_host_join(brmctx, mp, false);
1049 br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);
1058 return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
1060 return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
1344 struct net_bridge_mdb_entry *mp;
1351 mp = br_mdb_ip_get(br, &ip);
1352 if (!mp)
1356 if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
1357 br_multicast_host_leave(mp, false);
1359 br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
1360 if (!mp->ports && netif_running(br->dev))
1361 mod_timer(&mp->timer, jiffies);
1365 for (pp = &mp->ports;
1371 br_multicast_del_pg(mp, p, pp);
1459 struct net_bridge_mdb_entry *mp,
1470 state = br_group_is_l2(&mp->addr) ? MDB_PERMANENT : 0;
1474 br_multicast_host_leave(mp, true);
1475 if (!mp->ports && netif_running(br->dev))
1476 mod_timer(&mp->timer, jiffies);
1480 struct net_bridge_mdb_entry *mp,
1486 for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;) {
1507 br_multicast_del_pg(mp, p, pp);
1514 struct net_bridge_mdb_entry *mp;
1521 hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
1522 if (desc->vid && desc->vid != mp->addr.vid)
1525 br_mdb_flush_host(br, mp, desc);
1526 br_mdb_flush_pgs(br, mp, desc);
1583 br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp)
1594 if (mp->host_joined)
1597 for (pg = mlock_dereference(mp->ports, mp->br); pg;
1598 pg = mlock_dereference(pg->next, mp->br))
1605 struct net_bridge_mdb_entry *mp, u32 portid,
1621 bpm->ifindex = mp->br->dev->ifindex;
1633 if (mp->host_joined) {
1634 err = __mdb_fill_info(skb, mp, NULL);
1639 for (pg = mlock_dereference(mp->ports, mp->br); pg;
1640 pg = mlock_dereference(pg->next, mp->br)) {
1641 err = __mdb_fill_info(skb, mp, pg);
1661 struct net_bridge_mdb_entry *mp;
1676 mp = br_mdb_ip_get(br, &group);
1677 if (!mp) {
1683 skb = br_mdb_get_reply_alloc(mp);
1689 err = br_mdb_get_reply_fill(skb, mp, portid, seq);
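Taken together, these references sketch the shape of the MDB entry the bridge code is working with. Below is a minimal, non-authoritative sketch of the two structures implied by the usages above; field names are inferred from the accesses shown here (mp->addr.proto, mp->addr.dst.mac_addr, mp->ports, mp->host_joined, mp->timer, mp->mdb_node, ...), the real definitions live in the bridge driver's br_private.h and contain additional bookkeeping not touched in this listing.

/* Sketch only: relies on kernel types (__be32, struct in6_addr, ETH_ALEN,
 * struct timer_list, struct hlist_node) from linux/types.h, linux/in6.h,
 * linux/if_ether.h, linux/timer.h, linux/list.h.
 */
struct br_ip {                                  /* multicast group address */
	union {
		__be32		ip4;
		struct in6_addr	ip6;
	} src;                                  /* source, for (S,G) entries */
	union {
		__be32		ip4;
		struct in6_addr	ip6;
		unsigned char	mac_addr[ETH_ALEN];
	} dst;                                  /* group destination */
	__be16	proto;                          /* ETH_P_IP, ETH_P_IPV6, or 0 for L2 groups */
	__u16	vid;                            /* VLAN id */
};

struct net_bridge_mdb_entry {
	struct net_bridge	*br;            /* owning bridge: mp->br->dev, mp->br->multicast_ctx */
	struct net_bridge_port_group __rcu *ports; /* per-port group list walked via &mp->ports */
	struct br_ip		addr;           /* group address, protocol and VLAN */
	bool			host_joined;    /* the bridge device itself joined the group */
	struct timer_list	timer;          /* group expiry: mod_timer(&mp->timer, jiffies) */
	struct hlist_node	mdb_node;       /* linkage in br->mdb_list for the dump/flush walks */
	/* rhashtable node, RCU head and other bookkeeping omitted */
};

The references above mostly fall into three patterns: reading mp->addr to fill a netlink br_mdb_entry (__mdb_fill_info, br_mdb_get_reply_fill), walking mp->ports under RCU or the multicast lock (rcu_dereference / mlock_dereference), and toggling mp->host_joined while re-arming mp->timer on host join/leave; the remaining hits simply pass mp through to the notify and delete helpers.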