// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>

#include "br_private.h"

static struct static_key_false br_switchdev_tx_fwd_offload;

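/* True if the egress port can take over forwarding of this frame in
 * hardware: TX forwarding offload is enabled on the port and the frame
 * did not enter the bridge through the same hardware domain.
 */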
static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
					     const struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return (p->flags & BR_TX_FWD_OFFLOAD) &&
	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}

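/* Returns true if this frame has been marked for TX forwarding offload by at
 * least one egress port.
 */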
bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}

void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
					     struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has been already forwarded to, to avoid further cloning to
 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false.
 */
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
					      struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}

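/* Record the hardware domain of the ingress port in the skb control block, so
 * that egress decisions can be made relative to where the frame came from.
 */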
void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
			      struct sk_buff *skb)
{
	if (p->hwdom)
		BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}

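/* A frame is allowed to egress through this port unless it was already
 * forwarded to the port's hardware domain, either by the TX forwarding
 * offload path (fwd_hwdoms) or, for frames with offload_fwd_mark set, by
 * the hardware it was received from (src_hwdom).
 */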
bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
				  const struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

	return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
		(!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}

/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
				  BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)

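/* Offload a change of bridge port flags to the switchdev driver in two steps:
 * first a PRE_BRIDGE_FLAGS notification in atomic context, giving the driver
 * a chance to veto unsupported flags, then a deferred BRIDGE_FLAGS attribute
 * set that performs the actual change.
 */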
int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"error setting offload flag on port");
		return err;
	}

	return 0;
}

static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->locked = false;
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}

void
br_switchdev_fdb_notify(struct net_bridge *br,
			const struct net_bridge_fdb_entry *fdb, int type)
{
	struct switchdev_notifier_fdb_info item;

	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		return;

	/* Entries with these flags were created using ndm_state == NUD_REACHABLE,
	 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
	 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
	 * Drivers don't know how to deal with these, so don't notify them to
	 * avoid confusing them.
	 */
	if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
	    !test_bit(BR_FDB_STATIC, &fdb->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		return;

	br_switchdev_fdb_populate(br, &item, fdb, NULL);

	switch (type) {
	case RTM_DELNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	case RTM_NEWNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	}
}

int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
			       bool changed, struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
		.changed = changed,
	};

	return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &v.obj);
}

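/* Assign a hardware forwarding domain to a joining port: reuse the hwdom of
 * an existing port behind the same physical switch (same port parent ID),
 * otherwise claim the lowest free non-zero domain number.
 */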
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;
	return 0;
}

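/* Release the hardware domain of a leaving port, unless another port in the
 * bridge still uses it.
 */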
static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}

static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface, and the member ports are from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}

static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
			    const struct net_bridge_fdb_entry *fdb,
			    unsigned long action, const void *ctx)
{
	struct switchdev_notifier_fdb_info item;
	int err;

	br_switchdev_fdb_populate(br, &item, fdb, ctx);

	err = nb->notifier_call(nb, action, &item);
	return notifier_to_errno(err);
}

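/* Replay the bridge FDB towards an attaching (or detaching) notifier as a
 * series of FDB add (or delete) events, so that the driver's view of the
 * database matches the software bridge.
 */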
static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}

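/* Replay the bridge's VLAN to MSTI mappings as VLAN_MSTI attribute
 * notifications, for VLANs that are mapped to a non-default MSTI.
 */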
static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
					 const void *ctx,
					 struct notifier_block *nb,
					 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_attr_info attr_info = {
		.info = {
			.dev = br_dev,
			.extack = extack,
			.ctx = ctx,
		},
	};
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_vlan_group *vg;
	struct switchdev_attr attr;
	struct net_bridge_vlan *v;
	int err;

	attr_info.attr = &attr;
	attr.orig_dev = br_dev;

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (v->msti) {
			attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
			attr.u.vlan_msti.vid = v->vid;
			attr.u.vlan_msti.msti = v->msti;

			err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
						&attr_info);
			err = notifier_to_errno(err);
			if (err)
				return err;
		}
	}

	return 0;
}

static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
			     struct net_device *dev,
			     struct switchdev_obj_port_vlan *vlan,
			     const void *ctx, unsigned long action,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &vlan->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
					  struct net_device *dev,
					  struct net_bridge_vlan_group *vg,
					  const void *ctx, unsigned long action,
					  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v;
	int err = 0;
	u16 pvid;

	if (!vg)
		return 0;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return 0;
}

static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

static void br_switchdev_host_mdb_one(struct net_device *dev,
				      struct net_device *lower_dev,
				      struct net_bridge_mdb_entry *mp,
				      int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

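/* Propagate a host-joined MDB entry as a HOST_MDB object to every device
 * under the bridge, so that hardware delivers traffic for the group to the
 * bridge device itself.
 */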
static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}

static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			    const struct switchdev_obj_port_mdb *mdb,
			    unsigned long action, const void *ctx,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      struct net_device *dev,
				      unsigned long action,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = id,
			.orig_dev = orig_dev,
		},
	};
	struct switchdev_obj_port_mdb *pmdb;

	br_switchdev_mdb_populate(&mdb, mp);

	if (action == SWITCHDEV_PORT_OBJ_ADD &&
	    switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
		/* This event is already in the deferred queue of
		 * events, so this replay must be elided, lest the
		 * driver receives duplicate events for it. This can
		 * only happen when replaying additions, since
		 * modifications are always immediately visible in
		 * br->mdb_list, whereas actual event delivery may be
		 * delayed.
		 */
		return 0;
	}

	pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
	if (!pmdb)
		return -ENOMEM;

	list_add_tail(&pmdb->obj.list, mdb_list);
	return 0;
}

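/* Notify switchdev drivers of an MDB entry change. Entries without a port
 * group are host-joined and are offloaded as HOST_MDB objects; port entries
 * are offloaded as PORT_MDB objects, with a completion callback that marks
 * the port group MDB_PG_FLAGS_OFFLOAD once the hardware has accepted it.
 */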
void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif

static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	/* br_switchdev_mdb_queue_one() will take care to not queue a
	 * replay of an event that is already pending in the switchdev
	 * deferred queue. In order to safely determine that, there
	 * must be no new deferred MDB notifications enqueued for the
	 * duration of the MDB scan. Therefore, grab the write-side
	 * lock to avoid racing with any concurrent IGMP/MLD snooping.
	 */
	spin_lock_bh(&br->multicast_lock);

	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				spin_unlock_bh(&br->multicast_lock);
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				spin_unlock_bh(&br->multicast_lock);
				goto out_free_mdb;
			}
		}
	}

	spin_unlock_bh(&br->multicast_lock);

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err == -EOPNOTSUPP)
			err = 0;
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}

static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err) {
		/* -EOPNOTSUPP not propagated from MDB replay. */
		return err;
	}

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);

	/* Make sure that the device leaving this bridge has seen all
	 * relevant events before it is disassociated. In the normal
	 * case, when the device is directly attached to the bridge,
	 * this is covered by del_nbp(). If the association was indirect
	 * however, e.g. via a team or bond, and the device is leaving
	 * that intermediate device, then the bridge port remains in
	 * place.
	 */
	switchdev_deferred_process();
}

/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 */
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}

void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}

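/* Replay bridge VLANs, MDB entries and FDB entries towards an already
 * offloaded port, without altering its offload state.
 */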
int br_switchdev_port_replay(struct net_bridge_port *p,
			     struct net_device *dev, const void *ctx,
			     struct notifier_block *atomic_nb,
			     struct notifier_block *blocking_nb,
			     struct netlink_ext_ack *extack)
{
	return nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
}