// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include "br_private.h"

#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);

/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge_mcast_port *pmctx_null = NULL;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	const struct nf_br_ops *nf_ops;
	u8 state = BR_STATE_FORWARDING;
	struct net_bridge_vlan *vlan;
	const unsigned char *dest;
	u16 vid = 0;

	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
	br_tc_skb_miss_set(skb, false);

	rcu_read_lock();
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	br_switchdev_frame_unmark(skb);
	BR_INPUT_SKB_CB(skb)->brdev = dev;
	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid,
				&state, &vlan))
		goto out;

	if (IS_ENABLED(CONFIG_INET) &&
	    (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
	     eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
		br_do_proxy_suppress_arp(skb, br, vid, NULL);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg, _msg;

			msg = br_is_nd_neigh_msg(skb, &_msg);
			if (msg)
				br_do_suppress_nd(skb, br, vid, NULL, msg);
	}

	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true, vid);
	} else if (is_multicast_ether_addr(dest)) {
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
			goto out;
		}
		if (br_multicast_rcv(&brmctx, &pmctx_null, vlan, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_entry_skb_get(brmctx, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst))
			br_multicast_flood(mdst, skb, brmctx, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		br_forward(dst->dst, skb, false, true);
	} else {
		br_flood(br, skb, BR_PKT_UNICAST, false, true, vid);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

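/* Allocate per-bridge state when the device is registered: the FDB and MDB
 * hash tables, the VLAN context and the per-cpu multicast statistics.
 */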
static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	err = br_fdb_hash_init(br);
	if (err)
		return err;

	err = br_mdb_hash_init(br);
	if (err) {
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_vlan_init(br);
	if (err) {
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_multicast_init_stats(br);
	if (err) {
		br_vlan_flush(br);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	netdev_lockdep_set_classes(dev);
	return 0;
}

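/* Tear down what br_dev_init() set up: multicast state and statistics,
 * VLANs, and the MDB/FDB hash tables.
 */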
static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	br_mdb_hash_fini(br);
	br_fdb_hash_fini(br);
}

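/* Bring the bridge up: refresh features, start the queue, enable STP and
 * multicast processing, and join the snooper groups if snooping is enabled.
 */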
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	return 0;
}

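/* ndo_set_rx_mode is intentionally a no-op: the bridge is a software
 * device with no address filter of its own to synchronise.
 */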
static void br_dev_set_multicast_list(struct net_device *dev)
{
}

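/* Re-evaluate port promiscuity when promiscuous mode is toggled on the
 * bridge device itself.
 */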
static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (change & IFF_PROMISC)
		br_manage_promisc(netdev_priv(dev));
}

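/* Bring the bridge down: disable STP and multicast processing, leave the
 * snooper groups if they were joined, and stop the transmit queue.
 */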
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);

	netif_stop_queue(dev);

	return 0;
}

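/* A manual MTU change marks the MTU as user-set, so the bridge stops
 * auto-adjusting it to track the smallest port MTU.
 */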
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* this flag will be cleared if the MTU was automatically adjusted */
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}

/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* dev_set_mac_addr() can be called by a master device on bridge's
	 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
	 */
	if (dev->reg_state != NETREG_REGISTERED)
		return -EBUSY;

	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* Mac address will be changed in br_stp_change_bridge_id(). */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}

static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "bridge", sizeof(info->driver));
	strscpy(info->version, BR_VERSION, sizeof(info->version));
	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

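/* Report the highest link speed among the ports that are up; duplex and
 * port type are reported as unknown/other.
 */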
static int br_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;

	list_for_each_entry(p, &br->port_list, list) {
		struct ethtool_link_ksettings ecmd;
		struct net_device *pdev = p->dev;

		if (!netif_running(pdev) || !netif_oper_up(pdev))
			continue;

		if (__ethtool_get_link_ksettings(pdev, &ecmd))
			continue;

		if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
			continue;

		if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
		    cmd->base.speed < ecmd.base.speed)
			cmd->base.speed = ecmd.base.speed;
	}

	return 0;
}

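/* Constrain the bridge's offload features to what its ports can support */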
static netdev_features_t br_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_features_recompute(br, features);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

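/* Disable netpoll on every bridge port */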
static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}

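/* Allocate a netpoll instance for a port and bind it to the port device */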
static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return err;
}

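/* Enable netpoll on a port only if netpoll is already active on the
 * bridge device itself.
 */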
int br_netpoll_enable(struct net_bridge_port *p)
{
	if (!p->br->dev->npinfo)
		return 0;

	return __br_netpoll_enable(p);
}

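/* Propagate netpoll setup on the bridge device to every existing port,
 * rolling all ports back on failure.
 */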
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}

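/* Detach and free a port's netpoll instance, if any */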
void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free(np);
}

#endif

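/* Enslave a device as a bridge port (ndo_add_slave) */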
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
			struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev, extack);
}

static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}

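/* Describe the bridge hop for dev_fill_forward_path(): look up the
 * destination in the FDB and record the egress bridge port and how VLAN
 * tags are handled on the way out.
 */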
static int br_fill_forward_path(struct net_device_path_ctx *ctx,
				struct net_device_path *path)
{
	struct net_bridge_fdb_entry *f;
	struct net_bridge_port *dst;
	struct net_bridge *br;

	if (netif_is_bridge_port(ctx->dev))
		return -1;

	br = netdev_priv(ctx->dev);

	br_vlan_fill_forward_path_pvid(br, ctx, path);

	f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id);
	if (!f)
		return -1;

	dst = READ_ONCE(f->dst);
	if (!dst)
		return -1;

	if (br_vlan_fill_forward_path_mode(br, dst, path))
		return -1;

	path->type = DEV_PATH_BRIDGE;
	path->dev = dst->br->dev;
	ctx->dev = dst->dev;

	switch (path->bridge.vlan_mode) {
	case DEV_PATH_BR_VLAN_TAG:
		if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
			return -ENOSPC;
		ctx->vlan[ctx->num_vlans].id = path->bridge.vlan_id;
		ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
		ctx->num_vlans++;
		break;
	case DEV_PATH_BR_VLAN_UNTAG_HW:
	case DEV_PATH_BR_VLAN_UNTAG:
		ctx->num_vlans--;
		break;
	case DEV_PATH_BR_VLAN_KEEP:
		break;
	}

	return 0;
}

static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo		 = br_getinfo,
	.get_link		 = ethtool_op_get_link,
	.get_link_ksettings	 = br_get_link_ksettings,
};

static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_uninit		 = br_dev_uninit,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = dev_get_tstats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_siocdevprivate	 = br_dev_siocdevprivate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features        = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_del_bulk	 = br_fdb_delete_bulk,
	.ndo_fdb_dump		 = br_fdb_dump,
	.ndo_fdb_get		 = br_fdb_get,
	.ndo_mdb_add		 = br_mdb_add,
	.ndo_mdb_del		 = br_mdb_del,
	.ndo_mdb_del_bulk	 = br_mdb_del_bulk,
	.ndo_mdb_dump		 = br_mdb_dump,
	.ndo_mdb_get		 = br_mdb_get,
	.ndo_bridge_getlink	 = br_getlink,
	.ndo_bridge_setlink	 = br_setlink,
	.ndo_bridge_dellink	 = br_dellink,
	.ndo_features_check	 = passthru_features_check,
	.ndo_fill_forward_path	 = br_fill_forward_path,
};

static const struct device_type br_type = {
	.name	= "bridge",
};

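/* Called when a bridge device is created: set up ethernet defaults, the
 * netdev/ethtool ops and feature flags, and initialise the net_bridge
 * private data with default STP parameters and timers.
 */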
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->needs_free_netdev = true;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	INIT_HLIST_HEAD(&br->fdb_list);
	INIT_HLIST_HEAD(&br->frame_type_list);
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
	INIT_HLIST_HEAD(&br->mrp_list);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_CFM)
	INIT_HLIST_HEAD(&br->mep_list);
#endif
	spin_lock_init(&br->hash_lock);

	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_stp_addr);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}