/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <asm/uaccess.h>
#include "br_private.h"

#include <typedefs.h>
#include <bcmdefs.h>
#ifdef HNDCTF
#include <ctf/hndctf.h>
#endif /* HNDCTF */

/* net device transmit always called with BH disabled */
netdev_tx_t BCMFASTPATH_HOST br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	const unsigned char *dest = skb->data;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);

#ifdef HNDCTF
	/* For Broadstream iQoS inbound traffic.
	 * Inbound traffic needs qdisc rules applied on the br interface,
	 * so CTF uses dev_queue_xmit() of the bridge device to transmit
	 * the packet. Add a fastpath here to forward the packet from br
	 * to eth0/1/2 directly if it is cached in a CTF IP entry.
	 */
	if (CTF_IS_PKTTOBR(skb)) {
		struct net_device *txif = (struct net_device *)skb->ctf_ipc_txif;
		const struct net_device_ops *ops = txif->netdev_ops;
		struct net_device *tmpdev = skb->dev;

		if (ops) {
			int rc;

			skb->dev = txif;
			rc = ops->ndo_start_xmit(skb, txif);
			if (rc == NETDEV_TX_OK)
				return rc;
			/* Fastpath refused the skb: restore the device and
			 * fall through to the normal bridge transmit path.
			 */
			skb->dev = tmpdev;
		}
	}
#endif /* HNDCTF */

#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
		br_nf_pre_routing_finish_bridge_slow(skb);
		return NETDEV_TX_OK;
	}
#endif

	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	BR_INPUT_SKB_CB(skb)->brdev = dev;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	/* Dispatch: multicast goes through the snooping MDB when possible,
	 * known unicast through the FDB, anything else is flooded.
	 */
	rcu_read_lock();
	if (is_multicast_ether_addr(dest)) {
		/* In netpoll transmit context, skip snooping and flood. */
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood_deliver(br, skb);
			goto out;
		}
		if (br_multicast_rcv(br, NULL, skb)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(br, skb);
		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
			br_multicast_deliver(mdst, skb);
		else
			br_flood_deliver(br, skb);
	} else if ((dst = __br_fdb_get(br, dest)) != NULL)
		br_deliver(dst->dst, skb);
	else
		br_flood_deliver(br, skb);

out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
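/* Bring the bridge up: recompute the offload feature set from the
 * current ports, start the transmit queue, then enable STP and
 * multicast snooping.
 */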
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_features_recompute(br);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}

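/* No-op: bridge ports run in promiscuous mode, so there is no device
 * multicast filter to update here.
 */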
static void br_dev_set_multicast_list(struct net_device *dev)
{
}

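/* Take the bridge down: stop STP and multicast snooping before halting
 * the transmit queue.
 */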
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}

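/* Fold the per-CPU counters into a single rtnl_link_stats64. Each CPU's
 * counters are sampled under its u64_stats seqcount so that 64-bit
 * reads are consistent on 32-bit hosts.
 */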
static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *stats)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_cpu_netstats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct br_cpu_netstats *bstats
			= per_cpu_ptr(br->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&bstats->syncp);
			memcpy(&tmp, bstats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&bstats->syncp, start));
		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	return stats;
}

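/* The bridge MTU may not exceed the smallest MTU among the attached
 * ports (br_min_mtu()); 68 is the IPv4 minimum link MTU.
 */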
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	if (new_mtu < 68 || new_mtu > br_min_mtu(br))
		return -EINVAL;

	dev->mtu = new_mtu;

#ifdef CONFIG_BRIDGE_NETFILTER
	/* remember the MTU in the rtable for PMTU */
	br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu;
#endif

	return 0;
}

/* Allow setting the MAC address to any valid Ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	spin_lock_bh(&br->lock);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	br_stp_change_bridge_id(br, addr->sa_data);
	br->flags |= BR_SET_MAC_ADDR;
	spin_unlock_bh(&br->lock);

	return 0;
}

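/* ethtool -i: the bridge is a software device, so firmware and bus
 * information are not applicable.
 */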
static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "bridge");
	strcpy(info->version, BR_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, "N/A");
}

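/* ethtool feature setters: adjust the bridge's feature mask and let
 * br_features_recompute() derive the usable set from the intersection
 * of what the attached ports support.
 */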
static int br_set_sg(struct net_device *dev, u32 data)
{
	struct net_bridge *br = netdev_priv(dev);

	if (data)
		br->feature_mask |= NETIF_F_SG;
	else
		br->feature_mask &= ~NETIF_F_SG;

	br_features_recompute(br);
	return 0;
}

static int br_set_tso(struct net_device *dev, u32 data)
{
	struct net_bridge *br = netdev_priv(dev);

	if (data)
		br->feature_mask |= NETIF_F_TSO;
	else
		br->feature_mask &= ~NETIF_F_TSO;

	br_features_recompute(br);
	return 0;
}

static int br_set_tx_csum(struct net_device *dev, u32 data)
{
	struct net_bridge *br = netdev_priv(dev);

	if (data)
		br->feature_mask |= NETIF_F_NO_CSUM;
	else
		br->feature_mask &= ~NETIF_F_ALL_CSUM;

	br_features_recompute(br);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		br_netpoll_disable(p);
	}
}

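/* Enable netpoll on every port; on failure, unwind the ports that were
 * already set up.
 */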
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;
	int err = 0;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		if (!p->dev)
			continue;

		err = br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}

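/* Attach a netpoll instance to a single bridge port. */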
int br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	np->dev = p->dev;

	err = __netpoll_setup(np);
	if (err) {
		kfree(np);
		goto out;
	}

	p->np = np;

out:
	return err;
}

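/* Detach and free a port's netpoll instance. */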
void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu_bh();

	__netpoll_cleanup(np);
	kfree(np);
}

#endif

static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo    = br_getinfo,
	.get_link	= ethtool_op_get_link,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.set_tx_csum	= br_set_tx_csum,
	.get_sg		= ethtool_op_get_sg,
	.set_sg		= br_set_sg,
	.get_tso	= ethtool_op_get_tso,
	.set_tso	= br_set_tso,
	.get_ufo	= ethtool_op_get_ufo,
	.set_ufo	= ethtool_op_set_ufo,
	.get_flags	= ethtool_op_get_flags,
};

static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = br_get_stats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_multicast_list	 = br_dev_set_multicast_list,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_do_ioctl		 = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
};

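/* Destructor: release the per-CPU stats block before the netdev itself. */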
static void br_dev_free(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	free_percpu(br->stats);
	free_netdev(dev);
}

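/* Initialize a freshly allocated bridge net_device: random MAC address,
 * standard Ethernet defaults, bridge ops, no transmit queue, and a
 * feature set suited to a software device.
 */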
void br_dev_setup(struct net_device *dev)
{
	random_ether_addr(dev->dev_addr);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->destructor = br_dev_free;
	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
	dev->tx_queue_len = 0;
	dev->priv_flags = IFF_EBRIDGE;

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
			NETIF_F_NETNS_LOCAL | NETIF_F_GSO;
}