/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"

/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

struct eth_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */


#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high speed");

#else	/* full speed (low speed doesn't do bulk) */
#define qmult		1
#endif

/* for dual-speed hardware, use deeper queues at highspeed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && gadget->speed == USB_SPEED_HIGH)
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
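
/* Example (illustrative note, not in the original source): with the
 * default qmult of 5, a dual-speed controller enumerated at high speed
 * gets qlen() == 5 * DEFAULT_QLEN == 10 requests per direction, while
 * a full-speed link keeps the minimal double buffering of 2.
 */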

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}
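
/* Usage note (not part of the original source): the MTU can only be
 * changed while the link is not bound to a USB function, e.g.
 * "ip link set usb0 mtu 1400" fails with EBUSY once dev->port_usb is
 * non-NULL, because the peer has no way to learn the new size.
 */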

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof p->driver);
	strlcpy(p->version, UETH__VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;
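
	/* Worked example (illustrative, not in the original source):
	 * assuming a 1500-byte MTU, no function header, and 512-byte
	 * bulk maxpacket at high speed:
	 *	size = 14 (ethhdr) + 1500 (mtu) + 20 (RX_EXTRA) = 1534
	 *	size += 512 - 1		--> 2045
	 *	size -= 2045 % 512	--> 1536, i.e. three full packets
	 */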

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

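/* Grow or shrink a request freelist so it holds (up to) @n entries:
 * entries beyond @n are freed, missing ones are allocated with
 * usb_ep_alloc_request().  If allocation fails partway through, any
 * requests already on the list are kept and 0 is returned, so the
 * queue still runs, just shallower than requested.  (Descriptive
 * comment added for clarity; not in the original source.)
 */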
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it,
	 * or the hardware can't use skb buffers, or there's not
	 * enough space for the extra headers we need.
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding,
	 * and some hardware doesn't like to write zlps.
	 */
	req->zero = 1;
	if (!dev->zlp && (length % in->maxpacket) == 0)
		length++;
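
	/* Illustration (not in the original source): with 512-byte
	 * maxpacket on hardware that can't write zlps, a 1024-byte
	 * frame is sent as length 1025, so the last USB packet is a
	 * single byte; the host's short-packet logic still detects
	 * end-of-transfer without needing a zero length packet.
	 */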

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;
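
	/* Effect (illustrative note, not in the original source): with
	 * the default qmult of 5, no_interrupt is clear only when
	 * tx_qlen is a multiple of 5, so roughly one tx completion IRQ
	 * fires per five queued packets instead of one per packet.
	 */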

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			usb_ep_enable(link->in_ep, link->in);
			usb_ep_enable(link->out_ep, link->out);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	random_ether_addr(dev_addr);
	return 1;
}
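
/* Example (sketch, not in the original source): both module parameters
 * take twelve hex digits, optionally separated by ':' or '.', e.g.
 *
 *	modprobe g_ether dev_addr=8e:ad:be:ef:00:01 host_addr=8e:ad:be:ef:00:02
 *
 * An address that fails is_valid_ether_addr() (multicast bit set, or
 * all zeroes) is replaced by random_ether_addr(), and the nonzero
 * return lets the caller log a warning about it.
 */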

static struct eth_dev *the_dev;

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to be associated with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	strcpy(net->name, "usb%d");

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_stop_queue(net);
	netif_carrier_off(net);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;
	}

	return status;
}

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(void)
{
	if (!the_dev)
		return;

	unregister_netdev(the_dev->net);
	free_netdev(the_dev->net);

	/* assuming we used keventd, it must quiesce too */
	flush_scheduled_work();

	the_dev = NULL;
}


/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep, link->in);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep, link->out);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
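
/* Typical call flow (sketch, not in the original source; based on how
 * gadget function drivers use this API, with illustrative names):
 *
 *	static int my_func_set_alt(struct usb_function *f, ...)
 *	{
 *		struct net_device *net = gether_connect(&my_port);
 *
 *		if (IS_ERR(net))
 *			return PTR_ERR(net);
 *		...
 *	}
 *
 * with a matching gether_disconnect(&my_port) issued from the
 * function's disable() callback.
 */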

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->ioport = NULL;
	spin_unlock(&dev->lock);
}