#include <linux/slab.h>
#include "i1480u-wlp.h"

enum {
	/* This is only for Next and Last TX packets */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};
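
/*
 * A NXT/LST fragment thus carries at most i1480u_MAX_PL_SIZE bytes of
 * payload (the fragment size minus its untd_hdr_rst header).  The first
 * fragment's payload budget is computed separately, since that fragment
 * carries both an untd_hdr_1st and the wlp_tx_hdr; see
 * i1480u_tx_create_n() below.
 */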

/* Free resources allocated to an i1480u tx context. */
static
void i1480u_tx_free(struct i1480u_tx *wtx)
{
	kfree(wtx->buf);
	if (wtx->skb)
		dev_kfree_skb_irq(wtx->skb);
	usb_free_urb(wtx->urb);
	kfree(wtx);
}

static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}

static
void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}


/*
 * Callback for a completed TX URB: update stats on success, handle
 * errors, then release the tx context and possibly wake the queue.
 */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&i1480u->lock, flags);
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "TX: URB reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "TX: URB down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
					EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded. "
					"Reset device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
	/* Wake the queue once the in-flight count drops back to the
	 * restart threshold (it was stopped in i1480u_xmit_frame()). */
	if (atomic_dec_return(&i1480u->tx_inflight.count)
	    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
}


/*
 * Given a buffer that doesn't fit in a single fragment, create a
 * scatter/gather structure for delivery to the USB pipe.
 *
 * Implements functionality of i1480u_tx_create().
 *
 * @wtx:	tx descriptor
 * @skb:	skb to send
 * @gfp_mask:	gfp allocation mask
 * @returns:	0 if ok, < 0 errno code on error.
 *
 * Sorry, TOO LONG a function, but breaking it up is kind of hard
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
 * to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment N
 *   last fragment data   /
 *
 * This does not fill the i1480 TX header; it is left up to the
 * caller to do that. You can get it from @wtx->wlp_tx_hdr.
 *
 * This function consumes the skb unless there is an error.
 */
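/*
 * Size bookkeeping used by the function below, in symbolic form:
 *
 *   pl_size_1st  = i1480u_MAX_FRG_SIZE
 *                  - sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr)
 *   pl_size_left = skb->len - pl_size_1st
 *   frgs         = DIV_ROUND_UP(pl_size_left, i1480u_MAX_PL_SIZE)
 *   buf_size     = sizeof(struct untd_hdr_1st) + sizeof(struct wlp_tx_hdr)
 *                  + frgs * sizeof(struct untd_hdr_rst) + skb->len
 *
 * i.e. one untd_hdr_rst per NXT/LST fragment on top of the original
 * payload, plus the 1st-fragment headers.
 */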
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl;
	size_t pl_size;

	void *pl_itr, *buf_itr;
	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;

	wtx->skb = NULL;
	pl = skb->data;
	pl_itr = pl;
	pl_size = skb->len;
	pl_size_left = pl_size;	/* payload size */
	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
	 * the headers */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	BUG_ON(pl_size_1st > pl_size);
	pl_size_left -= pl_size_1st;
	/* The rest have a smaller header (no i1480 TX header). We
	 * need to break up the payload in blocks smaller than
	 * i1480u_MAX_PL_SIZE (payload excluding header). */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
	/* Allocate space for the new buffer. In this new buffer we'll
	 * place the headers followed by the data fragment, headers,
	 * data fragments, etc..
	 */
	result = -ENOMEM;
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL)
		goto error_buf_alloc;

	buf_itr = wtx->buf;		/* We got the space, let's fill it up */
	/* Fill 1st fragment */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* Set up i1480 header info */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
	buf_itr += sizeof(*wlp_tx_hdr);
	/* Copy the first fragment */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;

	/* Now do each remaining fragment */
	result = -EINVAL;
	while (pl_size_left > 0) {
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
	}
	dev_kfree_skb_irq(skb);
	return 0;

error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);

	kfree(wtx->buf);
error_buf_alloc:
	return result;
}


/*
 * Given a buffer that fits in a single fragment, fill out a @wtx
 * struct for transmitting it down the USB pipe.
 *
 * Uses the fact that we have space reserved in front of the skbuff
 * for hardware headers :]
 *
 * This does not fill the i1480 TX header; it is left up to the
 * caller to do that. You can get it from @wtx->wlp_tx_hdr.
 *
 * @wtx:	tx descriptor
 * @skb:	skb holding the payload to send
 *
 * This function does not consume the @skb.
 */
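/*
 * After the two __skb_push() calls below the skb data looks like:
 *
 *   | untd_hdr_cmp | wlp_tx_hdr | original payload |
 *
 * and hdr.len covers everything after the untd_hdr_cmp header.
 */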
static
int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	struct untd_hdr_cmp *untd_hdr_cmp;
	struct wlp_tx_hdr *wlp_tx_hdr;

	wtx->buf = NULL;
	wtx->skb = skb;
	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
	wtx->wlp_tx_hdr = wlp_tx_hdr;
	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));

	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
	untd_hdr_cmp->padding = 0;
	return 0;
}


/*
 * Given an skb to transmit, massage it to become palatable for the TX pipe
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE and add proper headers to each.
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment N
 *   last fragment data   /
 *
 * Each fragment will always be smaller than or equal to i1480u_MAX_FRG_SIZE.
 *
 * If the whole payload (plus headers) fits within i1480u_MAX_FRG_SIZE,
 * a single complete packet is composed instead:
 *
 *   complete header      \
 *   i1480 tx header      | single fragment
 *   packet data          /
 *
 * We were going to use scatter/gather support, but because the
 * interface is synchronous and the setup overhead is significant, it
 * didn't seem worth it for data that is going to be smaller than one
 * page.
 */
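/*
 * The single-fragment path below is taken when the payload fits in one
 * fragment together with both headers, i.e. when
 *
 *   skb->len <= i1480u_MAX_FRG_SIZE
 *               - sizeof(struct untd_hdr_cmp) - sizeof(struct wlp_tx_hdr)
 *
 * (pl_max_size); anything larger goes through i1480u_tx_create_n().
 */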
static
struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
				   struct sk_buff *skb, gfp_t gfp_mask)
{
	int result;
	struct usb_endpoint_descriptor *epd;
	int usb_pipe;
	unsigned long flags;

	struct i1480u_tx *wtx;
	const size_t pl_max_size =
		i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
		- sizeof(struct wlp_tx_hdr);

	wtx = kmalloc(sizeof(*wtx), gfp_mask);
	if (wtx == NULL)
		goto error_wtx_alloc;
	wtx->urb = usb_alloc_urb(0, gfp_mask);
	if (wtx->urb == NULL)
		goto error_urb_alloc;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
	usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
	/* Fits in a single complete packet or need to split? */
	if (skb->len > pl_max_size) {
		result = i1480u_tx_create_n(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
	} else {
		result = i1480u_tx_create_1(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  skb->data, skb->len, i1480u_tx_cb, wtx);
	}
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_add(&wtx->list_node, &i1480u->tx_list);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	return wtx;

error_create:
	usb_free_urb(wtx->urb);	/* URBs are refcounted; don't plain kfree() them */
error_urb_alloc:
	kfree(wtx);
error_wtx_alloc:
	return NULL;
}

/*
 * Actual fragmentation and transmission of frame
 *
 * @wlp:  WLP substack data structure
 * @skb:  To be transmitted
 * @dst:  Device address of destination
 * @returns: 0 on success, <0 on failure
 *
 * This function can also be called directly (not just from
 * hard_start_xmit), so we also check here if the interface is up
 * before sending anything.
 */
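/*
 * Flow control used below and in i1480u_tx_cb(): tx_inflight.count is
 * incremented for each submitted URB and decremented on completion.
 * Once it reaches tx_inflight.max the queue is stopped here; the
 * completion callback restarts it when the count drops back to
 * tx_inflight.threshold (if a non-zero threshold is configured).
 */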
int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *dst)
{
	int result = -ENXIO;
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct i1480u_tx *wtx;
	struct wlp_tx_hdr *wlp_tx_hdr;
	static unsigned char dev_bcast[2] = { 0xff, 0xff };

	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto out;
	result = -EBUSY;
	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
		netif_stop_queue(net_dev);
		goto error_max_inflight;
	}
	result = -ENOMEM;
	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
	if (unlikely(wtx == NULL)) {
		if (printk_ratelimit())
			dev_err(dev, "TX: no memory for WLP TX URB, "
				"dropping packet (in flight %d)\n",
				atomic_read(&i1480u->tx_inflight.count));
		netif_stop_queue(net_dev);
		goto error_wtx_alloc;
	}
	wtx->i1480u = i1480u;
	/* Fill out the i1480 header; @i1480u->def_tx_hdr is read without
	 * locking because its fields are kind of orthogonal to each
	 * other (and thus not changed in an atomic batch).
	 * The ETH header is right after the WLP TX header. */
	wlp_tx_hdr = wtx->wlp_tx_hdr;
	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
	wlp_tx_hdr->dstaddr = *dst;
	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
		/* Broadcast message directed to DRP host. Send as best
		 * effort on PCA. */
		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
	}

	result = usb_submit_urb(wtx->urb, GFP_ATOMIC);		/* Go baby */
	if (result < 0) {
		dev_err(dev, "TX: cannot submit URB: %d\n", result);
		/* We leave the freeing of skb to calling function */
		wtx->skb = NULL;
		goto error_tx_urb_submit;
	}
	atomic_inc(&i1480u->tx_inflight.count);
	net_dev->trans_start = jiffies;
	return result;

error_tx_urb_submit:
	i1480u_tx_destroy(i1480u, wtx);
error_wtx_alloc:
error_max_inflight:
out:
	return result;
}


/*
 * Transmit an skb.  Called when an skbuff has to be transmitted.
 *
 * The skb is first passed to the WLP substack to ensure this is a valid
 * frame. If valid, the destination's device address will be filled in
 * and the WLP header prepended to the skb. If this step fails we fake
 * sending the frame; if we returned an error the network stack would
 * just keep retrying.
 *
 * Broadcast frames inside a WSS need special treatment as multicast is
 * not supported. A broadcast frame is sent as unicast to each member of
 * the WSS - this is done by the WLP substack when it finds a broadcast
 * frame. So we test whether the WLP substack took over the skb and only
 * transmit it if it has not.
 *
 * @net_dev->xmit_lock is held
 */
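/*
 * wlp_prepare_tx_frame() return values as handled below: < 0, the frame
 * was rejected and we drop it here; 1, the WLP substack took ownership
 * of the skb (e.g. a broadcast it will transmit itself per WSS member);
 * 0, the skb is ours to hand to i1480u_xmit_frame().
 */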
netdev_tx_t i1480u_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *net_dev)
{
	int result;
	struct i1480u *i1480u = netdev_priv(net_dev);
	struct device *dev = &i1480u->usb_iface->dev;
	struct uwb_dev_addr dst;

	if ((net_dev->flags & IFF_UP) == 0)
		goto error;
	result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "WLP verification of TX frame failed (%d). "
			"Dropping packet.\n", result);
		goto error;
	} else if (result == 1) {
		/* trans_start time will be set when WLP actually transmits
		 * the frame */
		goto out;
	}
	result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "Frame TX failed (%d).\n", result);
		goto error;
	}
	return NETDEV_TX_OK;
error:
	dev_kfree_skb_any(skb);
	net_dev->stats.tx_dropped++;
out:
	return NETDEV_TX_OK;
}


/*
 * Called when a pkt transmission doesn't complete in a reasonable period
 * Device reset may sleep - do it outside of interrupt context (delayed)
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}


void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count*10; /* wait 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list
	 * so we can wait for it to be empty.
	 */
	while (1) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		count--;
		BUG_ON(count == 0);
		msleep(20);
	}
}