/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

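/**
 * qset_alloc - allocate and initialize a qset.
 *
 * The qset comes from the host controller's DMA pool; it is zeroed
 * and its DMA address recorded so qset_free() can return it to the
 * pool later.
 */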
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
	struct whc_qset *qset;
	dma_addr_t dma;

	qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
	if (qset == NULL)
		return NULL;
	memset(qset, 0, sizeof(struct whc_qset));

	qset->qset_dma = dma;
	qset->whc = whc;

	INIT_LIST_HEAD(&qset->list_node);
	INIT_LIST_HEAD(&qset->stds);

	return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @whc:  the WHCI host controller
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	bool is_out;
	uint8_t phy_rate;

	is_out = usb_pipeout(urb->pipe);

	qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
	if (epcd) {
		qset->max_seq = epcd->bMaxSequence;
		qset->max_burst = epcd->bMaxBurst;
	} else {
		qset->max_seq = 2;
		qset->max_burst = 1;
	}

	/*
	 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
	 * the maximum supported by the device for other endpoints
	 * (unless limited by the user).
	 */
	if (usb_pipecontrol(urb->pipe))
		phy_rate = UWB_PHY_RATE_53;
	else {
		uint16_t phy_rates;

		phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
		phy_rate = fls(phy_rates) - 1;
		if (phy_rate > whc->wusbhc.phy_rate)
			phy_rate = whc->wusbhc.phy_rate;
	}
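
	/*
	 * wPHYRates is a bitmap of supported rates, so fls() - 1
	 * yields the index of the highest set bit, i.e. the fastest
	 * rate the device supports (e.g. 0x0011 -> fls() == 5 ->
	 * rate index 4).
	 */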

	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE(phy_rate)
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);

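	/*
	 * Open the transfer window: a window of max_burst packets
	 * needs the low max_burst bits set (e.g. max_burst == 4
	 * gives 0x0f).
	 */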
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	qset->td_start = qset->td_end = qset->ntds = 0;

	qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status &= QH_STATUS_SEQ_MASK;
	qset->qh.err_count = 0;
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
	qset->reset = 0;

	qset->qh.status &= ~QH_STATUS_SEQ_MASK;
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
				 gfp_t mem_flags)
{
	struct whc_qset *qset;

	qset = urb->ep->hcpriv;
	if (qset == NULL) {
		qset = qset_alloc(whc, mem_flags);
		if (qset == NULL)
			return NULL;

		qset->ep = urb->ep;
		urb->ep->hcpriv = qset;
		qset_fill_qh(whc, qset, urb);
	}
	return qset;
}

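/**
 * qset_remove_complete - finish removing a qset from a schedule.
 *
 * Unlinks the qset and wakes up anyone waiting in qset_delete().
 */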
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 0;
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
	struct whc_std *std;
	enum whc_update update = 0;

	list_for_each_entry(std, &qset->stds, list_node) {
		struct whc_qtd *qtd;
		uint32_t status;

		if (qset->ntds >= WHCI_QSET_TD_MAX
		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
			break;

		if (std->qtd)
			continue; /* already has a qTD */

		qtd = std->qtd = &qset->qtd[qset->td_end];

		/* Fill in setup bytes for control transfers. */
		if (usb_pipecontrol(std->urb->pipe))
			memcpy(qtd->setup, std->urb->setup_packet, 8);

		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
			status |= QTD_STS_LAST_PKT;

		/*
		 * For an IN transfer the iAlt field should be set so
		 * the h/w will automatically advance to the next
		 * transfer. However, if there are 8 or more TDs
		 * remaining in this transfer then iAlt cannot be set
		 * as it could point to somewhere in this transfer.
		 */
		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
			int ialt;
			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
			status |= QTD_STS_IALT(ialt);
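			/*
			 * e.g. with td_end == 6 and 3 TDs remaining,
			 * iAlt = (6 + 3) % 8 == 1: the slot just past
			 * this transfer's last qTD.
			 */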
		} else if (usb_pipein(std->urb->pipe))
			qset->pause_after_urb = std->urb;

		if (std->num_pointers)
			qtd->options = cpu_to_le32(QTD_OPT_IOC);
		else
			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

		qtd->status = cpu_to_le32(status);

		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
			update = WHC_UPDATE_UPDATED;

		if (++qset->td_end >= WHCI_QSET_TD_MAX)
			qset->td_end = 0;
		qset->ntds++;
	}

	return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
	qset->qtd[qset->td_start].status = 0;

	if (++qset->td_start >= WHCI_QSET_TD_MAX)
		qset->td_start = 0;
	qset->ntds--;
}

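/**
 * qset_copy_bounce_to_sg - copy an sTD's bounce buffer back into the
 * urb's scatterlist (used for IN transfers that were linearized).
 */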
static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
	struct scatterlist *sg;
	void *bounce;
	size_t remaining, offset;

	bounce = std->bounce_buf;
	remaining = std->len;

	sg = std->bounce_sg;
	offset = std->bounce_offset;

	while (remaining) {
		size_t len;

		len = min(sg->length - offset, remaining);
		memcpy(sg_virt(sg) + offset, bounce, len);

		bounce += len;
		remaining -= len;

		offset += len;
		if (offset >= sg->length) {
			sg = sg_next(sg);
			offset = 0;
		}
	}
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->bounce_buf) {
		bool is_out = usb_pipeout(std->urb->pipe);
		dma_addr_t dma_addr;

		if (std->num_pointers)
			dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
		else
			dma_addr = std->dma_addr;

		dma_unmap_single(whc->wusbhc.dev, dma_addr,
				 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (!is_out)
			qset_copy_bounce_to_sg(whc, std);
		kfree(std->bounce_buf);
	}
	if (std->pl_virt) {
		if (std->dma_addr)
			dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
					 std->num_pointers * sizeof(struct whc_page_list_entry),
					 DMA_TO_DEVICE);
		kfree(std->pl_virt);
		std->pl_virt = NULL;
	}
	kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
			     struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}

static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
	dma_addr_t dma_addr = std->dma_addr;
	dma_addr_t sp, ep;
	size_t pl_len;
	int p;

	/* Short buffers don't need a page list. */
	if (std->len <= WHCI_PAGE_SIZE) {
		std->num_pointers = 0;
		return 0;
	}

	sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
	ep = dma_addr + std->len;
	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
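	/*
	 * e.g. with 4 KiB pages, an 8192 byte buffer starting at
	 * 0x1f00 spans sp == 0x1000 to ep == 0x3f00 and so needs
	 * DIV_ROUND_UP(0x2f00, 0x1000) == 3 page pointers.
	 */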

	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
	std->pl_virt = kmalloc(pl_len, mem_flags);
	if (std->pl_virt == NULL)
		return -ENOMEM;
	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);

	for (p = 0; p < std->num_pointers; p++) {
		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
	}

	return 0;
}

/**
 * urb_dequeue_work - execute an ASL/PZL update and give the urb back
 * to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	if (wurb->is_async)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			   | WUSBCMD_ASYNC_SYNCED_DB
			   | WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			   | WUSBCMD_PERIODIC_SYNCED_DB
			   | WUSBCMD_PERIODIC_QSET_RM);

	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}

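/**
 * qset_new_std - allocate a new sTD and add it to the tail of the
 * qset's sTD list.
 */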
static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
				    struct urb *urb, gfp_t mem_flags)
{
	struct whc_std *std;

	std = kzalloc(sizeof(struct whc_std), mem_flags);
	if (std == NULL)
		return NULL;

	std->urb = urb;
	std->qtd = NULL;

	INIT_LIST_HEAD(&std->list_node);
	list_add_tail(&std->list_node, &qset->stds);

	return std;
}

static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
			   gfp_t mem_flags)
{
	size_t remaining;
	struct scatterlist *sg;
	int i;
	int ntds = 0;
	struct whc_std *std = NULL;
	struct whc_page_list_entry *new_pl_virt;
	dma_addr_t prev_end = 0;
	size_t pl_len;
	int p = 0;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_sgs, i) {
		dma_addr_t dma_addr;
		size_t dma_remaining;
		dma_addr_t sp, ep;
		int num_pointers;

		if (remaining == 0)
			break;

		dma_addr = sg_dma_address(sg);
		dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

		while (dma_remaining) {
			size_t dma_len;

			/*
			 * We can use the previous std (if it exists) provided that:
			 * - the previous one ended on a page boundary.
			 * - the current one begins on a page boundary.
			 * - the previous one isn't full.
			 *
			 * If a new std is needed but the previous one
			 * was not a whole number of packets then this
			 * sg list cannot be mapped onto multiple
			 * qTDs.  Return an error and let the caller
			 * sort it out.
			 */
			if (!std
			    || (prev_end & (WHCI_PAGE_SIZE-1))
			    || (dma_addr & (WHCI_PAGE_SIZE-1))
			    || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
				if (std && std->len % qset->max_packet != 0)
					return -EINVAL;
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;
				ntds++;
				p = 0;
			}

			dma_len = dma_remaining;

			/*
			 * If the remainder of this element doesn't
			 * fit in a single qTD, limit the qTD to a
			 * whole number of packets.  This allows the
			 * remainder to go into the next qTD.
			 */
			if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
				dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
					* qset->max_packet - std->len;
			}

			std->len += dma_len;
			std->ntds_remaining = -1; /* filled in later */

			sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
			ep = dma_addr + dma_len;
			num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
			std->num_pointers += num_pointers;

			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

			new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
			if (new_pl_virt == NULL)
				return -ENOMEM; /* old pl_virt is freed by qset_free_std() */
			std->pl_virt = new_pl_virt;

			for (; p < std->num_pointers; p++) {
				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
				dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
			}

			prev_end = dma_addr = ep;
			dma_remaining -= dma_len;
			remaining -= dma_len;
		}
	}

	/*
	 * Now that the number of sTDs is known, go back and fill in
	 * std->ntds_remaining and map the page lists.
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining == -1) {
			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
			std->ntds_remaining = ntds--;
			std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
						       pl_len, DMA_TO_DEVICE);
		}
	}
	return 0;
}

/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
				     struct urb *urb, gfp_t mem_flags)
{
	bool is_out = usb_pipeout(urb->pipe);
	size_t max_std_len;
	size_t remaining;
	int ntds = 0;
	struct whc_std *std = NULL;
	void *bounce = NULL;
	struct scatterlist *sg;
	int i;

	/* limit maximum bounce buffer to max_burst * max_packet (at most 16 * 3.5 KiB = 56 KiB) */
	max_std_len = qset->max_burst * qset->max_packet;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_sgs, i) {
		size_t len;
		size_t sg_remaining;
		void *orig;

		if (remaining == 0)
			break;

		sg_remaining = min_t(size_t, remaining, sg->length);
		orig = sg_virt(sg);

		while (sg_remaining) {
			if (!std || std->len == max_std_len) {
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;
				std->bounce_buf = kmalloc(max_std_len, mem_flags);
				if (std->bounce_buf == NULL)
					return -ENOMEM;
				std->bounce_sg = sg;
				std->bounce_offset = orig - sg_virt(sg);
				bounce = std->bounce_buf;
				ntds++;
			}

			len = min(sg_remaining, max_std_len - std->len);

			if (is_out)
				memcpy(bounce, orig, len);

			std->len += len;
			std->ntds_remaining = -1; /* filled in later */

			bounce += len;
			orig += len;
			sg_remaining -= len;
			remaining -= len;
		}
	}

	/*
	 * For each of the new sTDs, map the bounce buffers, create
	 * page lists (if necessary), and fill in std->ntds_remaining.
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining != -1)
			continue;

		std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
					       is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			return -ENOMEM;

		std->ntds_remaining = ntds--;
	}

	return 0;
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
	gfp_t mem_flags)
{
	struct whc_urb *wurb;
	int remaining = urb->transfer_buffer_length;
	u64 transfer_dma = urb->transfer_dma;
	int ntds_remaining;
	int ret;

	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
	if (wurb == NULL)
		goto err_no_mem;
	urb->hcpriv = wurb;
	wurb->qset = qset;
	wurb->urb = urb;
	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

	if (urb->num_sgs) {
		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
		if (ret == -EINVAL) {
			qset_free_stds(qset, urb);
			ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
		}
		if (ret < 0)
			goto err_no_mem;
		return 0;
	}

	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
	if (ntds_remaining == 0)
		ntds_remaining = 1;

	while (ntds_remaining) {
		struct whc_std *std;
		size_t std_len;

		std_len = remaining;
		if (std_len > QTD_MAX_XFER_SIZE)
			std_len = QTD_MAX_XFER_SIZE;

		std = qset_new_std(whc, qset, urb, mem_flags);
		if (std == NULL)
			goto err_no_mem;

		std->dma_addr = transfer_dma;
		std->len = std_len;
		std->ntds_remaining = ntds_remaining;

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			goto err_no_mem;

		ntds_remaining--;
		remaining -= std_len;
		transfer_dma += std_len;
	}

	return 0;

err_no_mem:
	qset_free_stds(qset, urb);
	return -ENOMEM;
}
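
/*
 * Illustrative sketch (not compiled): roughly how the enqueue paths
 * in asl.c/pzl.c are expected to chain the helpers above.  The real
 * callers also link the urb to its endpoint and do this under
 * whc->lock; the function name here is hypothetical and error
 * handling is simplified.
 */
#if 0
static int example_async_enqueue(struct whc *whc, struct urb *urb,
				 gfp_t mem_flags)
{
	struct whc_qset *qset;
	int err;

	/* Find (or create) the qset for this endpoint. */
	qset = get_qset(whc, urb, mem_flags);
	if (qset == NULL)
		return -ENOMEM;

	/* Chop the urb into sTDs, one per future qTD. */
	err = qset_add_urb(whc, qset, urb, mem_flags);
	if (err < 0)
		return err;

	/* Activate qTDs; ask the hardware to re-scan the ASL if needed. */
	if (qset_add_qtds(whc, qset))
		asl_update(whc, WUSBCMD_ASYNC_UPDATED);

	return 0;
}
#endif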

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
			    struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
	if (status & QTD_STS_HALTED) {
		if (status & QTD_STS_DBE)
			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
		else if (status & QTD_STS_BABBLE)
			return -EOVERFLOW;
		else if (status & QTD_STS_RCE)
			return -ETIME;
		return -EPIPE;
	}
	if (usb_pipein(urb->pipe)
	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
	    && urb->actual_length < urb->transfer_buffer_length)
		return -EREMOTEIO;
	return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD.  If
 * the urb is completely transferred or (for an IN transfer) the last
 * packet flag is set, then the transfer is complete and the urb
 * should be returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
				 struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);

	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);

	/*
	 * Transfers for this URB are complete?  Then return it to the
	 * USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur. Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}

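/**
 * process_halted_qtd - process a halted qTD.
 *
 * The urb is given back with the appropriate error status; any
 * remaining active qTDs in the qset are deactivated and the qset is
 * marked for removal from the schedule.
 */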
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
			       struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);

	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	qset->remove = 1;
}

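/**
 * qset_free - return a qset to the host controller's DMA pool.
 */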
void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}