Source directory: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/wusbcore/

Lines Matching defs:xfer

37 	struct wa_xfer *xfer;		/* out xfer */
52 * Protected by xfer->lock
76 static inline void wa_xfer_init(struct wa_xfer *xfer)
78 kref_init(&xfer->refcnt);
79 INIT_LIST_HEAD(&xfer->list_node);
80 spin_lock_init(&xfer->lock);
86 * Note that the xfer->seg[index] thingies follow the URB life cycle,
91 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
92 if (xfer->seg) {
94 for (cnt = 0; cnt < xfer->segs; cnt++) {
95 if (xfer->is_inbound)
96 usb_put_urb(xfer->seg[cnt]->dto_urb);
97 usb_put_urb(&xfer->seg[cnt]->urb);
100 kfree(xfer);
103 static void wa_xfer_get(struct wa_xfer *xfer)
105 kref_get(&xfer->refcnt);
108 static void wa_xfer_put(struct wa_xfer *xfer)
110 kref_put(&xfer->refcnt, wa_xfer_destroy);
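
The four fragments above are the transfer's kref-based lifetime: wa_xfer_init() starts the count at one, wa_xfer_get()/wa_xfer_put() pair up across the asynchronous paths, and wa_xfer_destroy() runs only on the final put, releasing each segment's URBs before freeing the container. A minimal user-space model of that pattern, with C11 atomics standing in for struct kref (struct xfer_model and its helpers are illustrative names, not kernel API):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct xfer_model {
            atomic_int refcnt;              /* stands in for struct kref */
            /* payload fields elided */
    };

    static void xfer_model_init(struct xfer_model *x)
    {
            atomic_init(&x->refcnt, 1);     /* creator holds the first ref */
    }

    static void xfer_model_get(struct xfer_model *x)
    {
            atomic_fetch_add(&x->refcnt, 1);
    }

    static void xfer_model_put(struct xfer_model *x)
    {
            /* the final put runs the destructor, as kref_put() does */
            if (atomic_fetch_sub(&x->refcnt, 1) == 1)
                    free(x);                /* wa_xfer_destroy() analogue */
    }
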
114 * xfer is referenced
116 * xfer->lock has to be unlocked
118 * We take xfer->lock for setting the result; this is a barrier
123 static void wa_xfer_giveback(struct wa_xfer *xfer)
127 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
128 list_del_init(&xfer->list_node);
129 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
130 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
131 wa_put(xfer->wa);
132 wa_xfer_put(xfer);
136 * xfer is referenced
138 * xfer->lock has to be unlocked
140 static void wa_xfer_completion(struct wa_xfer *xfer)
142 if (xfer->wusb_dev)
143 wusb_dev_put(xfer->wusb_dev);
144 rpipe_put(xfer->ep->hcpriv);
145 wa_xfer_giveback(xfer);
151 * xfer->lock has to be locked
153 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
155 struct device *dev = &xfer->wa->usb_iface->dev;
158 struct urb *urb = xfer->urb;
161 result = xfer->segs_done == xfer->segs_submitted;
165 for (cnt = 0; cnt < xfer->segs; cnt++) {
166 seg = xfer->seg[cnt];
170 dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
171 xfer, cnt, seg->result);
176 if (seg->result < xfer->seg_size
177 && cnt != xfer->segs-1)
179 dev_dbg(dev, "xfer %p#%u: DONE short %d "
181 xfer, seg->index, found_short, seg->result,
185 xfer->result = seg->result;
186 dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
187 xfer, seg->index, seg->result);
190 dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
191 xfer, seg->index, urb->status);
192 xfer->result = urb->status;
195 dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
196 xfer, cnt, seg->status);
197 xfer->result = -EINVAL;
201 xfer->result = 0;
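
Read together, the __wa_xfer_is_done() fragments first require segs_done to have caught up with segs_submitted, then walk the segment array: a short segment anywhere but last place is flagged as bad, an errored or aborted segment propagates its status into xfer->result, and a clean walk leaves xfer->result at 0. A compilable sketch of just that classification walk (seg_model, the SEG_* states and the error field are hypothetical stand-ins for the kernel types):

    #include <errno.h>
    #include <stddef.h>

    enum seg_state { SEG_DONE, SEG_ERROR, SEG_ABORTED };

    struct seg_model {
            enum seg_state status;
            size_t result;          /* bytes actually transferred */
            int error;              /* negative errno, valid for SEG_ERROR */
    };

    /* 0 = transfer completed cleanly; <0 = first failure found */
    static int xfer_classify(const struct seg_model *seg, unsigned int nsegs,
                             size_t seg_size)
    {
            unsigned int cnt;

            for (cnt = 0; cnt < nsegs; cnt++) {
                    switch (seg[cnt].status) {
                    case SEG_DONE:
                            /* short segments are only legal in last place */
                            if (seg[cnt].result < seg_size && cnt != nsegs - 1)
                                    return -EPIPE;
                            break;
                    case SEG_ERROR:
                            return seg[cnt].error;
                    case SEG_ABORTED:
                            return -ECONNRESET; /* like urb->status above */
                    }
            }
            return 0;
    }
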
214 static void wa_xfer_id_init(struct wa_xfer *xfer)
216 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
220 * Return the xfer's ID associated with xfer
224 static u32 wa_xfer_id(struct wa_xfer *xfer)
226 return xfer->id;
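
wa_xfer_id_init() draws each transfer's wire-visible ID from a per-adapter atomic counter, which lets the DTI result path match results to transfers without extra locking. The same pattern in portable C (xfer_id_alloc is an illustrative name; the kernel uses atomic_add_return()):

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_uint xfer_id_count;       /* one counter per adapter */

    static uint32_t xfer_id_alloc(void)
    {
            /* atomic_add_return(1, ...) returns the post-increment value */
            return (uint32_t)atomic_fetch_add(&xfer_id_count, 1) + 1;
    }
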
275 * We'll get an 'aborted transaction' xfer result on DTI, that'll
279 static void __wa_xfer_abort(struct wa_xfer *xfer)
282 struct device *dev = &xfer->wa->usb_iface->dev;
284 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
292 b->cmd.dwTransferID = wa_xfer_id(xfer);
295 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
296 usb_sndbulkpipe(xfer->wa->usb_dev,
297 xfer->wa->dto_epd->bEndpointAddress),
307 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
308 xfer, result);
319 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
323 struct device *dev = &xfer->wa->usb_iface->dev;
325 struct urb *urb = xfer->urb;
326 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
347 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
348 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
349 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
350 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
352 if (xfer->seg_size < maxpktsize) {
354 "%zu\n", xfer->seg_size, maxpktsize);
358 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
359 xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
360 / xfer->seg_size;
361 if (xfer->segs >= WA_SEGS_MAX) {
363 (int)(urb->transfer_buffer_length / xfer->seg_size),
368 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
369 xfer->segs = 1;
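
__wa_xfer_setup_sizes() computes the segment size from the rpipe descriptor, wBlocks blocks of 2^(bRPipeBlockSize-1) bytes, rounds it down to a whole number of max-packet units, then splits the URB buffer into ceil(length / seg_size) segments, with a zero-length control transfer still claiming one segment for its setup stage. The arithmetic as a standalone worked example (the descriptor values below are made up for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned wBlocks = 4, bRPipeBlockSize = 10;     /* illustrative */
            unsigned maxpktsize = 512;
            unsigned buf_len = 10000;

            /* wBlocks blocks of 2^(bRPipeBlockSize - 1) bytes each */
            unsigned seg_size = wBlocks * (1u << (bRPipeBlockSize - 1));

            /* round down to a whole number of wMaxPacketSize units */
            seg_size = (seg_size / maxpktsize) * maxpktsize;

            /* ceil(buf_len / seg_size); a 0-length control transfer
             * still needs one segment for the setup stage */
            unsigned segs = (buf_len + seg_size - 1) / seg_size;
            if (segs == 0)
                    segs = 1;

            printf("seg_size=%u segs=%u\n", seg_size, segs);  /* 2048, 5 */
            return 0;
    }
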
374 /* Fill in the common request header and xfer-type specific data. */
375 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
380 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
382 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
386 xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
392 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
393 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
421 struct wa_xfer *xfer = seg->xfer;
431 spin_lock_irqsave(&xfer->lock, flags);
432 wa = xfer->wa;
434 dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
435 xfer, seg->index, urb->actual_length);
439 spin_unlock_irqrestore(&xfer->lock, flags);
445 spin_lock_irqsave(&xfer->lock, flags);
446 wa = xfer->wa;
448 rpipe = xfer->ep->hcpriv;
449 dev_dbg(dev, "xfer %p#%u: data out error %d\n",
450 xfer, seg->index, urb->status);
460 xfer->segs_done++;
461 __wa_xfer_abort(xfer);
463 done = __wa_xfer_is_done(xfer);
465 spin_unlock_irqrestore(&xfer->lock, flags);
467 wa_xfer_completion(xfer);
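
The error leg above shows a pattern this file repeats on every completion path: account the segment and evaluate doneness while holding xfer->lock, then drop the lock before calling wa_xfer_completion(), because the URB giveback may re-enter the driver. A sketch of that shape, with a pthread mutex standing in for the spinlock and all names hypothetical:

    #include <pthread.h>
    #include <stdbool.h>

    struct xfer_state {
            pthread_mutex_t lock;           /* spinlock in the kernel */
            unsigned segs_done, segs_submitted;
    };

    static void seg_complete(struct xfer_state *x, void (*giveback)(void))
    {
            bool done;

            pthread_mutex_lock(&x->lock);
            x->segs_done++;
            done = (x->segs_done == x->segs_submitted);
            pthread_mutex_unlock(&x->lock);

            /* the giveback runs unlocked: it may re-enter and relock */
            if (done)
                    giveback();
    }
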
482 * seg->xfer could be already gone.
485 * because sometimes the xfer result callback arrives before this
494 struct wa_xfer *xfer = seg->xfer;
504 spin_lock_irqsave(&xfer->lock, flags);
505 wa = xfer->wa;
507 dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
508 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
510 spin_unlock_irqrestore(&xfer->lock, flags);
516 spin_lock_irqsave(&xfer->lock, flags);
517 wa = xfer->wa;
519 rpipe = xfer->ep->hcpriv;
521 dev_err(dev, "xfer %p#%u: request error %d\n",
522 xfer, seg->index, urb->status);
532 xfer->segs_done++;
533 __wa_xfer_abort(xfer);
535 done = __wa_xfer_is_done(xfer);
536 spin_unlock_irqrestore(&xfer->lock, flags);
538 wa_xfer_completion(xfer);
547 * The segments are freed by wa_xfer_destroy() when the xfer use count
552 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
555 size_t alloc_size = sizeof(*xfer->seg[0])
556 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
557 struct usb_device *usb_dev = xfer->wa->usb_dev;
558 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
563 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
564 if (xfer->seg == NULL)
567 buf_size = xfer->urb->transfer_buffer_length;
568 for (cnt = 0; cnt < xfer->segs; cnt++) {
569 seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
573 seg->xfer = xfer;
580 buf_itr_size = buf_size > xfer->seg_size ?
581 xfer->seg_size : buf_size;
582 if (xfer->is_inbound == 0 && buf_size > 0) {
591 if (xfer->is_dma) {
593 xfer->urb->transfer_dma + buf_itr;
598 xfer->urb->transfer_buffer + buf_itr;
608 kfree(xfer->seg[cnt]);
613 if (xfer->is_inbound == 0)
614 kfree(xfer->seg[cnt]->dto_urb);
615 kfree(xfer->seg[cnt]);
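
__wa_xfer_setup_segs() allocates at two levels, one pointer array and then one buffer per segment, and the error legs at the end unwind in reverse, freeing only what was actually allocated. The same allocate/unwind shape as a self-contained sketch (alloc_segs is a hypothetical name; the kernel version also allocates and wires up per-segment dto URBs):

    #include <stdlib.h>

    /* returns 0 on success, -1 with nothing leaked on failure */
    static int alloc_segs(void ***segp, unsigned nsegs, size_t seg_bytes)
    {
            void **seg = calloc(nsegs, sizeof(*seg));
            unsigned cnt;

            if (!seg)
                    return -1;
            for (cnt = 0; cnt < nsegs; cnt++) {
                    seg[cnt] = calloc(1, seg_bytes);
                    if (!seg[cnt])
                            goto error;
            }
            *segp = seg;
            return 0;

    error:
            while (cnt-- > 0)               /* unwind in reverse */
                    free(seg[cnt]);
            free(seg);
            return -1;
    }
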
621 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
624 struct device *dev = &xfer->wa->usb_iface->dev;
629 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
633 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
635 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
636 xfer, xfer->segs, result);
640 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
641 wa_xfer_id_init(xfer);
642 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
647 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
648 xfer->seg_size : transfer_size;
649 transfer_size -= xfer->seg_size;
650 for (cnt = 1; cnt < xfer->segs; cnt++) {
651 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
654 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
655 cpu_to_le32(xfer->seg_size)
657 xfer->seg[cnt]->status = WA_SEG_READY;
658 transfer_size -= xfer->seg_size;
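
__wa_xfer_setup() gives every segment header the full seg_size as its dwTransferLength except the last, which gets the remainder; since transfer_size shrinks by seg_size each pass, the "transfer_size > seg_size" test can only fail on the final segment. The length schedule in isolation:

    #include <stdio.h>

    int main(void)
    {
            unsigned transfer_size = 10000, seg_size = 2048, segs = 5;
            unsigned cnt;

            for (cnt = 0; cnt < segs; cnt++) {
                    unsigned len = transfer_size > seg_size ?
                                    seg_size : transfer_size;
                    printf("seg %u: dwTransferLength=%u\n", cnt, len);
                    transfer_size -= len;
            }
            /* prints 2048 four times, then 1808 for the final segment */
            return 0;
    }
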
672 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
678 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
679 xfer, seg->index, result);
685 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
686 xfer, seg->index, result);
706 * xfer->lock normally nests the seg_lock and not vice versa.
714 struct wa_xfer *xfer;
723 xfer = seg->xfer;
724 result = __wa_seg_submit(rpipe, xfer, seg);
725 dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
726 xfer, seg->index, atomic_read(&rpipe->segs_available), result);
729 spin_lock_irqsave(&xfer->lock, flags);
730 __wa_xfer_abort(xfer);
731 xfer->segs_done++;
732 spin_unlock_irqrestore(&xfer->lock, flags);
741 * xfer->lock is taken
746 static int __wa_xfer_submit(struct wa_xfer *xfer)
749 struct wahc *wa = xfer->wa;
754 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
760 list_add_tail(&xfer->list_node, &wa->xfer_list);
766 for (cnt = 0; cnt < xfer->segs; cnt++) {
769 seg = xfer->seg[cnt];
770 dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
771 xfer, cnt, available, empty,
774 dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
778 result = __wa_seg_submit(rpipe, xfer, seg);
780 __wa_xfer_abort(xfer);
784 xfer->segs_submitted++;
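
__wa_xfer_submit() appears to gate each segment on the rpipe's free-slot count: while slots remain, a segment goes straight to __wa_seg_submit(); otherwise it is parked on the delayed list for wa_seg_submit_delayed() to pick up once a slot frees. A toy model of that gate (the slot count and segment total are illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned segs = 5, slots = 2;   /* rpipe slots: illustrative */
            unsigned cnt, submitted = 0, delayed = 0;

            for (cnt = 0; cnt < segs; cnt++) {
                    if (slots > 0) {
                            slots--;        /* __wa_seg_submit() here */
                            submitted++;
                    } else {
                            delayed++;      /* parked on the delayed list */
                    }
            }
            printf("submitted=%u delayed=%u\n", submitted, delayed);
            return 0;
    }
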
797 * xfer->wa filled and refcounted
798 * xfer->ep filled with rpipe refcounted if
800 * xfer->urb filled and refcounted (this is the case when called
804 * xfer->gfp filled
809 * the submitted URBs or from the xfer-result path to kick in. If xfer
810 * result never kicks in, the xfer will timeout from the USB code and
813 static void wa_urb_enqueue_b(struct wa_xfer *xfer)
817 struct urb *urb = xfer->urb;
818 struct wahc *wa = xfer->wa;
823 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
839 spin_lock_irqsave(&xfer->lock, flags);
840 xfer->wusb_dev = wusb_dev;
845 result = __wa_xfer_setup(xfer, urb);
848 result = __wa_xfer_submit(xfer);
851 spin_unlock_irqrestore(&xfer->lock, flags);
860 spin_unlock_irqrestore(&xfer->lock, flags);
864 rpipe_put(xfer->ep->hcpriv);
866 xfer->result = result;
867 wa_xfer_giveback(xfer);
871 done = __wa_xfer_is_done(xfer);
872 xfer->result = result;
873 spin_unlock_irqrestore(&xfer->lock, flags);
875 wa_xfer_completion(xfer);
885 * order, we just drop the lock once we have the xfer and reacquire it
891 struct wa_xfer *xfer, *next;
895 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
897 list_del_init(&xfer->list_node);
900 urb = xfer->urb;
901 wa_urb_enqueue_b(xfer);
927 struct wa_xfer *xfer;
934 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
939 xfer = kzalloc(sizeof(*xfer), gfp);
940 if (xfer == NULL)
946 wa_xfer_init(xfer);
947 xfer->wa = wa_get(wa);
948 xfer->urb = urb;
949 xfer->gfp = gfp;
950 xfer->ep = ep;
951 urb->hcpriv = xfer;
953 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
954 xfer, urb, urb->pipe, urb->transfer_buffer_length,
962 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
966 wa_urb_enqueue_b(xfer);
971 kfree(xfer);
986 * still had no time to set the xfer up. Because
998 struct wa_xfer *xfer;
1004 xfer = urb->hcpriv;
1005 if (xfer == NULL) {
1013 spin_lock_irqsave(&xfer->lock, flags);
1014 rpipe = xfer->ep->hcpriv;
1017 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1020 if (xfer->seg == NULL) /* still hasn't reached */
1022 /* Ok, the xfer is in flight already, it's been set up and submitted. */
1023 __wa_xfer_abort(xfer);
1024 for (cnt = 0; cnt < xfer->segs; cnt++) {
1025 seg = xfer->seg[cnt];
1029 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1030 xfer, cnt, seg->status);
1037 xfer->segs_done++;
1044 if (xfer->is_inbound == 0)
1046 xfer->segs_done++;
1051 xfer->segs_done++;
1057 xfer->segs_done++;
1066 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
1067 __wa_xfer_is_done(xfer);
1068 spin_unlock_irqrestore(&xfer->lock, flags);
1069 wa_xfer_completion(xfer);
1075 spin_unlock_irqrestore(&xfer->lock, flags);
1080 list_del_init(&xfer->list_node);
1082 xfer->result = urb->status;
1083 spin_unlock_irqrestore(&xfer->lock, flags);
1084 wa_xfer_giveback(xfer);
1141 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1154 spin_lock_irqsave(&xfer->lock, flags);
1156 if (unlikely(seg_idx >= xfer->segs))
1158 seg = xfer->seg[seg_idx];
1159 rpipe = xfer->ep->hcpriv;
1161 dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
1162 xfer, seg_idx, usb_status, seg->status);
1170 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1171 xfer, seg_idx, seg->status);
1176 dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
1177 xfer, seg->index, usb_status);
1182 if (xfer->is_inbound) { /* IN data phase: read to buffer */
1185 if (xfer->is_dma) {
1187 xfer->urb->transfer_dma
1188 + seg_idx * xfer->seg_size;
1193 xfer->urb->transfer_buffer
1194 + seg_idx * xfer->seg_size;
1208 xfer->segs_done++;
1210 done = __wa_xfer_is_done(xfer);
1212 spin_unlock_irqrestore(&xfer->lock, flags);
1214 wa_xfer_completion(xfer);
1226 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1227 xfer, seg_idx, result);
1231 xfer->segs_done++;
1233 __wa_xfer_abort(xfer);
1234 done = __wa_xfer_is_done(xfer);
1235 spin_unlock_irqrestore(&xfer->lock, flags);
1237 wa_xfer_completion(xfer);
1243 spin_unlock_irqrestore(&xfer->lock, flags);
1244 wa_urb_dequeue(wa, xfer->urb);
1246 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1256 spin_unlock_irqrestore(&xfer->lock, flags);
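
For the inbound data phase in wa_xfer_result_chew() above, each segment's payload is read straight into the caller's buffer (or DMA area) at offset seg_idx * seg_size, which works without tracking a running offset because every segment except the last is exactly seg_size bytes. The offset rule on its own:

    #include <stdio.h>

    int main(void)
    {
            unsigned seg_size = 2048, seg_idx;

            /* every segment's landing offset is independently computable */
            for (seg_idx = 0; seg_idx < 5; seg_idx++)
                    printf("seg %u -> buffer offset %u\n",
                           seg_idx, seg_idx * seg_size);
            return 0;
    }
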
1267 * seg->xfer could be already gone.
1272 struct wa_xfer *xfer = seg->xfer;
1282 spin_lock_irqsave(&xfer->lock, flags);
1283 wa = xfer->wa;
1285 rpipe = xfer->ep->hcpriv;
1286 dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1287 xfer, seg->index, (size_t)urb->actual_length);
1290 xfer->segs_done++;
1292 done = __wa_xfer_is_done(xfer);
1293 spin_unlock_irqrestore(&xfer->lock, flags);
1295 wa_xfer_completion(xfer);
1303 spin_lock_irqsave(&xfer->lock, flags);
1304 wa = xfer->wa;
1306 rpipe = xfer->ep->hcpriv;
1308 dev_err(dev, "xfer %p#%u: data in error %d\n",
1309 xfer, seg->index, urb->status);
1318 xfer->segs_done++;
1320 __wa_xfer_abort(xfer);
1321 done = __wa_xfer_is_done(xfer);
1322 spin_unlock_irqrestore(&xfer->lock, flags);
1324 wa_xfer_completion(xfer);
1347 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
1363 struct wa_xfer *xfer;
1369 /* We have a xfer result buffer; check it */
1370 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1373 dev_err(dev, "DTI Error: xfer result--bad size "
1374 "xfer result (%d bytes vs %zu needed)\n",
1380 dev_err(dev, "DTI Error: xfer result--"
1386 dev_err(dev, "DTI Error: xfer result--"
1397 xfer = wa_xfer_get_by_id(wa, xfer_id);
1398 if (xfer == NULL) {
1399 dev_err(dev, "DTI Error: xfer result--"
1400 "unknown xfer 0x%08x (status 0x%02x)\n",
1404 wa_xfer_result_chew(wa, xfer);
1405 wa_xfer_put(xfer);
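
wa_xfer_get_by_id() resolves the dwTransferID carried in the DTI result back to a live transfer; an unknown ID is logged and the result dropped, as the error leg above shows, and the wa_xfer_put() after the chew implies the lookup takes a reference on the match. A reduced model of the lookup (a plain array stands in for the locked wa->xfer_list; names are hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    struct xfer_entry {
            uint32_t id;
            /* ... */
    };

    static struct xfer_entry *lookup_by_id(struct xfer_entry *tab, size_t n,
                                           uint32_t id)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (tab[i].id == id)
                            return &tab[i]; /* real code takes a ref here */
            return NULL;
    }
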
1443 * don't really set it up and start it until the first xfer complete