• Home
  • History
  • Annotate
  • Raw
  • Download
  • Showing matches only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/usb/host/

Lines matching defs: qtd

467 static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
501 qtd->buffer = (void *) &oxu->mem->db_pool[i];
502 qtd->buffer_dma = virt_to_phys(qtd->buffer);
504 qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
519 static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
525 index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
528 qtd->qtd_buffer_len = 0;
529 qtd->buffer_dma = 0;
530 qtd->buffer = NULL;
537 static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
539 memset(qtd, 0, sizeof *qtd);
540 qtd->qtd_dma = dma;
541 qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
542 qtd->hw_next = EHCI_LIST_END;
543 qtd->hw_alt_next = EHCI_LIST_END;
544 INIT_LIST_HEAD(&qtd->qtd_list);
547 static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
551 if (qtd->buffer)
552 oxu_buf_free(oxu, qtd);
556 index = qtd - &oxu->mem->qtd_pool[0];
567 struct ehci_qtd *qtd = NULL;
576 qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
577 memset(qtd, 0, sizeof *qtd);
579 qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
580 qtd->hw_next = EHCI_LIST_END;
581 qtd->hw_alt_next = EHCI_LIST_END;
582 INIT_LIST_HEAD(&qtd->qtd_list);
584 qtd->qtd_dma = virt_to_phys(qtd);
591 return qtd;
772 /* Fill a qtd, returning how much of the buffer we were able to queue up.
774 static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
781 qtd->hw_buf[0] = cpu_to_le32((u32)addr);
782 qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
790 /* per-qtd limit: from 16K to 20K (best alignment) */
793 qtd->hw_buf[i] = cpu_to_le32((u32)addr);
794 qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
806 qtd->hw_token = cpu_to_le32((count << 16) | token);
807 qtd->length = count;
813 struct ehci_qh *qh, struct ehci_qtd *qtd)
818 qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
829 is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
837 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
848 struct ehci_qtd *qtd;
851 qtd = qh->dummy;
853 qtd = list_entry(qh->qtd_list.next,
855 /* first qtd may already be partially processed */
856 if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
857 qtd = NULL;
860 if (qtd)
861 qh_update(oxu, qh, qtd);
907 oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
1002 struct ehci_qtd *qtd;
1006 qtd = list_entry(entry, struct ehci_qtd, qtd_list);
1007 urb = qtd->urb;
1030 if (qtd == end)
1033 /* hardware copies qtd out of qh overlay */
1035 token = le32_to_cpu(qtd->hw_token);
1047 !(qtd->hw_alt_next & EHCI_LIST_END)) {
1063 /* ignore active urbs unless some previous qtd
1079 && cpu_to_le32(qtd->qtd_dma)
1097 qtd->length, token);
1098 if ((usb_pipein(qtd->urb->pipe)) &&
1099 (NULL != qtd->transfer_buffer))
1100 memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
1104 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
1105 last = list_entry(qtd->qtd_list.prev,
1107 last->hw_next = qtd->hw_next;
1109 list_del(&qtd->qtd_list);
1110 last = qtd;
1135 * overlaying the dummy qtd (which reduces DMA chatter).
1174 struct ehci_qtd *qtd;
1176 qtd = list_entry(entry, struct ehci_qtd, qtd_list);
1177 list_del(&qtd->qtd_list);
1178 oxu_qtd_free(oxu, qtd);
1189 struct ehci_qtd *qtd, *qtd_prev;
1200 qtd = ehci_qtd_alloc(oxu);
1201 if (unlikely(!qtd))
1203 list_add_tail(&qtd->qtd_list, head);
1204 qtd->urb = urb;
1217 ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
1221 qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
1223 memcpy(qtd->buffer, qtd->urb->setup_packet,
1228 qtd_prev = qtd;
1229 qtd = ehci_qtd_alloc(oxu);
1230 if (unlikely(!qtd))
1232 qtd->urb = urb;
1233 qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
1234 list_add_tail(&qtd->qtd_list, head);
1245 ret = oxu_buf_alloc(oxu, qtd, len);
1249 buf = qtd->buffer_dma;
1253 memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
1269 this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
1270 qtd->transfer_buffer = transfer_buf;
1275 qtd->hw_alt_next = oxu->async->hw_alt_next;
1277 /* qh makes control packets use qtd toggle; maybe switch it */
1284 qtd_prev = qtd;
1285 qtd = ehci_qtd_alloc(oxu);
1286 if (unlikely(!qtd))
1289 ret = oxu_buf_alloc(oxu, qtd, len);
1293 qtd->urb = urb;
1294 qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
1295 list_add_tail(&qtd->qtd_list, head);
1303 qtd->hw_alt_next = EHCI_LIST_END;
1322 qtd_prev = qtd;
1323 qtd = ehci_qtd_alloc(oxu);
1324 if (unlikely(!qtd))
1326 qtd->urb = urb;
1327 qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
1328 list_add_tail(&qtd->qtd_list, head);
1331 qtd_fill(qtd, 0, 0, token, 0);
1336 qtd->hw_token |= cpu_to_le32(QTD_IOC);
1344 /* Each QH holds a qtd list; a QH is used for everything except iso.
1439 info1 |= 1 << 14; /* toggle from qtd */
1455 info1 |= 1 << 14; /* toggle from qtd */
1521 /* qtd completions reported later by interrupt */
1545 struct ehci_qtd *qtd;
1548 qtd = NULL;
1550 qtd = list_entry(qtd_list->next, struct ehci_qtd,
1561 /* just one way to queue requests: swap with the dummy qtd.
1564 if (likely(qtd != NULL)) {
1574 token = qtd->hw_token;
1575 qtd->hw_token = HALT_BIT;
1580 *dummy = *qtd;
1583 list_del(&qtd->qtd_list);
1587 ehci_qtd_init(qtd, qtd->qtd_dma);
1588 qh->dummy = qtd;
1591 dma = qtd->qtd_dma;
1592 qtd = list_entry(qh->qtd_list.prev,
1594 qtd->hw_next = QTD_NEXT(dma);
1610 struct ehci_qtd *qtd;
1616 qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
1620 oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
1624 qtd, urb->ep->hcpriv);