  • only in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/usb/host/

Lines Matching defs:qh

26  * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
37 * (b) special fields in qh entries or (c) split iso entries. TTs will
87 qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
90 BUG_ON(qh->qh_state != QH_STATE_IDLE);
92 qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma);
93 qh->hw_alt_next = EHCI_LIST_END;
100 if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
104 epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
105 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
106 qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE);
107 usb_settoggle (qh->dev, epnum, is_out, 1);
111 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
113 qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING);
116 /* if it weren't for a common silicon quirk (writing the dummy into the qh
117 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
121 qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
125 if (list_empty (&qh->qtd_list))
126 qtd = qh->dummy;
128 qtd = list_entry (qh->qtd_list.next,
131 if (cpu_to_le32 (qtd->qtd_dma) == qh->hw_current)
136 qh_update (ehci, qh, qtd);
221 struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
224 if ((qh->hw_info2 & __constant_cpu_to_le32 (QH_SMASK)) != 0) {
229 qh_put (qh);
320 static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
321 static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
323 static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
324 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
327 * Process and free completed qtds for a qh, returning URBs to drivers.
328 * Chases up to qh->hw_current. Returns number of completions called,
333 qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
335 struct ehci_qtd *last = NULL, *end = qh->dummy;
342 if (unlikely (list_empty (&qh->qtd_list)))
347 * they add urbs to this qh's queue or mark them for unlinking.
351 state = qh->qh_state;
352 qh->qh_state = QH_STATE_COMPLETING;
360 list_for_each_safe (entry, tmp, &qh->qtd_list) {
382 /* hardware copies qtd out of qh overlay */
392 /* magic dummy for some short reads; qh won't advance.
414 * its urb was canceled. we may patch qh or qtds.
429 == qh->hw_current)
430 token = le32_to_cpu (qh->hw_token);
432 /* force halt for unlinked or blocked qh, so we'll
433 * patch the qh later and so that completions can't
436 if ((HALT_BIT & qh->hw_token) == 0) {
438 qh->hw_token |= HALT_BIT;
450 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
467 qh->qh_state = state;
469 /* be sure the hardware's done with the qh before refreshing
473 if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
476 qh_refresh(ehci, qh);
483 & qh->hw_info2) != 0) {
484 intr_deschedule (ehci, qh);
485 (void) qh_schedule (ehci, qh);
487 unlink_async (ehci, qh);
524 * create a list of filled qtds for this URB; won't link into qh.
599 /* qh makes control packets use qtd toggle; maybe switch it */
616 * up after short reads, hc should advance qh past this urb
664 // Would be best to create all qh's from config descriptors,
666 // any previous qh and cancel its urbs first; endpoints are
685 struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
690 if (!qh)
691 return qh;
707 * - qh has a polling interval
712 qh->usecs = NS_TO_US (usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
714 qh->start = NO_FRAME;
717 qh->c_usecs = 0;
718 qh->gap_uf = 0;
720 qh->period = urb->interval >> 3;
721 if (qh->period == 0 && urb->interval != 1) {
735 qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
739 qh->c_usecs = qh->usecs + HS_USECS (0);
740 qh->usecs = HS_USECS (1);
742 qh->usecs += HS_USECS (1);
743 qh->c_usecs = HS_USECS (0);
747 qh->tt_usecs = NS_TO_US (think_time +
750 qh->period = urb->interval;
755 qh->dev = urb->dev;
814 qh_put (qh);
821 qh->qh_state = QH_STATE_IDLE;
822 qh->hw_info1 = cpu_to_le32 (info1);
823 qh->hw_info2 = cpu_to_le32 (info2);
825 qh_refresh (ehci, qh);
826 return qh;
831 /* move qh (and its qtds) onto async queue; maybe enable queue. */
833 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
835 __le32 dma = QH_NEXT (qh->qh_dma);
841 if (!head->qh_next.qh) {
856 if (qh->qh_state == QH_STATE_IDLE)
857 qh_refresh (ehci, qh);
860 qh->qh_next = head->qh_next;
861 qh->hw_next = head->hw_next;
864 head->qh_next.qh = qh;
867 qh->qh_state = QH_STATE_LINKED;
889 struct ehci_qh *qh = NULL;
891 qh = (struct ehci_qh *) *ptr;
892 if (unlikely (qh == NULL)) {
894 qh = qh_make (ehci, urb, GFP_ATOMIC);
895 *ptr = qh;
899 if (ehci_optimized(ehci, qh) >= 0)
902 return qh;
905 if (likely (qh != NULL)) {
914 /* control qh may need patching ... */
919 qh->hw_info1 &= ~QH_ADDR_MASK;
929 qtd2->urb->hcpriv = qh_get (qh);
950 dummy = qh->dummy;
958 __list_splice (qtd_list, qh->qtd_list.prev);
961 qh->dummy = qtd;
965 qtd = list_entry (qh->qtd_list.prev,
973 urb->hcpriv = qh_get (qh);
976 return qh;
992 struct ehci_qh *qh = NULL;
1000 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
1087 qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
1088 if (unlikely(qh == NULL)) {
1096 if (likely (qh->qh_state == QH_STATE_IDLE))
1097 qh_link_async (ehci, qh_get (qh));
1100 if (unlikely (qh == NULL))
1107 /* the async qh for the qtds being reclaimed are now unlinked from the HC */
1111 struct ehci_qh *qh = ehci->reclaim;
1116 // qh->hw_next = cpu_to_le32 (qh->qh_dma);
1117 qh->qh_state = QH_STATE_IDLE;
1118 qh->qh_next.qh = NULL;
1119 qh_put (qh); // refcount from reclaim
1122 next = qh->reclaim;
1125 qh->reclaim = NULL;
1127 qh_completions (ehci, qh);
1129 if (!list_empty (&qh->qtd_list)
1131 qh_link_async (ehci, qh);
1133 qh_put (qh); // refcount from async list
1139 && ehci->async->qh_next.qh == NULL)
1149 /* makes sure the async qh will become idle */
1152 static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1160 || (qh->qh_state != QH_STATE_LINKED
1161 && qh->qh_state != QH_STATE_UNLINK_WAIT)
1166 if (ehci_optimized(ehci, qh) >= 0)
1172 if (unlikely (qh == ehci->async)) {
1186 qh->qh_state = QH_STATE_UNLINK;
1187 ehci->reclaim = qh = qh_get (qh);
1190 while (prev->qh_next.qh != qh)
1191 prev = prev->qh_next.qh;
1193 prev->hw_next = qh->hw_next;
1194 prev->qh_next = qh->qh_next;
1198 /* if (unlikely (qh->reclaim != 0))
1216 struct ehci_qh *qh;
1223 qh = ehci->async->qh_next.qh;
1224 if (likely (qh != NULL)) {
1227 if (qh->stamp != ehci->stamp) {
1228 int pipeindex = ehci_optimized(ehci, qh);
1231 qh->stamp = ehci->stamp;
1232 ehci->ehci_bypass_callback(pipeindex, qh, &ehci->lock);
1236 /* clean any finished work for this qh */
1237 if (!list_empty (&qh->qtd_list)) {
1245 qh = qh_get (qh);
1246 qh->stamp = ehci->stamp;
1247 temp = qh_completions (ehci, qh);
1248 qh_put (qh);
1256 * as HCD schedule-scanning costs. delay for any qh
1261 if (list_empty (&qh->qtd_list)) {
1262 if (qh->stamp == ehci->stamp)
1265 && qh->qh_state == QH_STATE_LINKED)
1266 start_unlink_async (ehci, qh);
1270 qh = qh->qh_next.qh;
1271 } while (qh);
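
Read together, the matches above trace the queue-head lifecycle in this driver: qh_make() builds a qh and leaves it in QH_STATE_IDLE, qh_link_async() splices it onto the async schedule, qh_append_tds() queues qtds behind the dummy, qh_completions() retires finished qtds up to hw_current, and start_unlink_async() hands the qh to ehci->reclaim until it is unlinked from the HC and returns to idle. Below is a minimal standalone sketch of that state machine. Only the five state names and the function names cited come from the listing; the types, the helper, and the example transition order are illustrative assumptions, not the kernel's actual definitions.

/*
 * Standalone sketch (not kernel code) of the qh state machine implied by
 * the matches above.  State names are taken from the listing; everything
 * else here is invented purely for illustration.
 */
#include <stdio.h>

enum qh_state {
	QH_STATE_IDLE,		/* off the schedule; qh_make() leaves a qh here */
	QH_STATE_LINKED,	/* on the async list via qh_link_async() */
	QH_STATE_UNLINK_WAIT,	/* unlink requested but not yet started */
	QH_STATE_UNLINK,	/* owned by ehci->reclaim; start_unlink_async() */
	QH_STATE_COMPLETING,	/* qh_completions() is walking qtd_list */
};

static const char *qh_state_name(enum qh_state s)
{
	switch (s) {
	case QH_STATE_IDLE:		return "IDLE";
	case QH_STATE_LINKED:		return "LINKED";
	case QH_STATE_UNLINK_WAIT:	return "UNLINK_WAIT";
	case QH_STATE_UNLINK:		return "UNLINK";
	case QH_STATE_COMPLETING:	return "COMPLETING";
	}
	return "?";
}

int main(void)
{
	/* One plausible async-qh lifetime, as suggested by the listing. */
	enum qh_state path[] = {
		QH_STATE_IDLE,		/* qh_make(): hw_info1/hw_info2 set, qh_refresh() */
		QH_STATE_LINKED,	/* qh_link_async(): spliced after the async head */
		QH_STATE_COMPLETING,	/* qh_completions(): qtds retired up to hw_current */
		QH_STATE_LINKED,	/* prior state restored when completions finish */
		QH_STATE_UNLINK,	/* start_unlink_async(): prev->hw_next bypasses the qh */
		QH_STATE_IDLE,		/* reclaim done: qh_put() drops the reclaim reference */
	};
	size_t n = sizeof(path) / sizeof(path[0]);

	for (size_t i = 0; i < n; i++)
		printf("%s%s", qh_state_name(path[i]), i + 1 < n ? " -> " : "\n");
	return 0;
}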