Lines Matching defs:xhci (in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/host/)

70 #include "xhci.h"
96 static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
99 if (ring == xhci->event_ring)
101 (seg->next == xhci->event_ring->first_seg);
110 static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
113 if (ring == xhci->event_ring)
129 static void next_trb(struct xhci_hcd *xhci,
134 if (last_trb(xhci, ring, *seg, *trb)) {
146 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
157 if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
158 if (consumer && last_trb_on_last_seg(xhci, ring,
161 xhci_dbg(xhci, "Toggle cycle state "
174 } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
178 if (ring == xhci->event_ring)
179 xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
180 else if (ring == xhci->cmd_ring)
181 xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
183 xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
203 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
217 while (last_trb(xhci, ring, ring->enq_seg, next)) {
219 if (ring != xhci->event_ring) {
235 if (!xhci_link_trb_quirk(xhci)) {
244 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
247 xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
257 if (ring == xhci->event_ring)
258 xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
259 else if (ring == xhci->cmd_ring)
260 xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
262 xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
265 static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
276 while (last_trb(xhci, ring, enq_seg, enq)) {
292 xhci_warn(xhci, "Not enough room on ring; "
304 while (last_trb(xhci, ring, enq_seg, enq)) {
313 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
317 xhci_dbg(xhci, "// Ding dong!\n");
318 temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
319 xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
321 xhci_readl(xhci, &xhci->dba->doorbell[0]);
324 static void ring_ep_doorbell(struct xhci_hcd *xhci,
332 __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
334 ep = &xhci->devs[slot_id]->eps[ep_index];
338 field = xhci_readl(xhci, db_addr) & DB_MASK;
340 xhci_writel(xhci, field, db_addr);
345 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
350 ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
355 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
362 ep = &xhci->devs[slot_id]->eps[ep_index];
367 ring_ep_doorbell(xhci, slot_id, ep_index, 0);
375 ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
407 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
413 ep = &xhci->devs[slot_id]->eps[ep_index];
419 xhci_warn(xhci,
429 xhci_warn(xhci,
443 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
446 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
464 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
469 struct xhci_virt_device *dev = xhci->devs[slot_id];
475 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
478 xhci_warn(xhci, "WARN can't find new dequeue state "
484 xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
491 xhci_dbg(xhci, "Finding endpoint context\n");
492 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
496 xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
507 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
510 xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
513 xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
515 xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
520 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
528 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
535 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
536 xhci_dbg(xhci, "Address = %p (0x%llx dma); "
549 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
561 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
566 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
571 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
573 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
580 queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
592 static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
604 /* Must be called with xhci->lock held in interrupt context */
605 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
608 struct usb_hcd *hcd = xhci_to_hcd(xhci);
619 xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
621 spin_unlock(&xhci->lock);
623 xhci_urb_free_priv(xhci, urb_priv);
624 spin_lock(&xhci->lock);
625 xhci_dbg(xhci, "%s URB given back\n", adjective);
639 static void handle_stopped_endpoint(struct xhci_hcd *xhci,
655 ep = &xhci->devs[slot_id]->eps[ep_index];
658 xhci_stop_watchdog_timer_in_irq(xhci, ep);
659 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
670 xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
673 ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
686 xhci_warn(xhci, "WARN Cancelled URB %p "
697 xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
701 td_to_noop(xhci, ep_ring, cur_td);
711 xhci_stop_watchdog_timer_in_irq(xhci, ep);
715 xhci_queue_new_dequeue_state(xhci,
719 xhci_ring_cmd_db(xhci);
722 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
742 xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
747 if (xhci->xhc_state & XHCI_STATE_DYING)
751 /* Return to the event handler with xhci->lock re-acquired */
758 * through xhci->state.
775 struct xhci_hcd *xhci;
783 xhci = ep->xhci;
785 spin_lock(&xhci->lock);
788 if (xhci->xhc_state & XHCI_STATE_DYING) {
789 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
791 spin_unlock(&xhci->lock);
795 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
797 spin_unlock(&xhci->lock);
801 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
802 xhci_warn(xhci, "Assuming host is dying, halting host.\n");
806 xhci->xhc_state |= XHCI_STATE_DYING;
808 xhci_quiesce(xhci);
809 spin_unlock(&xhci->lock);
811 ret = xhci_halt(xhci);
813 spin_lock(&xhci->lock);
823 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
824 xhci_warn(xhci, "Completing active URBs anyway.\n");
832 if (!xhci->devs[i])
835 temp_ep = &xhci->devs[i]->eps[j];
839 xhci_dbg(xhci, "Killing URBs for slot ID %u, "
848 xhci_giveback_urb_in_irq(xhci, cur_td,
857 xhci_giveback_urb_in_irq(xhci, cur_td,
862 spin_unlock(&xhci->lock);
863 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
864 xhci_dbg(xhci, "Calling usb_hc_died()\n");
865 usb_hc_died(xhci_to_hcd(xhci));
866 xhci_dbg(xhci, "xHCI host controller is dead.\n");
876 static void handle_set_deq_completion(struct xhci_hcd *xhci,
891 dev = xhci->devs[slot_id];
895 xhci_warn(xhci, "WARN Set TR deq ptr command for "
902 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
903 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
911 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
915 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
921 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
925 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
929 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
941 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
947 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
950 static void handle_reset_ep_completion(struct xhci_hcd *xhci,
962 xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
969 if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
970 xhci_dbg(xhci, "Queueing configure endpoint command\n");
971 xhci_queue_configure_endpoint(xhci,
972 xhci->devs[slot_id]->in_ctx->dma, slot_id,
974 xhci_ring_cmd_db(xhci);
977 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
978 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
986 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
997 if (xhci->cmd_ring->dequeue != command->command_trb)
1006 xhci_free_command(xhci, command);
1010 static void handle_cmd_completion(struct xhci_hcd *xhci,
1023 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1024 xhci->cmd_ring->dequeue);
1027 xhci->error_bitmask |= 1 << 4;
1032 xhci->error_bitmask |= 1 << 5;
1035 switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
1038 xhci->slot_id = slot_id;
1040 xhci->slot_id = 0;
1041 complete(&xhci->addr_dev);
1044 if (xhci->devs[slot_id])
1045 xhci_free_virt_device(xhci, slot_id);
1048 virt_dev = xhci->devs[slot_id];
1049 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1059 ctrl_ctx = xhci_get_input_control_ctx(xhci,
1069 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1073 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1074 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1077 xhci_dbg(xhci, "Completed config ep cmd - "
1081 xhci->devs[slot_id]->eps[ep_index].ep_state &=
1083 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1087 xhci_dbg(xhci, "Completed config ep cmd\n");
1088 xhci->devs[slot_id]->cmd_status =
1090 complete(&xhci->devs[slot_id]->cmd_completion);
1093 virt_dev = xhci->devs[slot_id];
1094 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1096 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
1097 complete(&xhci->devs[slot_id]->cmd_completion);
1100 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
1101 complete(&xhci->addr_dev);
1104 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
1107 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
1110 ++xhci->noops_handled;
1113 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
1116 xhci_dbg(xhci, "Completed reset device command.\n");
1118 xhci->cmd_ring->dequeue->generic.field[3]);
1119 virt_dev = xhci->devs[slot_id];
1121 handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
1123 xhci_warn(xhci, "Reset device command completion "
1127 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1128 xhci->error_bitmask |= 1 << 6;
1131 xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
1137 xhci->error_bitmask |= 1 << 6;
1140 inc_deq(xhci, xhci->cmd_ring, false);
1143 static void handle_vendor_event(struct xhci_hcd *xhci,
1149 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1150 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1151 handle_cmd_completion(xhci, &event->event_cmd);
1154 static void handle_port_status(struct xhci_hcd *xhci,
1161 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1162 xhci->error_bitmask |= 1 << 8;
1165 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1168 inc_deq(xhci, xhci->event_ring, true);
1170 spin_unlock(&xhci->lock);
1172 usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
1173 spin_lock(&xhci->lock);
1232 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1237 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1243 xhci_queue_reset_ep(xhci, slot_id, ep_index);
1244 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1250 xhci_ring_cmd_db(xhci);
1259 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1279 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1285 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1287 xhci_dbg(xhci, "Treating code as success.\n");
1297 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1312 xdev = xhci->devs[slot_id];
1315 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1342 } else if (xhci_requires_manual_halt_cleanup(xhci,
1349 xhci_cleanup_halted_endpoint(xhci,
1355 inc_deq(xhci, ep_ring, false);
1356 inc_deq(xhci, ep_ring, false);
1371 xhci_warn(xhci, "URB transfer length is wrong, "
1399 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1411 xdev = xhci->devs[slot_id];
1414 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1417 xhci_debug_trb(xhci, xhci->event_ring->dequeue);
1421 xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1425 xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1429 xhci_dbg(xhci, "Successful control transfer!\n");
1434 xhci_warn(xhci, "WARN: short transfer on control ep\n");
1441 if (!xhci_requires_manual_halt_cleanup(xhci,
1444 xhci_dbg(xhci, "TRB error code %u, "
1458 xhci_cleanup_halted_endpoint(xhci,
1460 return finish_td(xhci, td, event_trb, event, ep, status, true);
1489 xhci_dbg(xhci, "Waiting for status "
1496 return finish_td(xhci, td, event_trb, event, ep, status, false);
1502 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1529 xhci_dbg(xhci, "Successful isoc transfer!\n");
1565 inc_deq(xhci, ep_ring, false);
1566 inc_deq(xhci, ep_ring, false);
1567 return finish_td(xhci, td, event_trb, event, ep, status, true);
1578 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1598 return finish_td(xhci, td, event_trb, event, ep, status, false);
1604 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1620 xhci_warn(xhci, "WARN Successful completion "
1628 xhci_dbg(xhci, "Successful bulk "
1631 xhci_dbg(xhci, "Successful interrupt "
1660 xhci_warn(xhci, "HC gave bad length "
1692 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1709 return finish_td(xhci, td, event_trb, event, ep, status, false);
1717 static int handle_tx_event(struct xhci_hcd *xhci,
1737 xdev = xhci->devs[slot_id];
1739 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
1745 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
1748 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1751 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
1767 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
1770 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
1773 xhci_warn(xhci, "WARN: Stalled endpoint\n");
1778 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
1783 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
1787 xhci_warn(xhci, "WARN: babble error on endpoint\n");
1791 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
1795 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
1798 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
1806 xhci_dbg(xhci, "underrun event on endpoint\n");
1808 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
1813 xhci_dbg(xhci, "overrun event on endpoint\n");
1815 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
1827 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
1830 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
1834 xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
1844 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
1847 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
1849 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
1852 xhci_dbg(xhci, "td_list is empty while skip "
1864 xhci_dbg(xhci, "Found td. Clear skip flag.\n");
1870 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
1886 xhci_dbg(xhci, "event_trb is a no-op TRB. "
1896 ret = process_ctrl_td(xhci, td, event_trb, event, ep,
1899 ret = process_isoc_td(xhci, td, event_trb, event, ep,
1902 ret = process_bulk_intr_td(xhci, td, event_trb, event,
1911 inc_deq(xhci, xhci->event_ring, true);
1925 xhci_urb_free_priv(xhci, urb_priv);
1927 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
1928 xhci_dbg(xhci, "Giveback URB %p, len = %d, "
1931 spin_unlock(&xhci->lock);
1932 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
1933 spin_lock(&xhci->lock);
1949 * xhci->lock between event processing (e.g. to pass up port status changes).
1951 static void xhci_handle_event(struct xhci_hcd *xhci)
1957 xhci_dbg(xhci, "In %s\n", __func__);
1958 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
1959 xhci->error_bitmask |= 1 << 1;
1963 event = xhci->event_ring->dequeue;
1966 xhci->event_ring->cycle_state) {
1967 xhci->error_bitmask |= 1 << 2;
1970 xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
1974 xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
1975 handle_cmd_completion(xhci, &event->event_cmd);
1976 xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
1979 xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
1980 handle_port_status(xhci, event);
1981 xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
1985 xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
1986 ret = handle_tx_event(xhci, &event->trans_event);
1987 xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
1989 xhci->error_bitmask |= 1 << 9;
1995 handle_vendor_event(xhci, event);
1997 xhci->error_bitmask |= 1 << 3;
2002 if (xhci->xhc_state & XHCI_STATE_DYING) {
2003 xhci_dbg(xhci, "xHCI host dying, returning from "
2010 inc_deq(xhci, xhci->event_ring, true);
2013 xhci_handle_event(xhci);
2023 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2030 spin_lock(&xhci->lock);
2031 trb = xhci->event_ring->dequeue;
2033 status = xhci_readl(xhci, &xhci->op_regs->status);
2038 spin_unlock(&xhci->lock);
2041 xhci_dbg(xhci, "op reg status = %08x\n", status);
2042 xhci_dbg(xhci, "Event ring dequeue ptr:\n");
2043 xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
2045 xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
2052 xhci_warn(xhci, "WARNING: Host System Error\n");
2053 xhci_halt(xhci);
2055 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
2056 spin_unlock(&xhci->lock);
2066 xhci_writel(xhci, status, &xhci->op_regs->status);
2072 irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
2074 xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
2077 if (xhci->xhc_state & XHCI_STATE_DYING) {
2078 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2083 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2084 xhci_write_64(xhci, temp_64 | ERST_EHB,
2085 &xhci->ir_set->erst_dequeue);
2086 spin_unlock(&xhci->lock);
2091 event_ring_deq = xhci->event_ring->dequeue;
2092 xhci_handle_event(xhci);
2094 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2096 if (event_ring_deq != xhci->event_ring->dequeue) {
2097 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2098 xhci->event_ring->dequeue);
2100 xhci_warn(xhci, "WARN something wrong with SW event "
2109 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2111 spin_unlock(&xhci->lock);
2136 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2147 inc_enq(xhci, ring, consumer, more_trbs_coming);
2150 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2154 xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
2161 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2164 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2167 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2172 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2175 if (!room_on_ring(xhci, ep_ring, num_trbs)) {
2176 xhci_err(xhci, "ERROR no room on ep ring\n");
2184 xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
2187 while (last_trb(xhci, ring, ring->enq_seg, next)) {
2192 if (!xhci_link_trb_quirk(xhci))
2201 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2204 xhci_dbg(xhci, "queue_trb: Toggle cycle "
2218 static int prepare_transfer(struct xhci_hcd *xhci,
2231 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2235 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2240 ret = prepare_ring(xhci, ep_ring,
2253 ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
2255 xhci_urb_free_priv(xhci, urb_priv);
2272 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2281 xhci_dbg(xhci, "count sg list trbs: \n");
2298 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
2307 xhci_dbg(xhci, "\n");
2332 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2342 ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2351 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2354 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
2355 xhci->devs[slot_id]->out_ctx, ep_index);
2380 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
2439 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2459 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2463 num_trbs = count_sg_trbs_needed(xhci, urb);
2471 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
2506 xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
2537 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
2544 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
2545 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
2552 if (xhci->hci_version < 0x100) {
2572 queue_trb(xhci, ep_ring, false, more_trbs_coming,
2609 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2615 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2635 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
2637 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2665 ret = prepare_transfer(xhci, xhci->devs[slot_id],
2726 if (xhci->hci_version < 0x100) {
2745 queue_trb(xhci, ep_ring, false, more_trbs_coming,
2766 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2771 /* Caller must have locked xhci->lock */
2772 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2785 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2797 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
2803 ret = prepare_transfer(xhci, xhci->devs[slot_id],
2829 if (xhci->hci_version == 0x100) {
2838 queue_trb(xhci, ep_ring, false, true,
2853 queue_trb(xhci, ep_ring, false, true,
2870 queue_trb(xhci, ep_ring, false, false,
2877 giveback_first_trb(xhci, slot_id, ep_index, 0,
2882 static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
2905 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2920 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
2924 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
2950 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
2952 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
2999 queue_trb(xhci, ep_ring, false, false,
3017 xhci_err(xhci, "ISOC TD length unmatch\n");
3025 ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
3032 * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to
3036 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3048 xdev = xhci->devs[slot_id];
3050 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3055 num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3060 ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
3065 start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
3094 return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
3104 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3107 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
3110 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3116 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3119 xhci_err(xhci, "ERR: No room for command on command ring\n");
3121 xhci_err(xhci, "ERR: Reserved TRB counting for "
3125 queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
3126 field4 | xhci->cmd_ring->cycle_state);
3131 static int queue_cmd_noop(struct xhci_hcd *xhci)
3133 return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
3140 void *xhci_setup_one_noop(struct xhci_hcd *xhci)
3142 if (queue_cmd_noop(xhci) < 0)
3144 xhci->noops_submitted++;
3149 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
3151 return queue_command(xhci, 0, 0, 0,
3156 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3159 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3165 int xhci_queue_vendor_command(struct xhci_hcd *xhci,
3168 return queue_command(xhci, field1, field2, field3, field4, false);
3172 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
3174 return queue_command(xhci, 0, 0, 0,
3180 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3183 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3190 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3193 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3199 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
3206 return queue_command(xhci, 0, 0, 0,
3213 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
3226 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3227 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
3231 return queue_command(xhci, lower_32_bits(addr) | cycle_state,
3236 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
3243 return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,