Search limited to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/host/

Lines matching defs:xhci (each entry is the source line number followed by the matching source line; a short illustrative sketch of the register-handshake pattern appears after the listing):

31 #include "xhci.h"
55 static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
61 result = xhci_readl(xhci, ptr);
76 void xhci_quiesce(struct xhci_hcd *xhci)
83 halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
87 cmd = xhci_readl(xhci, &xhci->op_regs->command);
89 xhci_writel(xhci, cmd, &xhci->op_regs->command);
92 int xhci_halt(struct xhci_hcd *xhci)
94 xhci_dbg(xhci, "// Halt the HC\n");
95 xhci_quiesce(xhci);
97 return handshake(xhci, &xhci->op_regs->status,
102 int xhci_fake_doorbell(struct xhci_hcd *xhci, int slot_id)
107 if (!xhci_alloc_virt_device(xhci, slot_id, 0, GFP_NOIO)) {
108 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
113 xhci_ring_ep_doorbell(xhci, slot_id, 0, 0);
117 temp1 = xhci_readl(xhci, &xhci->op_regs->status);
118 xhci_dbg(xhci, "op reg status = %x\n",temp1);
122 xhci_dbg(xhci, "HSE problem detected\n");
125 xhci_dbg(xhci, "temp1=%x\n",temp1);
126 xhci_writel(xhci, temp1, &xhci->op_regs->status);
128 temp1 = xhci_readl(xhci, &xhci->op_regs->status);
129 xhci_dbg(xhci, "After clear op reg status=%x\n", temp1);
133 xhci_free_virt_device(xhci, slot_id);
136 temp1 = xhci_readl(xhci, &xhci->op_regs->command);
141 xhci_writel(xhci, temp1, &xhci->op_regs->command);
145 ret = handshake(xhci, &xhci->op_regs->status,
149 xhci_err(xhci, "Host took too long to start, "
162 int xhci_start(struct xhci_hcd *xhci)
167 temp = xhci_readl(xhci, &xhci->op_regs->command);
169 xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
171 xhci_writel(xhci, temp, &xhci->op_regs->command);
177 ret = handshake(xhci, &xhci->op_regs->status,
180 xhci_err(xhci, "Host took too long to start, "
185 xhci_fake_doorbell(xhci, 1);
198 int xhci_reset(struct xhci_hcd *xhci)
204 state = xhci_readl(xhci, &xhci->op_regs->status);
206 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
210 xhci_dbg(xhci, "// Reset the HC\n");
211 command = xhci_readl(xhci, &xhci->op_regs->command);
213 xhci_writel(xhci, command, &xhci->op_regs->command);
214 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
216 ret = handshake(xhci, &xhci->op_regs->command,
221 xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
226 return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
233 static void xhci_free_irq(struct xhci_hcd *xhci)
236 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
239 if (xhci_to_hcd(xhci)->irq >= 0)
242 if (xhci->msix_entries) {
243 for (i = 0; i < xhci->msix_count; i++)
244 if (xhci->msix_entries[i].vector)
245 free_irq(xhci->msix_entries[i].vector,
246 xhci_to_hcd(xhci));
248 free_irq(pdev->irq, xhci_to_hcd(xhci));
256 static int xhci_setup_msi(struct xhci_hcd *xhci)
259 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
263 xhci_err(xhci, "failed to allocate MSI entry\n");
268 0, "xhci_hcd", xhci_to_hcd(xhci));
270 xhci_err(xhci, "disable MSI interrupt\n");
280 static int xhci_setup_msix(struct xhci_hcd *xhci)
283 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
288 * with max number of interrupters based on the xhci HCSPARAMS1.
292 xhci->msix_count = min(num_online_cpus() + 1,
293 HCS_MAX_INTRS(xhci->hcs_params1));
295 xhci->msix_entries =
296 kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
298 if (!xhci->msix_entries) {
299 xhci_err(xhci, "Failed to allocate MSI-X entries\n");
303 for (i = 0; i < xhci->msix_count; i++) {
304 xhci->msix_entries[i].entry = i;
305 xhci->msix_entries[i].vector = 0;
308 ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
310 xhci_err(xhci, "Failed to enable MSI-X\n");
314 for (i = 0; i < xhci->msix_count; i++) {
315 ret = request_irq(xhci->msix_entries[i].vector,
317 0, "xhci_hcd", xhci_to_hcd(xhci));
325 xhci_err(xhci, "disable MSI-X interrupt\n");
326 xhci_free_irq(xhci);
329 kfree(xhci->msix_entries);
330 xhci->msix_entries = NULL;
335 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
337 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
339 xhci_free_irq(xhci);
341 if (xhci->msix_entries) {
343 kfree(xhci->msix_entries);
344 xhci->msix_entries = NULL;
361 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
364 xhci_dbg(xhci, "xhci_init\n");
365 spin_lock_init(&xhci->lock);
367 xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
368 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
370 xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
372 retval = xhci_mem_init(xhci, GFP_KERNEL);
373 xhci_dbg(xhci, "Finished xhci_init\n");
387 struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
390 xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
392 spin_lock_irqsave(&xhci->lock, flags);
393 temp = xhci_readl(xhci, &xhci->op_regs->status);
394 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
395 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
396 xhci_dbg(xhci, "HW died, polling stopped.\n");
397 spin_unlock_irqrestore(&xhci->lock, flags);
401 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
402 xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
403 xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
404 xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
405 xhci->error_bitmask = 0;
406 xhci_dbg(xhci, "Event ring:\n");
407 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
408 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
409 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
411 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
412 xhci_dbg(xhci, "Command ring:\n");
413 xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
414 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
415 xhci_dbg_cmd_ptrs(xhci);
417 if (!xhci->devs[i])
420 xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
424 if (xhci->noops_submitted != NUM_TEST_NOOPS)
425 if (xhci_setup_one_noop(xhci))
426 xhci_ring_cmd_db(xhci);
427 spin_unlock_irqrestore(&xhci->lock, flags);
429 if (!xhci->zombie)
430 mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
432 xhci_dbg(xhci, "Quit polling the event ring.\n");
453 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
454 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
459 xhci_dbg(xhci, "xhci_run\n");
465 ret = xhci_setup_msix(xhci);
468 ret = xhci_setup_msi(xhci);
475 xhci_err(xhci, "request interrupt %d failed\n",
483 init_timer(&xhci->event_ring_timer);
484 xhci->event_ring_timer.data = (unsigned long) xhci;
485 xhci->event_ring_timer.function = xhci_event_ring_work;
487 xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
488 xhci->zombie = 0;
489 xhci_dbg(xhci, "Setting event ring polling timer\n");
490 add_timer(&xhci->event_ring_timer);
493 xhci_dbg(xhci, "Command ring memory map follows:\n");
494 xhci_debug_ring(xhci, xhci->cmd_ring);
495 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
496 xhci_dbg_cmd_ptrs(xhci);
498 xhci_dbg(xhci, "ERST memory map follows:\n");
499 xhci_dbg_erst(xhci, &xhci->erst);
500 xhci_dbg(xhci, "Event ring:\n");
501 xhci_debug_ring(xhci, xhci->event_ring);
502 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
503 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
505 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
507 xhci_dbg(xhci, "// Set the interrupt modulation register\n");
508 temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
511 xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
515 temp = xhci_readl(xhci, &xhci->op_regs->command);
517 xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
519 xhci_writel(xhci, temp, &xhci->op_regs->command);
521 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
522 xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
523 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
524 xhci_writel(xhci, ER_IRQ_ENABLE(temp),
525 &xhci->ir_set->irq_pending);
526 xhci_print_ir_set(xhci, xhci->ir_set, 0);
529 doorbell = xhci_setup_one_noop(xhci);
530 if (xhci->quirks & XHCI_NEC_HOST)
531 xhci_queue_vendor_command(xhci, 0, 0, 0,
534 if (xhci_start(xhci)) {
535 xhci_halt(xhci);
540 (*doorbell)(xhci);
541 if (xhci->quirks & XHCI_NEC_HOST)
542 xhci_ring_cmd_db(xhci);
544 xhci_dbg(xhci, "Finished xhci_run\n");
560 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
562 spin_lock_irq(&xhci->lock);
563 xhci_halt(xhci);
564 xhci_reset(xhci);
565 spin_unlock_irq(&xhci->lock);
567 xhci_cleanup_msix(xhci);
571 xhci->zombie = 1;
572 del_timer_sync(&xhci->event_ring_timer);
575 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
576 temp = xhci_readl(xhci, &xhci->op_regs->status);
577 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
578 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
579 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
580 &xhci->ir_set->irq_pending);
581 xhci_print_ir_set(xhci, xhci->ir_set, 0);
583 xhci_dbg(xhci, "cleaning up memory\n");
584 xhci_mem_cleanup(xhci);
585 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
586 xhci_readl(xhci, &xhci->op_regs->status));
598 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
600 spin_lock_irq(&xhci->lock);
601 xhci_halt(xhci);
602 spin_unlock_irq(&xhci->lock);
604 xhci_cleanup_msix(xhci);
606 xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
607 xhci_readl(xhci, &xhci->op_regs->status));
685 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
695 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
706 out_ctx = xhci->devs[slot_id]->out_ctx;
707 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
711 xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
712 xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
714 xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
716 xhci_dbg(xhci, "Issuing evaluate context command.\n");
719 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
720 xhci->devs[slot_id]->out_ctx, ep_index);
721 in_ctx = xhci->devs[slot_id]->in_ctx;
722 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
727 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
731 xhci_dbg(xhci, "Slot %d input context\n", slot_id);
732 xhci_dbg_ctx(xhci, in_ctx, ep_index);
733 xhci_dbg(xhci, "Slot %d output context\n", slot_id);
734 xhci_dbg_ctx(xhci, out_ctx, ep_index);
736 ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
753 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
766 if (!xhci->devs || !xhci->devs[slot_id]) {
774 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
793 xhci_urb_free_priv(xhci, urb_priv);
807 ret = xhci_check_maxpacket(xhci, slot_id,
816 spin_lock_irqsave(&xhci->lock, flags);
817 if (xhci->xhc_state & XHCI_STATE_DYING)
819 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
821 spin_unlock_irqrestore(&xhci->lock, flags);
823 spin_lock_irqsave(&xhci->lock, flags);
824 if (xhci->xhc_state & XHCI_STATE_DYING)
826 if (xhci->devs[slot_id]->eps[ep_index].ep_state &
828 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
831 } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
833 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
838 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
841 spin_unlock_irqrestore(&xhci->lock, flags);
843 spin_lock_irqsave(&xhci->lock, flags);
844 if (xhci->xhc_state & XHCI_STATE_DYING)
846 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
848 spin_unlock_irqrestore(&xhci->lock, flags);
850 spin_lock_irqsave(&xhci->lock, flags);
851 if (xhci->xhc_state & XHCI_STATE_DYING)
853 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
855 spin_unlock_irqrestore(&xhci->lock, flags);
860 xhci_urb_free_priv(xhci, urb_priv);
862 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
865 spin_unlock_irqrestore(&xhci->lock, flags);
873 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
884 ep = &xhci->devs[slot_id]->eps[ep_index];
890 xhci_warn(xhci,
900 xhci_warn(xhci,
946 struct xhci_hcd *xhci;
953 xhci = hcd_to_xhci(hcd);
954 spin_lock_irqsave(&xhci->lock, flags);
959 temp = xhci_readl(xhci, &xhci->op_regs->status);
961 xhci_dbg(xhci, "HW died, freeing TD.\n");
965 spin_unlock_irqrestore(&xhci->lock, flags);
966 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
967 xhci_urb_free_priv(xhci, urb_priv);
970 if (xhci->xhc_state & XHCI_STATE_DYING) {
971 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
982 xhci_dbg(xhci, "Cancel URB %p\n", urb);
983 xhci_dbg(xhci, "Event ring:\n");
984 xhci_debug_ring(xhci, xhci->event_ring);
986 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
987 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
993 xhci_dbg(xhci, "Endpoint ring:\n");
994 xhci_debug_ring(xhci, ep_ring);
1012 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
1013 xhci_ring_cmd_db(xhci);
1016 spin_unlock_irqrestore(&xhci->lock, flags);
1031 * the xhci->devs[slot_id] structure.
1036 struct xhci_hcd *xhci;
1050 xhci = hcd_to_xhci(hcd);
1051 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1055 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1060 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1061 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1066 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1067 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1068 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1070 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1076 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1088 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1096 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1098 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1118 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1123 struct xhci_hcd *xhci;
1140 xhci = hcd_to_xhci(hcd);
1145 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1150 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1151 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1156 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1157 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1158 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1160 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1165 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1175 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
1193 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1204 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1213 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1225 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1228 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1233 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1241 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1269 xhci_err(xhci, "ERROR: unexpected command completion "
1277 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1281 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1295 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1303 xhci_err(xhci, "ERROR: unexpected command completion "
1314 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1327 spin_lock_irqsave(&xhci->lock, flags);
1328 virt_dev = xhci->devs[udev->slot_id];
1333 command->command_trb = xhci->cmd_ring->enqueue;
1341 xhci->cmd_ring->enq_seg->next->trbs;
1352 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
1355 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
1360 spin_unlock_irqrestore(&xhci->lock, flags);
1361 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
1364 xhci_ring_cmd_db(xhci);
1365 spin_unlock_irqrestore(&xhci->lock, flags);
1372 xhci_warn(xhci, "%s while waiting for %s command\n",
1381 return xhci_configure_endpoint_result(xhci, udev, cmd_status);
1382 return xhci_evaluate_context_result(xhci, udev, cmd_status);
1392 * else should be touching the xhci->devs[slot_id] structure, so we
1393 * don't need to take the xhci->lock for manipulating that.
1399 struct xhci_hcd *xhci;
1407 xhci = hcd_to_xhci(hcd);
1409 if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
1410 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1414 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1415 virt_dev = xhci->devs[udev->slot_id];
1418 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1423 xhci_dbg(xhci, "New Input Control Context:\n");
1424 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1425 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
1428 ret = xhci_configure_endpoint(xhci, udev, NULL,
1435 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
1436 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
1439 xhci_zero_in_ctx(xhci, virt_dev);
1448 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1459 struct xhci_hcd *xhci;
1466 xhci = hcd_to_xhci(hcd);
1468 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1469 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1473 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1474 virt_dev = xhci->devs[udev->slot_id];
1478 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
1482 xhci_zero_in_ctx(xhci, virt_dev);
1485 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
1491 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1494 xhci_slot_copy(xhci, in_ctx, out_ctx);
1497 xhci_dbg(xhci, "Input Context:\n");
1498 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
1501 void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1510 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1511 xhci->devs[slot_id]->out_ctx, ep_index);
1512 in_ctx = xhci->devs[slot_id]->in_ctx;
1513 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1517 xhci_warn(xhci, "WARN Cannot submit config ep after "
1519 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
1527 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
1528 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
1531 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1537 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
1538 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1542 xhci_find_new_dequeue_state(xhci, udev->slot_id,
1549 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
1550 xhci_dbg(xhci, "Queueing new dequeue state\n");
1551 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
1554 xhci_dbg(xhci, "Setting up input context for "
1556 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
1570 struct xhci_hcd *xhci;
1577 xhci = hcd_to_xhci(hcd);
1585 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1587 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
1592 xhci_dbg(xhci, "Control endpoint stall already handled.\n");
1596 xhci_dbg(xhci, "Queueing reset endpoint command\n");
1597 spin_lock_irqsave(&xhci->lock, flags);
1598 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
1605 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
1607 xhci_ring_cmd_db(xhci);
1612 spin_unlock_irqrestore(&xhci->lock, flags);
1615 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
1618 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
1628 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
1632 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
1639 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1642 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
1645 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
1649 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
1650 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
1658 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
1671 max_streams = HCC_MAX_PSA(xhci->hcc_params);
1673 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
1684 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
1695 ret = xhci_check_streams_endpoint(xhci, udev,
1703 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
1717 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
1728 if (!xhci->devs[slot_id])
1733 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1736 xhci_warn(xhci, "WARN Can't disable streams for "
1745 xhci_warn(xhci, "WARN Can't disable streams for "
1749 xhci_warn(xhci, "WARN xhci_free_streams() called "
1779 struct xhci_hcd *xhci;
1794 xhci = hcd_to_xhci(hcd);
1795 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
1798 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
1800 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
1808 spin_lock_irqsave(&xhci->lock, flags);
1809 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
1812 xhci_free_command(xhci, config_cmd);
1813 spin_unlock_irqrestore(&xhci->lock, flags);
1817 xhci_warn(xhci, "WARN: endpoints can't handle "
1819 xhci_free_command(xhci, config_cmd);
1820 spin_unlock_irqrestore(&xhci->lock, flags);
1823 vdev = xhci->devs[udev->slot_id];
1831 spin_unlock_irqrestore(&xhci->lock, flags);
1837 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
1838 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
1843 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
1855 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
1857 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
1859 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
1865 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
1869 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
1879 spin_lock_irqsave(&xhci->lock, flags);
1883 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
1887 xhci_free_command(xhci, config_cmd);
1888 spin_unlock_irqrestore(&xhci->lock, flags);
1897 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
1901 xhci_endpoint_zero(xhci, vdev, eps[i]);
1903 xhci_free_command(xhci, config_cmd);
1918 struct xhci_hcd *xhci;
1925 xhci = hcd_to_xhci(hcd);
1926 vdev = xhci->devs[udev->slot_id];
1929 spin_lock_irqsave(&xhci->lock, flags);
1930 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
1933 spin_unlock_irqrestore(&xhci->lock, flags);
1947 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1948 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
1951 xhci_endpoint_copy(xhci, command->in_ctx,
1953 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
1956 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
1958 spin_unlock_irqrestore(&xhci->lock, flags);
1963 ret = xhci_configure_endpoint(xhci, udev, command,
1972 spin_lock_irqsave(&xhci->lock, flags);
1975 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
1980 spin_unlock_irqrestore(&xhci->lock, flags);
2002 struct xhci_hcd *xhci;
2012 xhci = hcd_to_xhci(hcd);
2014 virt_dev = xhci->devs[slot_id];
2016 xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
2021 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
2028 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
2030 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
2035 spin_lock_irqsave(&xhci->lock, flags);
2036 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
2044 xhci->cmd_ring->enq_seg->next->trbs;
2047 ret = xhci_queue_reset_device(xhci, slot_id);
2049 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2051 spin_unlock_irqrestore(&xhci->lock, flags);
2054 xhci_ring_cmd_db(xhci);
2055 spin_unlock_irqrestore(&xhci->lock, flags);
2062 xhci_warn(xhci, "%s while waiting for reset device command\n",
2064 spin_lock_irqsave(&xhci->lock, flags);
2070 spin_unlock_irqrestore(&xhci->lock, flags);
2083 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
2085 xhci_get_slot_state(xhci, virt_dev->out_ctx));
2086 xhci_info(xhci, "Not freeing device rings.\n");
2091 xhci_dbg(xhci, "Successful reset device command.\n");
2094 if (xhci_is_vendor_info_code(xhci, ret))
2096 xhci_warn(xhci, "Unknown completion code %u for "
2107 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2110 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
2111 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
2115 xhci_free_command(xhci, reset_device_cmd);
2126 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2134 virt_dev = xhci->devs[udev->slot_id];
2144 spin_lock_irqsave(&xhci->lock, flags);
2146 state = xhci_readl(xhci, &xhci->op_regs->status);
2147 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
2148 xhci_free_virt_device(xhci, udev->slot_id);
2149 spin_unlock_irqrestore(&xhci->lock, flags);
2153 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
2154 spin_unlock_irqrestore(&xhci->lock, flags);
2155 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2158 xhci_ring_cmd_db(xhci);
2159 spin_unlock_irqrestore(&xhci->lock, flags);
2168 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2173 spin_lock_irqsave(&xhci->lock, flags);
2174 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
2176 spin_unlock_irqrestore(&xhci->lock, flags);
2177 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2180 xhci_ring_cmd_db(xhci);
2181 spin_unlock_irqrestore(&xhci->lock, flags);
2183 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2186 xhci_warn(xhci, "%s while waiting for a slot\n",
2191 if (!xhci->slot_id) {
2192 xhci_err(xhci, "Error while assigning device slot ID\n");
2200 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
2202 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
2203 spin_lock_irqsave(&xhci->lock, flags);
2204 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
2205 xhci_ring_cmd_db(xhci);
2206 spin_unlock_irqrestore(&xhci->lock, flags);
2209 udev->slot_id = xhci->slot_id;
2230 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2236 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
2240 virt_dev = xhci->devs[udev->slot_id];
2244 xhci_setup_addressable_virt_dev(xhci, udev);
2246 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
2248 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2249 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2251 spin_lock_irqsave(&xhci->lock, flags);
2252 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
2255 spin_unlock_irqrestore(&xhci->lock, flags);
2256 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2259 xhci_ring_cmd_db(xhci);
2260 spin_unlock_irqrestore(&xhci->lock, flags);
2262 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2265 xhci_warn(xhci, "%s while waiting for a slot\n",
2273 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
2282 xhci_dbg(xhci, "Successful Address Device command\n");
2285 xhci_err(xhci, "ERROR: unexpected command completion "
2287 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
2288 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
2295 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
2296 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
2297 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
2299 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
2301 xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
2302 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
2304 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2305 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2306 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
2307 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
2312 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
2315 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2319 xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
2331 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2344 vdev = xhci->devs[hdev->slot_id];
2346 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
2349 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2351 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2355 spin_lock_irqsave(&xhci->lock, flags);
2356 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
2357 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
2359 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
2363 if (xhci->hci_version > 0x95) {
2364 xhci_dbg(xhci, "xHCI version %x needs hub "
2366 (unsigned int) xhci->hci_version);
2377 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
2379 (unsigned int) xhci->hci_version);
2382 spin_unlock_irqrestore(&xhci->lock, flags);
2384 xhci_dbg(xhci, "Set up %s for hub device.\n",
2385 (xhci->hci_version > 0x95) ?
2387 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
2388 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
2393 if (xhci->hci_version > 0x95)
2394 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
2397 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
2400 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
2401 xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
2403 xhci_free_command(xhci, config_cmd);
2409 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2411 return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
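
The entries above repeatedly hit the driver's register-handshake pattern (source lines 55-61, 92-99, 145, 177, 216 and 226): write a command bit, then poll a status register until a masked value reaches the expected state or a timeout expires. The standalone sketch below is only an illustration of that pattern, not the driver's actual code: the MMIO registers are mocked as plain variables so it compiles and runs in user space, the STS_HALT and CMD_RUN bit positions follow the xHCI specification (bit 0 of USBSTS and USBCMD), and the names reg_read and fake_xhci_halt plus the timeout count are invented for this sketch. The real driver uses xhci_readl()/xhci_writel() on ioremapped registers and udelay() between polls.

/* Standalone, user-space sketch of the handshake()/xhci_halt() pattern. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define STS_HALT (1u << 0)   /* USBSTS.HCHalted  (xHCI spec, bit 0) */
#define CMD_RUN  (1u << 0)   /* USBCMD.Run/Stop  (xHCI spec, bit 0) */

static uint32_t usbcmd = CMD_RUN;   /* mocked operational registers */
static uint32_t usbsts;

/* Mocked read: pretend the controller halts once Run/Stop is cleared. */
static uint32_t reg_read(const uint32_t *reg)
{
	if (reg == &usbsts && !(usbcmd & CMD_RUN))
		usbsts |= STS_HALT;
	return *reg;
}

/*
 * Poll a register until (value & mask) == done or the timeout expires.
 * The real helper takes microseconds and calls udelay(1) per iteration;
 * here the count is just a loop bound.
 */
static int handshake(uint32_t *reg, uint32_t mask, uint32_t done, int usec)
{
	do {
		uint32_t result = reg_read(reg);
		if (result == ~0u)            /* register reads as all ones: HC gone */
			return -ENODEV;
		if ((result & mask) == done)
			return 0;
	} while (--usec > 0);
	return -ETIMEDOUT;
}

/* Clear Run/Stop, then wait for HCHalted, mirroring xhci_quiesce()/xhci_halt(). */
static int fake_xhci_halt(void)
{
	usbcmd &= ~CMD_RUN;
	return handshake(&usbsts, STS_HALT, STS_HALT, 16 * 1000);
}

int main(void)
{
	printf("halt returned %d\n", fake_xhci_halt());
	return 0;
}

Run as-is this prints "halt returned 0", since the mocked status register reports HCHalted as soon as Run/Stop is cleared; the same handshake helper is what the listed xhci_start(), xhci_reset() and xhci_fake_doorbell() lines reuse with different mask/done pairs (e.g. waiting for STS_HALT to clear or for CMD_RESET/STS_CNR to drop).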