Lines Matching defs:xhci

22 #include "xhci.h"
23 #include "xhci-trace.h"
24 #include "xhci-debugfs.h"
25 #include "xhci-dbgcap.h"
89 int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
98 xhci->xhc_state & exit_state,
101 if (result == U32_MAX || xhci->xhc_state & exit_state)
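The matches above show only the poll condition and the failure test of xhci_handshake_check_state(). A minimal sketch of how they fit together, reconstructed from the mainline driver (the readl_poll_timeout_atomic() call and the exact return codes are assumptions where the listing elides them; linux/iopoll.h and the driver's xhci.h are assumed):

int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
			       u32 mask, u32 done, int usec,
			       unsigned int exit_state)
{
	u32 result;
	int ret;

	/* Poll until (reg & mask) == done, the register reads all ones
	 * (controller gone), or xhc_state reaches the caller's exit_state.
	 */
	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX ||
					xhci->xhc_state & exit_state,
					1, usec);

	if (result == U32_MAX || xhci->xhc_state & exit_state)
		return -ENODEV;

	return ret;
}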
110 void xhci_quiesce(struct xhci_hcd *xhci)
117 halted = readl(&xhci->op_regs->status) & STS_HALT;
121 cmd = readl(&xhci->op_regs->command);
123 writel(cmd, &xhci->op_regs->command);
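xhci_quiesce() clears the Run/Stop bit without waiting for the halt to complete; a sketch around the matched lines (the XHCI_IRQS masking follows mainline and is an assumption here):

void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(u32)(XHCI_IRQS);		/* preserve interrupt enables */
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;		/* clear Run/Stop only if running */

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}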
134 int xhci_halt(struct xhci_hcd *xhci)
138 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
139 xhci_quiesce(xhci);
141 ret = xhci_handshake(&xhci->op_regs->status,
144 xhci_warn(xhci, "Host halt failed, %d\n", ret);
148 xhci->xhc_state |= XHCI_STATE_HALTED;
149 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
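Halting is quiesce plus a handshake on STS_HALT; a sketch filling in the elided lines (XHCI_MAX_HALT_USEC and the early-return shape are taken from mainline and may differ by kernel version):

int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}

	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	return ret;
}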
157 int xhci_start(struct xhci_hcd *xhci)
162 temp = readl(&xhci->op_regs->command);
164 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
166 writel(temp, &xhci->op_regs->command);
172 ret = xhci_handshake(&xhci->op_regs->status,
175 xhci_err(xhci, "Host took too long to start, "
180 xhci->xhc_state = 0;
181 xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
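Starting is the mirror image: set CMD_RUN, then wait for STS_HALT to read back zero. A sketch reconstructed around the matched lines:

int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
		       temp);
	writel(temp, &xhci->op_regs->command);

	/* Running means the HCHalted status bit reads back zero. */
	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret) {
		/* clear dying/halted/removing state flags */
		xhci->xhc_state = 0;
		/* 500 ms grace period before Run/Stop may be toggled again */
		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
	}

	return ret;
}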
194 int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
200 state = readl(&xhci->op_regs->status);
203 xhci_warn(xhci, "Host not accessible, reset failed.\n");
208 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
212 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
213 command = readl(&xhci->op_regs->command);
215 writel(command, &xhci->op_regs->command);
224 if (xhci->quirks & XHCI_INTEL_HOST)
227 ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
232 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
233 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
235 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
241 ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
243 xhci->usb2_rhub.bus_state.port_c_suspend = 0;
244 xhci->usb2_rhub.bus_state.suspended_ports = 0;
245 xhci->usb2_rhub.bus_state.resuming_ports = 0;
246 xhci->usb3_rhub.bus_state.port_c_suspend = 0;
247 xhci->usb3_rhub.bus_state.suspended_ports = 0;
248 xhci->usb3_rhub.bus_state.resuming_ports = 0;
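A condensed sketch of the reset sequence around the matched lines; the Intel pre-handshake delay (XHCI_INTEL_HOST) and the ASMedia flow-control quirk are deliberately elided:

int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command, state;
	int ret;

	state = readl(&xhci->op_regs->status);
	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}
	/* Resetting a running controller is undefined behavior per spec. */
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* First wait for CMD_RESET to clear, then for Controller Not Ready. */
	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
					 CMD_RESET, 0, timeout_us,
					 XHCI_STATE_REMOVING);
	if (ret)
		return ret;

	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	/* Reset invalidates all port state tracked in software. */
	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}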
253 static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
255 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
276 if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
280 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
283 val = readl(&xhci->op_regs->command);
285 writel(val, &xhci->op_regs->command);
288 val = readl(&xhci->op_regs->status);
290 writel(val, &xhci->op_regs->status);
293 val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
295 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
296 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
298 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
300 intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
301 ARRAY_SIZE(xhci->run_regs->ir_set));
306 ir = &xhci->run_regs->ir_set[i];
307 val = xhci_read_64(xhci, &ir->erst_base);
309 xhci_write_64(xhci, 0, &ir->erst_base);
310 val = xhci_read_64(xhci, &ir->erst_dequeue);

312 xhci_write_64(xhci, 0, &ir->erst_dequeue);
316 err = xhci_handshake(&xhci->op_regs->status,
320 xhci_info(xhci, "Fault detected\n");
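xhci_zero_64b_regs() deliberately clears the controller's 64-bit DMA pointers so any stale access faults against an empty IOMMU domain. A sketch of the per-interrupter part of the loop (the upper_32_bits() guards follow mainline and are assumptions where the listing elides them):

intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
	      ARRAY_SIZE(xhci->run_regs->ir_set));

for (i = 0; i < intrs; i++) {
	struct xhci_intr_reg __iomem *ir = &xhci->run_regs->ir_set[i];

	val = xhci_read_64(xhci, &ir->erst_base);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &ir->erst_base);

	val = xhci_read_64(xhci, &ir->erst_dequeue);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &ir->erst_dequeue);
}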
368 struct xhci_hcd *xhci;
374 xhci = from_timer(xhci, t, comp_mode_recovery_timer);
375 rhub = &xhci->usb3_rhub;
388 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
391 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
401 if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
402 mod_timer(&xhci->comp_mode_recovery_timer,
413 * status event is generated when entering compliance mode (per xhci spec),
416 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
418 xhci->port_status_u0 = 0;
419 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
421 xhci->comp_mode_recovery_timer.expires = jiffies +
424 add_timer(&xhci->comp_mode_recovery_timer);
425 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
456 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
458 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
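The compliance-mode quirk keeps one bit per USB3 root-hub port in xhci->port_status_u0; the recovery timer keeps re-arming until every port has been observed in U0. A sketch of the check and the re-arm (COMP_MODE_RCVRY_MSECS is the mainline poll interval, assumed here):

/* True once every USB3 root-hub port has been seen in U0. */
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}

/* In the timer callback: re-arm unless all ports have reached U0. */
if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
	mod_timer(&xhci->comp_mode_recovery_timer,
		  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));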
471 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
474 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
475 spin_lock_init(&xhci->lock);
476 if (xhci->hci_version == 0x95 && link_quirk) {
477 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
479 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
481 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
484 retval = xhci_mem_init(xhci, GFP_KERNEL);
485 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
489 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
490 compliance_mode_recovery_timer_init(xhci);
498 static int xhci_run_finished(struct xhci_hcd *xhci)
500 struct xhci_interrupter *ir = xhci->interrupters[0];
505 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
508 spin_lock_irqsave(&xhci->lock, flags);
510 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
511 temp = readl(&xhci->op_regs->command);
513 writel(temp, &xhci->op_regs->command);
515 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
518 if (xhci_start(xhci)) {
519 xhci_halt(xhci);
520 spin_unlock_irqrestore(&xhci->lock, flags);
524 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
526 if (xhci->quirks & XHCI_NEC_HOST)
527 xhci_ring_cmd_db(xhci);
529 spin_unlock_irqrestore(&xhci->lock, flags);
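Interrupts are enabled before Run/Stop is set, per xHCI 4.2 and 5.5.2, so no event can be raised while the host is still ignoring them. A sketch of the whole function; the xhci_enable_interrupter() helper name is an assumption, mirroring the xhci_disable_interrupter() call seen later in this listing:

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupters[0];
	unsigned long flags;
	u32 temp;

	spin_lock_irqsave(&xhci->lock, flags);

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);			/* global interrupt enable */
	writel(temp, &xhci->op_regs->command);

	xhci_enable_interrupter(ir);		/* primary interrupter */

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}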
550 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
551 struct xhci_interrupter *ir = xhci->interrupters[0];
561 return xhci_run_finished(xhci);
563 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
565 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
567 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
570 xhci_set_interrupter_moderation(ir, xhci->imod_interval);
572 if (xhci->quirks & XHCI_NEC_HOST) {
575 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
579 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
582 xhci_free_command(xhci, command);
584 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
587 xhci_create_dbc_dev(xhci);
589 xhci_debugfs_init(xhci);
591 if (xhci_has_one_roothub(xhci))
592 return xhci_run_finished(xhci);
612 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
613 struct xhci_interrupter *ir = xhci->interrupters[0];
615 mutex_lock(&xhci->mutex);
619 mutex_unlock(&xhci->mutex);
623 xhci_remove_dbc_dev(xhci);
625 spin_lock_irq(&xhci->lock);
626 xhci->xhc_state |= XHCI_STATE_HALTED;
627 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
628 xhci_halt(xhci);
629 xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
630 spin_unlock_irq(&xhci->lock);
633 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
634 (!(xhci_all_ports_seen_u0(xhci)))) {
635 del_timer_sync(&xhci->comp_mode_recovery_timer);
636 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
641 if (xhci->quirks & XHCI_AMD_PLL_FIX)
644 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
646 temp = readl(&xhci->op_regs->status);
647 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
650 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
651 xhci_mem_cleanup(xhci);
652 xhci_debugfs_exit(xhci);
653 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
655 readl(&xhci->op_regs->status));
656 mutex_unlock(&xhci->mutex);
671 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
673 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
677 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
682 if (xhci->shared_hcd) {
683 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
684 del_timer_sync(&xhci->shared_hcd->rh_timer);
687 spin_lock_irq(&xhci->lock);
688 xhci_halt(xhci);
694 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
695 xhci->quirks & XHCI_RESET_TO_DEFAULT)
696 xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
698 spin_unlock_irq(&xhci->lock);
700 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
702 readl(&xhci->op_regs->status));
707 static void xhci_save_registers(struct xhci_hcd *xhci)
712 xhci->s3.command = readl(&xhci->op_regs->command);
713 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
714 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
715 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
719 for (i = 0; i < xhci->max_interrupters; i++) {
720 ir = xhci->interrupters[i];
725 ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
726 ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
732 static void xhci_restore_registers(struct xhci_hcd *xhci)
737 writel(xhci->s3.command, &xhci->op_regs->command);
738 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
739 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
740 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
743 for (i = 0; i < xhci->max_interrupters; i++) {
744 ir = xhci->interrupters[i];
749 xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
750 xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
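Both xhci_save_registers() and xhci_restore_registers() walk every allocated interrupter. A sketch of the save side; the s3_* field names beyond erst_base/erst_dequeue, and the irq_pending/irq_control register names, follow mainline and are assumptions here:

for (i = 0; i < xhci->max_interrupters; i++) {
	ir = xhci->interrupters[i];
	if (!ir)
		continue;	/* interrupters may be allocated sparsely */

	ir->s3_erst_size = readl(&ir->ir_set->erst_size);
	ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
	ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
	ir->s3_irq_control = readl(&ir->ir_set->irq_control);
}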
756 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
761 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
763 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
764 xhci->cmd_ring->dequeue) &
766 xhci->cmd_ring->cycle_state;
767 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
770 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
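The Command Ring Control Register (CRCR) write rebuilds the hardware dequeue pointer from software state: reserved low bits are preserved, the dequeue TRB's DMA address is inserted, and the ring cycle state (RCS) lands in bit 0. A sketch assembled from the matched lines:

val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
	 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			       xhci->cmd_ring->dequeue) &
	  (u64)~CMD_RING_RSVD_BITS) |
	 xhci->cmd_ring->cycle_state;
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);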
782 static void xhci_clear_command_ring(struct xhci_hcd *xhci)
787 ring = xhci->cmd_ring;
805 xhci_set_cmd_ring_deq(xhci);
817 static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
825 spin_lock_irqsave(&xhci->lock, flags);
842 xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
846 spin_unlock_irqrestore(&xhci->lock, flags);
849 static bool xhci_pending_portevent(struct xhci_hcd *xhci)
856 status = readl(&xhci->op_regs->status);
862 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
865 port_index = xhci->usb2_rhub.num_ports;
866 ports = xhci->usb2_rhub.ports;
873 port_index = xhci->usb3_rhub.num_ports;
874 ports = xhci->usb3_rhub.ports;
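After resume, a port event counts as pending if STS_EINT is set or any port has change bits or is mid-resume; change bits can be visible in PORTSC before the matching event reaches the event ring (xhci 1.1, 4.19.2). A sketch of the scan over both root hubs (PORT_CHANGE_MASK and XDEV_RESUME are the mainline bit definitions, assumed here):

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status, portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}

	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}

	return false;
}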
890 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
894 struct usb_hcd *hcd = xhci_to_hcd(xhci);
902 (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
906 xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
907 xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
912 xhci_dbc_suspend(xhci);
915 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
919 if (xhci->shared_hcd) {
920 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
921 del_timer_sync(&xhci->shared_hcd->rh_timer);
924 if (xhci->quirks & XHCI_SUSPEND_DELAY)
927 spin_lock_irq(&xhci->lock);
929 if (xhci->shared_hcd)
930 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
935 command = readl(&xhci->op_regs->command);
937 writel(command, &xhci->op_regs->command);
940 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
942 if (xhci_handshake(&xhci->op_regs->status,
944 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
945 spin_unlock_irq(&xhci->lock);
948 xhci_clear_command_ring(xhci);
951 xhci_save_registers(xhci);
954 command = readl(&xhci->op_regs->command);
956 writel(command, &xhci->op_regs->command);
957 xhci->broken_suspend = 0;
958 if (xhci_handshake(&xhci->op_regs->status,
966 * if SRE and HCE bits are not set (as per xhci
969 res = readl(&xhci->op_regs->status);
970 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
973 xhci->broken_suspend = 1;
975 xhci_warn(xhci, "WARN: xHC save state timeout\n");
976 spin_unlock_irq(&xhci->lock);
980 spin_unlock_irq(&xhci->lock);
986 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
987 (!(xhci_all_ports_seen_u0(xhci)))) {
988 del_timer_sync(&xhci->comp_mode_recovery_timer);
989 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1004 int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
1008 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1022 if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
1023 time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
1027 if (xhci->shared_hcd)
1028 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1030 spin_lock_irq(&xhci->lock);
1032 if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
1040 retval = xhci_handshake(&xhci->op_regs->status,
1043 xhci_warn(xhci, "Controller not ready at resume %d\n",
1045 spin_unlock_irq(&xhci->lock);
1049 xhci_restore_registers(xhci);
1051 xhci_set_cmd_ring_deq(xhci);
1054 command = readl(&xhci->op_regs->command);
1056 writel(command, &xhci->op_regs->command);
1062 if (xhci_handshake(&xhci->op_regs->status,
1064 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
1065 spin_unlock_irq(&xhci->lock);
1070 temp = readl(&xhci->op_regs->status);
1074 !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
1076 if (!xhci->broken_suspend)
1077 xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
1081 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1082 !(xhci_all_ports_seen_u0(xhci))) {
1083 del_timer_sync(&xhci->comp_mode_recovery_timer);
1084 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1089 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1090 if (xhci->shared_hcd)
1091 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1093 xhci_dbg(xhci, "Stop HCD\n");
1094 xhci_halt(xhci);
1095 xhci_zero_64b_regs(xhci);
1096 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
1097 spin_unlock_irq(&xhci->lock);
1101 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1102 temp = readl(&xhci->op_regs->status);
1103 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
1104 xhci_disable_interrupter(xhci->interrupters[0]);
1106 xhci_dbg(xhci, "cleaning up memory\n");
1107 xhci_mem_cleanup(xhci);
1108 xhci_debugfs_exit(xhci);
1109 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1110 readl(&xhci->op_regs->status));
1116 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1122 xhci_dbg(xhci, "Start the primary HCD\n");
1124 if (!retval && xhci->shared_hcd) {
1125 xhci_dbg(xhci, "Start the secondary HCD\n");
1126 retval = xhci_run(xhci->shared_hcd);
1130 if (xhci->shared_hcd)
1131 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1136 command = readl(&xhci->op_regs->command);
1138 writel(command, &xhci->op_regs->command);
1139 xhci_handshake(&xhci->op_regs->status, STS_HALT,
1151 spin_unlock_irq(&xhci->lock);
1153 xhci_dbc_resume(xhci);
1163 if (xhci->usb3_rhub.bus_state.suspended_ports ||
1164 xhci->usb3_rhub.bus_state.bus_suspended)
1167 pending_portevent = xhci_pending_portevent(xhci);
1172 pending_portevent = xhci_pending_portevent(xhci);
1176 if (xhci->shared_hcd)
1177 usb_hcd_resume_root_hub(xhci->shared_hcd);
1187 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1188 compliance_mode_recovery_timer_init(xhci);
1190 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1194 xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
1196 if (xhci->shared_hcd) {
1197 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1198 usb_hcd_poll_rh_status(xhci->shared_hcd);
1328 struct xhci_hcd *xhci;
1330 xhci = hcd_to_xhci(hcd);
1335 if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
1344 struct xhci_hcd *xhci;
1347 xhci = hcd_to_xhci(hcd);
1352 if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
1416 struct xhci_hcd *xhci;
1428 xhci = hcd_to_xhci(hcd);
1430 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1431 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1436 virt_dev = xhci->devs[udev->slot_id];
1438 xhci_dbg(xhci, "xHCI %s called with udev and "
1444 if (xhci->xhc_state & XHCI_STATE_HALTED)
1450 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1460 static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
1469 ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0);
1478 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1480 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1483 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1486 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1489 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
1496 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1502 xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0);
1504 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0);
1512 ret = xhci_configure_endpoint(xhci, vdev->udev, command,
1535 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1565 spin_lock_irqsave(&xhci->lock, flags);
1581 if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1582 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1587 if (xhci->xhc_state & XHCI_STATE_DYING) {
1588 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1594 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1597 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1603 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1611 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1615 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1619 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1623 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1632 spin_unlock_irqrestore(&xhci->lock, flags);
1672 struct xhci_hcd *xhci;
1681 xhci = hcd_to_xhci(hcd);
1682 spin_lock_irqsave(&xhci->lock, flags);
1692 vdev = xhci->devs[urb->dev->slot_id];
1699 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1704 temp = readl(&xhci->op_regs->status);
1705 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1706 xhci_hc_died(xhci);
1716 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1725 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1726 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1742 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1765 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1771 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1773 xhci_ring_cmd_db(xhci);
1776 spin_unlock_irqrestore(&xhci->lock, flags);
1783 spin_unlock_irqrestore(&xhci->lock, flags);
1799 * the xhci->devs[slot_id] structure.
1804 struct xhci_hcd *xhci;
1816 xhci = hcd_to_xhci(hcd);
1817 if (xhci->xhc_state & XHCI_STATE_DYING)
1820 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1823 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1828 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1829 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1832 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1838 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1846 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1847 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1858 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1860 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1862 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1882 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1887 struct xhci_hcd *xhci;
1903 xhci = hcd_to_xhci(hcd);
1904 if (xhci->xhc_state & XHCI_STATE_DYING)
1913 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1918 virt_dev = xhci->devs[udev->slot_id];
1922 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1933 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1943 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1953 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1973 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1976 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1985 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1994 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2006 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2011 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2019 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
2027 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2056 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2061 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2069 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2077 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2106 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2111 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2119 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2140 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2164 * Must be called with xhci->lock held.
2166 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2171 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2172 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2173 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2176 xhci->num_active_eps, added_eps,
2177 xhci->limit_active_eps);
2180 xhci->num_active_eps += added_eps;
2181 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2183 xhci->num_active_eps);
2191 * Must be called with xhci->lock held.
2193 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2198 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2199 xhci->num_active_eps -= num_failed_eps;
2200 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2203 xhci->num_active_eps);
2210 * Must be called with xhci->lock held.
2212 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2217 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2218 xhci->num_active_eps -= num_dropped_eps;
2220 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2223 xhci->num_active_eps);
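The XHCI_EP_LIMIT_QUIRK bookkeeping is a reserve/rollback counter, always manipulated under xhci->lock: reserve room for the endpoints a Configure Endpoint command would add, roll the count back on failure, and release dropped endpoints once the command succeeds. A sketch of the reserve side (trace calls elided):

static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
				       struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps)
		return -ENOMEM;	/* caller rolls back and fails the config */

	xhci->num_active_eps += added_eps;
	return 0;
}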
2258 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2266 bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
2288 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2345 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2361 return xhci_check_ss_bw(xhci, virt_dev);
2382 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2385 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2386 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2390 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2395 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2478 xhci_warn(xhci, "Not enough bandwidth. "
2499 xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts;
2502 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2511 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2546 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2561 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2564 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2607 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2623 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2626 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2678 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2686 rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum];
2698 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2712 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2728 xhci_drop_ep_from_interval_table(xhci,
2736 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2740 xhci_add_ep_to_interval_table(xhci,
2748 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2752 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2765 xhci_drop_ep_from_interval_table(xhci,
2777 xhci_add_ep_to_interval_table(xhci,
2791 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2805 spin_lock_irqsave(&xhci->lock, flags);
2807 if (xhci->xhc_state & XHCI_STATE_DYING) {
2808 spin_unlock_irqrestore(&xhci->lock, flags);
2812 virt_dev = xhci->devs[udev->slot_id];
2816 spin_unlock_irqrestore(&xhci->lock, flags);
2817 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2822 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2823 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2824 spin_unlock_irqrestore(&xhci->lock, flags);
2825 xhci_warn(xhci, "Not enough host resources, "
2827 xhci->num_active_eps);
2830 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2831 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2832 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2833 xhci_free_host_resources(xhci, ctrl_ctx);
2834 spin_unlock_irqrestore(&xhci->lock, flags);
2835 xhci_warn(xhci, "Not enough bandwidth\n");
2839 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2845 ret = xhci_queue_configure_endpoint(xhci, command,
2849 ret = xhci_queue_evaluate_context(xhci, command,
2853 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2854 xhci_free_host_resources(xhci, ctrl_ctx);
2855 spin_unlock_irqrestore(&xhci->lock, flags);
2856 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2860 xhci_ring_cmd_db(xhci);
2861 spin_unlock_irqrestore(&xhci->lock, flags);
2867 ret = xhci_configure_endpoint_result(xhci, udev,
2870 ret = xhci_evaluate_context_result(xhci, udev,
2873 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2874 spin_lock_irqsave(&xhci->lock, flags);
2879 xhci_free_host_resources(xhci, ctrl_ctx);
2881 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2882 spin_unlock_irqrestore(&xhci->lock, flags);
2887 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2893 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2895 xhci_free_stream_info(xhci, ep->stream_info);
2908 * else should be touching the xhci->devs[slot_id] structure, so we
2909 * don't need to take the xhci->lock for manipulating that.
2915 struct xhci_hcd *xhci;
2924 xhci = hcd_to_xhci(hcd);
2925 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2926 (xhci->xhc_state & XHCI_STATE_REMOVING))
2929 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2930 virt_dev = xhci->devs[udev->slot_id];
2932 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2941 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2957 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2969 ret = xhci_configure_endpoint(xhci, udev, command,
2979 xhci_free_endpoint_ring(xhci, virt_dev, i);
2980 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2983 xhci_zero_in_ctx(xhci, virt_dev);
2995 xhci_free_endpoint_ring(xhci, virt_dev, i);
2997 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3000 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
3012 struct xhci_hcd *xhci;
3019 xhci = hcd_to_xhci(hcd);
3021 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3022 virt_dev = xhci->devs[udev->slot_id];
3026 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3027 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3031 xhci_zero_in_ctx(xhci, virt_dev);
3035 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3043 xhci_slot_copy(xhci, in_ctx, out_ctx);
3050 struct xhci_hcd *xhci;
3057 xhci = hcd_to_xhci(hcd);
3059 spin_lock_irqsave(&xhci->lock, flags);
3065 vdev = xhci->devs[udev->slot_id];
3074 spin_unlock_irqrestore(&xhci->lock, flags);
3080 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3084 spin_unlock_irqrestore(&xhci->lock, flags);
3096 * endpoint. Refer to the additional note in xhci specification section 4.6.8.
3105 struct xhci_hcd *xhci;
3116 xhci = hcd_to_xhci(hcd);
3122 * mismatch. Reconfigure the xhci ep0 endpoint context here in that case
3130 vdev = xhci->devs[udev->slot_id];
3134 xhci_check_ep0_maxpacket(xhci, vdev);
3143 vdev = xhci->devs[udev->slot_id];
3151 spin_lock_irqsave(&xhci->lock, flags);
3154 spin_unlock_irqrestore(&xhci->lock, flags);
3157 spin_unlock_irqrestore(&xhci->lock, flags);
3168 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3172 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3176 spin_lock_irqsave(&xhci->lock, flags);
3189 spin_unlock_irqrestore(&xhci->lock, flags);
3190 xhci_free_command(xhci, cfg_cmd);
3194 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3197 spin_unlock_irqrestore(&xhci->lock, flags);
3198 xhci_free_command(xhci, cfg_cmd);
3199 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
3204 xhci_ring_cmd_db(xhci);
3205 spin_unlock_irqrestore(&xhci->lock, flags);
3209 spin_lock_irqsave(&xhci->lock, flags);
3214 spin_unlock_irqrestore(&xhci->lock, flags);
3215 xhci_free_command(xhci, cfg_cmd);
3216 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3221 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3223 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3225 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3228 spin_unlock_irqrestore(&xhci->lock, flags);
3229 xhci_free_command(xhci, cfg_cmd);
3230 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
3235 xhci_ring_cmd_db(xhci);
3236 spin_unlock_irqrestore(&xhci->lock, flags);
3240 xhci_free_command(xhci, cfg_cmd);
3242 xhci_free_command(xhci, stop_cmd);
3243 spin_lock_irqsave(&xhci->lock, flags);
3246 spin_unlock_irqrestore(&xhci->lock, flags);
3249 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3259 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3263 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3270 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3273 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3276 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3280 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3281 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3289 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3302 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3304 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
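The stream context array must be sized to a power of two and is capped by the host's Max Primary Stream Array size. A sketch reconstructed around the matched lines (roundup_pow_of_two() from linux/log2.h, as in mainline):

static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* Stream context arrays are a power of two in size. */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);

	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
			 max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}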
3315 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3326 ret = xhci_check_streams_endpoint(xhci, udev,
3333 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3347 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3358 if (!xhci->devs[slot_id])
3363 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3366 xhci_warn(xhci, "WARN Can't disable streams for "
3375 xhci_warn(xhci, "WARN Can't disable streams for "
3379 xhci_warn(xhci, "WARN xhci_free_streams() called "
3409 struct xhci_hcd *xhci;
3426 xhci = hcd_to_xhci(hcd);
3427 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3431 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3432 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3433 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3437 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3443 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3445 xhci_free_command(xhci, config_cmd);
3453 spin_lock_irqsave(&xhci->lock, flags);
3454 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3457 xhci_free_command(xhci, config_cmd);
3458 spin_unlock_irqrestore(&xhci->lock, flags);
3462 xhci_warn(xhci, "WARN: endpoints can't handle "
3464 xhci_free_command(xhci, config_cmd);
3465 spin_unlock_irqrestore(&xhci->lock, flags);
3468 vdev = xhci->devs[udev->slot_id];
3476 spin_unlock_irqrestore(&xhci->lock, flags);
3482 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3483 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3489 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3505 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3507 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3509 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3515 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3520 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3530 spin_lock_irqsave(&xhci->lock, flags);
3534 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3538 xhci_free_command(xhci, config_cmd);
3539 spin_unlock_irqrestore(&xhci->lock, flags);
3543 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3552 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3559 xhci_endpoint_zero(xhci, vdev, eps[i]);
3561 xhci_free_command(xhci, config_cmd);
3576 struct xhci_hcd *xhci;
3584 xhci = hcd_to_xhci(hcd);
3585 vdev = xhci->devs[udev->slot_id];
3588 spin_lock_irqsave(&xhci->lock, flags);
3589 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3592 spin_unlock_irqrestore(&xhci->lock, flags);
3604 spin_unlock_irqrestore(&xhci->lock, flags);
3605 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3614 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3615 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3618 xhci_endpoint_copy(xhci, command->in_ctx,
3623 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3626 spin_unlock_irqrestore(&xhci->lock, flags);
3631 ret = xhci_configure_endpoint(xhci, udev, command,
3640 spin_lock_irqsave(&xhci->lock, flags);
3643 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3651 spin_unlock_irqrestore(&xhci->lock, flags);
3661 * Must be called with xhci->lock held.
3663 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3676 xhci->num_active_eps -= num_dropped_eps;
3678 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3682 xhci->num_active_eps);
3708 struct xhci_hcd *xhci;
3718 xhci = hcd_to_xhci(hcd);
3720 virt_dev = xhci->devs[slot_id];
3722 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3739 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3750 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3757 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3764 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3766 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3771 spin_lock_irqsave(&xhci->lock, flags);
3773 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3775 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3776 spin_unlock_irqrestore(&xhci->lock, flags);
3779 xhci_ring_cmd_db(xhci);
3780 spin_unlock_irqrestore(&xhci->lock, flags);
3793 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3798 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3800 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3801 xhci_dbg(xhci, "Not freeing device rings.\n");
3806 xhci_dbg(xhci, "Successful reset device command.\n");
3809 if (xhci_is_vendor_info_code(xhci, ret))
3811 xhci_warn(xhci, "Unknown completion code %u for "
3818 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3819 spin_lock_irqsave(&xhci->lock, flags);
3821 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3822 spin_unlock_irqrestore(&xhci->lock, flags);
3830 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3832 xhci_free_stream_info(xhci, ep->stream_info);
3838 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3839 xhci_free_endpoint_ring(xhci, virt_dev, i);
3842 xhci_drop_ep_from_interval_table(xhci,
3851 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3856 xhci_free_command(xhci, reset_device_cmd);
3867 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3878 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3888 virt_dev = xhci->devs[udev->slot_id];
3889 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3896 xhci_disable_slot(xhci, udev->slot_id);
3898 spin_lock_irqsave(&xhci->lock, flags);
3899 xhci_free_virt_device(xhci, udev->slot_id);
3900 spin_unlock_irqrestore(&xhci->lock, flags);
3904 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3911 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3915 xhci_debugfs_remove_slot(xhci, slot_id);
3917 spin_lock_irqsave(&xhci->lock, flags);
3919 state = readl(&xhci->op_regs->status);
3920 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3921 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3922 spin_unlock_irqrestore(&xhci->lock, flags);
3927 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3930 spin_unlock_irqrestore(&xhci->lock, flags);
3934 xhci_ring_cmd_db(xhci);
3935 spin_unlock_irqrestore(&xhci->lock, flags);
3940 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
3943 xhci_free_command(xhci, command);
3952 * Must be called with xhci->lock held.
3954 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3956 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3957 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3960 xhci->num_active_eps, xhci->limit_active_eps);
3963 xhci->num_active_eps += 1;
3964 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3966 xhci->num_active_eps);
3977 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3984 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3988 spin_lock_irqsave(&xhci->lock, flags);
3989 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3991 spin_unlock_irqrestore(&xhci->lock, flags);
3992 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3993 xhci_free_command(xhci, command);
3996 xhci_ring_cmd_db(xhci);
3997 spin_unlock_irqrestore(&xhci->lock, flags);
4003 xhci_err(xhci, "Error while assigning device slot ID: %s\n",
4005 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4007 readl(&xhci->cap_regs->hcs_params1)));
4008 xhci_free_command(xhci, command);
4012 xhci_free_command(xhci, command);
4014 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4015 spin_lock_irqsave(&xhci->lock, flags);
4016 ret = xhci_reserve_host_control_ep_resources(xhci);
4018 spin_unlock_irqrestore(&xhci->lock, flags);
4019 xhci_warn(xhci, "Not enough host resources, "
4021 xhci->num_active_eps);
4024 spin_unlock_irqrestore(&xhci->lock, flags);
4030 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4031 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4034 vdev = xhci->devs[slot_id];
4035 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4040 xhci_debugfs_create_slot(xhci, slot_id);
4046 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4054 xhci_disable_slot(xhci, udev->slot_id);
4055 xhci_free_virt_device(xhci, udev->slot_id);
4077 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4083 mutex_lock(&xhci->mutex);
4085 if (xhci->xhc_state) { /* dying, removing or halted */
4091 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4097 virt_dev = xhci->devs[udev->slot_id];
4105 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4110 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4116 xhci_dbg(xhci, "Slot already in default state\n");
4121 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4130 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4133 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4144 xhci_setup_addressable_virt_dev(xhci, udev);
4147 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4151 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4155 spin_lock_irqsave(&xhci->lock, flags);
4157 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4160 spin_unlock_irqrestore(&xhci->lock, flags);
4161 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4165 xhci_ring_cmd_db(xhci);
4166 spin_unlock_irqrestore(&xhci->lock, flags);
4178 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4183 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4190 mutex_unlock(&xhci->mutex);
4191 ret = xhci_disable_slot(xhci, udev->slot_id);
4192 xhci_free_virt_device(xhci, udev->slot_id);
4204 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4208 xhci_err(xhci,
4211 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4217 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4218 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4220 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4223 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4225 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4226 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4229 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4235 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4240 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4243 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4247 mutex_unlock(&xhci->mutex);
4285 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4295 command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4299 spin_lock_irqsave(&xhci->lock, flags);
4301 virt_dev = xhci->devs[udev->slot_id];
4306 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4310 spin_unlock_irqrestore(&xhci->lock, flags);
4311 xhci_free_command(xhci, command);
4318 spin_unlock_irqrestore(&xhci->lock, flags);
4319 xhci_free_command(xhci, command);
4320 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4325 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4326 spin_unlock_irqrestore(&xhci->lock, flags);
4329 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4334 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4338 ret = xhci_configure_endpoint(xhci, udev, command,
4342 spin_lock_irqsave(&xhci->lock, flags);
4344 spin_unlock_irqrestore(&xhci->lock, flags);
4347 xhci_free_command(xhci, command);
4359 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4366 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4418 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4427 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4430 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4441 spin_lock_irqsave(&xhci->lock, flags);
4443 ports = xhci->usb2_rhub.ports;
4449 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4457 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
4467 spin_unlock_irqrestore(&xhci->lock, flags);
4469 ret = xhci_change_max_exit_latency(xhci, udev,
4473 spin_lock_irqsave(&xhci->lock, flags);
4480 hird = xhci_calculate_hird_besl(xhci, udev);
4497 spin_unlock_irqrestore(&xhci->lock, flags);
4498 xhci_change_max_exit_latency(xhci, udev, 0);
4506 spin_unlock_irqrestore(&xhci->lock, flags);
4514 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4520 for (i = 0; i < xhci->num_ext_caps; i++) {
4521 if (xhci->ext_caps[i] & capability) {
4523 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4524 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4535 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4546 if (xhci->hw_lpm_support == 1 &&
4548 xhci, portnum, XHCI_HLC)) {
4552 if (xhci_check_usb2_port_capability(xhci, portnum,
4657 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4671 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4721 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4735 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4752 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4759 return xhci_calculate_u1_timeout(xhci, udev, desc);
4761 return xhci_calculate_u2_timeout(xhci, udev, desc);
4766 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4774 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4791 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4800 if (xhci_update_timeout_for_endpoint(xhci, udev,
4807 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4819 if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
4821 if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
4839 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4858 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4892 if (xhci_update_timeout_for_interface(xhci, udev,
4947 struct xhci_hcd *xhci;
4953 xhci = hcd_to_xhci(hcd);
4958 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4959 !xhci->devs[udev->slot_id])
4962 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4967 port = xhci->usb3_rhub.ports[udev->portnum - 1];
4980 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4989 struct xhci_hcd *xhci;
4992 xhci = hcd_to_xhci(hcd);
4993 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4994 !xhci->devs[udev->slot_id])
4998 return xhci_change_max_exit_latency(xhci, udev, mel);
5034 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5047 vdev = xhci->devs[hdev->slot_id];
5049 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5053 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5059 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5061 xhci_free_command(xhci, config_cmd);
5065 spin_lock_irqsave(&xhci->lock, flags);
5067 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5068 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5069 xhci_free_command(xhci, config_cmd);
5070 spin_unlock_irqrestore(&xhci->lock, flags);
5074 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5076 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5088 if (xhci->hci_version > 0x95) {
5089 xhci_dbg(xhci, "xHCI version %x needs hub "
5091 (unsigned int) xhci->hci_version);
5103 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5107 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5109 (unsigned int) xhci->hci_version);
5112 spin_unlock_irqrestore(&xhci->lock, flags);
5114 xhci_dbg(xhci, "Set up %s for hub device.\n",
5115 (xhci->hci_version > 0x95) ?
5121 if (xhci->hci_version > 0x95)
5122 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5125 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5128 xhci_free_command(xhci, config_cmd);
5135 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5137 return readl(&xhci->run_regs->microframe_index) >> 3;
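(The MFINDEX register counts 125 µs microframes; shifting right by 3 converts it to the 1 ms USB frame number the USB core expects.)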
5140 static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5142 xhci->usb2_rhub.hcd = hcd;
5153 static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5166 if (xhci->usb3_rhub.min_rev == 0x1)
5169 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5185 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5188 xhci->usb3_rhub.hcd = hcd;
5193 struct xhci_hcd *xhci;
5210 xhci = hcd_to_xhci(hcd);
5213 xhci_hcd_init_usb3_data(xhci, hcd);
5217 mutex_init(&xhci->mutex);
5218 xhci->main_hcd = hcd;
5219 xhci->cap_regs = hcd->regs;
5220 xhci->op_regs = hcd->regs +
5221 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5222 xhci->run_regs = hcd->regs +
5223 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5225 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5226 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5227 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5228 xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
5229 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5230 if (xhci->hci_version > 0x100)
5231 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5233 /* xhci-plat or xhci-pci might have set max_interrupters already */
5234 if ((!xhci->max_interrupters) ||
5235 xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
5236 xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
5238 xhci->quirks |= quirks;
5241 get_quirks(dev, xhci);
5243 /* xHCI controllers that follow the xHCI 1.0 spec give a spurious
5247 if (xhci->hci_version > 0x96)
5248 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5251 retval = xhci_halt(xhci);
5255 xhci_zero_64b_regs(xhci);
5257 xhci_dbg(xhci, "Resetting HCD\n");
5259 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
5262 xhci_dbg(xhci, "Reset complete\n");
5268 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
5271 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5272 xhci->hcc_params &= ~BIT(0);
5276 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5278 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5288 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5292 xhci_dbg(xhci, "Calling HCD init\n");
5297 xhci_dbg(xhci, "Called HCD init\n");
5300 xhci_hcd_init_usb3_data(xhci, hcd);
5302 xhci_hcd_init_usb2_data(xhci, hcd);
5304 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5305 xhci->hcc_params, xhci->hci_version, xhci->quirks);
5314 struct xhci_hcd *xhci;
5320 xhci = hcd_to_xhci(hcd);
5322 spin_lock_irqsave(&xhci->lock, flags);
5327 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5328 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5329 spin_unlock_irqrestore(&xhci->lock, flags);
5333 .description = "xhci-hcd",