• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/usb/host/
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include "xhci.h"
24
25#define XHCI_INIT_VALUE 0x0
26
27/* Add verbose debugging later, just print everything for now */
28
29void xhci_dbg_regs(struct xhci_hcd *xhci)
30{
31	u32 temp;
32
33	xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
34			xhci->cap_regs);
35	temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
36	xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
37			&xhci->cap_regs->hc_capbase, temp);
38	xhci_dbg(xhci, "//   CAPLENGTH: 0x%x\n",
39			(unsigned int) HC_LENGTH(temp));
40
41	xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
42
43	temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
44	xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
45			&xhci->cap_regs->run_regs_off,
46			(unsigned int) temp & RTSOFF_MASK);
47	xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
48
49	temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
50	xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
51	xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
52}
53
54static void xhci_print_cap_regs(struct xhci_hcd *xhci)
55{
56	u32 temp;
57
58	xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
59
60	temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
61	xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
62			(unsigned int) temp);
63	xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
64			(unsigned int) HC_LENGTH(temp));
65	xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
66			(unsigned int) HC_VERSION(temp));
67
68	temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
69	xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
70			(unsigned int) temp);
71	xhci_dbg(xhci, "  Max device slots: %u\n",
72			(unsigned int) HCS_MAX_SLOTS(temp));
73	xhci_dbg(xhci, "  Max interrupters: %u\n",
74			(unsigned int) HCS_MAX_INTRS(temp));
75	xhci_dbg(xhci, "  Max ports: %u\n",
76			(unsigned int) HCS_MAX_PORTS(temp));
77
78	temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
79	xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
80			(unsigned int) temp);
81	xhci_dbg(xhci, "  Isoc scheduling threshold: %u\n",
82			(unsigned int) HCS_IST(temp));
83	xhci_dbg(xhci, "  Maximum allowed segments in event ring: %u\n",
84			(unsigned int) HCS_ERST_MAX(temp));
85
86	temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
87	xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
88			(unsigned int) temp);
89	xhci_dbg(xhci, "  Worst case U1 device exit latency: %u\n",
90			(unsigned int) HCS_U1_LATENCY(temp));
91	xhci_dbg(xhci, "  Worst case U2 device exit latency: %u\n",
92			(unsigned int) HCS_U2_LATENCY(temp));
93
94	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
95	xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
96	xhci_dbg(xhci, "  HC generates %s bit addresses\n",
97			HCC_64BIT_ADDR(temp) ? "64" : "32");
98	xhci_dbg(xhci, "  FIXME: more HCCPARAMS debugging\n");
99
100	temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
101	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
102}
103
104static void xhci_print_command_reg(struct xhci_hcd *xhci)
105{
106	u32 temp;
107
108	temp = xhci_readl(xhci, &xhci->op_regs->command);
109	xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
110	xhci_dbg(xhci, "  HC is %s\n",
111			(temp & CMD_RUN) ? "running" : "being stopped");
112	xhci_dbg(xhci, "  HC has %sfinished hard reset\n",
113			(temp & CMD_RESET) ? "not " : "");
114	xhci_dbg(xhci, "  Event Interrupts %s\n",
115			(temp & CMD_EIE) ? "enabled " : "disabled");
116	xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
117			(temp & CMD_EIE) ? "enabled " : "disabled");
118	xhci_dbg(xhci, "  HC has %sfinished light reset\n",
119			(temp & CMD_LRESET) ? "not " : "");
120}
121
122static void xhci_print_status(struct xhci_hcd *xhci)
123{
124	u32 temp;
125
126	temp = xhci_readl(xhci, &xhci->op_regs->status);
127	xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
128	xhci_dbg(xhci, "  Event ring is %sempty\n",
129			(temp & STS_EINT) ? "not " : "");
130	xhci_dbg(xhci, "  %sHost System Error\n",
131			(temp & STS_FATAL) ? "WARNING: " : "No ");
132	xhci_dbg(xhci, "  HC is %s\n",
133			(temp & STS_HALT) ? "halted" : "running");
134}
135
/* Print the operational register set: location, then USBCMD and USBSTS decodes. */
static void xhci_print_op_regs(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
	xhci_print_command_reg(xhci);
	xhci_print_status(xhci);
}
142
143static void xhci_print_ports(struct xhci_hcd *xhci)
144{
145	u32 __iomem *addr;
146	int i, j;
147	int ports;
148	char *names[NUM_PORT_REGS] = {
149		"status",
150		"power",
151		"link",
152		"reserved",
153	};
154
155	ports = HCS_MAX_PORTS(xhci->hcs_params1);
156	addr = &xhci->op_regs->port_status_base;
157	for (i = 0; i < ports; i++) {
158		for (j = 0; j < NUM_PORT_REGS; ++j) {
159			xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
160					addr, names[j],
161					(unsigned int) xhci_readl(xhci, addr));
162			addr++;
163		}
164	}
165}
166
167void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
168{
169	void *addr;
170	u32 temp;
171	u64 temp_64;
172
173	addr = &ir_set->irq_pending;
174	temp = xhci_readl(xhci, addr);
175	if (temp == XHCI_INIT_VALUE)
176		return;
177
178	xhci_dbg(xhci, "  %p: ir_set[%i]\n", ir_set, set_num);
179
180	xhci_dbg(xhci, "  %p: ir_set.pending = 0x%x\n", addr,
181			(unsigned int)temp);
182
183	addr = &ir_set->irq_control;
184	temp = xhci_readl(xhci, addr);
185	xhci_dbg(xhci, "  %p: ir_set.control = 0x%x\n", addr,
186			(unsigned int)temp);
187
188	addr = &ir_set->erst_size;
189	temp = xhci_readl(xhci, addr);
190	xhci_dbg(xhci, "  %p: ir_set.erst_size = 0x%x\n", addr,
191			(unsigned int)temp);
192
193	addr = &ir_set->rsvd;
194	temp = xhci_readl(xhci, addr);
195	if (temp != XHCI_INIT_VALUE)
196		xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
197				addr, (unsigned int)temp);
198
199	addr = &ir_set->erst_base;
200	temp_64 = xhci_read_64(xhci, addr);
201	xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
202			addr, temp_64);
203
204	addr = &ir_set->erst_dequeue;
205	temp_64 = xhci_read_64(xhci, addr);
206	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
207			addr, temp_64);
208}
209
210void xhci_print_run_regs(struct xhci_hcd *xhci)
211{
212	u32 temp;
213	int i;
214
215	xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
216	temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
217	xhci_dbg(xhci, "  %p: Microframe index = 0x%x\n",
218			&xhci->run_regs->microframe_index,
219			(unsigned int) temp);
220	for (i = 0; i < 7; ++i) {
221		temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
222		if (temp != XHCI_INIT_VALUE)
223			xhci_dbg(xhci, "  WARN: %p: Rsvd[%i] = 0x%x\n",
224					&xhci->run_regs->rsvd[i],
225					i, (unsigned int) temp);
226	}
227}
228
/* Top-level register dump: capability, operational, and port registers. */
void xhci_print_registers(struct xhci_hcd *xhci)
{
	xhci_print_cap_regs(xhci);
	xhci_print_op_regs(xhci);
	xhci_print_ports(xhci);
}
235
236void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
237{
238	int i;
239	for (i = 0; i < 4; ++i)
240		xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
241				i*4, trb->generic.field[i]);
242}
243
244/**
245 * Debug a transfer request block (TRB).
246 */
247void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
248{
249	u64	address;
250	u32	type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
251
252	switch (type) {
253	case TRB_TYPE(TRB_LINK):
254		xhci_dbg(xhci, "Link TRB:\n");
255		xhci_print_trb_offsets(xhci, trb);
256
257		address = trb->link.segment_ptr;
258		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
259
260		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
261				GET_INTR_TARGET(trb->link.intr_target));
262		xhci_dbg(xhci, "Cycle bit = %u\n",
263				(unsigned int) (trb->link.control & TRB_CYCLE));
264		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
265				(unsigned int) (trb->link.control & LINK_TOGGLE));
266		xhci_dbg(xhci, "No Snoop bit = %u\n",
267				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
268		break;
269	case TRB_TYPE(TRB_TRANSFER):
270		address = trb->trans_event.buffer;
271		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
272		break;
273	case TRB_TYPE(TRB_COMPLETION):
274		address = trb->event_cmd.cmd_trb;
275		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
276		xhci_dbg(xhci, "Completion status = %u\n",
277				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
278		xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
279		break;
280	default:
281		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
282				(unsigned int) type>>10);
283		xhci_print_trb_offsets(xhci, trb);
284		break;
285	}
286}
287
288void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
289{
290	int i;
291	u32 addr = (u32) seg->dma;
292	union xhci_trb *trb = seg->trbs;
293
294	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
295		trb = &seg->trbs[i];
296		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
297				lower_32_bits(trb->link.segment_ptr),
298				upper_32_bits(trb->link.segment_ptr),
299				(unsigned int) trb->link.intr_target,
300				(unsigned int) trb->link.control);
301		addr += sizeof(*trb);
302	}
303}
304
305void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
306{
307	xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
308			ring->dequeue,
309			(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
310							    ring->dequeue));
311	xhci_dbg(xhci, "Ring deq updated %u times\n",
312			ring->deq_updates);
313	xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
314			ring->enqueue,
315			(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
316							    ring->enqueue));
317	xhci_dbg(xhci, "Ring enq updated %u times\n",
318			ring->enq_updates);
319}
320
321/**
322 * Debugging for an xHCI ring, which is a queue broken into multiple segments.
323 *
324 * Print out each segment in the ring.  Check that the DMA address in
325 * each link segment actually matches the segment's stored DMA address.
326 * Check that the link end bit is only set at the end of the ring.
327 * Check that the dequeue and enqueue pointers point to real data in this ring
328 * (not some other ring).
329 */
330void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
331{
332	struct xhci_segment *seg;
333	struct xhci_segment *first_seg = ring->first_seg;
334	xhci_debug_segment(xhci, first_seg);
335
336	if (!ring->enq_updates && !ring->deq_updates) {
337		xhci_dbg(xhci, "  Ring has not been updated\n");
338		return;
339	}
340	for (seg = first_seg->next; seg != first_seg; seg = seg->next)
341		xhci_debug_segment(xhci, seg);
342}
343
344void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
345		unsigned int slot_id, unsigned int ep_index,
346		struct xhci_virt_ep *ep)
347{
348	int i;
349	struct xhci_ring *ring;
350
351	if (ep->ep_state & EP_HAS_STREAMS) {
352		for (i = 1; i < ep->stream_info->num_streams; i++) {
353			ring = ep->stream_info->stream_rings[i];
354			xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
355				slot_id, ep_index, i);
356			xhci_debug_segment(xhci, ring->deq_seg);
357		}
358	} else {
359		ring = ep->ring;
360		if (!ring)
361			return;
362		xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
363				slot_id, ep_index);
364		xhci_debug_segment(xhci, ring->deq_seg);
365	}
366}
367
368void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
369{
370	u32 addr = (u32) erst->erst_dma_addr;
371	int i;
372	struct xhci_erst_entry *entry;
373
374	for (i = 0; i < erst->num_entries; ++i) {
375		entry = &erst->entries[i];
376		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
377				(unsigned int) addr,
378				lower_32_bits(entry->seg_addr),
379				upper_32_bits(entry->seg_addr),
380				(unsigned int) entry->seg_size,
381				(unsigned int) entry->rsvd);
382		addr += sizeof(*entry);
383	}
384}
385
386void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
387{
388	u64 val;
389
390	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
391	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
392			lower_32_bits(val));
393	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
394			upper_32_bits(val));
395}
396
397/* Print the last 32 bytes for 64-byte contexts */
398static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
399{
400	int i;
401	for (i = 0; i < 4; ++i) {
402		xhci_dbg(xhci, "@%p (virt) @%08llx "
403			 "(dma) %#08llx - rsvd64[%d]\n",
404			 &ctx[4 + i], (unsigned long long)dma,
405			 ctx[4 + i], i);
406		dma += 8;
407	}
408}
409
410char *xhci_get_slot_state(struct xhci_hcd *xhci,
411		struct xhci_container_ctx *ctx)
412{
413	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
414
415	switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
416	case 0:
417		return "enabled/disabled";
418	case 1:
419		return "default";
420	case 2:
421		return "addressed";
422	case 3:
423		return "configured";
424	default:
425		return "reserved";
426	}
427}
428
/*
 * Dump the slot context of a device or input context: each 32-bit field
 * is printed with its virtual address, DMA address, and raw value,
 * followed by the extra reserved area when 64-byte contexts are in use.
 */
void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	int i;

	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
	/* DMA address of the slot context = container base + offset within it */
	dma_addr_t dma = ctx->dma +
		((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
	/* Non-zero when the controller uses 64-byte context structures */
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	xhci_dbg(xhci, "Slot Context:\n");
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
			&slot_ctx->dev_info,
			(unsigned long long)dma, slot_ctx->dev_info);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
			&slot_ctx->dev_info2,
			(unsigned long long)dma, slot_ctx->dev_info2);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
			&slot_ctx->tt_info,
			(unsigned long long)dma, slot_ctx->tt_info);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
			&slot_ctx->dev_state,
			(unsigned long long)dma, slot_ctx->dev_state);
	dma += field_size;
	for (i = 0; i < 4; ++i) {
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
				&slot_ctx->reserved[i], (unsigned long long)dma,
				slot_ctx->reserved[i], i);
		dma += field_size;
	}

	if (csz)
		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
467
/*
 * Dump endpoint contexts 0 through last_ep (capped at 31) from a device
 * or input context: each field is printed with its virtual address,
 * DMA address, and raw value.
 */
void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
		     struct xhci_container_ctx *ctx,
		     unsigned int last_ep)
{
	int i, j;
	int last_ep_ctx = 31;
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	/* Non-zero when the controller uses 64-byte context structures */
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	if (last_ep < 31)
		last_ep_ctx = last_ep + 1;
	for (i = 0; i < last_ep_ctx; ++i) {
		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
		/* DMA address of this endpoint context within the container */
		dma_addr_t dma = ctx->dma +
			((unsigned long)ep_ctx - (unsigned long)ctx->bytes);

		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
				&ep_ctx->ep_info,
				(unsigned long long)dma, ep_ctx->ep_info);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
				&ep_ctx->ep_info2,
				(unsigned long long)dma, ep_ctx->ep_info2);
		dma += field_size;
		/* deq is a 64-bit TRB dequeue pointer, hence two field sizes */
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
				&ep_ctx->deq,
				(unsigned long long)dma, ep_ctx->deq);
		dma += 2*field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
				&ep_ctx->tx_info,
				(unsigned long long)dma, ep_ctx->tx_info);
		dma += field_size;
		for (j = 0; j < 3; ++j) {
			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
					&ep_ctx->reserved[j],
					(unsigned long long)dma,
					ep_ctx->reserved[j], j);
			dma += field_size;
		}

		if (csz)
			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
	}
}
514
/*
 * Dump a whole container context: for input contexts, first the input
 * control context (drop/add flags plus reserved words), then the slot
 * context, then endpoint contexts up to last_ep.
 */
void xhci_dbg_ctx(struct xhci_hcd *xhci,
		  struct xhci_container_ctx *ctx,
		  unsigned int last_ep)
{
	int i;
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	struct xhci_slot_ctx *slot_ctx;
	dma_addr_t dma = ctx->dma;
	/* Non-zero when the controller uses 64-byte context structures */
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
		/* Input contexts begin with the input control context */
		struct xhci_input_control_ctx *ctrl_ctx =
			xhci_get_input_control_ctx(xhci, ctx);
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
			 &ctrl_ctx->drop_flags, (unsigned long long)dma,
			 ctrl_ctx->drop_flags);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
			 &ctrl_ctx->add_flags, (unsigned long long)dma,
			 ctrl_ctx->add_flags);
		dma += field_size;
		for (i = 0; i < 6; ++i) {
			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
				 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
				 ctrl_ctx->rsvd2[i], i);
			dma += field_size;
		}

		if (csz)
			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
	}

	slot_ctx = xhci_get_slot_ctx(xhci, ctx);
	xhci_dbg_slot_ctx(xhci, ctx);
	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
}
552