// SPDX-License-Identifier: GPL-2.0+
/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/bug.h>
#include <linux/errno.h>

#include <usb/xhci.h>

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE
/**
 * flushes the memory region passed, rounded out to cache-line boundaries
 *
 * @param addr	start address of the memory region to be flushed
 * @param len	length in bytes of the memory region to be flushed
 * Return: none
 */
void xhci_flush_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

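	/*
	 * Cache maintenance works on whole cache lines, so round the start
	 * down and the end up to cache-line boundaries.
	 */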
	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * invalidates the memory region passed, rounded out to cache-line boundaries
 *
 * @param addr	start address of the memory region to be invalidated
 * @param len	length in bytes of the memory region to be invalidated
 * Return: none
 */
void xhci_inval_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * frees the "segment" pointer passed
 *
 * @param ctrl	host controller data structure
 * @param seg	pointer to the "segment" to be freed
 * Return: none
 */
static void xhci_segment_free(struct xhci_ctrl *ctrl, struct xhci_segment *seg)
{
	xhci_dma_unmap(ctrl, seg->dma, SEGMENT_SIZE);
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}

/**
 * frees the "ring" pointer passed, including all of its segments
 *
 * @param ctrl	host controller data structure
 * @param ring	pointer to the "ring" to be freed
 * Return: none
 */
static void xhci_ring_free(struct xhci_ctrl *ctrl, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	BUG_ON(!ring);

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(ctrl, seg);
		seg = next;
	}
	xhci_segment_free(ctrl, first_seg);

	free(ring);
}

/**
 * Free the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl	host controller data structure
 * Return:	none
 */
static void xhci_scratchpad_free(struct xhci_ctrl *ctrl)
{
	struct xhci_hccr *hccr = ctrl->hccr;
	int num_sp;

	if (!ctrl->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
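	/*
	 * sp_array[0] holds the DMA address of the first scratchpad page and
	 * DCBAA entry 0 holds the DMA address of the array itself; undo both
	 * mappings before freeing the buffers.
	 */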
	xhci_dma_unmap(ctrl, ctrl->scratchpad->sp_array[0],
		       num_sp * ctrl->page_size);
	xhci_dma_unmap(ctrl, ctrl->dcbaa->dev_context_ptrs[0],
		       num_sp * sizeof(u64));
	ctrl->dcbaa->dev_context_ptrs[0] = 0;

	free(ctrl->scratchpad->scratchpad);
	free(ctrl->scratchpad->sp_array);
	free(ctrl->scratchpad);
	ctrl->scratchpad = NULL;
}

/**
 * frees the "xhci_container_ctx" pointer passed
 *
 * @param ctrl	host controller data structure
 * @param ctx	pointer to the "xhci_container_ctx" to be freed
 * Return: none
 */
static void xhci_free_container_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx)
{
	xhci_dma_unmap(ctrl, ctx->dma, ctx->size);
	free(ctx->bytes);
	free(ctx);
}

/**
 * frees the virtual devices for the "xhci_ctrl" pointer passed
 *
 * @param ctrl	pointer to "xhci_ctrl" whose virtual devices are to be freed
 * Return: none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * Loop through all slots; slot ID 0 is reserved
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(ctrl, virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(ctrl, virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(ctrl, virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}

/**
 * frees all the memory allocated for the given host controller
 *
 * @param ctrl	pointer to "xhci_ctrl" to be cleaned up
 * Return: none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl, ctrl->event_ring);
	xhci_ring_free(ctrl, ctrl->cmd_ring);
	xhci_scratchpad_free(ctrl);
	xhci_free_virt_devices(ctrl);
	xhci_dma_unmap(ctrl, ctrl->erst.erst_dma_addr,
		       sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);
	free(ctrl->erst.entries);
	xhci_dma_unmap(ctrl, ctrl->dcbaa->dma,
		       sizeof(struct xhci_device_context_array));
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}

/**
 * Allocate and zero a cache-line aligned memory buffer
 *
 * @param size	size of the memory to be allocated
 * Return: pointer to the allocated, zeroed and aligned memory
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

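	/*
	 * Align both the start address and the size so that the buffer
	 * meets the xHCI alignment rules and occupies whole cache lines;
	 * flush/invalidate then never touch a neighbouring allocation.
	 */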
	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

	xhci_flush_cache((uintptr_t)ptr, size);

	return ptr;
}

/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param ctrl	host controller data structure
 * @param prev	pointer to the previous segment
 * @param next	pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the TRBs or not
 * Return: none
 */
static void xhci_link_segments(struct xhci_ctrl *ctrl, struct xhci_segment *prev,
			       struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

/**
 * Initialises the ring's enqueue, dequeue, enq_seg and deq_seg pointers
 *
 * @param ring	pointer to the RING to be initialised
 * Return: none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}

/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param ctrl	host controller data structure
 * Return: pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_ctrl *ctrl)
{
	struct xhci_segment *seg;

	seg = malloc(sizeof(struct xhci_segment));
	BUG_ON(!seg);

	seg->trbs = xhci_malloc(SEGMENT_SIZE);
	seg->dma = xhci_dma_map(ctrl, seg->trbs, SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}

/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of the xHCI spec rev1.0.
 *
 * @param ctrl	host controller data structure
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the TRBs or not
 * Return: pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(struct xhci_ctrl *ctrl, unsigned int num_segs,
				  bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = malloc(sizeof(struct xhci_ring));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(ctrl);
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(ctrl);
		BUG_ON(!next);

		xhci_link_segments(ctrl, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(ctrl, prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}

/**
 * Set up the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl	host controller data structure
 * Return:	-ENOMEM if buffer allocation fails, 0 on success
 */
static int xhci_scratchpad_alloc(struct xhci_ctrl *ctrl)
{
	struct xhci_hccr *hccr = ctrl->hccr;
	struct xhci_hcor *hcor = ctrl->hcor;
	struct xhci_scratchpad *scratchpad;
	uint64_t val_64;
	int num_sp;
	uint32_t page_size;
	void *buf;
	int i;

	num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
	if (!num_sp)
		return 0;

	scratchpad = malloc(sizeof(*scratchpad));
	if (!scratchpad)
		goto fail_sp;
	ctrl->scratchpad = scratchpad;

	scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
	if (!scratchpad->sp_array)
		goto fail_sp2;

	val_64 = xhci_dma_map(ctrl, scratchpad->sp_array,
			      num_sp * sizeof(u64));
	ctrl->dcbaa->dev_context_ptrs[0] = cpu_to_le64(val_64);

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[0],
		sizeof(ctrl->dcbaa->dev_context_ptrs[0]));

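	/*
	 * The PAGESIZE register reports the supported page sizes: bit n set
	 * means a page size of 2^(n + 12) bytes. Pick the smallest one.
	 */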
	page_size = xhci_readl(&hcor->or_pagesize) & 0xffff;
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	BUG_ON(i == 16);

	ctrl->page_size = 1 << (i + 12);
	buf = memalign(ctrl->page_size, num_sp * ctrl->page_size);
	if (!buf)
		goto fail_sp3;
	memset(buf, '\0', num_sp * ctrl->page_size);
	xhci_flush_cache((uintptr_t)buf, num_sp * ctrl->page_size);

	scratchpad->scratchpad = buf;
	val_64 = xhci_dma_map(ctrl, buf, num_sp * ctrl->page_size);
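	/* Entry i of the scratchpad array holds the DMA address of page i */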
	for (i = 0; i < num_sp; i++) {
		scratchpad->sp_array[i] = cpu_to_le64(val_64);
		val_64 += ctrl->page_size;
	}

	xhci_flush_cache((uintptr_t)scratchpad->sp_array,
			 sizeof(u64) * num_sp);

	return 0;

fail_sp3:
	free(scratchpad->sp_array);

fail_sp2:
	free(scratchpad);
	ctrl->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}

/**
 * Allocates the Container context
 *
 * @param ctrl	Host controller data structure
 * @param type	type of the XHCI Container Context
 * Return: pointer to the newly allocated context
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
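	/*
	 * A device context holds one slot context plus 31 endpoint contexts;
	 * an input context additionally carries an input control context in
	 * front. Each context is CTX_SIZE bytes (32 or 64, as reported by
	 * the controller).
	 */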
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(xhci_readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci_readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = xhci_malloc(ctx->size);
	ctx->dma = xhci_dma_map(ctrl, ctx->bytes, ctx->size);

	return ctx;
}

/**
 * Allocates the virtual device for the given slot
 *
 * @param ctrl	host controller data structure
 * @param slot_id	slot ID assigned by the controller
 * Return: 0 on success, -EEXIST or -ENOMEM on failure
 */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
{
	u64 byte_64 = 0;
	struct xhci_virt_device *virt_dev;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = malloc(sizeof(struct xhci_virt_device));

	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(ctrl, 1, true);

	byte_64 = virt_dev->out_ctx->dma;

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(byte_64);

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
			 sizeof(__le64));
	return 0;
}

/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl	Host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * Return: 0 if successful else -ENOMEM on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
					struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	uint64_t deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -ENOMEM;
	}

	ctrl->dcbaa->dma = xhci_dma_map(ctrl, ctrl->dcbaa,
				sizeof(struct xhci_device_context_array));
	/* Set the pointer in DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, ctrl->dcbaa->dma);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(ctrl, 1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = ctrl->cmd_ring->first_seg->dma;
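	/*
	 * Preserve the reserved bits of CRCR and program the ring base
	 * address together with the initial Ring Cycle State (RCS) bit.
	 */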
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* Set up the doorbell array pointer from the DBOFF register */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* Set up the runtime registers pointer from the RTSOFF register */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* Point ir_set at the first interrupter register set */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ctrl, ERST_NUM_SEGS, false);
	ctrl->erst.entries = xhci_malloc(sizeof(struct xhci_erst_entry) *
					 ERST_NUM_SEGS);
	ctrl->erst.erst_dma_addr = xhci_dma_map(ctrl, ctrl->erst.entries,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

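	/* Point each ERST entry at one segment of the event ring */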
	for (val = 0, seg = ctrl->event_ring->first_seg;
			val < ERST_NUM_SEGS;
			val++) {
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

		trb_64 = seg->dma;
		entry->seg_addr = cpu_to_le64(trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uintptr_t)ctrl->erst.entries,
			 ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = xhci_trb_virt_to_dma(ctrl->event_ring->deq_seg,
				   ctrl->event_ring->dequeue);

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
				(u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ctrl->erst.erst_dma_addr & ~ERST_PTR_MASK;

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* set up the scratchpad buffer array and scratchpad buffers */
	xhci_scratchpad_alloc(ctrl);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Just Zero'ing this register completely,
	 * or some spurious Device Notification Events
	 * might screw things here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}

/**
 * Get the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * Return: pointer to the Input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

/**
 * Get the slot context for the passed container context
 *
 * @param ctrl	Host controller data structure
 * @param ctx	pointer to the context
 * Return: pointer to the slot control context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci_readl(&ctrl->hccr->cr_hccparams)));
}

/**
 * Gets the endpoint context based on the ep_index
 *
 * @param ctrl	Host controller data structure
 * @param ctx	context container
 * @param ep_index	index of the endpoint
 * Return: pointer to the endpoint context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
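	/* input contexts carry an extra input control context in front */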
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(xhci_readl(&ctrl->hccr->cr_hccparams))));
}

/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl	Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * @param ep_index index of the end point
 * Return: none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl	Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * Return: none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
					struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param ctrl	Host controller data structure
 * @param udev	pointer to the Device Data Structure
 * @param hop_portnr	root hub port number the device is reached through
 * Return: none
 */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl,
				     struct usb_device *udev, int hop_portnr)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;
	int slot_id = udev->slot_id;
	int speed = udev->speed;
	int route = 0;
#if CONFIG_IS_ENABLED(DM_USB)
	struct usb_device *dev = udev;
	struct usb_hub_device *hub;
#endif

	virt_dev = ctrl->devs[slot_id];

	BUG_ON(!virt_dev);

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));

#if CONFIG_IS_ENABLED(DM_USB)
	/* Calculate the route string for this device */
	port_num = dev->portnr;
	while (!usb_hub_is_root_hub(dev->dev)) {
		hub = dev_get_uclass_priv(dev->dev);
		/*
		 * Each hub in the topology is expected to have no more than
		 * 15 ports in order for the route string of a device to be
		 * unique. SuperSpeed hubs are restricted to only having 15
		 * ports, but FS/LS/HS hubs are not. The xHCI specification
		 * says that if the port number of the device is greater than
		 * 15, that portion of the route string shall be set to 15.
		 */
		if (port_num > 15)
			port_num = 15;
		route |= port_num << (hub->hub_depth * 4);
		dev = dev_get_parent_priv(dev->dev);
		port_num = dev->portnr;
		dev = dev_get_parent_priv(dev->dev->parent);
	}

	debug("route string %x\n", route);
#endif
	slot_ctx->dev_info |= cpu_to_le32(route);

	switch (speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}

#if CONFIG_IS_ENABLED(DM_USB)
	/* Set up TT fields to support FS/LS devices */
	if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
		struct udevice *parent = udev->dev;

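		/*
		 * Walk up the topology until the first high-speed hub is
		 * found; its Transaction Translator serves this FS/LS device.
		 */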
		dev = udev;
		do {
			port_num = dev->portnr;
			dev = dev_get_parent_priv(parent);
			if (usb_hub_is_root_hub(dev->dev))
				break;
			parent = dev->dev->parent;
		} while (dev->speed != USB_SPEED_HIGH);

		if (!usb_hub_is_root_hub(dev->dev)) {
			hub = dev_get_uclass_priv(dev->dev);
			if (hub->tt.multi)
				slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
			slot_ctx->tt_info |= cpu_to_le32(TT_PORT(port_num));
			slot_ctx->tt_info |= cpu_to_le32(TT_SLOT(dev->slot_id));
		}
	}
#endif

	port_num = hop_portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
			cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
				ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
	debug("SPEED = %d\n", speed);

	switch (speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		/* New speed? */
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));

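	/*
	 * TR Dequeue Pointer: first TRB of the EP0 ring, with the DCS bit
	 * set to the ring's current cycle state.
	 */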
	trb_64 = virt_dev->eps[0].ring->first_seg->dma;
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/*
	 * xHCI spec 6.2.3:
	 * software shall set 'Average TRB Length' to 8 for control endpoints.
	 */
	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8));

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}