1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * USB HOST XHCI Controller stack
4 *
5 * Based on xHCI host controller driver in linux-kernel
6 * by Sarah Sharp.
7 *
8 * Copyright (C) 2008 Intel Corp.
9 * Author: Sarah Sharp
10 *
11 * Copyright (C) 2013 Samsung Electronics Co.Ltd
12 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
13 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
14 */
15
16#include <common.h>
17#include <cpu_func.h>
18#include <log.h>
19#include <asm/byteorder.h>
20#include <usb.h>
21#include <asm/unaligned.h>
22#include <linux/bug.h>
23#include <linux/errno.h>
24
25#include <usb/xhci.h>
26
27/*
28 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
29 * address of the TRB.
30 */
31dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
32				union xhci_trb *trb)
33{
34	unsigned long segment_offset;
35
36	if (!seg || !trb || trb < seg->trbs)
37		return 0;
38	/* offset in TRBs */
39	segment_offset = trb - seg->trbs;
40	if (segment_offset >= TRBS_PER_SEGMENT)
41		return 0;
42	return seg->dma + (segment_offset * sizeof(*trb));
43}
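
/*
 * Worked example (illustrative only): a union xhci_trb is four 32-bit words,
 * i.e. 16 bytes, so with seg->dma == 0x80000000 and trb == &seg->trbs[4] this
 * returns 0x80000000 + 4 * 16 = 0x80000040.
 */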
44
45/**
 * Is this TRB a link TRB, or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event segment?
49 *
50 * @param ctrl	Host controller data structure
51 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which the TRB belongs
 * @param trb	pointer to the ring TRB
 * Return: 1 if this TRB is a link TRB, else 0
55 */
56static int last_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
57			struct xhci_segment *seg, union xhci_trb *trb)
58{
59	if (ring == ctrl->event_ring)
60		return trb == &seg->trbs[TRBS_PER_SEGMENT];
61	else
62		return TRB_TYPE_LINK_LE32(trb->link.control);
63}
64
65/**
66 * Does this link TRB point to the first segment in a ring,
67 * or was the previous TRB the last TRB on the last segment in the ERST?
68 *
69 * @param ctrl	Host controller data structure
70 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which the TRB belongs
 * @param trb	pointer to the ring TRB
 * Return: 1 if this TRB is the last TRB on the last segment, else 0
74 */
75static bool last_trb_on_last_seg(struct xhci_ctrl *ctrl,
76				 struct xhci_ring *ring,
77				 struct xhci_segment *seg,
78				 union xhci_trb *trb)
79{
80	if (ring == ctrl->event_ring)
81		return ((trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
82			(seg->next == ring->first_seg));
83	else
84		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
85}
86
87/**
88 * See Cycle bit rules. SW is the consumer for the event ring only.
89 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
90 *
91 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
92 * chain bit is set), then set the chain bit in all the following link TRBs.
93 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
94 * have their chain bit cleared (so that each Link TRB is a separate TD).
95 *
96 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
97 * set, but other sections talk about dealing with the chain bit set.  This was
98 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
99 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
100 *
101 * @param ctrl	Host controller data structure
102 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more TRBs will be
 *				enqueued before prepare_ring() is called again
 * Return: none
108 */
109static void inc_enq(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
110						bool more_trbs_coming)
111{
112	u32 chain;
113	union xhci_trb *next;
114
115	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
116	next = ++(ring->enqueue);
117
	/*
	 * Advance the enqueue pointer further if that was a link TRB or we're
	 * at the end of an event ring segment (which doesn't have link TRBs)
	 */
122	while (last_trb(ctrl, ring, ring->enq_seg, next)) {
123		if (ring != ctrl->event_ring) {
124			/*
125			 * If the caller doesn't plan on enqueueing more
126			 * TDs before ringing the doorbell, then we
127			 * don't want to give the link TRB to the
128			 * hardware just yet.  We'll give the link TRB
129			 * back in prepare_ring() just before we enqueue
130			 * the TD at the top of the ring.
131			 */
132			if (!chain && !more_trbs_coming)
133				break;
134
135			/*
136			 * If we're not dealing with 0.95 hardware or
137			 * isoc rings on AMD 0.96 host,
138			 * carry over the chain bit of the previous TRB
139			 * (which may mean the chain bit is cleared).
140			 */
141			next->link.control &= cpu_to_le32(~TRB_CHAIN);
142			next->link.control |= cpu_to_le32(chain);
143
144			next->link.control ^= cpu_to_le32(TRB_CYCLE);
145			xhci_flush_cache((uintptr_t)next,
146					 sizeof(union xhci_trb));
147		}
148		/* Toggle the cycle bit after the last ring segment. */
149		if (last_trb_on_last_seg(ctrl, ring,
150					ring->enq_seg, next))
151			ring->cycle_state = (ring->cycle_state ? 0 : 1);
152
153		ring->enq_seg = ring->enq_seg->next;
154		ring->enqueue = ring->enq_seg->trbs;
155		next = ring->enqueue;
156	}
157}
158
159/**
160 * See Cycle bit rules. SW is the consumer for the event ring only.
161 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
162 *
163 * @param ctrl	Host controller data structure
164 * @param ring	Ring whose Dequeue TRB pointer needs to be incremented.
 * Return: none
166 */
167static void inc_deq(struct xhci_ctrl *ctrl, struct xhci_ring *ring)
168{
169	do {
170		/*
171		 * Update the dequeue pointer further if that was a link TRB or
172		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBs)
174		 */
175		if (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue)) {
176			if (ring == ctrl->event_ring &&
177					last_trb_on_last_seg(ctrl, ring,
178						ring->deq_seg, ring->dequeue)) {
179				ring->cycle_state = (ring->cycle_state ? 0 : 1);
180			}
181			ring->deq_seg = ring->deq_seg->next;
182			ring->dequeue = ring->deq_seg->trbs;
183		} else {
184			ring->dequeue++;
185		}
186	} while (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue));
187}
188
189/**
190 * Generic function for queueing a TRB on a ring.
191 * The caller must have checked to make sure there's room on the ring.
192 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more TRBs will be
 *				enqueued before prepare_ring() is called again
 * @param trb_fields	pointer to the array of four TRB fields to enqueue
 * Return: DMA address of the enqueued TRB
200 */
201static dma_addr_t queue_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
202			    bool more_trbs_coming, unsigned int *trb_fields)
203{
204	struct xhci_generic_trb *trb;
205	dma_addr_t addr;
206	int i;
207
208	trb = &ring->enqueue->generic;
209
210	for (i = 0; i < 4; i++)
211		trb->field[i] = cpu_to_le32(trb_fields[i]);
212
213	xhci_flush_cache((uintptr_t)trb, sizeof(struct xhci_generic_trb));
214
215	addr = xhci_trb_virt_to_dma(ring->enq_seg, (union xhci_trb *)trb);
216
217	inc_enq(ctrl, ring, more_trbs_coming);
218
219	return addr;
220}
221
222/**
 * Does various checks on the endpoint ring and makes it ready for queueing
 * new TRBs.
225 *
226 * @param ctrl		Host controller data structure
227 * @param ep_ring	pointer to the EP Transfer Ring
228 * @param ep_state	State of the End Point
229 * Return: error code in case of invalid ep_state, 0 on success
230 */
231static int prepare_ring(struct xhci_ctrl *ctrl, struct xhci_ring *ep_ring,
232							u32 ep_state)
233{
234	union xhci_trb *next = ep_ring->enqueue;
235
236	/* Make sure the endpoint has been added to xHC schedule */
237	switch (ep_state) {
238	case EP_STATE_DISABLED:
239		/*
240		 * USB core changed config/interfaces without notifying us,
241		 * or hardware is reporting the wrong state.
242		 */
243		puts("WARN urb submitted to disabled ep\n");
244		return -ENOENT;
245	case EP_STATE_ERROR:
246		puts("WARN waiting for error on ep to be cleared\n");
247		return -EINVAL;
248	case EP_STATE_HALTED:
249		puts("WARN endpoint is halted\n");
250		return -EINVAL;
251	case EP_STATE_STOPPED:
252	case EP_STATE_RUNNING:
253		debug("EP STATE RUNNING.\n");
254		break;
255	default:
256		puts("ERROR unknown endpoint state for ep\n");
257		return -EINVAL;
258	}
259
260	while (last_trb(ctrl, ep_ring, ep_ring->enq_seg, next)) {
261		/*
262		 * If we're not dealing with 0.95 hardware or isoc rings
263		 * on AMD 0.96 host, clear the chain bit.
264		 */
265		next->link.control &= cpu_to_le32(~TRB_CHAIN);
266
267		next->link.control ^= cpu_to_le32(TRB_CYCLE);
268
269		xhci_flush_cache((uintptr_t)next, sizeof(union xhci_trb));
270
271		/* Toggle the cycle bit after the last ring segment. */
272		if (last_trb_on_last_seg(ctrl, ep_ring,
273					ep_ring->enq_seg, next))
274			ep_ring->cycle_state = (ep_ring->cycle_state ? 0 : 1);
275		ep_ring->enq_seg = ep_ring->enq_seg->next;
276		ep_ring->enqueue = ep_ring->enq_seg->trbs;
277		next = ep_ring->enqueue;
278	}
279
280	return 0;
281}
282
283/**
284 * Generic function for queueing a command TRB on the command ring.
285 * Check to make sure there's room on the command ring for one command TRB.
286 *
287 * @param ctrl		Host controller data structure
 * @param addr	DMA address to write into the first two TRB fields (optional)
289 * @param slot_id	Slot ID to encode in the flags field (opt.)
290 * @param ep_index	Endpoint index to encode in the flags field (opt.)
291 * @param cmd		Command type to enqueue
292 * Return: none
293 */
294void xhci_queue_command(struct xhci_ctrl *ctrl, dma_addr_t addr, u32 slot_id,
295			u32 ep_index, trb_type cmd)
296{
297	u32 fields[4];
298
299	BUG_ON(prepare_ring(ctrl, ctrl->cmd_ring, EP_STATE_RUNNING));
300
301	fields[0] = lower_32_bits(addr);
302	fields[1] = upper_32_bits(addr);
303	fields[2] = 0;
304	fields[3] = TRB_TYPE(cmd) | SLOT_ID_FOR_TRB(slot_id) |
305		    ctrl->cmd_ring->cycle_state;
306
307	/*
308	 * Only 'reset endpoint', 'stop endpoint' and 'set TR dequeue pointer'
309	 * commands need endpoint id encoded.
310	 */
311	if (cmd >= TRB_RESET_EP && cmd <= TRB_SET_DEQ)
312		fields[3] |= EP_ID_FOR_TRB(ep_index);
313
314	queue_trb(ctrl, ctrl->cmd_ring, false, fields);
315
316	/* Ring the command ring doorbell */
317	xhci_writel(&ctrl->dba->doorbell[0], DB_VALUE_HOST);
318}
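
/*
 * Illustrative sketch (compiled out, not part of the driver): the pattern
 * callers of xhci_queue_command() are expected to follow - queue the command,
 * wait for its completion event, check the completion code and acknowledge
 * the event. The TRB_ENABLE_SLOT command used here is only an example.
 */
#if 0
static int example_issue_command(struct xhci_ctrl *ctrl)
{
	union xhci_trb *event;

	xhci_queue_command(ctrl, 0, 0, 0, TRB_ENABLE_SLOT);

	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	if (!event)
		return -ETIMEDOUT;

	if (GET_COMP_CODE(le32_to_cpu(event->event_cmd.status)) !=
	    COMP_SUCCESS) {
		xhci_acknowledge_event(ctrl);
		return -EIO;
	}
	xhci_acknowledge_event(ctrl);

	return 0;
}
#endif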
319
320/*
321 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
322 * packets remaining in the TD (*not* including this TRB).
323 *
324 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
326 *
327 * Packets transferred up to and including this TRB = packets_transferred =
328 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
329 *
330 * TD size = total_packet_count - packets_transferred
331 *
332 * For xHCI 0.96 and older, TD size field should be the remaining bytes
333 * including this TRB, right shifted by 10
334 *
335 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
336 * This is taken care of in the TRB_TD_SIZE() macro
337 *
338 * The last TRB in a TD must have the TD size set to zero.
339 *
340 * @param ctrl	host controller data structure
341 * @param transferred	total size sent so far
342 * @param trb_buff_len	length of the TRB Buffer
343 * @param td_total_len	total packet count
344 * @param maxp	max packet size of current pipe
 * @param more_trbs_coming	false if this is the last TRB of the TD
346 * Return: remainder
347 */
348static u32 xhci_td_remainder(struct xhci_ctrl *ctrl, int transferred,
349			     int trb_buff_len, unsigned int td_total_len,
350			     int maxp, bool more_trbs_coming)
351{
352	u32 total_packet_count;
353
354	/* MTK xHCI 0.96 contains some features from 1.0 */
355	if (ctrl->hci_version < 0x100 && !(ctrl->quirks & XHCI_MTK_HOST))
356		return ((td_total_len - transferred) >> 10);
357
358	/* One TRB with a zero-length data packet. */
359	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
360	    trb_buff_len == td_total_len)
361		return 0;
362
363	/* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
364	if ((ctrl->quirks & XHCI_MTK_HOST) && (ctrl->hci_version < 0x100))
365		trb_buff_len = 0;
366
367	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
368
369	/* Queueing functions don't count the current TRB into transferred */
370	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
371}
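
/*
 * Worked example for the xHCI 1.0+ path above (illustrative only): for an
 * 8192-byte TD with maxp = 512, total_packet_count = DIV_ROUND_UP(8192, 512)
 * = 16. The first TRB (transferred = 0) carrying 1024 bytes with more TRBs
 * coming yields a TD size of 16 - (0 + 1024) / 512 = 14, while the last TRB
 * of the TD returns 0 through the early exit above.
 */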
372
373/**
374 * Ring the doorbell of the End Point
375 *
376 * @param udev		pointer to the USB device structure
377 * @param ep_index	index of the endpoint
378 * @param start_cycle	cycle flag of the first TRB
 * @param start_trb	pointer to the first TRB
380 * Return: none
381 */
382static void giveback_first_trb(struct usb_device *udev, int ep_index,
383				int start_cycle,
384				struct xhci_generic_trb *start_trb)
385{
386	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
387
388	/*
389	 * Pass all the TRBs to the hardware at once and make sure this write
390	 * isn't reordered.
391	 */
392	if (start_cycle)
393		start_trb->field[3] |= cpu_to_le32(start_cycle);
394	else
395		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
396
397	xhci_flush_cache((uintptr_t)start_trb, sizeof(struct xhci_generic_trb));
398
399	/* Ringing EP doorbell here */
400	xhci_writel(&ctrl->dba->doorbell[udev->slot_id],
401				DB_VALUE(ep_index, 0));
404}
405
406/**** POLLING mechanism for XHCI ****/
407
408/**
409 * Finalizes a handled event TRB by advancing our dequeue pointer and giving
410 * the TRB back to the hardware for recycling. Must call this exactly once at
411 * the end of each event handler, and not touch the TRB again afterwards.
412 *
413 * @param ctrl	Host controller data structure
414 * Return: none
415 */
416void xhci_acknowledge_event(struct xhci_ctrl *ctrl)
417{
418	dma_addr_t deq;
419
420	/* Advance our dequeue pointer to the next event */
421	inc_deq(ctrl, ctrl->event_ring);
422
423	/* Inform the hardware */
424	deq = xhci_trb_virt_to_dma(ctrl->event_ring->deq_seg,
425				   ctrl->event_ring->dequeue);
426	xhci_writeq(&ctrl->ir_set->erst_dequeue, deq | ERST_EHB);
427}
428
429/**
430 * Checks if there is a new event to handle on the event ring.
431 *
432 * @param ctrl	Host controller data structure
 * Return: 1 if a new event is ready to be handled, else 0
434 */
435static int event_ready(struct xhci_ctrl *ctrl)
436{
437	union xhci_trb *event;
438
439	xhci_inval_cache((uintptr_t)ctrl->event_ring->dequeue,
440			 sizeof(union xhci_trb));
441
442	event = ctrl->event_ring->dequeue;
443
444	/* Does the HC or OS own the TRB? */
445	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
446		ctrl->event_ring->cycle_state)
447		return 0;
448
449	return 1;
450}
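
/*
 * Illustrative note: ownership of event TRBs is tracked purely by the cycle
 * bit. The event ring starts zero-filled with ring->cycle_state set to 1, so
 * any TRB whose cycle bit reads 1 was written by the xHC and is ready to be
 * handled. Once the xHC wraps past the end of the event ring it writes TRBs
 * with the cycle bit cleared, and inc_deq() toggles ring->cycle_state at the
 * wrap so the comparison above keeps working.
 */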
451
452/**
453 * Waits for a specific type of event and returns it. Discards unexpected
454 * events. Caller *must* call xhci_acknowledge_event() after it is finished
455 * processing the event, and must not access the returned pointer afterwards.
456 *
457 * @param ctrl		Host controller data structure
458 * @param expected	TRB type expected from Event TRB
459 * Return: pointer to event trb
460 */
461union xhci_trb *xhci_wait_for_event(struct xhci_ctrl *ctrl, trb_type expected)
462{
463	trb_type type;
464	unsigned long ts = get_timer(0);
465
466	do {
467		union xhci_trb *event = ctrl->event_ring->dequeue;
468
469		if (!event_ready(ctrl))
470			continue;
471
472		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
473		if (type == expected ||
474		    (expected == TRB_NONE && type != TRB_PORT_STATUS))
475			return event;
476
		if (type == TRB_PORT_STATUS)
			/*
			 * TODO: remove this once enumeration has been
			 * reworked.
			 * Port status change events always have a
			 * successful completion code.
			 */
			BUG_ON(GET_COMP_CODE(
				le32_to_cpu(event->generic.field[2])) !=
			       COMP_SUCCESS);
486		else
487			printf("Unexpected XHCI event TRB, skipping... "
488				"(%08x %08x %08x %08x)\n",
489				le32_to_cpu(event->generic.field[0]),
490				le32_to_cpu(event->generic.field[1]),
491				le32_to_cpu(event->generic.field[2]),
492				le32_to_cpu(event->generic.field[3]));
493
494		xhci_acknowledge_event(ctrl);
495	} while (get_timer(ts) < XHCI_TIMEOUT);
496
497	if (expected == TRB_TRANSFER)
498		return NULL;
499
500	printf("XHCI timeout on event type %d...\n", expected);
501
502	return NULL;
503}
504
505/*
506 * Send reset endpoint command for given endpoint. This recovers from a
507 * halted endpoint (e.g. due to a stall error).
508 */
509static void reset_ep(struct usb_device *udev, int ep_index)
510{
511	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
512	struct xhci_ring *ring =  ctrl->devs[udev->slot_id]->eps[ep_index].ring;
513	union xhci_trb *event;
514	u64 addr;
515	u32 field;
516
517	printf("Resetting EP %d...\n", ep_index);
518	xhci_queue_command(ctrl, 0, udev->slot_id, ep_index, TRB_RESET_EP);
519	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
520	if (!event)
521		return;
522
523	field = le32_to_cpu(event->trans_event.flags);
524	BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
525	xhci_acknowledge_event(ctrl);
526
527	addr = xhci_trb_virt_to_dma(ring->enq_seg,
528		(void *)((uintptr_t)ring->enqueue | ring->cycle_state));
529	xhci_queue_command(ctrl, addr, udev->slot_id, ep_index, TRB_SET_DEQ);
530	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
531	if (!event)
532		return;
533
534	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
535		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
536		event->event_cmd.status)) != COMP_SUCCESS);
537	xhci_acknowledge_event(ctrl);
538}
539
540/*
541 * Stops transfer processing for an endpoint and throws away all unprocessed
542 * TRBs by setting the xHC's dequeue pointer to our enqueue pointer. The next
 * xhci_bulk_tx/xhci_ctrl_tx on this endpoint will add new transfers there and
544 * ring the doorbell, causing this endpoint to start working again.
545 * (Careful: This will BUG() when there was no transfer in progress. Shouldn't
546 * happen in practice for current uses and is too complicated to fix right now.)
547 */
548static void abort_td(struct usb_device *udev, int ep_index)
549{
550	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
551	struct xhci_ring *ring =  ctrl->devs[udev->slot_id]->eps[ep_index].ring;
552	union xhci_trb *event;
553	xhci_comp_code comp;
554	trb_type type;
555	u64 addr;
556	u32 field;
557
558	xhci_queue_command(ctrl, 0, udev->slot_id, ep_index, TRB_STOP_RING);
559
560	event = xhci_wait_for_event(ctrl, TRB_NONE);
561	if (!event)
562		return;
563
564	type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
565	if (type == TRB_TRANSFER) {
566		field = le32_to_cpu(event->trans_event.flags);
567		BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
568		BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
		BUG_ON(GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
			!= COMP_STOP);
571		xhci_acknowledge_event(ctrl);
572
573		event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
574		if (!event)
575			return;
576		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
577
578	} else {
579		printf("abort_td: Expected a TRB_TRANSFER TRB first\n");
580	}
581
582	comp = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
583	BUG_ON(type != TRB_COMPLETION ||
584		TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
585		!= udev->slot_id || (comp != COMP_SUCCESS && comp
586		!= COMP_CTX_STATE));
587	xhci_acknowledge_event(ctrl);
588
589	addr = xhci_trb_virt_to_dma(ring->enq_seg,
590		(void *)((uintptr_t)ring->enqueue | ring->cycle_state));
591	xhci_queue_command(ctrl, addr, udev->slot_id, ep_index, TRB_SET_DEQ);
592	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
593	if (!event)
594		return;
595
596	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
597		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
598		event->event_cmd.status)) != COMP_SUCCESS);
599	xhci_acknowledge_event(ctrl);
600}
601
602static void record_transfer_result(struct usb_device *udev,
603				   union xhci_trb *event, int length)
604{
605	udev->act_len = min(length, length -
606		(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len)));
607
608	switch (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))) {
609	case COMP_SUCCESS:
610		BUG_ON(udev->act_len != length);
611		/* fallthrough */
612	case COMP_SHORT_TX:
613		udev->status = 0;
614		break;
615	case COMP_STALL:
616		udev->status = USB_ST_STALLED;
617		break;
618	case COMP_DB_ERR:
619	case COMP_TRB_ERR:
620		udev->status = USB_ST_BUF_ERR;
621		break;
622	case COMP_BABBLE:
623		udev->status = USB_ST_BABBLE_DET;
624		break;
625	default:
626		udev->status = 0x80;  /* USB_ST_TOO_LAZY_TO_MAKE_A_NEW_MACRO */
627	}
628}
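
/*
 * Worked example (illustrative only): for a 512-byte IN request where the
 * device delivered only 18 bytes, the event's EVENT_TRB_LEN() residual is
 * 512 - 18 = 494, so act_len becomes min(512, 512 - 494) = 18 and the
 * completion code is COMP_SHORT_TX, i.e. status 0.
 */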
629
630/**** Bulk and Control transfer methods ****/
631/**
632 * Queues up the BULK Request
633 *
634 * @param udev		pointer to the USB device structure
 * @param pipe		pipe encoding the direction (IN/OUT), devnum and endpoint
636 * @param length	length of the buffer
637 * @param buffer	buffer to be read/written based on the request
 * Return: 0 if successful, else a negative value on failure
639 */
640int xhci_bulk_tx(struct usb_device *udev, unsigned long pipe,
641			int length, void *buffer)
642{
643	int num_trbs = 0;
644	struct xhci_generic_trb *start_trb;
645	bool first_trb = false;
646	int start_cycle;
647	u32 field = 0;
648	u32 length_field = 0;
649	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
650	int slot_id = udev->slot_id;
651	int ep_index;
652	struct xhci_virt_device *virt_dev;
653	struct xhci_ep_ctx *ep_ctx;
654	struct xhci_ring *ring;		/* EP transfer ring */
655	union xhci_trb *event;
656
657	int running_total, trb_buff_len;
658	bool more_trbs_coming = true;
659	int maxpacketsize;
660	u64 addr;
661	int ret;
662	u32 trb_fields[4];
663	u64 buf_64 = xhci_dma_map(ctrl, buffer, length);
664	dma_addr_t last_transfer_trb_addr;
665	int available_length;
666
667	debug("dev=%p, pipe=%lx, buffer=%p, length=%d\n",
668		udev, pipe, buffer, length);
669
670	available_length = length;
671	ep_index = usb_pipe_ep_index(pipe);
672	virt_dev = ctrl->devs[slot_id];
673
674	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
675			 virt_dev->out_ctx->size);
676
677	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);
678
679	/*
680	 * If the endpoint was halted due to a prior error, resume it before
681	 * the next transfer. It is the responsibility of the upper layer to
682	 * have dealt with whatever caused the error.
683	 */
684	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
685		reset_ep(udev, ep_index);
686
687	ring = virt_dev->eps[ep_index].ring;
688	if (!ring)
689		return -EINVAL;
690
691	/*
	 * How much data is (potentially) left before the 64KB boundary?
	 * The xHCI spec (Table 49 and section 6.4.1) requires that a TRB
	 * buffer must not span a 64KB boundary; if it would, the request is
	 * split across multiple TRBs that are chained together.
696	 */
697	running_total = TRB_MAX_BUFF_SIZE -
698			(lower_32_bits(buf_64) & (TRB_MAX_BUFF_SIZE - 1));
699	trb_buff_len = running_total;
700	running_total &= TRB_MAX_BUFF_SIZE - 1;
701
702	/*
703	 * If there's some data on this 64KB chunk, or we have to send a
704	 * zero-length transfer, we need at least one TRB
705	 */
706	if (running_total != 0 || length == 0)
707		num_trbs++;
708
709	/* How many more 64KB chunks to transfer, how many more TRBs? */
710	while (running_total < length) {
711		num_trbs++;
712		running_total += TRB_MAX_BUFF_SIZE;
713	}
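
	/*
	 * Worked example (illustrative only): for a 12KB transfer whose
	 * buffer starts 4KB below a 64KB boundary (low bits 0xf000), the
	 * first TRB covers the 0x1000 bytes up to the boundary and a second
	 * TRB covers the remaining 0x2000 bytes, so num_trbs ends up as 2.
	 */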
714
715	/*
	 * XXX: prepare_ring() is called here in place of Linux's
	 * prepare_transfer(), since we do not maintain multiple TDs per
	 * transfer at the same time.
719	 */
720	ret = prepare_ring(ctrl, ring,
721			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
722	if (ret < 0)
723		return ret;
724
725	/*
726	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
727	 * until we've finished creating all the other TRBs.  The ring's cycle
728	 * state may change as we enqueue the other TRBs, so save it too.
729	 */
730	start_trb = &ring->enqueue->generic;
731	start_cycle = ring->cycle_state;
732
733	running_total = 0;
734	maxpacketsize = usb_maxpacket(udev, pipe);
735
	/*
	 * How much data fits in the first TRB, i.e. how much is left before
	 * the next 64KB boundary (see the comment above where num_trbs is
	 * computed)?
	 */
743	addr = buf_64;
744
745	if (trb_buff_len > length)
746		trb_buff_len = length;
747
748	first_trb = true;
749
750	/* flush the buffer before use */
751	xhci_flush_cache((uintptr_t)buffer, length);
752
753	/* Queue the first TRB, even if it's zero-length */
754	do {
755		u32 remainder = 0;
756		field = 0;
757		/* Don't change the cycle bit of the first TRB until later */
758		if (first_trb) {
759			first_trb = false;
760			if (start_cycle == 0)
761				field |= TRB_CYCLE;
762		} else {
763			field |= ring->cycle_state;
764		}
765
766		/*
767		 * Chain all the TRBs together; clear the chain bit in the last
768		 * TRB to indicate it's the last TRB in the chain.
769		 */
770		if (num_trbs > 1) {
771			field |= TRB_CHAIN;
772		} else {
773			field |= TRB_IOC;
774			more_trbs_coming = false;
775		}
776
777		/* Only set interrupt on short packet for IN endpoints */
778		if (usb_pipein(pipe))
779			field |= TRB_ISP;
780
781		/* Set the TRB length, TD size, and interrupter fields. */
782		remainder = xhci_td_remainder(ctrl, running_total, trb_buff_len,
783					      length, maxpacketsize,
784					      more_trbs_coming);
785
786		length_field = (TRB_LEN(trb_buff_len) |
787				TRB_TD_SIZE(remainder) |
788				TRB_INTR_TARGET(0));
789
790		trb_fields[0] = lower_32_bits(addr);
791		trb_fields[1] = upper_32_bits(addr);
792		trb_fields[2] = length_field;
793		trb_fields[3] = field | TRB_TYPE(TRB_NORMAL);
794
795		last_transfer_trb_addr = queue_trb(ctrl, ring, (num_trbs > 1), trb_fields);
796
797		--num_trbs;
798
799		running_total += trb_buff_len;
800
801		/* Calculate length for next transfer */
802		addr += trb_buff_len;
803		trb_buff_len = min((length - running_total), TRB_MAX_BUFF_SIZE);
804	} while (running_total < length);
805
806	giveback_first_trb(udev, ep_index, start_cycle, start_trb);
807
808again:
809	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
810	if (!event) {
811		debug("XHCI bulk transfer timed out, aborting...\n");
812		abort_td(udev, ep_index);
813		udev->status = USB_ST_NAK_REC;  /* closest thing to a timeout */
814		udev->act_len = 0;
815		return -ETIMEDOUT;
816	}
817
818	if ((uintptr_t)(le64_to_cpu(event->trans_event.buffer)) !=
819	    (uintptr_t)last_transfer_trb_addr) {
820		available_length -=
821			(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len));
822		xhci_acknowledge_event(ctrl);
823		goto again;
824	}
825
826	field = le32_to_cpu(event->trans_event.flags);
827	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
828	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
829
830	record_transfer_result(udev, event, available_length);
831	xhci_acknowledge_event(ctrl);
832	xhci_inval_cache((uintptr_t)buffer, length);
833	xhci_dma_unmap(ctrl, buf_64, length);
834
835	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
836}
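
/*
 * Illustrative sketch (compiled out): driving xhci_bulk_tx() directly. The
 * endpoint number is hypothetical; in practice the generic USB core reaches
 * this function through the submit_bulk_msg() glue and then looks at
 * udev->act_len for the number of bytes actually transferred.
 */
#if 0
static int example_bulk_in(struct usb_device *udev, void *buf, int len)
{
	/* Bulk IN pipe for endpoint 1 (example value) */
	unsigned long pipe = usb_rcvbulkpipe(udev, 1);
	int ret = xhci_bulk_tx(udev, pipe, len, buf);

	if (ret)
		return ret;

	return udev->act_len;
}
#endif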
837
838/**
839 * Queues up the Control Transfer Request
840 *
841 * @param udev	pointer to the USB device structure
 * @param pipe		pipe encoding the direction (IN/OUT), devnum and endpoint
843 * @param req		request type
844 * @param length	length of the buffer
845 * @param buffer	buffer to be read/written based on the request
 * Return: 0 if successful, else a negative error code on failure
847 */
848int xhci_ctrl_tx(struct usb_device *udev, unsigned long pipe,
849			struct devrequest *req,	int length,
850			void *buffer)
851{
852	int ret;
853	int start_cycle;
854	int num_trbs;
855	u32 field;
856	u32 length_field;
857	u64 buf_64 = 0;
858	struct xhci_generic_trb *start_trb;
859	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
860	int slot_id = udev->slot_id;
861	int ep_index;
862	u32 trb_fields[4];
863	struct xhci_virt_device *virt_dev = ctrl->devs[slot_id];
864	struct xhci_ring *ep_ring;
865	union xhci_trb *event;
866	u32 remainder;
867
868	debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n",
869		req->request, req->request,
870		req->requesttype, req->requesttype,
871		le16_to_cpu(req->value), le16_to_cpu(req->value),
872		le16_to_cpu(req->index));
873
874	ep_index = usb_pipe_ep_index(pipe);
875
876	ep_ring = virt_dev->eps[ep_index].ring;
877	if (!ep_ring)
878		return -EINVAL;
879
880	/*
881	 * Check to see if the max packet size for the default control
882	 * endpoint changed during FS device enumeration
883	 */
884	if (udev->speed == USB_SPEED_FULL) {
885		ret = xhci_check_maxpacket(udev);
886		if (ret < 0)
887			return ret;
888	}
889
890	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
891			 virt_dev->out_ctx->size);
892
	struct xhci_ep_ctx *ep_ctx =
		xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);
895
896	/* 1 TRB for setup, 1 for status */
897	num_trbs = 2;
898	/*
899	 * Don't need to check if we need additional event data and normal TRBs,
900	 * since data in control transfers will never get bigger than 16MB
901	 * XXX: can we get a buffer that crosses 64KB boundaries?
902	 */
903
904	if (length > 0)
905		num_trbs++;
906	/*
	 * XXX: prepare_ring() is called here in place of Linux's
	 * prepare_transfer(), since we do not maintain multiple TDs per
	 * transfer at the same time.
910	 */
911	ret = prepare_ring(ctrl, ep_ring,
912				le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
913
914	if (ret < 0)
915		return ret;
916
917	/*
918	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
919	 * until we've finished creating all the other TRBs.  The ring's cycle
920	 * state may change as we enqueue the other TRBs, so save it too.
921	 */
922	start_trb = &ep_ring->enqueue->generic;
923	start_cycle = ep_ring->cycle_state;
924
925	debug("start_trb %p, start_cycle %d\n", start_trb, start_cycle);
926
927	/* Queue setup TRB - see section 6.4.1.2.1 */
928	/* FIXME better way to translate setup_packet into two u32 fields? */
929	field = 0;
930	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
931	if (start_cycle == 0)
932		field |= 0x1;
933
934	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
935	if (ctrl->hci_version >= 0x100 || ctrl->quirks & XHCI_MTK_HOST) {
936		if (length > 0) {
937			if (req->requesttype & USB_DIR_IN)
938				field |= TRB_TX_TYPE(TRB_DATA_IN);
939			else
940				field |= TRB_TX_TYPE(TRB_DATA_OUT);
941		}
942	}
943
944	debug("req->requesttype = %d, req->request = %d, req->value = %d, req->index = %d, req->length = %d\n",
945	      req->requesttype, req->request, le16_to_cpu(req->value),
946	      le16_to_cpu(req->index), le16_to_cpu(req->length));
947
948	trb_fields[0] = req->requesttype | req->request << 8 |
949				le16_to_cpu(req->value) << 16;
950	trb_fields[1] = le16_to_cpu(req->index) |
951			le16_to_cpu(req->length) << 16;
952	/* TRB_LEN | (TRB_INTR_TARGET) */
953	trb_fields[2] = (TRB_LEN(8) | TRB_INTR_TARGET(0));
954	/* Immediate data in pointer */
955	trb_fields[3] = field;
956	queue_trb(ctrl, ep_ring, true, trb_fields);
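
	/*
	 * Worked example (illustrative only): a standard GET_DESCRIPTOR
	 * (Device) request with requesttype 0x80, request 0x06, value 0x0100,
	 * index 0 and length 18 packs into trb_fields[0] = 0x01000680 and
	 * trb_fields[1] = 0x00120000.
	 */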
957
958	/* Re-initializing field to zero */
959	field = 0;
960	/* If there's data, queue data TRBs */
961	/* Only set interrupt on short packet for IN endpoints */
962	if (usb_pipein(pipe))
963		field = TRB_ISP | TRB_TYPE(TRB_DATA);
964	else
965		field = TRB_TYPE(TRB_DATA);
966
967	remainder = xhci_td_remainder(ctrl, 0, length, length,
968				      usb_maxpacket(udev, pipe), true);
969	length_field = TRB_LEN(length) | TRB_TD_SIZE(remainder) |
970		       TRB_INTR_TARGET(0);
	debug("length_field = %d, TRB_LEN(length) = %d, TRB_TD_SIZE(remainder) = %d, TRB_INTR_TARGET(0) = %d\n",
	      length_field, TRB_LEN(length), TRB_TD_SIZE(remainder), 0);
975
976	if (length > 0) {
977		if (req->requesttype & USB_DIR_IN)
978			field |= TRB_DIR_IN;
979		buf_64 = xhci_dma_map(ctrl, buffer, length);
980
981		trb_fields[0] = lower_32_bits(buf_64);
982		trb_fields[1] = upper_32_bits(buf_64);
983		trb_fields[2] = length_field;
984		trb_fields[3] = field | ep_ring->cycle_state;
985
986		xhci_flush_cache((uintptr_t)buffer, length);
987		queue_trb(ctrl, ep_ring, true, trb_fields);
988	}
989
990	/*
991	 * Queue status TRB -
992	 * see Table 7 and sections 4.11.2.2 and 6.4.1.2.3
993	 */
994
995	/* If the device sent data, the status stage is an OUT transfer */
996	field = 0;
997	if (length > 0 && req->requesttype & USB_DIR_IN)
998		field = 0;
999	else
1000		field = TRB_DIR_IN;
1001
1002	trb_fields[0] = 0;
1003	trb_fields[1] = 0;
1004	trb_fields[2] = TRB_INTR_TARGET(0);
	/* Event on completion */
	trb_fields[3] = field | TRB_IOC |
			TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state;
1008
1009	queue_trb(ctrl, ep_ring, false, trb_fields);
1010
1011	giveback_first_trb(udev, ep_index, start_cycle, start_trb);
1012
1013	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
1014	if (!event)
1015		goto abort;
1016	field = le32_to_cpu(event->trans_event.flags);
1017
1018	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
1019	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
1020
1021	record_transfer_result(udev, event, length);
1022	xhci_acknowledge_event(ctrl);
1023	if (udev->status == USB_ST_STALLED) {
1024		reset_ep(udev, ep_index);
1025		return -EPIPE;
1026	}
1027
1028	/* Invalidate buffer to make it available to usb-core */
1029	if (length > 0) {
1030		xhci_inval_cache((uintptr_t)buffer, length);
1031		xhci_dma_unmap(ctrl, buf_64, length);
1032	}
1033
1034	if (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
1035			== COMP_SHORT_TX) {
1036		/* Short data stage, clear up additional status stage event */
1037		event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
		if (!event)
			goto abort;
		field = le32_to_cpu(event->trans_event.flags);
		BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
		BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
1042		xhci_acknowledge_event(ctrl);
1043	}
1044
1045	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
1046
1047abort:
1048	debug("XHCI control transfer timed out, aborting...\n");
1049	abort_td(udev, ep_index);
1050	udev->status = USB_ST_NAK_REC;
1051	udev->act_len = 0;
1052	return -ETIMEDOUT;
1053}
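
/*
 * Illustrative sketch (compiled out): issuing a standard GET_DESCRIPTOR
 * (Device) control transfer through xhci_ctrl_tx(). Real callers go through
 * the generic USB core glue (submit_control_msg()); the 18-byte descriptor
 * buffer here is only an example.
 */
#if 0
static int example_get_device_descriptor(struct usb_device *udev, void *buf)
{
	struct devrequest req;
	unsigned long pipe = usb_rcvctrlpipe(udev, 0);

	req.requesttype = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
	req.request = USB_REQ_GET_DESCRIPTOR;
	req.value = cpu_to_le16(USB_DT_DEVICE << 8);
	req.index = cpu_to_le16(0);
	req.length = cpu_to_le16(18);

	return xhci_ctrl_tx(udev, pipe, &req, 18, buf);
}
#endif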
1054