// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame(struct usb_hcd *hcd);

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	/* case Q_TYPE_SITD: */
	default:
		return &periodic->sitd->sitd_next;
	}
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	/* others are hw parts */
	default:
		return periodic->hw_next;
	}
}

/* caller must hold ehci->lock */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

	if (!ehci->use_dummy_qh ||
	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
			!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}

/*-------------------------------------------------------------------------*/

/* Bandwidth and TT management */

/* Find the TT data structure for this device; create it if necessary */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	unsigned		port;
	bool			allocated_index = false;

	if (!utt)
		return NULL;		/* Not below a TT */

	/*
	 * Find/create our data structure.
	 * For hubs with a single TT, we get it directly.
	 * For hubs with multiple TTs, there's an extra level of pointers.
	 */
	tt_index = NULL;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		if (!tt_index) {		/* Create the index array */
			tt_index = kcalloc(utt->hub->maxchild,
					   sizeof(*tt_index),
					   GFP_ATOMIC);
			if (!tt_index)
				return ERR_PTR(-ENOMEM);
			utt->hcpriv = tt_index;
			allocated_index = true;
		}
		port = udev->ttport - 1;
		ptt = &tt_index[port];
	} else {
		port = 0;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt) {				/* Create the ehci_tt */
		struct ehci_hcd		*ehci =
				hcd_to_ehci(bus_to_hcd(udev->bus));

		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
		if (!tt) {
			if (allocated_index) {
				utt->hcpriv = NULL;
				kfree(tt_index);
			}
			return ERR_PTR(-ENOMEM);
		}
		list_add_tail(&tt->tt_list, &ehci->tt_list);
		INIT_LIST_HEAD(&tt->ps_list);
		tt->usb_tt = utt;
		tt->tt_port = port;
		*ptt = tt;
	}

	return tt;
}

/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	int			cnt, i;

	if (!utt || !utt->hcpriv)
		return;		/* Not below a TT, or never allocated */

	cnt = 0;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		ptt = &tt_index[udev->ttport - 1];

		/* How many entries are left in tt_index? */
		for (i = 0; i < utt->hub->maxchild; ++i)
			cnt += !!tt_index[i];
	} else {
		tt_index = NULL;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt || !list_empty(&tt->ps_list))
		return;		/* never allocated, or still in use */

	list_del(&tt->tt_list);
	*ptt = NULL;
	kfree(tt);
	if (cnt == 1) {
		utt->hcpriv = NULL;
		kfree(tt_index);
	}
}

static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
		struct ehci_per_sched *ps)
{
	dev_dbg(&ps->udev->dev,
			"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
			ps->ep->desc.bEndpointAddress,
			(sign >= 0 ? "reserve" : "release"), type,
			(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
			ps->phase, ps->phase_uf, ps->period,
			ps->usecs, ps->c_usecs, ps->cs_mask);
}
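
/*
 * Illustrative only (example numbers are made up): for a high-speed
 * interrupt IN endpoint 0x81 with bw_phase 2, phase_uf 1, bw_uperiod 64,
 * period 8, 10 usecs reserved and no complete-split, the line above would
 * read something like:
 *
 *	ep 81: reserve intr @ 17+64 (2.1+8) [10/0 us] mask 0002
 *
 * i.e. budget uframe 17, repeating every 64 uframes, S-mask bit 1 set.
 */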

static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
		struct ehci_qh *qh, int sign)
{
	unsigned		start_uf;
	unsigned		i, j, m;
	int			usecs = qh->ps.usecs;
	int			c_usecs = qh->ps.c_usecs;
	int			tt_usecs = qh->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	start_uf = qh->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "intr", &qh->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	/* Entire transaction (high speed) or start-split (full/low speed) */
	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
			i += qh->ps.bw_uperiod)
		ehci->bandwidth[i] += usecs;

	/* Complete-split (full/low speed) */
	if (qh->ps.c_usecs) {
		/* NOTE: adjustments needed for FSTN */
		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
				i += qh->ps.bw_uperiod) {
			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
				if (qh->ps.cs_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}
	}

	/* FS/LS bus bandwidth */
	if (tt_usecs) {
		/*
		 * find_tt() will not return any error here as we have
		 * already called find_tt() before calling this function
		 * and checked for any error return. The previous call
		 * would have created the data structure.
		 */
		tt = find_tt(qh->ps.udev);
		if (sign > 0)
			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
		else
			list_del(&qh->ps.ps_list);

		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += qh->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}
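
/*
 * Worked example (just arithmetic, values made up): a high-speed interrupt
 * endpoint with bw_uperiod 64 (one transaction every 8 frames), bw_phase 2,
 * phase_uf 1 and usecs 10 gives start_uf = 2 << 3 = 16, so the first loop
 * above adds 10 to ehci->bandwidth[17], [81], [145], ... on reserve
 * (sign > 0) and subtracts the same amount on release (sign < 0).  The
 * c_usecs and tt loops never run for it, since a high-speed endpoint has
 * no split transactions.
 */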

/*-------------------------------------------------------------------------*/

static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
		struct ehci_tt *tt)
{
	struct ehci_per_sched	*ps;
	unsigned		uframe, uf, x;
	u8			*budget_line;

	if (!tt)
		return;
	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);

	/* Add up the contributions from all the endpoints using this TT */
	list_for_each_entry(ps, &tt->ps_list, ps_list) {
		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
				uframe += ps->bw_uperiod) {
			budget_line = &budget_table[uframe];
			x = ps->tt_usecs;

			/* propagate the time forward */
			for (uf = ps->phase_uf; uf < 8; ++uf) {
				x += budget_line[uf];

				/* Each microframe lasts 125 us */
				if (x <= 125) {
					budget_line[uf] = x;
					break;
				}
				budget_line[uf] = 125;
				x -= 125;
			}
		}
	}
}
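
/*
 * Example of the carry logic above (illustrative numbers): an endpoint
 * with tt_usecs 300 and phase_uf 1 fills budget_line[1] and [2] with 125
 * each and budget_line[3] with the remaining 50, since each microframe
 * can carry at most 125 us of full/low-speed bus time.
 */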

static int __maybe_unused same_tt(struct usb_device *dev1,
		struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;

	for (i = 0; i < 7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
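
/*
 * For instance, tt_usecs = { 190, 0, ... } becomes { 125, 65, 0, ... }:
 * the 65 us that don't fit in uframe 0 spill into uframe 1.  Note that
 * max_tt_usecs[] leaves only 30 us in uframe 6 and none in uframe 7, so
 * any bandwidth carried into uframe 7 makes tt_available() fail.
 */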

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available(
	struct ehci_hcd		*ehci,
	struct ehci_per_sched	*ps,
	struct ehci_tt		*tt,
	unsigned		frame,
	unsigned		uframe
)
{
	unsigned		period = ps->bw_period;
	unsigned		usecs = ps->tt_usecs;

	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
			frame += period) {
		unsigned	i, uf;
		unsigned short	tt_usecs[8];

		if (tt->bandwidth[frame] + usecs > 900)
			return 0;

		uf = frame << 3;
		for (i = 0; i < 8; (++i, ++uf))
			tt_usecs[i] = ehci->tt_budget[uf];

		if (max_tt_usecs[uframe] <= tt_usecs[uframe])
			return 0;

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (usecs > 125) {
			int ufs = (usecs / 125);

			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (tt_usecs[i] > 0)
					return 0;
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7])
			return 0;
	}

	return 1;
}
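
/*
 * Example of the "larger than 125 us" rule above (illustrative numbers):
 * a 300 us full-speed iso transfer fully occupies usecs / 125 = 2
 * microframes, so both the starting uframe and the next one must be
 * completely empty; only the 50 us remainder may then share a microframe
 * with other traffic.
 */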

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision(
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage:  split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__hc32			type;
		struct ehci_qh_hw	*hw;

		here = ehci->pshadow[frame];
		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt(dev, here.qh->ps.udev)) {
					u32		mask;

					mask = hc32_to_cpu(ehci,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, hw->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt(dev, here.sitd->urb->dev)) {
					u16		mask;

					mask = hc32_to_cpu(ehci, here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			/* case Q_TYPE_FSTN: */
			default:
				ehci_dbg(ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{
	if (ehci->periodic_count++)
		goto out;

	/* Stop waiting to turn off the periodic schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

	/* Don't start the schedule until PSS is 0 */
	ehci_poll_PSS(ehci);
out:
	turn_on_io_watchdog(ehci);
}

static void disable_periodic(struct ehci_hcd *ehci)
{
	if (--ehci->periodic_count)
		return;

	/* Don't turn off the schedule until PSS is 1 */
	ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period = qh->ps.period;

	dev_dbg(&qh->ps.udev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
			& (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
		union ehci_shadow	*prev = &ehci->pshadow[i];
		__hc32			*hw_p = &ehci->periodic[i];
		union ehci_shadow	here = *prev;
		__hc32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(ehci, *hw_p);
			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
				break;
			prev = periodic_next_shadow(ehci, prev, type);
			hw_p = shadow_next_periodic(ehci, &here, type);
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->ps.period > here.qh->ps.period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw->hw_next = *hw_p;
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(ehci, qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh->unlink_reason = 0;

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	list_add(&qh->intr_node, &ehci->intr_qh_list);

	/* maybe enable periodic schedule processing */
	++ehci->intr_count;
	enable_periodic(ehci);
}

static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period;

	/*
	 * If qh is for a low/full-speed device, simply unlinking it
	 * could interfere with an ongoing split transaction.  To unlink
	 * it safely would require setting the QH_INACTIVATE bit and
	 * waiting at least one frame, as described in EHCI 4.12.2.5.
	 *
	 * We won't bother with any of this.  Instead, we assume that the
	 * only reason for unlinking an interrupt QH while the current URB
	 * is still active is to dequeue all the URBs (flush the whole
	 * endpoint queue).
	 *
	 * If rebalancing the periodic schedule is ever implemented, this
	 * approach will no longer be valid.
	 */

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->ps.period ? : 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
		periodic_unlink(ehci, i, qh);

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	dev_dbg(&qh->ps.udev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->ps.period,
		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;

	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = list_entry(qh->intr_node.next,
				struct ehci_qh, intr_node);
	list_del(&qh->intr_node);
}

static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (qh->qh_state != QH_STATE_LINKED ||
			list_empty(&qh->unlink_node))
		return;

	list_del_init(&qh->unlink_node);

	/*
	 * TODO: disable the EHCI_HRTIMER_START_UNLINK_INTR event to avoid
	 * an unnecessary CPU wakeup
	 */
}

static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	/* if the qh is waiting for unlink, cancel it now */
	cancel_unlink_wait_intr(ehci, qh);

	qh_unlink_periodic(ehci, qh);

	/* Make sure the unlinks are visible before starting the timer */
	wmb();

	/*
	 * The EHCI spec doesn't say how long it takes the controller to
	 * stop accessing an unlinked interrupt QH.  The timer delay is
	 * 9 uframes; presumably that will be long enough.
	 */
	qh->unlink_cycle = ehci->intr_unlink_cycle;

	/* New entries go at the end of the intr_unlink list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);

	if (ehci->intr_unlinking)
		;	/* Avoid recursive calls */
	else if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_intr_unlinks(ehci);
	else if (ehci->intr_unlink.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
}

/*
 * It is common for only one interrupt URB to be scheduled on a qh, and
 * complete() runs in tasklet context, so introduce a short delay before
 * unlinking the qh, to avoid unlinking it too early.
 */
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
				   struct ehci_qh *qh)
{
	qh->unlink_cycle = ehci->intr_unlink_wait_cycle;

	/* New entries go at the end of the intr_unlink_wait list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);

	if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_start_intr_unlinks(ehci);
	else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
		++ehci->intr_unlink_wait_cycle;
	}
}

static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh_hw	*hw = qh->hw;
	int			rc;

	qh->qh_state = QH_STATE_IDLE;
	hw->hw_next = EHCI_LIST_END(ehci);

	if (!list_empty(&qh->qtd_list))
		qh_completions(ehci, qh);

	/* reschedule QH iff another request is queued */
	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
		rc = qh_schedule(ehci, qh);
		if (rc == 0) {
			qh_refresh(ehci, qh);
			qh_link_periodic(ehci, qh);
		}

		/* An error here likely indicates handshake failure
		 * or no space left in the schedule.  Neither fault
		 * should happen often ...
		 *
		 * FIXME kill the now-dysfunctional queued urbs
		 */
		else {
			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
					qh, rc);
		}
	}

	/* maybe turn off periodic schedule */
	--ehci->intr_count;
	disable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

static int check_period(
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	uperiod,
	unsigned	usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - usecs;

	for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}

	/* success! */
	return 1;
}
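
/*
 * The inversion above turns "claimed + needed <= limit?" into a single
 * compare per slot.  For example, with uframe_periodic_max = 100 (the
 * default 80% of a 125 us microframe) and usecs = 10, the loop rejects
 * any uframe whose already-claimed bandwidth exceeds 90 us.
 */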

static int check_intr_schedule(
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	struct ehci_qh		*qh,
	unsigned		*c_maskp,
	struct ehci_tt		*tt
)
{
	int		retval = -ENOSPC;
	u8		mask = 0;

	if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
		goto done;

	if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
		goto done;
	if (!qh->ps.c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
		unsigned i;

		/* TODO: this may need FSTN for SSPLIT in uframe 5. */
		for (i = uframe+2; i < 8 && i <= uframe+4; i++)
			if (!check_period(ehci, frame, i,
					qh->ps.bw_uperiod, qh->ps.c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;

		*c_maskp = mask;
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = mask;

	mask |= 1 << uframe;
	if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
		if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		if (!check_period(ehci, frame, uframe + qh->gap_uf,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}
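
/*
 * Illustrative mask arithmetic for the new scheduler above (made-up
 * values): with uframe 1, the loop probes CSPLITs in uframes 3-5, giving
 * mask 0x38.  qh_schedule() then stores
 * cs_mask = (0x38 << 8) | (1 << 1) = 0x3802: S-mask bit 1 for the SSPLIT
 * and C-mask bits 3-5 for the CSPLITs.
 */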

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status = 0;
	unsigned	uframe;
	unsigned	c_mask;
	struct ehci_qh_hw	*hw = qh->hw;
	struct ehci_tt		*tt;

	hw->hw_next = EHCI_LIST_END(ehci);

	/* reuse the previous schedule slots, if we can */
	if (qh->ps.phase != NO_FRAME) {
		ehci_dbg(ehci, "reused qh %p schedule\n", qh);
		return 0;
	}

	uframe = 0;
	c_mask = 0;
	tt = find_tt(qh->ps.udev);
	if (IS_ERR(tt)) {
		status = PTR_ERR(tt);
		goto done;
	}
	compute_tt_budget(ehci->tt_budget, tt);

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	/* "normal" case, uframing flexible except with splits */
	if (qh->ps.bw_period) {
		int		i;
		unsigned	frame;

		for (i = qh->ps.bw_period; i > 0; --i) {
			frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule(ehci,
						frame, uframe, qh, &c_mask, tt);
				if (status == 0)
					goto got_it;
			}
		}

	/* qh->ps.bw_period == 0 means every uframe */
	} else {
		status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
	}
	if (status)
		goto done;

 got_it:
	qh->ps.phase = (qh->ps.period ? ehci->random_frame &
			(qh->ps.period - 1) : 0);
	qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
	qh->ps.phase_uf = uframe;
	qh->ps.cs_mask = qh->ps.period ?
			(c_mask << 8) | (1 << uframe) :
			QH_SMASK;

	/* reset S-frame and (maybe) C-frame masks */
	hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
	hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
	reserve_release_intr_bandwidth(ehci, qh, 1);

done:
	return status;
}

static int intr_submit(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	int			status;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&ehci->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(ehci, qh);
		if (status)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* stuff into the periodic schedule */
	if (qh->qh_state == QH_STATE_IDLE) {
		qh_refresh(ehci, qh);
		qh_link_periodic(ehci, qh);
	} else {
		/* cancel unlink wait for the qh */
		cancel_unlink_wait_intr(ehci, qh);
	}

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	if (unlikely(status))
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
	if (status)
		qtd_list_free(ehci, urb, qtd_list);

	return status;
}

static void scan_intr(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;

	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
			intr_node) {

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in qh_unlink_periodic().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp))
				start_unlink_intr(ehci, qh);
			else if (unlikely(list_empty(&qh->qtd_list) &&
					qh->qh_state == QH_STATE_LINKED))
				start_unlink_intr_wait(ehci, qh);
		}
	}
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc(gfp_t mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kzalloc(sizeof(*stream), mem_flags);
	if (likely(stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = NO_FRAME;
		stream->ps.phase = NO_FRAME;
	}
	return stream;
}

static void
iso_stream_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	struct usb_device	*dev = urb->dev;
	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	unsigned		tmp;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint(urb->pipe);
	is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
	maxp = usb_endpoint_maxp(&urb->ep->desc);
	buf1 = is_input ? 1 << 11 : 0;

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);

		stream->highspeed = 1;

		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_hc32(ehci, buf1);
		stream->buf2 = cpu_to_hc32(ehci, multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->ps.usecs = HS_USECS_ISO(maxp);

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);

		stream->uperiod = urb->interval;
		stream->ps.period = urb->interval >> 3;
		stream->bandwidth = stream->ps.usecs * 8 /
				stream->ps.bw_uperiod;

	} else {
		u32		addr;
		int		think_time;
		int		hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->ps.usecs = HS_USECS_ISO(maxp);
		think_time = dev->tt->think_time;
		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
				dev->speed, is_input, 1, maxp));
		hs_transfers = max(1u, (maxp + 187) / 188);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			stream->ps.c_usecs = stream->ps.usecs;
			stream->ps.usecs = HS_USECS_ISO(1);
			stream->ps.cs_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->ps.cs_mask |= tmp << (8 + 2);
		} else
			stream->ps.cs_mask = smask_out[hs_transfers - 1];

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
		stream->ps.bw_uperiod = stream->ps.bw_period << 3;

		stream->ps.period = urb->interval;
		stream->uperiod = urb->interval << 3;
		stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
				stream->ps.bw_period;

		/* stream->splits gets created from cs_mask later */
		stream->address = cpu_to_hc32(ehci, addr);
	}

	stream->ps.udev = dev;
	stream->ps.ep = urb->ep;

	stream->bEndpointAddress = is_input | epnum;
	stream->maxp = maxp;
}
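
/*
 * Split-iso mask example (values made up for illustration): a full-speed
 * iso IN endpoint with maxp 192 needs hs_transfers = (192 + 187) / 188 = 2,
 * so cs_mask = 1 | (((1 << 4) - 1) << 10) = 0x3c01: one SSPLIT in relative
 * uframe 0 and CSPLITs in relative uframes 2-5, per USB 2.0 11.18.4 3.c.
 * The same endpoint as OUT would instead use smask_out[1] = 0x03, i.e.
 * start-splits in uframes 0 and 1 and no complete-split.
 */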

static struct ehci_iso_stream *
iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct ehci_iso_stream	*stream;
	struct usb_host_endpoint *ep;
	unsigned long		flags;

	epnum = usb_pipeendpoint(urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave(&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely(stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely(stream != NULL)) {
			ep->hcpriv = stream;
			iso_stream_init(ehci, stream, urb);
		}

	/* if dev->ep[epnum] is a QH, hw is set */
	} else if (unlikely(stream->hw != NULL)) {
		ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched	*iso_sched;

	iso_sched = kzalloc(struct_size(iso_sched, packet, packets), mem_flags);
	if (likely(iso_sched != NULL))
		INIT_LIST_HEAD(&iso_sched->td_list);

	return iso_sched;
}

static inline void
itd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->uperiod;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*uframe = &iso_sched->packet[i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc[i].length;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely(((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}

static void
iso_sched_free(
	struct ehci_iso_stream	*stream,
	struct ehci_iso_sched	*iso_sched
)
{
	if (!iso_sched)
		return;
	/* caller must hold ehci->lock! */
	list_splice(&iso_sched->td_list, &stream->free_list);
	kfree(iso_sched);
}

static int
itd_urb_transaction(
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (unlikely(sched == NULL))
		return -ENOMEM;

	itd_sched_init(ehci, sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/*
		 * Use iTDs from the free list, but not iTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			itd = list_first_entry(&stream->free_list,
					struct ehci_itd, itd_list);
			if (itd->frame == ehci->now_frame)
				goto alloc_itd;
			list_del(&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
 alloc_itd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(itd, 0, sizeof(*itd));
		itd->itd_dma = itd_dma;
		itd->frame = NO_FRAME;
		list_add(&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore(&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}

/*-------------------------------------------------------------------------*/

static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
		struct ehci_iso_stream *stream, int sign)
{
	unsigned		uframe;
	unsigned		i, j;
	unsigned		s_mask, c_mask, m;
	int			usecs = stream->ps.usecs;
	int			c_usecs = stream->ps.c_usecs;
	int			tt_usecs = stream->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	uframe = stream->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "iso", &stream->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	if (!stream->splits) {		/* High speed */
		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod)
			ehci->bandwidth[i] += usecs;

	} else {			/* Full speed */
		s_mask = stream->ps.cs_mask;
		c_mask = s_mask >> 8;

		/* NOTE: adjustment needed for frame overflow */
		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod) {
			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
					(++j, m <<= 1)) {
				if (s_mask & m)
					ehci->bandwidth[i+j] += usecs;
				else if (c_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}

		/*
		 * find_tt() will not return any error here as we have
		 * already called find_tt() before calling this function
		 * and checked for any error return. The previous call
		 * would have created the data structure.
		 */
		tt = find_tt(stream->ps.udev);
		if (sign > 0)
			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
		else
			list_del(&stream->ps.ps_list);

		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += stream->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}

static inline int
itd_slot_ok(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe
)
{
	unsigned		usecs;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - stream->ps.usecs;

	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += stream->ps.bw_uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}
	return 1;
}

static inline int
sitd_slot_ok(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe,
	struct ehci_iso_sched	*sched,
	struct ehci_tt		*tt
)
{
	unsigned		mask, tmp;
	unsigned		frame, uf;

	mask = stream->ps.cs_mask << (uframe & 7);

	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
		return 0;

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* check bandwidth */
	uframe &= stream->ps.bw_uperiod - 1;
	frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	/* The tt's fullspeed bus bandwidth must be available.
	 * tt_available scheduling guarantees 10+% for control/bulk.
	 */
	uf = uframe & 7;
	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
		return 0;
#else
	/* tt must be idle for start(s), any gap, and csplit.
	 * assume scheduling slop leaves 10+% for control/bulk.
	 */
	if (!tt_no_collision(ehci, stream->ps.bw_period,
			stream->ps.udev, frame, mask))
		return 0;
#endif

	do {
		unsigned	max_used;
		unsigned	i;

		/* check starts (OUT uses more than one) */
		uf = uframe;
		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (ehci->bandwidth[uf] > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->ps.c_usecs) {
			max_used = ehci->uframe_periodic_max -
					stream->ps.c_usecs;
			uf = uframe & ~7;
			tmp = 1 << (2+8);
			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
				if ((stream->ps.cs_mask & tmp) == 0)
					continue;
				if (ehci->bandwidth[uf+i] > max_used)
					return 0;
			}
		}

		uframe += stream->ps.bw_uperiod;
	} while (uframe < EHCI_BANDWIDTH_SIZE);

	stream->ps.cs_mask <<= uframe & 7;
	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
	return 1;
}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size of
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

static int
iso_stream_schedule(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, base, next, start, period, span, now2;
	u32			wrap = 0, skip = 0;
	int			status = 0;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	bool			empty = list_empty(&stream->td_list);
	bool			new_stream = false;

	period = stream->uperiod;
	span = sched->span;
	if (!stream->highspeed)
		span <<= 3;

	/* Start a new isochronous stream? */
	if (unlikely(empty && !hcd_periodic_completion_in_progress(
			ehci_to_hcd(ehci), urb->ep))) {

		/* Schedule the endpoint */
		if (stream->ps.phase == NO_FRAME) {
			int		done = 0;
			struct ehci_tt	*tt = find_tt(stream->ps.udev);

			if (IS_ERR(tt)) {
				status = PTR_ERR(tt);
				goto fail;
			}
			compute_tt_budget(ehci->tt_budget, tt);

			start = ((-(++ehci->random_frame)) << 3) & (period - 1);

			/* find a uframe slot with enough bandwidth.
			 * Early uframes are more precious because full-speed
			 * iso IN transfers can't use late uframes,
			 * and therefore they should be allocated last.
			 */
			next = start;
			start += period;
			do {
				start--;
				/* check schedule: enough space? */
				if (stream->highspeed) {
					if (itd_slot_ok(ehci, stream, start))
						done = 1;
				} else {
					if ((start % 8) >= 6)
						continue;
					if (sitd_slot_ok(ehci, stream, start,
							sched, tt))
						done = 1;
				}
			} while (start > next && !done);

			/* no room in the schedule */
			if (!done) {
				ehci_dbg(ehci, "iso sched full %p\n", urb);
				status = -ENOSPC;
				goto fail;
			}
			stream->ps.phase = (start >> 3) &
					(stream->ps.period - 1);
			stream->ps.bw_phase = stream->ps.phase &
					(stream->ps.bw_period - 1);
			stream->ps.phase_uf = start & 7;
			reserve_release_iso_bandwidth(ehci, stream, 1);
		}

		/* New stream is already scheduled; use the upcoming slot */
		else {
			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
		}

		stream->next_uframe = start;
		new_stream = true;
	}

	now = ehci_read_frame_index(ehci) & (mod - 1);

	/* Take the isochronous scheduling threshold into account */
	if (ehci->i_thresh)
		next = now + ehci->i_thresh;	/* uframe cache */
	else
		next = (now + 2 + 7) & ~0x07;	/* full frame cache */

	/* If needed, initialize last_iso_frame so that this URB will be seen */
	if (ehci->isoc_count == 0)
		ehci->last_iso_frame = now >> 3;

	/*
	 * Use ehci->last_iso_frame as the base.  There can't be any
	 * TDs scheduled for earlier than that.
	 */
	base = ehci->last_iso_frame << 3;
	next = (next - base) & (mod - 1);
	start = (stream->next_uframe - base) & (mod - 1);

	if (unlikely(new_stream))
		goto do_ASAP;

	/*
	 * Typical case: reuse current schedule, stream may still be active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc).  If there are, the behavior depends on
	 * whether URB_ISO_ASAP is set.
	 */
	now2 = (now - base) & (mod - 1);

	/* Is the schedule about to wrap around? */
	if (unlikely(!empty && start < period)) {
		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
				urb, stream->next_uframe, base, period, mod);
		status = -EFBIG;
		goto fail;
	}

	/* Is the next packet scheduled after the base time? */
	if (likely(!empty || start <= now2 + period)) {

		/* URB_ISO_ASAP: make sure that start >= next */
		if (unlikely(start < next &&
				(urb->transfer_flags & URB_ISO_ASAP)))
			goto do_ASAP;

		/* Otherwise use start, if it's not in the past */
		if (likely(start >= now2))
			goto use_start;

	/* Otherwise we got an underrun while the queue was empty */
	} else {
		if (urb->transfer_flags & URB_ISO_ASAP)
			goto do_ASAP;
		wrap = mod;
		now2 += mod;
	}

	/* How many uframes and packets do we need to skip? */
	skip = (now2 - start + period - 1) & -period;
	if (skip >= span) {		/* Entirely in the past? */
		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
				urb, start + base, span - period, now2 + base,
				base);

		/* Try to keep the last TD intact for scanning later */
		skip = span - period;

		/* Will it come before the current scan position? */
		if (empty) {
			skip = span;	/* Skip the entire URB */
			status = 1;	/* and give it back immediately */
			iso_sched_free(stream, sched);
			sched = NULL;
		}
	}
	urb->error_count = skip / period;
	if (sched)
		sched->first_packet = urb->error_count;
	goto use_start;

 do_ASAP:
	/* Use the first slot after "next" */
	start = next + ((start - next) & (period - 1));

 use_start:
	/* Tried to schedule too far into the future? */
	if (unlikely(start + span - period >= mod + wrap)) {
		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
				urb, start, span - period, mod + wrap);
		status = -EFBIG;
		goto fail;
	}

	start += base;
	stream->next_uframe = (start + skip) & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = start & (mod - 1);
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return status;

 fail:
	iso_sched_free(stream, sched);
	urb->hcpriv = NULL;
	return status;
}

/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp[0] = stream->buf0;
	itd->hw_bufp[1] = stream->buf1;
	itd->hw_bufp[2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
	unsigned		pg = itd->pg;

	/* BUG_ON(pg == 6 && uf->cross); */

	uframe &= 0x07;
	itd->index[uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely(uf->cross)) {
		u64	bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}
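
/*
 * Example of the page-cross fixup above (illustrative numbers): if a
 * 1024-byte packet starts at offset 0xe00 within buffer page pg, its
 * last 512 bytes live in the next 4 KB page, so hw_bufp[pg + 1] is
 * seeded with bufp + 4096 and itd->pg advances; later packets in this
 * iTD then continue from that page.
 */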
1713
1714static inline void
1715itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1716{
1717	union ehci_shadow	*prev = &ehci->pshadow[frame];
1718	__hc32			*hw_p = &ehci->periodic[frame];
1719	union ehci_shadow	here = *prev;
1720	__hc32			type = 0;
1721
1722	/* skip any iso nodes which might belong to previous microframes */
1723	while (here.ptr) {
1724		type = Q_NEXT_TYPE(ehci, *hw_p);
1725		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1726			break;
1727		prev = periodic_next_shadow(ehci, prev, type);
1728		hw_p = shadow_next_periodic(ehci, &here, type);
1729		here = *prev;
1730	}
1731
1732	itd->itd_next = here;
1733	itd->hw_next = *hw_p;
1734	prev->itd = itd;
1735	itd->frame = frame;
1736	wmb();
1737	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1738}
1739
1740/* fit urb's itds into the selected schedule slot; activate as needed */
1741static void itd_link_urb(
1742	struct ehci_hcd		*ehci,
1743	struct urb		*urb,
1744	unsigned		mod,
1745	struct ehci_iso_stream	*stream
1746)
1747{
1748	int			packet;
1749	unsigned		next_uframe, uframe, frame;
1750	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
1751	struct ehci_itd		*itd;
1752
1753	next_uframe = stream->next_uframe & (mod - 1);
1754
1755	if (unlikely(list_empty(&stream->td_list)))
1756		ehci_to_hcd(ehci)->self.bandwidth_allocated
1757				+= stream->bandwidth;
1758
1759	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1760		if (ehci->amd_pll_fix == 1)
1761			usb_amd_quirk_pll_disable();
1762	}
1763
1764	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1765
1766	/* fill iTDs uframe by uframe */
1767	for (packet = iso_sched->first_packet, itd = NULL;
1768			packet < urb->number_of_packets;) {
1769		if (itd == NULL) {
1770			/* ASSERT:  we have all necessary itds */
1771			/* BUG_ON(list_empty(&iso_sched->td_list)); */
1772
1773			/* ASSERT:  no itds for this endpoint in this uframe */
1774
1775			itd = list_entry(iso_sched->td_list.next,
1776					struct ehci_itd, itd_list);
1777			list_move_tail(&itd->itd_list, &stream->td_list);
1778			itd->stream = stream;
1779			itd->urb = urb;
1780			itd_init(ehci, stream, itd);
1781		}
1782
1783		uframe = next_uframe & 0x07;
1784		frame = next_uframe >> 3;
1785
1786		itd_patch(ehci, itd, iso_sched, packet, uframe);
1787
1788		next_uframe += stream->uperiod;
1789		next_uframe &= mod - 1;
1790		packet++;
1791
1792		/* link completed itds into the schedule */
1793		if (((next_uframe >> 3) != frame)
1794				|| packet == urb->number_of_packets) {
1795			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1796			itd = NULL;
1797		}
1798	}
1799	stream->next_uframe = next_uframe;
1800
1801	/* don't need that schedule data any more */
1802	iso_sched_free(stream, iso_sched);
1803	urb->hcpriv = stream;
1804
1805	++ehci->isoc_count;
1806	enable_periodic(ehci);
1807}
1808
1809#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
1810
1811/* Process and recycle a completed ITD.  Return true iff its urb completed,
1812 * and hence its completion callback probably added things to the hardware
1813 * schedule.
1814 *
1815 * Note that we carefully avoid recycling this descriptor until after any
1816 * completion callback runs, so that it won't be reused quickly.  That is,
1817 * assuming (a) no more than two urbs per frame on this endpoint, and also
1818 * (b) only this endpoint's completions submit URBs.  It seems some silicon
1819 * corrupts things if you reuse completed descriptors very quickly...
1820 */
1821static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1822{
1823	struct urb				*urb = itd->urb;
1824	struct usb_iso_packet_descriptor	*desc;
1825	u32					t;
1826	unsigned				uframe;
1827	int					urb_index = -1;
1828	struct ehci_iso_stream			*stream = itd->stream;
1829	bool					retval = false;
1830
1831	/* for each uframe with a packet */
1832	for (uframe = 0; uframe < 8; uframe++) {
1833		if (likely(itd->index[uframe] == -1))
1834			continue;
1835		urb_index = itd->index[uframe];
1836		desc = &urb->iso_frame_desc[urb_index];
1837
1838		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
1839		itd->hw_transaction[uframe] = 0;
1840
1841		/* report transfer status */
1842		if (unlikely(t & ISO_ERRS)) {
1843			urb->error_count++;
1844			if (t & EHCI_ISOC_BUF_ERR)
1845				desc->status = usb_pipein(urb->pipe)
1846					? -ENOSR  /* hc couldn't read */
1847					: -ECOMM; /* hc couldn't write */
1848			else if (t & EHCI_ISOC_BABBLE)
1849				desc->status = -EOVERFLOW;
1850			else /* (t & EHCI_ISOC_XACTERR) */
1851				desc->status = -EPROTO;
1852
1853			/* HC need not update length with this error */
1854			if (!(t & EHCI_ISOC_BABBLE)) {
1855				desc->actual_length = EHCI_ITD_LENGTH(t);
1856				urb->actual_length += desc->actual_length;
1857			}
1858		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
1859			desc->status = 0;
1860			desc->actual_length = EHCI_ITD_LENGTH(t);
1861			urb->actual_length += desc->actual_length;
1862		} else {
1863			/* URB was too late */
1864			urb->error_count++;
1865		}
1866	}
1867
1868	/* handle completion now? */
1869	if (likely((urb_index + 1) != urb->number_of_packets))
1870		goto done;
1871
1872	/*
1873	 * ASSERT: it's really the last itd for this urb
1874	 * list_for_each_entry (itd, &stream->td_list, itd_list)
1875	 *	 BUG_ON(itd->urb == urb);
1876	 */
1877
1878	/* give urb back to the driver; completion often (re)submits */
1879	ehci_urb_done(ehci, urb, 0);
1880	retval = true;
1881	urb = NULL;
1882
1883	--ehci->isoc_count;
1884	disable_periodic(ehci);
1885
1886	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1887	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1888		if (ehci->amd_pll_fix == 1)
1889			usb_amd_quirk_pll_enable();
1890	}
1891
1892	if (unlikely(list_is_singular(&stream->td_list)))
1893		ehci_to_hcd(ehci)->self.bandwidth_allocated
1894				-= stream->bandwidth;
1895
1896done:
1897	itd->urb = NULL;
1898
1899	/* Add to the end of the free list for later reuse */
1900	list_move_tail(&itd->itd_list, &stream->free_list);
1901
1902	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1903	if (list_empty(&stream->td_list)) {
1904		list_splice_tail_init(&stream->free_list,
1905				&ehci->cached_itd_list);
1906		start_free_itds(ehci);
1907	}
1908
1909	return retval;
1910}
1911
1912/*-------------------------------------------------------------------------*/
1913
static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (unlikely(stream == NULL)) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely(urb->interval != stream->uperiod)) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
			stream->uperiod, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
	if (unlikely(status < 0)) {
		ehci_dbg(ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
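		/*
		 * A positive return from iso_stream_schedule() means the
		 * URB missed its scheduling window; give it back right
		 * away instead of linking any TDs.
		 */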
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */

static inline void
sitd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->ps.period;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*packet = &iso_sched->packet[i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc[i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
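		/*
		 * Each start-split carries at most 188 bytes of OUT data
		 * (all that fits on the full/low-speed bus in one
		 * microframe), so round up to get the number of
		 * start-splits for the siTD's T-count field.
		 */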
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}

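/*
 * Allocate and pre-initialize the sITDs needed for this URB, reusing
 * descriptors from the stream's free list when they are safely out of
 * reach of the hardware.
 */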
static int
sitd_urb_transaction(
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_sitd	*sitd;
	dma_addr_t		sitd_dma;
	int			i;
	struct ehci_iso_sched	*iso_sched;
	unsigned long		flags;

	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE:  for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					 struct ehci_sitd, sitd_list);
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del(&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
 alloc_sitd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(sitd, 0, sizeof(*sitd));
		sitd->sitd_dma = sitd_dma;
		sitd->frame = NO_FRAME;
		list_add(&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore(&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

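/*
 * Fill in one siTD's hardware fields from the per-packet values that
 * sitd_sched_init() precomputed.
 */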
static inline void
sitd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct ehci_sitd	*sitd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
	u64			bufp;

	sitd->hw_next = EHCI_LIST_END(ehci);
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END(ehci);

	bufp = uf->bufp;
	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);

	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
	sitd->index = index;
}

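/* link an siTD at the head of this frame's periodic list */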
static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow[frame];
	sitd->hw_next = ehci->periodic[frame];
	ehci->pshadow[frame].sitd = sitd;
	sitd->frame = frame;
	wmb();
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}

/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	struct ehci_sitd	*sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list))
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = sched->first_packet, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT:  we have all necessary sitds */
		BUG_ON(list_empty(&sched->td_list));

		/* ASSERT:  no itds for this endpoint in this frame */

		sitd = list_entry(sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail(&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;
		sitd->urb = urb;

		sitd_patch(ehci, stream, sitd, sched, packet);
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->uperiod;
	}
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free(stream, sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

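/* status bits reporting a failed split transaction */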
#define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
				| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
	struct urb				*urb = sitd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	int					urb_index;
	struct ehci_iso_stream			*stream = sitd->stream;
	bool					retval = false;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc[urb_index];
	t = hc32_to_cpup(ehci, &sitd->hw_results);

	/* report transfer status */
	if (unlikely(t & SITD_ERRS)) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein(urb->pipe)
				? -ENOSR  /* hc couldn't read */
				: -ECOMM; /* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else if (unlikely(t & SITD_STS_ACTIVE)) {
		/* URB was too late */
		urb->error_count++;
	} else {
		desc->status = 0;
		desc->actual_length = desc->length - SITD_LENGTH(t);
		urb->actual_length += desc->actual_length;
	}

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		goto done;

	/*
	 * ASSERT: it's really the last sitd for this urb
	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
	 *	 BUG_ON(sitd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (list_is_singular(&stream->td_list))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	sitd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&sitd->sitd_list, &stream->free_list);

	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_sitd_list);
		start_free_itds(ehci);
	}

	return retval;
}

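/*
 * Submit an isochronous URB for a full-speed device behind a TT: find
 * or create the iso_stream, allocate the sITDs, then schedule and link
 * them under ehci->lock.  Mirrors itd_submit() above.
 */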
static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (stream == NULL) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->ps.period) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
			stream->ps.period, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg(ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
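		/* as in itd_submit(): the URB was too late; give it back now */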
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/

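/*
 * Scan the periodic schedule for completed iTDs and siTDs, from the last
 * frame scanned up through the frame the controller is working on now,
 * unlinking and completing any descriptors that have finished.
 */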
static void scan_isoc(struct ehci_hcd *ehci)
{
	unsigned		uf, now_frame, frame;
	unsigned		fmask = ehci->periodic_size - 1;
	bool			modified, live;
	union ehci_shadow	q, *q_p;
	__hc32			type, *hw_p;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	if (ehci->rh_state >= EHCI_RH_RUNNING) {
		uf = ehci_read_frame_index(ehci);
		now_frame = (uf >> 3) & fmask;
		live = true;
	} else {
		now_frame = (ehci->last_iso_frame - 1) & fmask;
		live = false;
	}
	ehci->now_frame = now_frame;

	frame = ehci->last_iso_frame;

restart:
	/* Scan each element in frame's queue for completions */
	q_p = &ehci->pshadow[frame];
	hw_p = &ehci->periodic[frame];
	q.ptr = q_p->ptr;
	type = Q_NEXT_TYPE(ehci, *hw_p);
	modified = false;

	while (q.ptr != NULL) {
		switch (hc32_to_cpu(ehci, type)) {
		case Q_TYPE_ITD:
			/*
			 * If this ITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
			if (frame == now_frame && live) {
				rmb();
				for (uf = 0; uf < 8; uf++) {
					if (q.itd->hw_transaction[uf] &
							ITD_ACTIVE(ehci))
						break;
				}
				if (uf < 8) {
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE(ehci,
							q.itd->hw_next);
					q = *q_p;
					break;
				}
			}

			/*
			 * Take finished ITDs out of the schedule
			 * and process them:  recycle, maybe report
			 * URB completion.  HC won't cache the
			 * pointer for much longer, if at all.
			 */
			*q_p = q.itd->itd_next;
			if (!ehci->use_dummy_qh ||
					q.itd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.itd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
			wmb();
			modified = itd_complete(ehci, q.itd);
			q = *q_p;
			break;
		case Q_TYPE_SITD:
			/*
			 * If this SITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current or the one just before it:
			 * an siTD's complete-splits can extend into
			 * the frame after the one it was scheduled in.
			 */
			if (((frame == now_frame) ||
					(((frame + 1) & fmask) == now_frame))
				&& live
				&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {

				q_p = &q.sitd->sitd_next;
				hw_p = &q.sitd->hw_next;
				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
				q = *q_p;
				break;
			}

			/*
			 * Take finished SITDs out of the schedule
			 * and process them:  recycle, maybe report
			 * URB completion.
			 */
			*q_p = q.sitd->sitd_next;
			if (!ehci->use_dummy_qh ||
					q.sitd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.sitd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
			wmb();
			modified = sitd_complete(ehci, q.sitd);
			q = *q_p;
			break;
		default:
			ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
			/* BUG(); */
			fallthrough;
		case Q_TYPE_QH:
		case Q_TYPE_FSTN:
			/* End of the iTDs and siTDs */
			q.ptr = NULL;
			break;
		}

		/* Assume completion callbacks modify the queue */
		if (unlikely(modified && ehci->isoc_count > 0))
			goto restart;
	}

	/* Stop when we have reached the current frame */
	if (frame == now_frame)
		return;

	/* The last frame may still have active siTDs */
	ehci->last_iso_frame = frame;
	frame = (frame + 1) & fmask;

	goto restart;
}
