/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/

/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
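	/* Worked example (illustrative arithmetic only, not a new rule): with
	 * buf = 0x12345abc the first pointer covers 0x1000 - 0xabc = 0x544
	 * bytes up to the next 4K boundary; the loop below can then add up to
	 * four more page-aligned pointers, giving the 16-20K per-qtd range
	 * noted in its comment.
	 */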
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}

/*-------------------------------------------------------------------------*/

static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
		unsigned	is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb ();
	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}

/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}

/*-------------------------------------------------------------------------*/

static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh = ep->hcpriv;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& HC_IS_RUNNING(hcd->state))
		qh_link_async(ehci, qh);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
		struct urb *urb, u32 token)
{

	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
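	/* (Summary, not new behavior: when usb_hub_clear_tt_buffer() accepts
	 * the request and returns 0, qh->clearing_tt keeps this QH unlinked
	 * until ehci_clear_tt_buffer_complete() above runs and re-links it.)
	 */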
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef DEBUG
		struct usb_device *tt = urb->dev->tt->hub;
		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);
#endif /* DEBUG */
		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
				   ehci_to_hcd(ehci)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
}

static int qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status);
	}

	return status;
}

static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
		}
		qh_put (qh);
	}

	if (unlikely(urb->unlinked)) {
		COUNT(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		COUNT(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
"in" : "out", 287 status, 288 urb->actual_length, urb->transfer_buffer_length); 289#endif 290 291 /* complete() can reenter this HCD */ 292 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 293 spin_unlock (&ehci->lock); 294 usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); 295 spin_lock (&ehci->lock); 296} 297 298static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); 299static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); 300 301static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); 302 303/* 304 * Process and free completed qtds for a qh, returning URBs to drivers. 305 * Chases up to qh->hw_current. Returns number of completions called, 306 * indicating how much "real" work we did. 307 */ 308static unsigned 309qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) 310{ 311 struct ehci_qtd *last, *end = qh->dummy; 312 struct list_head *entry, *tmp; 313 int last_status; 314 int stopped; 315 unsigned count = 0; 316 u8 state; 317 const __le32 halt = HALT_BIT(ehci); 318 struct ehci_qh_hw *hw = qh->hw; 319 320 if (unlikely (list_empty (&qh->qtd_list))) 321 return count; 322 323 /* completions (or tasks on other cpus) must never clobber HALT 324 * till we've gone through and cleaned everything up, even when 325 * they add urbs to this qh's queue or mark them for unlinking. 326 * 327 * NOTE: unlinking expects to be done in queue order. 328 * 329 * It's a bug for qh->qh_state to be anything other than 330 * QH_STATE_IDLE, unless our caller is scan_async() or 331 * scan_periodic(). 332 */ 333 state = qh->qh_state; 334 qh->qh_state = QH_STATE_COMPLETING; 335 stopped = (state == QH_STATE_IDLE); 336 337 rescan: 338 last = NULL; 339 last_status = -EINPROGRESS; 340 qh->needs_rescan = 0; 341 342 /* remove de-activated QTDs from front of queue. 343 * after faults (including short reads), cleanup this urb 344 * then let the queue advance. 345 * if queue is stopped, handles unlinks. 346 */ 347 list_for_each_safe (entry, tmp, &qh->qtd_list) { 348 struct ehci_qtd *qtd; 349 struct urb *urb; 350 u32 token = 0; 351 352 qtd = list_entry (entry, struct ehci_qtd, qtd_list); 353 urb = qtd->urb; 354 355 /* clean up any state from previous QTD ...*/ 356 if (last) { 357 if (likely (last->urb != urb)) { 358 ehci_urb_done(ehci, last->urb, last_status); 359 count++; 360 last_status = -EINPROGRESS; 361 } 362 ehci_qtd_free (ehci, last); 363 last = NULL; 364 } 365 366 /* ignore urbs submitted during completions we reported */ 367 if (qtd == end) 368 break; 369 370 /* hardware copies qtd out of qh overlay */ 371 rmb (); 372 token = hc32_to_cpu(ehci, qtd->hw_token); 373 374 /* always clean up qtds the hc de-activated */ 375 retry_xacterr: 376 if ((token & QTD_STS_ACTIVE) == 0) { 377 378 /* on STALL, error, and short reads this urb must 379 * complete and all its qtds must be recycled. 
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
	"detected XactErr len %zu/%zu retry %d\n",
	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/* qh unlinked; token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_hc32(ehci, qtd->qtd_dma)
						== hw->hw_current) {
				token = hc32_to_cpu(ehci, hw->hw_token);

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				ehci_clear_tt_buffer(ehci, qh, urb, token);
			}

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((halt & hw->hw_token) == 0) {
halt:
				hw->hw_token |= halt;
				wmb ();
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
					last_status != -EREMOTEIO)) {
				/* The TT's in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				if (last_status != -EPIPE)
					ehci_clear_tt_buffer(ehci, qh, urb,
							token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->needs_rescan)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise we have to wait until the QH is fully unlinked.
		 * Our caller will start an unlink if qh->needs_rescan is
		 * set.  But if an unlink has already started, nothing needs
		 * to be done.
		 */
		if (state != QH_STATE_LINKED)
			qh->needs_rescan = 0;
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			/* We won't refresh a QH that's linked (after the HC
			 * stopped the queue).  That avoids a race:
			 *  - HC reads first part of QH;
			 *  - CPU updates that first part and the token;
			 *  - HC reads rest of that QH, including token
			 * Result:  HC gets an inconsistent image, and then
			 * DMAs to/from the wrong memory (corrupting it).
			 *
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */

			/* Tell the caller to start an unlink */
			qh->needs_rescan = 1;
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)

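/* Example decode (illustrative only): a high-bandwidth high-speed interrupt
 * endpoint reporting wMaxPacketSize 0x1400 gives max_packet() = 0x400 (1024
 * bytes) and hb_mult() = 3 (bits 12:11 == 2, i.e. three transactions per
 * microframe).
 */
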
/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		buf = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		buf = urb->transfer_dma;
		this_sg_len = len;
	}

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
				maxpacket);
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
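		/* (The "magic" dummy referred to above is, as far as this
		 * file is concerned, whatever inactive qtd the ehci-hcd.c
		 * init code pointed the async head's hw_alt_next at; parking
		 * there keeps the HC from advancing past a short read.)
		 */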
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk (urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}

/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
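/* (For orientation: qh_make() below packs the EHCI "endpoint characteristics"
 * dword (device address, endpoint number, EPS, maxpacket, dtc, NAK reload)
 * into info1 and the "endpoint capabilities" dword (s-mask, c-mask, hub
 * address, port number, Mult) into info2; see EHCI spec section 3.6.)
 */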
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period << 3;
			}
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US (think_time +
					usb_calc_bus_time (urb->dev->speed,
					is_input, 0, max_packet (maxp)));
			qh->period = urb->interval;
			if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period;
			}
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ehci, info1);
	hw->hw_info2 = cpu_to_hc32(ehci, info2);
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}

/*-------------------------------------------------------------------------*/

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	/* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt))
		return;

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = ehci_readl(ehci, &ehci->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(ehci, &ehci->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			ehci_writel(ehci, cmd, &ehci->regs->command);
			ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(ehci, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh_get(qh);
	qh->xacterrs = 0;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
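/* (Summary of the queueing trick used below, per the in-line comments and
 * EHCI 4.10.2: the first new qtd is copied into the QH's current dummy, that
 * first qtd is re-purposed as the new dummy at the list end, and only then is
 * the real token written, so the HC never sees a half-built list.)
 */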
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__hc32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);
			wmb ();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}

/*-------------------------------------------------------------------------*/

static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	struct ehci_qtd		*qtd;
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__func__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, urb->ep->hcpriv);
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ehci, qh);
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}

/*-------------------------------------------------------------------------*/

/* the async qh for the qtds being reclaimed are now unlinked from the HC */

static void end_unlink_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	iaa_watchdog_done(ehci);

	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	qh->reclaim = NULL;

	qh_completions (ehci, qh);

	if (!list_empty (&qh->qtd_list)
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		qh_link_async (ehci, qh);
	else {
		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
				&& ehci->async->qh_next.qh == NULL)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}
	qh_put(qh);			/* refcount from async list */

	if (next) {
		ehci->reclaim = NULL;
		start_unlink_async (ehci, next);
	}
}

/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = ehci_readl(ehci, &ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	assert_spin_locked(&ehci->lock);
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
			)
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
				&& !ehci->reclaim) {
			/* ... and CMD_IAAD clear */
			ehci_writel(ehci, cmd & ~CMD_ASE,
				    &ehci->regs->command);
			wmb ();
			// handshake later, if we need to
			timer_action_done (ehci, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw->hw_next = qh->hw->hw_next;
	prev->qh_next = qh->qh_next;
	wmb ();

	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci);
		return;
	}

	cmd |= CMD_IAAD;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	(void)ehci_readl(ehci, &ehci->regs->command);
	iaa_watchdog_start(ehci);
}

/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
	qh = ehci->async->qh_next.qh;
	if (likely (qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty (&qh->qtd_list)
					&& qh->stamp != ehci->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get (qh);
				qh->stamp = ehci->stamp;
				temp = qh_completions (ehci, qh);
				if (qh->needs_rescan)
					unlink_async(ehci, qh);
				qh_put (qh);
				if (temp != 0) {
					goto rescan;
				}
			}

			/* unlink idle entries, reducing DMA usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty(&qh->qtd_list)
					&& qh->qh_state == QH_STATE_LINKED) {
				if (!ehci->reclaim
					&& ((ehci->stamp - qh->stamp) & 0x1fff)
						>= (EHCI_SHRINK_FRAMES * 8))
					start_unlink_async(ehci, qh);
				else
					action = TIMER_ASYNC_SHRINK;
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}
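/* (In this kernel's ehci-hcd.c, which #includes this file, submit_async() is
 * reached from the urb_enqueue path for control/bulk transfers, and
 * scan_async() is driven from ehci_work(), i.e. from the IRQ handler and the
 * watchdog timers.)
 */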