/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, __le32 tag)
{
	switch (tag) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	// case Q_TYPE_SITD:
	default:
		return &periodic->sitd->sitd_next;
	}
}

/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow *prev_p = &ehci->pshadow [frame];
	__le32 *hw_p = &ehci->periodic [frame];
	union ehci_shadow here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		hw_p = here.hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = *here.hw_next;
}
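/* Each uframe is 125 usecs, and the EHCI spec reserves at most 80% of it
 * (100 usecs) for periodic transfers.  That 100 usec budget is what the
 * schedulers below and the DEBUG overrun check in periodic_usecs() both
 * work against.
 */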
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	__le32 *hw_p = &ehci->periodic [frame];
	union ehci_shadow *q = &ehci->pshadow [frame];
	unsigned usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE (*hw_p)) {
		case Q_TYPE_QH:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		// case Q_TYPE_FSTN:
		default:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			usecs += q->itd->usecs [uframe];
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						__constant_cpu_to_le32 (1<<31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_le32 (1 << (8 + uframe))) {
				/* worst case for IN complete-split */
				usecs += q->sitd->stream->c_usecs;
			}

			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}
#ifdef DEBUG
	if (usecs > 100)
		ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}

/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __le32 mask)
{
	unsigned char smask = QH_SMASK & le32_to_cpu(mask);
	if (!smask) {
		ehci_err(ehci, "invalid empty smask!\n");
		/* uframe 7 can't have bw so this will indicate failure */
		return 7;
	}
	return ffs(smask) - 1;
}

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;
	for (i=0; i<7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
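/* Worked example (illustrative values):
 *
 *	unsigned short tt_usecs[8] = { 200, 0, 0, 0, 0, 0, 0, 0 };
 *	carryover_tt_bandwidth(tt_usecs);
 *	// now tt_usecs[] == { 125, 75, 0, 0, 0, 0, 0, 0 }
 *
 * Whatever doesn't fit in one 125 usec uframe spills into the next,
 * exactly as the TT performs its queued downstream transfers in sequence.
 */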
/* How many of the tt's periodic downstream 1000 usecs are allocated?
 *
 * While this measures the bandwidth in terms of usecs/uframe,
 * the low/fullspeed bus has no notion of uframes, so any particular
 * low/fullspeed transfer can "carry over" from one uframe to the next,
 * since the TT just performs downstream transfers in sequence.
 *
 * For example, two separate 100 usec transfers can start in the same
 * uframe, and the second one would "carry over" 75 usecs into the
 * next uframe.
 */
static void
periodic_tt_usecs (
	struct ehci_hcd *ehci,
	struct usb_device *dev,
	unsigned frame,
	unsigned short tt_usecs[8]
)
{
	__le32 *hw_p = &ehci->periodic [frame];
	union ehci_shadow *q = &ehci->pshadow [frame];
	unsigned char uf;

	memset(tt_usecs, 0, 8 * sizeof (unsigned short));

	while (q->ptr) {
		switch (Q_NEXT_TYPE(*hw_p)) {
		case Q_TYPE_ITD:
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			continue;
		case Q_TYPE_QH:
			if (same_tt(dev, q->qh->dev)) {
				uf = tt_start_uframe(ehci, q->qh->hw_info2);
				tt_usecs[uf] += q->qh->tt_usecs;
			}
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			continue;
		case Q_TYPE_SITD:
			if (same_tt(dev, q->sitd->urb->dev)) {
				uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
				tt_usecs[uf] += q->sitd->stream->tt_usecs;
			}
			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			continue;
		// case Q_TYPE_FSTN:
		default:
			ehci_dbg(ehci,
				"ignoring periodic frame %d FSTN\n", frame);
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
		}
	}

	carryover_tt_bandwidth(tt_usecs);

	if (max_tt_usecs[7] < tt_usecs[7])
		ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
			frame, tt_usecs[7] - max_tt_usecs[7]);
}

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
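/* For example (illustrative numbers): a 300 usec fullspeed transfer
 * starting at uframe 2 requires tt_usecs[2] == 0, then fills uframes
 * 2 and 3 completely and carries 50 usecs over into uframe 4.
 */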
static int tt_available (
	struct ehci_hcd *ehci,
	unsigned period,
	struct usb_device *dev,
	unsigned frame,
	unsigned uframe,
	u16 usecs
)
{
	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (; frame < ehci->periodic_size; frame += period) {
		unsigned short tt_usecs[8];

		periodic_tt_usecs (ehci, dev, frame, tt_usecs);

		ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
			" schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
			frame, usecs, uframe,
			tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
			tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);

		if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
			ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
				frame, uframe);
			return 0;
		}

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (125 < usecs) {
			int ufs = (usecs / 125) - 1;
			int i;
			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (0 < tt_usecs[i]) {
					ehci_vdbg(ehci,
						"multi-uframe xfer can't fit "
						"in frame %d uframe %d\n",
						frame, i);
					return 0;
				}
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7]) {
			ehci_vdbg(ehci,
				"tt unavailable usecs %d frame %d uframe %d\n",
				usecs, frame, uframe);
			return 0;
		}
	}

	return 1;
}

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
	struct ehci_hcd *ehci,
	unsigned period,
	struct usb_device *dev,
	unsigned frame,
	u32 uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage: split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow here;
		__le32 type;

		here = ehci->pshadow [frame];
		type = Q_NEXT_TYPE (ehci->periodic [frame]);
		while (here.ptr) {
			switch (type) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE (here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				if (same_tt (dev, here.qh->dev)) {
					u32 mask;

					mask = le32_to_cpu (here.qh->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.qh->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt (dev, here.sitd->urb->dev)) {
					u16 mask;

					mask = le32_to_cpu (here.sitd->hw_uframe);
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			// case Q_TYPE_FSTN:
			default:
				ehci_dbg (ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/
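/* PSE changes take effect only at frame boundaries, so both handshakes
 * below allow a little more than one full frame (9 uframes, ~1.125 msec)
 * for the schedule status bit (PSS) to catch up with the command bit.
 */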
static int enable_periodic (struct ehci_hcd *ehci)
{
	u32 cmd;
	int status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake(ehci, &ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return status;
	}

	cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
		% (ehci->periodic_size << 3);
	return 0;
}

static int disable_periodic (struct ehci_hcd *ehci)
{
	u32 cmd;
	int status;

	/* did setting PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake(ehci, &ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return status;
	}

	cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	/* posted write ... */

	ehci->next_uframe = -1;
	return 0;
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period = qh->period;

	dev_dbg (&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period) {
		union ehci_shadow *prev = &ehci->pshadow [i];
		__le32 *hw_p = &ehci->periodic [i];
		union ehci_shadow here = *prev;
		__le32 type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE (*hw_p);
			if (type == Q_TYPE_QH)
				break;
			prev = periodic_next_shadow (prev, type);
			hw_p = &here.qh->hw_next;
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw_next = *hw_p;
			wmb ();
			prev->qh = qh;
			*hw_p = QH_NEXT (qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh_get (qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		return enable_periodic (ehci);

	return 0;
}
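/* Illustrative shape of one periodic branch after a few links (slowest
 * period first, so interior nodes can be shared between frames):
 *
 *	periodic[i] --> itd/sitd ... --> qh (period 8) --> qh (period 4)
 *			--> qh (period 1) --> ...
 */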
static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period;

	// IF this isn't high speed
	//   and this qh is active in the current uframe
	//   (and overlay token SplitXstate is false?)
	// THEN
	//   qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */);

	/* high bandwidth, or otherwise part of every microframe */
	if ((period = qh->period) == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period)
		periodic_unlink (ehci, i, qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg (&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period,
		le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	qh_put (qh);

	/* maybe turn off periodic schedule */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);
}

static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned wait;

	qh_unlink_periodic (ehci, qh);

	/* simple/paranoid:  always delay, expecting the HC needs to read
	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
	 * expect khubd to clean up after any CSPLITs we won't issue.
	 * active high speed queues may need bigger delays...
	 */
	if (list_empty (&qh->qtd_list)
			|| (__constant_cpu_to_le32 (QH_CMASK)
				& qh->hw_info2) != 0)
		wait = 2;
	else
		wait = 55;	/* worst case: 3 * 1024 */

	udelay (wait);
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_next = EHCI_LIST_END;
	wmb ();
}

/*-------------------------------------------------------------------------*/

static int check_period (
	struct ehci_hcd *ehci,
	unsigned frame,
	unsigned uframe,
	unsigned period,
	unsigned usecs
) {
	int claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely (period == 0)) {
		do {
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs (ehci, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < ehci->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs (ehci, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < ehci->periodic_size);
	}

	// success!
	return 1;
}
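/* Example (illustrative): an interrupt endpoint needing 20 usecs at
 * period 4 fits only if frames frame, frame+4, frame+8, ... each have
 * at most 80 usecs (100 - 20) already claimed in the chosen uframe.
 */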
static int check_intr_schedule (
	struct ehci_hcd *ehci,
	unsigned frame,
	unsigned uframe,
	const struct ehci_qh *qh,
	__le32 *c_maskp
)
{
	int retval = -ENOSPC;
	u8 mask = 0;

	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
		goto done;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
				qh->tt_usecs)) {
		unsigned i;

		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
		for (i = uframe+1; i < 8 && i < uframe+4; i++)
			if (!check_period (ehci, frame, i,
						qh->period, qh->c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;

		*c_maskp = cpu_to_le32 (mask << 8);
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = cpu_to_le32 (mask << 8);

	mask |= 1 << uframe;
	if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
		if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
					qh->period, qh->c_usecs))
			goto done;
		if (!check_period (ehci, frame, uframe + qh->gap_uf,
					qh->period, qh->c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int status;
	unsigned uframe;
	__le32 c_mask;
	unsigned frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh_refresh(ehci, qh);
	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			frame = qh->period - 1;
			do {
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule (ehci,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			} while (status && frame--);

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
		qh->hw_info2 |= qh->period
			? cpu_to_le32 (1 << uframe)
			: __constant_cpu_to_le32 (QH_SMASK);
		qh->hw_info2 |= c_mask;
	} else
		ehci_dbg (ehci, "reused qh %p schedule\n", qh);

	/* stuff into the periodic schedule */
	status = qh_link_periodic (ehci, qh);
done:
	return status;
}
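/* Example (illustrative, using the tt_no_collision path with gap_uf 2):
 * a fullspeed interrupt IN placed at uframe 1 ends up with S-mask 0x02
 * and C-mask 0x18 in hw_info2, i.e. CSPLITs in uframes 3 and 4.
 */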
static int intr_submit (
	struct ehci_hcd *ehci,
	struct usb_host_endpoint *ep,
	struct urb *urb,
	struct list_head *qtd_list,
	gfp_t mem_flags
) {
	unsigned epnum;
	unsigned long flags;
	struct ehci_qh *qh;
	int status = 0;
	struct list_head empty;

	/* get endpoint and transfer/schedule data */
	epnum = ep->desc.bEndpointAddress;

	spin_lock_irqsave (&ehci->lock, flags);

	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
			&ehci_to_hcd(ehci)->flags))) {
		status = -ESHUTDOWN;
		goto done;
	}

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
	BUG_ON (qh == NULL);

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (gfp_t mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kzalloc(sizeof *stream, mem_flags);
	if (likely (stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = -1;
		stream->refcount = 1;
	}
	return stream;
}

static void
iso_stream_init (
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	struct usb_device *dev,
	int pipe,
	unsigned interval
)
{
	static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	u32 buf1;
	unsigned epnum, maxp;
	int is_input;
	long bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacketSize field
	 */
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	maxp = usb_maxpacket(dev, pipe, !is_input);
	if (is_input) {
		buf1 = (1 << 11);
	} else {
		buf1 = 0;
	}

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_le32 (buf1);
		stream->buf2 = cpu_to_le32 (multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->usecs = HS_USECS_ISO (maxp);
		bandwidth = stream->usecs * 8;
		bandwidth /= 1 << (interval - 1);

	} else {
		u32 addr;
		int think_time;
		int hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->usecs = HS_USECS_ISO (maxp);
		think_time = dev->tt ? dev->tt->think_time : 0;
		stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
				dev->speed, is_input, 1, maxp));
		hs_transfers = max (1u, (maxp + 187) / 188);
		if (is_input) {
			u32 tmp;

			addr |= 1 << 31;
			stream->c_usecs = stream->usecs;
			stream->usecs = HS_USECS_ISO (1);
			stream->raw_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->raw_mask |= tmp << (8 + 2);
		} else
			stream->raw_mask = smask_out [hs_transfers - 1];
		bandwidth = stream->usecs + stream->c_usecs;
		bandwidth /= 1 << (interval + 2);

		/* stream->splits gets created from raw_mask later */
		stream->address = cpu_to_le32 (addr);
	}
	stream->bandwidth = bandwidth;

	stream->udev = dev;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;
}
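/* Example (illustrative): a 270 byte fullspeed iso OUT needs
 * hs_transfers = (270 + 187) / 188 = 2 start-splits, so raw_mask
 * becomes smask_out[1] == 0x03: SSPLITs in two consecutive uframes,
 * carrying 188 bytes and then the remaining 82.
 */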
static void
iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
{
	stream->refcount--;

	/* free whenever just a dev->ep reference remains.
	 * not like a QH -- no persistent state (toggle, halt)
	 */
	if (stream->refcount == 1) {
		int is_in;

		// BUG_ON (!list_empty(&stream->td_list));

		while (!list_empty (&stream->free_list)) {
			struct list_head *entry;

			entry = stream->free_list.next;
			list_del (entry);

			/* knows about ITD vs SITD */
			if (stream->highspeed) {
				struct ehci_itd *itd;

				itd = list_entry (entry, struct ehci_itd,
						itd_list);
				dma_pool_free (ehci->itd_pool, itd,
						itd->itd_dma);
			} else {
				struct ehci_sitd *sitd;

				sitd = list_entry (entry, struct ehci_sitd,
						sitd_list);
				dma_pool_free (ehci->sitd_pool, sitd,
						sitd->sitd_dma);
			}
		}

		is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
		stream->bEndpointAddress &= 0x0f;
		stream->ep->hcpriv = NULL;

		if (stream->rescheduled) {
			ehci_info (ehci, "ep%d%s-iso rescheduled "
				"%lu times in %lu seconds\n",
				stream->bEndpointAddress, is_in ? "in" : "out",
				stream->rescheduled,
				((jiffies - stream->start)/HZ)
				);
		}

		kfree(stream);
	}
}

static inline struct ehci_iso_stream *
iso_stream_get (struct ehci_iso_stream *stream)
{
	if (likely (stream != NULL))
		stream->refcount++;
	return stream;
}

static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned epnum;
	struct ehci_iso_stream *stream;
	struct usb_host_endpoint *ep;
	unsigned long flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave (&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely (stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != NULL)) {
			/* dev->ep owns the initial refcount */
			ep->hcpriv = stream;
			stream->ep = ep;
			iso_stream_init(ehci, stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
	} else if (unlikely (stream->hw_info1 != 0)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	/* caller guarantees an eventual matching iso_stream_put */
	stream = iso_stream_get (stream);

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc (unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched *iso_sched;
	int size = sizeof *iso_sched;

	size += packets * sizeof (struct ehci_iso_packet);
	iso_sched = kzalloc(size, mem_flags);
	if (likely (iso_sched != NULL)) {
		INIT_LIST_HEAD (&iso_sched->td_list);
	}
	return iso_sched;
}
static inline void
itd_sched_init (
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *uframe = &iso_sched->packet [i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc [i].length;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely (((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}
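/* The transaction word built above follows the iTD layout in the EHCI
 * spec: status flags in the high bits (ACTIVE here), transfer length in
 * bits 27:16, IOC at bit 15, and the page offset in the low 12 bits.
 * IOC is requested only for the urb's last packet, and only when
 * URB_NO_INTERRUPT wasn't set.
 */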
static void
iso_sched_free (
	struct ehci_iso_stream *stream,
	struct ehci_iso_sched *iso_sched
)
{
	if (!iso_sched)
		return;
	// caller must hold ehci->lock!
	list_splice (&iso_sched->td_list, &stream->free_list);
	kfree (iso_sched);
}

static int
itd_urb_transaction (
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	gfp_t mem_flags
)
{
	struct ehci_itd *itd;
	dma_addr_t itd_dma;
	int i;
	unsigned num_itds;
	struct ehci_iso_sched *sched;
	unsigned long flags;

	sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (sched == NULL))
		return -ENOMEM;

	itd_sched_init (sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too.  avoid that issue for now.
		 */

		/* prefer previously-allocated itds */
		if (likely (!list_empty(&stream->free_list))) {
			itd = list_entry (stream->free_list.prev,
					struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else
			itd = NULL;

		if (!itd) {
			spin_unlock_irqrestore (&ehci->lock, flags);
			itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
		}

		if (unlikely (NULL == itd)) {
			iso_sched_free (stream, sched);
			spin_unlock_irqrestore (&ehci->lock, flags);
			return -ENOMEM;
		}
		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		list_add (&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore (&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
	struct ehci_hcd *ehci,
	u32 mod,
	u32 uframe,
	u8 usecs,
	u32 period
)
{
	uframe %= period;
	do {
		/* can't commit more than 80% periodic == 100 usec */
		if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
				> (100 - usecs))
			return 0;

		/* we know urb->interval is 2^N uframes */
		uframe += period;
	} while (uframe < mod);
	return 1;
}
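/* Example (illustrative): a highspeed stream with a 4 uframe period and
 * usecs 25 fits at a given uframe only if that uframe, and every 4th
 * one after it all the way around the ring, has at most 75 usecs
 * (100 - 25) already claimed.
 */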
static inline int
sitd_slot_ok (
	struct ehci_hcd *ehci,
	u32 mod,
	struct ehci_iso_stream *stream,
	u32 uframe,
	struct ehci_iso_sched *sched,
	u32 period_uframes
)
{
	u32 mask, tmp;
	u32 frame, uf;

	mask = stream->raw_mask << (uframe & 7);

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* this multi-pass logic is simple, but performance may
	 * suffer when the schedule data isn't cached.
	 */

	/* check bandwidth */
	uframe %= period_uframes;
	do {
		u32 max_used;

		frame = uframe >> 3;
		uf = uframe & 7;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
		/* The tt's fullspeed bus bandwidth must be available.
		 * tt_available scheduling guarantees 10+% for control/bulk.
		 */
		if (!tt_available (ehci, period_uframes << 3,
				stream->udev, frame, uf, stream->tt_usecs))
			return 0;
#else
		/* tt must be idle for start(s), any gap, and csplit.
		 * assume scheduling slop leaves 10+% for control/bulk.
		 */
		if (!tt_no_collision (ehci, period_uframes << 3,
				stream->udev, frame, mask))
			return 0;
#endif

		/* check starts (OUT uses more than one) */
		max_used = 100 - stream->usecs;
		for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (periodic_usecs (ehci, frame, uf) > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->c_usecs) {
			uf = uframe & 7;
			max_used = 100 - stream->c_usecs;
			do {
				tmp = 1 << uf;
				tmp <<= 8;
				if ((stream->raw_mask & tmp) == 0)
					continue;
				if (periodic_usecs (ehci, frame, uf)
						> max_used)
					return 0;
			} while (++uf < 8);
		}

		/* we know urb->interval is 2^N uframes */
		uframe += period_uframes;
	} while (uframe < mod);

	stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7));
	return 1;
}
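/* Note: by the time sitd_slot_ok() writes stream->splits, uframe has
 * advanced past mod; but period_uframes is a multiple of 8 for the
 * fullspeed streams this is called for, so (uframe & 7) still names
 * the same starting uframe within its frame.
 */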
/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULE_SLOP 10	/* frames */

static int
iso_stream_schedule (
	struct ehci_hcd *ehci,
	struct urb *urb,
	struct ehci_iso_stream *stream
)
{
	u32 now, start, max, period;
	int status;
	unsigned mod = ehci->periodic_size << 3;
	struct ehci_iso_sched *sched = urb->hcpriv;

	if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
		ehci_dbg (ehci, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	if ((stream->depth + sched->span) > mod) {
		ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
			urb, stream->depth, sched->span, mod);
		status = -EFBIG;
		goto fail;
	}

	now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;

	/* when's the last uframe this urb could start? */
	max = now + mod;

	/* typical case: reuse current schedule.  stream is still active,
	 * and no gaps from host falling behind (irq delays etc)
	 */
	if (likely (!list_empty (&stream->td_list))) {
		start = stream->next_uframe;
		if (start < now)
			start += mod;
		if (likely ((start + sched->span) < max))
			goto ready;
		/* else fell behind; someday, try to reschedule */
		status = -EL2NSYNC;
		goto fail;
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
	start %= mod;
	stream->next_uframe = start;

	/* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */

	period = urb->interval;
	if (!stream->highspeed)
		period <<= 3;

	/* find a uframe slot with enough bandwidth */
	for (; start < (stream->next_uframe + period); start++) {
		int enough_space;

		/* check schedule: enough space? */
		if (stream->highspeed)
			enough_space = itd_slot_ok (ehci, mod, start,
					stream->usecs, period);
		else {
			if ((start % 8) >= 6)
				continue;
			enough_space = sitd_slot_ok (ehci, mod, stream,
					start, sched, period);
		}

		/* schedule it here if there's enough bandwidth */
		if (enough_space) {
			stream->next_uframe = start % mod;
			goto ready;
		}
	}

	/* no room in the schedule */
	ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
		list_empty (&stream->td_list) ? "" : "re",
		urb, now, max);
	status = -ENOSPC;

fail:
	iso_sched_free (stream, sched);
	urb->hcpriv = NULL;
	return status;

ready:
	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = stream->next_uframe;
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return 0;
}
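/* Example (illustrative, with the default periodic_size of 1024): mod
 * is 8192 schedulable uframes; a highspeed stream reports its
 * urb->start_frame in uframes, while a fullspeed one reports it in
 * frames (uframe >> 3).
 */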
/*-------------------------------------------------------------------------*/

static inline void
itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END;
	itd->hw_bufp [0] = stream->buf0;
	itd->hw_bufp [1] = stream->buf1;
	itd->hw_bufp [2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch (
	struct ehci_itd *itd,
	struct ehci_iso_sched *iso_sched,
	unsigned index,
	u16 uframe
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet [index];
	unsigned pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index [uframe] = index;

	itd->hw_transaction [uframe] = uf->transaction;
	itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12);
	itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0);
	itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (uf->cross)) {
		u64 bufp = uf->bufp + 4096;
		itd->pg = ++pg;
		itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
		itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32));
	}
}

static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	itd->frame = frame;
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}
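/* One iTD describes up to 8 consecutive uframes, i.e. one frame; the
 * loop in itd_link_urb() below therefore links the current iTD and
 * starts a fresh one whenever the schedule crosses a frame boundary.
 */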
/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet;
	unsigned next_uframe, uframe, frame;
	struct ehci_iso_sched *iso_sched = urb->hcpriv;
	struct ehci_itd *itd;

	next_uframe = stream->next_uframe % mod;

	if (unlikely (list_empty(&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			urb->interval,
			next_uframe >> 3, next_uframe & 0x7);
		stream->start = jiffies;
	}
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */
			// BUG_ON (list_empty (&iso_sched->td_list));

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry (iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail (&itd->itd_list, &stream->td_list);
			itd->stream = iso_stream_get (stream);
			itd->urb = usb_get_urb (urb);
			itd_init (stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd->usecs [uframe] = stream->usecs;
		itd_patch (itd, iso_sched, packet, uframe);

		next_uframe += stream->interval;
		stream->depth += stream->interval;
		next_uframe %= mod;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link (ehci, frame % ehci->periodic_size, itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free (stream, iso_sched);
	urb->hcpriv = NULL;

	timer_action (ehci, TIMER_IO_WATCHDOG);
	if (unlikely (!ehci->periodic_sched++))
		return enable_periodic (ehci);
	return 0;
}

#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

static unsigned
itd_complete (
	struct ehci_hcd *ehci,
	struct ehci_itd *itd
) {
	struct urb *urb = itd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	unsigned uframe;
	int urb_index = -1;
	struct ehci_iso_stream *stream = itd->stream;
	struct usb_device *dev;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely (itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc [urb_index];

		t = le32_to_cpup (&itd->hw_transaction [uframe]);
		itd->hw_transaction [uframe] = 0;
		stream->depth -= stream->interval;

		/* report transfer status */
		if (unlikely (t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein (urb->pipe)
					? -ENOSR	/* hc couldn't read */
					: -ECOMM;	/* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else	/* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE))
				desc->actual_length = EHCI_ITD_LENGTH (t);
		} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH (t);
		}
	}

	usb_put_urb (urb);
	itd->urb = NULL;
	itd->stream = NULL;
	list_move (&itd->itd_list, &stream->free_list);
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if (likely ((urb_index + 1) != urb->number_of_packets))
		return 0;

	/* ASSERT: it's really the last itd for this urb
	list_for_each_entry (itd, &stream->td_list, itd_list)
		BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver ... can be out-of-order */
	dev = urb->dev;
	ehci_urb_done (ehci, urb);
	urb = NULL;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (unlikely (!ehci->periodic_sched))
		(void) disable_periodic (ehci);
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;

	if (unlikely (list_empty (&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}
	iso_stream_put (ehci, stream);

	return 1;
}
/*-------------------------------------------------------------------------*/

static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (unlikely (stream == NULL)) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely (urb->interval != stream->interval)) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (stream, ehci, urb, mem_flags);
	if (unlikely (status < 0)) {
		ehci_dbg (ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
			&ehci_to_hcd(ehci)->flags)))
		status = -ESHUTDOWN;
	else
		status = iso_stream_schedule (ehci, urb, stream);
	if (likely (status == 0))
		itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (unlikely (status < 0))
		iso_stream_put (ehci, stream);
	return status;
}

#ifdef CONFIG_USB_EHCI_SPLIT_ISO

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */

static inline void
sitd_sched_init (
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *packet = &iso_sched->packet [i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc [i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}
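/* Example (illustrative): a 400 byte fullspeed iso OUT becomes
 * (400 + 187) / 188 = 3 start-splits, so buf1 gets T-count 3 with
 * the transaction position set to BEGIN rather than ALL, alongside
 * the page-aligned buffer pointer.
 */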
static int
sitd_urb_transaction (
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	gfp_t mem_flags
)
{
	struct ehci_sitd *sitd;
	dma_addr_t sitd_dma;
	int i;
	struct ehci_iso_sched *iso_sched;
	unsigned long flags;

	iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init (iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE:  for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too.  avoid that issue for now.
		 */

		/* prefer previously-allocated sitds */
		if (!list_empty(&stream->free_list)) {
			sitd = list_entry (stream->free_list.prev,
					struct ehci_sitd, sitd_list);
			list_del (&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else
			sitd = NULL;

		if (!sitd) {
			spin_unlock_irqrestore (&ehci->lock, flags);
			sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
		}

		if (!sitd) {
			iso_sched_free (stream, iso_sched);
			spin_unlock_irqrestore (&ehci->lock, flags);
			return -ENOMEM;
		}
		memset (sitd, 0, sizeof *sitd);
		sitd->sitd_dma = sitd_dma;
		list_add (&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore (&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
sitd_patch (
	struct ehci_iso_stream *stream,
	struct ehci_sitd *sitd,
	struct ehci_iso_sched *iso_sched,
	unsigned index
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet [index];
	u64 bufp = uf->bufp;

	sitd->hw_next = EHCI_LIST_END;
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END;

	sitd->hw_buf [0] = cpu_to_le32 (bufp);
	sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32);

	sitd->hw_buf [1] = cpu_to_le32 (uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32);
	sitd->index = index;
}

static inline void
sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow [frame];
	sitd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].sitd = sitd;
	sitd->frame = frame;
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD;
}
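/* Unlike an iTD, each sITD carries exactly one packet in one frame, so
 * sitd_link_urb() below links one sITD per packet, spaced interval
 * frames apart.
 */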
"in" : "out", 1881 (next_uframe >> 3) % ehci->periodic_size, 1882 stream->interval, le32_to_cpu (stream->splits)); 1883 stream->start = jiffies; 1884 } 1885 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; 1886 1887 /* fill sITDs frame by frame */ 1888 for (packet = 0, sitd = NULL; 1889 packet < urb->number_of_packets; 1890 packet++) { 1891 1892 /* ASSERT: we have all necessary sitds */ 1893 BUG_ON (list_empty (&sched->td_list)); 1894 1895 /* ASSERT: no itds for this endpoint in this frame */ 1896 1897 sitd = list_entry (sched->td_list.next, 1898 struct ehci_sitd, sitd_list); 1899 list_move_tail (&sitd->sitd_list, &stream->td_list); 1900 sitd->stream = iso_stream_get (stream); 1901 sitd->urb = usb_get_urb (urb); 1902 1903 sitd_patch (stream, sitd, sched, packet); 1904 sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size, 1905 sitd); 1906 1907 next_uframe += stream->interval << 3; 1908 stream->depth += stream->interval << 3; 1909 } 1910 stream->next_uframe = next_uframe % mod; 1911 1912 /* don't need that schedule data any more */ 1913 iso_sched_free (stream, sched); 1914 urb->hcpriv = NULL; 1915 1916 timer_action (ehci, TIMER_IO_WATCHDOG); 1917 if (!ehci->periodic_sched++) 1918 return enable_periodic (ehci); 1919 return 0; 1920} 1921 1922/*-------------------------------------------------------------------------*/ 1923 1924#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \ 1925 | SITD_STS_XACT | SITD_STS_MMF) 1926 1927static unsigned 1928sitd_complete ( 1929 struct ehci_hcd *ehci, 1930 struct ehci_sitd *sitd 1931) { 1932 struct urb *urb = sitd->urb; 1933 struct usb_iso_packet_descriptor *desc; 1934 u32 t; 1935 int urb_index = -1; 1936 struct ehci_iso_stream *stream = sitd->stream; 1937 struct usb_device *dev; 1938 1939 urb_index = sitd->index; 1940 desc = &urb->iso_frame_desc [urb_index]; 1941 t = le32_to_cpup (&sitd->hw_results); 1942 1943 /* report transfer status */ 1944 if (t & SITD_ERRS) { 1945 urb->error_count++; 1946 if (t & SITD_STS_DBE) 1947 desc->status = usb_pipein (urb->pipe) 1948 ? -ENOSR /* hc couldn't read */ 1949 : -ECOMM; /* hc couldn't write */ 1950 else if (t & SITD_STS_BABBLE) 1951 desc->status = -EOVERFLOW; 1952 else /* XACT, MMF, etc */ 1953 desc->status = -EPROTO; 1954 } else { 1955 desc->status = 0; 1956 desc->actual_length = desc->length - SITD_LENGTH (t); 1957 } 1958 1959 usb_put_urb (urb); 1960 sitd->urb = NULL; 1961 sitd->stream = NULL; 1962 list_move (&sitd->sitd_list, &stream->free_list); 1963 stream->depth -= stream->interval << 3; 1964 iso_stream_put (ehci, stream); 1965 1966 /* handle completion now? */ 1967 if ((urb_index + 1) != urb->number_of_packets) 1968 return 0; 1969 1970 /* ASSERT: it's really the last sitd for this urb 1971 list_for_each_entry (sitd, &stream->td_list, sitd_list) 1972 BUG_ON (sitd->urb == urb); 1973 */ 1974 1975 /* give urb back to the driver */ 1976 dev = urb->dev; 1977 ehci_urb_done (ehci, urb); 1978 urb = NULL; 1979 1980 /* defer stopping schedule; completion can submit */ 1981 ehci->periodic_sched--; 1982 if (!ehci->periodic_sched) 1983 (void) disable_periodic (ehci); 1984 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; 1985 1986 if (list_empty (&stream->td_list)) { 1987 ehci_to_hcd(ehci)->self.bandwidth_allocated 1988 -= stream->bandwidth; 1989 ehci_vdbg (ehci, 1990 "deschedule devp %s ep%d%s-iso\n", 1991 dev->devpath, stream->bEndpointAddress & 0x0f, 1992 (stream->bEndpointAddress & USB_DIR_IN) ? 
"in" : "out"); 1993 } 1994 iso_stream_put (ehci, stream); 1995 1996 return 1; 1997} 1998 1999 2000static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, 2001 gfp_t mem_flags) 2002{ 2003 int status = -EINVAL; 2004 unsigned long flags; 2005 struct ehci_iso_stream *stream; 2006 2007 /* Get iso_stream head */ 2008 stream = iso_stream_find (ehci, urb); 2009 if (stream == NULL) { 2010 ehci_dbg (ehci, "can't get iso stream\n"); 2011 return -ENOMEM; 2012 } 2013 if (urb->interval != stream->interval) { 2014 ehci_dbg (ehci, "can't change iso interval %d --> %d\n", 2015 stream->interval, urb->interval); 2016 goto done; 2017 } 2018 2019#ifdef EHCI_URB_TRACE 2020 ehci_dbg (ehci, 2021 "submit %p dev%s ep%d%s-iso len %d\n", 2022 urb, urb->dev->devpath, 2023 usb_pipeendpoint (urb->pipe), 2024 usb_pipein (urb->pipe) ? "in" : "out", 2025 urb->transfer_buffer_length); 2026#endif 2027 2028 /* allocate SITDs */ 2029 status = sitd_urb_transaction (stream, ehci, urb, mem_flags); 2030 if (status < 0) { 2031 ehci_dbg (ehci, "can't init sitds\n"); 2032 goto done; 2033 } 2034 2035 /* schedule ... need to lock */ 2036 spin_lock_irqsave (&ehci->lock, flags); 2037 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, 2038 &ehci_to_hcd(ehci)->flags))) 2039 status = -ESHUTDOWN; 2040 else 2041 status = iso_stream_schedule (ehci, urb, stream); 2042 if (status == 0) 2043 sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); 2044 spin_unlock_irqrestore (&ehci->lock, flags); 2045 2046done: 2047 if (status < 0) 2048 iso_stream_put (ehci, stream); 2049 return status; 2050} 2051 2052#else 2053 2054static inline int 2055sitd_submit (struct ehci_hcd *ehci, struct urb *urb, gfp_t mem_flags) 2056{ 2057 ehci_dbg (ehci, "split iso support is disabled\n"); 2058 return -ENOSYS; 2059} 2060 2061static inline unsigned 2062sitd_complete ( 2063 struct ehci_hcd *ehci, 2064 struct ehci_sitd *sitd 2065) { 2066 ehci_err (ehci, "sitd_complete %p?\n", sitd); 2067 return 0; 2068} 2069 2070#endif /* USB_EHCI_SPLIT_ISO */ 2071 2072/*-------------------------------------------------------------------------*/ 2073 2074static void 2075scan_periodic (struct ehci_hcd *ehci) 2076{ 2077 unsigned frame, clock, now_uframe, mod; 2078 unsigned modified; 2079 2080 mod = ehci->periodic_size << 3; 2081 2082 /* 2083 * When running, scan from last scan point up to "now" 2084 * else clean up by scanning everything that's left. 2085 * Touches as few pages as possible: cache-friendly. 
static void
scan_periodic (struct ehci_hcd *ehci)
{
	unsigned frame, clock, now_uframe, mod;
	unsigned modified;

	mod = ehci->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	now_uframe = ehci->next_uframe;
	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		clock = ehci_readl(ehci, &ehci->regs->frame_index);
	else
		clock = now_uframe + mod - 1;
	clock %= mod;

	for (;;) {
		union ehci_shadow q, *q_p;
		__le32 type, *hw_p;
		unsigned uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}

restart:
		/* scan each element in frame's queue for completions */
		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);
		modified = 0;

		while (q.ptr != NULL) {
			unsigned uf;
			union ehci_shadow temp;
			int live;

			live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get (q.qh);
				type = Q_NEXT_TYPE (q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions (ehci, temp.qh);
				if (unlikely (list_empty (&temp.qh->qtd_list)))
					intr_deschedule (ehci, temp.qh);
				qh_put (temp.qh);
				break;
			case Q_TYPE_FSTN:
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				/* skip itds for later in the frame */
				rmb ();
				for (uf = live ? uframes : 8; uf < 8; uf++) {
					if (0 == (q.itd->hw_transaction [uf]
							& ITD_ACTIVE))
						continue;
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
					q = *q_p;
					break;
				}
				if (uf != 8)
					break;

				/* this one's ready ... HC won't cache the
				 * pointer for much longer, if at all.
				 */
				*q_p = q.itd->itd_next;
				*hw_p = q.itd->hw_next;
				type = Q_NEXT_TYPE (q.itd->hw_next);
				wmb();
				modified = itd_complete (ehci, q.itd);
				q = *q_p;
				break;
			case Q_TYPE_SITD:
				if ((q.sitd->hw_results & SITD_ACTIVE)
						&& live) {
					q_p = &q.sitd->sitd_next;
					hw_p = &q.sitd->hw_next;
					type = Q_NEXT_TYPE (q.sitd->hw_next);
					q = *q_p;
					break;
				}
				*q_p = q.sitd->sitd_next;
				*hw_p = q.sitd->hw_next;
				type = Q_NEXT_TYPE (q.sitd->hw_next);
				wmb();
				modified = sitd_complete (ehci, q.sitd);
				q = *q_p;
				break;
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				q.ptr = NULL;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely (modified))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME: if irq latencies climb, scanning can fall behind;
		// that should be rare, but... detect it, and just go all
		// the way around.  FLR might help detect this case, so long
		// as latencies don't exceed periodic_size msec
		// (default 1.024 sec).

		if (now_uframe == clock) {
			unsigned now;

			if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
				break;
			ehci->next_uframe = now_uframe;
			now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}