1/* 2 * MUSB OTG driver peripheral support 3 * 4 * Copyright 2005 Mentor Graphics Corporation 5 * Copyright (C) 2005-2006 by Texas Instruments 6 * Copyright (C) 2006-2007 Nokia Corporation 7 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * version 2 as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 21 * 02110-1301 USA 22 * 23 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 26 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 30 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 */ 35 36#include <linux/kernel.h> 37#include <linux/list.h> 38#include <linux/timer.h> 39#include <linux/module.h> 40#include <linux/smp.h> 41#include <linux/spinlock.h> 42#include <linux/delay.h> 43#include <linux/moduleparam.h> 44#include <linux/stat.h> 45#include <linux/dma-mapping.h> 46#include <linux/slab.h> 47 48#include "musb_core.h" 49 50 51 52/* ----------------------------------------------------------------------- */ 53 54/* 55 * Immediately complete a request. 56 * 57 * @param request the request to complete 58 * @param status the status to complete the request with 59 * Context: controller locked, IRQs blocked. 60 */ 61void musb_g_giveback( 62 struct musb_ep *ep, 63 struct usb_request *request, 64 int status) 65__releases(ep->musb->lock) 66__acquires(ep->musb->lock) 67{ 68 struct musb_request *req; 69 struct musb *musb; 70 int busy = ep->busy; 71 72 req = to_musb_request(request); 73 74 list_del(&request->list); 75 if (req->request.status == -EINPROGRESS) 76 req->request.status = status; 77 musb = req->musb; 78 79 ep->busy = 1; 80 spin_unlock(&musb->lock); 81 if (is_dma_capable()) { 82 if (req->mapped) { 83 dma_unmap_single(musb->controller, 84 req->request.dma, 85 req->request.length, 86 req->tx 87 ? DMA_TO_DEVICE 88 : DMA_FROM_DEVICE); 89 req->request.dma = DMA_ADDR_INVALID; 90 req->mapped = 0; 91 } else if (req->request.dma != DMA_ADDR_INVALID) 92 dma_sync_single_for_cpu(musb->controller, 93 req->request.dma, 94 req->request.length, 95 req->tx 96 ? DMA_TO_DEVICE 97 : DMA_FROM_DEVICE); 98 } 99 if (request->status == 0) 100 DBG(5, "%s done request %p, %d/%d\n", 101 ep->end_point.name, request, 102 req->request.actual, req->request.length); 103 else 104 DBG(2, "%s request %p, %d/%d fault %d\n", 105 ep->end_point.name, request, 106 req->request.actual, req->request.length, 107 request->status); 108 req->request.complete(&req->ep->end_point, &req->request); 109 spin_lock(&musb->lock); 110 ep->busy = busy; 111} 112 113/* ----------------------------------------------------------------------- */ 114 115/* 116 * Abort requests queued to an endpoint using the status. Synchronous. 117 * caller locked controller and blocked irqs, and selected this ep. 118 */ 119static void nuke(struct musb_ep *ep, const int status) 120{ 121 struct musb_request *req = NULL; 122 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; 123 124 ep->busy = 1; 125 126 if (is_dma_capable() && ep->dma) { 127 struct dma_controller *c = ep->musb->dma_controller; 128 int value; 129 130 if (ep->is_in) { 131 /* 132 * The programming guide says that we must not clear 133 * the DMAMODE bit before DMAENAB, so we only 134 * clear it in the second write... 135 */ 136 musb_writew(epio, MUSB_TXCSR, 137 MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); 138 musb_writew(epio, MUSB_TXCSR, 139 0 | MUSB_TXCSR_FLUSHFIFO); 140 } else { 141 musb_writew(epio, MUSB_RXCSR, 142 0 | MUSB_RXCSR_FLUSHFIFO); 143 musb_writew(epio, MUSB_RXCSR, 144 0 | MUSB_RXCSR_FLUSHFIFO); 145 } 146 147 value = c->channel_abort(ep->dma); 148 DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value); 149 c->channel_release(ep->dma); 150 ep->dma = NULL; 151 } 152 153 while (!list_empty(&(ep->req_list))) { 154 req = container_of(ep->req_list.next, struct musb_request, 155 request.list); 156 musb_g_giveback(ep, &req->request, status); 157 } 158} 159 160/* ----------------------------------------------------------------------- */ 161 162/* Data transfers - pure PIO, pure DMA, or mixed mode */ 163 164/* 165 * This assumes the separate CPPI engine is responding to DMA requests 166 * from the usb core ... sequenced a bit differently from mentor dma. 167 */ 168 169static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) 170{ 171 if (can_bulk_split(musb, ep->type)) 172 return ep->hw_ep->max_packet_sz_tx; 173 else 174 return ep->packet_sz; 175} 176 177 178#ifdef CONFIG_USB_INVENTRA_DMA 179 180/* Peripheral tx (IN) using Mentor DMA works as follows: 181 Only mode 0 is used for transfers <= wPktSize, 182 mode 1 is used for larger transfers, 183 184 One of the following happens: 185 - Host sends IN token which causes an endpoint interrupt 186 -> TxAvail 187 -> if DMA is currently busy, exit. 188 -> if queue is non-empty, txstate(). 189 190 - Request is queued by the gadget driver. 191 -> if queue was previously empty, txstate() 192 193 txstate() 194 -> start 195 /\ -> setup DMA 196 | (data is transferred to the FIFO, then sent out when 197 | IN token(s) are recd from Host. 198 | -> DMA interrupt on completion 199 | calls TxAvail. 200 | -> stop DMA, ~DMAENAB, 201 | -> set TxPktRdy for last short pkt or zlp 202 | -> Complete Request 203 | -> Continue next request (call txstate) 204 |___________________________________| 205 206 * Non-Mentor DMA engines can of course work differently, such as by 207 * upleveling from irq-per-packet to irq-per-buffer. 208 */ 209 210#endif 211 212/* 213 * An endpoint is transmitting data. This can be called either from 214 * the IRQ routine or from ep.queue() to kickstart a request on an 215 * endpoint. 216 * 217 * Context: controller locked, IRQs blocked, endpoint selected 218 */ 219static void txstate(struct musb *musb, struct musb_request *req) 220{ 221 u8 epnum = req->epnum; 222 struct musb_ep *musb_ep; 223 void __iomem *epio = musb->endpoints[epnum].regs; 224 struct usb_request *request; 225 u16 fifo_count = 0, csr; 226 int use_dma = 0; 227 228 musb_ep = req->ep; 229 230 /* we shouldn't get here while DMA is active ... but we do ... */ 231 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 232 DBG(4, "dma pending...\n"); 233 return; 234 } 235 236 /* read TXCSR before */ 237 csr = musb_readw(epio, MUSB_TXCSR); 238 239 request = &req->request; 240 fifo_count = min(max_ep_writesize(musb, musb_ep), 241 (int)(request->length - request->actual)); 242 243 if (csr & MUSB_TXCSR_TXPKTRDY) { 244 DBG(5, "%s old packet still ready , txcsr %03x\n", 245 musb_ep->end_point.name, csr); 246 return; 247 } 248 249 if (csr & MUSB_TXCSR_P_SENDSTALL) { 250 DBG(5, "%s stalling, txcsr %03x\n", 251 musb_ep->end_point.name, csr); 252 return; 253 } 254 255 DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", 256 epnum, musb_ep->packet_sz, fifo_count, 257 csr); 258 259#ifndef CONFIG_MUSB_PIO_ONLY 260 if (is_dma_capable() && musb_ep->dma) { 261 struct dma_controller *c = musb->dma_controller; 262 size_t request_size; 263 264 /* setup DMA, then program endpoint CSR */ 265 request_size = min_t(size_t, request->length - request->actual, 266 musb_ep->dma->max_len); 267 268 use_dma = (request->dma != DMA_ADDR_INVALID); 269 270 /* MUSB_TXCSR_P_ISO is still set correctly */ 271 272#ifdef CONFIG_USB_INVENTRA_DMA 273 { 274 if (request_size < musb_ep->packet_sz) 275 musb_ep->dma->desired_mode = 0; 276 else 277 musb_ep->dma->desired_mode = 1; 278 279 use_dma = use_dma && c->channel_program( 280 musb_ep->dma, musb_ep->packet_sz, 281 musb_ep->dma->desired_mode, 282 request->dma + request->actual, request_size); 283 if (use_dma) { 284 if (musb_ep->dma->desired_mode == 0) { 285 /* 286 * We must not clear the DMAMODE bit 287 * before the DMAENAB bit -- and the 288 * latter doesn't always get cleared 289 * before we get here... 290 */ 291 csr &= ~(MUSB_TXCSR_AUTOSET 292 | MUSB_TXCSR_DMAENAB); 293 musb_writew(epio, MUSB_TXCSR, csr 294 | MUSB_TXCSR_P_WZC_BITS); 295 csr &= ~MUSB_TXCSR_DMAMODE; 296 csr |= (MUSB_TXCSR_DMAENAB | 297 MUSB_TXCSR_MODE); 298 /* against programming guide */ 299 } else 300 csr |= (MUSB_TXCSR_AUTOSET 301 | MUSB_TXCSR_DMAENAB 302 | MUSB_TXCSR_DMAMODE 303 | MUSB_TXCSR_MODE); 304 305 csr &= ~MUSB_TXCSR_P_UNDERRUN; 306 musb_writew(epio, MUSB_TXCSR, csr); 307 } 308 } 309 310#elif defined(CONFIG_USB_TI_CPPI_DMA) 311 /* program endpoint CSR first, then setup DMA */ 312 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); 313 csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | 314 MUSB_TXCSR_MODE; 315 musb_writew(epio, MUSB_TXCSR, 316 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) 317 | csr); 318 319 /* ensure writebuffer is empty */ 320 csr = musb_readw(epio, MUSB_TXCSR); 321 322 /* NOTE host side sets DMAENAB later than this; both are 323 * OK since the transfer dma glue (between CPPI and Mentor 324 * fifos) just tells CPPI it could start. Data only moves 325 * to the USB TX fifo when both fifos are ready. 326 */ 327 328 /* "mode" is irrelevant here; handle terminating ZLPs like 329 * PIO does, since the hardware RNDIS mode seems unreliable 330 * except for the last-packet-is-already-short case. 331 */ 332 use_dma = use_dma && c->channel_program( 333 musb_ep->dma, musb_ep->packet_sz, 334 0, 335 request->dma + request->actual, 336 request_size); 337 if (!use_dma) { 338 c->channel_release(musb_ep->dma); 339 musb_ep->dma = NULL; 340 csr &= ~MUSB_TXCSR_DMAENAB; 341 musb_writew(epio, MUSB_TXCSR, csr); 342 /* invariant: prequest->buf is non-null */ 343 } 344#elif defined(CONFIG_USB_TUSB_OMAP_DMA) 345 use_dma = use_dma && c->channel_program( 346 musb_ep->dma, musb_ep->packet_sz, 347 request->zero, 348 request->dma + request->actual, 349 request_size); 350#endif 351 } 352#endif 353 354 if (!use_dma) { 355 musb_write_fifo(musb_ep->hw_ep, fifo_count, 356 (u8 *) (request->buf + request->actual)); 357 request->actual += fifo_count; 358 csr |= MUSB_TXCSR_TXPKTRDY; 359 csr &= ~MUSB_TXCSR_P_UNDERRUN; 360 musb_writew(epio, MUSB_TXCSR, csr); 361 } 362 363 /* host may already have the data when this message shows... */ 364 DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", 365 musb_ep->end_point.name, use_dma ? "dma" : "pio", 366 request->actual, request->length, 367 musb_readw(epio, MUSB_TXCSR), 368 fifo_count, 369 musb_readw(epio, MUSB_TXMAXP)); 370} 371 372/* 373 * FIFO state update (e.g. data ready). 374 * Called from IRQ, with controller locked. 375 */ 376void musb_g_tx(struct musb *musb, u8 epnum) 377{ 378 u16 csr; 379 struct usb_request *request; 380 u8 __iomem *mbase = musb->mregs; 381 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; 382 void __iomem *epio = musb->endpoints[epnum].regs; 383 struct dma_channel *dma; 384 385 musb_ep_select(mbase, epnum); 386 request = next_request(musb_ep); 387 388 csr = musb_readw(epio, MUSB_TXCSR); 389 DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); 390 391 dma = is_dma_capable() ? musb_ep->dma : NULL; 392 393 /* 394 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX 395 * probably rates reporting as a host error. 396 */ 397 if (csr & MUSB_TXCSR_P_SENTSTALL) { 398 csr |= MUSB_TXCSR_P_WZC_BITS; 399 csr &= ~MUSB_TXCSR_P_SENTSTALL; 400 musb_writew(epio, MUSB_TXCSR, csr); 401 return; 402 } 403 404 if (csr & MUSB_TXCSR_P_UNDERRUN) { 405 /* We NAKed, no big deal... little reason to care. */ 406 csr |= MUSB_TXCSR_P_WZC_BITS; 407 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); 408 musb_writew(epio, MUSB_TXCSR, csr); 409 DBG(20, "underrun on ep%d, req %p\n", epnum, request); 410 } 411 412 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 413 /* 414 * SHOULD NOT HAPPEN... has with CPPI though, after 415 * changing SENDSTALL (and other cases); harmless? 416 */ 417 DBG(5, "%s dma still busy?\n", musb_ep->end_point.name); 418 return; 419 } 420 421 if (request) { 422 u8 is_dma = 0; 423 424 if (dma && (csr & MUSB_TXCSR_DMAENAB)) { 425 is_dma = 1; 426 csr |= MUSB_TXCSR_P_WZC_BITS; 427 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 428 MUSB_TXCSR_TXPKTRDY); 429 musb_writew(epio, MUSB_TXCSR, csr); 430 /* Ensure writebuffer is empty. */ 431 csr = musb_readw(epio, MUSB_TXCSR); 432 request->actual += musb_ep->dma->actual_len; 433 DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n", 434 epnum, csr, musb_ep->dma->actual_len, request); 435 } 436 437 if (is_dma || request->actual == request->length) { 438 /* 439 * First, maybe a terminating short packet. Some DMA 440 * engines might handle this by themselves. 441 */ 442 if ((request->zero && request->length 443 && request->length % musb_ep->packet_sz == 0) 444#ifdef CONFIG_USB_INVENTRA_DMA 445 || (is_dma && (!dma->desired_mode || 446 (request->actual & 447 (musb_ep->packet_sz - 1)))) 448#endif 449 ) { 450 /* 451 * On DMA completion, FIFO may not be 452 * available yet... 453 */ 454 if (csr & MUSB_TXCSR_TXPKTRDY) 455 return; 456 457 DBG(4, "sending zero pkt\n"); 458 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE 459 | MUSB_TXCSR_TXPKTRDY); 460 request->zero = 0; 461 } 462 463 if (request->actual == request->length) { 464 musb_g_giveback(musb_ep, request, 0); 465 request = musb_ep->desc ? next_request(musb_ep) : NULL; 466 if (!request) { 467 DBG(4, "%s idle now\n", 468 musb_ep->end_point.name); 469 return; 470 } 471 } 472 } 473 474 txstate(musb, to_musb_request(request)); 475 } 476} 477 478/* ------------------------------------------------------------ */ 479 480#ifdef CONFIG_USB_INVENTRA_DMA 481 482/* Peripheral rx (OUT) using Mentor DMA works as follows: 483 - Only mode 0 is used. 484 485 - Request is queued by the gadget class driver. 486 -> if queue was previously empty, rxstate() 487 488 - Host sends OUT token which causes an endpoint interrupt 489 /\ -> RxReady 490 | -> if request queued, call rxstate 491 | /\ -> setup DMA 492 | | -> DMA interrupt on completion 493 | | -> RxReady 494 | | -> stop DMA 495 | | -> ack the read 496 | | -> if data recd = max expected 497 | | by the request, or host 498 | | sent a short packet, 499 | | complete the request, 500 | | and start the next one. 501 | |_____________________________________| 502 | else just wait for the host 503 | to send the next OUT token. 504 |__________________________________________________| 505 506 * Non-Mentor DMA engines can of course work differently. 507 */ 508 509#endif 510 511/* 512 * Context: controller locked, IRQs blocked, endpoint selected 513 */ 514static void rxstate(struct musb *musb, struct musb_request *req) 515{ 516 const u8 epnum = req->epnum; 517 struct usb_request *request = &req->request; 518 struct musb_ep *musb_ep; 519 void __iomem *epio = musb->endpoints[epnum].regs; 520 unsigned fifo_count = 0; 521 u16 len; 522 u16 csr = musb_readw(epio, MUSB_RXCSR); 523 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 524 525 if (hw_ep->is_shared_fifo) 526 musb_ep = &hw_ep->ep_in; 527 else 528 musb_ep = &hw_ep->ep_out; 529 530 len = musb_ep->packet_sz; 531 532 /* We shouldn't get here while DMA is active, but we do... */ 533 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 534 DBG(4, "DMA pending...\n"); 535 return; 536 } 537 538 if (csr & MUSB_RXCSR_P_SENDSTALL) { 539 DBG(5, "%s stalling, RXCSR %04x\n", 540 musb_ep->end_point.name, csr); 541 return; 542 } 543 544 if (is_cppi_enabled() && musb_ep->dma) { 545 struct dma_controller *c = musb->dma_controller; 546 struct dma_channel *channel = musb_ep->dma; 547 548 /* NOTE: CPPI won't actually stop advancing the DMA 549 * queue after short packet transfers, so this is almost 550 * always going to run as IRQ-per-packet DMA so that 551 * faults will be handled correctly. 552 */ 553 if (c->channel_program(channel, 554 musb_ep->packet_sz, 555 !request->short_not_ok, 556 request->dma + request->actual, 557 request->length - request->actual)) { 558 559 /* make sure that if an rxpkt arrived after the irq, 560 * the cppi engine will be ready to take it as soon 561 * as DMA is enabled 562 */ 563 csr &= ~(MUSB_RXCSR_AUTOCLEAR 564 | MUSB_RXCSR_DMAMODE); 565 csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; 566 musb_writew(epio, MUSB_RXCSR, csr); 567 return; 568 } 569 } 570 571 if (csr & MUSB_RXCSR_RXPKTRDY) { 572 len = musb_readw(epio, MUSB_RXCOUNT); 573 if (request->actual < request->length) { 574#ifdef CONFIG_USB_INVENTRA_DMA 575 if (is_dma_capable() && musb_ep->dma) { 576 struct dma_controller *c; 577 struct dma_channel *channel; 578 int use_dma = 0; 579 580 c = musb->dma_controller; 581 channel = musb_ep->dma; 582 583 /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in 584 * mode 0 only. So we do not get endpoint interrupts due to DMA 585 * completion. We only get interrupts from DMA controller. 586 * 587 * We could operate in DMA mode 1 if we knew the size of the tranfer 588 * in advance. For mass storage class, request->length = what the host 589 * sends, so that'd work. But for pretty much everything else, 590 * request->length is routinely more than what the host sends. For 591 * most these gadgets, end of is signified either by a short packet, 592 * or filling the last byte of the buffer. (Sending extra data in 593 * that last pckate should trigger an overflow fault.) But in mode 1, 594 * we don't get DMA completion interrrupt for short packets. 595 * 596 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), 597 * to get endpoint interrupt on every DMA req, but that didn't seem 598 * to work reliably. 599 * 600 * REVISIT an updated g_file_storage can set req->short_not_ok, which 601 * then becomes usable as a runtime "use mode 1" hint... 602 */ 603 604 csr |= MUSB_RXCSR_DMAENAB; 605 csr |= MUSB_RXCSR_AUTOCLEAR; 606#ifdef USE_MODE1 607 /* csr |= MUSB_RXCSR_DMAMODE; */ 608 609 /* this special sequence (enabling and then 610 * disabling MUSB_RXCSR_DMAMODE) is required 611 * to get DMAReq to activate 612 */ 613 musb_writew(epio, MUSB_RXCSR, 614 csr | MUSB_RXCSR_DMAMODE); 615#endif 616 musb_writew(epio, MUSB_RXCSR, csr); 617 618 if (request->actual < request->length) { 619 int transfer_size = 0; 620#ifdef USE_MODE1 621 transfer_size = min(request->length - request->actual, 622 channel->max_len); 623#else 624 transfer_size = min(request->length - request->actual, 625 (unsigned)len); 626#endif 627 if (transfer_size <= musb_ep->packet_sz) 628 musb_ep->dma->desired_mode = 0; 629 else 630 musb_ep->dma->desired_mode = 1; 631 632 use_dma = c->channel_program( 633 channel, 634 musb_ep->packet_sz, 635 channel->desired_mode, 636 request->dma 637 + request->actual, 638 transfer_size); 639 } 640 641 if (use_dma) 642 return; 643 } 644#endif /* Mentor's DMA */ 645 646 fifo_count = request->length - request->actual; 647 DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", 648 musb_ep->end_point.name, 649 len, fifo_count, 650 musb_ep->packet_sz); 651 652 fifo_count = min_t(unsigned, len, fifo_count); 653 654#ifdef CONFIG_USB_TUSB_OMAP_DMA 655 if (tusb_dma_omap() && musb_ep->dma) { 656 struct dma_controller *c = musb->dma_controller; 657 struct dma_channel *channel = musb_ep->dma; 658 u32 dma_addr = request->dma + request->actual; 659 int ret; 660 661 ret = c->channel_program(channel, 662 musb_ep->packet_sz, 663 channel->desired_mode, 664 dma_addr, 665 fifo_count); 666 if (ret) 667 return; 668 } 669#endif 670 671 musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) 672 (request->buf + request->actual)); 673 request->actual += fifo_count; 674 675 /* REVISIT if we left anything in the fifo, flush 676 * it and report -EOVERFLOW 677 */ 678 679 /* ack the read! */ 680 csr |= MUSB_RXCSR_P_WZC_BITS; 681 csr &= ~MUSB_RXCSR_RXPKTRDY; 682 musb_writew(epio, MUSB_RXCSR, csr); 683 } 684 } 685 686 /* reach the end or short packet detected */ 687 if (request->actual == request->length || len < musb_ep->packet_sz) 688 musb_g_giveback(musb_ep, request, 0); 689} 690 691/* 692 * Data ready for a request; called from IRQ 693 */ 694void musb_g_rx(struct musb *musb, u8 epnum) 695{ 696 u16 csr; 697 struct usb_request *request; 698 void __iomem *mbase = musb->mregs; 699 struct musb_ep *musb_ep; 700 void __iomem *epio = musb->endpoints[epnum].regs; 701 struct dma_channel *dma; 702 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 703 704 if (hw_ep->is_shared_fifo) 705 musb_ep = &hw_ep->ep_in; 706 else 707 musb_ep = &hw_ep->ep_out; 708 709 musb_ep_select(mbase, epnum); 710 711 request = next_request(musb_ep); 712 if (!request) 713 return; 714 715 csr = musb_readw(epio, MUSB_RXCSR); 716 dma = is_dma_capable() ? musb_ep->dma : NULL; 717 718 DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, 719 csr, dma ? " (dma)" : "", request); 720 721 if (csr & MUSB_RXCSR_P_SENTSTALL) { 722 csr |= MUSB_RXCSR_P_WZC_BITS; 723 csr &= ~MUSB_RXCSR_P_SENTSTALL; 724 musb_writew(epio, MUSB_RXCSR, csr); 725 return; 726 } 727 728 if (csr & MUSB_RXCSR_P_OVERRUN) { 729 /* csr |= MUSB_RXCSR_P_WZC_BITS; */ 730 csr &= ~MUSB_RXCSR_P_OVERRUN; 731 musb_writew(epio, MUSB_RXCSR, csr); 732 733 DBG(3, "%s iso overrun on %p\n", musb_ep->name, request); 734 if (request && request->status == -EINPROGRESS) 735 request->status = -EOVERFLOW; 736 } 737 if (csr & MUSB_RXCSR_INCOMPRX) { 738 /* REVISIT not necessarily an error */ 739 DBG(4, "%s, incomprx\n", musb_ep->end_point.name); 740 } 741 742 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 743 /* "should not happen"; likely RXPKTRDY pending for DMA */ 744 DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1, 745 "%s busy, csr %04x\n", 746 musb_ep->end_point.name, csr); 747 return; 748 } 749 750 if (dma && (csr & MUSB_RXCSR_DMAENAB)) { 751 csr &= ~(MUSB_RXCSR_AUTOCLEAR 752 | MUSB_RXCSR_DMAENAB 753 | MUSB_RXCSR_DMAMODE); 754 musb_writew(epio, MUSB_RXCSR, 755 MUSB_RXCSR_P_WZC_BITS | csr); 756 757 request->actual += musb_ep->dma->actual_len; 758 759 DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", 760 epnum, csr, 761 musb_readw(epio, MUSB_RXCSR), 762 musb_ep->dma->actual_len, request); 763 764#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) 765 /* Autoclear doesn't clear RxPktRdy for short packets */ 766 if ((dma->desired_mode == 0) 767 || (dma->actual_len 768 & (musb_ep->packet_sz - 1))) { 769 /* ack the read! */ 770 csr &= ~MUSB_RXCSR_RXPKTRDY; 771 musb_writew(epio, MUSB_RXCSR, csr); 772 } 773 774 /* incomplete, and not short? wait for next IN packet */ 775 if ((request->actual < request->length) 776 && (musb_ep->dma->actual_len 777 == musb_ep->packet_sz)) 778 return; 779#endif 780 musb_g_giveback(musb_ep, request, 0); 781 782 request = next_request(musb_ep); 783 if (!request) 784 return; 785 } 786 787 /* analyze request if the ep is hot */ 788 if (request) 789 rxstate(musb, to_musb_request(request)); 790 else 791 DBG(3, "packet waiting for %s%s request\n", 792 musb_ep->desc ? "" : "inactive ", 793 musb_ep->end_point.name); 794 return; 795} 796 797/* ------------------------------------------------------------ */ 798 799static int musb_gadget_enable(struct usb_ep *ep, 800 const struct usb_endpoint_descriptor *desc) 801{ 802 unsigned long flags; 803 struct musb_ep *musb_ep; 804 struct musb_hw_ep *hw_ep; 805 void __iomem *regs; 806 struct musb *musb; 807 void __iomem *mbase; 808 u8 epnum; 809 u16 csr; 810 unsigned tmp; 811 int status = -EINVAL; 812 813 if (!ep || !desc) 814 return -EINVAL; 815 816 musb_ep = to_musb_ep(ep); 817 hw_ep = musb_ep->hw_ep; 818 regs = hw_ep->regs; 819 musb = musb_ep->musb; 820 mbase = musb->mregs; 821 epnum = musb_ep->current_epnum; 822 823 spin_lock_irqsave(&musb->lock, flags); 824 825 if (musb_ep->desc) { 826 status = -EBUSY; 827 goto fail; 828 } 829 musb_ep->type = usb_endpoint_type(desc); 830 831 /* check direction and (later) maxpacket size against endpoint */ 832 if (usb_endpoint_num(desc) != epnum) 833 goto fail; 834 835 /* REVISIT this rules out high bandwidth periodic transfers */ 836 tmp = le16_to_cpu(desc->wMaxPacketSize); 837 if (tmp & ~0x07ff) 838 goto fail; 839 musb_ep->packet_sz = tmp; 840 841 /* enable the interrupts for the endpoint, set the endpoint 842 * packet size (or fail), set the mode, clear the fifo 843 */ 844 musb_ep_select(mbase, epnum); 845 if (usb_endpoint_dir_in(desc)) { 846 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); 847 848 if (hw_ep->is_shared_fifo) 849 musb_ep->is_in = 1; 850 if (!musb_ep->is_in) 851 goto fail; 852 if (tmp > hw_ep->max_packet_sz_tx) 853 goto fail; 854 855 int_txe |= (1 << epnum); 856 musb_writew(mbase, MUSB_INTRTXE, int_txe); 857 858 /* REVISIT if can_bulk_split(), use by updating "tmp"; 859 * likewise high bandwidth periodic tx 860 */ 861 /* Set TXMAXP with the FIFO size of the endpoint 862 * to disable double buffering mode. Currently, It seems that double 863 * buffering has problem if musb RTL revision number < 2.0. 864 */ 865 if (musb->hwvers < MUSB_HWVERS_2000) 866 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); 867 else 868 musb_writew(regs, MUSB_TXMAXP, tmp); 869 870 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 871 if (musb_readw(regs, MUSB_TXCSR) 872 & MUSB_TXCSR_FIFONOTEMPTY) 873 csr |= MUSB_TXCSR_FLUSHFIFO; 874 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 875 csr |= MUSB_TXCSR_P_ISO; 876 877 /* set twice in case of double buffering */ 878 musb_writew(regs, MUSB_TXCSR, csr); 879 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ 880 musb_writew(regs, MUSB_TXCSR, csr); 881 882 } else { 883 u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); 884 885 if (hw_ep->is_shared_fifo) 886 musb_ep->is_in = 0; 887 if (musb_ep->is_in) 888 goto fail; 889 if (tmp > hw_ep->max_packet_sz_rx) 890 goto fail; 891 892 int_rxe |= (1 << epnum); 893 musb_writew(mbase, MUSB_INTRRXE, int_rxe); 894 895 /* REVISIT if can_bulk_combine() use by updating "tmp" 896 * likewise high bandwidth periodic rx 897 */ 898 /* Set RXMAXP with the FIFO size of the endpoint 899 * to disable double buffering mode. 900 */ 901 if (musb->hwvers < MUSB_HWVERS_2000) 902 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx); 903 else 904 musb_writew(regs, MUSB_RXMAXP, tmp); 905 906 /* force shared fifo to OUT-only mode */ 907 if (hw_ep->is_shared_fifo) { 908 csr = musb_readw(regs, MUSB_TXCSR); 909 csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); 910 musb_writew(regs, MUSB_TXCSR, csr); 911 } 912 913 csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; 914 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 915 csr |= MUSB_RXCSR_P_ISO; 916 else if (musb_ep->type == USB_ENDPOINT_XFER_INT) 917 csr |= MUSB_RXCSR_DISNYET; 918 919 /* set twice in case of double buffering */ 920 musb_writew(regs, MUSB_RXCSR, csr); 921 musb_writew(regs, MUSB_RXCSR, csr); 922 } 923 924 /* NOTE: all the I/O code _should_ work fine without DMA, in case 925 * for some reason you run out of channels here. 926 */ 927 if (is_dma_capable() && musb->dma_controller) { 928 struct dma_controller *c = musb->dma_controller; 929 930 musb_ep->dma = c->channel_alloc(c, hw_ep, 931 (desc->bEndpointAddress & USB_DIR_IN)); 932 } else 933 musb_ep->dma = NULL; 934 935 musb_ep->desc = desc; 936 musb_ep->busy = 0; 937 musb_ep->wedged = 0; 938 status = 0; 939 940 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", 941 musb_driver_name, musb_ep->end_point.name, 942 ({ char *s; switch (musb_ep->type) { 943 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; 944 case USB_ENDPOINT_XFER_INT: s = "int"; break; 945 default: s = "iso"; break; 946 }; s; }), 947 musb_ep->is_in ? "IN" : "OUT", 948 musb_ep->dma ? "dma, " : "", 949 musb_ep->packet_sz); 950 951 schedule_work(&musb->irq_work); 952 953fail: 954 spin_unlock_irqrestore(&musb->lock, flags); 955 return status; 956} 957 958/* 959 * Disable an endpoint flushing all requests queued. 960 */ 961static int musb_gadget_disable(struct usb_ep *ep) 962{ 963 unsigned long flags; 964 struct musb *musb; 965 u8 epnum; 966 struct musb_ep *musb_ep; 967 void __iomem *epio; 968 int status = 0; 969 970 musb_ep = to_musb_ep(ep); 971 musb = musb_ep->musb; 972 epnum = musb_ep->current_epnum; 973 epio = musb->endpoints[epnum].regs; 974 975 spin_lock_irqsave(&musb->lock, flags); 976 musb_ep_select(musb->mregs, epnum); 977 978 /* zero the endpoint sizes */ 979 if (musb_ep->is_in) { 980 u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); 981 int_txe &= ~(1 << epnum); 982 musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); 983 musb_writew(epio, MUSB_TXMAXP, 0); 984 } else { 985 u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); 986 int_rxe &= ~(1 << epnum); 987 musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); 988 musb_writew(epio, MUSB_RXMAXP, 0); 989 } 990 991 musb_ep->desc = NULL; 992 993 /* abort all pending DMA and requests */ 994 nuke(musb_ep, -ESHUTDOWN); 995 996 schedule_work(&musb->irq_work); 997 998 spin_unlock_irqrestore(&(musb->lock), flags); 999 1000 DBG(2, "%s\n", musb_ep->end_point.name); 1001 1002 return status; 1003} 1004 1005/* 1006 * Allocate a request for an endpoint. 1007 * Reused by ep0 code. 1008 */ 1009struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) 1010{ 1011 struct musb_ep *musb_ep = to_musb_ep(ep); 1012 struct musb_request *request = NULL; 1013 1014 request = kzalloc(sizeof *request, gfp_flags); 1015 if (request) { 1016 INIT_LIST_HEAD(&request->request.list); 1017 request->request.dma = DMA_ADDR_INVALID; 1018 request->epnum = musb_ep->current_epnum; 1019 request->ep = musb_ep; 1020 } 1021 1022 return &request->request; 1023} 1024 1025/* 1026 * Free a request 1027 * Reused by ep0 code. 1028 */ 1029void musb_free_request(struct usb_ep *ep, struct usb_request *req) 1030{ 1031 kfree(to_musb_request(req)); 1032} 1033 1034static LIST_HEAD(buffers); 1035 1036struct free_record { 1037 struct list_head list; 1038 struct device *dev; 1039 unsigned bytes; 1040 dma_addr_t dma; 1041}; 1042 1043/* 1044 * Context: controller locked, IRQs blocked. 1045 */ 1046void musb_ep_restart(struct musb *musb, struct musb_request *req) 1047{ 1048 DBG(3, "<== %s request %p len %u on hw_ep%d\n", 1049 req->tx ? "TX/IN" : "RX/OUT", 1050 &req->request, req->request.length, req->epnum); 1051 1052 musb_ep_select(musb->mregs, req->epnum); 1053 if (req->tx) 1054 txstate(musb, req); 1055 else 1056 rxstate(musb, req); 1057} 1058 1059static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, 1060 gfp_t gfp_flags) 1061{ 1062 struct musb_ep *musb_ep; 1063 struct musb_request *request; 1064 struct musb *musb; 1065 int status = 0; 1066 unsigned long lockflags; 1067 1068 if (!ep || !req) 1069 return -EINVAL; 1070 if (!req->buf) 1071 return -ENODATA; 1072 1073 musb_ep = to_musb_ep(ep); 1074 musb = musb_ep->musb; 1075 1076 request = to_musb_request(req); 1077 request->musb = musb; 1078 1079 if (request->ep != musb_ep) 1080 return -EINVAL; 1081 1082 DBG(4, "<== to %s request=%p\n", ep->name, req); 1083 1084 /* request is mine now... */ 1085 request->request.actual = 0; 1086 request->request.status = -EINPROGRESS; 1087 request->epnum = musb_ep->current_epnum; 1088 request->tx = musb_ep->is_in; 1089 1090 if (is_dma_capable() && musb_ep->dma) { 1091 if (request->request.dma == DMA_ADDR_INVALID) { 1092 request->request.dma = dma_map_single( 1093 musb->controller, 1094 request->request.buf, 1095 request->request.length, 1096 request->tx 1097 ? DMA_TO_DEVICE 1098 : DMA_FROM_DEVICE); 1099 request->mapped = 1; 1100 } else { 1101 dma_sync_single_for_device(musb->controller, 1102 request->request.dma, 1103 request->request.length, 1104 request->tx 1105 ? DMA_TO_DEVICE 1106 : DMA_FROM_DEVICE); 1107 request->mapped = 0; 1108 } 1109 } else if (!req->buf) { 1110 return -ENODATA; 1111 } else 1112 request->mapped = 0; 1113 1114 spin_lock_irqsave(&musb->lock, lockflags); 1115 1116 /* don't queue if the ep is down */ 1117 if (!musb_ep->desc) { 1118 DBG(4, "req %p queued to %s while ep %s\n", 1119 req, ep->name, "disabled"); 1120 status = -ESHUTDOWN; 1121 goto cleanup; 1122 } 1123 1124 /* add request to the list */ 1125 list_add_tail(&(request->request.list), &(musb_ep->req_list)); 1126 1127 /* it this is the head of the queue, start i/o ... */ 1128 if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next) 1129 musb_ep_restart(musb, request); 1130 1131cleanup: 1132 spin_unlock_irqrestore(&musb->lock, lockflags); 1133 return status; 1134} 1135 1136static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) 1137{ 1138 struct musb_ep *musb_ep = to_musb_ep(ep); 1139 struct usb_request *r; 1140 unsigned long flags; 1141 int status = 0; 1142 struct musb *musb = musb_ep->musb; 1143 1144 if (!ep || !request || to_musb_request(request)->ep != musb_ep) 1145 return -EINVAL; 1146 1147 spin_lock_irqsave(&musb->lock, flags); 1148 1149 list_for_each_entry(r, &musb_ep->req_list, list) { 1150 if (r == request) 1151 break; 1152 } 1153 if (r != request) { 1154 DBG(3, "request %p not queued to %s\n", request, ep->name); 1155 status = -EINVAL; 1156 goto done; 1157 } 1158 1159 /* if the hardware doesn't have the request, easy ... */ 1160 if (musb_ep->req_list.next != &request->list || musb_ep->busy) 1161 musb_g_giveback(musb_ep, request, -ECONNRESET); 1162 1163 /* ... else abort the dma transfer ... */ 1164 else if (is_dma_capable() && musb_ep->dma) { 1165 struct dma_controller *c = musb->dma_controller; 1166 1167 musb_ep_select(musb->mregs, musb_ep->current_epnum); 1168 if (c->channel_abort) 1169 status = c->channel_abort(musb_ep->dma); 1170 else 1171 status = -EBUSY; 1172 if (status == 0) 1173 musb_g_giveback(musb_ep, request, -ECONNRESET); 1174 } else { 1175 /* NOTE: by sticking to easily tested hardware/driver states, 1176 * we leave counting of in-flight packets imprecise. 1177 */ 1178 musb_g_giveback(musb_ep, request, -ECONNRESET); 1179 } 1180 1181done: 1182 spin_unlock_irqrestore(&musb->lock, flags); 1183 return status; 1184} 1185 1186/* 1187 * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any 1188 * data but will queue requests. 1189 * 1190 * exported to ep0 code 1191 */ 1192static int musb_gadget_set_halt(struct usb_ep *ep, int value) 1193{ 1194 struct musb_ep *musb_ep = to_musb_ep(ep); 1195 u8 epnum = musb_ep->current_epnum; 1196 struct musb *musb = musb_ep->musb; 1197 void __iomem *epio = musb->endpoints[epnum].regs; 1198 void __iomem *mbase; 1199 unsigned long flags; 1200 u16 csr; 1201 struct musb_request *request; 1202 int status = 0; 1203 1204 if (!ep) 1205 return -EINVAL; 1206 mbase = musb->mregs; 1207 1208 spin_lock_irqsave(&musb->lock, flags); 1209 1210 if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { 1211 status = -EINVAL; 1212 goto done; 1213 } 1214 1215 musb_ep_select(mbase, epnum); 1216 1217 request = to_musb_request(next_request(musb_ep)); 1218 if (value) { 1219 if (request) { 1220 DBG(3, "request in progress, cannot halt %s\n", 1221 ep->name); 1222 status = -EAGAIN; 1223 goto done; 1224 } 1225 /* Cannot portably stall with non-empty FIFO */ 1226 if (musb_ep->is_in) { 1227 csr = musb_readw(epio, MUSB_TXCSR); 1228 if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1229 DBG(3, "FIFO busy, cannot halt %s\n", ep->name); 1230 status = -EAGAIN; 1231 goto done; 1232 } 1233 } 1234 } else 1235 musb_ep->wedged = 0; 1236 1237 /* set/clear the stall and toggle bits */ 1238 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); 1239 if (musb_ep->is_in) { 1240 csr = musb_readw(epio, MUSB_TXCSR); 1241 csr |= MUSB_TXCSR_P_WZC_BITS 1242 | MUSB_TXCSR_CLRDATATOG; 1243 if (value) 1244 csr |= MUSB_TXCSR_P_SENDSTALL; 1245 else 1246 csr &= ~(MUSB_TXCSR_P_SENDSTALL 1247 | MUSB_TXCSR_P_SENTSTALL); 1248 csr &= ~MUSB_TXCSR_TXPKTRDY; 1249 musb_writew(epio, MUSB_TXCSR, csr); 1250 } else { 1251 csr = musb_readw(epio, MUSB_RXCSR); 1252 csr |= MUSB_RXCSR_P_WZC_BITS 1253 | MUSB_RXCSR_FLUSHFIFO 1254 | MUSB_RXCSR_CLRDATATOG; 1255 if (value) 1256 csr |= MUSB_RXCSR_P_SENDSTALL; 1257 else 1258 csr &= ~(MUSB_RXCSR_P_SENDSTALL 1259 | MUSB_RXCSR_P_SENTSTALL); 1260 musb_writew(epio, MUSB_RXCSR, csr); 1261 } 1262 1263 /* maybe start the first request in the queue */ 1264 if (!musb_ep->busy && !value && request) { 1265 DBG(3, "restarting the request\n"); 1266 musb_ep_restart(musb, request); 1267 } 1268 1269done: 1270 spin_unlock_irqrestore(&musb->lock, flags); 1271 return status; 1272} 1273 1274/* 1275 * Sets the halt feature with the clear requests ignored 1276 */ 1277static int musb_gadget_set_wedge(struct usb_ep *ep) 1278{ 1279 struct musb_ep *musb_ep = to_musb_ep(ep); 1280 1281 if (!ep) 1282 return -EINVAL; 1283 1284 musb_ep->wedged = 1; 1285 1286 return usb_ep_set_halt(ep); 1287} 1288 1289static int musb_gadget_fifo_status(struct usb_ep *ep) 1290{ 1291 struct musb_ep *musb_ep = to_musb_ep(ep); 1292 void __iomem *epio = musb_ep->hw_ep->regs; 1293 int retval = -EINVAL; 1294 1295 if (musb_ep->desc && !musb_ep->is_in) { 1296 struct musb *musb = musb_ep->musb; 1297 int epnum = musb_ep->current_epnum; 1298 void __iomem *mbase = musb->mregs; 1299 unsigned long flags; 1300 1301 spin_lock_irqsave(&musb->lock, flags); 1302 1303 musb_ep_select(mbase, epnum); 1304 retval = musb_readw(epio, MUSB_RXCOUNT); 1305 1306 spin_unlock_irqrestore(&musb->lock, flags); 1307 } 1308 return retval; 1309} 1310 1311static void musb_gadget_fifo_flush(struct usb_ep *ep) 1312{ 1313 struct musb_ep *musb_ep = to_musb_ep(ep); 1314 struct musb *musb = musb_ep->musb; 1315 u8 epnum = musb_ep->current_epnum; 1316 void __iomem *epio = musb->endpoints[epnum].regs; 1317 void __iomem *mbase; 1318 unsigned long flags; 1319 u16 csr, int_txe; 1320 1321 mbase = musb->mregs; 1322 1323 spin_lock_irqsave(&musb->lock, flags); 1324 musb_ep_select(mbase, (u8) epnum); 1325 1326 /* disable interrupts */ 1327 int_txe = musb_readw(mbase, MUSB_INTRTXE); 1328 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); 1329 1330 if (musb_ep->is_in) { 1331 csr = musb_readw(epio, MUSB_TXCSR); 1332 if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1333 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; 1334 musb_writew(epio, MUSB_TXCSR, csr); 1335 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ 1336 musb_writew(epio, MUSB_TXCSR, csr); 1337 } 1338 } else { 1339 csr = musb_readw(epio, MUSB_RXCSR); 1340 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; 1341 musb_writew(epio, MUSB_RXCSR, csr); 1342 musb_writew(epio, MUSB_RXCSR, csr); 1343 } 1344 1345 /* re-enable interrupt */ 1346 musb_writew(mbase, MUSB_INTRTXE, int_txe); 1347 spin_unlock_irqrestore(&musb->lock, flags); 1348} 1349 1350static const struct usb_ep_ops musb_ep_ops = { 1351 .enable = musb_gadget_enable, 1352 .disable = musb_gadget_disable, 1353 .alloc_request = musb_alloc_request, 1354 .free_request = musb_free_request, 1355 .queue = musb_gadget_queue, 1356 .dequeue = musb_gadget_dequeue, 1357 .set_halt = musb_gadget_set_halt, 1358 .set_wedge = musb_gadget_set_wedge, 1359 .fifo_status = musb_gadget_fifo_status, 1360 .fifo_flush = musb_gadget_fifo_flush 1361}; 1362 1363/* ----------------------------------------------------------------------- */ 1364 1365static int musb_gadget_get_frame(struct usb_gadget *gadget) 1366{ 1367 struct musb *musb = gadget_to_musb(gadget); 1368 1369 return (int)musb_readw(musb->mregs, MUSB_FRAME); 1370} 1371 1372static int musb_gadget_wakeup(struct usb_gadget *gadget) 1373{ 1374 struct musb *musb = gadget_to_musb(gadget); 1375 void __iomem *mregs = musb->mregs; 1376 unsigned long flags; 1377 int status = -EINVAL; 1378 u8 power, devctl; 1379 int retries; 1380 1381 spin_lock_irqsave(&musb->lock, flags); 1382 1383 switch (musb->xceiv->state) { 1384 case OTG_STATE_B_PERIPHERAL: 1385 /* NOTE: OTG state machine doesn't include B_SUSPENDED; 1386 * that's part of the standard usb 1.1 state machine, and 1387 * doesn't affect OTG transitions. 1388 */ 1389 if (musb->may_wakeup && musb->is_suspended) 1390 break; 1391 goto done; 1392 case OTG_STATE_B_IDLE: 1393 /* Start SRP ... OTG not required. */ 1394 devctl = musb_readb(mregs, MUSB_DEVCTL); 1395 DBG(2, "Sending SRP: devctl: %02x\n", devctl); 1396 devctl |= MUSB_DEVCTL_SESSION; 1397 musb_writeb(mregs, MUSB_DEVCTL, devctl); 1398 devctl = musb_readb(mregs, MUSB_DEVCTL); 1399 retries = 100; 1400 while (!(devctl & MUSB_DEVCTL_SESSION)) { 1401 devctl = musb_readb(mregs, MUSB_DEVCTL); 1402 if (retries-- < 1) 1403 break; 1404 } 1405 retries = 10000; 1406 while (devctl & MUSB_DEVCTL_SESSION) { 1407 devctl = musb_readb(mregs, MUSB_DEVCTL); 1408 if (retries-- < 1) 1409 break; 1410 } 1411 1412 /* Block idling for at least 1s */ 1413 musb_platform_try_idle(musb, 1414 jiffies + msecs_to_jiffies(1 * HZ)); 1415 1416 status = 0; 1417 goto done; 1418 default: 1419 DBG(2, "Unhandled wake: %s\n", otg_state_string(musb)); 1420 goto done; 1421 } 1422 1423 status = 0; 1424 1425 power = musb_readb(mregs, MUSB_POWER); 1426 power |= MUSB_POWER_RESUME; 1427 musb_writeb(mregs, MUSB_POWER, power); 1428 DBG(2, "issue wakeup\n"); 1429 1430 mdelay(2); 1431 1432 power = musb_readb(mregs, MUSB_POWER); 1433 power &= ~MUSB_POWER_RESUME; 1434 musb_writeb(mregs, MUSB_POWER, power); 1435done: 1436 spin_unlock_irqrestore(&musb->lock, flags); 1437 return status; 1438} 1439 1440static int 1441musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) 1442{ 1443 struct musb *musb = gadget_to_musb(gadget); 1444 1445 musb->is_self_powered = !!is_selfpowered; 1446 return 0; 1447} 1448 1449static void musb_pullup(struct musb *musb, int is_on) 1450{ 1451 u8 power; 1452 1453 power = musb_readb(musb->mregs, MUSB_POWER); 1454 if (is_on) 1455 power |= MUSB_POWER_SOFTCONN; 1456 else 1457 power &= ~MUSB_POWER_SOFTCONN; 1458 1459 1460 DBG(3, "gadget %s D+ pullup %s\n", 1461 musb->gadget_driver->function, is_on ? "on" : "off"); 1462 musb_writeb(musb->mregs, MUSB_POWER, power); 1463} 1464 1465 1466static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) 1467{ 1468 struct musb *musb = gadget_to_musb(gadget); 1469 1470 if (!musb->xceiv->set_power) 1471 return -EOPNOTSUPP; 1472 return otg_set_power(musb->xceiv, mA); 1473} 1474 1475static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) 1476{ 1477 struct musb *musb = gadget_to_musb(gadget); 1478 unsigned long flags; 1479 1480 is_on = !!is_on; 1481 1482 /* NOTE: this assumes we are sensing vbus; we'd rather 1483 * not pullup unless the B-session is active. 1484 */ 1485 spin_lock_irqsave(&musb->lock, flags); 1486 if (is_on != musb->softconnect) { 1487 musb->softconnect = is_on; 1488 musb_pullup(musb, is_on); 1489 } 1490 spin_unlock_irqrestore(&musb->lock, flags); 1491 return 0; 1492} 1493 1494static const struct usb_gadget_ops musb_gadget_operations = { 1495 .get_frame = musb_gadget_get_frame, 1496 .wakeup = musb_gadget_wakeup, 1497 .set_selfpowered = musb_gadget_set_self_powered, 1498 /* .vbus_session = musb_gadget_vbus_session, */ 1499 .vbus_draw = musb_gadget_vbus_draw, 1500 .pullup = musb_gadget_pullup, 1501}; 1502 1503/* ----------------------------------------------------------------------- */ 1504 1505/* Registration */ 1506 1507/* Only this registration code "knows" the rule (from USB standards) 1508 * about there being only one external upstream port. It assumes 1509 * all peripheral ports are external... 1510 */ 1511static struct musb *the_gadget; 1512 1513static void musb_gadget_release(struct device *dev) 1514{ 1515 /* kref_put(WHAT) */ 1516 dev_dbg(dev, "%s\n", __func__); 1517} 1518 1519 1520static void __init 1521init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) 1522{ 1523 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1524 1525 memset(ep, 0, sizeof *ep); 1526 1527 ep->current_epnum = epnum; 1528 ep->musb = musb; 1529 ep->hw_ep = hw_ep; 1530 ep->is_in = is_in; 1531 1532 INIT_LIST_HEAD(&ep->req_list); 1533 1534 sprintf(ep->name, "ep%d%s", epnum, 1535 (!epnum || hw_ep->is_shared_fifo) ? "" : ( 1536 is_in ? "in" : "out")); 1537 ep->end_point.name = ep->name; 1538 INIT_LIST_HEAD(&ep->end_point.ep_list); 1539 if (!epnum) { 1540 ep->end_point.maxpacket = 64; 1541 ep->end_point.ops = &musb_g_ep0_ops; 1542 musb->g.ep0 = &ep->end_point; 1543 } else { 1544 if (is_in) 1545 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; 1546 else 1547 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; 1548 ep->end_point.ops = &musb_ep_ops; 1549 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); 1550 } 1551} 1552 1553/* 1554 * Initialize the endpoints exposed to peripheral drivers, with backlinks 1555 * to the rest of the driver state. 1556 */ 1557static inline void __init musb_g_init_endpoints(struct musb *musb) 1558{ 1559 u8 epnum; 1560 struct musb_hw_ep *hw_ep; 1561 unsigned count = 0; 1562 1563 /* intialize endpoint list just once */ 1564 INIT_LIST_HEAD(&(musb->g.ep_list)); 1565 1566 for (epnum = 0, hw_ep = musb->endpoints; 1567 epnum < musb->nr_endpoints; 1568 epnum++, hw_ep++) { 1569 if (hw_ep->is_shared_fifo /* || !epnum */) { 1570 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); 1571 count++; 1572 } else { 1573 if (hw_ep->max_packet_sz_tx) { 1574 init_peripheral_ep(musb, &hw_ep->ep_in, 1575 epnum, 1); 1576 count++; 1577 } 1578 if (hw_ep->max_packet_sz_rx) { 1579 init_peripheral_ep(musb, &hw_ep->ep_out, 1580 epnum, 0); 1581 count++; 1582 } 1583 } 1584 } 1585} 1586 1587/* called once during driver setup to initialize and link into 1588 * the driver model; memory is zeroed. 1589 */ 1590int __init musb_gadget_setup(struct musb *musb) 1591{ 1592 int status; 1593 1594 /* REVISIT minor race: if (erroneously) setting up two 1595 * musb peripherals at the same time, only the bus lock 1596 * is probably held. 1597 */ 1598 if (the_gadget) 1599 return -EBUSY; 1600 the_gadget = musb; 1601 1602 musb->g.ops = &musb_gadget_operations; 1603 musb->g.is_dualspeed = 1; 1604 musb->g.speed = USB_SPEED_UNKNOWN; 1605 1606 /* this "gadget" abstracts/virtualizes the controller */ 1607 dev_set_name(&musb->g.dev, "gadget"); 1608 musb->g.dev.parent = musb->controller; 1609 musb->g.dev.dma_mask = musb->controller->dma_mask; 1610 musb->g.dev.release = musb_gadget_release; 1611 musb->g.name = musb_driver_name; 1612 1613 if (is_otg_enabled(musb)) 1614 musb->g.is_otg = 1; 1615 1616 musb_g_init_endpoints(musb); 1617 1618 musb->is_active = 0; 1619 musb_platform_try_idle(musb, 0); 1620 1621 status = device_register(&musb->g.dev); 1622 if (status != 0) 1623 the_gadget = NULL; 1624 return status; 1625} 1626 1627void musb_gadget_cleanup(struct musb *musb) 1628{ 1629 if (musb != the_gadget) 1630 return; 1631 1632 device_unregister(&musb->g.dev); 1633 the_gadget = NULL; 1634} 1635 1636/* 1637 * Register the gadget driver. Used by gadget drivers when 1638 * registering themselves with the controller. 1639 * 1640 * -EINVAL something went wrong (not driver) 1641 * -EBUSY another gadget is already using the controller 1642 * -ENOMEM no memeory to perform the operation 1643 * 1644 * @param driver the gadget driver 1645 * @return <0 if error, 0 if everything is fine 1646 */ 1647int usb_gadget_register_driver(struct usb_gadget_driver *driver) 1648{ 1649 int retval; 1650 unsigned long flags; 1651 struct musb *musb = the_gadget; 1652 1653 if (!driver 1654 || driver->speed != USB_SPEED_HIGH 1655 || !driver->bind 1656 || !driver->setup) 1657 return -EINVAL; 1658 1659 /* driver must be initialized to support peripheral mode */ 1660 if (!musb) { 1661 DBG(1, "%s, no dev??\n", __func__); 1662 return -ENODEV; 1663 } 1664 1665 DBG(3, "registering driver %s\n", driver->function); 1666 spin_lock_irqsave(&musb->lock, flags); 1667 1668 if (musb->gadget_driver) { 1669 DBG(1, "%s is already bound to %s\n", 1670 musb_driver_name, 1671 musb->gadget_driver->driver.name); 1672 retval = -EBUSY; 1673 } else { 1674 musb->gadget_driver = driver; 1675 musb->g.dev.driver = &driver->driver; 1676 driver->driver.bus = NULL; 1677 musb->softconnect = 1; 1678 retval = 0; 1679 } 1680 1681 spin_unlock_irqrestore(&musb->lock, flags); 1682 1683 if (retval == 0) { 1684 retval = driver->bind(&musb->g); 1685 if (retval != 0) { 1686 DBG(3, "bind to driver %s failed --> %d\n", 1687 driver->driver.name, retval); 1688 musb->gadget_driver = NULL; 1689 musb->g.dev.driver = NULL; 1690 } 1691 1692 spin_lock_irqsave(&musb->lock, flags); 1693 1694 otg_set_peripheral(musb->xceiv, &musb->g); 1695 musb->xceiv->state = OTG_STATE_B_IDLE; 1696 musb->is_active = 1; 1697 1698 1699 if (!is_otg_enabled(musb)) 1700 musb_start(musb); 1701 1702 otg_set_peripheral(musb->xceiv, &musb->g); 1703 1704 spin_unlock_irqrestore(&musb->lock, flags); 1705 1706 if (is_otg_enabled(musb)) { 1707 DBG(3, "OTG startup...\n"); 1708 1709 /* REVISIT: funcall to other code, which also 1710 * handles power budgeting ... this way also 1711 * ensures HdrcStart is indirectly called. 1712 */ 1713 retval = usb_add_hcd(musb_to_hcd(musb), -1, 0); 1714 if (retval < 0) { 1715 DBG(1, "add_hcd failed, %d\n", retval); 1716 spin_lock_irqsave(&musb->lock, flags); 1717 otg_set_peripheral(musb->xceiv, NULL); 1718 musb->gadget_driver = NULL; 1719 musb->g.dev.driver = NULL; 1720 spin_unlock_irqrestore(&musb->lock, flags); 1721 } 1722 } 1723 } 1724 1725 return retval; 1726} 1727EXPORT_SYMBOL(usb_gadget_register_driver); 1728 1729static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) 1730{ 1731 int i; 1732 struct musb_hw_ep *hw_ep; 1733 1734 /* don't disconnect if it's not connected */ 1735 if (musb->g.speed == USB_SPEED_UNKNOWN) 1736 driver = NULL; 1737 else 1738 musb->g.speed = USB_SPEED_UNKNOWN; 1739 1740 /* deactivate the hardware */ 1741 if (musb->softconnect) { 1742 musb->softconnect = 0; 1743 musb_pullup(musb, 0); 1744 } 1745 musb_stop(musb); 1746 1747 /* killing any outstanding requests will quiesce the driver; 1748 * then report disconnect 1749 */ 1750 if (driver) { 1751 for (i = 0, hw_ep = musb->endpoints; 1752 i < musb->nr_endpoints; 1753 i++, hw_ep++) { 1754 musb_ep_select(musb->mregs, i); 1755 if (hw_ep->is_shared_fifo /* || !epnum */) { 1756 nuke(&hw_ep->ep_in, -ESHUTDOWN); 1757 } else { 1758 if (hw_ep->max_packet_sz_tx) 1759 nuke(&hw_ep->ep_in, -ESHUTDOWN); 1760 if (hw_ep->max_packet_sz_rx) 1761 nuke(&hw_ep->ep_out, -ESHUTDOWN); 1762 } 1763 } 1764 1765 spin_unlock(&musb->lock); 1766 driver->disconnect(&musb->g); 1767 spin_lock(&musb->lock); 1768 } 1769} 1770 1771/* 1772 * Unregister the gadget driver. Used by gadget drivers when 1773 * unregistering themselves from the controller. 1774 * 1775 * @param driver the gadget driver to unregister 1776 */ 1777int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 1778{ 1779 unsigned long flags; 1780 int retval = 0; 1781 struct musb *musb = the_gadget; 1782 1783 if (!driver || !driver->unbind || !musb) 1784 return -EINVAL; 1785 1786 /* REVISIT always use otg_set_peripheral() here too; 1787 * this needs to shut down the OTG engine. 1788 */ 1789 1790 spin_lock_irqsave(&musb->lock, flags); 1791 1792#ifdef CONFIG_USB_MUSB_OTG 1793 musb_hnp_stop(musb); 1794#endif 1795 1796 if (musb->gadget_driver == driver) { 1797 1798 (void) musb_gadget_vbus_draw(&musb->g, 0); 1799 1800 musb->xceiv->state = OTG_STATE_UNDEFINED; 1801 stop_activity(musb, driver); 1802 otg_set_peripheral(musb->xceiv, NULL); 1803 1804 DBG(3, "unregistering driver %s\n", driver->function); 1805 spin_unlock_irqrestore(&musb->lock, flags); 1806 driver->unbind(&musb->g); 1807 spin_lock_irqsave(&musb->lock, flags); 1808 1809 musb->gadget_driver = NULL; 1810 musb->g.dev.driver = NULL; 1811 1812 musb->is_active = 0; 1813 musb_platform_try_idle(musb, 0); 1814 } else 1815 retval = -EINVAL; 1816 spin_unlock_irqrestore(&musb->lock, flags); 1817 1818 if (is_otg_enabled(musb) && retval == 0) { 1819 usb_remove_hcd(musb_to_hcd(musb)); 1820 } 1821 1822 return retval; 1823} 1824EXPORT_SYMBOL(usb_gadget_unregister_driver); 1825 1826 1827/* ----------------------------------------------------------------------- */ 1828 1829/* lifecycle operations called through plat_uds.c */ 1830 1831void musb_g_resume(struct musb *musb) 1832{ 1833 musb->is_suspended = 0; 1834 switch (musb->xceiv->state) { 1835 case OTG_STATE_B_IDLE: 1836 break; 1837 case OTG_STATE_B_WAIT_ACON: 1838 case OTG_STATE_B_PERIPHERAL: 1839 musb->is_active = 1; 1840 if (musb->gadget_driver && musb->gadget_driver->resume) { 1841 spin_unlock(&musb->lock); 1842 musb->gadget_driver->resume(&musb->g); 1843 spin_lock(&musb->lock); 1844 } 1845 break; 1846 default: 1847 WARNING("unhandled RESUME transition (%s)\n", 1848 otg_state_string(musb)); 1849 } 1850} 1851 1852/* called when SOF packets stop for 3+ msec */ 1853void musb_g_suspend(struct musb *musb) 1854{ 1855 u8 devctl; 1856 1857 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 1858 DBG(3, "devctl %02x\n", devctl); 1859 1860 switch (musb->xceiv->state) { 1861 case OTG_STATE_B_IDLE: 1862 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 1863 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 1864 break; 1865 case OTG_STATE_B_PERIPHERAL: 1866 musb->is_suspended = 1; 1867 if (musb->gadget_driver && musb->gadget_driver->suspend) { 1868 spin_unlock(&musb->lock); 1869 musb->gadget_driver->suspend(&musb->g); 1870 spin_lock(&musb->lock); 1871 } 1872 break; 1873 default: 1874 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; 1875 * A_PERIPHERAL may need care too 1876 */ 1877 WARNING("unhandled SUSPEND transition (%s)\n", 1878 otg_state_string(musb)); 1879 } 1880} 1881 1882/* Called during SRP */ 1883void musb_g_wakeup(struct musb *musb) 1884{ 1885 musb_gadget_wakeup(&musb->g); 1886} 1887 1888/* called when VBUS drops below session threshold, and in other cases */ 1889void musb_g_disconnect(struct musb *musb) 1890{ 1891 void __iomem *mregs = musb->mregs; 1892 u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 1893 1894 DBG(3, "devctl %02x\n", devctl); 1895 1896 /* clear HR */ 1897 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); 1898 1899 /* don't draw vbus until new b-default session */ 1900 (void) musb_gadget_vbus_draw(&musb->g, 0); 1901 1902 musb->g.speed = USB_SPEED_UNKNOWN; 1903 if (musb->gadget_driver && musb->gadget_driver->disconnect) { 1904 spin_unlock(&musb->lock); 1905 musb->gadget_driver->disconnect(&musb->g); 1906 spin_lock(&musb->lock); 1907 } 1908 1909 switch (musb->xceiv->state) { 1910 default: 1911#ifdef CONFIG_USB_MUSB_OTG 1912 DBG(2, "Unhandled disconnect %s, setting a_idle\n", 1913 otg_state_string(musb)); 1914 musb->xceiv->state = OTG_STATE_A_IDLE; 1915 MUSB_HST_MODE(musb); 1916 break; 1917 case OTG_STATE_A_PERIPHERAL: 1918 musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 1919 MUSB_HST_MODE(musb); 1920 break; 1921 case OTG_STATE_B_WAIT_ACON: 1922 case OTG_STATE_B_HOST: 1923#endif 1924 case OTG_STATE_B_PERIPHERAL: 1925 case OTG_STATE_B_IDLE: 1926 musb->xceiv->state = OTG_STATE_B_IDLE; 1927 break; 1928 case OTG_STATE_B_SRP_INIT: 1929 break; 1930 } 1931 1932 musb->is_active = 0; 1933} 1934 1935void musb_g_reset(struct musb *musb) 1936__releases(musb->lock) 1937__acquires(musb->lock) 1938{ 1939 void __iomem *mbase = musb->mregs; 1940 u8 devctl = musb_readb(mbase, MUSB_DEVCTL); 1941 u8 power; 1942 1943 DBG(3, "<== %s addr=%x driver '%s'\n", 1944 (devctl & MUSB_DEVCTL_BDEVICE) 1945 ? "B-Device" : "A-Device", 1946 musb_readb(mbase, MUSB_FADDR), 1947 musb->gadget_driver 1948 ? musb->gadget_driver->driver.name 1949 : NULL 1950 ); 1951 1952 /* report disconnect, if we didn't already (flushing EP state) */ 1953 if (musb->g.speed != USB_SPEED_UNKNOWN) 1954 musb_g_disconnect(musb); 1955 1956 /* clear HR */ 1957 else if (devctl & MUSB_DEVCTL_HR) 1958 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); 1959 1960 1961 /* what speed did we negotiate? */ 1962 power = musb_readb(mbase, MUSB_POWER); 1963 musb->g.speed = (power & MUSB_POWER_HSMODE) 1964 ? USB_SPEED_HIGH : USB_SPEED_FULL; 1965 1966 /* start in USB_STATE_DEFAULT */ 1967 musb->is_active = 1; 1968 musb->is_suspended = 0; 1969 MUSB_DEV_MODE(musb); 1970 musb->address = 0; 1971 musb->ep0_state = MUSB_EP0_STAGE_SETUP; 1972 1973 musb->may_wakeup = 0; 1974 musb->g.b_hnp_enable = 0; 1975 musb->g.a_alt_hnp_support = 0; 1976 musb->g.a_hnp_support = 0; 1977 1978 /* Normal reset, as B-Device; 1979 * or else after HNP, as A-Device 1980 */ 1981 if (devctl & MUSB_DEVCTL_BDEVICE) { 1982 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 1983 musb->g.is_a_peripheral = 0; 1984 } else if (is_otg_enabled(musb)) { 1985 musb->xceiv->state = OTG_STATE_A_PERIPHERAL; 1986 musb->g.is_a_peripheral = 1; 1987 } else 1988 WARN_ON(1); 1989 1990 /* start with default limits on VBUS power draw */ 1991 (void) musb_gadget_vbus_draw(&musb->g, 1992 is_otg_enabled(musb) ? 8 : 100); 1993} 1994