/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"


/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT:  the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */

/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data.
 */
#define NUM_TXCHAN_BD	64
#define NUM_RXCHAN_BD	64

static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef	CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}

static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}

static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}
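/* Illustrative usage sketch (not part of the original driver): the
 * freelist is a simple LIFO stack threaded through bd->next, so both
 * helpers are O(1) and rely on the caller holding the controller lock
 * (per the "irqlocked" context notes elsewhere in this file):
 *
 *	struct cppi_descriptor *bd = cppi_bd_alloc(c);
 *	if (bd) {
 *		... fill in bd->hw_next, bd->hw_bufp, etc ...
 *		cppi_bd_free(c, bd);	// push it back when done
 *	}
 */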
/*
 *  Start DMA controller
 *
 *  Initialize the DMA controller as necessary.
 */

/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}

/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}

static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		if (!bd)
			break;
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}

static int cppi_channel_abort(struct dma_channel *);

static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}

static int __init cppi_controller_start(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);

	return 0;
}
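/* Bring-up order recap (an illustrative summary of the function above,
 * not text from the original source): label channels as TX/RX, build the
 * per-channel BD freelists, zero each channel's state RAM, unmask the
 * per-channel CPPI irqs, enable the TX and RX CPPI engines, and finally
 * make sure RNDIS mode and host-side RX autorequest start out disabled.
 */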
/*
 *  Stop DMA controller
 *
 *  De-Init the DMA controller as necessary.
 */

static int cppi_controller_stop(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	tibase = controller->tibase;
	/* DISABLE INDIVIDUAL CHANNEL Interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	DBG(1, "Tearing down RX and TX Channels\n");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* Only TX has a proper teardown procedure, so we disable TX/RX
	 * CPPI only after the TX channels have been cleaned up; TX CPPI
	 * cannot be disabled until TX teardown is complete.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);

	return 0;
}

/* While dma channel is allocated, we only want the core irqs active
 * for fault reports, otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
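/* Bit-layout sketch for the two helpers above (an assumption drawn from
 * the (epnum + 8) shift, not from TI documentation): RX endpoint irqs
 * appear to occupy bits 8..15 of the DaVinci USB interrupt mask
 * registers, so endpoint N maps to bit (N + 8):
 *
 *	core_rxirq_disable(tibase, 1);	// writes bit 9 to MASK_CLR,
 *					// masking RX endpoint 1's irq
 */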
/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		DBG(1, "re-allocating DMA%d %cX channel %p\n",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
	cppi_ch->channel.max_len = 0x7fffffff;

	DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}

/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT:  for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		DBG(1, "releasing idle DMA channel %p\n", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}

/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "RX DMA%d%s: %d left, csr %04x, "
			"%08x H%08x S%08x C%08x, "
			"B%08x L%08x %08x .. %08x"
			"\n",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}

/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "TX DMA%d%s: csr %04x, "
			"H%08x S%08x C%08x %08x, "
			"F%08x L%08x .. %08x"
			"\n",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}
%08x" 392 "\n", 393 c->index, tag, 394 musb_readw(c->hw_ep->regs, MUSB_TXCSR), 395 396 musb_readl(&tx->tx_head, 0), 397 musb_readl(&tx->tx_buf, 0), 398 musb_readl(&tx->tx_current, 0), 399 musb_readl(&tx->tx_buf_current, 0), 400 401 musb_readl(&tx->tx_info, 0), 402 musb_readl(&tx->tx_rem_len, 0), 403 /* dummy/unused word 6 */ 404 musb_readl(&tx->tx_complete, 0) 405 ); 406} 407 408/* Context: controller irqlocked */ 409static inline void 410cppi_rndis_update(struct cppi_channel *c, int is_rx, 411 void __iomem *tibase, int is_rndis) 412{ 413 /* we may need to change the rndis flag for this cppi channel */ 414 if (c->is_rndis != is_rndis) { 415 u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG); 416 u32 temp = 1 << (c->index); 417 418 if (is_rx) 419 temp <<= 16; 420 if (is_rndis) 421 value |= temp; 422 else 423 value &= ~temp; 424 musb_writel(tibase, DAVINCI_RNDIS_REG, value); 425 c->is_rndis = is_rndis; 426 } 427} 428 429#ifdef CONFIG_USB_MUSB_DEBUG 430static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd) 431{ 432 pr_debug("RXBD/%s %08x: " 433 "nxt %08x buf %08x off.blen %08x opt.plen %08x\n", 434 tag, bd->dma, 435 bd->hw_next, bd->hw_bufp, bd->hw_off_len, 436 bd->hw_options); 437} 438#endif 439 440static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx) 441{ 442#ifdef CONFIG_USB_MUSB_DEBUG 443 struct cppi_descriptor *bd; 444 445 if (!_dbg_level(level)) 446 return; 447 cppi_dump_rx(level, rx, tag); 448 if (rx->last_processed) 449 cppi_dump_rxbd("last", rx->last_processed); 450 for (bd = rx->head; bd; bd = bd->next) 451 cppi_dump_rxbd("active", bd); 452#endif 453} 454 455 456/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX; 457 * so we won't ever use it (see "CPPI RX Woes" below). 458 */ 459static inline int cppi_autoreq_update(struct cppi_channel *rx, 460 void __iomem *tibase, int onepacket, unsigned n_bds) 461{ 462 u32 val; 463 464#ifdef RNDIS_RX_IS_USABLE 465 u32 tmp; 466 /* assert(is_host_active(musb)) */ 467 468 /* start from "AutoReq never" */ 469 tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); 470 val = tmp & ~((0x3) << (rx->index * 2)); 471 472 /* HCD arranged reqpkt for packet #1. we arrange int 473 * for all but the last one, maybe in two segments. 474 */ 475 if (!onepacket) { 476 /* one segment, autoreq "all-but-last" */ 477 val |= ((0x1) << (rx->index * 2)); 478 } 479 480 if (val != tmp) { 481 int n = 100; 482 483 /* make sure that autoreq is updated before continuing */ 484 musb_writel(tibase, DAVINCI_AUTOREQ_REG, val); 485 do { 486 tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); 487 if (tmp == val) 488 break; 489 cpu_relax(); 490 } while (n-- > 0); 491 } 492#endif 493 494 /* REQPKT is turned off after each segment */ 495 if (n_bds && rx->channel.actual_len) { 496 void __iomem *regs = rx->hw_ep->regs; 497 498 val = musb_readw(regs, MUSB_RXCSR); 499 if (!(val & MUSB_RXCSR_H_REQPKT)) { 500 val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS; 501 musb_writew(regs, MUSB_RXCSR, val); 502 /* flush writebufer */ 503 val = musb_readw(regs, MUSB_RXCSR); 504 } 505 } 506 return n_bds; 507} 508 509 510/* Buffer enqueuing Logic: 511 * 512 * - RX builds new queues each time, to help handle routine "early 513 * termination" cases (faults, including errors and short reads) 514 * more correctly. 515 * 516 * - for now, TX reuses the same queue of BDs every time 517 * 518 * REVISIT long term, we want a normal dynamic model. 519 * ... 
#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}
#endif

static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#ifdef CONFIG_USB_MUSB_DEBUG
	struct cppi_descriptor	*bd;

	if (!_dbg_level(level))
		return;
	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
#endif
}


/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef	RNDIS_RX_IS_USABLE
	u32	tmp;
	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}
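/* Field-layout sketch for the (compiled-out) autoreq path above,
 * inferred from the shifts in the code rather than from TI docs:
 * DAVINCI_AUTOREQ_REG appears to hold a 2-bit mode field per RX channel
 * at bit position (index * 2), where 0x0 means "AutoReq never" and 0x1
 * means "AutoReq all but the last packet".
 */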
/* Buffer enqueuing Logic:
 *
 *  - RX builds new queues each time, to help handle routine "early
 *    termination" cases (faults, including errors and short reads)
 *    more correctly.
 *
 *  - for now, TX reuses the same queue of BDs every time
 *
 * REVISIT long term, we want a normal dynamic model.
 * ... the goal will be to append to the
 * existing queue, processing completed "dma buffers" (segments) on the fly.
 *
 * Otherwise we force an IRQ latency between requests, which slows us a lot
 * (especially in "transparent" dma).  Unfortunately that model seems to be
 * inherent in the DMA model from the Mentor code, except in the rare case
 * of transfers big enough (~128+ KB) that we could append "middle" segments
 * in the TX paths.  (RX can't do this, see below.)
 *
 * That's true even in the CPPI-friendly iso case, where most urbs have
 * several small segments provided in a group and where the "packet at a time"
 * "transparent" DMA model is always correct, even on the RX side.
 */

/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave well too
 * (except for how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks like
 * it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length > maxpacket
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32	partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}
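/* Worked example for the mode choice above (illustrative numbers, not
 * from the original source): with maxpacket = 512, a 3000 byte request
 * passes every test (512 is a multiple of 64, 3000 > 512, 3000 < 0xffff,
 * 3000 % 512 != 0), so RNDIS mode sends it as one BD with one irq.  A
 * 3072 byte request (exactly 6 * 512) fails the last test and goes out
 * in transparent mode as six BDs, sidestepping the unreliable
 * CPPI-generated ZLP described above.
 */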
/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios:  (a) it's fed two
 * 300 byte packets back-to-back, and (b) it's fed two 512 byte packets
 * back-to-back.  (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300
 * bytes, and the next packet goes into a buffer that's queued later;
 * while (b) fills the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly,
 *   but (b) loses **BADLY** because nothing (!) happens when that second
 *   packet fills the buffer, much less when a third one arrives.  (Which
 *   makes this not a "true" RNDIS mode.  In the RNDIS protocol short-packet
 *   termination is optional, and it's fine if peripherals -- not hosts! --
 *   pad messages out to end-of-buffer.  Standard PCI host controller DMA
 *   descriptors implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems:  (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both
 *   packets are loaded into the buffer (with a 212 byte gap between them),
 *   and the next buffer queued will NOT get its 300 bytes of data.  (It
 *   seems like SOP/EOP are intended as outputs for RX queues, not inputs...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way
 *   to reliably make both cases work, with software handling both cases
 *   correctly and at the significant penalty of needing an IRQ per packet.
 *   (The lack of I/O overlap can be slightly ameliorated by enabling
 *   double buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */


/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a)	peripheral mode ... since rndis peripherals could pad their
 *	writes to hosts, causing i/o failure; or we'd have to cope with
 *	a largely unknowable variety of host side protocol variants
 *  (b)	and short reads are NOT errors ... since full reads would
 *	cause those same i/o failures
 *  (c)	and read length is
 *	- less than 64KB (max per cppi descriptor)
 *	- not a multiple of 4096 (g_zero default, full reads typical)
 *	- N (>1) packets long, ditto (full reads not EXPECTED)
 * THEN
 *   try rx rndis mode
 *
 * Cost of heuristic failing:  RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, the heuristic will probably need to be disabled.
 */
static bool cppi_rx_rndis = 1;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
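/* Worked example of the heuristic (illustrative numbers): on the
 * peripheral side with maxpacket = 512, a 5120 byte read qualifies --
 * ten whole packets, under 64KB, and 5120 is not a multiple of 4096 --
 * so RX RNDIS mode is tried.  An 8192 byte read is rejected because
 * 8192 is a 4096 multiple (a likely full read, e.g. g_zero's default),
 * and a 3000 byte read is rejected because it is not a whole number
 * of packets.
 */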
/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = length / maxpacket;
			if (length % maxpacket)
				n_bds++;
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes.  So:  multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%x len %u %u/%u\n",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			addr, length, rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

#ifdef CONFIG_USB_MUSB_DEBUG
	if (_dbg_level(5)) {
		struct cppi_descriptor	*d;

		for (d = rx->head; d; d = d->next)
			cppi_dump_rxbd("S", d);
	}
#endif

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		DBG(2, "bufcnt%d underrun - %d (for %d)\n",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}
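/* Queue shape built by the function above (illustrative diagram):
 *
 *	rx->head                                       rx->tail
 *	[SOP|OWN|len] --> [OWN] --> ... --> [EOP|OWN] --> NULL
 *
 * One segment is one "dma packet": the SOP descriptor carries the total
 * segment length, each BD maps at most one maxpacket-sized USB packet,
 * and the hw_next links mirror the software next pointers.
 */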
/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel?  %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cX DMA%d not allocated!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	/* TX channel? or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
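/* Call-sequence sketch (illustrative; the musb core, not this file,
 * actually drives these dma_controller hooks):
 *
 *	ch = c->channel_alloc(c, hw_ep, is_tx);
 *	c->channel_program(ch, maxpacket, mode, dma_addr, len);
 *	... cppi_interrupt() eventually calls musb_dma_completion() ...
 *	c->channel_release(ch);
 */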
static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)\n",
			bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE:  when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets
			 * may be active in the queue... TI docs didn't say,
			 * but CPPI ignores those BDs even though OWN is
			 * still set.
			 */
			completed = true;
			DBG(3, "rx short %d/%d (%d)\n",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		rx->head = bd;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racey, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}
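/* Completion-pointer protocol recap (an illustrative summary of the scan
 * above): CPPI publishes the dma address of the last safely-completed BD
 * in rx_complete, and software acks by writing that same address back.
 * The scan therefore never acks past safe2ack, since BDs beyond it may
 * still be owned by the hardware even if OWN already reads as clear.
 */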
", completed" : "", 1108 csr); 1109 cppi_dump_rxq(4, "/what?", rx); 1110 } 1111 } 1112 if (!completed) { 1113 int csr; 1114 1115 rx->head = bd; 1116 1117 /* REVISIT seems like "autoreq all but EOP" doesn't... 1118 * setting it here "should" be racey, but seems to work 1119 */ 1120 csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); 1121 if (is_host_active(cppi->musb) 1122 && bd 1123 && !(csr & MUSB_RXCSR_H_REQPKT)) { 1124 csr |= MUSB_RXCSR_H_REQPKT; 1125 musb_writew(regs, MUSB_RXCSR, 1126 MUSB_RXCSR_H_WZC_BITS | csr); 1127 csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); 1128 } 1129 } else { 1130 rx->head = NULL; 1131 rx->tail = NULL; 1132 } 1133 1134 cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned"); 1135 return completed; 1136} 1137 1138irqreturn_t cppi_interrupt(int irq, void *dev_id) 1139{ 1140 struct musb *musb = dev_id; 1141 struct cppi *cppi; 1142 void __iomem *tibase; 1143 struct musb_hw_ep *hw_ep = NULL; 1144 u32 rx, tx; 1145 int i, index; 1146 unsigned long flags; 1147 1148 cppi = container_of(musb->dma_controller, struct cppi, controller); 1149 if (cppi->irq) 1150 spin_lock_irqsave(&musb->lock, flags); 1151 1152 tibase = musb->ctrl_base; 1153 1154 tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); 1155 rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); 1156 1157 if (!tx && !rx) 1158 return IRQ_NONE; 1159 1160 DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx); 1161 1162 /* process TX channels */ 1163 for (index = 0; tx; tx = tx >> 1, index++) { 1164 struct cppi_channel *tx_ch; 1165 struct cppi_tx_stateram __iomem *tx_ram; 1166 bool completed = false; 1167 struct cppi_descriptor *bd; 1168 1169 if (!(tx & 1)) 1170 continue; 1171 1172 tx_ch = cppi->tx + index; 1173 tx_ram = tx_ch->state_ram; 1174 1175 1176 cppi_dump_tx(5, tx_ch, "/E"); 1177 1178 bd = tx_ch->head; 1179 1180 /* 1181 * If Head is null then this could mean that a abort interrupt 1182 * that needs to be acknowledged. 1183 */ 1184 if (NULL == bd) { 1185 DBG(1, "null BD\n"); 1186 tx_ram->tx_complete = 0; 1187 continue; 1188 } 1189 1190 /* run through all completed BDs */ 1191 for (i = 0; !completed && bd && i < NUM_TXCHAN_BD; 1192 i++, bd = bd->next) { 1193 u16 len; 1194 1195 /* catch latest BD writes from CPPI */ 1196 rmb(); 1197 if (bd->hw_options & CPPI_OWN_SET) 1198 break; 1199 1200 DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n", 1201 bd, bd->hw_next, bd->hw_bufp, 1202 bd->hw_off_len, bd->hw_options); 1203 1204 len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK; 1205 tx_ch->channel.actual_len += len; 1206 1207 tx_ch->last_processed = bd; 1208 1209 /* write completion register to acknowledge 1210 * processing of completed BDs, and possibly 1211 * release the IRQ; EOQ might not be set ... 1212 * 1213 * REVISIT use the same ack strategy as rx 1214 * 1215 * REVISIT have observed bit 18 set; huh?? 1216 */ 1217 /* if ((bd->hw_options & CPPI_EOQ_MASK)) */ 1218 musb_writel(&tx_ram->tx_complete, 0, bd->dma); 1219 1220 /* stop scanning on end-of-segment */ 1221 if (bd->hw_next == 0) 1222 completed = true; 1223 } 1224 1225 /* on end of segment, maybe go to next one */ 1226 if (completed) { 1227 /* cppi_dump_tx(4, tx_ch, "/complete"); */ 1228 1229 /* transfer more, or report completion */ 1230 if (tx_ch->offset >= tx_ch->buf_len) { 1231 tx_ch->head = NULL; 1232 tx_ch->tail = NULL; 1233 tx_ch->channel.status = MUSB_DMA_STATUS_FREE; 1234 1235 hw_ep = tx_ch->hw_ep; 1236 1237 musb_dma_completion(musb, index + 1, 1); 1238 1239 } else { 1240 /* Bigger transfer than we could fit in 1241 * that first batch of descriptors... 
/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi		*controller;
	struct device		*dev = musb->controller;
	struct platform_device	*pdev = to_platform_device(dev);
	int			irq = platform_get_irq(pdev, 1);

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	if (irq > 0) {
		if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
			dev_err(dev, "request_irq %d failed!\n", irq);
			dma_controller_destroy(&controller->controller);
			return NULL;
		}
		controller->irq = irq;
	}

	return &controller->controller;
}
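/* Lifecycle sketch (illustrative): the musb core pairs these entry
 * points roughly as
 *
 *	musb->dma_controller = dma_controller_create(musb, mregs);
 *	musb->dma_controller->start(musb->dma_controller);
 *	...
 *	musb->dma_controller->stop(musb->dma_controller);
 *	dma_controller_destroy(musb->dma_controller);
 */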
/*
 *  Destroy a previously-instantiated DMA controller.
 */
void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	if (cppi->irq)
		free_irq(cppi->irq, cppi->musb);

	/* assert:  caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}
/*
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/*
		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
		 *    (write back mode)
		 * 2. Wait for abort interrupt and then put the channel in
		 *    compare mode by writing 1 to the tx_complete register.
		 */
		cppi_reset_tx(tx_ram, 1);
		cppi_ch->head = NULL;
		musb_writel(&tx_ram->tx_complete, 0, 1);
		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16	csr;

		/* NOTE: docs don't guarantee any of this works ...  we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE:  arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place.  */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}

/* TBD Queries:
 *
 * Power Management ... probably turn off cppi during suspend, restart;
 * check state ram?  Clocking is presumably shared with usb core.
 */