Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/gadget/

Lines Matching refs:dma

52 #include <linux/dma-mapping.h>
94 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
98 * Some gadget drivers work better with the dma support here than others.
189 if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
190 DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
192 ep->dma = NULL;
236 if (!ep->dma) { /* pio, per-packet */
245 } else { /* dma, per-request */
263 ep->dma ? "dma" : "pio", max);
299 /* disable the dma, irqs, endpoint... */
300 if (ep->dma) {
301 writel (0, &ep->dma->dmactl);
305 , &ep->dma->dmastat);
380 ep->dma ? "dma" : "pio", _ep->name);
385 if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
386 ep->dma = &ep->dev->dma [ep->num - 1];
408 req->req.dma = DMA_ADDR_INVALID;
411 /* this dma descriptor may be swapped with the previous dummy */
412 if (ep->dma) {
453 * one packet. ep-a..ep-d should use dma instead.
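Lines 408-412 are the request constructor: a fresh request is marked "not mapped" with DMA_ADDR_INVALID, and DMA-capable endpoints also get a per-request descriptor. A hedged sketch, assuming the descriptor comes from a coherent DMA pool (the pool field name "requests" is a guess, and the dummy-descriptor swap mentioned at line 411 is not reproduced):

    /* sketch: request allocation; the pool field name is an assumption */
    static struct net2280_request *alloc_request_sketch(struct net2280_ep *ep,
                    gfp_t gfp_flags)
    {
            struct net2280_request *req;

            req = kzalloc(sizeof(*req), gfp_flags);
            if (!req)
                    return NULL;
            req->req.dma = DMA_ADDR_INVALID;        /* "not mapped yet" marker */

            if (ep->dma) {
                    /* descriptor lives in coherent memory so the engine can
                     * poll it; td_dma is its bus address */
                    req->td = pci_pool_alloc(ep->dev->requests, gfp_flags,
                                    &req->td_dma);
                    if (!req->td) {
                            kfree(req);
                            return NULL;
                    }
            }
            return req;
    }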
630 /* fill out dma descriptor to match a given request */
655 td->dmaaddr = cpu_to_le32 (req->req.dma);
657 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
671 static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
673 handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
676 static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
678 writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
679 spin_stop_dma (dma);
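Lines 671-679 are the driver's two "stop and wait" helpers. Reassembled with comments (handshake() is the driver's own poll-with-timeout routine, called exactly as at line 673):

    static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
    {
            /* poll dmactl until DMA_ENABLE reads back as zero */
            handshake(&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
    }

    static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
    {
            /* clear the enable bit, then wait for the channel to go idle */
            writel(readl(&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
            spin_stop_dma(dma);
    }

spin_stop_dma() alone follows a DMA_ABORT (lines 1073-1074, 2152-2153); stop_dma() is the general-purpose quiesce used elsewhere.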
684 struct net2280_dma_regs __iomem *dma = ep->dma;
690 writel (tmp, &dma->dmacount);
691 writel (readl (&dma->dmastat), &dma->dmastat);
693 writel (td_dma, &dma->dmadesc);
694 writel (dmactl, &dma->dmactl);
698 writel ((1 << DMA_START), &dma->dmastat);
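Lines 684-698 show the register order for a descriptor-queued start: program dmacount, acknowledge any stale status by writing dmastat back to itself, load the bus address of the first descriptor into dmadesc, write the control word, then kick DMA_START. A sketch of just that sequence; "count_flags" and "dmactl" stand for values the caller builds, which are not visible in the matched lines:

    /* sketch: descriptor-queued start, in the register order shown above */
    static void start_queue_sketch(struct net2280_dma_regs __iomem *dma,
                    u32 count_flags, u32 dmactl, u32 td_dma)
    {
            writel(count_flags, &dma->dmacount);            /* per-transfer flags */
            writel(readl(&dma->dmastat), &dma->dmastat);    /* ack stale status */
            writel(td_dma, &dma->dmadesc);  /* bus address of first descriptor */
            writel(dmactl, &dma->dmactl);   /* channel mode + enable */
            writel((1 << DMA_START), &dma->dmastat);        /* kick the engine */
    }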
707 struct net2280_dma_regs __iomem *dma = ep->dma;
710 /* on this path we "know" there's no dma active (yet) */
711 WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
712 writel (0, &ep->dma->dmactl);
722 writel (readl (&dma->dmastat), &dma->dmastat);
725 writel (req->req.dma, &dma->dmaaddr);
728 /* dma irq, faking scatterlist status */
731 | tmp, &dma->dmacount);
735 writel ((1 << DMA_ENABLE), &dma->dmactl);
736 writel ((1 << DMA_START), &dma->dmastat);
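Lines 707-736 are the non-chained path: with no descriptor list, the buffer's bus address and the byte count go straight into dmaaddr/dmacount and completion comes back through the DMA interrupt ("faking scatterlist status"). A sketch under the same hedges; "count" stands for the byte count with the done-interrupt flag already ORed in by the caller, as the "| tmp" continuation at line 731 suggests:

    /* sketch: one-shot DMA for a single request, no descriptor chain */
    static void start_one_dma_sketch(struct net2280_ep *ep,
                    struct net2280_request *req, u32 count)
    {
            struct net2280_dma_regs __iomem *dma = ep->dma;

            /* this path assumes no DMA is running yet */
            WARN_ON(readl(&dma->dmactl) & (1 << DMA_ENABLE));
            writel(0, &dma->dmactl);

            writel(readl(&dma->dmastat), &dma->dmastat);    /* ack stale status */
            writel(req->req.dma, &dma->dmaaddr);            /* buffer bus address */
            writel(count, &dma->dmacount);  /* byte count + done-irq flag */

            writel((1 << DMA_ENABLE), &dma->dmactl);        /* enable ... */
            writel((1 << DMA_START), &dma->dmastat);        /* ... and start */
    }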
743 /* force packet boundaries between dma requests, but prevent the
801 pci_unmap_single (dev->pdev, req->req.dma, req->req.length,
803 req->req.dma = DMA_ADDR_INVALID;
846 if (ep->dma && _req->length == 0)
849 /* set up dma mapping in case the caller didn't */
850 if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
851 _req->dma = pci_map_single (dev->pdev, _req->buf, _req->length,
865 if (ep->dma)
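Lines 801-803 and 846-865 show the buffer-mapping convention for DMA-capable endpoints: a request whose dma field is still DMA_ADDR_INVALID gets mapped with pci_map_single() at queue time, and on completion it is unmapped and the marker restored. A minimal sketch of both halves; the is_in direction flag is assumed from the rest of the driver, and the zero-length special case at line 846 is not handled here:

    /* sketch: map on queue if the caller didn't, unmap on completion */
    static void map_request_sketch(struct net2280_ep *ep, struct usb_request *_req)
    {
            if (ep->dma && _req->dma == DMA_ADDR_INVALID)
                    _req->dma = pci_map_single(ep->dev->pdev, _req->buf,
                                    _req->length,
                                    ep->is_in ? PCI_DMA_TODEVICE
                                              : PCI_DMA_FROMDEVICE);
    }

    static void unmap_request_sketch(struct net2280_ep *ep, struct usb_request *_req)
    {
            pci_unmap_single(ep->dev->pdev, _req->dma, _req->length,
                            ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
            _req->dma = DMA_ADDR_INVALID;   /* back to "needs mapping" */
    }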
908 } else if (ep->dma) {
974 tmp = readl (&ep->dma->dmacount);
995 DEBUG (ep->dev, "%s dma, discard %d len %d\n",
1022 * DMA_FIFO_VALIDATE doesn't init from dma descriptors.
1029 DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
1065 writel (0, &ep->dma->dmactl);
1073 writel ((1 << DMA_ABORT), &ep->dma->dmastat);
1074 spin_stop_dma (ep->dma);
1076 stop_dma (ep->dma);
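Lines 1073-1076 are the abort path: when a transfer is in flight the channel is told to abort and then spun on until it stops; an idle channel just gets the plain disable. A sketch, assuming ep->queue is the endpoint's request list as elsewhere in the driver:

    /* sketch: abort whatever the channel is doing, then wait for it to idle */
    static void abort_dma_sketch(struct net2280_ep *ep)
    {
            if (!list_empty(&ep->queue)) {
                    /* a transfer is in flight: force it to stop */
                    writel((1 << DMA_ABORT), &ep->dma->dmastat);
                    spin_stop_dma(ep->dma);
            } else {
                    /* nothing queued: a plain disable is enough */
                    stop_dma(ep->dma);
            }
    }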
1087 if (ep->dma)
1113 /* quiesce dma while we patch the queue */
1116 if (ep->dma) {
1117 dmactl = readl (&ep->dma->dmactl);
1119 stop_dma (ep->dma);
1135 if (ep->dma) {
1136 DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
1143 readl (&ep->dma->dmacount),
1153 } else if (ep->dma && use_dma_chaining) {
1156 &ep->dma->dmadesc);
1158 writel (readl (&ep->dma->dmacount)
1160 &ep->dma->dmacount);
1176 if (ep->dma) {
1177 /* turn off dma on inactive queues */
1179 stop_dma (ep->dma);
1183 writel (dmactl, &ep->dma->dmactl);
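Lines 1113-1183 bracket the dequeue path: DMA is quiesced (the current dmactl is saved, then stop_dma()) while the request list is patched, and afterwards the channel is either restarted with the saved control word or left stopped if the queue went empty. The chained-descriptor patching at lines 1153-1160 is omitted from this sketch:

    /* sketch: quiesce DMA around queue surgery, then restart or leave off */
    static void dequeue_bracket_sketch(struct net2280_ep *ep)
    {
            u32 dmactl = 0;

            if (ep->dma) {
                    dmactl = readl(&ep->dma->dmactl);       /* remember mode bits */
                    stop_dma(ep->dma);                      /* quiesce the channel */
            }

            /* ... unlink the request, complete it, fix up descriptors ... */

            if (ep->dma) {
                    if (list_empty(&ep->queue))
                            stop_dma(ep->dma);              /* nothing left to do */
                    else
                            writel(dmactl, &ep->dma->dmactl); /* resume as before */
            }
    }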
1438 ", chiprev %04x, dma %s\n\n"
1526 if (!ep->dma)
1530 " dma\tctl %08x stat %08x count %08x\n"
1532 readl (&ep->dma->dmactl),
1533 readl (&ep->dma->dmastat),
1534 readl (&ep->dma->dmacount),
1535 readl (&ep->dma->dmaaddr),
1536 readl (&ep->dma->dmadesc));
1610 ep->dma ? "dma" : "pio", ep->fifo_size
1629 if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
1635 readl (&ep->dma->dmacount));
1646 if (ep->dma) {
1680 /* another driver-specific mode might be a request type doing dma
1794 /* clear old dma and irq state */
1798 if (ep->dma)
1835 ep->dma = &dev->dma [tmp - 1];
2030 * also works for dma-capable endpoints, in pio mode or just
2113 if (likely (ep->dma != 0)) {
2125 /* any preceding dma transfers must finish.
2126 * dma handles (M >= N), may empty the queue
2141 count = readl (&ep->dma->dmacount);
2143 if (readl (&ep->dma->dmadesc)
2152 writel ((1 << DMA_ABORT), &ep->dma->dmastat);
2153 spin_stop_dma (ep->dma);
2172 /* (re)start dma if needed, stop NAKing */
2177 DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
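In the lines 2113-2153 cluster, a short transfer handled in PIO must first let any preceding DMA drain: the live dmacount gives the residue, and dmadesc is compared against the head request's descriptor address to see whether that descriptor is still in flight; if it is, the channel is aborted and spun down (lines 2152-2153). A one-line sketch of the in-flight test:

    /* sketch: is the engine still working on this request's descriptor? */
    static bool td_in_flight_sketch(struct net2280_ep *ep,
                    struct net2280_request *req)
    {
            return ep->dma && req->td_dma == readl(&ep->dma->dmadesc);
    }

The same comparison shows up in the endpoint register dump at line 1629.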
2571 struct net2280_dma_regs __iomem *dma;
2579 dma = ep->dma;
2581 if (!dma)
2584 /* clear ep's dma status */
2585 tmp = readl (&dma->dmastat);
2586 writel (tmp, &dma->dmastat);
2598 stop_dma (ep->dma);
2613 /* disable dma on inactive queues; else maybe restart */
2616 stop_dma (ep->dma);
2618 tmp = readl (&dma->dmactl);
2648 ERROR (dev, "pci dma error; stat %08x\n", stat);
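Lines 2571-2618 are the per-channel DMA interrupt service: read dmastat and write the value back to acknowledge the latched bits, harvest whatever the engine finished, then stop the channel if its queue is now empty or leave it running (line 2618 reads dmactl for the restart case). A sketch; scan_dma_completions() names the assumed completion-harvesting step, which does not itself appear in the matched lines:

    /* sketch: per-channel DMA interrupt handling */
    static void dma_irq_sketch(struct net2280_ep *ep)
    {
            struct net2280_dma_regs __iomem *dma = ep->dma;
            u32 tmp;

            if (!dma)
                    return;

            /* acknowledge latched status by writing it back */
            tmp = readl(&dma->dmastat);
            writel(tmp, &dma->dmastat);

            /* harvest finished descriptors (assumed helper) */
            scan_dma_completions(ep);

            /* disable dma on inactive queues; restart logic elided */
            if (list_empty(&ep->queue))
                    stop_dma(ep->dma);
    }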
2672 /* handle disconnect, dma, and more */
2799 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
2824 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
2868 INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
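Line 2799 carves the DMA channel registers out of the memory-mapped BAR at offset 0x0180, and the note at line 2824 says only the 32 LSBs of DMA addresses can be nonzero, i.e. the controller is a 32-bit DMA master. A probe-time sketch of just those two points; whether the original probe sets the mask explicitly is not visible in these matches, and the other register-block offsets are left out:

    /* sketch: probe-time DMA setup implied by the lines above */
    static int probe_dma_setup_sketch(struct net2280 *dev, struct pci_dev *pdev,
                    void __iomem *base)
    {
            /* per-channel DMA registers start 0x180 bytes into the BAR */
            dev->dma = (struct net2280_dma_regs __iomem *)(base + 0x0180);

            /* the controller only drives 32-bit DMA addresses */
            return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
    }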