Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/musb/

Lines Matching defs:hw_ep
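
The matches below all touch the musb_hw_ep host-side endpoint structure; judging by the helpers involved (musb_advance_schedule, musb_h_flush_rxfifo, musb_ep_get_qh), they come from the host-controller code in this directory (musb_host.c in mainline 2.6.36). As an orientation aid, here is a partial sketch of struct musb_hw_ep covering the members the matches reference, plus is_shared_fifo, which the qh accessors sketched after the listing use. It is reconstructed from this listing and the mainline 2.6.36 musb_core.h, so field order and the omitted members are assumptions, not this vendor tree's exact definition:

    /*
     * Partial sketch of struct musb_hw_ep (one per hardware endpoint,
     * stored in musb->endpoints[]).  Only members referenced in the
     * listing below, plus is_shared_fifo, are shown here.
     */
    struct musb_hw_ep {
    	void __iomem		*regs;			/* endpoint register window */
    	u8			epnum;			/* index in musb->endpoints[] */
    	bool			is_shared_fifo;		/* one FIFO serves IN and OUT */
    	bool			tx_double_buffered;	/* TX FIFO is double buffered */
    	u16			max_packet_sz_tx;	/* TX FIFO max packet size */
    	u16			max_packet_sz_rx;	/* RX FIFO max packet size */
    	struct dma_channel	*tx_channel;		/* NULL when doing PIO */
    	struct dma_channel	*rx_channel;		/* NULL when doing PIO */
    	struct musb_qh		*in_qh;			/* currently scheduled IN qh */
    	struct musb_qh		*out_qh;		/* currently scheduled OUT qh */
    	u8			rx_reinit;		/* RX side must be reprogrammed */
    	/* ... remaining members omitted ... */
    };
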

212 struct musb_hw_ep *hw_ep = qh->hw_ep;
215 int epnum = hw_ep->epnum;
242 DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
254 musb_ep_set_qh(hw_ep, is_in, qh);
284 hw_ep->tx_channel ? "dma" : "pio");
286 if (!hw_ep->tx_channel)
287 musb_h_tx_start(hw_ep);
289 musb_h_tx_dma_start(hw_ep);
331 void __iomem *epio = qh->hw_ep->regs;
351 struct musb_hw_ep *hw_ep, int is_in)
353 struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
354 struct musb_hw_ep *ep = qh->hw_ep;
420 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
425 static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
437 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
438 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
441 return musb_readw(hw_ep->regs, MUSB_RXCSR);
456 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
457 void __iomem *epio = hw_ep->regs;
458 struct musb_qh *qh = hw_ep->in_qh;
523 musb_read_fifo(hw_ep, length, buf);
528 musb_h_flush_rxfifo(hw_ep, csr);
612 struct musb_hw_ep *hw_ep, struct musb_qh *qh,
615 struct dma_channel *channel = hw_ep->tx_channel;
616 void __iomem *epio = hw_ep->regs;
663 hw_ep->tx_channel = NULL;
685 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
686 void __iomem *epio = hw_ep->regs;
687 struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
703 dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
706 dma_controller, hw_ep, is_out);
708 hw_ep->tx_channel = dma_channel;
710 hw_ep->rx_channel = dma_channel;
732 musb_h_tx_flush_fifo(hw_ep);
762 musb_h_ep0_flush_fifo(hw_ep);
779 | ((hw_ep->max_packet_sz_tx /
793 load_count = min((u32) hw_ep->max_packet_sz_tx,
799 hw_ep, qh, urb, offset, len))
805 musb_write_fifo(hw_ep, load_count, buf);
815 if (hw_ep->rx_reinit) {
816 musb_rx_reinit(musb, qh, hw_ep);
828 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
834 hw_ep->epnum, csr);
849 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
850 csr = musb_readw(hw_ep->regs,
865 hw_ep->rx_channel = NULL;
874 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
875 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
889 struct musb_hw_ep *hw_ep = musb->control_ep;
890 struct musb_qh *qh = hw_ep->in_qh;
901 musb_read_fifo(hw_ep, fifo_count, fifo_dest);
940 musb_write_fifo(hw_ep, fifo_count, fifo_dest);
966 struct musb_hw_ep *hw_ep = musb->control_ep;
967 void __iomem *epio = hw_ep->regs;
968 struct musb_qh *qh = hw_ep->in_qh;
1028 musb_h_ep0_flush_fifo(hw_ep);
1042 musb_h_ep0_flush_fifo(hw_ep);
1075 musb_advance_schedule(musb, urb, hw_ep, 1);
1105 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1106 void __iomem *epio = hw_ep->regs;
1107 struct musb_qh *qh = hw_ep->out_qh;
1123 dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1168 musb_h_tx_flush_fifo(hw_ep);
1302 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1305 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1308 musb_h_tx_dma_start(hw_ep);
1319 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
1325 musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1424 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1425 void __iomem *epio = hw_ep->regs;
1426 struct musb_qh *qh = hw_ep->in_qh;
1439 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1453 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1493 musb_bulk_rx_nak_timeout(musb, hw_ep);
1521 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1569 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1684 if (rx_count < hw_ep->max_packet_sz_rx) {
1735 hw_ep->rx_channel = NULL;
1755 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1772 struct musb_hw_ep *hw_ep = NULL;
1781 hw_ep = musb->control_ep;
1797 for (epnum = 1, hw_ep = musb->endpoints + 1;
1799 epnum++, hw_ep++) {
1802 if (musb_ep_get_qh(hw_ep, is_in) != NULL)
1805 if (hw_ep == musb->bulk_ep)
1809 diff = hw_ep->max_packet_sz_rx;
1811 diff = hw_ep->max_packet_sz_tx;
1828 hw_ep = musb->endpoints + epnum;
1830 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
1842 hw_ep = musb->bulk_ep;
1865 hw_ep = musb->endpoints + best_end;
1873 qh->hw_ep = hw_ep;
1917 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
2068 struct musb_hw_ep *ep = qh->hw_ep;
2159 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2201 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2212 * queue on hw_ep (e.g. bulk ring) when we're done.
2217 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
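
Most of the qh bookkeeping above goes through accessors rather than touching in_qh/out_qh directly (lines 254, 353, 687, 1802, 2159, 2201). A minimal sketch of those helpers, modeled on the static functions in mainline 2.6.36 musb_host.c; the is_shared_fifo branches are carried over from mainline and are an assumption about this vendor tree:

    /* Bind a qh to the endpoint's direction(s); a shared-FIFO endpoint
     * serves both directions, so both pointers track the same qh. */
    static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in,
    			       struct musb_qh *qh)
    {
    	if (is_in != 0 || ep->is_shared_fifo)
    		ep->in_qh = qh;
    	if (is_in == 0 || ep->is_shared_fifo)
    		ep->out_qh = qh;
    }

    /* Fetch the qh currently scheduled on this endpoint and direction. */
    static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
    {
    	return is_in ? ep->in_qh : ep->out_qh;
    }

One other spot worth flagging so it is not mistaken for a copy-paste artifact: the back-to-back writes at lines 437 and 438 inside musb_h_flush_rxfifo() are intentional. The mainline source writes MUSB_RXCSR twice to cover double-buffered RX FIFOs, then reads the register back (line 441) to flush the write buffer.
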