Lines Matching defs:musb in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/musb/

99 static void musb_ep_program(struct musb *musb, u8 epnum,
154 * musb must be locked.
204 musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
208 void __iomem *mbase = musb->mregs;
226 musb->ep0_stage = MUSB_EP0_START;
255 musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
294 static void musb_giveback(struct musb *musb, struct urb *urb, int status)
295 __releases(musb->lock)
296 __acquires(musb->lock)
321 usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
322 spin_unlock(&musb->lock);
323 usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
324 spin_lock(&musb->lock);
350 static void musb_advance_schedule(struct musb *musb, struct urb *urb,
373 musb_giveback(musb, urb, status);
421 musb_start_urb(musb, is_in, qh);
448 musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
456 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
549 musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
587 if (musb->is_multipoint) {
593 musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
602 if (musb->hwvers < MUSB_HWVERS_2000)
677 static void musb_ep_program(struct musb *musb, u8 epnum,
684 void __iomem *mbase = musb->mregs;
685 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
701 dma_controller = musb->dma_controller;
766 if (musb->is_multipoint) {
776 if (can_bulk_split(musb, qh->type))
787 if (musb->is_multipoint)
792 if (can_bulk_split(musb, qh->type))
816 musb_rx_reinit(musb, qh, hw_ep);
884 static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
889 struct musb_hw_ep *hw_ep = musb->control_ep;
893 switch (musb->ep0_stage) {
920 musb->ep0_stage = MUSB_EP0_IN;
925 musb->ep0_stage = MUSB_EP0_OUT;
947 ERR("bogus ep0 stage %d\n", musb->ep0_stage);
960 irqreturn_t musb_h_ep0_irq(struct musb *musb)
965 void __iomem *mbase = musb->mregs;
966 struct musb_hw_ep *hw_ep = musb->control_ep;
982 csr, qh, len, urb, musb->ep0_stage);
985 if (MUSB_EP0_STATUS == musb->ep0_stage) {
1007 * if (qh->ring.next != &musb->control), then
1048 if (musb_h_ep0_continue(musb, len, urb)) {
1050 csr = (MUSB_EP0_IN == musb->ep0_stage)
1063 musb->ep0_stage = MUSB_EP0_STATUS;
1071 musb->ep0_stage = MUSB_EP0_IDLE;
1075 musb_advance_schedule(musb, urb, hw_ep, 1);
1098 void musb_host_tx(struct musb *musb, u8 epnum)
1105 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1110 void __iomem *mbase = musb->mregs;
1149 * if (bulk && qh->ring.next != &musb->out_bulk), then
1162 (void) musb->dma_controller->channel_abort(dma);
1302 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1305 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1373 /* Schedule next QH from musb->in_bulk and move the current qh to
1376 static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
1380 void __iomem *mbase = musb->mregs;
1394 cur_qh = first_qh(&musb->in_bulk);
1399 musb->dma_controller->channel_abort(dma);
1406 list_move_tail(&cur_qh->ring, &musb->in_bulk);
1408 /* get the next qh from musb->in_bulk */
1409 next_qh = first_qh(&musb->in_bulk);
1413 musb_start_urb(musb, 1, next_qh);
1421 void musb_host_rx(struct musb *musb, u8 epnum)
1424 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1428 void __iomem *mbase = musb->mregs;
1492 && !list_is_singular(&musb->in_bulk)) {
1493 musb_bulk_rx_nak_timeout(musb, hw_ep);
1518 (void) musb->dma_controller->channel_abort(dma);
1548 (void) musb->dma_controller->channel_abort(dma);
1642 c = musb->dma_controller;
1743 done = musb_host_packet_rx(musb, urb,
1755 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1765 struct musb *musb,
1780 head = &musb->control;
1781 hw_ep = musb->control_ep;
1797 for (epnum = 1, hw_ep = musb->endpoints + 1;
1798 epnum < musb->nr_endpoints;
1805 if (hw_ep == musb->bulk_ep)
1828 hw_ep = musb->endpoints + epnum;
1842 hw_ep = musb->bulk_ep;
1844 head = &musb->in_bulk;
1846 head = &musb->out_bulk;
1865 hw_ep = musb->endpoints + best_end;
1876 musb_start_urb(musb, is_in, qh);
1886 struct musb *musb = hcd_to_musb(hcd);
1895 if (!is_host_active(musb) || !musb->is_active)
1898 spin_lock_irqsave(&musb->lock, flags);
1903 spin_unlock_irqrestore(&musb->lock, flags);
1924 spin_lock_irqsave(&musb->lock, flags);
1926 spin_unlock_irqrestore(&musb->lock, flags);
1939 * Some musb cores don't support high bandwidth ISO transfers; and
1947 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
1948 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2011 if (musb->is_multipoint) {
2033 spin_lock_irqsave(&musb->lock, flags);
2042 ret = musb_schedule(musb, qh,
2048 spin_unlock_irqrestore(&musb->lock, flags);
2052 spin_lock_irqsave(&musb->lock, flags);
2054 spin_unlock_irqrestore(&musb->lock, flags);
2071 void __iomem *regs = ep->musb->mregs;
2083 status = ep->musb->dma_controller->channel_abort(dma);
2119 musb_advance_schedule(ep->musb, urb, ep, is_in);
2125 struct musb *musb = hcd_to_musb(hcd);
2136 spin_lock_irqsave(&musb->lock, flags);
2163 musb_giveback(musb, urb, 0);
2177 spin_unlock_irqrestore(&musb->lock, flags);
2187 struct musb *musb = hcd_to_musb(hcd);
2191 spin_lock_irqsave(&musb->lock, flags);
2217 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2225 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2232 spin_unlock_irqrestore(&musb->lock, flags);
2237 struct musb *musb = hcd_to_musb(hcd);
2239 return musb_readw(musb->mregs, MUSB_FRAME);
2244 struct musb *musb = hcd_to_musb(hcd);
2250 musb->port1_status = 0;
2262 struct musb *musb = hcd_to_musb(hcd);
2265 if (!is_host_active(musb))
2268 switch (musb->xceiv->state) {
2276 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2278 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2284 if (musb->is_active) {
2286 otg_state_string(musb));
2299 .description = "musb-hcd",
2301 .hcd_priv_size = sizeof(struct musb),
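
The musb_giveback() entries above (source lines 294-324) show the usual HCD completion pattern: the URB is unlinked from its endpoint while musb->lock is held, the lock is dropped across usb_hcd_giveback_urb() so the completion callback can resubmit without deadlocking on musb->lock, and the lock is retaken before the caller continues. A minimal sketch reconstructed from just those listed fragments (the driver's bookkeeping and debug tracing between lines 296 and 321 is not shown here):

static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	/* detach the URB from its endpoint queue while musb->lock is still held */
	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);

	/*
	 * Drop the lock across the completion callback: the callback may
	 * resubmit the URB, which re-enters the driver and takes musb->lock.
	 */
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}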
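
The musb_bulk_rx_nak_timeout() entries (source lines 1373-1413) outline how the shared bulk-IN endpoint is rotated when one queue head keeps NAKing: the current qh is moved to the tail of musb->in_bulk and the qh now at the head is started. A sketch of that rotation, assuming the DMA channel abort shown at line 1399 and the endpoint register handling in the real function are done elsewhere:

static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct musb_qh *cur_qh, *next_qh;

	/* the qh currently NAKing on the shared bulk-IN endpoint;
	 * ep register handling (CSR, toggle) in the real driver is elided here */
	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		/* rotate it to the tail so other bulk-IN queue heads get a turn */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* restart whatever now sits at the head of musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);
		musb_start_urb(musb, 1, next_qh);	/* is_in = 1: bulk-IN */
	}
}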