Lines matching refs: ep

90 static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
93 int buflen = ep->is_in ? req->req.length : req->req.actual;
97 dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
111 static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
124 static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
126 u32 epctrl = gr_read32(&ep->regs->epctrl);
127 u32 epstat = gr_read32(&ep->regs->epstat);
131 seq_printf(seq, "%s:\n", ep->ep.name);
136 seq_printf(seq, " dma_start = %d\n", ep->dma_start);
137 seq_printf(seq, " stopped = %d\n", ep->stopped);
138 seq_printf(seq, " wedged = %d\n", ep->wedged);
139 seq_printf(seq, " callback = %d\n", ep->callback);
140 seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
141 seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
142 seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
156 if (list_empty(&ep->queue)) {
162 list_for_each_entry(req, &ep->queue, queue) {
186 struct gr_ep *ep;
199 list_for_each_entry(ep, &dev->ep_list, ep_list)
200 gr_seq_ep_show(seq, ep);
231 static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
236 dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
238 dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
282 static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
296 dev = ep->dev;
297 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
300 if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
305 * divisible by ep->ep.maxpacket and the last descriptor was
310 memcpy(buftail, ep->tailbuf, req->oddlen);
314 dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
315 ep->ep.name);
316 gr_dbgprint_request("OVFL", ep, req);
322 if (ep->is_in)
323 gr_dbgprint_request("SENT", ep, req);
325 gr_dbgprint_request("RECV", ep, req);
328 /* Prevent changes to ep->queue during callback */
329 ep->callback = 1;
339 usb_gadget_giveback_request(&ep->ep, &req->req);
343 ep->callback = 0;
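
The fragments from gr_finish_request() above (lines 282-343) show the completion path: for an OUT request whose length is not a multiple of ep->ep.maxpacket, the last DMA descriptor was pointed at a per-endpoint tail buffer, so on completion the odd remainder is copied back into the request buffer and an overflow is flagged if more data arrived than was asked for, while ep->callback guards the queue during the gadget's completion callback. A minimal user-space sketch of the copy-back step follows; the struct fields and the 64-byte tail buffer size are simplified stand-ins, not the driver's definitions.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Reduced stand-ins for the driver's request/endpoint structures; only
 * the fields touched by the tail-buffer copy are modeled, and the
 * 64-byte tail buffer is an arbitrary example size. */
struct sketch_req {
	char *buf;           /* req->req.buf                                  */
	unsigned length;     /* req->req.length, bytes the gadget asked for   */
	unsigned actual;     /* req->req.actual, bytes the hardware delivered */
	unsigned evenlen;    /* part DMAed straight into buf                  */
	unsigned oddlen;     /* odd remainder redirected to the tail buffer   */
	int status;
};

struct sketch_ep {
	unsigned char tailbuf[64];   /* per-endpoint bounce buffer */
};

/* When an OUT request's length is not a multiple of maxpacket, the last
 * descriptor points at the tail buffer; on completion the odd remainder
 * is copied back and an overflow is flagged if more arrived than was
 * requested - the steps shown around lines 300-316 above. */
static void finish_out_request(struct sketch_ep *ep, struct sketch_req *req)
{
	if (req->oddlen) {
		memcpy(req->buf + req->evenlen, ep->tailbuf, req->oddlen);

		if (req->actual > req->length) {
			fprintf(stderr, "Overflow for ep\n");
			req->status = -EOVERFLOW;
		}
	}
}

int main(void)
{
	char data[10] = { 0 };
	struct sketch_ep ep = { .tailbuf = { 'x', 'y', 'z' } };
	struct sketch_req req = {
		.buf = data, .length = 10, .actual = 10, .evenlen = 7, .oddlen = 3,
	};

	finish_out_request(&ep, &req);       /* copies "xyz" into data[7..9] */
	printf("last byte: %c, status %d\n", data[9], req.status);
	return 0;
}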
360 * Starts DMA for endpoint ep if there are requests in the queue.
362 * Must be called with dev->lock held and with !ep->stopped.
364 static void gr_start_dma(struct gr_ep *ep)
369 if (list_empty(&ep->queue)) {
370 ep->dma_start = 0;
374 req = list_first_entry(&ep->queue, struct gr_request, queue);
381 * ep->ep.maxpacket. It could lead to buffer overruns if an unexpectedly
385 if (!ep->is_in && req->oddlen)
386 req->last_desc->data = ep->tailbuf_paddr;
391 gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);
394 dmactrl = gr_read32(&ep->regs->dmactrl);
395 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);
397 ep->dma_start = 1;
401 * Finishes the first request in the ep's queue and, if available, starts the
404 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
406 static void gr_dma_advance(struct gr_ep *ep, int status)
410 req = list_first_entry(&ep->queue, struct gr_request, queue);
411 gr_finish_request(ep, req, status);
412 gr_start_dma(ep); /* Regardless of ep->dma_start */
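
Lines 360-412 cover gr_start_dma() and gr_dma_advance(): DMA is armed only when the queue is non-empty (otherwise dma_start is cleared), and advancing completes the head request and then calls gr_start_dma() again regardless of dma_start so the next queued request is picked up. A reduced sketch of that queue-driven pattern; the structures, register fields and the DMACTRL_DA value are illustrative stand-ins.

#include <stdio.h>

struct req_node {
	struct req_node *next;
	unsigned desc_paddr;              /* stand-in for req->curr_desc->paddr */
};

struct sketch_ep {
	struct req_node *queue;           /* head = request currently on the wire */
	int dma_start;
	unsigned dmaaddr;                 /* stand-in for ep->regs->dmaaddr */
	unsigned dmactrl;                 /* stand-in for ep->regs->dmactrl */
};

#define DMACTRL_DA 0x1                /* "DMA active" bit (illustrative value) */

/* Arm DMA for the head of the queue, or note that the engine is idle -
 * the shape of gr_start_dma() above. */
static void start_dma(struct sketch_ep *ep)
{
	if (!ep->queue) {
		ep->dma_start = 0;
		return;
	}
	ep->dmaaddr = ep->queue->desc_paddr;   /* point HW at the first descriptor */
	ep->dmactrl |= DMACTRL_DA;             /* set "DMA active" */
	ep->dma_start = 1;
}

/* Complete the head request, then restart DMA regardless of dma_start,
 * as gr_dma_advance() does. */
static void dma_advance(struct sketch_ep *ep, int status)
{
	struct req_node *done = ep->queue;

	ep->queue = done->next;
	printf("request with descriptor 0x%x finished, status %d\n",
	       done->desc_paddr, status);
	start_dma(ep);
}

int main(void)
{
	struct req_node r2 = { .next = NULL, .desc_paddr = 0x2000 };
	struct req_node r1 = { .next = &r2,  .desc_paddr = 0x1000 };
	struct sketch_ep ep = { .queue = &r1 };

	start_dma(&ep);          /* arms DMA for r1 */
	dma_advance(&ep, 0);     /* finishes r1, re-arms for r2 */
	dma_advance(&ep, 0);     /* finishes r2, engine goes idle */
	printf("dma_start = %d\n", ep.dma_start);
	return 0;
}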
421 static void gr_abort_dma(struct gr_ep *ep)
425 dmactrl = gr_read32(&ep->regs->dmactrl);
426 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
436 static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
441 desc = gr_alloc_dma_desc(ep, gfp_flags);
446 if (ep->is_in)
475 static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
487 u16 size = min(bytes_left, ep->bytes_per_buffer);
489 if (size < ep->bytes_per_buffer) {
495 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
508 gr_free_dma_desc_chain(ep->dev, req);
519 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
528 static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
540 u16 size = min(bytes_left, ep->bytes_per_buffer);
542 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
553 * multiples of ep->ep.maxpacket.
555 if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
556 ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
570 gr_free_dma_desc_chain(ep->dev, req);
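
Lines 436-570 build the DMA descriptor chains: both directions split the request into chunks of at most ep->bytes_per_buffer, the OUT path redirects an odd final chunk into the tail buffer, and the IN path appends a zero-length descriptor when req.zero is set and the length is an exact multiple of ep->ep.maxpacket (lines 553-556). A compact sketch of the IN-side split; it only prints the plan and allocates no real descriptors.

#include <stdio.h>

/* Print the descriptor plan for an IN request: chunks of at most
 * bytes_per_buffer bytes, plus a trailing zero-length packet when the
 * request asks for one and the length divides evenly by maxpacket. */
static void plan_in_descriptors(unsigned length, unsigned bytes_per_buffer,
				unsigned maxpacket, int want_zlp)
{
	unsigned bytes_left = length;
	unsigned offset = 0;

	while (bytes_left > 0) {
		unsigned size = bytes_left < bytes_per_buffer ?
				bytes_left : bytes_per_buffer;

		printf("desc: offset %u, %u bytes\n", offset, size);
		offset += size;
		bytes_left -= size;
	}

	if (want_zlp && length % maxpacket == 0)
		printf("desc: zero-length packet terminator\n");
}

int main(void)
{
	/* 1536 bytes with 512-byte buffers: three full descriptors plus a
	 * ZLP, because 1536 is a multiple of the 512-byte maxpacket. */
	plan_in_descriptors(1536, 512, 512, 1);
	return 0;
}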
576 static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
578 struct gr_udc *dev = ep->dev;
581 if (unlikely(!ep->ep.desc && ep->num != 0)) {
582 dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
589 ep->ep.name, req->req.buf, list_empty(&req->queue));
605 ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
611 if (ep->is_in)
612 ret = gr_setup_in_desc_list(ep, req, gfp_flags);
614 ret = gr_setup_out_desc_list(ep, req, gfp_flags);
620 list_add_tail(&req->queue, &ep->queue);
623 if (!ep->dma_start && likely(!ep->stopped))
624 gr_start_dma(ep);
634 static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
637 if (ep->is_in)
638 gr_dbgprint_request("RESP", ep, req);
640 return gr_queue(ep, req, gfp_flags);
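
Lines 576-640 are the enqueue path, gr_queue(): reject a non-control endpoint that has no descriptor yet, map the request for DMA, build the descriptor list for the request's direction, append it to ep->queue, and start DMA only if the engine is idle and the endpoint is not stopped. A condensed sketch of that control flow; every helper below is a local stub and the field names are simplified stand-ins.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_ep {
	bool has_desc;     /* endpoint has been enabled (ep->ep.desc != NULL) */
	int  num;          /* endpoint number, 0 = control                    */
	bool is_in;
	bool dma_start;
	bool stopped;
	int  queued;       /* request count, stands in for ep->queue          */
};

static int  map_request(void)               { return 0; } /* usb_gadget_map_request stand-in */
static int  setup_in_desc_list(void)        { return 0; }
static int  setup_out_desc_list(void)       { return 0; }
static void start_dma(struct sketch_ep *ep) { ep->dma_start = true; }

static int queue_request(struct sketch_ep *ep)
{
	int ret;

	/* Only ep0 may be used before a descriptor has been set. */
	if (!ep->has_desc && ep->num != 0)
		return -EINVAL;

	ret = map_request();
	if (ret)
		return ret;

	ret = ep->is_in ? setup_in_desc_list() : setup_out_desc_list();
	if (ret)
		return ret;

	ep->queued++;   /* list_add_tail(&req->queue, &ep->queue) in the driver */

	/* Kick off DMA only if the engine is idle and the ep is not stopped. */
	if (!ep->dma_start && !ep->stopped)
		start_dma(ep);

	return 0;
}

int main(void)
{
	struct sketch_ep ep = { .has_desc = true, .num = 1, .is_in = true };

	printf("queue: %d, dma_start: %d\n", queue_request(&ep), ep.dma_start);
	return 0;
}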
651 static void gr_ep_nuke(struct gr_ep *ep)
655 ep->stopped = 1;
656 ep->dma_start = 0;
657 gr_abort_dma(ep);
659 while (!list_empty(&ep->queue)) {
660 req = list_first_entry(&ep->queue, struct gr_request, queue);
661 gr_finish_request(ep, req, -ESHUTDOWN);
670 static void gr_ep_reset(struct gr_ep *ep)
672 gr_write32(&ep->regs->epctrl, 0);
673 gr_write32(&ep->regs->dmactrl, 0);
675 ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
676 ep->ep.desc = NULL;
677 ep->stopped = 1;
678 ep->dma_start = 0;
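
Lines 651-678: gr_ep_nuke() marks the endpoint stopped, aborts any running DMA and completes every queued request with -ESHUTDOWN, and gr_ep_reset() then returns the registers and software state to their defaults. A small sketch of the drain loop, with a plain singly linked list standing in for the kernel list and heap nodes standing in for requests.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct req_node {
	struct req_node *next;
};

struct sketch_ep {
	struct req_node *queue;   /* simplified stand-in for ep->queue */
	int stopped;
	int dma_start;
};

/* Stand-in for gr_abort_dma(): the driver sets GR_DMACTRL_AD here. */
static void abort_dma(struct sketch_ep *ep)
{
	(void)ep;
}

/* Stop the endpoint and complete every queued request with -ESHUTDOWN,
 * the same drain that gr_ep_nuke() performs. */
static void nuke(struct sketch_ep *ep)
{
	ep->stopped = 1;
	ep->dma_start = 0;
	abort_dma(ep);

	while (ep->queue) {
		struct req_node *req = ep->queue;

		ep->queue = req->next;
		printf("request %p completed with status %d\n",
		       (void *)req, -ESHUTDOWN);
		free(req);
	}
}

int main(void)
{
	struct sketch_ep ep = { .queue = NULL, .stopped = 0, .dma_start = 1 };
	int i;

	for (i = 0; i < 3; i++) {           /* queue three dummy requests */
		struct req_node *req = malloc(sizeof(*req));

		if (!req)
			break;
		req->next = ep.queue;
		ep.queue = req;
	}

	nuke(&ep);
	printf("stopped = %d, queue empty = %d\n", ep.stopped, ep.queue == NULL);
	return 0;
}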
703 static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
708 if (ep->num && !ep->ep.desc)
711 if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
715 if (!ep->num) {
718 gr_control_stall(ep->dev);
719 dev_dbg(ep->dev->dev, "EP: stall ep0\n");
725 dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
726 (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);
728 epctrl = gr_read32(&ep->regs->epctrl);
731 gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
732 ep->stopped = 1;
734 ep->wedged = 1;
736 gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
737 ep->stopped = 0;
738 ep->wedged = 0;
741 if (!ep->dma_start)
742 gr_start_dma(ep);
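
Lines 703-742 are gr_ep_halt_wedge(): ep0 is special-cased to a protocol stall, isochronous endpoints cannot be halted, setting halt sets the endpoint-halt bit and stops the ep (optionally wedging it), and clearing halt clears the bit, unwedges, and restarts DMA if it was idle because requests may have queued up meanwhile. A reduced sketch of the non-control case; the EPCTRL_EH value and the errno used for the isochronous case are illustrative, not the driver's.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define EPCTRL_EH 0x4   /* endpoint-halt bit; the value here is illustrative */

struct sketch_ep {
	unsigned epctrl;    /* stand-in for the epctrl register */
	bool is_iso;
	bool stopped;
	bool wedged;
	bool dma_start;
};

static void start_dma(struct sketch_ep *ep) { ep->dma_start = true; }

/* Halt/wedge/clear handling for a non-control endpoint, following the
 * fragments above. */
static int halt_wedge(struct sketch_ep *ep, bool halt, bool wedge)
{
	if (ep->is_iso)
		return -EOPNOTSUPP;        /* isochronous eps cannot be halted */

	if (halt) {
		ep->epctrl |= EPCTRL_EH;
		ep->stopped = true;
		if (wedge)
			ep->wedged = true;
	} else {
		ep->epctrl &= ~EPCTRL_EH;
		ep->stopped = false;
		ep->wedged = false;

		/* Requests may have queued up while halted; restart idle DMA. */
		if (!ep->dma_start)
			start_dma(ep);
	}
	return 0;
}

int main(void)
{
	struct sketch_ep ep = { 0 };

	halt_wedge(&ep, true, false);    /* set halt                 */
	halt_wedge(&ep, false, false);   /* clear halt, DMA restarts */
	printf("stopped = %d, dma_start = %d\n", ep.stopped, ep.dma_start);
	return 0;
}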
776 struct gr_ep *ep;
778 list_for_each_entry(ep, &dev->ep_list, ep_list)
779 gr_ep_nuke(ep);
793 struct gr_ep *ep;
797 ep = container_of(_ep, struct gr_ep, ep);
798 dev = ep->dev;
820 void (*complete)(struct usb_ep *ep,
981 struct gr_ep *ep;
993 ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);
997 halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
1003 status = gr_ep_halt_wedge(ep, 1, 0, 1);
1013 if (ep->wedged)
1015 status = gr_ep_halt_wedge(ep, 0, 0, 1);
1234 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
1236 static int gr_handle_in_ep(struct gr_ep *ep)
1240 req = list_first_entry(&ep->queue, struct gr_request, queue);
1247 if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
1251 gr_dma_advance(ep, 0);
1259 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
1261 static int gr_handle_out_ep(struct gr_ep *ep)
1267 struct gr_udc *dev = ep->dev;
1269 req = list_first_entry(&ep->queue, struct gr_request, queue);
1283 if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
1286 if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
1297 gr_dma_advance(ep, 0);
1303 ep_dmactrl = gr_read32(&ep->regs->dmactrl);
1304 gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
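
Lines 1234-1304 are the per-endpoint completion handlers: the IN side only advances once the request's descriptors are done and both hardware buffers (GR_EPSTAT_B1/B0) have drained, while the OUT side completes a request on a short packet (len < maxpacket) or once the expected length has been received, and otherwise simply re-enables DMA for more data. A condensed sketch of the two completion tests; the bit values below are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define EPSTAT_B0 0x1   /* buffer 0 in use; bit values are illustrative */
#define EPSTAT_B1 0x2   /* buffer 1 in use                              */

struct sketch_req {
	unsigned length;    /* bytes the gadget asked for          */
	unsigned actual;    /* bytes received from the host so far */
};

/* IN: only advance once both hardware buffers have drained, i.e. the
 * data has really left the controller. */
static bool in_request_done(unsigned epstat)
{
	return !(epstat & (EPSTAT_B0 | EPSTAT_B1));
}

/* OUT: a short packet or a completely filled request terminates the
 * transfer; otherwise DMA just has to be re-armed for more data. */
static bool out_request_done(const struct sketch_req *req,
			     unsigned last_packet_len, unsigned maxpacket)
{
	return last_packet_len < maxpacket || req->actual >= req->length;
}

int main(void)
{
	struct sketch_req req = { .length = 1024, .actual = 512 };

	printf("IN done:  %d\n", in_request_done(0));               /* buffers empty            */
	printf("OUT done: %d\n", out_request_done(&req, 512, 512)); /* full packet, expect more */
	printf("OUT done: %d\n", out_request_done(&req, 100, 512)); /* short packet ends it     */
	return 0;
}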
1398 struct gr_ep *ep;
1409 * Check IN ep interrupts. We check these before the OUT eps because
1414 ep = &dev->epi[i];
1415 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
1416 handled = gr_handle_in_ep(ep) || handled;
1419 /* Check OUT ep interrupts */
1421 ep = &dev->epo[i];
1422 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
1423 handled = gr_handle_out_ep(ep) || handled;
1434 list_for_each_entry(ep, &dev->ep_list, ep_list) {
1435 if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
1438 ep->ep.name);
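
Lines 1398-1438 are from the endpoint interrupt servicing loop: IN endpoints are polled before OUT endpoints (the comment at line 1409 gives the ordering rationale), each endpoint only when it is not stopped, not inside a completion callback, and has queued work; afterwards every endpoint's dmactrl is checked for an AMBA error (GR_DMACTRL_AE). A sketch of the scan with the same guard condition; the handlers below are stubs and the structure is a reduced stand-in.

#include <stdbool.h>
#include <stdio.h>

struct sketch_ep {
	bool stopped;
	bool callback;       /* completion callback in progress        */
	bool has_work;       /* stands in for !list_empty(&ep->queue)  */
	const char *name;
};

static bool handle_in_ep(struct sketch_ep *ep)  { printf("IN  %s\n", ep->name); return true; }
static bool handle_out_ep(struct sketch_ep *ep) { printf("OUT %s\n", ep->name); return true; }

/* Scan IN endpoints before OUT endpoints, skipping any endpoint that is
 * stopped, mid-callback, or idle - the guard used at lines 1415/1422. */
static bool poll_endpoints(struct sketch_ep *epi, int nepi,
			   struct sketch_ep *epo, int nepo)
{
	bool handled = false;
	int i;

	for (i = 0; i < nepi; i++)
		if (!epi[i].stopped && !epi[i].callback && epi[i].has_work)
			handled = handle_in_ep(&epi[i]) || handled;

	for (i = 0; i < nepo; i++)
		if (!epo[i].stopped && !epo[i].callback && epo[i].has_work)
			handled = handle_out_ep(&epo[i]) || handled;

	return handled;
}

int main(void)
{
	struct sketch_ep epi[2] = {
		{ .has_work = true, .name = "ep1in" },
		{ .stopped = true, .has_work = true, .name = "ep2in" },
	};
	struct sketch_ep epo[1] = {
		{ .has_work = true, .name = "ep1out" },
	};

	printf("handled = %d\n", poll_endpoints(epi, 2, epo, 1));
	return 0;
}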
1462 /* USB ep ops */
1469 struct gr_ep *ep;
1476 ep = container_of(_ep, struct gr_ep, ep);
1480 dev = ep->dev;
1483 if (ep == &dev->epo[0] || ep == &dev->epi[0])
1490 epctrl = gr_read32(&ep->regs->epctrl);
1495 if (!ep->is_in != !usb_endpoint_dir_in(desc))
1498 /* Check ep num */
1499 if ((!ep->is_in && ep->num >= dev->nepo) ||
1500 (ep->is_in && ep->num >= dev->nepi))
1513 ep->ep.name);
1540 } else if (max > ep->ep.maxpacket_limit) {
1542 max, ep->ep.maxpacket_limit);
1546 spin_lock(&ep->dev->lock);
1548 if (!ep->stopped) {
1549 spin_unlock(&ep->dev->lock);
1553 ep->stopped = 0;
1554 ep->wedged = 0;
1555 ep->ep.desc = desc;
1556 ep->ep.maxpacket = max;
1557 ep->dma_start = 0;
1565 ep->bytes_per_buffer = (nt + 1) * max;
1566 } else if (ep->is_in) {
1572 ep->bytes_per_buffer = (buffer_size / max) * max;
1578 ep->bytes_per_buffer = max;
1585 if (ep->is_in)
1587 gr_write32(&ep->regs->epctrl, epctrl);
1589 gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);
1591 spin_unlock(&ep->dev->lock);
1593 dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
1594 ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
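
Lines 1469-1594 are gr_ep_enable(): after validating direction, endpoint number and maxpacket against the limits, the per-buffer DMA size is derived from the descriptor: (nt + 1) * max for high-bandwidth endpoints (nt being the additional-transactions field of wMaxPacketSize), as many whole max-sized packets as fit in the hardware buffer for other IN endpoints, and exactly max for OUT endpoints (lines 1565-1578). A small worked computation under those three rules; the driver's validity checks are omitted and the buffer sizes in main() are example values only.

#include <stdio.h>

/* Per-buffer size as described by lines 1565-1578 above:
 *   high-bandwidth endpoint : (nt + 1) * max
 *   other IN endpoint       : (buffer_size / max) * max   (whole packets only)
 *   OUT endpoint            : max
 * Checks such as nt == 3 or the result exceeding the hardware buffer
 * are left out; buffer_size here is just a parameter, not read from
 * any hardware. */
static unsigned bytes_per_buffer(int is_in, unsigned nt, unsigned max,
				 unsigned buffer_size)
{
	if (nt)                       /* additional transactions per microframe */
		return (nt + 1) * max;
	if (is_in)
		return (buffer_size / max) * max;
	return max;
}

int main(void)
{
	printf("%u\n", bytes_per_buffer(1, 2, 1024, 4096));  /* 3072: high-bandwidth IN */
	printf("%u\n", bytes_per_buffer(1, 0, 512, 1024));   /* 1024: bulk IN           */
	printf("%u\n", bytes_per_buffer(0, 0, 512, 1024));   /*  512: OUT               */
	return 0;
}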
1601 struct gr_ep *ep;
1605 ep = container_of(_ep, struct gr_ep, ep);
1606 if (!_ep || !ep->ep.desc)
1609 dev = ep->dev;
1612 if (ep == &dev->epo[0] || ep == &dev->epi[0])
1618 dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
1622 gr_ep_nuke(ep);
1623 gr_ep_reset(ep);
1624 ep->ep.desc = NULL;
1654 struct gr_ep *ep;
1662 ep = container_of(_ep, struct gr_ep, ep);
1664 dev = ep->dev;
1666 spin_lock(&ep->dev->lock);
1674 if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
1675 ep = &dev->epo[0];
1676 ep->ep.driver_data = dev->epi[0].ep.driver_data;
1679 if (ep->is_in)
1680 gr_dbgprint_request("EXTERN", ep, req);
1682 ret = gr_queue(ep, req, GFP_ATOMIC);
1684 spin_unlock(&ep->dev->lock);
1693 struct gr_ep *ep;
1698 ep = container_of(_ep, struct gr_ep, ep);
1699 if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
1701 dev = ep->dev;
1712 list_for_each_entry(iter, &ep->queue, queue) {
1723 if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
1725 gr_abort_dma(ep);
1726 if (ep->stopped)
1727 gr_finish_request(ep, req, -ECONNRESET);
1729 gr_dma_advance(ep, -ECONNRESET);
1732 gr_finish_request(ep, req, -ECONNRESET);
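
Lines 1693-1732 are gr_dequeue(): the request must first be found on the endpoint's queue; if it is the head (the one currently being transferred), DMA is aborted and the request is either finished directly (endpoint stopped) or the queue is advanced with -ECONNRESET; otherwise the request is simply completed with -ECONNRESET. A reduced sketch of that decision; the list handling is simplified and the DMA restart after advancing is left as a comment.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct req_node {
	struct req_node *next;
};

struct sketch_ep {
	struct req_node *queue;   /* head = request currently being transferred */
	bool stopped;
};

/* Stand-in for gr_abort_dma(). */
static void abort_dma(struct sketch_ep *ep)
{
	(void)ep;
}

/* Unlink a request from the queue and report its completion status. */
static void finish(struct sketch_ep *ep, struct req_node *req, int status)
{
	struct req_node **pp = &ep->queue;

	while (*pp && *pp != req)
		pp = &(*pp)->next;
	if (*pp)
		*pp = req->next;
	printf("req %p done, status %d\n", (void *)req, status);
}

/* Complete the head request; restarting DMA for the next one is omitted. */
static void dma_advance(struct sketch_ep *ep, int status)
{
	finish(ep, ep->queue, status);
}

static int dequeue(struct sketch_ep *ep, struct req_node *req)
{
	struct req_node *iter;
	bool found = false;

	for (iter = ep->queue; iter; iter = iter->next) {
		if (iter == req) {
			found = true;
			break;
		}
	}
	if (!found)
		return -EINVAL;

	if (req == ep->queue) {              /* the request on the wire */
		abort_dma(ep);
		if (ep->stopped)
			finish(ep, req, -ECONNRESET);
		else
			dma_advance(ep, -ECONNRESET);
	} else {
		finish(ep, req, -ECONNRESET);
	}
	return 0;
}

int main(void)
{
	struct req_node r2 = { .next = NULL };
	struct req_node r1 = { .next = &r2 };
	struct sketch_ep ep = { .queue = &r1, .stopped = false };

	dequeue(&ep, &r2);   /* not active: simply completed with -ECONNRESET */
	dequeue(&ep, &r1);   /* active: DMA aborted, then the queue advances  */
	printf("queue empty = %d\n", ep.queue == NULL);
	return 0;
}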
1747 struct gr_ep *ep;
1751 ep = container_of(_ep, struct gr_ep, ep);
1753 spin_lock(&ep->dev->lock);
1756 if (halt && ep->is_in && !list_empty(&ep->queue)) {
1761 ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
1764 spin_unlock(&ep->dev->lock);
1787 struct gr_ep *ep;
1793 ep = container_of(_ep, struct gr_ep, ep);
1795 epstat = gr_read32(&ep->regs->epstat);
1809 struct gr_ep *ep;
1814 ep = container_of(_ep, struct gr_ep, ep);
1815 dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
1817 spin_lock(&ep->dev->lock);
1819 epctrl = gr_read32(&ep->regs->epctrl);
1821 gr_write32(&ep->regs->epctrl, epctrl);
1823 spin_unlock(&ep->dev->lock);
1960 struct gr_ep *ep;
1966 ep = &dev->epi[num];
1967 ep->ep.name = inames[num];
1968 ep->regs = &dev->regs->epi[num];
1970 ep = &dev->epo[num];
1971 ep->ep.name = onames[num];
1972 ep->regs = &dev->regs->epo[num];
1975 gr_ep_reset(ep);
1976 ep->num = num;
1977 ep->is_in = is_in;
1978 ep->dev = dev;
1979 ep->ep.ops = &gr_ep_ops;
1980 INIT_LIST_HEAD(&ep->queue);
1983 _req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
1989 gr_free_request(&ep->ep, _req);
2002 usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
2003 ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
2005 ep->ep.caps.type_control = true;
2007 usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
2008 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2010 ep->ep.caps.type_iso = true;
2011 ep->ep.caps.type_bulk = true;
2012 ep->ep.caps.type_int = true;
2014 list_add_tail(&ep->ep_list, &dev->ep_list);
2017 ep->ep.caps.dir_in = true;
2019 ep->ep.caps.dir_out = true;
2021 ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
2022 &ep->tailbuf_paddr, GFP_ATOMIC);
2023 if (!ep->tailbuf)
2043 dev->gadget.ep0 = &dev->epi[0].ep;
2080 struct gr_ep *ep;
2083 ep = &dev->epi[num];
2085 ep = &dev->epo[num];
2087 if (ep->tailbuf)
2088 dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
2089 ep->tailbuf, ep->tailbuf_paddr);
2109 gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
2110 gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);
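
The init fragments (lines 1960-2023) assign names and register blocks, reset each endpoint, set the maxpacket limits (MAX_CTRL_PL_SIZE for ep0) and capability flags, and allocate per endpoint a coherent tail buffer of maxpacket_limit bytes that odd-length OUT transfers are steered into; the removal fragments (lines 2080-2110) free that buffer and the preallocated ep0 requests. A sketch of the tail-buffer lifetime, with plain malloc/free standing in for dma_alloc_coherent/dma_free_coherent and a reduced stand-in structure.

#include <stdio.h>
#include <stdlib.h>

struct sketch_ep {
	unsigned maxpacket_limit;
	unsigned char *tailbuf;
};

static int ep_init(struct sketch_ep *ep, unsigned maxpacket_limit)
{
	ep->maxpacket_limit = maxpacket_limit;
	ep->tailbuf = malloc(maxpacket_limit);  /* dma_alloc_coherent in the driver */
	return ep->tailbuf ? 0 : -1;
}

static void ep_remove(struct sketch_ep *ep)
{
	free(ep->tailbuf);                      /* dma_free_coherent in the driver */
	ep->tailbuf = NULL;
}

int main(void)
{
	struct sketch_ep ep;

	if (ep_init(&ep, 1024) == 0) {
		printf("tail buffer of %u bytes ready\n", ep.maxpacket_limit);
		ep_remove(&ep);
	}
	return 0;
}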