Lines matching refs:hwep
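Every match below dereferences hwep, a struct ci_hw_ep * holding the chipidea UDC driver's per-endpoint state. As a reading aid, here is a minimal sketch of that struct, reconstructed only from the fields these lines touch; exact types, member order, the name[] size, and any member not referenced below are assumptions (the authoritative definition lives in the driver's headers):

struct ci_hw_ep {
	struct usb_ep		ep;		/* gadget-facing endpoint: ep.desc, ep.maxpacket, ep.mult, ep.caps */
	u8			dir;		/* TX or RX; toggled for ep0 in the halt/disable loops */
	u8			num;		/* endpoint number */
	u8			type;		/* USB_ENDPOINT_XFER_* */
	char			name[16];	/* filled by the scnprintf("ep%i%s", ...) match below */
	struct {
		struct list_head	queue;	/* pending ci_hw_req transfers */
		struct ci_hw_qh		*ptr;	/* DMA-coherent queue head (td.next, td.token, cap) */
		dma_addr_t		dma;	/* its bus address */
	}			qh;
	int			wedge;		/* set by the set-wedge op */

	/* resources shared with the controller */
	struct ci_hdrc		*ci;		/* parent controller */
	spinlock_t		*lock;		/* &ci->lock */
	struct dma_pool		*td_pool;	/* transfer-descriptor pool */
	struct td_node		*pending_td;	/* last dTD, kept around and freed later via free_pending_td() */
};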

52 static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
357 static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
368 node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
377 if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
378 u32 mul = hwreq->req.length / hwep->ep.maxpacket;
381 || hwreq->req.length % hwep->ep.maxpacket)
426 static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
434 ret = add_td_to_list(hwep, hwreq, 0, NULL);
450 ret = add_td_to_list(hwep, hwreq, count, NULL);
457 if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
458 && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
459 ret = add_td_to_list(hwep, hwreq, 0, NULL);
467 static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
478 ret = add_td_to_list(hwep, hwreq, count, s);
507 static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
515 dev_err(hwep->ci->dev, "not supported operation for sg\n");
521 dev_err(hwep->ci->dev, "not page aligned sg buffer\n");
529 ret = prepare_td_per_sg(hwep, hwreq, s);
545 * @hwep: endpoint
550 static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
552 struct ci_hdrc *ci = hwep->ci;
563 &hwreq->req, hwep->dir);
568 ret = prepare_td_for_sg(hwep, hwreq);
570 ret = prepare_td_for_non_sg(hwep, hwreq);
583 trace_ci_prepare_td(hwep, hwreq, firstnode);
590 if (!list_empty(&hwep->qh.queue)) {
592 int n = hw_ep_bit(hwep->num, hwep->dir);
597 hwreqprev = list_entry(hwep->qh.queue.prev,
607 reprime_dtd(ci, hwep, prevlastnode);
622 hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
623 hwep->qh.ptr->td.token &=
626 if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
627 u32 mul = hwreq->req.length / hwep->ep.maxpacket;
630 || hwreq->req.length % hwep->ep.maxpacket)
632 hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
635 ret = hw_ep_prime(ci, hwep->num, hwep->dir,
636 hwep->type == USB_ENDPOINT_XFER_CONTROL);
643 * @hwep: endpoint
645 static void free_pending_td(struct ci_hw_ep *hwep)
647 struct td_node *pending = hwep->pending_td;
649 dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
650 hwep->pending_td = NULL;
654 static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
657 hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
658 hwep->qh.ptr->td.token &=
661 return hw_ep_prime(ci, hwep->num, hwep->dir,
662 hwep->type == USB_ENDPOINT_XFER_CONTROL);
667 * @hwep: endpoint
672 static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
678 struct ci_hdrc *ci = hwep->ci;
687 trace_ci_complete_td(hwep, hwreq, node);
689 int n = hw_ep_bit(hwep->num, hwep->dir);
694 reprime_dtd(ci, hwep, node);
716 if (hwep->dir == TX) {
726 if (hwep->pending_td)
727 free_pending_td(hwep);
729 hwep->pending_td = node;
733 usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
734 &hwreq->req, hwep->dir);
746 * @hwep: endpoint
751 static int _ep_nuke(struct ci_hw_ep *hwep)
752 __releases(hwep->lock)
753 __acquires(hwep->lock)
756 if (hwep == NULL)
759 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
761 while (!list_empty(&hwep->qh.queue)) {
764 struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
768 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
778 spin_unlock(hwep->lock);
779 usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
780 spin_lock(hwep->lock);
784 if (hwep->pending_td)
785 free_pending_td(hwep);
792 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
796 if (ep == NULL || hwep->ep.desc == NULL)
799 if (usb_endpoint_xfer_isoc(hwep->ep.desc))
802 spin_lock_irqsave(hwep->lock, flags);
804 if (value && hwep->dir == TX && check_transfer &&
805 !list_empty(&hwep->qh.queue) &&
806 !usb_endpoint_xfer_control(hwep->ep.desc)) {
807 spin_unlock_irqrestore(hwep->lock, flags);
811 direction = hwep->dir;
813 retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
816 hwep->wedge = 0;
818 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
819 hwep->dir = (hwep->dir == TX) ? RX : TX;
821 } while (hwep->dir != direction);
823 spin_unlock_irqrestore(hwep->lock, flags);
932 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
934 struct ci_hdrc *ci = hwep->ci;
937 if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
940 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
942 hwep = (ci->ep0_dir == RX) ?
944 if (!list_empty(&hwep->qh.queue)) {
945 _ep_nuke(hwep);
946 dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
947 _usb_addr(hwep));
951 if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
952 hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
953 dev_err(hwep->ci->dev, "request length too big for isochronous\n");
959 dev_err(hwep->ci->dev, "request already in queue\n");
967 retval = _hardware_enqueue(hwep, hwreq);
972 list_add_tail(&hwreq->queue, &hwep->qh.queue);
986 __releases(hwep->lock)
987 __acquires(hwep->lock)
989 struct ci_hw_ep *hwep = ci->ep0in;
994 if (hwep == NULL || setup == NULL)
997 spin_unlock(hwep->lock);
998 req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
999 spin_lock(hwep->lock);
1023 retval = _ep_queue(&hwep->ep, req, gfp_flags);
1032 spin_unlock(hwep->lock);
1033 usb_ep_free_request(&hwep->ep, req);
1034 spin_lock(hwep->lock);
1076 struct ci_hw_ep *hwep;
1087 hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
1091 return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
1096 * @hwep: endpoint
1101 static int isr_tr_complete_low(struct ci_hw_ep *hwep)
1102 __releases(hwep->lock)
1103 __acquires(hwep->lock)
1106 struct ci_hw_ep *hweptemp = hwep;
1109 list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
1111 retval = _hardware_dequeue(hwep, hwreq);
1116 spin_unlock(hwep->lock);
1117 if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
1119 hweptemp = hwep->ci->ep0in;
1121 spin_lock(hwep->lock);
1148 struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
1163 memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1302 if (_ep_set_halt(&hwep->ep, 1, false))
1322 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1324 if (hwep->ep.desc == NULL)
1328 err = isr_tr_complete_low(hwep);
1329 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1334 if (_ep_set_halt(&hwep->ep, 1, false))
1360 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1368 spin_lock_irqsave(hwep->lock, flags);
1372 if (!list_empty(&hwep->qh.queue)) {
1373 dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
1374 spin_unlock_irqrestore(hwep->lock, flags);
1378 hwep->ep.desc = desc;
1380 hwep->dir = usb_endpoint_dir_in(desc) ? TX : RX;
1381 hwep->num = usb_endpoint_num(desc);
1382 hwep->type = usb_endpoint_type(desc);
1384 hwep->ep.maxpacket = usb_endpoint_maxp(desc);
1385 hwep->ep.mult = usb_endpoint_maxp_mult(desc);
1387 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1391 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1396 if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
1399 hwep->qh.ptr->cap = cpu_to_le32(cap);
1401 hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
1403 if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1404 dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
1412 if (hwep->num)
1413 retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
1414 hwep->type);
1416 spin_unlock_irqrestore(hwep->lock, flags);
1427 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1433 else if (hwep->ep.desc == NULL)
1436 spin_lock_irqsave(hwep->lock, flags);
1437 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1438 spin_unlock_irqrestore(hwep->lock, flags);
1444 direction = hwep->dir;
1446 retval |= _ep_nuke(hwep);
1447 retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
1449 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1450 hwep->dir = (hwep->dir == TX) ? RX : TX;
1452 } while (hwep->dir != direction);
1454 hwep->ep.desc = NULL;
1456 spin_unlock_irqrestore(hwep->lock, flags);
1488 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1496 dev_err(hwep->ci->dev, "freeing queued request\n");
1500 spin_lock_irqsave(hwep->lock, flags);
1503 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1511 spin_unlock_irqrestore(hwep->lock, flags);
1522 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1526 if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
1529 spin_lock_irqsave(hwep->lock, flags);
1530 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1531 spin_unlock_irqrestore(hwep->lock, flags);
1535 spin_unlock_irqrestore(hwep->lock, flags);
1546 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1552 hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
1553 list_empty(&hwep->qh.queue))
1556 spin_lock_irqsave(hwep->lock, flags);
1557 if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
1558 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1561 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1569 usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
1574 spin_unlock(hwep->lock);
1575 usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
1576 spin_lock(hwep->lock);
1579 spin_unlock_irqrestore(hwep->lock, flags);
1600 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1603 if (ep == NULL || hwep->ep.desc == NULL)
1606 spin_lock_irqsave(hwep->lock, flags);
1607 hwep->wedge = 1;
1608 spin_unlock_irqrestore(hwep->lock, flags);
1620 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1624 dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
1628 spin_lock_irqsave(hwep->lock, flags);
1629 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1630 spin_unlock_irqrestore(hwep->lock, flags);
1634 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1636 spin_unlock_irqrestore(hwep->lock, flags);
1772 struct ci_hw_ep *hwep = ci->ep0in;
1775 spin_lock_irqsave(hwep->lock, flags);
1777 spin_unlock_irqrestore(hwep->lock, flags);
1852 struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
1854 scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
1857 hwep->ci = ci;
1858 hwep->lock = &ci->lock;
1859 hwep->td_pool = ci->td_pool;
1861 hwep->ep.name = hwep->name;
1862 hwep->ep.ops = &usb_ep_ops;
1865 hwep->ep.caps.type_control = true;
1867 hwep->ep.caps.type_iso = true;
1868 hwep->ep.caps.type_bulk = true;
1869 hwep->ep.caps.type_int = true;
1873 hwep->ep.caps.dir_in = true;
1875 hwep->ep.caps.dir_out = true;
1882 usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
1884 INIT_LIST_HEAD(&hwep->qh.queue);
1885 hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
1886 &hwep->qh.dma);
1887 if (hwep->qh.ptr == NULL)
1896 ci->ep0out = hwep;
1898 ci->ep0in = hwep;
1900 usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
1904 list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
1915 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1917 if (hwep->pending_td)
1918 free_pending_td(hwep);
1919 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
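The reprime_dtd() matches above (forward declaration, definition, and the call sites in _hardware_enqueue() and _hardware_dequeue()) all revolve around one idiom: point the queue head's next-dTD link at a td_node, clear its stale status, and prime the endpoint. A compacted sketch of that step follows; the truncated token line is filled in on the assumption that it clears the halted and active status bits (TD_STATUS_HALTED / TD_STATUS_ACTIVE), and the driver's own headers are assumed for the remaining types and helpers:

static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
		       struct td_node *node)
{
	/* hand the controller the next transfer descriptor... */
	hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
	/* ...and clear stale status in the QH overlay so it gets fetched (assumed mask) */
	hwep->qh.ptr->td.token &=
		cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));

	/* the last argument flags a control endpoint to the prime helper */
	return hw_ep_prime(ci, hwep->num, hwep->dir,
			   hwep->type == USB_ENDPOINT_XFER_CONTROL);
}

The same next/token/prime sequence also appears inline in _hardware_enqueue(), in the hwep->qh.ptr->td.next and hw_ep_prime() matches above.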