Lines Matching refs:ep

51 /* Free the bdl during ep disable */
52 static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
54 struct bd_list *bd_list = &ep->bd_list;
55 struct bdc *bdc = ep->bdc;
59 dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
60 __func__, ep->name, num_tabs);
63 dev_dbg(bdc->dev, "%s already freed\n", ep->name);
95 kfree(ep->bd_list.bd_table_array);
120 /* Allocate the bdl for ep, during config ep */
121 static int ep_bd_list_alloc(struct bdc_ep *ep)
125 struct bdc *bdc = ep->bdc;
129 if (usb_endpoint_xfer_isoc(ep->desc))
137 "%s ep:%p num_tabs:%d\n",
138 __func__, ep, num_tabs);
141 ep->bd_list.bd_table_array = kcalloc(num_tabs,
144 if (!ep->bd_list.bd_table_array)
169 ep->bd_list.bd_table_array[index] = bd_table;
175 chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
177 ep->bd_list.num_tabs = num_tabs;
178 ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
179 ep->bd_list.num_tabs = num_tabs;
180 ep->bd_list.num_bds_table = bd_p_tab;
181 ep->bd_list.eqp_bdi = 0;
182 ep->bd_list.hwd_bdi = 0;
187 ep_bd_list_free(ep, num_tabs);
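
The ep_bd_list_alloc() matches above pin down the ring geometry: num_tabs tables of bd_p_tab BDs each, with max_bdi as the last valid global BD index and both the enqueue (eqp_bdi) and hardware-dequeue (hwd_bdi) indices starting at 0. A minimal standalone sketch of that arithmetic, using made-up values for num_tabs and bd_p_tab (the driver derives them from the endpoint type):

    #include <stdio.h>

    /* Assumed values: the driver picks num_tabs from the endpoint type
     * and bd_p_tab from the table size; 3 and 16 are purely illustrative. */
    #define NUM_TABS  3
    #define BD_P_TAB  16

    int main(void)
    {
        int max_bdi = (NUM_TABS * BD_P_TAB) - 1;  /* last valid global bdi */

        printf("num_tabs=%d num_bds_table=%d max_bdi=%d\n",
               NUM_TABS, BD_P_TAB, max_bdi);
        printf("eqp_bdi and hwd_bdi both start at 0\n");
        return 0;
    }
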
215 static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
217 struct bd_list *bd_list = &ep->bd_list;
219 struct bdc *bdc = ep->bdc;
253 /* return the global bdi, to compare with ep eqp_bdi */
258 static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
262 tbi = bdi / ep->bd_list.num_bds_table;
263 dev_vdbg(ep->bdc->dev,
265 bdi, ep->bd_list.num_bds_table, tbi);
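
bdi_to_tbi() reduces to one division: the table index is the global BD index divided by the BDs-per-table count. A worked example with the same assumed geometry as above:

    #include <stdio.h>

    #define NUM_BDS_TABLE 16  /* assumed, stands in for ep->bd_list.num_bds_table */

    /* Global BD index -> table index, as in the bdi_to_tbi() match above. */
    static int bdi_to_tbi(int bdi)
    {
        return bdi / NUM_BDS_TABLE;
    }

    int main(void)
    {
        /* bdi 0..15 live in table 0, 16..31 in table 1, and so on. */
        printf("bdi 5  -> tbi %d\n", bdi_to_tbi(5));   /* 0 */
        printf("bdi 16 -> tbi %d\n", bdi_to_tbi(16));  /* 1 */
        printf("bdi 33 -> tbi %d\n", bdi_to_tbi(33));  /* 2 */
        return 0;
    }
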
271 static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
277 end_bdi = ep->bd_list.max_bdi - 1;
278 else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
285 * How many transfer bd's are available on this ep bdl, chain bds are not
288 static int bd_available_ep(struct bdc_ep *ep)
290 struct bd_list *bd_list = &ep->bd_list;
292 struct bdc *bdc = ep->bdc;
321 if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
322 == bdi_to_tbi(ep, bd_list->eqp_bdi))) {
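
Only fragments of bd_available_ep() match here, but the bdi_to_tbi() comparison between hwd_bdi and eqp_bdi suggests the free-BD count is corrected when the two pointers sit in different tables, since chain BDs cannot carry transfers. The sketch below shows only the generic ring free-slot arithmetic, not the driver's chain-BD accounting:

    #include <stdio.h>

    #define TOTAL_BDS 48  /* assumed: num_tabs * num_bds_table from the sketch above */

    /* Generic ring arithmetic: free slots between the producer (eqp_bdi)
     * and the consumer (hwd_bdi), keeping one slot unused so that full
     * and empty can be told apart.  The driver additionally subtracts
     * the chain BDs crossed between the two indices; that is omitted. */
    static int ring_free(int eqp_bdi, int hwd_bdi)
    {
        return (hwd_bdi - eqp_bdi - 1 + TOTAL_BDS) % TOTAL_BDS;
    }

    int main(void)
    {
        printf("eqp=0  hwd=0  -> %d free\n", ring_free(0, 0));    /* 47 */
        printf("eqp=10 hwd=40 -> %d free\n", ring_free(10, 40));  /* 29 */
        return 0;
    }
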
343 struct bdc_ep *ep = bdc->bdc_ep_array[epnum];
347 * We don't have any way to check if ep state is running,
350 if (unlikely(ep->flags & BDC_EP_STOP))
351 ep->flags &= ~BDC_EP_STOP;
357 static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
359 int tbi = bdi_to_tbi(ep, bdi);
362 local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
363 dev_vdbg(ep->bdc->dev,
367 return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
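
bdi_to_bd() inverts the split: the table index comes from the division above and the offset inside that table is what remains. A small check of that arithmetic (the final lookup through bd_table_array is only noted in a comment):

    #include <stdio.h>

    #define NUM_BDS_TABLE 16  /* assumed geometry, as before */

    int main(void)
    {
        int bdi = 33;
        int tbi = bdi / NUM_BDS_TABLE;               /* 2 */
        int local_bdi = bdi - (tbi * NUM_BDS_TABLE); /* 1 */

        /* The driver then returns bd_table_array[tbi]->start_bd + local_bdi. */
        printf("bdi=%d -> tbi=%d local_bdi=%d\n", bdi, tbi, local_bdi);
        return 0;
    }
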
371 static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
373 ep->bd_list.eqp_bdi++;
375 if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
376 ep->bd_list.eqp_bdi++;
379 if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
380 ep->bd_list.eqp_bdi = 0;
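
The ep_bdlist_eqp_adv() matches are nearly the whole function: bump the enqueue index, bump it again if it lands on the chain BD that ends each table, and wrap to 0 once it passes max_bdi. A standalone model of that walk, with a deliberately small ring so the skip is visible:

    #include <stdio.h>

    #define NUM_BDS_TABLE 4  /* small table so the chain-BD skip is easy to see */
    #define NUM_TABS      2
    #define MAX_BDI       (NUM_TABS * NUM_BDS_TABLE - 1)

    static int eqp_adv(int eqp_bdi)
    {
        eqp_bdi++;
        /* the last BD of each table is a chain BD; step over it */
        if (((eqp_bdi + 1) % NUM_BDS_TABLE) == 0)
            eqp_bdi++;
        /* past the last bdi of the last table, wrap to the start */
        if (eqp_bdi == MAX_BDI + 1)
            eqp_bdi = 0;
        return eqp_bdi;
    }

    int main(void)
    {
        int bdi = 0, i;

        for (i = 0; i < 8; i++) {
            printf("%d -> ", bdi);
            bdi = eqp_adv(bdi);
        }
        printf("%d\n", bdi);  /* 0 -> 1 -> 2 -> 4 -> 5 -> 6 -> 0 -> 1 -> 2 */
        return 0;
    }
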
389 req->ep->dir = 0;
431 struct bdc_ep *ep;
436 ep = req->ep;
437 bd_list = &ep->bd_list;
441 bd = bdi_to_bd(ep, bd_list->eqp_bdi);
443 maxp = usb_endpoint_maxp(ep->desc);
446 dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
447 __func__, ep->name, num_bds, tfs, req_len, bd);
456 if (ep->ep_num == 1) {
462 if (!req->ep->dir)
476 bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
487 ep_bdlist_eqp_adv(ep);
492 ep->bd_list.eqp_bdi);
493 bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
497 bd = bdi_to_bd(ep, bd_xfr->start_bdi);
501 bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
512 struct bdc_ep *ep;
515 ep = req->ep;
518 ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);
521 bd_available = bd_available_ep(ep);
523 /* how many bd's are available on ep */
530 list_add_tail(&req->queue, &ep->queue);
531 bdc_dbg_bd_list(bdc, ep);
532 bdc_notify_xfr(bdc, ep->ep_num);
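
The queueing path visible around here checks how many BDs are free, builds the BD chain for the request, links the request onto ep->queue, and finally rings the controller via bdc_notify_xfr(). A stub-based sketch of roughly that ordering; the helpers below are placeholders named after the matched calls, not the driver's real signatures:

    #include <stdio.h>

    /* Placeholder stand-ins; bodies and signatures are not the driver's. */
    static int  bd_available(void)      { return 4; }  /* pretend 4 BDs are free */
    static int  setup_bd_list_xfr(void) { puts("write BDs for the request"); return 0; }
    static void add_to_ep_queue(void)   { puts("list_add_tail(&req->queue, &ep->queue)"); }
    static void notify_xfr(void)        { puts("bdc_notify_xfr(bdc, ep->ep_num)"); }

    int main(void)
    {
        int num_bds_needed = 2;  /* assumed size of the request in BDs */

        if (bd_available() < num_bds_needed)
            return 1;            /* not enough room on the ring */
        if (setup_bd_list_xfr())
            return 1;
        add_to_ep_queue();
        notify_xfr();            /* doorbell: tell the HW new BDs exist */
        return 0;
    }
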
538 static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
541 struct bdc *bdc = ep->bdc;
546 dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
549 usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
552 usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
558 int bdc_ep_disable(struct bdc_ep *ep)
565 bdc = ep->bdc;
566 dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
568 ret = bdc_stop_ep(bdc, ep->ep_num);
575 while (!list_empty(&ep->queue)) {
576 req = list_entry(ep->queue.next, struct bdc_req,
578 bdc_req_complete(ep, req, -ESHUTDOWN);
581 ret = bdc_dconfig_ep(bdc, ep);
586 ep->flags = 0;
588 if (ep->ep_num == 1)
592 ep_bd_list_free(ep, ep->bd_list.num_tabs);
593 ep->desc = NULL;
594 ep->comp_desc = NULL;
595 ep->usb_ep.desc = NULL;
596 ep->ep_type = 0;
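
bdc_ep_disable() as matched stops the endpoint, completes anything still queued with -ESHUTDOWN, deconfigures the endpoint, frees the BD list, and clears the descriptor pointers. A stub sketch of that teardown order, with placeholder helper bodies:

    #include <stdio.h>
    #include <errno.h>

    /* Placeholders that only print the step they stand for. */
    static int  bdc_stop_ep_stub(void)     { puts("stop ep");        return 0; }
    static int  ep_queue_empty(void)       { static int n = 2; return n-- <= 0; }
    static void req_complete(int status)   { printf("complete req, status %d\n", status); }
    static int  bdc_dconfig_ep_stub(void)  { puts("deconfigure ep"); return 0; }
    static void ep_bd_list_free_stub(void) { puts("free BD tables"); }

    int main(void)
    {
        bdc_stop_ep_stub();
        while (!ep_queue_empty())
            req_complete(-ESHUTDOWN);  /* flush everything still queued */
        bdc_dconfig_ep_stub();
        ep_bd_list_free_stub();
        /* ep->flags, desc, comp_desc, usb_ep.desc and ep_type are then cleared */
        return 0;
    }
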
601 /* Enable the ep */
602 int bdc_ep_enable(struct bdc_ep *ep)
607 bdc = ep->bdc;
611 ret = ep_bd_list_alloc(ep);
613 dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
616 bdc_dbg_bd_list(bdc, ep);
617 /* only for ep0: config ep is called for ep0 from connect event */
618 if (ep->ep_num == 1)
622 ret = bdc_config_ep(bdc, ep);
626 ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
627 ep->usb_ep.desc = ep->desc;
628 ep->usb_ep.comp_desc = ep->comp_desc;
629 ep->ep_type = usb_endpoint_type(ep->desc);
630 ep->flags |= BDC_EP_ENABLED;
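
bdc_ep_enable() mirrors that: allocate the BD list, issue the config-ep command (skipped for ep0, which is configured from the connect event), then populate the usb_ep fields and set BDC_EP_ENABLED. A sketch of the ordering, again with placeholder helpers and an assumed flag value:

    #include <stdio.h>

    #define BDC_EP_ENABLED (1 << 0)  /* assumed bit value, for illustration only */

    /* Placeholder helpers, not the driver's signatures. */
    static int ep_bd_list_alloc_stub(void) { puts("alloc BD tables");         return 0; }
    static int bdc_config_ep_stub(void)    { puts("issue config-ep command"); return 0; }

    int main(void)
    {
        unsigned int flags = 0;
        int is_ep0 = 0;  /* pretend this is a regular (non-control) endpoint */

        if (ep_bd_list_alloc_stub())
            return 1;
        if (!is_ep0 && bdc_config_ep_stub())  /* ep0 is configured at connect time */
            return 1;
        /* the driver then copies maxpacket, desc, comp_desc and ep_type
         * from the descriptors before setting the flag */
        flags |= BDC_EP_ENABLED;
        printf("flags=0x%x\n", flags);
        return 0;
    }
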
641 struct bdc_ep *ep;
644 ep = bdc->bdc_ep_array[1];
645 status_req->ep = ep;
656 static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
661 bdc = ep->bdc;
665 req->epnum = ep->ep_num;
686 ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
688 dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
698 struct bdc_ep *ep;
701 ep = bdc->bdc_ep_array[1];
702 bdc->ep0_req.ep = ep;
705 return ep0_queue(ep, &bdc->ep0_req);
708 /* Queue req on ep */
709 static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
714 if (!req || !ep->usb_ep.desc)
717 bdc = ep->bdc;
721 req->epnum = ep->ep_num;
723 ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
732 /* Dequeue a request from ep */
733 static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
747 bdc = ep->bdc;
749 eqp_bdi = ep->bd_list.eqp_bdi - 1;
752 eqp_bdi = ep->bd_list.max_bdi;
755 end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);
757 dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
758 __func__, ep->name, start_bdi, end_bdi);
759 dev_dbg(bdc->dev, "%s ep=%p ep->desc=%p\n", __func__,
760 ep, (void *)ep->usb_ep.desc);
761 /* if still connected, stop the ep to see where the HW is */
763 ret = bdc_stop_ep(bdc, ep->ep_num);
782 curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
822 tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
823 table = ep->bd_list.bd_table_array[tbi];
826 tbi * ep->bd_list.num_bds_table);
828 first_req = list_first_entry(&ep->queue, struct bdc_req,
844 bd_start = bdi_to_bd(ep, start_bdi);
849 bdc_dbg_bd_list(bdc, ep);
855 ret = bdc_ep_bla(bdc, ep, next_bd_dma);
865 /* Halt/Clear the ep based on value */
866 static int ep_set_halt(struct bdc_ep *ep, u32 value)
871 bdc = ep->bdc;
872 dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
876 if (ep->ep_num == 1)
879 ret = bdc_ep_set_stall(bdc, ep->ep_num);
882 ep->name);
884 ep->flags |= BDC_EP_STALL;
888 ret = bdc_ep_clear_stall(bdc, ep->ep_num);
891 ep->name);
893 ep->flags &= ~BDC_EP_STALL;
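
ep_set_halt() issues either a set-stall or a clear-stall command and mirrors the outcome in a BDC_EP_STALL flag, with ep0 special-cased. A minimal flag-tracking sketch with placeholder command helpers and an assumed flag bit:

    #include <stdio.h>

    #define BDC_EP_STALL (1 << 1)  /* assumed bit value, for illustration only */

    /* Placeholder command helpers. */
    static int bdc_ep_set_stall_stub(void)   { puts("set-stall command");   return 0; }
    static int bdc_ep_clear_stall_stub(void) { puts("clear-stall command"); return 0; }

    static unsigned int ep_flags;

    static int ep_set_halt(int value)
    {
        if (value) {
            if (bdc_ep_set_stall_stub())
                return 1;
            ep_flags |= BDC_EP_STALL;   /* remember the endpoint is halted */
        } else {
            if (bdc_ep_clear_stall_stub())
                return 1;
            ep_flags &= ~BDC_EP_STALL;
        }
        return 0;
    }

    int main(void)
    {
        ep_set_halt(1);
        printf("flags=0x%x\n", ep_flags);  /* 0x2 */
        ep_set_halt(0);
        printf("flags=0x%x\n", ep_flags);  /* 0x0 */
        return 0;
    }
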
900 /* Free all the eps */
903 struct bdc_ep *ep;
908 ep = bdc->bdc_ep_array[epnum];
909 if (!ep)
912 if (ep->flags & BDC_EP_ENABLED)
913 ep_bd_list_free(ep, ep->bd_list.num_tabs);
917 list_del(&ep->usb_ep.ep_list);
919 kfree(ep);
952 static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
956 struct bd_list *bd_list = &ep->bd_list;
966 dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
969 if (ep->ignore_next_sr) {
970 ep->ignore_next_sr = false;
974 if (unlikely(list_empty(&ep->queue))) {
978 req = list_entry(ep->queue.next, struct bdc_req,
998 short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
1010 if (!(bdi_to_tbi(ep, start_bdi) ==
1011 bdi_to_tbi(ep, short_bdi)))
1027 end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
1029 ep->ignore_next_sr = true;
1032 short_bd = bdi_to_bd(ep, short_bdi);
1049 ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
1051 dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
1055 bdc_req_complete(ep, bd_xfr->req, status);
1090 struct bdc_ep *ep = bdc->bdc_ep_array[1];
1095 ep_set_halt(ep, 1);
1098 while (!list_empty(&ep->queue)) {
1099 req = list_entry(ep->queue.next, struct bdc_req,
1101 bdc_req_complete(ep, req, -ESHUTDOWN);
1233 struct bdc_ep *ep;
1290 ep = bdc->bdc_ep_array[epnum];
1291 if (!ep)
1294 return ep_set_halt(ep, set);
1308 struct bdc_ep *ep;
1356 ep = bdc->bdc_ep_array[epnum];
1357 if (!ep) {
1361 if (ep->flags & BDC_EP_STALL)
1388 struct bdc_ep *ep;
1397 ep = bdc->bdc_ep_array[1];
1398 bdc->ep0_req.ep = ep;
1416 bdc->ep0_req.ep = bdc->bdc_ep_array[1];
1512 struct bdc_ep *ep;
1516 ep = bdc->bdc_ep_array[1];
1518 if (ep->flags & BDC_EP_STALL) {
1519 ret = ep_set_halt(ep, 0);
1551 struct bdc_ep *ep;
1557 ep = bdc->bdc_ep_array[1];
1563 if (ep->flags & BDC_EP_STALL) {
1564 ret = ep_set_halt(ep, 0);
1646 struct bdc_ep *ep;
1651 ep = bdc->bdc_ep_array[ep_num];
1652 if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
1653 dev_err(bdc->dev, "xsf for ep not enabled\n");
1666 dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
1667 __func__, sr_status, ep->name);
1672 handle_xsr_succ_status(bdc, ep, sreport);
1702 dev_warn(bdc->dev, "Babble on ep not handled\n");
1715 struct bdc_ep *ep;
1725 ep = to_bdc_ep(_ep);
1727 bdc = ep->bdc;
1728 dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
1730 _req, ep->name, _req->length, _req->zero);
1732 if (!ep->usb_ep.desc) {
1735 _req, ep->name);
1746 if (ep == bdc->bdc_ep_array[1])
1747 ret = ep0_queue(ep, req);
1749 ret = ep_queue(ep, req);
1762 struct bdc_ep *ep;
1769 ep = to_bdc_ep(_ep);
1771 bdc = ep->bdc;
1772 dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
1773 bdc_dbg_bd_list(bdc, ep);
1778 list_for_each_entry(iter, &ep->queue, queue) {
1789 ret = ep_dequeue(ep, req);
1794 bdc_req_complete(ep, req, -ECONNRESET);
1797 bdc_dbg_bd_list(bdc, ep);
1806 struct bdc_ep *ep;
1810 ep = to_bdc_ep(_ep);
1811 bdc = ep->bdc;
1812 dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
1814 if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
1816 else if (!list_empty(&ep->queue))
1819 ret = ep_set_halt(ep, value);
1830 struct bdc_ep *ep;
1836 ep = to_bdc_ep(_ep);
1837 req->ep = ep;
1838 req->epnum = ep->ep_num;
1840 dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
1861 struct bdc_ep *ep;
1875 ep = to_bdc_ep(_ep);
1876 bdc = ep->bdc;
1879 if (ep == bdc->bdc_ep_array[1])
1887 dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
1889 ep->desc = desc;
1890 ep->comp_desc = _ep->comp_desc;
1891 ret = bdc_ep_enable(ep);
1900 struct bdc_ep *ep;
1908 ep = to_bdc_ep(_ep);
1909 bdc = ep->bdc;
1912 if (ep == bdc->bdc_ep_array[1]) {
1917 "%s() ep:%s ep->flags:%08x\n",
1918 __func__, ep->name, ep->flags);
1920 if (!(ep->flags & BDC_EP_ENABLED)) {
1923 ep->name);
1927 ret = bdc_ep_disable(ep);
1946 struct bdc_ep *ep;
1949 ep = kzalloc(sizeof(*ep), GFP_KERNEL);
1950 if (!ep)
1953 ep->bdc = bdc;
1954 ep->dir = dir;
1957 ep->usb_ep.caps.dir_in = true;
1959 ep->usb_ep.caps.dir_out = true;
1961 /* ep->ep_num is the index inside bdc_ep */
1963 ep->ep_num = 1;
1964 bdc->bdc_ep_array[ep->ep_num] = ep;
1965 snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
1966 usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
1967 ep->usb_ep.caps.type_control = true;
1968 ep->comp_desc = NULL;
1969 bdc->gadget.ep0 = &ep->usb_ep;
1972 ep->ep_num = epnum * 2 - 1;
1974 ep->ep_num = epnum * 2 - 2;
1976 bdc->bdc_ep_array[ep->ep_num] = ep;
1977 snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
1980 usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
1981 ep->usb_ep.caps.type_iso = true;
1982 ep->usb_ep.caps.type_bulk = true;
1983 ep->usb_ep.caps.type_int = true;
1984 ep->usb_ep.max_streams = 0;
1985 list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
1987 ep->usb_ep.ops = &bdc_gadget_ep_ops;
1988 ep->usb_ep.name = ep->name;
1989 ep->flags = 0;
1990 ep->ignore_next_sr = false;
1991 dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
1992 ep, ep->usb_ep.name, epnum, ep->ep_num);
1994 INIT_LIST_HEAD(&ep->queue);
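
The init_ep() matches fix the numbering scheme: the control endpoint is ep_num 1, every other endpoint is epnum * 2 - 1 for IN or epnum * 2 - 2 for OUT, and that number is also its slot in bdc_ep_array and feeds the "ep%d%s" name. A worked example of the mapping:

    #include <stdio.h>

    /* dir: 1 = IN, 0 = OUT, mirroring the direction test in the matched lines */
    static int bdc_ep_num(int epnum, int dir)
    {
        if (epnum == 1)  /* ep0 (control) */
            return 1;
        return dir ? epnum * 2 - 1 : epnum * 2 - 2;
    }

    int main(void)
    {
        printf("ep0    -> %d\n", bdc_ep_num(1, 1));  /* 1 */
        printf("ep1out -> %d\n", bdc_ep_num(2, 0));  /* 2 */
        printf("ep1in  -> %d\n", bdc_ep_num(2, 1));  /* 3 */
        printf("ep2in  -> %d\n", bdc_ep_num(3, 1));  /* 5 */
        return 0;
    }
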
1999 /* Init all eps */
2010 dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
2019 "init ep failed for:%d error: %d\n",
2028 "init ep failed for:%d error: %d\n",