Lines Matching defs:ep

41 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
194 DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts));
195 DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
232 /* mask all ep interrupts */
279 static int udc_set_txfifo_addr(struct udc_ep *ep)
285 if (!ep || !(ep->in))
288 dev = ep->dev;
289 ep->txfifo = dev->txfifo;
291 /* traverse endpoints */
292 for (i = 0; i < ep->num; i++) {
293 if (dev->ep[i].regs) {
295 tmp = readl(&dev->ep[i].regs->bufin_framenum);
297 ep->txfifo += tmp;
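
udc_set_txfifo_addr() derives an IN endpoint's FIFO address by adding the buffer sizes of all lower-numbered endpoints to the device FIFO base. A minimal user-space sketch of that layout rule; the sizes, base address, and the dword granularity of the real bufin_framenum field are simplified assumptions:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_EPS 4

    int main(void)
    {
        /* assumed per-endpoint buffer sizes in bytes */
        uint32_t buf_size[NUM_EPS] = { 64, 256, 256, 128 };
        uint32_t fifo_base = 0x1000;   /* assumed device FIFO base */

        for (unsigned int num = 0; num < NUM_EPS; num++) {
            uint32_t addr = fifo_base;
            /* sum the sizes of all lower-numbered endpoints */
            for (unsigned int i = 0; i < num; i++)
                addr += buf_size[i];
            printf("ep%u txfifo at 0x%x\n", num, (unsigned int)addr);
        }
        return 0;
    }
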
306 static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
308 if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
309 DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
311 ep->naking = 1;
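
UDC_QUEUE_CNAK() handles the case where the controller refuses a NAK clear: if the NAK bit is still set after the clear attempt, the endpoint is flagged as naking so a later interrupt or queue operation retries. A hedged sketch of the pattern; the bit position and the plain variable standing in for the mmio ctl register are assumptions:

    #include <stdio.h>
    #include <stdint.h>

    #define EPCTL_NAK (1u << 7)   /* assumed bit position */

    struct ep_state {
        volatile uint32_t ctl;    /* stands in for ep->regs->ctl */
        int naking;               /* retry flag checked elsewhere */
    };

    static void queue_cnak(struct ep_state *ep, unsigned int num)
    {
        if (ep->ctl & EPCTL_NAK) {
            /* NAK survived the clear attempt: remember it so the
             * next interrupt or queue path tries again */
            printf("NAK could not be cleared for ep%u\n", num);
            ep->naking = 1;
        }
    }
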
321 struct udc_ep *ep;
334 ep = container_of(usbep, struct udc_ep, ep);
335 dev = ep->dev;
337 DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
343 ep->ep.desc = desc;
345 ep->halted = 0;
348 tmp = readl(&dev->ep[ep->num].regs->ctl);
350 writel(tmp, &dev->ep[ep->num].regs->ctl);
354 tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
356 ep->ep.maxpacket = maxpacket;
357 writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
359 /* IN ep */
360 if (ep->in) {
362 /* ep ix in UDC CSR register space */
363 udc_csr_epix = ep->num;
366 tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
373 writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
376 udc_set_txfifo_addr(ep);
379 tmp = readl(&ep->regs->ctl);
381 writel(tmp, &ep->regs->ctl);
383 /* OUT ep */
385 /* ep ix in UDC CSR register space */
386 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
389 tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
392 writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
394 if (use_dma && !ep->in) {
396 ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
397 ep->bna_occurred = 0;
400 if (ep->num != UDC_EP0OUT_IX)
404 /* set ep values */
408 /* ep number */
410 /* ep direction */
411 tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
412 /* ep type */
414 /* ep config */
415 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
416 /* ep interface */
417 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
418 /* ep alt */
419 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
423 /* enable ep irq */
425 tmp &= AMD_UNMASK_BIT(ep->num);
432 if (!use_dma || ep->in) {
433 tmp = readl(&ep->regs->ctl);
435 writel(tmp, &ep->regs->ctl);
436 ep->naking = 0;
437 UDC_QUEUE_CNAK(ep, ep->num);
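
The CSR ne[] programming in udc_ep_enable() packs endpoint number, direction, type, config, interface, and alt setting into one word via AMD_ADDBITS. A user-space sketch of that field-insertion style; the masks are illustrative, not the real UDC_CSR_NE_* layout, and FIELD_SHIFT relies on a GCC-style builtin:

    #include <stdio.h>
    #include <stdint.h>

    /* insert val into the contiguous bit field described by mask */
    #define FIELD_SHIFT(mask) (__builtin_ctz(mask))
    #define ADDBITS(reg, val, mask) \
        (((reg) & ~(uint32_t)(mask)) | \
         (((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask)))

    #define NE_NUM  0x0000000f   /* assumed field layout */
    #define NE_DIR  0x00000010
    #define NE_TYPE 0x00000060
    #define NE_CFG  0x00000780
    #define NE_INTF 0x00007800
    #define NE_ALT  0x00078000

    int main(void)
    {
        uint32_t ne = 0;

        ne = ADDBITS(ne, 1, NE_NUM);   /* ep number */
        ne = ADDBITS(ne, 1, NE_DIR);   /* IN direction */
        ne = ADDBITS(ne, 2, NE_TYPE);  /* bulk */
        ne = ADDBITS(ne, 1, NE_CFG);   /* current config */
        ne = ADDBITS(ne, 0, NE_INTF);  /* interface */
        ne = ADDBITS(ne, 0, NE_ALT);   /* alt setting */
        printf("ne = 0x%08x\n", (unsigned int)ne);
        return 0;
    }
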
447 static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
451 VDBG(ep->dev, "ep-%d reset\n", ep->num);
452 ep->ep.desc = NULL;
453 ep->ep.ops = &udc_ep_ops;
454 INIT_LIST_HEAD(&ep->queue);
456 usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
458 tmp = readl(&ep->regs->ctl);
460 writel(tmp, &ep->regs->ctl);
461 ep->naking = 1;
465 tmp |= AMD_BIT(ep->num);
468 if (ep->in) {
470 tmp = readl(&ep->regs->ctl);
472 writel(tmp, &ep->regs->ctl);
474 tmp = readl(&ep->regs->sts);
476 writel(tmp, &ep->regs->sts);
479 tmp = readl(&ep->regs->ctl);
481 writel(tmp, &ep->regs->ctl);
485 writel(0, &ep->regs->desptr);
491 struct udc_ep *ep = NULL;
497 ep = container_of(usbep, struct udc_ep, ep);
498 if (usbep->name == ep0_string || !ep->ep.desc)
501 DBG(ep->dev, "Disable ep-%d\n", ep->num);
503 spin_lock_irqsave(&ep->dev->lock, iflags);
504 udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
505 empty_req_queue(ep);
506 ep_init(ep->dev->regs, ep);
507 spin_unlock_irqrestore(&ep->dev->lock, iflags);
518 struct udc_ep *ep;
523 ep = container_of(usbep, struct udc_ep, ep);
525 VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
533 if (ep->dma) {
535 dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
542 VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
583 struct udc_ep *ep;
589 ep = container_of(usbep, struct udc_ep, ep);
591 VDBG(ep->dev, "free_req req=%p\n", req);
594 VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
598 udc_free_dma_chain(ep->dev, req);
600 dma_pool_free(ep->dev->data_requests, req->td_data,
627 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
633 _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
636 ep->bna_dummy_req = req;
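
The BNA dummy request gives the DMA engine a permanently valid descriptor to park on when a transfer is cancelled or a buffer-not-available condition hits, so the hardware never fetches a stale pointer. A rough sketch of the parking idea with placeholder types:

    #include <stdint.h>

    struct td { uint32_t status; uint32_t bufptr; uint32_t next; };

    struct ep_dma {
        volatile uint32_t desptr;   /* stands in for ep->regs->desptr */
        struct td *bna_dummy;       /* preallocated parking descriptor */
        uint32_t bna_dummy_phys;    /* its dma address */
    };

    /* point the hardware at the dummy so the cancelled transfer's
     * real descriptor can be reclaimed safely */
    static void park_on_dummy(struct ep_dma *ep)
    {
        ep->desptr = ep->bna_dummy_phys;
    }
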
644 udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
652 if (!req || !ep)
661 bytes = ep->ep.maxpacket;
667 writel(*(buf + i), ep->txfifo);
672 ep->txfifo);
676 writel(0, &ep->regs->confirm);
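
udc_txfifo_write() pushes the payload into the FIFO one 32-bit word at a time, finishing with a single write for any 1-3 leftover bytes. A standalone sketch of that PIO pattern; the volatile pointer stands in for the mapped FIFO address:

    #include <stdint.h>
    #include <string.h>

    static void fifo_write(volatile uint32_t *fifo, const uint8_t *buf,
                           unsigned int bytes)
    {
        unsigned int words = bytes / 4;

        for (unsigned int i = 0; i < words; i++) {
            uint32_t w;

            memcpy(&w, buf + 4 * i, 4);   /* avoid unaligned loads */
            *fifo = w;
        }
        if (bytes % 4) {
            uint32_t w = 0;

            memcpy(&w, buf + 4 * words, bytes % 4);
            *fifo = w;                    /* trailing bytes, zero padded */
        }
    }
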
717 udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
725 bytes = readl(&ep->regs->sts);
731 if ((buf_space % ep->ep.maxpacket) != 0) {
732 DBG(ep->dev,
734 ep->ep.name, bytes, buf_space);
742 if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
747 VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
748 udc_rxfifo_read_bytes(ep->dev, buf, bytes);
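
The checks in udc_rxfifo_read() implement the usual OUT completion rule: a packet shorter than maxpacket (including a zero-length packet) ends the transfer, as does filling the request buffer. Condensed into a predicate:

    #include <stdbool.h>
    #include <stddef.h>

    static bool out_transfer_finished(size_t bytes, size_t maxpacket,
                                      size_t actual, size_t length)
    {
        if ((bytes % maxpacket) != 0 || bytes == 0)
            return true;           /* short or zero-length packet */
        return actual >= length;   /* request buffer filled */
    }
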
755 struct udc_ep *ep,
769 VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
774 if (!ep->in)
778 len = req->req.length / ep->ep.maxpacket;
779 if (req->req.length % ep->ep.maxpacket)
785 udc_free_dma_chain(ep->dev, req);
795 td = dma_pool_alloc(ep->dev->data_requests,
833 if (ep->in) {
837 ep->ep.maxpacket,
851 if (ep->in) {
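
The chain length computed near the top of udc_create_dma_chain() is a ceiling division: one descriptor per maxpacket-sized chunk plus one for a trailing partial packet. As a helper:

    /* one descriptor per maxpacket chunk, plus one for a partial tail */
    static unsigned int dma_chain_len(unsigned int length,
                                      unsigned int maxpacket)
    {
        unsigned int len = length / maxpacket;

        if (length % maxpacket)
            len++;
        return len;
    }
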
871 static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
876 VDBG(ep->dev, "prep_dma\n");
877 VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
878 ep->num, req->td_data);
889 retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
892 DBG(ep->dev, "Out of DMA memory\n");
895 if (ep->in) {
896 if (req->req.length == ep->ep.maxpacket) {
900 ep->ep.maxpacket,
908 if (ep->in) {
909 VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
910 "maxpacket=%d ep%d\n",
912 ep->ep.maxpacket, ep->num);
917 if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
918 || ep->num == UDC_EP0OUT_IX
919 || ep->num == UDC_EP0IN_IX) {
937 VDBG(ep->dev, "OUT set host ready\n");
945 if (ep->naking) {
946 tmp = readl(&ep->regs->ctl);
948 writel(tmp, &ep->regs->ctl);
949 ep->naking = 0;
950 UDC_QUEUE_CNAK(ep, ep->num);
960 complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
961 __releases(ep->dev->lock)
962 __acquires(ep->dev->lock)
967 VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
969 dev = ep->dev;
971 if (ep->dma)
972 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
974 halted = ep->halted;
975 ep->halted = 1;
981 /* remove from ep queue */
984 VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
985 &req->req, req->req.length, ep->ep.name, sts);
988 usb_gadget_giveback_request(&ep->ep, &req->req);
990 ep->halted = halted;
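
complete_req() follows the standard gadget completion sequence: unmap the DMA buffer, latch the halted flag so nothing re-enters, drop the device lock around the gadget's callback (which may requeue), then restore the flag. A condensed sketch with a pthread mutex standing in for the spinlock:

    #include <pthread.h>

    struct request {
        int status;
        void (*complete)(struct request *req);
    };

    struct endpoint {
        int halted;
        pthread_mutex_t *lock;    /* stands in for dev->lock */
    };

    static void complete_req_sketch(struct endpoint *ep,
                                    struct request *req, int sts)
    {
        int halted = ep->halted;

        ep->halted = 1;                  /* block nested completions */
        req->status = sts;

        pthread_mutex_unlock(ep->lock);  /* callback may requeue */
        req->complete(req);
        pthread_mutex_lock(ep->lock);

        ep->halted = halted;             /* restore previous state */
    }
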
1053 struct udc_ep *ep;
1065 ep = container_of(usbep, struct udc_ep, ep);
1066 if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1069 VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
1070 dev = ep->dev;
1076 if (ep->dma) {
1078 retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
1093 if (list_empty(&ep->queue)) {
1097 complete_req(ep, req, 0);
1098 VDBG(dev, "%s: zlp\n", ep->ep.name);
1112 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1114 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1115 dev->ep[UDC_EP0IN_IX].naking = 0;
1116 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
1122 if (ep->dma) {
1123 retval = prep_dma(ep, req, GFP_ATOMIC);
1127 if (ep->in) {
1136 if (!ep->in) {
1152 if (ep->bna_occurred) {
1154 memcpy(ep->bna_dummy_req->td_data,
1160 writel(req->td_phys, &ep->regs->desptr);
1163 if (ep->naking) {
1164 tmp = readl(&ep->regs->ctl);
1166 writel(tmp, &ep->regs->ctl);
1167 ep->naking = 0;
1168 UDC_QUEUE_CNAK(ep, ep->num);
1171 if (ep->in) {
1172 /* enable ep irq */
1174 tmp &= AMD_UNMASK_BIT(ep->num);
1177 } else if (ep->in) {
1178 /* enable ep irq */
1180 tmp &= AMD_UNMASK_BIT(ep->num);
1184 } else if (ep->dma) {
1187 * prep_dma not used for OUT eps, this is not possible
1190 if (ep->in) {
1191 retval = prep_dma(ep, req, GFP_ATOMIC);
1197 /* add request to ep queue */
1200 list_add_tail(&req->queue, &ep->queue);
1207 if (ep->num != UDC_EP0OUT_IX)
1211 if (!ep->in) {
1219 if (udc_rxfifo_read(ep, req)) {
1221 complete_req(ep, req, 0);
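
Running through udc_queue() is the start-or-append decision: an empty queue means the hardware is idle, so the new request is started at once (descriptor pointer written, NAK cleared); otherwise it only joins the software queue and is started later from the completion interrupt. Reduced to its skeleton, with a callback standing in for the hardware kick:

    #include <stdbool.h>
    #include <stddef.h>

    struct request { struct request *next; };

    struct endpoint {
        struct request *head, *tail;
        void (*start_hw)(struct endpoint *ep, struct request *req);
    };

    static void queue_request(struct endpoint *ep, struct request *req)
    {
        bool idle = (ep->head == NULL);

        req->next = NULL;             /* append to the sw queue */
        if (ep->tail)
            ep->tail->next = req;
        else
            ep->head = req;
        ep->tail = req;

        if (idle)
            ep->start_hw(ep, req);    /* kick hardware only when idle */
    }
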
1235 void empty_req_queue(struct udc_ep *ep)
1239 ep->halted = 1;
1240 while (!list_empty(&ep->queue)) {
1241 req = list_entry(ep->queue.next,
1244 complete_req(ep, req, -ESHUTDOWN);
1252 struct udc_ep *ep;
1257 ep = container_of(usbep, struct udc_ep, ep);
1258 if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
1259 && ep->num != UDC_EP0OUT_IX)))
1264 spin_lock_irqsave(&ep->dev->lock, iflags);
1265 halted = ep->halted;
1266 ep->halted = 1;
1268 if (ep->queue.next == &req->queue) {
1269 if (ep->dma && req->dma_going) {
1270 if (ep->in)
1271 ep->cancel_transfer = 1;
1286 ep->cancel_transfer = 1;
1288 udc_init_bna_dummy(ep->req);
1289 writel(ep->bna_dummy_req->td_phys,
1290 &ep->regs->desptr);
1296 complete_req(ep, req, -ECONNRESET);
1297 ep->halted = halted;
1299 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1307 struct udc_ep *ep;
1317 ep = container_of(usbep, struct udc_ep, ep);
1318 if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1320 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1326 if (ep->num == 0)
1327 ep->dev->stall_ep0in = 1;
1333 tmp = readl(&ep->regs->ctl);
1335 writel(tmp, &ep->regs->ctl);
1336 ep->halted = 1;
1344 DBG(ep->dev, "start polltimer\n");
1350 /* ep was halted by set_halt() before */
1351 if (ep->halted) {
1352 tmp = readl(&ep->regs->ctl);
1357 writel(tmp, &ep->regs->ctl);
1358 ep->halted = 0;
1359 UDC_QUEUE_CNAK(ep, ep->num);
1436 /* make gadget ep lists */
1438 list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
1440 list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
1442 list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
1446 dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
1448 dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
1450 dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
1451 dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
1523 struct udc_ep *ep;
1537 /* set basic ep parameters */
1539 ep = &dev->ep[tmp];
1540 ep->dev = dev;
1541 ep->ep.name = ep_info[tmp].name;
1542 ep->ep.caps = ep_info[tmp].caps;
1543 ep->num = tmp;
1545 ep->txfifo = dev->txfifo;
1549 ep->fifo_depth = UDC_TXFIFO_SIZE;
1550 ep->in = 1;
1552 ep->fifo_depth = UDC_RXFIFO_SIZE;
1553 ep->in = 0;
1556 ep->regs = &dev->ep_regs[tmp];
1558 * ep will be reset only if ep was not enabled before to avoid
1559 * disabling ep interrupts when ENUM interrupt occurs but ep is
1562 if (!ep->ep.desc)
1563 ep_init(dev->regs, ep);
1567 * ep->dma is not really used, just to indicate that
1571 ep->dma = &dev->regs->ctl;
1577 reg = readl(&dev->ep[tmp].regs->ctl);
1579 writel(reg, &dev->ep[tmp].regs->ctl);
1580 dev->ep[tmp].naking = 1;
1587 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
1589 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
1592 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
1594 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
1602 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
1603 dev->ep[UDC_EP0IN_IX].halted = 0;
1656 empty_req_queue(&dev->ep[tmp]);
1660 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
1687 * ep int. status reset
1755 static void udc_handle_halt_state(struct udc_ep *ep)
1759 if (ep->halted == 1) {
1760 tmp = readl(&ep->regs->ctl);
1771 DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1773 writel(tmp, &ep->regs->ctl);*/
1777 writel(tmp, &ep->regs->ctl);
1778 ep->halted = 0;
1779 UDC_QUEUE_CNAK(ep, ep->num);
1787 struct udc_ep *ep;
1795 ep = &udc->ep[UDC_EPIN_IX];
1796 udc_handle_halt_state(ep);
1797 if (ep->halted)
1800 ep = &udc->ep[UDC_EPOUT_IX];
1801 udc_handle_halt_state(ep);
1802 if (ep->halted)
1826 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1828 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1831 dev->ep[UDC_EP0IN_IX].in = 1;
1832 dev->ep[UDC_EP0OUT_IX].in = 0;
1835 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1842 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1845 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1852 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1855 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1862 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1875 dev->ep[UDC_EP0OUT_IX].td->status |=
1878 writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1879 &dev->ep[UDC_EP0OUT_IX].regs->subptr);
1880 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1881 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
1903 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1905 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1906 dev->ep[UDC_EP0IN_IX].naking = 0;
1907 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1910 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1912 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1913 dev->ep[UDC_EP0OUT_IX].naking = 0;
1914 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1941 dev->ep[UDC_EP0OUT_IX].ep.driver_data =
1942 dev->ep[UDC_EP0IN_IX].ep.driver_data;
1969 empty_req_queue(&dev->ep[tmp]);
2006 DBG(dev, "CNAK pending for ep%d\n", tmp);
2008 reg = readl(&dev->ep[tmp].regs->ctl);
2010 writel(reg, &dev->ep[tmp].regs->ctl);
2011 dev->ep[tmp].naking = 0;
2012 UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2017 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2019 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2021 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2022 dev->ep[UDC_EP0OUT_IX].naking = 0;
2023 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2024 dev->ep[UDC_EP0OUT_IX].num);
2060 struct udc_ep *ep;
2066 VDBG(dev, "ep%d irq\n", ep_ix);
2067 ep = &dev->ep[ep_ix];
2069 tmp = readl(&ep->regs->sts);
2073 DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
2074 ep->num, readl(&ep->regs->desptr));
2076 writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2077 if (!ep->cancel_transfer)
2078 ep->bna_occurred = 1;
2080 ep->cancel_transfer = 0;
2087 dev_err(dev->dev, "HE ep%dout occurred\n", ep->num);
2090 writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2095 if (!list_empty(&ep->queue)) {
2098 req = list_entry(ep->queue.next,
2109 if (req && udc_rxfifo_read(ep, req)) {
2113 complete_req(ep, req, 0);
2115 if (!list_empty(&ep->queue) && !ep->halted) {
2116 req = list_entry(ep->queue.next,
2123 } else if (!ep->cancel_transfer && req) {
2136 if (ep->bna_occurred) {
2138 memcpy(req->td_data, ep->bna_dummy_req->td_data,
2140 ep->bna_occurred = 0;
2141 udc_init_bna_dummy(ep->req);
2179 if ((tmp % ep->ep.maxpacket) != 0) {
2181 ep->ep.name, count, tmp);
2189 complete_req(ep, req, 0);
2192 if (!list_empty(&ep->queue) && !ep->halted) {
2193 req = list_entry(ep->queue.next,
2204 if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2208 &ep->regs->desptr);
2218 if (ep->bna_dummy_req) {
2220 writel(ep->bna_dummy_req->td_phys,
2221 &ep->regs->desptr);
2222 ep->bna_occurred = 0;
2239 if (ep->num != UDC_EP0OUT_IX)
2251 } else if (ep->cancel_transfer) {
2253 ep->cancel_transfer = 0;
2263 /* clear OUT bits in ep status */
2264 writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
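
Both per-endpoint interrupt handlers follow a read / handle / write-one-to-clear pattern: the status register is read, each condition bit (BNA, HE, data done) is serviced, and the handled bits are written back to acknowledge them. A sketch with assumed bit positions:

    #include <stdio.h>
    #include <stdint.h>

    #define EPSTS_BNA (1u << 7)   /* assumed positions */
    #define EPSTS_HE  (1u << 9)

    static void handle_ep_irq(volatile uint32_t *sts_reg)
    {
        uint32_t sts = *sts_reg;      /* stands in for readl() */

        if (sts & EPSTS_BNA)
            printf("BNA occurred\n"); /* real driver re-arms a dummy td */
        if (sts & EPSTS_HE)
            printf("host error\n");

        *sts_reg = sts;               /* write 1s back to acknowledge */
    }
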
2275 struct udc_ep *ep;
2280 ep = &dev->ep[ep_ix];
2282 epsts = readl(&ep->regs->sts);
2287 "BNA ep%din occurred - DESPTR = %08lx\n",
2288 ep->num,
2289 (unsigned long) readl(&ep->regs->desptr));
2292 writel(epsts, &ep->regs->sts);
2300 "HE ep%dn occurred - DESPTR = %08lx\n",
2301 ep->num, (unsigned long) readl(&ep->regs->desptr));
2304 writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2313 if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2314 req = list_entry(ep->queue.next,
2331 complete_req(ep, req, 0);
2334 if (list_empty(&ep->queue)) {
2337 tmp |= AMD_BIT(ep->num);
2342 ep->cancel_transfer = 0;
2352 if (!list_empty(&ep->queue)) {
2354 req = list_entry(ep->queue.next,
2359 udc_txfifo_write(ep, &req->req);
2361 if (len > ep->ep.maxpacket)
2362 len = ep->ep.maxpacket;
2365 || (len != ep->ep.maxpacket)) {
2367 complete_req(ep, req, 0);
2382 ep->ep.maxpacket) {
2389 writel(req->td_phys, &ep->regs->desptr);
2399 tmp = readl(&ep->regs->ctl);
2401 writel(tmp, &ep->regs->ctl);
2405 } else if (!use_dma && ep->in) {
2409 tmp |= AMD_BIT(ep->num);
2415 writel(epsts, &ep->regs->sts);
2432 struct udc_ep *ep;
2435 ep = &dev->ep[UDC_EP0OUT_IX];
2440 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2445 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2446 ep->bna_occurred = 1;
2459 ep->dev->stall_ep0in = 0;
2463 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2465 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2466 dev->ep[UDC_EP0IN_IX].naking = 1;
2470 /* clear OUT bits in ep status */
2472 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2475 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2477 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2479 dev->ep[UDC_EP0OUT_IX].td_stp->status =
2488 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2493 dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2498 if (ep->bna_dummy_req) {
2500 writel(ep->bna_dummy_req->td_phys,
2501 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2502 ep->bna_occurred = 0;
2506 dev->ep[UDC_EP0OUT_IX].naking = 1;
2533 ep_tmp = &udc->ep[UDC_EPIN_IX];
2534 udc_set_halt(&ep_tmp->ep, 0);
2535 ep_tmp = &udc->ep[UDC_EPOUT_IX];
2536 udc_set_halt(&ep_tmp->ep, 0);
2545 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2551 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2552 dev->ep[UDC_EP0IN_IX].naking = 0;
2553 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2558 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2565 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2567 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2568 dev->ep[UDC_EP0OUT_IX].naking = 0;
2569 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2573 /* clear OUT bits in ep status */
2575 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2580 /* clear OUT bits in ep status */
2581 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2586 if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2590 dev->ep[UDC_EP0OUT_IX].td->status =
2592 dev->ep[UDC_EP0OUT_IX].td->status,
2603 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2604 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2611 count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2621 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2643 struct udc_ep *ep;
2647 ep = &dev->ep[UDC_EP0IN_IX];
2652 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2660 &dev->ep[UDC_EP0IN_IX].regs->sts);
2666 if (ep->dma) {
2669 &dev->ep[UDC_EP0IN_IX].regs->sts);
2674 tmp = readl(&ep->regs->ctl);
2676 writel(tmp, &ep->regs->ctl);
2678 if (!list_empty(&ep->queue)) {
2680 req = list_entry(ep->queue.next,
2683 if (ep->dma) {
2685 writel(req->td_phys, &ep->regs->desptr);
2695 readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2698 &dev->ep[UDC_EP0IN_IX].regs->ctl);
2704 complete_req(ep, req, 0);
2708 udc_txfifo_write(ep, &req->req);
2712 if (len > ep->ep.maxpacket)
2713 len = ep->ep.maxpacket;
2717 || (len != ep->ep.maxpacket)) {
2719 complete_req(ep, req, 0);
2725 ep->halted = 0;
2727 if (!ep->dma) {
2730 &dev->ep[UDC_EP0IN_IX].regs->sts);
2746 struct udc_ep *ep;
2768 ep = &dev->ep[i];
2769 if (ep->in) {
2771 /* ep ix in UDC CSR register space */
2772 udc_csr_epix = ep->num;
2775 /* OUT ep */
2777 /* ep ix in UDC CSR register space */
2778 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2782 /* ep cfg */
2783 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2789 ep->halted = 0;
2790 tmp = readl(&ep->regs->ctl);
2792 writel(tmp, &ep->regs->ctl);
2821 ep = &dev->ep[i];
2822 if (ep->in) {
2824 /* ep ix in UDC CSR register space */
2825 udc_csr_epix = ep->num;
2828 /* OUT ep */
2830 /* ep ix in UDC CSR register space */
2831 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2835 /* set ep values */
2837 /* ep interface */
2838 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2841 /* ep alt */
2842 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2848 ep->halted = 0;
2849 tmp = readl(&ep->regs->ctl);
2851 writel(tmp, &ep->regs->ctl);
2883 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2884 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2932 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2933 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2940 /* init ep 0 */
2979 /* check for ep irq */
2989 * iterate endpoints
2999 /* irq for out ep ? */
3051 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
3052 dev->ep[UDC_EP0OUT_IX].td_phys);
3053 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3054 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3084 dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3096 &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3101 dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3105 &dev->ep[UDC_EP0OUT_IX].td_phys);
3110 dev->ep[UDC_EP0OUT_IX].td = td_data;
3114 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3115 dev->ep[UDC_EP0OUT_IX].td_stp_dma);