// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver - gadget side.
 *
 * Copyright (C) 2018-2019 Cadence Design Systems.
 * Copyright (C) 2017-2018 NXP
 *
 * Authors: Pawel Jez <pjez@cadence.com>,
 *          Pawel Laszczak <pawell@cadence.com>
 *          Peter Chen <peter.chen@nxp.com>
 */

/*
 * Work around 1:
 * In some situations, the controller may latch a stale data address from a
 * TRB, in the following sequence:
 * 1. Controller reads a TRB that includes the data address
 * 2. Software updates TRBs, including the data address and Cycle bit
 * 3. Controller reads the TRB that includes the Cycle bit
 * 4. DMA runs with the stale data address
 *
 * To fix this problem, the driver must make the first TRB in a TD invalid.
 * After preparing all TRBs the driver must check the position of DMA and,
 * if DMA points to the first just-added TRB while the doorbell is 1,
 * the driver must defer making this TRB valid. This TRB will be made
 * valid while adding the next TRB, but only if DMA is stopped or at the
 * TRBERR interrupt.
 *
 * Issue has been fixed in DEV_VER_V3 version of controller.
 *
 * Work around 2:
 * The controller's OUT endpoints share on-chip buffers for all incoming
 * packets, including ep0out. It is a FIFO buffer, so packets must be handled
 * by DMA in the correct order. If the first packet in the buffer is not
 * handled, then the following packets directed to other endpoints and
 * functions are blocked.
 * Additionally, the packets directed to one endpoint can block the entire
 * on-chip buffer. In this case transfers to other endpoints are also blocked.
 *
 * To resolve this issue, after raising the descriptor-missing interrupt the
 * driver prepares an internal usb_request object and uses it to arm the DMA
 * transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for these endpoints.
46 * 47 * Driver use limited number of buffer. This number can be set by macro 48 * CDNS3_WA2_NUM_BUFFERS. 49 * 50 * Such blocking situation was observed on ACM gadget. For this function 51 * host send OUT data packet but ACM function is not prepared for this packet. 52 * It's cause that buffer placed in on chip memory block transfer to other 53 * endpoints. 54 * 55 * Issue has been fixed in DEV_VER_V2 version of controller. 56 * 57 */ 58 59#include <dm.h> 60#include <dm/device_compat.h> 61#include <dm/devres.h> 62#include <linux/bitops.h> 63#include <linux/delay.h> 64#include <linux/err.h> 65#include <linux/printk.h> 66#include <linux/usb/gadget.h> 67#include <linux/compat.h> 68#include <linux/iopoll.h> 69#include <linux/dma-mapping.h> 70#include <linux/bitmap.h> 71#include <linux/bug.h> 72 73#include "core.h" 74#include "gadget-export.h" 75#include "gadget.h" 76#include "trace.h" 77#include "drd.h" 78 79#define readl_poll_timeout_atomic readl_poll_timeout 80#define usleep_range(a, b) udelay((b)) 81 82static int __cdns3_gadget_ep_queue(struct usb_ep *ep, 83 struct usb_request *request, 84 gfp_t gfp_flags); 85 86static void cdns3_gadget_udc_set_speed(struct usb_gadget *gadget, 87 enum usb_device_speed speed); 88 89/** 90 * cdns3_set_register_bit - set bit in given register. 91 * @ptr: address of device controller register to be read and changed 92 * @mask: bits requested to set 93 */ 94void cdns3_set_register_bit(void __iomem *ptr, u32 mask) 95{ 96 mask = readl(ptr) | mask; 97 writel(mask, ptr); 98} 99 100/** 101 * cdns3_ep_addr_to_index - Macro converts endpoint address to 102 * index of endpoint object in cdns3_device.eps[] container 103 * @ep_addr: endpoint address for which endpoint object is required 104 * 105 */ 106u8 cdns3_ep_addr_to_index(u8 ep_addr) 107{ 108 return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 
16 : 0)); 109} 110 111static int cdns3_get_dma_pos(struct cdns3_device *priv_dev, 112 struct cdns3_endpoint *priv_ep) 113{ 114 int dma_index; 115 116 dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma; 117 118 return dma_index / TRB_SIZE; 119} 120 121/** 122 * cdns3_next_request - returns next request from list 123 * @list: list containing requests 124 * 125 * Returns request or NULL if no requests in list 126 */ 127struct usb_request *cdns3_next_request(struct list_head *list) 128{ 129 return list_first_entry_or_null(list, struct usb_request, list); 130} 131 132/** 133 * cdns3_next_align_buf - returns next buffer from list 134 * @list: list containing buffers 135 * 136 * Returns buffer or NULL if no buffers in list 137 */ 138struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list) 139{ 140 return list_first_entry_or_null(list, struct cdns3_aligned_buf, list); 141} 142 143/** 144 * cdns3_next_priv_request - returns next request from list 145 * @list: list containing requests 146 * 147 * Returns request or NULL if no requests in list 148 */ 149struct cdns3_request *cdns3_next_priv_request(struct list_head *list) 150{ 151 return list_first_entry_or_null(list, struct cdns3_request, list); 152} 153 154/** 155 * select_ep - selects endpoint 156 * @priv_dev: extended gadget object 157 * @ep: endpoint address 158 */ 159void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep) 160{ 161 if (priv_dev->selected_ep == ep) 162 return; 163 164 priv_dev->selected_ep = ep; 165 writel(ep, &priv_dev->regs->ep_sel); 166} 167 168dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep, 169 struct cdns3_trb *trb) 170{ 171 u32 offset = (char *)trb - (char *)priv_ep->trb_pool; 172 173 return priv_ep->trb_pool_dma + offset; 174} 175 176int cdns3_ring_size(struct cdns3_endpoint *priv_ep) 177{ 178 switch (priv_ep->type) { 179 case USB_ENDPOINT_XFER_ISOC: 180 return TRB_ISO_RING_SIZE; 181 case USB_ENDPOINT_XFER_CONTROL: 182 return TRB_CTRL_RING_SIZE; 
183 default: 184 return TRB_RING_SIZE; 185 } 186} 187 188/** 189 * cdns3_allocate_trb_pool - Allocates TRB's pool for selected endpoint 190 * @priv_ep: endpoint object 191 * 192 * Function will return 0 on success or -ENOMEM on allocation error 193 */ 194int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) 195{ 196 int ring_size = cdns3_ring_size(priv_ep); 197 struct cdns3_trb *link_trb; 198 199 if (!priv_ep->trb_pool) { 200 priv_ep->trb_pool = 201 dma_alloc_coherent(ring_size, 202 (unsigned long *)&priv_ep->trb_pool_dma); 203 if (!priv_ep->trb_pool) 204 return -ENOMEM; 205 } else { 206 memset(priv_ep->trb_pool, 0, ring_size); 207 } 208 209 if (!priv_ep->num) 210 return 0; 211 212 priv_ep->num_trbs = ring_size / TRB_SIZE; 213 /* Initialize the last TRB as Link TRB. */ 214 link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1)); 215 link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma); 216 link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE; 217 218 return 0; 219} 220 221static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep) 222{ 223 if (priv_ep->trb_pool) { 224 dma_free_coherent(priv_ep->trb_pool); 225 priv_ep->trb_pool = NULL; 226 } 227} 228 229/** 230 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint 231 * @priv_ep: endpoint object 232 * 233 * Endpoint must be selected before call to this function 234 */ 235static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep) 236{ 237 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 238 int val; 239 240 trace_cdns3_halt(priv_ep, 1, 1); 241 242 writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL, 243 &priv_dev->regs->ep_cmd); 244 245 /* wait for DFLUSH cleared */ 246 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 247 !(val & EP_CMD_DFLUSH), 1000); 248 priv_ep->flags |= EP_STALLED; 249 priv_ep->flags &= ~EP_STALL_PENDING; 250} 251 252/** 253 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller. 
254 * @priv_dev: extended gadget object 255 */ 256void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev) 257{ 258 writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf); 259 260 cdns3_allow_enable_l1(priv_dev, 0); 261 priv_dev->hw_configured_flag = 0; 262 priv_dev->onchip_used_size = 0; 263 priv_dev->out_mem_is_allocated = 0; 264 priv_dev->wait_for_setup = 0; 265} 266 267/** 268 * cdns3_ep_inc_trb - increment a trb index. 269 * @index: Pointer to the TRB index to increment. 270 * @cs: Cycle state 271 * @trb_in_seg: number of TRBs in segment 272 * 273 * The index should never point to the link TRB. After incrementing, 274 * if it is point to the link TRB, wrap around to the beginning and revert 275 * cycle state bit The 276 * link TRB is always at the last TRB entry. 277 */ 278static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg) 279{ 280 (*index)++; 281 if (*index == (trb_in_seg - 1)) { 282 *index = 0; 283 *cs ^= 1; 284 } 285} 286 287/** 288 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer 289 * @priv_ep: The endpoint whose enqueue pointer we're incrementing 290 */ 291static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep) 292{ 293 priv_ep->free_trbs--; 294 cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs); 295} 296 297/** 298 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer 299 * @priv_ep: The endpoint whose dequeue pointer we're incrementing 300 */ 301static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep) 302{ 303 priv_ep->free_trbs++; 304 cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs); 305} 306 307void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req) 308{ 309 struct cdns3_endpoint *priv_ep = priv_req->priv_ep; 310 int current_trb = priv_req->start_trb; 311 312 while (current_trb != priv_req->end_trb) { 313 cdns3_ep_inc_deq(priv_ep); 314 current_trb = priv_ep->dequeue; 315 } 316 317 cdns3_ep_inc_deq(priv_ep); 318} 319 320/** 321 * cdns3_allow_enable_l1 - enable/disable 
permits to transition to L1. 322 * @priv_dev: Extended gadget object 323 * @enable: Enable/disable permit to transition to L1. 324 * 325 * If bit USB_CONF_L1EN is set and device receive Extended Token packet, 326 * then controller answer with ACK handshake. 327 * If bit USB_CONF_L1DS is set and device receive Extended Token packet, 328 * then controller answer with NYET handshake. 329 */ 330void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable) 331{ 332 if (enable) 333 writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf); 334 else 335 writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf); 336} 337 338enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev) 339{ 340 u32 reg; 341 342 reg = readl(&priv_dev->regs->usb_sts); 343 344 if (DEV_SUPERSPEED(reg)) 345 return USB_SPEED_SUPER; 346 else if (DEV_HIGHSPEED(reg)) 347 return USB_SPEED_HIGH; 348 else if (DEV_FULLSPEED(reg)) 349 return USB_SPEED_FULL; 350 else if (DEV_LOWSPEED(reg)) 351 return USB_SPEED_LOW; 352 return USB_SPEED_UNKNOWN; 353} 354 355/** 356 * cdns3_start_all_request - add to ring all request not started 357 * @priv_dev: Extended gadget object 358 * @priv_ep: The endpoint for whom request will be started. 359 * 360 * Returns return ENOMEM if transfer ring i not enough TRBs to start 361 * all requests. 362 */ 363static int cdns3_start_all_request(struct cdns3_device *priv_dev, 364 struct cdns3_endpoint *priv_ep) 365{ 366 struct usb_request *request; 367 int ret = 0; 368 369 while (!list_empty(&priv_ep->deferred_req_list)) { 370 request = cdns3_next_request(&priv_ep->deferred_req_list); 371 372 ret = cdns3_ep_run_transfer(priv_ep, request); 373 if (ret) 374 return ret; 375 376 list_del(&request->list); 377 list_add_tail(&request->list, 378 &priv_ep->pending_req_list); 379 } 380 381 priv_ep->flags &= ~EP_RING_FULL; 382 return ret; 383} 384 385/* 386 * WA2: Set flag for all not ISOC OUT endpoints. 
If this flag is set 387 * driver try to detect whether endpoint need additional internal 388 * buffer for unblocking on-chip FIFO buffer. This flag will be cleared 389 * if before first DESCMISS interrupt the DMA will be armed. 390 */ 391#define cdns3_wa2_enable_detection(priv_dev, ep_priv, reg) do { \ 392 if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \ 393 priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \ 394 (reg) |= EP_STS_EN_DESCMISEN; \ 395 } } while (0) 396 397/** 398 * cdns3_wa2_descmiss_copy_data copy data from internal requests to 399 * request queued by class driver. 400 * @priv_ep: extended endpoint object 401 * @request: request object 402 */ 403static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep, 404 struct usb_request *request) 405{ 406 struct usb_request *descmiss_req; 407 struct cdns3_request *descmiss_priv_req; 408 409 while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { 410 int chunk_end; 411 int length; 412 413 descmiss_priv_req = 414 cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); 415 descmiss_req = &descmiss_priv_req->request; 416 417 /* driver can't touch pending request */ 418 if (descmiss_priv_req->flags & REQUEST_PENDING) 419 break; 420 421 chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH; 422 length = request->actual + descmiss_req->actual; 423 424 request->status = descmiss_req->status; 425 426 if (length <= request->length) { 427 memcpy(&((u8 *)request->buf)[request->actual], 428 descmiss_req->buf, 429 descmiss_req->actual); 430 request->actual = length; 431 } else { 432 /* It should never occur */ 433 request->status = -ENOMEM; 434 } 435 436 list_del_init(&descmiss_priv_req->list); 437 438 kfree(descmiss_req->buf); 439 cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req); 440 --priv_ep->wa2_counter; 441 442 if (!chunk_end) 443 break; 444 } 445} 446 447struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev, 448 struct cdns3_endpoint *priv_ep, 449 struct 
cdns3_request *priv_req) 450{ 451 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN && 452 priv_req->flags & REQUEST_INTERNAL) { 453 struct usb_request *req; 454 455 req = cdns3_next_request(&priv_ep->deferred_req_list); 456 457 priv_ep->descmis_req = NULL; 458 459 if (!req) 460 return NULL; 461 462 cdns3_wa2_descmiss_copy_data(priv_ep, req); 463 if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) && 464 req->length != req->actual) { 465 /* wait for next part of transfer */ 466 return NULL; 467 } 468 469 if (req->status == -EINPROGRESS) 470 req->status = 0; 471 472 list_del_init(&req->list); 473 cdns3_start_all_request(priv_dev, priv_ep); 474 return req; 475 } 476 477 return &priv_req->request; 478} 479 480int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev, 481 struct cdns3_endpoint *priv_ep, 482 struct cdns3_request *priv_req) 483{ 484 int deferred = 0; 485 486 /* 487 * If transfer was queued before DESCMISS appear than we 488 * can disable handling of DESCMISS interrupt. Driver assumes that it 489 * can disable special treatment for this endpoint. 490 */ 491 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) { 492 u32 reg; 493 494 cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir); 495 priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET; 496 reg = readl(&priv_dev->regs->ep_sts_en); 497 reg &= ~EP_STS_EN_DESCMISEN; 498 trace_cdns3_wa2(priv_ep, "workaround disabled\n"); 499 writel(reg, &priv_dev->regs->ep_sts_en); 500 } 501 502 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) { 503 u8 pending_empty = list_empty(&priv_ep->pending_req_list); 504 u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list); 505 506 /* 507 * DESCMISS transfer has been finished, so data will be 508 * directly copied from internal allocated usb_request 509 * objects. 
510 */ 511 if (pending_empty && !descmiss_empty && 512 !(priv_req->flags & REQUEST_INTERNAL)) { 513 cdns3_wa2_descmiss_copy_data(priv_ep, 514 &priv_req->request); 515 516 trace_cdns3_wa2(priv_ep, "get internal stored data"); 517 518 list_add_tail(&priv_req->request.list, 519 &priv_ep->pending_req_list); 520 cdns3_gadget_giveback(priv_ep, priv_req, 521 priv_req->request.status); 522 523 /* 524 * Intentionally driver returns positive value as 525 * correct value. It informs that transfer has 526 * been finished. 527 */ 528 return EINPROGRESS; 529 } 530 531 /* 532 * Driver will wait for completion DESCMISS transfer, 533 * before starts new, not DESCMISS transfer. 534 */ 535 if (!pending_empty && !descmiss_empty) { 536 trace_cdns3_wa2(priv_ep, "wait for pending transfer\n"); 537 deferred = 1; 538 } 539 540 if (priv_req->flags & REQUEST_INTERNAL) 541 list_add_tail(&priv_req->list, 542 &priv_ep->wa2_descmiss_req_list); 543 } 544 545 return deferred; 546} 547 548static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep) 549{ 550 struct cdns3_request *priv_req; 551 552 while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { 553 u8 chain; 554 555 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); 556 chain = !!(priv_req->flags & REQUEST_INTERNAL_CH); 557 558 trace_cdns3_wa2(priv_ep, "removes eldest request"); 559 560 kfree(priv_req->request.buf); 561 cdns3_gadget_ep_free_request(&priv_ep->endpoint, 562 &priv_req->request); 563 list_del_init(&priv_req->list); 564 --priv_ep->wa2_counter; 565 566 if (!chain) 567 break; 568 } 569} 570 571/** 572 * cdns3_wa2_descmissing_packet - handles descriptor missing event. 573 * @priv_dev: extended gadget object 574 * 575 * This function is used only for WA2. For more information see Work around 2 576 * description. 
577 */ 578static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep) 579{ 580 struct cdns3_request *priv_req; 581 struct usb_request *request; 582 583 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) { 584 priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET; 585 priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN; 586 } 587 588 trace_cdns3_wa2(priv_ep, "Description Missing detected\n"); 589 590 if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) 591 cdns3_wa2_remove_old_request(priv_ep); 592 593 request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint, 594 GFP_ATOMIC); 595 if (!request) 596 goto err; 597 598 priv_req = to_cdns3_request(request); 599 priv_req->flags |= REQUEST_INTERNAL; 600 601 /* if this field is still assigned it indicate that transfer related 602 * with this request has not been finished yet. Driver in this 603 * case simply allocate next request and assign flag REQUEST_INTERNAL_CH 604 * flag to previous one. It will indicate that current request is 605 * part of the previous one. 606 */ 607 if (priv_ep->descmis_req) 608 priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH; 609 610 priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE, 611 GFP_ATOMIC); 612 priv_ep->wa2_counter++; 613 614 if (!priv_req->request.buf) { 615 cdns3_gadget_ep_free_request(&priv_ep->endpoint, request); 616 goto err; 617 } 618 619 priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE; 620 priv_ep->descmis_req = priv_req; 621 622 __cdns3_gadget_ep_queue(&priv_ep->endpoint, 623 &priv_ep->descmis_req->request, 624 GFP_ATOMIC); 625 626 return; 627 628err: 629 dev_err(priv_ep->cdns3_dev->dev, 630 "Failed: No sufficient memory for DESCMIS\n"); 631} 632 633/** 634 * cdns3_gadget_giveback - call struct usb_request's ->complete callback 635 * @priv_ep: The endpoint to whom the request belongs to 636 * @priv_req: The request we're giving back 637 * @status: completion code for the request 638 * 639 * Must be called with controller's lock held and interrupts disabled. 
This 640 * function will unmap @req and call its ->complete() callback to notify upper 641 * layers that it has completed. 642 */ 643void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, 644 struct cdns3_request *priv_req, 645 int status) 646{ 647 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 648 struct usb_request *request = &priv_req->request; 649 650 list_del_init(&request->list); 651 652 if (request->status == -EINPROGRESS) 653 request->status = status; 654 655 usb_gadget_unmap_request(&priv_dev->gadget, request, 656 priv_ep->dir); 657 658 if ((priv_req->flags & REQUEST_UNALIGNED) && 659 priv_ep->dir == USB_DIR_OUT && !request->status) 660 memcpy(request->buf, priv_req->aligned_buf->buf, 661 request->length); 662 663 priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED); 664 trace_cdns3_gadget_giveback(priv_req); 665 666 if (priv_dev->dev_ver < DEV_VER_V2) { 667 request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep, 668 priv_req); 669 if (!request) 670 return; 671 } 672 673 if (request->complete) { 674 spin_unlock(&priv_dev->lock); 675 usb_gadget_giveback_request(&priv_ep->endpoint, 676 request); 677 spin_lock(&priv_dev->lock); 678 } 679 680 if (request->buf == priv_dev->zlp_buf) 681 cdns3_gadget_ep_free_request(&priv_ep->endpoint, request); 682} 683 684void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep) 685{ 686 /* Work around for stale data address in TRB*/ 687 if (priv_ep->wa1_set) { 688 trace_cdns3_wa1(priv_ep, "restore cycle bit"); 689 690 priv_ep->wa1_set = 0; 691 priv_ep->wa1_trb_index = 0xFFFF; 692 if (priv_ep->wa1_cycle_bit) { 693 priv_ep->wa1_trb->control = 694 priv_ep->wa1_trb->control | 0x1; 695 } else { 696 priv_ep->wa1_trb->control = 697 priv_ep->wa1_trb->control & ~0x1; 698 } 699 } 700} 701 702static void cdns3_free_aligned_request_buf(struct cdns3_device *priv_dev) 703{ 704 struct cdns3_aligned_buf *buf, *tmp; 705 unsigned long flags; 706 707 spin_lock_irqsave(&priv_dev->lock, flags); 708 709 
list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) { 710 if (!buf->in_use) { 711 list_del(&buf->list); 712 713 /* 714 * Re-enable interrupts to free DMA capable memory. 715 * Driver can't free this memory with disabled 716 * interrupts. 717 */ 718 spin_unlock_irqrestore(&priv_dev->lock, flags); 719 dma_free_coherent(buf->buf); 720 kfree(buf); 721 spin_lock_irqsave(&priv_dev->lock, flags); 722 } 723 } 724 725 spin_unlock_irqrestore(&priv_dev->lock, flags); 726} 727 728static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req) 729{ 730 struct cdns3_endpoint *priv_ep = priv_req->priv_ep; 731 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 732 struct cdns3_aligned_buf *buf; 733 734 /* check if buffer is aligned to 8. */ 735 if (!((uintptr_t)priv_req->request.buf & 0x7)) 736 return 0; 737 738 buf = priv_req->aligned_buf; 739 740 if (!buf || priv_req->request.length > buf->size) { 741 buf = kzalloc(sizeof(*buf), GFP_ATOMIC); 742 if (!buf) 743 return -ENOMEM; 744 745 buf->size = priv_req->request.length; 746 747 buf->buf = dma_alloc_coherent(buf->size, 748 (unsigned long *)&buf->dma); 749 if (!buf->buf) { 750 kfree(buf); 751 return -ENOMEM; 752 } 753 754 if (priv_req->aligned_buf) { 755 trace_cdns3_free_aligned_request(priv_req); 756 priv_req->aligned_buf->in_use = 0; 757#ifndef __UBOOT__ 758 queue_work(system_freezable_wq, 759 &priv_dev->aligned_buf_wq); 760#else 761 cdns3_free_aligned_request_buf(priv_dev); 762#endif 763 } 764 765 buf->in_use = 1; 766 priv_req->aligned_buf = buf; 767 768 list_add_tail(&buf->list, 769 &priv_dev->aligned_buf_list); 770 } 771 772 if (priv_ep->dir == USB_DIR_IN) { 773 memcpy(buf->buf, priv_req->request.buf, 774 priv_req->request.length); 775 } 776 777 priv_req->flags |= REQUEST_UNALIGNED; 778 trace_cdns3_prepare_aligned_request(priv_req); 779 780 return 0; 781} 782 783static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep, 784 struct cdns3_trb *trb) 785{ 786 struct cdns3_device *priv_dev = 
priv_ep->cdns3_dev; 787 788 if (!priv_ep->wa1_set) { 789 u32 doorbell; 790 791 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); 792 793 if (doorbell) { 794 priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0; 795 priv_ep->wa1_set = 1; 796 priv_ep->wa1_trb = trb; 797 priv_ep->wa1_trb_index = priv_ep->enqueue; 798 trace_cdns3_wa1(priv_ep, "set guard"); 799 return 0; 800 } 801 } 802 return 1; 803} 804 805static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev, 806 struct cdns3_endpoint *priv_ep) 807{ 808 int dma_index; 809 u32 doorbell; 810 811 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); 812 dma_index = cdns3_get_dma_pos(priv_dev, priv_ep); 813 814 if (!doorbell || dma_index != priv_ep->wa1_trb_index) 815 cdns3_wa1_restore_cycle_bit(priv_ep); 816} 817 818/** 819 * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware 820 * @priv_ep: endpoint object 821 * 822 * Returns zero on success or negative value on failure 823 */ 824int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, 825 struct usb_request *request) 826{ 827 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 828 struct cdns3_request *priv_req; 829 struct cdns3_trb *trb; 830 dma_addr_t trb_dma; 831 u32 togle_pcs = 1; 832 int sg_iter = 0; 833 int num_trb = 1; 834 int address; 835 u32 control; 836 int pcs; 837 838 if (num_trb > priv_ep->free_trbs) { 839 priv_ep->flags |= EP_RING_FULL; 840 return -ENOBUFS; 841 } 842 843 priv_req = to_cdns3_request(request); 844 address = priv_ep->endpoint.desc->bEndpointAddress; 845 846 priv_ep->flags |= EP_PENDING_REQUEST; 847 848 /* must allocate buffer aligned to 8 */ 849 if (priv_req->flags & REQUEST_UNALIGNED) 850 trb_dma = priv_req->aligned_buf->dma; 851 else 852 trb_dma = request->dma; 853 854 trb = priv_ep->trb_pool + priv_ep->enqueue; 855 priv_req->start_trb = priv_ep->enqueue; 856 priv_req->trb = trb; 857 858 cdns3_select_ep(priv_ep->cdns3_dev, address); 859 860 /* prepare ring */ 861 if 
((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) { 862 struct cdns3_trb *link_trb; 863 int doorbell, dma_index; 864 u32 ch_bit = 0; 865 866 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); 867 dma_index = cdns3_get_dma_pos(priv_dev, priv_ep); 868 869 /* Driver can't update LINK TRB if it is current processed. */ 870 if (doorbell && dma_index == priv_ep->num_trbs - 1) { 871 priv_ep->flags |= EP_DEFERRED_DRDY; 872 return -ENOBUFS; 873 } 874 875 /*updating C bt in Link TRB before starting DMA*/ 876 link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1); 877 /* 878 * For TRs size equal 2 enabling TRB_CHAIN for epXin causes 879 * that DMA stuck at the LINK TRB. 880 * On the other hand, removing TRB_CHAIN for longer TRs for 881 * epXout cause that DMA stuck after handling LINK TRB. 882 * To eliminate this strange behavioral driver set TRB_CHAIN 883 * bit only for TR size > 2. 884 */ 885 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC || 886 TRBS_PER_SEGMENT > 2) 887 ch_bit = TRB_CHAIN; 888 889 link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) | 890 TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit; 891 } 892 893 if (priv_dev->dev_ver <= DEV_VER_V2) 894 togle_pcs = cdns3_wa1_update_guard(priv_ep, trb); 895 896 /* set incorrect Cycle Bit for first trb*/ 897 control = priv_ep->pcs ? 0 : TRB_CYCLE; 898 899 do { 900 u32 length; 901 u16 td_size = 0; 902 903 /* fill TRB */ 904 control |= TRB_TYPE(TRB_NORMAL); 905 trb->buffer = TRB_BUFFER(trb_dma); 906 907 length = request->length; 908 909 if (likely(priv_dev->dev_ver >= DEV_VER_V2)) 910 td_size = DIV_ROUND_UP(length, 911 priv_ep->endpoint.maxpacket); 912 913 trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) | 914 TRB_LEN(length); 915 if (priv_dev->gadget.speed == USB_SPEED_SUPER) 916 trb->length |= TRB_TDL_SS_SIZE(td_size); 917 else 918 control |= TRB_TDL_HS_SIZE(td_size); 919 920 pcs = priv_ep->pcs ? 
TRB_CYCLE : 0; 921 922 /* 923 * first trb should be prepared as last to avoid processing 924 * transfer to early 925 */ 926 if (sg_iter != 0) 927 control |= pcs; 928 929 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) { 930 control |= TRB_IOC | TRB_ISP; 931 } else { 932 /* for last element in TD or in SG list */ 933 if (sg_iter == (num_trb - 1) && sg_iter != 0) 934 control |= pcs | TRB_IOC | TRB_ISP; 935 } 936 937 if (sg_iter) 938 trb->control = control; 939 else 940 priv_req->trb->control = control; 941 942 control = 0; 943 ++sg_iter; 944 priv_req->end_trb = priv_ep->enqueue; 945 cdns3_ep_inc_enq(priv_ep); 946 trb = priv_ep->trb_pool + priv_ep->enqueue; 947 } while (sg_iter < num_trb); 948 949 trb = priv_req->trb; 950 951 priv_req->flags |= REQUEST_PENDING; 952 953 if (sg_iter == 1) 954 trb->control |= TRB_IOC | TRB_ISP; 955 956 /* 957 * Memory barrier - cycle bit must be set before other filds in trb. 958 */ 959 dmb(); 960 961 /* give the TD to the consumer*/ 962 if (togle_pcs) 963 trb->control = trb->control ^ 1; 964 965 if (priv_dev->dev_ver <= DEV_VER_V2) 966 cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep); 967 968 trace_cdns3_prepare_trb(priv_ep, priv_req->trb); 969 970 /* 971 * Memory barrier - Cycle Bit must be set before trb->length and 972 * trb->buffer fields. 973 */ 974 dmb(); 975 976 /* 977 * For DMULT mode we can set address to transfer ring only once after 978 * enabling endpoint. 979 */ 980 if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) { 981 /* 982 * Until SW is not ready to handle the OUT transfer the ISO OUT 983 * Endpoint should be disabled (EP_CFG.ENABLE = 0). 984 * EP_CFG_ENABLE must be set before updating ep_traddr. 
985 */ 986 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir && 987 !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) { 988 priv_ep->flags |= EP_QUIRK_ISO_OUT_EN; 989 cdns3_set_register_bit(&priv_dev->regs->ep_cfg, 990 EP_CFG_ENABLE); 991 } 992 993 writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma + 994 priv_req->start_trb * TRB_SIZE), 995 &priv_dev->regs->ep_traddr); 996 997 priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR; 998 } 999 1000 if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) { 1001 trace_cdns3_ring(priv_ep); 1002 /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/ 1003 writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts); 1004 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); 1005 trace_cdns3_doorbell_epx(priv_ep->name, 1006 readl(&priv_dev->regs->ep_traddr)); 1007 } 1008 1009 /* WORKAROUND for transition to L0 */ 1010 __cdns3_gadget_wakeup(priv_dev); 1011 1012 return 0; 1013} 1014 1015void cdns3_set_hw_configuration(struct cdns3_device *priv_dev) 1016{ 1017 struct cdns3_endpoint *priv_ep; 1018 struct usb_ep *ep; 1019 int val; 1020 1021 if (priv_dev->hw_configured_flag) 1022 return; 1023 1024 writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf); 1025 writel(EP_CMD_ERDY | EP_CMD_REQ_CMPL, &priv_dev->regs->ep_cmd); 1026 1027 cdns3_set_register_bit(&priv_dev->regs->usb_conf, 1028 USB_CONF_U1EN | USB_CONF_U2EN); 1029 1030 /* wait until configuration set */ 1031 readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val, 1032 val & USB_STS_CFGSTS_MASK, 100); 1033 1034 priv_dev->hw_configured_flag = 1; 1035 1036 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { 1037 priv_ep = ep_to_cdns3_ep(ep); 1038 if (priv_ep->flags & EP_ENABLED) 1039 cdns3_start_all_request(priv_dev, priv_ep); 1040 } 1041} 1042 1043/** 1044 * cdns3_request_handled - check whether request has been handled by DMA 1045 * 1046 * @priv_ep: extended endpoint object. 
 * @priv_req: request object for checking
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = priv_ep->dequeue - dequeue position
 * EQ = priv_ep->enqueue - enqueue position
 * ST = priv_req->start_trb - index of first TRB in transfer ring
 * ET = priv_req->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of TRB being processed by DMA.
 *
 * As a first step, the function checks whether the cycle bit for
 * priv_req->start_trb is correct.
 *
 * Some rules:
 * 1. priv_ep->dequeue never exceeds current_index.
 * 2. priv_ep->enqueue never exceeds priv_ep->dequeue.
 * 3. Exception: priv_ep->enqueue == priv_ep->dequeue
 *    while priv_ep->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * Then recognition can be split into two cases:
 * Case 1 - priv_ep->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * The request has been handled by DMA if ST and ET are between DQ and CI.
 *
 * Case 2 - priv_ep->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end of
 * the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 *
 * The request has been handled by DMA if ET is less than CI or
 * ET is greater than or equal to DQ.
 */
static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
				  struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb = priv_req->trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	trb = &priv_ep->trb_pool[priv_req->start_trb];

	/* Stale cycle bit: DMA has not consumed this TD yet. */
	if ((trb->control & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	/* Doorbell set while DMA still sits on the dequeue TRB: not done. */
	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal 2). */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		/* Ring full (rule 3 in the comment above): treat as handled. */
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		/* Case 1: DMA ahead of dequeue without ring wrap. */
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		if (priv_req->end_trb >= priv_ep->dequeue &&
		    priv_req->end_trb < current_index)
			handled = 1;
	} else if (priv_ep->dequeue > current_index) {
		/* Case 2: DMA wrapped through the LINK TRB. */
		if (priv_req->end_trb < current_index ||
		    priv_req->end_trb >= priv_ep->dequeue)
			handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}

/*
 * cdns3_transfer_completed - give back every pending request that the DMA
 * has finished on @priv_ep, then re-arm the ring for the next TD.
 */
static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	struct cdns3_trb *trb;

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);

		/* Re-select endpoint. It could be changed by other CPU during
		 * handling usb_gadget_giveback_request.
		 */
#ifndef __UBOOT__
		cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
#else
		cdns3_select_ep(priv_dev,
				priv_ep->endpoint.desc->bEndpointAddress);
#endif

		/* Stop at the first request the DMA has not finished. */
		if (!cdns3_request_handled(priv_ep, priv_req))
			goto prepare_next_td;

		trb = priv_ep->trb_pool + priv_ep->dequeue;
		trace_cdns3_complete_trb(priv_ep, trb);

		if (trb != priv_req->trb)
			dev_warn(priv_dev->dev,
				 "request_trb=0x%p, queue_trb=0x%p\n",
				 priv_req->trb, trb);

		/* Residual length reported by the controller in the TRB. */
		request->actual = TRB_LEN(le32_to_cpu(trb->length));
		cdns3_move_deq_to_next_trb(priv_req);
		cdns3_gadget_giveback(priv_ep, priv_req, 0);

		/* With a 2-TRB ring only one non-ISO TD can complete here. */
		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
		    TRBS_PER_SEGMENT == 2)
			break;
	}
	priv_ep->flags &= ~EP_PENDING_REQUEST;

prepare_next_td:
	if (!(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);
}

/*
 * cdns3_rearm_transfer - restore the WA1 cycle bit and, if @rearm is set,
 * ring the doorbell again so the DMA re-reads the transfer ring.
 */
void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	cdns3_wa1_restore_cycle_bit(priv_ep);

	if (rearm) {
		trace_cdns3_ring(priv_ep);

		/* Cycle Bit must be updated before arming DMA.
		 */
		dmb();
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);

		__cdns3_gadget_wakeup(priv_dev);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}
}

/**
 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
 * @priv_ep: endpoint object
 *
 * Returns 0
 */
static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 ep_sts_reg;

#ifndef __UBOOT__
	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
#else
	cdns3_select_ep(priv_dev, priv_ep->endpoint.desc->bEndpointAddress);
#endif

	trace_cdns3_epx_irq(priv_dev, priv_ep);

	/* Read and acknowledge (write-1-to-clear) all EP status bits. */
	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
	writel(ep_sts_reg, &priv_dev->regs->ep_sts);

	if (ep_sts_reg & EP_STS_TRBERR) {
		/* A stall deferred in __cdns3_gadget_ep_set_halt can be
		 * applied now that the transfer has stopped.
		 */
		if (priv_ep->flags & EP_STALL_PENDING &&
		    !(ep_sts_reg & EP_STS_DESCMIS &&
		    priv_dev->dev_ver < DEV_VER_V2)) {
			cdns3_ep_stall_flush(priv_ep);
		}

		/*
		 * For isochronous transfer driver completes request on
		 * IOC or on TRBERR. IOC appears only when device receive
		 * OUT data packet. If host disable stream or lost some packet
		 * then the only way to finish all queued transfer is to do it
		 * on TRBERR event.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
		    !priv_ep->wa1_set) {
			if (!priv_ep->dir) {
				/* Disable the ISO OUT EP so it can be
				 * re-enabled before the next doorbell
				 * (EP_QUIRK_ISO_OUT_EN handling).
				 */
				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);

				ep_cfg &= ~EP_CFG_ENABLE;
				writel(ep_cfg, &priv_dev->regs->ep_cfg);
				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
			}
			cdns3_transfer_completed(priv_dev, priv_ep);
		} else if (!(priv_ep->flags & EP_STALLED) &&
			  !(priv_ep->flags & EP_STALL_PENDING)) {
			if (priv_ep->flags & EP_DEFERRED_DRDY) {
				/* WA1: doorbell was deferred; start now. */
				priv_ep->flags &= ~EP_DEFERRED_DRDY;
				cdns3_start_all_request(priv_dev, priv_ep);
			} else {
				cdns3_rearm_transfer(priv_ep,
						     priv_ep->wa1_set);
			}
		}
	}

	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
			/* ISP means a short packet terminated the transfer. */
			if (ep_sts_reg & EP_STS_ISP)
				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
			else
				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
		}

		cdns3_transfer_completed(priv_dev, priv_ep);
	}

	/*
	 * WA2: this condition should only be meet when
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
	 * In other cases this interrupt will be disabled.
	 */
	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
	    !(priv_ep->flags & EP_STALLED))
		cdns3_wa2_descmissing_packet(priv_ep);

	return 0;
}

/*
 * cdns3_disconnect_gadget - notify the gadget driver about disconnect.
 * Drops priv_dev->lock around the callback because the gadget driver may
 * re-enter the UDC (e.g. dequeue requests) from ->disconnect().
 */
static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
	if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) {
		spin_unlock(&priv_dev->lock);
		priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
		spin_lock(&priv_dev->lock);
	}
}

/**
 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
 * @priv_dev: extended gadget object
 * @usb_ists: bitmap representation of device's reported interrupts
 *            (usb_ists register value)
 */
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
					      u32 usb_ists)
{
	int speed = 0;

	trace_cdns3_usb_irq(priv_dev, usb_ists);
	if (usb_ists & USB_ISTS_L1ENTI) {
		/*
		 * WORKAROUND: CDNS3 controller has issue with hardware resuming
		 * from L1. To fix it, if any DMA transfer is pending driver
		 * must starts driving resume signal immediately.
		 */
		if (readl(&priv_dev->regs->drbl))
			__cdns3_gadget_wakeup(priv_dev);
	}

	/* Connection detected */
	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
		speed = cdns3_get_speed(priv_dev);
		priv_dev->gadget.speed = speed;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
		cdns3_ep0_config(priv_dev);
	}

	/* Disconnection detected */
	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
		cdns3_disconnect_gadget(priv_dev);
		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
		cdns3_hw_reset_eps_config(priv_dev);
	}

	/* Suspend (L2/U3 entry): lock dropped around gadget callback. */
	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->suspend) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* Resume (L2/U3 exit): lock dropped around gadget callback. */
	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->resume) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->resume(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* Reset detected */
	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
		if (priv_dev->gadget_driver) {
			spin_unlock(&priv_dev->lock);
			usb_gadget_udc_reset(&priv_dev->gadget,
					     priv_dev->gadget_driver);
			spin_lock(&priv_dev->lock);

			/* read again to check the actual speed */
			speed = cdns3_get_speed(priv_dev);
			priv_dev->gadget.speed = speed;
			cdns3_hw_reset_eps_config(priv_dev);
			cdns3_ep0_config(priv_dev);
		}
	}
}

/**
 * cdns3_device_irq_handler- interrupt handler for device part of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t
cdns3_device_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev;
	struct cdns3 *cdns = data;
	irqreturn_t ret = IRQ_NONE;
	u32 reg;

	priv_dev = cdns->gadget_dev;

	/* check USB device interrupt */
	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/* After masking interrupts the new interrupts won't be
		 * reported in usb_ists/ep_ists. In order to not lose some
		 * of them driver disables only detected interrupts.
		 * They will be enabled ASAP after clearing source of
		 * interrupt. This an unusual behavior only applies to
		 * usb_ists register.
		 */
		reg = ~reg & readl(&priv_dev->regs->usb_ien);
		/* mask deferred interrupt. */
		writel(reg, &priv_dev->regs->usb_ien);
		ret = IRQ_WAKE_THREAD;
	}

	/* check endpoint interrupt */
	reg = readl(&priv_dev->regs->ep_ists);
	if (reg) {
		/* Mask all EP interrupts; the thread handler re-enables. */
		writel(0, &priv_dev->regs->ep_ien);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

/**
 * cdns3_device_thread_irq_handler- interrupt handler for device part
 * of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev;
	struct cdns3 *cdns = data;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	int bit;
	u32 reg;

	priv_dev = cdns->gadget_dev;
	spin_lock_irqsave(&priv_dev->lock, flags);

	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/* Acknowledge and re-enable the device-level interrupts. */
		writel(reg, &priv_dev->regs->usb_ists);
		writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
		cdns3_check_usb_interrupt_proceed(priv_dev, reg);
		ret = IRQ_HANDLED;
	}

	reg = readl(&priv_dev->regs->ep_ists);

	/* handle default endpoint OUT */
	if (reg & EP_ISTS_EP_OUT0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
		ret = IRQ_HANDLED;
	}

	/* handle default endpoint IN */
	if (reg & EP_ISTS_EP_IN0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
		ret = IRQ_HANDLED;
	}

	/* check if interrupt from non default endpoint, if no exit */
	reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
	if (!reg)
		goto irqend;

	/* NOTE(review): &reg is a u32 scanned via an unsigned long pointer;
	 * the iteration is bounded to 32 bits, but the cast reads past the
	 * object on 64-bit targets - confirm this is benign here.
	 */
	for_each_set_bit(bit, (unsigned long *)&reg,
			 sizeof(u32) * BITS_PER_BYTE) {
		cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
		ret = IRQ_HANDLED;
	}

irqend:
	/* Re-enable all endpoint interrupts masked in the hard handler. */
	writel(~0, &priv_dev->regs->ep_ien);
	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}

/**
 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
 *
 * The real reservation will occur during write to EP_CFG register,
 * this function is used to check if the 'size' reservation is allowed.
 *
 * @priv_dev: extended gadget object
 * @size: the size (KB) for EP would like to allocate
 * @is_in: endpoint direction
 *
 * Return 0 if the required size can met or negative value on failure
 */
static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
					  int size, int is_in)
{
	int remained;

	/* 2KB are reserved for EP0 */
	remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;

	if (is_in) {
		/* IN endpoints get a private slice of on-chip memory. */
		if (remained < size)
			return -EPERM;

		priv_dev->onchip_used_size += size;
	} else {
		int required;

		/*
		 * ALL OUT EPs are shared the same chunk onchip memory, so
		 * driver checks if it already has assigned enough buffers
		 */
		if (priv_dev->out_mem_is_allocated >= size)
			return 0;

		required = size - priv_dev->out_mem_is_allocated;

		if (required > remained)
			return -EPERM;

		priv_dev->out_mem_is_allocated += required;
		priv_dev->onchip_used_size += required;
	}

	return 0;
}

/*
 * cdns3_configure_dmult - configure DMULT / TDL handling for the controller
 * (and, from DEV_VER_V3 on, per endpoint when @priv_ep is non-NULL).
 */
void cdns3_configure_dmult(struct cdns3_device *priv_dev,
			   struct cdns3_endpoint *priv_ep)
{
	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;

	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
	if (priv_dev->dev_ver <= DEV_VER_V2)
		writel(USB_CONF_DMULT, &regs->usb_conf);

	if (priv_dev->dev_ver == DEV_VER_V2)
		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);

	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
		u32 mask;

		/* IN endpoints use the upper 16 bits of the per-EP masks. */
		if (priv_ep->dir)
			mask = BIT(priv_ep->num + 16);
		else
			mask = BIT(priv_ep->num);

		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
			cdns3_set_register_bit(&regs->tdl_beh, mask);
			cdns3_set_register_bit(&regs->tdl_beh2, mask);
			cdns3_set_register_bit(&regs->dma_adv_td, mask);
		}

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);

		cdns3_set_register_bit(&regs->dtrans, mask);
	}
}

/**
 * cdns3_ep_config Configure hardware endpoint
 * @priv_ep: extended endpoint object
 */
void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
{
	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
	u32 max_packet_size = 0;
	u8 maxburst = 0;
	u32 ep_cfg = 0;
	u8 buffering;
	u8 mult = 0;
	int ret;

	buffering = CDNS3_EP_BUF_SIZE - 1;

	cdns3_configure_dmult(priv_dev, priv_ep);

	switch (priv_ep->type) {
	case USB_ENDPOINT_XFER_INT:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);

		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	case
USB_ENDPOINT_XFER_BULK:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);

		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	default:
		/* Anything else is treated as isochronous. */
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
		mult = CDNS3_EP_ISO_HS_MULT - 1;
		buffering = mult + 1;
	}

	switch (priv_dev->gadget.speed) {
	case USB_SPEED_FULL:
		max_packet_size = is_iso_ep ? 1023 : 64;
		break;
	case USB_SPEED_HIGH:
		max_packet_size = is_iso_ep ? 1024 : 512;
		break;
	case USB_SPEED_SUPER:
		/* It's limitation that driver assumes in driver. */
		mult = 0;
		max_packet_size = 1024;
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
			maxburst = CDNS3_EP_ISO_SS_BURST - 1;
			buffering = (mult + 1) *
				    (maxburst + 1);

			if (priv_ep->interval > 1)
				buffering++;
		} else {
			maxburst = CDNS3_EP_BUF_SIZE - 1;
		}
		break;
	default:
		/* all other speed are not supported */
		return;
	}

	if (max_packet_size == 1024)
		priv_ep->trb_burst_size = 128;
	else if (max_packet_size >= 512)
		priv_ep->trb_burst_size = 64;
	else
		priv_ep->trb_burst_size = 16;

	ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
					     !!priv_ep->dir);
	if (ret) {
		dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
		return;
	}

	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
		  EP_CFG_MULT(mult) |
		  EP_CFG_BUFFERING(buffering) |
		  EP_CFG_MAXBURST(maxburst);

	cdns3_select_ep(priv_dev, bEndpointAddress);
	writel(ep_cfg, &priv_dev->regs->ep_cfg);

	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
		priv_ep->name, ep_cfg);
}

/* Find correct direction for HW endpoint according to description */
static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
				   struct cdns3_endpoint *priv_ep)
{
	return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
	       (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
}

/*
 * cdns3_find_available_ep - pick the first unclaimed HW endpoint whose
 * direction capability matches @desc; returns ERR_PTR(-ENOENT) if none.
 */
static struct
cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
					struct usb_endpoint_descriptor *desc)
{
	struct usb_ep *ep;
	struct cdns3_endpoint *priv_ep;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		unsigned long num;
		/* ep name pattern is like epXin or epXout; parse X.
		 * NOTE(review): only single-digit endpoint numbers are
		 * handled by this one-character parse - confirm ep count.
		 */
		char c[2] = {ep->name[2], '\0'};

		num = dectoul(c, NULL);

		priv_ep = ep_to_cdns3_ep(ep);
		if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
			if (!(priv_ep->flags & EP_CLAIMED)) {
				priv_ep->num = num;
				return priv_ep;
			}
		}
	}

	return ERR_PTR(-ENOENT);
}

/*
 * Cadence IP has one limitation that all endpoints must be configured
 * (Type & MaxPacketSize) before setting configuration through hardware
 * register, it means we can't change endpoints configuration after
 * set_configuration.
 *
 * This function sets the EP_CLAIMED flag which is added when the gadget
 * driver uses usb_ep_autoconfig to configure a specific endpoint;
 * When the udc driver receives a set_configuration request,
 * it goes through all claimed endpoints, and configures all endpoints
 * accordingly.
 *
 * At usb_ep_ops.enable/disable, we only enable and disable endpoint through
 * ep_cfg register which can be changed after set_configuration, and do
 * some software operation accordingly.
 */
static struct
usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
			      struct usb_endpoint_descriptor *desc,
			      struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	struct cdns3_endpoint *priv_ep;
	unsigned long flags;

	priv_ep = cdns3_find_available_ep(priv_dev, desc);
	if (IS_ERR(priv_ep)) {
		dev_err(priv_dev->dev, "no available ep\n");
		return NULL;
	}

	dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_ep->endpoint.desc = desc;
	priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
	priv_ep->type = usb_endpoint_type(desc);
	priv_ep->flags |= EP_CLAIMED;
	/* bInterval is log2-encoded on the wire: 2^(bInterval-1) frames. */
	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return &priv_ep->endpoint;
}

/**
 * cdns3_gadget_ep_alloc_request Allocates request
 * @ep: endpoint object associated with request
 * @gfp_flags: gfp flags
 *
 * Returns allocated request address, NULL on allocation error
 */
struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
						  gfp_t gfp_flags)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_request *priv_req;

	priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
	if (!priv_req)
		return NULL;

	priv_req->priv_ep = priv_ep;

	trace_cdns3_alloc_request(priv_req);
	return &priv_req->request;
}

/**
 * cdns3_gadget_ep_free_request Free memory occupied by request
 * @ep: endpoint object associated with request
 * @request: request to free memory
 */
void cdns3_gadget_ep_free_request(struct usb_ep *ep,
				  struct usb_request *request)
{
	struct cdns3_request *priv_req = to_cdns3_request(request);

	/* Release the bounce buffer back to the endpoint's pool. */
	if (priv_req->aligned_buf)
		priv_req->aligned_buf->in_use = 0;

	trace_cdns3_free_request(priv_req);
	kfree(priv_req);
}

/**
 * cdns3_gadget_ep_enable Enable endpoint
 * @ep: endpoint object
 * @desc: endpoint descriptor
 *
 * Returns 0 on success, error code elsewhere
 */
static int cdns3_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct cdns3_endpoint *priv_ep;
	struct cdns3_device *priv_dev;
	u32 reg = EP_STS_EN_TRBERREN;
	u32 bEndpointAddress;
	unsigned long flags;
	int enable = 1;
	int ret;
	int val;

	/* NOTE(review): ep is dereferenced before the !ep/!desc check
	 * below; the gadget core never passes NULL here, but the ordering
	 * should be confirmed/fixed if this is ever called directly.
	 */
	priv_ep = ep_to_cdns3_ep(ep);
	priv_dev = priv_ep->cdns3_dev;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	if (WARN_ON(priv_ep->flags & EP_ENABLED))
		return 0;

	spin_lock_irqsave(&priv_dev->lock, flags);

	priv_ep->endpoint.desc = desc;
	priv_ep->type = usb_endpoint_type(desc);
	/* bInterval is log2-encoded: 2^(bInterval-1) frames. */
	priv_ep->interval = desc->bInterval ?
		BIT(desc->bInterval - 1) : 0;

	if (priv_ep->interval > ISO_MAX_INTERVAL &&
	    priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
		dev_err(priv_dev->dev, "Driver is limited to %d period\n",
			ISO_MAX_INTERVAL);

		ret = -EINVAL;
		goto exit;
	}

	ret = cdns3_allocate_trb_pool(priv_ep);

	if (ret)
		goto exit;

	bEndpointAddress = priv_ep->num | priv_ep->dir;
	cdns3_select_ep(priv_dev, bEndpointAddress);

	trace_cdns3_gadget_ep_enable(priv_ep);

	/* Reset the endpoint and wait until the command completes. */
	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
					1000);

	if (unlikely(ret)) {
		cdns3_free_trb_pool(priv_ep);
		ret = -EINVAL;
		goto exit;
	}

	/* enable interrupt for selected endpoint */
	cdns3_set_register_bit(&priv_dev->regs->ep_ien,
			       BIT(cdns3_ep_addr_to_index(bEndpointAddress)));

	if (priv_dev->dev_ver < DEV_VER_V2)
		cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);

	writel(reg, &priv_dev->regs->ep_sts_en);

	/*
	 * For some versions of controller at some point during ISO OUT traffic
	 * DMA reads Transfer Ring for the EP which has never got doorbell.
	 * This issue was detected only on simulation, but to avoid this issue
	 * driver add protection against it. To fix it driver enable ISO OUT
	 * endpoint before setting DRBL. This special treatment of ISO OUT
	 * endpoints are recommended by controller specification.
1852 */ 1853 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) 1854 enable = 0; 1855 1856 if (enable) 1857 cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE); 1858 1859 ep->desc = desc; 1860 priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING | 1861 EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN); 1862 priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR; 1863 priv_ep->wa1_set = 0; 1864 priv_ep->enqueue = 0; 1865 priv_ep->dequeue = 0; 1866 reg = readl(&priv_dev->regs->ep_sts); 1867 priv_ep->pcs = !!EP_STS_CCS(reg); 1868 priv_ep->ccs = !!EP_STS_CCS(reg); 1869 /* one TRB is reserved for link TRB used in DMULT mode*/ 1870 priv_ep->free_trbs = priv_ep->num_trbs - 1; 1871exit: 1872 spin_unlock_irqrestore(&priv_dev->lock, flags); 1873 1874 return ret; 1875} 1876 1877/** 1878 * cdns3_gadget_ep_disable Disable endpoint 1879 * @ep: endpoint object 1880 * 1881 * Returns 0 on success, error code elsewhere 1882 */ 1883static int cdns3_gadget_ep_disable(struct usb_ep *ep) 1884{ 1885 struct cdns3_endpoint *priv_ep; 1886 struct cdns3_request *priv_req; 1887 struct cdns3_device *priv_dev; 1888 struct usb_request *request; 1889 unsigned long flags; 1890 int ret = 0; 1891 u32 ep_cfg; 1892 int val; 1893 1894 if (!ep) { 1895 pr_err("usbss: invalid parameters\n"); 1896 return -EINVAL; 1897 } 1898 1899 priv_ep = ep_to_cdns3_ep(ep); 1900 priv_dev = priv_ep->cdns3_dev; 1901 1902 if (WARN_ON(!(priv_ep->flags & EP_ENABLED))) 1903 return 0; 1904 1905 spin_lock_irqsave(&priv_dev->lock, flags); 1906 1907 trace_cdns3_gadget_ep_disable(priv_ep); 1908 1909 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 1910 1911 ep_cfg = readl(&priv_dev->regs->ep_cfg); 1912 ep_cfg &= ~EP_CFG_ENABLE; 1913 writel(ep_cfg, &priv_dev->regs->ep_cfg); 1914 1915 /** 1916 * Driver needs some time before resetting endpoint. 1917 * It need waits for clearing DBUSY bit or for timeout expired. 1918 * 10us is enough time for controller to stop transfer. 
1919 */ 1920 readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val, 1921 !(val & EP_STS_DBUSY), 10); 1922 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 1923 1924 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 1925 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)), 1926 1000); 1927 if (unlikely(ret)) 1928 dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n", 1929 priv_ep->name); 1930 1931 while (!list_empty(&priv_ep->pending_req_list)) { 1932 request = cdns3_next_request(&priv_ep->pending_req_list); 1933 1934 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 1935 -ESHUTDOWN); 1936 } 1937 1938 while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { 1939 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); 1940 1941 kfree(priv_req->request.buf); 1942 cdns3_gadget_ep_free_request(&priv_ep->endpoint, 1943 &priv_req->request); 1944 list_del_init(&priv_req->list); 1945 --priv_ep->wa2_counter; 1946 } 1947 1948 while (!list_empty(&priv_ep->deferred_req_list)) { 1949 request = cdns3_next_request(&priv_ep->deferred_req_list); 1950 1951 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 1952 -ESHUTDOWN); 1953 } 1954 1955 priv_ep->descmis_req = NULL; 1956 1957 ep->desc = NULL; 1958 priv_ep->flags &= ~EP_ENABLED; 1959 1960 spin_unlock_irqrestore(&priv_dev->lock, flags); 1961 1962 return ret; 1963} 1964 1965/** 1966 * cdns3_gadget_ep_queue Transfer data on endpoint 1967 * @ep: endpoint object 1968 * @request: request object 1969 * @gfp_flags: gfp flags 1970 * 1971 * Returns 0 on success, error code elsewhere 1972 */ 1973static int __cdns3_gadget_ep_queue(struct usb_ep *ep, 1974 struct usb_request *request, 1975 gfp_t gfp_flags) 1976{ 1977 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 1978 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 1979 struct cdns3_request *priv_req; 1980 int ret = 0; 1981 1982 request->actual = 0; 1983 request->status = -EINPROGRESS; 1984 priv_req = to_cdns3_request(request); 1985 trace_cdns3_ep_queue(priv_req); 
1986 1987 if (priv_dev->dev_ver < DEV_VER_V2) { 1988 ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep, 1989 priv_req); 1990 1991 if (ret == EINPROGRESS) 1992 return 0; 1993 } 1994 1995 ret = cdns3_prepare_aligned_request_buf(priv_req); 1996 if (ret < 0) 1997 return ret; 1998 1999 ret = usb_gadget_map_request(&priv_dev->gadget, request, 2000 usb_endpoint_dir_in(ep->desc)); 2001 if (ret) 2002 return ret; 2003 2004 list_add_tail(&request->list, &priv_ep->deferred_req_list); 2005 2006 /* 2007 * If hardware endpoint configuration has not been set yet then 2008 * just queue request in deferred list. Transfer will be started in 2009 * cdns3_set_hw_configuration. 2010 */ 2011 if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) && 2012 !(priv_ep->flags & EP_STALL_PENDING)) 2013 cdns3_start_all_request(priv_dev, priv_ep); 2014 2015 return 0; 2016} 2017 2018static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 2019 gfp_t gfp_flags) 2020{ 2021 struct usb_request *zlp_request; 2022 struct cdns3_endpoint *priv_ep; 2023 struct cdns3_device *priv_dev; 2024 unsigned long flags; 2025 int ret; 2026 2027 if (!request || !ep) 2028 return -EINVAL; 2029 2030 priv_ep = ep_to_cdns3_ep(ep); 2031 priv_dev = priv_ep->cdns3_dev; 2032 2033 spin_lock_irqsave(&priv_dev->lock, flags); 2034 2035 ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags); 2036 2037 if (ret == 0 && request->zero && request->length && 2038 (request->length % ep->maxpacket == 0)) { 2039 struct cdns3_request *priv_req; 2040 2041 zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC); 2042 zlp_request->buf = priv_dev->zlp_buf; 2043 zlp_request->length = 0; 2044 2045 priv_req = to_cdns3_request(zlp_request); 2046 priv_req->flags |= REQUEST_ZLP; 2047 2048 dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n", 2049 priv_ep->name); 2050 ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags); 2051 } 2052 2053 spin_unlock_irqrestore(&priv_dev->lock, flags); 2054 return ret; 2055} 

/**
 * cdns3_gadget_ep_dequeue Remove request from transfer queue
 * @ep: endpoint object associated with request
 * @request: request object
 *
 * Returns 0 on success, error code elsewhere
 */
int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
			    struct usb_request *request)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *req, *req_temp;
	struct cdns3_request *priv_req;
	struct cdns3_trb *link_trb;
	unsigned long flags;
	int ret = 0;

	if (!ep || !request || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&priv_dev->lock, flags);

	priv_req = to_cdns3_request(request);

	trace_cdns3_ep_dequeue(priv_req);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	/* The request may sit on either the pending or the deferred list. */
	list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
				 list) {
		if (request == req)
			goto found;
	}

	list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
				 list) {
		if (request == req)
			goto found;
	}

	goto not_found;

found:

	/* If WA1 deferred this request's first TRB, restore its cycle bit. */
	if (priv_ep->wa1_trb == priv_req->trb)
		cdns3_wa1_restore_cycle_bit(priv_ep);

	link_trb = priv_req->trb;
	cdns3_move_deq_to_next_trb(priv_req);
	cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);

	/* Update ring: repurpose the dequeued request's first TRB as a
	 * LINK TRB that jumps over the removed TD to the next deferred one.
	 */
	request = cdns3_next_request(&priv_ep->deferred_req_list);
	if (request) {
		priv_req = to_cdns3_request(request);

		link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
					      (priv_req->start_trb * TRB_SIZE));
		link_trb->control = (link_trb->control & TRB_CYCLE) |
				    TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
	} else {
		priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
	}

not_found:
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}

/**
 * __cdns3_gadget_ep_set_halt Sets
stall on selected endpoint 2129 * Should be called after acquiring spin_lock and selecting ep 2130 * @ep: endpoint object to set stall on. 2131 */ 2132void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep) 2133{ 2134 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2135 2136 trace_cdns3_halt(priv_ep, 1, 0); 2137 2138 if (!(priv_ep->flags & EP_STALLED)) { 2139 u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts); 2140 2141 if (!(ep_sts_reg & EP_STS_DBUSY)) 2142 cdns3_ep_stall_flush(priv_ep); 2143 else 2144 priv_ep->flags |= EP_STALL_PENDING; 2145 } 2146} 2147 2148/** 2149 * __cdns3_gadget_ep_clear_halt Clears stall on selected endpoint 2150 * Should be called after acquiring spin_lock and selecting ep 2151 * @ep: endpoint object to clear stall on 2152 */ 2153int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) 2154{ 2155 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2156 struct usb_request *request; 2157 int ret = 0; 2158 int val; 2159 2160 trace_cdns3_halt(priv_ep, 0, 0); 2161 2162 writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2163 2164 /* wait for EPRST cleared */ 2165 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2166 !(val & EP_CMD_EPRST), 100); 2167 if (ret) 2168 return -EINVAL; 2169 2170 priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); 2171 2172 request = cdns3_next_request(&priv_ep->pending_req_list); 2173 2174 if (request) 2175 cdns3_rearm_transfer(priv_ep, 1); 2176 2177 cdns3_start_all_request(priv_dev, priv_ep); 2178 return ret; 2179} 2180 2181/** 2182 * cdns3_gadget_ep_set_halt Sets/clears stall on selected endpoint 2183 * @ep: endpoint object to set/clear stall on 2184 * @value: 1 for set stall, 0 for clear stall 2185 * 2186 * Returns 0 on success, error code elsewhere 2187 */ 2188int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value) 2189{ 2190 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2191 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2192 unsigned long flags; 2193 int ret = 0; 
2194 2195 if (!(priv_ep->flags & EP_ENABLED)) 2196 return -EPERM; 2197 2198 spin_lock_irqsave(&priv_dev->lock, flags); 2199 2200 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2201 2202 if (!value) { 2203 priv_ep->flags &= ~EP_WEDGE; 2204 ret = __cdns3_gadget_ep_clear_halt(priv_ep); 2205 } else { 2206 __cdns3_gadget_ep_set_halt(priv_ep); 2207 } 2208 2209 spin_unlock_irqrestore(&priv_dev->lock, flags); 2210 2211 return ret; 2212} 2213 2214extern const struct usb_ep_ops cdns3_gadget_ep0_ops; 2215 2216static const struct usb_ep_ops cdns3_gadget_ep_ops = { 2217 .enable = cdns3_gadget_ep_enable, 2218 .disable = cdns3_gadget_ep_disable, 2219 .alloc_request = cdns3_gadget_ep_alloc_request, 2220 .free_request = cdns3_gadget_ep_free_request, 2221 .queue = cdns3_gadget_ep_queue, 2222 .dequeue = cdns3_gadget_ep_dequeue, 2223 .set_halt = cdns3_gadget_ep_set_halt, 2224 .set_wedge = cdns3_gadget_ep_set_wedge, 2225}; 2226 2227/** 2228 * cdns3_gadget_get_frame Returns number of actual ITP frame 2229 * @gadget: gadget object 2230 * 2231 * Returns number of actual ITP frame 2232 */ 2233static int cdns3_gadget_get_frame(struct usb_gadget *gadget) 2234{ 2235 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2236 2237 return readl(&priv_dev->regs->usb_itpn); 2238} 2239 2240int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev) 2241{ 2242 enum usb_device_speed speed; 2243 2244 speed = cdns3_get_speed(priv_dev); 2245 2246 if (speed >= USB_SPEED_SUPER) 2247 return 0; 2248 2249 /* Start driving resume signaling to indicate remote wakeup. 
*/ 2250 writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf); 2251 2252 return 0; 2253} 2254 2255static int cdns3_gadget_wakeup(struct usb_gadget *gadget) 2256{ 2257 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2258 unsigned long flags; 2259 int ret = 0; 2260 2261 spin_lock_irqsave(&priv_dev->lock, flags); 2262 ret = __cdns3_gadget_wakeup(priv_dev); 2263 spin_unlock_irqrestore(&priv_dev->lock, flags); 2264 return ret; 2265} 2266 2267static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget, 2268 int is_selfpowered) 2269{ 2270 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2271 unsigned long flags; 2272 2273 spin_lock_irqsave(&priv_dev->lock, flags); 2274 priv_dev->is_selfpowered = !!is_selfpowered; 2275 spin_unlock_irqrestore(&priv_dev->lock, flags); 2276 return 0; 2277} 2278 2279static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on) 2280{ 2281 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2282 2283 if (is_on) 2284 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf); 2285 else 2286 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); 2287 2288 return 0; 2289} 2290 2291static void cdns3_gadget_config(struct cdns3_device *priv_dev) 2292{ 2293 struct cdns3_usb_regs __iomem *regs = priv_dev->regs; 2294 u32 reg; 2295 2296 cdns3_ep0_config(priv_dev); 2297 2298 /* enable interrupts for endpoint 0 (in and out) */ 2299 writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, ®s->ep_ien); 2300 2301 /* 2302 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1 2303 * revision of controller. 2304 */ 2305 if (priv_dev->dev_ver == DEV_VER_TI_V1) { 2306 reg = readl(®s->dbg_link1); 2307 2308 reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK; 2309 reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) | 2310 DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET; 2311 writel(reg, ®s->dbg_link1); 2312 } 2313 2314 /* 2315 * By default some platforms has set protected access to memory. 
2316 * This cause problem with cache, so driver restore non-secure 2317 * access to memory. 2318 */ 2319 reg = readl(®s->dma_axi_ctrl); 2320 reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) | 2321 DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE); 2322 writel(reg, ®s->dma_axi_ctrl); 2323 2324 /* enable generic interrupt*/ 2325 writel(USB_IEN_INIT, ®s->usb_ien); 2326 writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, ®s->usb_conf); 2327 2328 /* Set the Fast access bit */ 2329 writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr); 2330 2331 cdns3_configure_dmult(priv_dev, NULL); 2332 2333 cdns3_gadget_pullup(&priv_dev->gadget, 1); 2334} 2335 2336/** 2337 * cdns3_gadget_udc_start Gadget start 2338 * @gadget: gadget object 2339 * @driver: driver which operates on this gadget 2340 * 2341 * Returns 0 on success, error code elsewhere 2342 */ 2343static int cdns3_gadget_udc_start(struct usb_gadget *gadget, 2344 struct usb_gadget_driver *driver) 2345{ 2346 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2347 unsigned long flags; 2348 2349 spin_lock_irqsave(&priv_dev->lock, flags); 2350 priv_dev->gadget_driver = driver; 2351 cdns3_gadget_udc_set_speed(gadget, gadget->max_speed); 2352 cdns3_gadget_config(priv_dev); 2353 spin_unlock_irqrestore(&priv_dev->lock, flags); 2354 return 0; 2355} 2356 2357/** 2358 * cdns3_gadget_udc_stop Stops gadget 2359 * @gadget: gadget object 2360 * 2361 * Returns 0 2362 */ 2363static int cdns3_gadget_udc_stop(struct usb_gadget *gadget) 2364{ 2365 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2366 struct cdns3_endpoint *priv_ep; 2367 u32 bEndpointAddress; 2368 struct usb_ep *ep; 2369 int ret = 0; 2370 int val; 2371 2372 priv_dev->gadget_driver = NULL; 2373 2374 priv_dev->onchip_used_size = 0; 2375 priv_dev->out_mem_is_allocated = 0; 2376 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 2377 2378 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { 2379 priv_ep = ep_to_cdns3_ep(ep); 2380 bEndpointAddress = 
priv_ep->num | priv_ep->dir; 2381 cdns3_select_ep(priv_dev, bEndpointAddress); 2382 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2383 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2384 !(val & EP_CMD_EPRST), 100); 2385 } 2386 2387 /* disable interrupt for device */ 2388 writel(0, &priv_dev->regs->usb_ien); 2389 writel(0, &priv_dev->regs->usb_pwr); 2390 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); 2391 2392 return ret; 2393} 2394 2395static void cdns3_gadget_udc_set_speed(struct usb_gadget *gadget, 2396 enum usb_device_speed speed) 2397{ 2398 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2399 2400 switch (speed) { 2401 case USB_SPEED_FULL: 2402 writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf); 2403 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); 2404 break; 2405 case USB_SPEED_HIGH: 2406 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); 2407 break; 2408 case USB_SPEED_SUPER: 2409 break; 2410 default: 2411 dev_err(priv_dev->dev, "invalid speed parameter %d\n", speed); 2412 } 2413 2414 priv_dev->gadget.speed = speed; 2415} 2416 2417static const struct usb_gadget_ops cdns3_gadget_ops = { 2418 .get_frame = cdns3_gadget_get_frame, 2419 .wakeup = cdns3_gadget_wakeup, 2420 .set_selfpowered = cdns3_gadget_set_selfpowered, 2421 .pullup = cdns3_gadget_pullup, 2422 .udc_start = cdns3_gadget_udc_start, 2423 .udc_stop = cdns3_gadget_udc_stop, 2424 .match_ep = cdns3_gadget_match_ep, 2425 .udc_set_speed = cdns3_gadget_udc_set_speed, 2426}; 2427 2428static void cdns3_free_all_eps(struct cdns3_device *priv_dev) 2429{ 2430 int i; 2431 2432 /* ep0 OUT point to ep0 IN. 
*/ 2433 priv_dev->eps[16] = NULL; 2434 2435 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) 2436 if (priv_dev->eps[i]) { 2437 cdns3_free_trb_pool(priv_dev->eps[i]); 2438 devm_kfree(priv_dev->dev, priv_dev->eps[i]); 2439 } 2440} 2441 2442/** 2443 * cdns3_init_eps Initializes software endpoints of gadget 2444 * @cdns3: extended gadget object 2445 * 2446 * Returns 0 on success, error code elsewhere 2447 */ 2448static int cdns3_init_eps(struct cdns3_device *priv_dev) 2449{ 2450 u32 ep_enabled_reg, iso_ep_reg; 2451 struct cdns3_endpoint *priv_ep; 2452 int ep_dir, ep_number; 2453 u32 ep_mask; 2454 int ret = 0; 2455 int i; 2456 2457 /* Read it from USB_CAP3 to USB_CAP5 */ 2458 ep_enabled_reg = readl(&priv_dev->regs->usb_cap3); 2459 iso_ep_reg = readl(&priv_dev->regs->usb_cap4); 2460 2461 dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n"); 2462 2463 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) { 2464 ep_dir = i >> 4; /* i div 16 */ 2465 ep_number = i & 0xF; /* i % 16 */ 2466 ep_mask = BIT(i); 2467 2468 if (!(ep_enabled_reg & ep_mask)) 2469 continue; 2470 2471 if (ep_dir && !ep_number) { 2472 priv_dev->eps[i] = priv_dev->eps[0]; 2473 continue; 2474 } 2475 2476 priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep), 2477 GFP_KERNEL); 2478 if (!priv_ep) { 2479 ret = -ENOMEM; 2480 goto err; 2481 } 2482 2483 /* set parent of endpoint object */ 2484 priv_ep->cdns3_dev = priv_dev; 2485 priv_dev->eps[i] = priv_ep; 2486 priv_ep->num = ep_number; 2487 priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT; 2488 2489 if (!ep_number) { 2490 ret = cdns3_init_ep0(priv_dev, priv_ep); 2491 if (ret) { 2492 dev_err(priv_dev->dev, "Failed to init ep0\n"); 2493 goto err; 2494 } 2495 } else { 2496 snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s", 2497 ep_number, !!ep_dir ? 
"in" : "out"); 2498 priv_ep->endpoint.name = priv_ep->name; 2499 2500 usb_ep_set_maxpacket_limit(&priv_ep->endpoint, 2501 CDNS3_EP_MAX_PACKET_LIMIT); 2502 priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS; 2503 priv_ep->endpoint.ops = &cdns3_gadget_ep_ops; 2504 if (ep_dir) 2505 priv_ep->endpoint.caps.dir_in = 1; 2506 else 2507 priv_ep->endpoint.caps.dir_out = 1; 2508 2509 if (iso_ep_reg & ep_mask) 2510 priv_ep->endpoint.caps.type_iso = 1; 2511 2512 priv_ep->endpoint.caps.type_bulk = 1; 2513 priv_ep->endpoint.caps.type_int = 1; 2514 2515 list_add_tail(&priv_ep->endpoint.ep_list, 2516 &priv_dev->gadget.ep_list); 2517 } 2518 2519 priv_ep->flags = 0; 2520 2521 dev_info(priv_dev->dev, "Initialized %s support: %s %s\n", 2522 priv_ep->name, 2523 priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "", 2524 priv_ep->endpoint.caps.type_iso ? "ISO" : ""); 2525 2526 INIT_LIST_HEAD(&priv_ep->pending_req_list); 2527 INIT_LIST_HEAD(&priv_ep->deferred_req_list); 2528 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); 2529 } 2530 2531 return 0; 2532err: 2533 cdns3_free_all_eps(priv_dev); 2534 return -ENOMEM; 2535} 2536 2537void cdns3_gadget_exit(struct cdns3 *cdns) 2538{ 2539 struct cdns3_device *priv_dev; 2540 2541 priv_dev = cdns->gadget_dev; 2542 2543 usb_del_gadget_udc(&priv_dev->gadget); 2544 2545 cdns3_free_all_eps(priv_dev); 2546 2547 while (!list_empty(&priv_dev->aligned_buf_list)) { 2548 struct cdns3_aligned_buf *buf; 2549 2550 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); 2551 dma_free_coherent(buf->buf); 2552 2553 list_del(&buf->list); 2554 kfree(buf); 2555 } 2556 2557 dma_free_coherent(priv_dev->setup_buf); 2558 2559 kfree(priv_dev->zlp_buf); 2560 kfree(priv_dev); 2561 cdns->gadget_dev = NULL; 2562 cdns3_drd_switch_gadget(cdns, 0); 2563} 2564 2565static int cdns3_gadget_start(struct cdns3 *cdns) 2566{ 2567 struct cdns3_device *priv_dev; 2568 u32 max_speed; 2569 int ret; 2570 2571 priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); 2572 if (!priv_dev) 2573 return 
-ENOMEM; 2574 2575 cdns->gadget_dev = priv_dev; 2576 priv_dev->sysdev = cdns->dev; 2577 priv_dev->dev = cdns->dev; 2578 priv_dev->regs = cdns->dev_regs; 2579 2580 dev_read_u32(priv_dev->dev, "cdns,on-chip-buff-size", 2581 &priv_dev->onchip_buffers); 2582 2583 if (priv_dev->onchip_buffers <= 0) { 2584 u32 reg = readl(&priv_dev->regs->usb_cap2); 2585 2586 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); 2587 } 2588 2589 if (!priv_dev->onchip_buffers) 2590 priv_dev->onchip_buffers = 256; 2591 2592 max_speed = usb_get_maximum_speed(dev_ofnode(cdns->dev)); 2593 2594 /* Check the maximum_speed parameter */ 2595 switch (max_speed) { 2596 case USB_SPEED_FULL: 2597 /* fall through */ 2598 case USB_SPEED_HIGH: 2599 /* fall through */ 2600 case USB_SPEED_SUPER: 2601 break; 2602 default: 2603 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", 2604 max_speed); 2605 /* fall through */ 2606 case USB_SPEED_UNKNOWN: 2607 /* default to superspeed */ 2608 max_speed = USB_SPEED_SUPER; 2609 break; 2610 } 2611 2612 /* fill gadget fields */ 2613 priv_dev->gadget.max_speed = max_speed; 2614 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 2615 priv_dev->gadget.ops = &cdns3_gadget_ops; 2616 priv_dev->gadget.name = "cdns3-gadget"; 2617#ifndef __UBOOT__ 2618 priv_dev->gadget.name = "usb-ss-gadget"; 2619 priv_dev->gadget.sg_supported = 1; 2620 priv_dev->gadget.quirk_avoids_skb_reserve = 1; 2621#endif 2622 2623 spin_lock_init(&priv_dev->lock); 2624 INIT_WORK(&priv_dev->pending_status_wq, 2625 cdns3_pending_setup_status_handler); 2626 2627 /* initialize endpoint container */ 2628 INIT_LIST_HEAD(&priv_dev->gadget.ep_list); 2629 INIT_LIST_HEAD(&priv_dev->aligned_buf_list); 2630 2631 ret = cdns3_init_eps(priv_dev); 2632 if (ret) { 2633 dev_err(priv_dev->dev, "Failed to create endpoints\n"); 2634 goto err1; 2635 } 2636 2637 /* allocate memory for setup packet buffer */ 2638 priv_dev->setup_buf = 2639 dma_alloc_coherent(8, (unsigned long *)&priv_dev->setup_dma); 2640 if 
(!priv_dev->setup_buf) { 2641 ret = -ENOMEM; 2642 goto err2; 2643 } 2644 2645 priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6); 2646 2647 dev_dbg(priv_dev->dev, "Device Controller version: %08x\n", 2648 readl(&priv_dev->regs->usb_cap6)); 2649 dev_dbg(priv_dev->dev, "USB Capabilities:: %08x\n", 2650 readl(&priv_dev->regs->usb_cap1)); 2651 dev_dbg(priv_dev->dev, "On-Chip memory cnfiguration: %08x\n", 2652 readl(&priv_dev->regs->usb_cap2)); 2653 2654 priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver); 2655 2656 priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL); 2657 if (!priv_dev->zlp_buf) { 2658 ret = -ENOMEM; 2659 goto err3; 2660 } 2661 2662 /* add USB gadget device */ 2663 ret = usb_add_gadget_udc((struct device *)priv_dev->dev, 2664 &priv_dev->gadget); 2665 if (ret < 0) { 2666 dev_err(priv_dev->dev, 2667 "Failed to register USB device controller\n"); 2668 goto err4; 2669 } 2670 2671 return 0; 2672err4: 2673 kfree(priv_dev->zlp_buf); 2674err3: 2675 dma_free_coherent(priv_dev->setup_buf); 2676err2: 2677 cdns3_free_all_eps(priv_dev); 2678err1: 2679 cdns->gadget_dev = NULL; 2680 return ret; 2681} 2682 2683static int __cdns3_gadget_init(struct cdns3 *cdns) 2684{ 2685 int ret = 0; 2686 2687 cdns3_drd_switch_gadget(cdns, 1); 2688 2689 ret = cdns3_gadget_start(cdns); 2690 if (ret) 2691 return ret; 2692 2693 return 0; 2694} 2695 2696static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup) 2697{ 2698 struct cdns3_device *priv_dev = cdns->gadget_dev; 2699 2700 cdns3_disconnect_gadget(priv_dev); 2701 2702 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 2703 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); 2704 cdns3_hw_reset_eps_config(priv_dev); 2705 2706 /* disable interrupt for device */ 2707 writel(0, &priv_dev->regs->usb_ien); 2708 2709 cdns3_gadget_pullup(&priv_dev->gadget, 0); 2710 2711 return 0; 2712} 2713 2714static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated) 2715{ 2716 struct cdns3_device *priv_dev = 
cdns->gadget_dev; 2717 2718 if (!priv_dev->gadget_driver) 2719 return 0; 2720 2721 cdns3_gadget_config(priv_dev); 2722 2723 return 0; 2724} 2725 2726/** 2727 * cdns3_gadget_init - initialize device structure 2728 * 2729 * cdns: cdns3 instance 2730 * 2731 * This function initializes the gadget. 2732 */ 2733int cdns3_gadget_init(struct cdns3 *cdns) 2734{ 2735 struct cdns3_role_driver *rdrv; 2736 2737 rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); 2738 if (!rdrv) 2739 return -ENOMEM; 2740 2741 rdrv->start = __cdns3_gadget_init; 2742 rdrv->stop = cdns3_gadget_exit; 2743 rdrv->suspend = cdns3_gadget_suspend; 2744 rdrv->resume = cdns3_gadget_resume; 2745 rdrv->state = CDNS3_ROLE_STATE_INACTIVE; 2746 rdrv->name = "gadget"; 2747 cdns->roles[USB_ROLE_DEVICE] = rdrv; 2748 2749 return 0; 2750} 2751 2752/** 2753 * cdns3_gadget_uboot_handle_interrupt - handle cdns3 gadget interrupt 2754 * @cdns: pointer to struct cdns3 2755 * 2756 * Handles ep0 and gadget interrupt 2757 */ 2758static void cdns3_gadget_uboot_handle_interrupt(struct cdns3 *cdns) 2759{ 2760 int ret = cdns3_device_irq_handler(0, cdns); 2761 2762 if (ret == IRQ_WAKE_THREAD) 2763 cdns3_device_thread_irq_handler(0, cdns); 2764} 2765 2766int dm_usb_gadget_handle_interrupts(struct udevice *dev) 2767{ 2768 struct cdns3 *cdns = dev_get_priv(dev); 2769 2770 cdns3_gadget_uboot_handle_interrupt(cdns); 2771 2772 return 0; 2773} 2774