1/* linux/drivers/usb/gadget/s3c-hsotg.c 2 * 3 * Copyright 2008 Openmoko, Inc. 4 * Copyright 2008 Simtec Electronics 5 * Ben Dooks <ben@simtec.co.uk> 6 * http://armlinux.simtec.co.uk/ 7 * 8 * S3C USB2.0 High-speed / OtG driver 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License version 2 as 12 * published by the Free Software Foundation. 13*/ 14 15#include <linux/kernel.h> 16#include <linux/module.h> 17#include <linux/spinlock.h> 18#include <linux/interrupt.h> 19#include <linux/platform_device.h> 20#include <linux/dma-mapping.h> 21#include <linux/debugfs.h> 22#include <linux/seq_file.h> 23#include <linux/delay.h> 24#include <linux/io.h> 25#include <linux/slab.h> 26#include <linux/clk.h> 27 28#include <linux/usb/ch9.h> 29#include <linux/usb/gadget.h> 30 31#include <mach/map.h> 32 33#include <plat/regs-usb-hsotg-phy.h> 34#include <plat/regs-usb-hsotg.h> 35#include <mach/regs-sys.h> 36#include <plat/udc-hs.h> 37#include <plat/cpu.h> 38 39#define DMA_ADDR_INVALID (~((dma_addr_t)0)) 40 41/* EP0_MPS_LIMIT 42 * 43 * Unfortunately there seems to be a limit of the amount of data that can 44 * be transfered by IN transactions on EP0. This is either 127 bytes or 3 45 * packets (which practially means 1 packet and 63 bytes of data) when the 46 * MPS is set to 64. 47 * 48 * This means if we are wanting to move >127 bytes of data, we need to 49 * split the transactions up, but just doing one packet at a time does 50 * not work (this may be an implicit DATA0 PID on first packet of the 51 * transaction) and doing 2 packets is outside the controller's limits. 52 * 53 * If we try to lower the MPS size for EP0, then no transfers work properly 54 * for EP0, and the system will fail basic enumeration. As no cause for this 55 * has currently been found, we cannot support any large IN transfers for 56 * EP0. 
 */
#define EP0_MPS_LIMIT	64

struct s3c_hsotg;
struct s3c_hsotg_req;

/**
 * struct s3c_hsotg_ep - driver endpoint definition.
 * @ep: The gadget layer representation of the endpoint.
 * @queue: Queue of requests for this endpoint.
 * @parent: Reference back to the parent device structure.
 * @req: The current request that the endpoint is processing. This is
 *       used to indicate an request has been loaded onto the endpoint
 *       and has yet to be completed (maybe due to data move, or simply
 *       awaiting an ack from the core all the data has been completed).
 * @debugfs: File entry for debugfs file for this endpoint.
 * @lock: State lock to protect contents of endpoint.
 * @dir_in: Set to true if this endpoint is of the IN direction, which
 *	    means that it is sending data to the Host.
 * @index: The index for the endpoint registers.
 * @name: The name array passed to the USB core.
 * @halted: Set if the endpoint has been halted.
 * @periodic: Set if this is a periodic ep, such as Interrupt
 * @sent_zlp: Set if we've sent a zero-length packet.
 * @total_data: The total number of data bytes done.
 * @fifo_size: The size of the FIFO (for periodic IN endpoints)
 * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
 * @last_load: The offset of data for the last start of request.
 * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
 *
 * This is the driver's state for each registered endpoint, allowing it
 * to keep track of transactions that need doing. Each endpoint has a
 * lock to protect the state, to try and avoid using an overall lock
 * for the host controller as much as possible.
 *
 * For periodic IN endpoints, we have fifo_size and fifo_load to try
 * and keep track of the amount of data in the periodic FIFO for each
 * of these as we don't have a status register that tells us how much
 * is in each of them. (note, this may actually be useless information
 * as in shared-fifo mode periodic in acts like a single-frame packet
 * buffer than a fifo)
 */
struct s3c_hsotg_ep {
	struct usb_ep		ep;
	struct list_head	queue;
	struct s3c_hsotg	*parent;
	struct s3c_hsotg_req	*req;
	struct dentry		*debugfs;

	spinlock_t		lock;

	unsigned long		total_data;
	unsigned int		size_loaded;
	unsigned int		last_load;
	unsigned int		fifo_load;
	unsigned short		fifo_size;

	unsigned char		dir_in;
	unsigned char		index;

	unsigned int		halted:1;
	unsigned int		periodic:1;
	unsigned int		sent_zlp:1;

	char			name[10];
};

#define S3C_HSOTG_EPS	(8+1)	/* limit to 9 for the moment */

/**
 * struct s3c_hsotg - driver state.
 * @dev: The parent device supplied to the probe function
 * @driver: USB gadget driver
 * @plat: The platform specific configuration data.
 * @regs: The memory area mapped for accessing registers.
 * @regs_res: The resource that was allocated when claiming register space.
 * @irq: The IRQ number we are using
 * @clk: The clock feeding the OTG block.
 * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
 * @debug_root: root directrory for debugfs.
 * @debug_file: main status file for debugfs.
 * @debug_fifo: FIFO status file for debugfs.
 * @ep0_reply: Request used for ep0 reply.
 * @ep0_buff: Buffer for EP0 reply data, if needed.
 * @ctrl_buff: Buffer for EP0 control requests.
 * @ctrl_req: Request for EP0 control packets.
 * @gadget: The gadget-layer representation of this device.
 * @eps: The endpoints being supplied to the gadget framework
 */
struct s3c_hsotg {
	struct device		 *dev;
	struct usb_gadget_driver *driver;
	struct s3c_hsotg_plat	 *plat;

	void __iomem		*regs;
	struct resource		*regs_res;
	int			irq;
	struct clk		*clk;

	unsigned int		dedicated_fifos:1;

	struct dentry		*debug_root;
	struct dentry		*debug_file;
	struct dentry		*debug_fifo;

	struct usb_request	*ep0_reply;
	struct usb_request	*ctrl_req;
	u8			ep0_buff[8];
	u8			ctrl_buff[8];

	struct usb_gadget	gadget;
	/* flexible array member; sized at allocation (S3C_HSOTG_EPS entries) */
	struct s3c_hsotg_ep	eps[];
};

/**
 * struct s3c_hsotg_req - data transfer request
 * @req: The USB gadget request
 * @queue: The list of requests for the endpoint this is queued for.
 * @in_progress: Has already had size/packets written to core
 * @mapped: DMA buffer for this request has been mapped via dma_map_single().
 */
struct s3c_hsotg_req {
	struct usb_request	req;
	struct list_head	queue;
	unsigned char		in_progress;
	unsigned char		mapped;
};

/* conversion functions: recover our wrapper structures from the generic
 * gadget-layer objects embedded inside them. */
static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
{
	return container_of(req, struct s3c_hsotg_req, req);
}

static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct s3c_hsotg_ep, ep);
}

static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
{
	return container_of(gadget, struct s3c_hsotg, gadget);
}

/* read-modify-write helpers to set (orr) or clear (bic) register bits */
static inline void __orr32(void __iomem *ptr, u32 val)
{
	writel(readl(ptr) | val, ptr);
}

static inline void __bic32(void __iomem *ptr, u32 val)
{
	writel(readl(ptr) & ~val, ptr);
}

/* forward declaration of functions */
static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);

/**
 * using_dma - return the DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using DMA.
218 * 219 * Currently, we have the DMA support code worked into everywhere 220 * that needs it, but the AMBA DMA implementation in the hardware can 221 * only DMA from 32bit aligned addresses. This means that gadgets such 222 * as the CDC Ethernet cannot work as they often pass packets which are 223 * not 32bit aligned. 224 * 225 * Unfortunately the choice to use DMA or not is global to the controller 226 * and seems to be only settable when the controller is being put through 227 * a core reset. This means we either need to fix the gadgets to take 228 * account of DMA alignment, or add bounce buffers (yuerk). 229 * 230 * Until this issue is sorted out, we always return 'false'. 231 */ 232static inline bool using_dma(struct s3c_hsotg *hsotg) 233{ 234 return false; /* support is not complete */ 235} 236 237/** 238 * s3c_hsotg_en_gsint - enable one or more of the general interrupt 239 * @hsotg: The device state 240 * @ints: A bitmask of the interrupts to enable 241 */ 242static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints) 243{ 244 u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK); 245 u32 new_gsintmsk; 246 247 new_gsintmsk = gsintmsk | ints; 248 249 if (new_gsintmsk != gsintmsk) { 250 dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk); 251 writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK); 252 } 253} 254 255/** 256 * s3c_hsotg_disable_gsint - disable one or more of the general interrupt 257 * @hsotg: The device state 258 * @ints: A bitmask of the interrupts to enable 259 */ 260static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints) 261{ 262 u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK); 263 u32 new_gsintmsk; 264 265 new_gsintmsk = gsintmsk & ~ints; 266 267 if (new_gsintmsk != gsintmsk) 268 writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK); 269} 270 271/** 272 * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq 273 * @hsotg: The device state 274 * @ep: The endpoint index 275 * @dir_in: True if direction is in. 
276 * @en: The enable value, true to enable 277 * 278 * Set or clear the mask for an individual endpoint's interrupt 279 * request. 280 */ 281static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg, 282 unsigned int ep, unsigned int dir_in, 283 unsigned int en) 284{ 285 unsigned long flags; 286 u32 bit = 1 << ep; 287 u32 daint; 288 289 if (!dir_in) 290 bit <<= 16; 291 292 local_irq_save(flags); 293 daint = readl(hsotg->regs + S3C_DAINTMSK); 294 if (en) 295 daint |= bit; 296 else 297 daint &= ~bit; 298 writel(daint, hsotg->regs + S3C_DAINTMSK); 299 local_irq_restore(flags); 300} 301 302/** 303 * s3c_hsotg_init_fifo - initialise non-periodic FIFOs 304 * @hsotg: The device instance. 305 */ 306static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg) 307{ 308 unsigned int ep; 309 unsigned int addr; 310 unsigned int size; 311 int timeout; 312 u32 val; 313 314 /* the ryu 2.6.24 release ahs 315 writel(0x1C0, hsotg->regs + S3C_GRXFSIZ); 316 writel(S3C_GNPTXFSIZ_NPTxFStAddr(0x200) | 317 S3C_GNPTXFSIZ_NPTxFDep(0x1C0), 318 hsotg->regs + S3C_GNPTXFSIZ); 319 */ 320 321 /* set FIFO sizes to 2048/1024 */ 322 323 writel(2048, hsotg->regs + S3C_GRXFSIZ); 324 writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) | 325 S3C_GNPTXFSIZ_NPTxFDep(1024), 326 hsotg->regs + S3C_GNPTXFSIZ); 327 328 /* arange all the rest of the TX FIFOs, as some versions of this 329 * block have overlapping default addresses. This also ensures 330 * that if the settings have been changed, then they are set to 331 * known values. */ 332 333 /* start at the end of the GNPTXFSIZ, rounded up */ 334 addr = 2048 + 1024; 335 size = 768; 336 337 /* currently we allocate TX FIFOs for all possible endpoints, 338 * and assume that they are all the same size. 
*/ 339 340 for (ep = 0; ep <= 15; ep++) { 341 val = addr; 342 val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT; 343 addr += size; 344 345 writel(val, hsotg->regs + S3C_DPTXFSIZn(ep)); 346 } 347 348 /* according to p428 of the design guide, we need to ensure that 349 * all fifos are flushed before continuing */ 350 351 writel(S3C_GRSTCTL_TxFNum(0x10) | S3C_GRSTCTL_TxFFlsh | 352 S3C_GRSTCTL_RxFFlsh, hsotg->regs + S3C_GRSTCTL); 353 354 /* wait until the fifos are both flushed */ 355 timeout = 100; 356 while (1) { 357 val = readl(hsotg->regs + S3C_GRSTCTL); 358 359 if ((val & (S3C_GRSTCTL_TxFFlsh | S3C_GRSTCTL_RxFFlsh)) == 0) 360 break; 361 362 if (--timeout == 0) { 363 dev_err(hsotg->dev, 364 "%s: timeout flushing fifos (GRSTCTL=%08x)\n", 365 __func__, val); 366 } 367 368 udelay(1); 369 } 370 371 dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout); 372} 373 374/** 375 * @ep: USB endpoint to allocate request for. 376 * @flags: Allocation flags 377 * 378 * Allocate a new USB request structure appropriate for the specified endpoint 379 */ 380static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, 381 gfp_t flags) 382{ 383 struct s3c_hsotg_req *req; 384 385 req = kzalloc(sizeof(struct s3c_hsotg_req), flags); 386 if (!req) 387 return NULL; 388 389 INIT_LIST_HEAD(&req->queue); 390 391 req->req.dma = DMA_ADDR_INVALID; 392 return &req->req; 393} 394 395/** 396 * is_ep_periodic - return true if the endpoint is in periodic mode. 397 * @hs_ep: The endpoint to query. 398 * 399 * Returns true if the endpoint is in periodic mode, meaning it is being 400 * used for an Interrupt or ISO transfer. 401 */ 402static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep) 403{ 404 return hs_ep->periodic; 405} 406 407/** 408 * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request 409 * @hsotg: The device state. 410 * @hs_ep: The endpoint for the request 411 * @hs_req: The request being processed. 
 *
 * This is the reverse of s3c_hsotg_map_dma(), called for the completion
 * of a request to ensure the buffer is ready for access by the caller.
*/
static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *hs_ep,
				struct s3c_hsotg_req *hs_req)
{
	struct usb_request *req = &hs_req->req;
	enum dma_data_direction dir;

	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	/* ignore this if we're not moving any data */
	if (hs_req->req.length == 0)
		return;

	if (hs_req->mapped) {
		/* we mapped this, so unmap and remove the dma */

		dma_unmap_single(hsotg->dev, req->dma, req->length, dir);

		req->dma = DMA_ADDR_INVALID;
		hs_req->mapped = 0;
	} else {
		/* caller supplied the mapping; just hand it back to the CPU */
		dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
	}
}

/**
 * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
 * @hsotg: The controller state.
 * @hs_ep: The endpoint we're going to write for.
 * @hs_req: The request to write data for.
 *
 * This is called when the TxFIFO has some space in it to hold a new
 * transmission and we have something to give it. The actual setup of
 * the data size is done elsewhere, so all we have to do is to actually
 * write the data.
 *
 * The return value is zero if there is more space (or nothing was done)
 * otherwise -ENOSPC is returned if the FIFO space was used up.
 *
 * This routine is only needed for PIO
*/
static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *hs_ep,
				struct s3c_hsotg_req *hs_req)
{
	bool periodic = is_ep_periodic(hs_ep);
	u32 gnptxsts = readl(hsotg->regs + S3C_GNPTXSTS);
	int buf_pos = hs_req->req.actual;
	int to_write = hs_ep->size_loaded;
	void *data;
	int can_write;
	int pkt_round;

	/* amount still outstanding from what was programmed at start_req */
	to_write -= (buf_pos - hs_ep->last_load);

	/* if there's nothing to write, get out early */
	if (to_write == 0)
		return 0;

	if (periodic && !hsotg->dedicated_fifos) {
		u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
		int size_left;
		int size_done;

		/* work out how much data was loaded so we can calculate
		 * how much data is left in the fifo. */

		size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);

		/* if shared fifo, we cannot write anything until the
		 * previous data has been completely sent.
		 */
		if (hs_ep->fifo_load != 0) {
			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
			return -ENOSPC;
		}

		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
			__func__, size_left,
			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);

		/* how much of the data has moved */
		size_done = hs_ep->size_loaded - size_left;

		/* how much data is left in the fifo */
		can_write = hs_ep->fifo_load - size_done;
		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
			__func__, can_write);

		can_write = hs_ep->fifo_size - can_write;
		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
			__func__, can_write);

		if (can_write <= 0) {
			/* fifo full; ask to be told when it empties */
			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
			return -ENOSPC;
		}
	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
		/* dedicated fifo: DTXFSTS reports free space in 32bit words */
		can_write = readl(hsotg->regs + S3C_DTXFSTS(hs_ep->index));

		can_write &= 0xffff;
		can_write *= 4;
	} else {
		/* shared non-periodic fifo: need both a request-queue slot
		 * and fifo space */
		if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
			dev_dbg(hsotg->dev,
				"%s: no queue slots available (0x%08x)\n",
				__func__, gnptxsts);

			s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
			return -ENOSPC;
		}

		can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
		can_write *= 4;	/* fifo size is in 32bit quantities. */
	}

	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
		__func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);

	/* limit to 512 bytes of data, it seems at least on the non-periodic
	 * FIFO, requests of >512 cause the endpoint to get stuck with a
	 * fragment of the end of the transfer in it.
	 */
	if (can_write > 512)
		can_write = 512;

	/* limit the write to one max-packet size worth of data, but allow
	 * the transfer to return that it did not run out of fifo space
	 * doing it. */
	if (to_write > hs_ep->ep.maxpacket) {
		to_write = hs_ep->ep.maxpacket;

		s3c_hsotg_en_gsint(hsotg,
				   periodic ? S3C_GINTSTS_PTxFEmp :
				   S3C_GINTSTS_NPTxFEmp);
	}

	/* see if we can write data */

	if (to_write > can_write) {
		to_write = can_write;
		pkt_round = to_write % hs_ep->ep.maxpacket;

		/* Not sure, but we probably shouldn't be writing partial
		 * packets into the FIFO, so round the write down to an
		 * exact number of packets.
		 *
		 * Note, we do not currently check to see if we can ever
		 * write a full packet or not to the FIFO.
		 */

		if (pkt_round)
			to_write -= pkt_round;

		/* enable correct FIFO interrupt to alert us when there
		 * is more room left. */

		s3c_hsotg_en_gsint(hsotg,
				   periodic ? S3C_GINTSTS_PTxFEmp :
				   S3C_GINTSTS_NPTxFEmp);
	}

	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
		to_write, hs_req->req.length, can_write, buf_pos);

	if (to_write <= 0)
		return -ENOSPC;

	hs_req->req.actual = buf_pos + to_write;
	hs_ep->total_data += to_write;

	if (periodic)
		hs_ep->fifo_load += to_write;

	/* FIFO writes are in 32bit words; round up for the final word */
	to_write = DIV_ROUND_UP(to_write, 4);
	data = hs_req->req.buf + buf_pos;

	writesl(hsotg->regs + S3C_EPFIFO(hs_ep->index), data, to_write);

	return (to_write >= can_write) ? -ENOSPC : 0;
}

/**
 * get_ep_limit - get the maximum data length for this endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * so that transfers that are too long can be split.
 */
static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
{
	int index = hs_ep->index;
	unsigned maxsize;
	unsigned maxpkt;

	if (index != 0) {
		maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1;
		maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1;
	} else {
		/* EP0 has much smaller hardware limits */
		maxsize = 64+64;
		if (hs_ep->dir_in) {
			maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1;
		} else {
			maxpkt = 2;
		}
	}

	/* we made the constant loading easier above by using +1 */
	maxpkt--;
	maxsize--;

	/* constrain by packet count if maxpkts*pktsize is greater
	 * than the length register size. */

	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
		maxsize = maxpkt * hs_ep->ep.maxpacket;

	return maxsize;
}

/**
 * s3c_hsotg_start_req - start a USB request from an endpoint's queue
 * @hsotg: The controller state.
 * @hs_ep: The endpoint to process a request for
 * @hs_req: The request to start.
 * @continuing: True if we are doing more for the current request.
 *
 * Start the given request running by setting the endpoint registers
 * appropriately, and writing any data to the FIFOs.
 */
static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *hs_ep,
				struct s3c_hsotg_req *hs_req,
				bool continuing)
{
	struct usb_request *ureq = &hs_req->req;
	int index = hs_ep->index;
	int dir_in = hs_ep->dir_in;
	u32 epctrl_reg;
	u32 epsize_reg;
	u32 epsize;
	u32 ctrl;
	unsigned length;
	unsigned packets;
	unsigned maxreq;

	if (index != 0) {
		/* sanity: a non-EP0 endpoint must not start a new request
		 * while one is loaded, nor "continue" a different one */
		if (hs_ep->req && !continuing) {
			dev_err(hsotg->dev, "%s: active request\n", __func__);
			WARN_ON(1);
			return;
		} else if (hs_ep->req != hs_req && continuing) {
			dev_err(hsotg->dev,
				"%s: continue different req\n", __func__);
			WARN_ON(1);
			return;
		}
	}

	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
	epsize_reg = dir_in ? S3C_DIEPTSIZ(index) : S3C_DOEPTSIZ(index);

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
		__func__, readl(hsotg->regs + epctrl_reg), index,
		hs_ep->dir_in ? "in" : "out");

	length = ureq->length - ureq->actual;

	/* compiled-out verbose debug, kept for reference */
	if (0)
		dev_dbg(hsotg->dev,
			"REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
			ureq->buf, length, ureq->dma,
			ureq->no_interrupt, ureq->zero, ureq->short_not_ok);

	maxreq = get_ep_limit(hs_ep);
	if (length > maxreq) {
		int round = maxreq % hs_ep->ep.maxpacket;

		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
			__func__, length, maxreq, round);

		/* round down to multiple of packets */
		if (round)
			maxreq -= round;

		length = maxreq;
	}

	if (length)
		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
	else
		packets = 1;	/* send one packet if length is zero. */

	if (dir_in && index != 0)
		epsize = S3C_DxEPTSIZ_MC(1);
	else
		epsize = 0;

	if (index != 0 && ureq->zero) {
		/* test for the packets being exactly right for the
		 * transfer */

		if (length == (packets * hs_ep->ep.maxpacket))
			packets++;
	}

	epsize |= S3C_DxEPTSIZ_PktCnt(packets);
	epsize |= S3C_DxEPTSIZ_XferSize(length);

	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
		__func__, packets, length, ureq->length, epsize, epsize_reg);

	/* store the request as the current one we're doing */
	hs_ep->req = hs_req;

	/* write size / packets */
	writel(epsize, hsotg->regs + epsize_reg);

	ctrl = readl(hsotg->regs + epctrl_reg);

	if (ctrl & S3C_DxEPCTL_Stall) {
		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);

		/* not sure what we can do here, if it is EP0 then we should
		 * get this cleared once the endpoint has transmitted the
		 * STALL packet, otherwise it needs to be cleared by the
		 * host.
		 */
	}

	if (using_dma(hsotg)) {
		unsigned int dma_reg;

		/* write DMA address to control register, buffer already
		 * synced by s3c_hsotg_ep_queue(). */

		dma_reg = dir_in ? S3C_DIEPDMA(index) : S3C_DOEPDMA(index);
		writel(ureq->dma, hsotg->regs + dma_reg);

		dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
			__func__, ureq->dma, dma_reg);
	}

	ctrl |= S3C_DxEPCTL_EPEna;	/* ensure ep enabled */
	ctrl |= S3C_DxEPCTL_USBActEp;
	ctrl |= S3C_DxEPCTL_CNAK;	/* clear NAK set by core */

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	writel(ctrl, hsotg->regs + epctrl_reg);

	/* set these, it seems that DMA support increments past the end
	 * of the packet buffer so we need to calculate the length from
	 * this information. */
	hs_ep->size_loaded = length;
	hs_ep->last_load = ureq->actual;

	if (dir_in && !using_dma(hsotg)) {
		/* set these anyway, we may need them for non-periodic in */
		hs_ep->fifo_load = 0;

		s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	/* clear the INTknTXFEmpMsk when we start request, more as an aid
	 * to debugging to see what is going on. */
	if (dir_in)
		writel(S3C_DIEPMSK_INTknTXFEmpMsk,
		       hsotg->regs + S3C_DIEPINT(index));

	/* Note, trying to clear the NAK here causes problems with transmit
	 * on the S3C6400 ending up with the TXFIFO becoming full. */

	/* check ep is enabled */
	if (!(readl(hsotg->regs + epctrl_reg) & S3C_DxEPCTL_EPEna))
		dev_warn(hsotg->dev,
			 "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
			 index, readl(hsotg->regs + epctrl_reg));

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
		__func__, readl(hsotg->regs + epctrl_reg));
}

/**
 * s3c_hsotg_map_dma - map the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request is on.
 * @req: The request being processed.
 *
 * We've been asked to queue a request, so ensure that the memory buffer
 * is correctly setup for DMA. If we've been passed an extant DMA address
 * then ensure the buffer has been synced to memory. If our buffer has no
 * DMA memory, then we map the memory and mark our request to allow us to
 * cleanup on completion.
*/
static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
			     struct s3c_hsotg_ep *hs_ep,
			     struct usb_request *req)
{
	enum dma_data_direction dir;
	struct s3c_hsotg_req *hs_req = our_req(req);

	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	/* if the length is zero, ignore the DMA data */
	if (hs_req->req.length == 0)
		return 0;

	if (req->dma == DMA_ADDR_INVALID) {
		dma_addr_t dma;

		dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);

		if (unlikely(dma_mapping_error(hsotg->dev, dma)))
			goto dma_error;

		/* hardware can only DMA from 32bit aligned addresses */
		if (dma & 3) {
			dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
				__func__);

			dma_unmap_single(hsotg->dev, dma, req->length, dir);
			return -EINVAL;
		}

		hs_req->mapped = 1;
		req->dma = dma;
	} else {
		/* caller-supplied mapping; sync it for the device */
		dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
		hs_req->mapped = 0;
	}

	return 0;

dma_error:
	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
		__func__, req->buf, req->length);

	return -EIO;
}

/**
 * s3c_hsotg_ep_queue - queue a request on an endpoint
 * @ep: The endpoint to queue onto.
 * @req: The request to queue.
 * @gfp_flags: Allocation flags for any memory needed.
 *
 * Add the request to the endpoint's queue; if the queue was empty the
 * request is started immediately.
 */
static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			      gfp_t gfp_flags)
{
	struct s3c_hsotg_req *hs_req = our_req(req);
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hs = hs_ep->parent;
	unsigned long irqflags;
	bool first;

	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
		ep->name, req, req->length, req->buf, req->no_interrupt,
		req->zero, req->short_not_ok);

	/* initialise status of the request */
	INIT_LIST_HEAD(&hs_req->queue);
	req->actual = 0;
	req->status = -EINPROGRESS;

	/* if we're using DMA, sync the buffers as necessary */
	if (using_dma(hs)) {
		int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&hs_ep->lock, irqflags);

	first = list_empty(&hs_ep->queue);
	list_add_tail(&hs_req->queue, &hs_ep->queue);

	/* nothing was in flight, so kick this request off now */
	if (first)
		s3c_hsotg_start_req(hs, hs_ep, hs_req, false);

	spin_unlock_irqrestore(&hs_ep->lock, irqflags);

	return 0;
}

/* free a request previously allocated by s3c_hsotg_ep_alloc_request() */
static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
				      struct
usb_request *req) 895{ 896 struct s3c_hsotg_req *hs_req = our_req(req); 897 898 kfree(hs_req); 899} 900 901/** 902 * s3c_hsotg_complete_oursetup - setup completion callback 903 * @ep: The endpoint the request was on. 904 * @req: The request completed. 905 * 906 * Called on completion of any requests the driver itself 907 * submitted that need cleaning up. 908 */ 909static void s3c_hsotg_complete_oursetup(struct usb_ep *ep, 910 struct usb_request *req) 911{ 912 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 913 struct s3c_hsotg *hsotg = hs_ep->parent; 914 915 dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req); 916 917 s3c_hsotg_ep_free_request(ep, req); 918} 919 920/** 921 * ep_from_windex - convert control wIndex value to endpoint 922 * @hsotg: The driver state. 923 * @windex: The control request wIndex field (in host order). 924 * 925 * Convert the given wIndex into a pointer to an driver endpoint 926 * structure, or return NULL if it is not a valid endpoint. 927*/ 928static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg, 929 u32 windex) 930{ 931 struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F]; 932 int dir = (windex & USB_DIR_IN) ? 1 : 0; 933 int idx = windex & 0x7F; 934 935 if (windex >= 0x100) 936 return NULL; 937 938 if (idx > S3C_HSOTG_EPS) 939 return NULL; 940 941 if (idx && ep->dir_in != dir) 942 return NULL; 943 944 return ep; 945} 946 947/** 948 * s3c_hsotg_send_reply - send reply to control request 949 * @hsotg: The device state 950 * @ep: Endpoint 0 951 * @buff: Buffer for request 952 * @length: Length of reply. 953 * 954 * Create a request and queue it on the given endpoint. This is useful as 955 * an internal method of sending replies to certain control requests, etc. 
956 */ 957static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg, 958 struct s3c_hsotg_ep *ep, 959 void *buff, 960 int length) 961{ 962 struct usb_request *req; 963 int ret; 964 965 dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length); 966 967 req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC); 968 hsotg->ep0_reply = req; 969 if (!req) { 970 dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__); 971 return -ENOMEM; 972 } 973 974 req->buf = hsotg->ep0_buff; 975 req->length = length; 976 req->zero = 1; /* always do zero-length final transfer */ 977 req->complete = s3c_hsotg_complete_oursetup; 978 979 if (length) 980 memcpy(req->buf, buff, length); 981 else 982 ep->sent_zlp = 1; 983 984 ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC); 985 if (ret) { 986 dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__); 987 return ret; 988 } 989 990 return 0; 991} 992 993/** 994 * s3c_hsotg_process_req_status - process request GET_STATUS 995 * @hsotg: The device state 996 * @ctrl: USB control request 997 */ 998static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg, 999 struct usb_ctrlrequest *ctrl) 1000{ 1001 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0]; 1002 struct s3c_hsotg_ep *ep; 1003 __le16 reply; 1004 int ret; 1005 1006 dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__); 1007 1008 if (!ep0->dir_in) { 1009 dev_warn(hsotg->dev, "%s: direction out?\n", __func__); 1010 return -EINVAL; 1011 } 1012 1013 switch (ctrl->bRequestType & USB_RECIP_MASK) { 1014 case USB_RECIP_DEVICE: 1015 reply = cpu_to_le16(0); /* bit 0 => self powered, 1016 * bit 1 => remote wakeup */ 1017 break; 1018 1019 case USB_RECIP_INTERFACE: 1020 /* currently, the data result should be zero */ 1021 reply = cpu_to_le16(0); 1022 break; 1023 1024 case USB_RECIP_ENDPOINT: 1025 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex)); 1026 if (!ep) 1027 return -ENOENT; 1028 1029 reply = cpu_to_le16(ep->halted ? 
				    1 : 0);
		break;

	default:
		return 0;
	}

	/* GET_STATUS replies are always exactly two bytes */
	if (le16_to_cpu(ctrl->wLength) != 2)
		return -EINVAL;

	ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
		return ret;
	}

	return 1;
}

static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);

/**
 * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
					 struct usb_ctrlrequest *ctrl)
{
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	struct s3c_hsotg_ep *ep;

	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
		__func__, set ? "SET" : "CLEAR");

	if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
		if (!ep) {
			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
				__func__, le16_to_cpu(ctrl->wIndex));
			return -ENOENT;
		}

		switch (le16_to_cpu(ctrl->wValue)) {
		case USB_ENDPOINT_HALT:
			s3c_hsotg_ep_sethalt(&ep->ep, set);
			break;

		default:
			return -ENOENT;
		}
	} else
		return -ENOENT;	/* currently only deal with endpoint */

	return 1;
}

/**
 * s3c_hsotg_process_control - process a control request
 * @hsotg: The device state
 * @ctrl: The control request received
 *
 * The controller has received the SETUP phase of a control request, and
 * needs to work out what to do next (and whether to pass it on to the
 * gadget driver).
1094 */ 1095static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg, 1096 struct usb_ctrlrequest *ctrl) 1097{ 1098 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0]; 1099 int ret = 0; 1100 u32 dcfg; 1101 1102 ep0->sent_zlp = 0; 1103 1104 dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n", 1105 ctrl->bRequest, ctrl->bRequestType, 1106 ctrl->wValue, ctrl->wLength); 1107 1108 /* record the direction of the request, for later use when enquing 1109 * packets onto EP0. */ 1110 1111 ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0; 1112 dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in); 1113 1114 /* if we've no data with this request, then the last part of the 1115 * transaction is going to implicitly be IN. */ 1116 if (ctrl->wLength == 0) 1117 ep0->dir_in = 1; 1118 1119 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { 1120 switch (ctrl->bRequest) { 1121 case USB_REQ_SET_ADDRESS: 1122 dcfg = readl(hsotg->regs + S3C_DCFG); 1123 dcfg &= ~S3C_DCFG_DevAddr_MASK; 1124 dcfg |= ctrl->wValue << S3C_DCFG_DevAddr_SHIFT; 1125 writel(dcfg, hsotg->regs + S3C_DCFG); 1126 1127 dev_info(hsotg->dev, "new address %d\n", ctrl->wValue); 1128 1129 ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0); 1130 return; 1131 1132 case USB_REQ_GET_STATUS: 1133 ret = s3c_hsotg_process_req_status(hsotg, ctrl); 1134 break; 1135 1136 case USB_REQ_CLEAR_FEATURE: 1137 case USB_REQ_SET_FEATURE: 1138 ret = s3c_hsotg_process_req_feature(hsotg, ctrl); 1139 break; 1140 } 1141 } 1142 1143 /* as a fallback, try delivering it to the driver to deal with */ 1144 1145 if (ret == 0 && hsotg->driver) { 1146 ret = hsotg->driver->setup(&hsotg->gadget, ctrl); 1147 if (ret < 0) 1148 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret); 1149 } 1150 1151 if (ret > 0) { 1152 if (!ep0->dir_in) { 1153 /* need to generate zlp in reply or take data */ 1154 /* todo - deal with any data we might be sent? 
*/ 1155 ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0); 1156 } 1157 } 1158 1159 /* the request is either unhandlable, or is not formatted correctly 1160 * so respond with a STALL for the status stage to indicate failure. 1161 */ 1162 1163 if (ret < 0) { 1164 u32 reg; 1165 u32 ctrl; 1166 1167 dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in); 1168 reg = (ep0->dir_in) ? S3C_DIEPCTL0 : S3C_DOEPCTL0; 1169 1170 /* S3C_DxEPCTL_Stall will be cleared by EP once it has 1171 * taken effect, so no need to clear later. */ 1172 1173 ctrl = readl(hsotg->regs + reg); 1174 ctrl |= S3C_DxEPCTL_Stall; 1175 ctrl |= S3C_DxEPCTL_CNAK; 1176 writel(ctrl, hsotg->regs + reg); 1177 1178 dev_dbg(hsotg->dev, 1179 "writen DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n", 1180 ctrl, reg, readl(hsotg->regs + reg)); 1181 1182 /* don't belive we need to anything more to get the EP 1183 * to reply with a STALL packet */ 1184 } 1185} 1186 1187static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg); 1188 1189/** 1190 * s3c_hsotg_complete_setup - completion of a setup transfer 1191 * @ep: The endpoint the request was on. 1192 * @req: The request completed. 1193 * 1194 * Called on completion of any requests the driver itself submitted for 1195 * EP0 setup packets 1196 */ 1197static void s3c_hsotg_complete_setup(struct usb_ep *ep, 1198 struct usb_request *req) 1199{ 1200 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 1201 struct s3c_hsotg *hsotg = hs_ep->parent; 1202 1203 if (req->status < 0) { 1204 dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status); 1205 return; 1206 } 1207 1208 if (req->actual == 0) 1209 s3c_hsotg_enqueue_setup(hsotg); 1210 else 1211 s3c_hsotg_process_control(hsotg, req->buf); 1212} 1213 1214/** 1215 * s3c_hsotg_enqueue_setup - start a request for EP0 packets 1216 * @hsotg: The device state. 1217 * 1218 * Enqueue a request on EP0 if necessary to received any SETUP packets 1219 * received from the host. 
1220 */ 1221static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg) 1222{ 1223 struct usb_request *req = hsotg->ctrl_req; 1224 struct s3c_hsotg_req *hs_req = our_req(req); 1225 int ret; 1226 1227 dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__); 1228 1229 req->zero = 0; 1230 req->length = 8; 1231 req->buf = hsotg->ctrl_buff; 1232 req->complete = s3c_hsotg_complete_setup; 1233 1234 if (!list_empty(&hs_req->queue)) { 1235 dev_dbg(hsotg->dev, "%s already queued???\n", __func__); 1236 return; 1237 } 1238 1239 hsotg->eps[0].dir_in = 0; 1240 1241 ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC); 1242 if (ret < 0) { 1243 dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret); 1244 /* Don't think there's much we can do other than watch the 1245 * driver fail. */ 1246 } 1247} 1248 1249/** 1250 * get_ep_head - return the first request on the endpoint 1251 * @hs_ep: The controller endpoint to get 1252 * 1253 * Get the first request on the endpoint. 1254*/ 1255static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep) 1256{ 1257 if (list_empty(&hs_ep->queue)) 1258 return NULL; 1259 1260 return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue); 1261} 1262 1263/** 1264 * s3c_hsotg_complete_request - complete a request given to us 1265 * @hsotg: The device state. 1266 * @hs_ep: The endpoint the request was on. 1267 * @hs_req: The request to complete. 1268 * @result: The result code (0 => Ok, otherwise errno) 1269 * 1270 * The given request has finished, so call the necessary completion 1271 * if it has one and then look to see if we can start a new request 1272 * on the endpoint. 1273 * 1274 * Note, expects the ep to already be locked as appropriate. 
*/
static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
				       struct s3c_hsotg_ep *hs_ep,
				       struct s3c_hsotg_req *hs_req,
				       int result)
{
	bool restart;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
		return;
	}

	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

	/* only replace the status if we've not already set an error
	 * from a previous transaction */

	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;

	/* detach the request from the endpoint before unmapping/completing */
	hs_ep->req = NULL;
	list_del_init(&hs_req->queue);

	if (using_dma(hsotg))
		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	/* call the complete request with the locks off, just in case the
	 * request tries to queue more work for this endpoint. */

	if (hs_req->req.complete) {
		/* NOTE(review): the endpoint lock is dropped here, so the
		 * queue may change under us while the callback runs. */
		spin_unlock(&hs_ep->lock);
		hs_req->req.complete(&hs_ep->ep, &hs_req->req);
		spin_lock(&hs_ep->lock);
	}

	/* Look to see if there is anything else to do. Note, the completion
	 * of the previous request may have caused a new request to be started
	 * so be careful when doing this. */

	if (!hs_ep->req && result >= 0) {
		restart = !list_empty(&hs_ep->queue);
		if (restart) {
			hs_req = get_ep_head(hs_ep);
			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		}
	}
}

/**
 * s3c_hsotg_complete_request_lock - complete a request given to us (locked)
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request was on.
 * @hs_req: The request to complete.
 * @result: The result code (0 => Ok, otherwise errno)
 *
 * See s3c_hsotg_complete_request(), but takes the endpoint's
 * lock itself.
*/
static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
					    struct s3c_hsotg_ep *hs_ep,
					    struct s3c_hsotg_req *hs_req,
					    int result)
{
	unsigned long flags;

	spin_lock_irqsave(&hs_ep->lock, flags);
	s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
	spin_unlock_irqrestore(&hs_ep->lock, flags);
}

/**
 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
 * @hsotg: The device state.
 * @ep_idx: The endpoint index for the data
 * @size: The size of data in the fifo, in bytes
 *
 * The FIFO status shows there is data to read from the FIFO for a given
 * endpoint, so sort out whether we need to read the data into a request
 * that has been made for that endpoint.
 */
static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	void __iomem *fifo = hsotg->regs + S3C_EPFIFO(ep_idx);
	int to_read;
	int max_req;
	int read_ptr;

	if (!hs_req) {
		u32 epctl = readl(hsotg->regs + S3C_DOEPCTL(ep_idx));
		int ptr;

		dev_warn(hsotg->dev,
			 "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
			 __func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do;
		 * reads are rounded up to whole 32bit FIFO words */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)readl(fifo);

		return;
	}

	spin_lock(&hs_ep->lock);

	to_read = size;
	read_ptr = hs_req->req.actual;
	max_req = hs_req->req.length - read_ptr;

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	if (to_read > max_req) {
		/* more data appeared than we were willing
		 * to deal with in this request.
		 */

		/* currently we don't deal with this */
		WARN_ON_ONCE(1);
	}

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;
	/* convert the byte count to 32bit FIFO words, rounding up */
	to_read = DIV_ROUND_UP(to_read, 4);

	/* note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data. */
	readsl(fifo, hs_req->req.buf + read_ptr, to_read);

	spin_unlock(&hs_ep->lock);
}

/**
 * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
 * @hsotg: The device instance
 * @req: The request currently on this endpoint
 *
 * Generate a zero-length IN packet request for terminating a SETUP
 * transaction.
 *
 * Note, since we don't write any data to the TxFIFO, then it is
 * currently believed that we do not need to wait for any space in
 * the TxFIFO.
 */
static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
			       struct s3c_hsotg_req *req)
{
	u32 ctrl;

	if (!req) {
		dev_warn(hsotg->dev, "%s: no request?\n", __func__);
		return;
	}

	if (req->req.length == 0) {
		/* the request itself was zero-length, so there is nothing
		 * more to terminate; just re-arm EP0 for the next SETUP */
		hsotg->eps[0].sent_zlp = 1;
		s3c_hsotg_enqueue_setup(hsotg);
		return;
	}

	hsotg->eps[0].dir_in = 1;
	hsotg->eps[0].sent_zlp = 1;

	dev_dbg(hsotg->dev, "sending zero-length packet\n");

	/* issue a zero-sized packet to terminate this */
	writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
	       S3C_DxEPTSIZ_XferSize(0), hsotg->regs + S3C_DIEPTSIZ(0));

	ctrl = readl(hsotg->regs + S3C_DIEPCTL0);
	ctrl |= S3C_DxEPCTL_CNAK;  /* clear NAK set by core */
	ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
	ctrl |= S3C_DxEPCTL_USBActEp;
	writel(ctrl, hsotg->regs + S3C_DIEPCTL0);
}

/**
 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
 * @hsotg: The device instance
 * @epnum: The endpoint received from
 * @was_setup: Set if processing a SetupDone event.
1459 * 1460 * The RXFIFO has delivered an OutDone event, which means that the data 1461 * transfer for an OUT endpoint has been completed, either by a short 1462 * packet or by the finish of a transfer. 1463*/ 1464static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg, 1465 int epnum, bool was_setup) 1466{ 1467 u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum)); 1468 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum]; 1469 struct s3c_hsotg_req *hs_req = hs_ep->req; 1470 struct usb_request *req = &hs_req->req; 1471 unsigned size_left = S3C_DxEPTSIZ_XferSize_GET(epsize); 1472 int result = 0; 1473 1474 if (!hs_req) { 1475 dev_dbg(hsotg->dev, "%s: no request active\n", __func__); 1476 return; 1477 } 1478 1479 if (using_dma(hsotg)) { 1480 unsigned size_done; 1481 1482 /* Calculate the size of the transfer by checking how much 1483 * is left in the endpoint size register and then working it 1484 * out from the amount we loaded for the transfer. 1485 * 1486 * We need to do this as DMA pointers are always 32bit aligned 1487 * so may overshoot/undershoot the transfer. 1488 */ 1489 1490 size_done = hs_ep->size_loaded - size_left; 1491 size_done += hs_ep->last_load; 1492 1493 req->actual = size_done; 1494 } 1495 1496 /* if there is more request to do, schedule new transfer */ 1497 if (req->actual < req->length && size_left == 0) { 1498 s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true); 1499 return; 1500 } 1501 1502 if (req->actual < req->length && req->short_not_ok) { 1503 dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n", 1504 __func__, req->actual, req->length); 1505 1506 /* todo - what should we return here? there's no one else 1507 * even bothering to check the status. 
*/ 1508 } 1509 1510 if (epnum == 0) { 1511 if (!was_setup && req->complete != s3c_hsotg_complete_setup) 1512 s3c_hsotg_send_zlp(hsotg, hs_req); 1513 } 1514 1515 s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result); 1516} 1517 1518/** 1519 * s3c_hsotg_read_frameno - read current frame number 1520 * @hsotg: The device instance 1521 * 1522 * Return the current frame number 1523*/ 1524static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg) 1525{ 1526 u32 dsts; 1527 1528 dsts = readl(hsotg->regs + S3C_DSTS); 1529 dsts &= S3C_DSTS_SOFFN_MASK; 1530 dsts >>= S3C_DSTS_SOFFN_SHIFT; 1531 1532 return dsts; 1533} 1534 1535/** 1536 * s3c_hsotg_handle_rx - RX FIFO has data 1537 * @hsotg: The device instance 1538 * 1539 * The IRQ handler has detected that the RX FIFO has some data in it 1540 * that requires processing, so find out what is in there and do the 1541 * appropriate read. 1542 * 1543 * The RXFIFO is a true FIFO, the packets comming out are still in packet 1544 * chunks, so if you have x packets received on an endpoint you'll get x 1545 * FIFO events delivered, each with a packet's worth of data in it. 1546 * 1547 * When using DMA, we should not be processing events from the RXFIFO 1548 * as the actual data should be sent to the memory directly and we turn 1549 * on the completion interrupts to get notifications of transfer completion. 
1550 */ 1551static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg) 1552{ 1553 u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP); 1554 u32 epnum, status, size; 1555 1556 WARN_ON(using_dma(hsotg)); 1557 1558 epnum = grxstsr & S3C_GRXSTS_EPNum_MASK; 1559 status = grxstsr & S3C_GRXSTS_PktSts_MASK; 1560 1561 size = grxstsr & S3C_GRXSTS_ByteCnt_MASK; 1562 size >>= S3C_GRXSTS_ByteCnt_SHIFT; 1563 1564 if (1) 1565 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n", 1566 __func__, grxstsr, size, epnum); 1567 1568#define __status(x) ((x) >> S3C_GRXSTS_PktSts_SHIFT) 1569 1570 switch (status >> S3C_GRXSTS_PktSts_SHIFT) { 1571 case __status(S3C_GRXSTS_PktSts_GlobalOutNAK): 1572 dev_dbg(hsotg->dev, "GlobalOutNAK\n"); 1573 break; 1574 1575 case __status(S3C_GRXSTS_PktSts_OutDone): 1576 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n", 1577 s3c_hsotg_read_frameno(hsotg)); 1578 1579 if (!using_dma(hsotg)) 1580 s3c_hsotg_handle_outdone(hsotg, epnum, false); 1581 break; 1582 1583 case __status(S3C_GRXSTS_PktSts_SetupDone): 1584 dev_dbg(hsotg->dev, 1585 "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n", 1586 s3c_hsotg_read_frameno(hsotg), 1587 readl(hsotg->regs + S3C_DOEPCTL(0))); 1588 1589 s3c_hsotg_handle_outdone(hsotg, epnum, true); 1590 break; 1591 1592 case __status(S3C_GRXSTS_PktSts_OutRX): 1593 s3c_hsotg_rx_data(hsotg, epnum, size); 1594 break; 1595 1596 case __status(S3C_GRXSTS_PktSts_SetupRX): 1597 dev_dbg(hsotg->dev, 1598 "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n", 1599 s3c_hsotg_read_frameno(hsotg), 1600 readl(hsotg->regs + S3C_DOEPCTL(0))); 1601 1602 s3c_hsotg_rx_data(hsotg, epnum, size); 1603 break; 1604 1605 default: 1606 dev_warn(hsotg->dev, "%s: unknown status %08x\n", 1607 __func__, grxstsr); 1608 1609 s3c_hsotg_dump(hsotg); 1610 break; 1611 } 1612} 1613 1614/** 1615 * s3c_hsotg_ep0_mps - turn max packet size into register setting 1616 * @mps: The maximum packet size in bytes. 
1617*/ 1618static u32 s3c_hsotg_ep0_mps(unsigned int mps) 1619{ 1620 switch (mps) { 1621 case 64: 1622 return S3C_D0EPCTL_MPS_64; 1623 case 32: 1624 return S3C_D0EPCTL_MPS_32; 1625 case 16: 1626 return S3C_D0EPCTL_MPS_16; 1627 case 8: 1628 return S3C_D0EPCTL_MPS_8; 1629 } 1630 1631 /* bad max packet size, warn and return invalid result */ 1632 WARN_ON(1); 1633 return (u32)-1; 1634} 1635 1636/** 1637 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field 1638 * @hsotg: The driver state. 1639 * @ep: The index number of the endpoint 1640 * @mps: The maximum packet size in bytes 1641 * 1642 * Configure the maximum packet size for the given endpoint, updating 1643 * the hardware control registers to reflect this. 1644 */ 1645static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg, 1646 unsigned int ep, unsigned int mps) 1647{ 1648 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep]; 1649 void __iomem *regs = hsotg->regs; 1650 u32 mpsval; 1651 u32 reg; 1652 1653 if (ep == 0) { 1654 /* EP0 is a special case */ 1655 mpsval = s3c_hsotg_ep0_mps(mps); 1656 if (mpsval > 3) 1657 goto bad_mps; 1658 } else { 1659 if (mps >= S3C_DxEPCTL_MPS_LIMIT+1) 1660 goto bad_mps; 1661 1662 mpsval = mps; 1663 } 1664 1665 hs_ep->ep.maxpacket = mps; 1666 1667 /* update both the in and out endpoint controldir_ registers, even 1668 * if one of the directions may not be in use. */ 1669 1670 reg = readl(regs + S3C_DIEPCTL(ep)); 1671 reg &= ~S3C_DxEPCTL_MPS_MASK; 1672 reg |= mpsval; 1673 writel(reg, regs + S3C_DIEPCTL(ep)); 1674 1675 reg = readl(regs + S3C_DOEPCTL(ep)); 1676 reg &= ~S3C_DxEPCTL_MPS_MASK; 1677 reg |= mpsval; 1678 writel(reg, regs + S3C_DOEPCTL(ep)); 1679 1680 return; 1681 1682bad_mps: 1683 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps); 1684} 1685 1686 1687/** 1688 * s3c_hsotg_trytx - check to see if anything needs transmitting 1689 * @hsotg: The driver state 1690 * @hs_ep: The driver endpoint to check. 
 *
 * Check to see if there is a request that has data to send, and if so
 * make an attempt to write data into the FIFO.
 */
static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
			   struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;

	/* nothing to do for OUT endpoints, or when no request is loaded */
	if (!hs_ep->dir_in || !hs_req)
		return 0;

	if (hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
			hs_ep->index);
		return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	return 0;
}

/**
 * s3c_hsotg_complete_in - complete IN transfer
 * @hsotg: The device state.
 * @hs_ep: The endpoint that has just completed.
 *
 * An IN transfer has been completed, update the transfer's state and then
 * call the relevant completion routines.
 */
static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
				  struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */

	size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;

	/* if we did all of the transfer, and there is more data left
	 * around, then try restarting the rest of the request */

	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
	} else
		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
}

/**
 * s3c_hsotg_epint - handle an in/out endpoint interrupt
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 * @dir_in: Set if this is an IN endpoint
 *
 * Process and clear any interrupt pending for an individual endpoint.
 * Handled interrupt bits are accumulated in 'clear' and written back to
 * the endpoint's interrupt register in one go at the end.
*/
static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
			    int dir_in)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
	u32 epint_reg = dir_in ? S3C_DIEPINT(idx) : S3C_DOEPINT(idx);
	u32 epctl_reg = dir_in ? S3C_DIEPCTL(idx) : S3C_DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? S3C_DIEPTSIZ(idx) : S3C_DOEPTSIZ(idx);
	u32 ints;
	u32 clear = 0;

	ints = readl(hsotg->regs + epint_reg);

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	if (ints & S3C_DxEPINT_XferCompl) {
		dev_dbg(hsotg->dev,
			"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
			__func__, readl(hsotg->regs + epctl_reg),
			readl(hsotg->regs + epsiz_reg));

		/* we get OutDone from the FIFO, so we only need to look
		 * at completing IN requests here */
		if (dir_in) {
			s3c_hsotg_complete_in(hsotg, hs_ep);

			/* EP0 with no request pending means the control
			 * transfer finished: re-arm for the next SETUP */
			if (idx == 0 && !hs_ep->req)
				s3c_hsotg_enqueue_setup(hsotg);
		} else if (using_dma(hsotg)) {
			/* We're using DMA, we need to fire an OutDone here
			 * as we ignore the RXFIFO. */

			s3c_hsotg_handle_outdone(hsotg, idx, false);
		}

		clear |= S3C_DxEPINT_XferCompl;
	}

	if (ints & S3C_DxEPINT_EPDisbld) {
		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
		clear |= S3C_DxEPINT_EPDisbld;
	}

	if (ints & S3C_DxEPINT_AHBErr) {
		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
		clear |= S3C_DxEPINT_AHBErr;
	}

	if (ints & S3C_DxEPINT_Setup) {  /* Setup or Timeout */
		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);

		if (using_dma(hsotg) && idx == 0) {
			/* this is the notification we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO, instead we need to process
			 * the setup here. */

			if (dir_in)
				WARN_ON_ONCE(1);
			else
				s3c_hsotg_handle_outdone(hsotg, 0, true);
		}

		clear |= S3C_DxEPINT_Setup;
	}

	if (ints & S3C_DxEPINT_Back2BackSetup) {
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
		clear |= S3C_DxEPINT_Back2BackSetup;
	}

	if (dir_in) {
		/* not sure if this is important, but we'll clear it anyway
		 */
		if (ints & S3C_DIEPMSK_INTknTXFEmpMsk) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
			clear |= S3C_DIEPMSK_INTknTXFEmpMsk;
		}

		/* this probably means something bad is happening */
		if (ints & S3C_DIEPMSK_INTknEPMisMsk) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
			clear |= S3C_DIEPMSK_INTknEPMisMsk;
		}

		/* FIFO has space or is empty (see GAHBCFG) */
		if (hsotg->dedicated_fifos &&
		    ints & S3C_DIEPMSK_TxFIFOEmpty) {
			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
				__func__, idx);
			s3c_hsotg_trytx(hsotg, hs_ep);
			clear |= S3C_DIEPMSK_TxFIFOEmpty;
		}
	}

	/* acknowledge all the interrupt bits we handled above */
	writel(clear, hsotg->regs + epint_reg);
}

/**
 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
 * @hsotg: The device state.
 *
 * Handle updating the device settings after the enumeration phase has
 * been completed.
*/
static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
{
	u32 dsts = readl(hsotg->regs + S3C_DSTS);
	int ep0_mps = 0, ep_mps;

	/* This should signal the finish of the enumeration phase
	 * of the USB handshaking, so we should now know what rate
	 * we connected at. */

	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

	/* note, since we're limited by the size of transfer on EP0, and
	 * it seems IN transfers must be a even number of packets we do
	 * not advertise a 64byte MPS on EP0. */

	/* catch both EnumSpd_FS and EnumSpd_FS48 */
	switch (dsts & S3C_DSTS_EnumSpd_MASK) {
	case S3C_DSTS_EnumSpd_FS:
	case S3C_DSTS_EnumSpd_FS48:
		hsotg->gadget.speed = USB_SPEED_FULL;
		dev_info(hsotg->dev, "new device is full-speed\n");

		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 64;
		break;

	case S3C_DSTS_EnumSpd_HS:
		dev_info(hsotg->dev, "new device is high-speed\n");
		hsotg->gadget.speed = USB_SPEED_HIGH;

		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 512;
		break;

	case S3C_DSTS_EnumSpd_LS:
		hsotg->gadget.speed = USB_SPEED_LOW;
		dev_info(hsotg->dev, "new device is low-speed\n");

		/* note, we don't actually support LS in this driver at the
		 * moment, and the documentation seems to imply that it isn't
		 * supported by the PHYs on some of the devices.
		 */
		break;
	}

	/* we should now know the maximum packet size for an
	 * endpoint, so set the endpoints to a default value. */

	if (ep0_mps) {
		int i;
		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
		for (i = 1; i < S3C_HSOTG_EPS; i++)
			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
	}

	/* ensure after enumeration our EP0 is active */

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + S3C_DIEPCTL0),
		readl(hsotg->regs + S3C_DOEPCTL0));
}

/**
 * kill_all_requests - remove all requests from the endpoint's queue
 * @hsotg: The device state.
 * @ep: The endpoint the requests may be on.
 * @result: The result code to use.
 * @force: Force removal of any current requests
 *
 * Go through the requests on the given endpoint and mark them
 * completed with the given result code.
1950 */ 1951static void kill_all_requests(struct s3c_hsotg *hsotg, 1952 struct s3c_hsotg_ep *ep, 1953 int result, bool force) 1954{ 1955 struct s3c_hsotg_req *req, *treq; 1956 unsigned long flags; 1957 1958 spin_lock_irqsave(&ep->lock, flags); 1959 1960 list_for_each_entry_safe(req, treq, &ep->queue, queue) { 1961 /* currently, we can't do much about an already 1962 * running request on an in endpoint */ 1963 1964 if (ep->req == req && ep->dir_in && !force) 1965 continue; 1966 1967 s3c_hsotg_complete_request(hsotg, ep, req, 1968 result); 1969 } 1970 1971 spin_unlock_irqrestore(&ep->lock, flags); 1972} 1973 1974#define call_gadget(_hs, _entry) \ 1975 if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \ 1976 (_hs)->driver && (_hs)->driver->_entry) \ 1977 (_hs)->driver->_entry(&(_hs)->gadget); 1978 1979/** 1980 * s3c_hsotg_disconnect_irq - disconnect irq service 1981 * @hsotg: The device state. 1982 * 1983 * A disconnect IRQ has been received, meaning that the host has 1984 * lost contact with the bus. Remove all current transactions 1985 * and signal the gadget driver that this has happened. 
1986*/ 1987static void s3c_hsotg_disconnect_irq(struct s3c_hsotg *hsotg) 1988{ 1989 unsigned ep; 1990 1991 for (ep = 0; ep < S3C_HSOTG_EPS; ep++) 1992 kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true); 1993 1994 call_gadget(hsotg, disconnect); 1995} 1996 1997/** 1998 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler 1999 * @hsotg: The device state: 2000 * @periodic: True if this is a periodic FIFO interrupt 2001 */ 2002static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic) 2003{ 2004 struct s3c_hsotg_ep *ep; 2005 int epno, ret; 2006 2007 /* look through for any more data to transmit */ 2008 2009 for (epno = 0; epno < S3C_HSOTG_EPS; epno++) { 2010 ep = &hsotg->eps[epno]; 2011 2012 if (!ep->dir_in) 2013 continue; 2014 2015 if ((periodic && !ep->periodic) || 2016 (!periodic && ep->periodic)) 2017 continue; 2018 2019 ret = s3c_hsotg_trytx(hsotg, ep); 2020 if (ret < 0) 2021 break; 2022 } 2023} 2024 2025static struct s3c_hsotg *our_hsotg; 2026 2027/* IRQ flags which will trigger a retry around the IRQ loop */ 2028#define IRQ_RETRY_MASK (S3C_GINTSTS_NPTxFEmp | \ 2029 S3C_GINTSTS_PTxFEmp | \ 2030 S3C_GINTSTS_RxFLvl) 2031 2032/** 2033 * s3c_hsotg_irq - handle device interrupt 2034 * @irq: The IRQ number triggered 2035 * @pw: The pw value when registered the handler. 
 */
static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
{
	struct s3c_hsotg *hsotg = pw;
	int retry_count = 8;
	u32 gintsts;
	u32 gintmsk;

irq_retry:
	gintsts = readl(hsotg->regs + S3C_GINTSTS);
	gintmsk = readl(hsotg->regs + S3C_GINTMSK);

	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);

	/* only act on the interrupts that are currently unmasked */
	gintsts &= gintmsk;

	if (gintsts & S3C_GINTSTS_OTGInt) {
		u32 otgint = readl(hsotg->regs + S3C_GOTGINT);

		dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);

		writel(otgint, hsotg->regs + S3C_GOTGINT);
		writel(S3C_GINTSTS_OTGInt, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_DisconnInt) {
		dev_dbg(hsotg->dev, "%s: DisconnInt\n", __func__);
		writel(S3C_GINTSTS_DisconnInt, hsotg->regs + S3C_GINTSTS);

		s3c_hsotg_disconnect_irq(hsotg);
	}

	if (gintsts & S3C_GINTSTS_SessReqInt) {
		dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
		writel(S3C_GINTSTS_SessReqInt, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_EnumDone) {
		s3c_hsotg_irq_enumdone(hsotg);
		writel(S3C_GINTSTS_EnumDone, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_ConIDStsChng) {
		dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
			readl(hsotg->regs + S3C_DSTS),
			readl(hsotg->regs + S3C_GOTGCTL));

		writel(S3C_GINTSTS_ConIDStsChng, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt)) {
		u32 daint = readl(hsotg->regs + S3C_DAINT);
		/* DAINT packs OUT endpoint bits in the top half and IN
		 * endpoint bits in the bottom half */
		u32 daint_out = daint >> S3C_DAINT_OutEP_SHIFT;
		u32 daint_in = daint & ~(daint_out << S3C_DAINT_OutEP_SHIFT);
		int ep;

		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);

		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
			if (daint_out & 1)
				s3c_hsotg_epint(hsotg, ep, 0);
		}

		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
			if (daint_in & 1)
				s3c_hsotg_epint(hsotg, ep, 1);
		}

		writel(daint, hsotg->regs + S3C_DAINT);
		writel(gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt),
		       hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_USBRst) {
		dev_info(hsotg->dev, "%s: USBRst\n", __func__);
		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
			readl(hsotg->regs + S3C_GNPTXSTS));

		kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);

		/* it seems after a reset we can end up with a situation
		 * where the TXFIFO still has data in it... the docs
		 * suggest resetting all the fifos, so use the init_fifo
		 * code to relayout and flush the fifos.
		 */

		s3c_hsotg_init_fifo(hsotg);

		s3c_hsotg_enqueue_setup(hsotg);

		writel(S3C_GINTSTS_USBRst, hsotg->regs + S3C_GINTSTS);
	}

	/* check both FIFOs */

	if (gintsts & S3C_GINTSTS_NPTxFEmp) {
		dev_dbg(hsotg->dev, "NPTxFEmp\n");

		/* Disable the interrupt to stop it happening again
		 * unless one of these endpoint routines decides that
		 * it needs re-enabling */

		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
		s3c_hsotg_irq_fifoempty(hsotg, false);

		writel(S3C_GINTSTS_NPTxFEmp, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_PTxFEmp) {
		dev_dbg(hsotg->dev, "PTxFEmp\n");

		/* See note in S3C_GINTSTS_NPTxFEmp */

		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
		s3c_hsotg_irq_fifoempty(hsotg, true);

		writel(S3C_GINTSTS_PTxFEmp, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_RxFLvl) {
		/* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
		 * we need to retry s3c_hsotg_handle_rx if this is still
		 * set. */

		s3c_hsotg_handle_rx(hsotg);
		writel(S3C_GINTSTS_RxFLvl, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_ModeMis) {
		dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
		writel(S3C_GINTSTS_ModeMis, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_USBSusp) {
		dev_info(hsotg->dev, "S3C_GINTSTS_USBSusp\n");
		writel(S3C_GINTSTS_USBSusp, hsotg->regs + S3C_GINTSTS);

		call_gadget(hsotg, suspend);
	}

	if (gintsts & S3C_GINTSTS_WkUpInt) {
		dev_info(hsotg->dev, "S3C_GINTSTS_WkUpIn\n");
		writel(S3C_GINTSTS_WkUpInt, hsotg->regs + S3C_GINTSTS);

		call_gadget(hsotg, resume);
	}

	if (gintsts & S3C_GINTSTS_ErlySusp) {
		dev_dbg(hsotg->dev, "S3C_GINTSTS_ErlySusp\n");
		writel(S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTSTS);
	}

	/* these next two seem to crop-up occasionally causing the core
	 * to shutdown the USB transfer, so try clearing them and logging
	 * the occurrence. */

	if (gintsts & S3C_GINTSTS_GOUTNakEff) {
		dev_info(hsotg->dev, "GOUTNakEff triggered\n");

		s3c_hsotg_dump(hsotg);

		writel(S3C_DCTL_CGOUTNak, hsotg->regs + S3C_DCTL);
		writel(S3C_GINTSTS_GOUTNakEff, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_GINNakEff) {
		dev_info(hsotg->dev, "GINNakEff triggered\n");

		s3c_hsotg_dump(hsotg);

		writel(S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL);
		writel(S3C_GINTSTS_GINNakEff, hsotg->regs + S3C_GINTSTS);
	}

	/* if we've had fifo events, we should try and go around the
	 * loop again to see if there's any point in returning yet. */

	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
		goto irq_retry;

	return IRQ_HANDLED;
}

/**
 * s3c_hsotg_ep_enable - enable the given endpoint
 * @ep: The USB endpoint to configure
 * @desc: The USB endpoint descriptor to configure with.
 *
 * This is called from the USB gadget code's usb_ep_enable().
*/
static int s3c_hsotg_ep_enable(struct usb_ep *ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;
	unsigned long flags;
	int index = hs_ep->index;
	u32 epctrl_reg;
	u32 epctrl;
	u32 mps;
	int dir_in;
	int ret = 0;

	dev_dbg(hsotg->dev,
		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
		desc->wMaxPacketSize, desc->bInterval);

	/* not to be called for EP0 */
	WARN_ON(index == 0);

	/* the descriptor's direction must match the fixed direction
	 * assigned to this endpoint at init time */
	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
	if (dir_in != hs_ep->dir_in) {
		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
		return -EINVAL;
	}

	mps = le16_to_cpu(desc->wMaxPacketSize);

	/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */

	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
	epctrl = readl(hsotg->regs + epctrl_reg);

	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
		__func__, epctrl, epctrl_reg);

	spin_lock_irqsave(&hs_ep->lock, flags);

	epctrl &= ~(S3C_DxEPCTL_EPType_MASK | S3C_DxEPCTL_MPS_MASK);
	epctrl |= S3C_DxEPCTL_MPS(mps);

	/* mark the endpoint as active, otherwise the core may ignore
	 * transactions entirely for this endpoint */
	epctrl |= S3C_DxEPCTL_USBActEp;

	/* set the NAK status on the endpoint, otherwise we might try and
	 * do something with data that we've yet got a request to process
	 * since the RXFIFO will take data for an endpoint even if the
	 * size register hasn't been set.
	 */

	epctrl |= S3C_DxEPCTL_SNAK;

	/* update the endpoint state */
	hs_ep->ep.maxpacket = mps;

	/* default, set to non-periodic */
	hs_ep->periodic = 0;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(hsotg->dev, "no current ISOC support\n");
		ret = -EINVAL;
		goto out;

	case USB_ENDPOINT_XFER_BULK:
		epctrl |= S3C_DxEPCTL_EPType_Bulk;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dir_in) {
			/* Allocate our TxFNum by simply using the index
			 * of the endpoint for the moment. We could do
			 * something better if the host indicates how
			 * many FIFOs we are expecting to use. */

			hs_ep->periodic = 1;
			epctrl |= S3C_DxEPCTL_TxFNum(index);
		}

		epctrl |= S3C_DxEPCTL_EPType_Intterupt;
		break;

	case USB_ENDPOINT_XFER_CONTROL:
		epctrl |= S3C_DxEPCTL_EPType_Control;
		break;
	}

	/* if the hardware has dedicated fifos, we must give each IN EP
	 * a unique tx-fifo even if it is non-periodic.
	 */
	if (dir_in && hsotg->dedicated_fifos)
		epctrl |= S3C_DxEPCTL_TxFNum(index);

	/* for non control endpoints, set PID to D0 */
	if (index)
		epctrl |= S3C_DxEPCTL_SetD0PID;

	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
		__func__, epctrl);

	writel(epctrl, hsotg->regs + epctrl_reg);
	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
		__func__, readl(hsotg->regs + epctrl_reg));

	/* enable the endpoint interrupt */
	s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);

out:
	spin_unlock_irqrestore(&hs_ep->lock, flags);
	return ret;
}

/**
 * s3c_hsotg_ep_disable - disable the given endpoint
 * @ep: The USB endpoint to disable.
 *
 * Called from usb_ep_disable(); kills any queued requests, deactivates
 * the endpoint in hardware and masks its interrupts. Not valid for EP0.
 */
static int s3c_hsotg_ep_disable(struct usb_ep *ep)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	int index = hs_ep->index;
	unsigned long flags;
	u32 epctrl_reg;
	u32 ctrl;

	dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);

	if (ep == &hsotg->eps[0].ep) {
		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
		return -EINVAL;
	}

	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);

	/* terminate all requests with shutdown */
	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);

	spin_lock_irqsave(&hs_ep->lock, flags);

	/* disable + deactivate the endpoint and NAK further traffic */
	ctrl = readl(hsotg->regs + epctrl_reg);
	ctrl &= ~S3C_DxEPCTL_EPEna;
	ctrl &= ~S3C_DxEPCTL_USBActEp;
	ctrl |= S3C_DxEPCTL_SNAK;

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	writel(ctrl, hsotg->regs + epctrl_reg);

	/* disable endpoint interrupts */
	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);

	spin_unlock_irqrestore(&hs_ep->lock, flags);
	return 0;
}

/**
 * on_list - check request is on the given endpoint
 * @ep: The endpoint to check.
 * @test: The request to test if it is on the endpoint.
*/
static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
{
	struct s3c_hsotg_req *req, *treq;

	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
		if (req == test)
			return true;
	}

	return false;
}

/**
 * s3c_hsotg_ep_dequeue - remove a request from an endpoint's queue
 * @ep: The USB endpoint the request was queued on.
 * @req: The request to remove.
 *
 * Completes the request with -ECONNRESET if it is still queued; a
 * request already loaded into the hardware returns -EINPROGRESS.
 */
static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct s3c_hsotg_req *hs_req = our_req(req);
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hs = hs_ep->parent;
	unsigned long flags;

	dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);

	if (hs_req == hs_ep->req) {
		dev_dbg(hs->dev, "%s: already in progress\n", __func__);
		return -EINPROGRESS;
	}

	spin_lock_irqsave(&hs_ep->lock, flags);

	if (!on_list(hs_ep, hs_req)) {
		spin_unlock_irqrestore(&hs_ep->lock, flags);
		return -EINVAL;
	}

	s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
	spin_unlock_irqrestore(&hs_ep->lock, flags);

	return 0;
}

/**
 * s3c_hsotg_ep_sethalt - set/clear the STALL condition on an endpoint
 * @ep: The USB endpoint to halt.
 * @value: non-zero to set the stall, zero to clear it.
 */
static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hs = hs_ep->parent;
	int index = hs_ep->index;
	unsigned long irqflags;
	u32 epreg;
	u32 epctl;

	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);

	spin_lock_irqsave(&hs_ep->lock, irqflags);

	/* write both IN and OUT control registers */

	epreg = S3C_DIEPCTL(index);
	epctl = readl(hs->regs + epreg);

	if (value)
		epctl |= S3C_DxEPCTL_Stall;
	else
		epctl &= ~S3C_DxEPCTL_Stall;

	writel(epctl, hs->regs + epreg);

	epreg = S3C_DOEPCTL(index);
	epctl = readl(hs->regs + epreg);

	if (value)
		epctl |= S3C_DxEPCTL_Stall;
	else
		epctl &= ~S3C_DxEPCTL_Stall;

	writel(epctl, hs->regs + epreg);

	spin_unlock_irqrestore(&hs_ep->lock, irqflags);

	return 0;
}

static struct usb_ep_ops s3c_hsotg_ep_ops = {
	.enable		= s3c_hsotg_ep_enable,
	.disable	= s3c_hsotg_ep_disable,
	.alloc_request	= s3c_hsotg_ep_alloc_request,
	.free_request	= s3c_hsotg_ep_free_request,
	.queue		= s3c_hsotg_ep_queue,
	.dequeue	= s3c_hsotg_ep_dequeue,
	.set_halt	= s3c_hsotg_ep_sethalt,
	/* note, don't believe we have any call for the fifo routines */
};

/**
 * s3c_hsotg_corereset - issue softreset to the core
 * @hsotg: The device state
 *
 * Issue a soft reset to the core, and await the core finishing it.
*/
static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
{
	int timeout;
	u32 grstctl;

	dev_dbg(hsotg->dev, "resetting core\n");

	/* issue soft reset */
	writel(S3C_GRSTCTL_CSftRst, hsotg->regs + S3C_GRSTCTL);

	/* first wait for the core to acknowledge the reset request
	 * by asserting CSftRst */
	timeout = 1000;
	do {
		grstctl = readl(hsotg->regs + S3C_GRSTCTL);
	} while (!(grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);

	if (!(grstctl & S3C_GRSTCTL_CSftRst)) {
		dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
		return -EINVAL;
	}

	timeout = 1000;

	/* now wait for the reset to self-clear and the AHB to go idle */
	while (1) {
		u32 grstctl = readl(hsotg->regs + S3C_GRSTCTL);

		if (timeout-- < 0) {
			dev_info(hsotg->dev,
				 "%s: reset failed, GRSTCTL=%08x\n",
				 __func__, grstctl);
			return -ETIMEDOUT;
		}

		if (grstctl & S3C_GRSTCTL_CSftRst)
			continue;

		if (!(grstctl & S3C_GRSTCTL_AHBIdle))
			continue;

		break;		/* reset done */
	}

	dev_dbg(hsotg->dev, "reset successful\n");
	return 0;
}

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct s3c_hsotg *hsotg = our_hsotg;
	int ret;

	if (!hsotg) {
		printk(KERN_ERR "%s: called with no device\n", __func__);
		return -ENODEV;
	}

	if (!driver) {
		dev_err(hsotg->dev, "%s: no driver\n", __func__);
		return -EINVAL;
	}
2540 2541 if (driver->speed != USB_SPEED_HIGH && 2542 driver->speed != USB_SPEED_FULL) { 2543 dev_err(hsotg->dev, "%s: bad speed\n", __func__); 2544 } 2545 2546 if (!driver->bind || !driver->setup) { 2547 dev_err(hsotg->dev, "%s: missing entry points\n", __func__); 2548 return -EINVAL; 2549 } 2550 2551 WARN_ON(hsotg->driver); 2552 2553 driver->driver.bus = NULL; 2554 hsotg->driver = driver; 2555 hsotg->gadget.dev.driver = &driver->driver; 2556 hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask; 2557 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 2558 2559 ret = device_add(&hsotg->gadget.dev); 2560 if (ret) { 2561 dev_err(hsotg->dev, "failed to register gadget device\n"); 2562 goto err; 2563 } 2564 2565 ret = driver->bind(&hsotg->gadget); 2566 if (ret) { 2567 dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name); 2568 2569 hsotg->gadget.dev.driver = NULL; 2570 hsotg->driver = NULL; 2571 goto err; 2572 } 2573 2574 /* we must now enable ep0 ready for host detection and then 2575 * set configuration. 
*/ 2576 2577 s3c_hsotg_corereset(hsotg); 2578 2579 /* set the PLL on, remove the HNP/SRP and set the PHY */ 2580 writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | 2581 (0x5 << 10), hsotg->regs + S3C_GUSBCFG); 2582 2583 /* looks like soft-reset changes state of FIFOs */ 2584 s3c_hsotg_init_fifo(hsotg); 2585 2586 __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon); 2587 2588 writel(1 << 18 | S3C_DCFG_DevSpd_HS, hsotg->regs + S3C_DCFG); 2589 2590 writel(S3C_GINTSTS_DisconnInt | S3C_GINTSTS_SessReqInt | 2591 S3C_GINTSTS_ConIDStsChng | S3C_GINTSTS_USBRst | 2592 S3C_GINTSTS_EnumDone | S3C_GINTSTS_OTGInt | 2593 S3C_GINTSTS_USBSusp | S3C_GINTSTS_WkUpInt | 2594 S3C_GINTSTS_GOUTNakEff | S3C_GINTSTS_GINNakEff | 2595 S3C_GINTSTS_ErlySusp, 2596 hsotg->regs + S3C_GINTMSK); 2597 2598 if (using_dma(hsotg)) 2599 writel(S3C_GAHBCFG_GlblIntrEn | S3C_GAHBCFG_DMAEn | 2600 S3C_GAHBCFG_HBstLen_Incr4, 2601 hsotg->regs + S3C_GAHBCFG); 2602 else 2603 writel(S3C_GAHBCFG_GlblIntrEn, hsotg->regs + S3C_GAHBCFG); 2604 2605 /* Enabling INTknTXFEmpMsk here seems to be a big mistake, we end 2606 * up being flooded with interrupts if the host is polling the 2607 * endpoint to try and read data. */ 2608 2609 writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk | 2610 S3C_DIEPMSK_INTknEPMisMsk | 2611 S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk | 2612 ((hsotg->dedicated_fifos) ? S3C_DIEPMSK_TxFIFOEmpty : 0), 2613 hsotg->regs + S3C_DIEPMSK); 2614 2615 /* don't need XferCompl, we get that from RXFIFO in slave mode. In 2616 * DMA mode we may need this. */ 2617 writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk | 2618 S3C_DOEPMSK_EPDisbldMsk | 2619 (using_dma(hsotg) ? 
(S3C_DIEPMSK_XferComplMsk | 2620 S3C_DIEPMSK_TimeOUTMsk) : 0), 2621 hsotg->regs + S3C_DOEPMSK); 2622 2623 writel(0, hsotg->regs + S3C_DAINTMSK); 2624 2625 dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 2626 readl(hsotg->regs + S3C_DIEPCTL0), 2627 readl(hsotg->regs + S3C_DOEPCTL0)); 2628 2629 /* enable in and out endpoint interrupts */ 2630 s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt); 2631 2632 /* Enable the RXFIFO when in slave mode, as this is how we collect 2633 * the data. In DMA mode, we get events from the FIFO but also 2634 * things we cannot process, so do not use it. */ 2635 if (!using_dma(hsotg)) 2636 s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_RxFLvl); 2637 2638 /* Enable interrupts for EP0 in and out */ 2639 s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1); 2640 s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1); 2641 2642 __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone); 2643 udelay(10); /* see openiboot */ 2644 __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone); 2645 2646 dev_info(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL)); 2647 2648 /* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by 2649 writing to the EPCTL register.. 
*/ 2650 2651 /* set to read 1 8byte packet */ 2652 writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) | 2653 S3C_DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0); 2654 2655 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) | 2656 S3C_DxEPCTL_CNAK | S3C_DxEPCTL_EPEna | 2657 S3C_DxEPCTL_USBActEp, 2658 hsotg->regs + S3C_DOEPCTL0); 2659 2660 /* enable, but don't activate EP0in */ 2661 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) | 2662 S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DIEPCTL0); 2663 2664 s3c_hsotg_enqueue_setup(hsotg); 2665 2666 dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 2667 readl(hsotg->regs + S3C_DIEPCTL0), 2668 readl(hsotg->regs + S3C_DOEPCTL0)); 2669 2670 /* clear global NAKs */ 2671 writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK, 2672 hsotg->regs + S3C_DCTL); 2673 2674 /* must be at-least 3ms to allow bus to see disconnect */ 2675 msleep(3); 2676 2677 /* remove the soft-disconnect and let's go */ 2678 __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon); 2679 2680 /* report to the user, and return */ 2681 2682 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name); 2683 return 0; 2684 2685err: 2686 hsotg->driver = NULL; 2687 hsotg->gadget.dev.driver = NULL; 2688 return ret; 2689} 2690EXPORT_SYMBOL(usb_gadget_register_driver); 2691 2692int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 2693{ 2694 struct s3c_hsotg *hsotg = our_hsotg; 2695 int ep; 2696 2697 if (!hsotg) 2698 return -ENODEV; 2699 2700 if (!driver || driver != hsotg->driver || !driver->unbind) 2701 return -EINVAL; 2702 2703 /* all endpoints should be shutdown */ 2704 for (ep = 0; ep < S3C_HSOTG_EPS; ep++) 2705 s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); 2706 2707 call_gadget(hsotg, disconnect); 2708 2709 driver->unbind(&hsotg->gadget); 2710 hsotg->driver = NULL; 2711 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 2712 2713 device_del(&hsotg->gadget.dev); 2714 2715 dev_info(hsotg->dev, "unregistered gadget driver '%s'\n", 2716 driver->driver.name); 

	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);

/* usb_gadget_ops.get_frame: report the current USB frame number */
static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	return s3c_hsotg_read_frameno(to_hsotg(gadget));
}

static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
	.get_frame	= s3c_hsotg_gadget_getframe,
};

/**
 * s3c_hsotg_initep - initialise a single endpoint
 * @hsotg: The device state.
 * @hs_ep: The endpoint to be initialised.
 * @epnum: The endpoint number
 *
 * Initialise the given endpoint (as part of the probe and device state
 * creation) to give to the gadget driver. Setup the endpoint name, any
 * direction information and other state that may be required.
 */
static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
				       struct s3c_hsotg_ep *hs_ep,
				       int epnum)
{
	u32 ptxfifo;
	char *dir;

	/* fixed direction scheme: even endpoints are OUT, odd are IN,
	 * ep0 is bidirectional */
	if (epnum == 0)
		dir = "";
	else if ((epnum % 2) == 0) {
		dir = "out";
	} else {
		dir = "in";
		hs_ep->dir_in = 1;
	}

	hs_ep->index = epnum;

	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);

	INIT_LIST_HEAD(&hs_ep->queue);
	INIT_LIST_HEAD(&hs_ep->ep.ep_list);

	spin_lock_init(&hs_ep->lock);

	/* add to the list of endpoints known by the gadget driver */
	if (epnum)
		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);

	hs_ep->parent = hsotg;
	hs_ep->ep.name = hs_ep->name;
	hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
	hs_ep->ep.ops = &s3c_hsotg_ep_ops;

	/* Read the FIFO size for the Periodic TX FIFO, even if we're
	 * an OUT endpoint, we may as well do this if in future the
	 * code is changed to make each endpoint's direction changeable.
	 */

	ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum));
	hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo) * 4;

	/* if we're using dma, we need to set the next-endpoint pointer
	 * to be something valid.
	 */

	if (using_dma(hsotg)) {
		u32 next = S3C_DxEPCTL_NextEp((epnum + 1) % 15);
		writel(next, hsotg->regs + S3C_DIEPCTL(epnum));
		writel(next, hsotg->regs + S3C_DOEPCTL(epnum));
	}
}

/**
 * s3c_hsotg_otgreset - reset the OtG phy block
 * @hsotg: The host state.
 *
 * Power up the phy, set the basic configuration and start the PHY.
 */
static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)
{
	struct clk *xusbxti;
	u32 pwr, osc;

	pwr = readl(S3C_PHYPWR);
	pwr &= ~0x19;
	writel(pwr, S3C_PHYPWR);
	mdelay(1);

	osc = hsotg->plat->is_osc ? S3C_PHYCLK_EXT_OSC : 0;

	/* select the PHY reference clock divider from the xusbxti rate,
	 * falling back to the 48MHz default if the clock is absent */
	xusbxti = clk_get(hsotg->dev, "xusbxti");
	if (xusbxti && !IS_ERR(xusbxti)) {
		switch (clk_get_rate(xusbxti)) {
		case 12*MHZ:
			osc |= S3C_PHYCLK_CLKSEL_12M;
			break;
		case 24*MHZ:
			osc |= S3C_PHYCLK_CLKSEL_24M;
			break;
		default:
		case 48*MHZ:
			/* default reference clock */
			break;
		}
		clk_put(xusbxti);
	}

	writel(osc | 0x10, S3C_PHYCLK);

	/* issue a full set of resets to the otg and core */

	writel(S3C_RSTCON_PHY, S3C_RSTCON);
	udelay(20);	/* at-least 10uS */
	writel(0, S3C_RSTCON);
}


/* s3c_hsotg_init - basic one-off core setup done at probe time */
static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
{
	u32 cfg4;

	/* unmask subset of endpoint interrupts */

	writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
	       S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
	       hsotg->regs + S3C_DIEPMSK);

	writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
	       S3C_DOEPMSK_EPDisbldMsk | S3C_DOEPMSK_XferComplMsk,
	       hsotg->regs + S3C_DOEPMSK);

	writel(0, hsotg->regs +
	       S3C_DAINTMSK);

	/* Be in disconnected state until gadget is registered */
	__orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);

	if (0) {
		/* post global nak until we're ready */
		writel(S3C_DCTL_SGNPInNAK | S3C_DCTL_SGOUTNak,
		       hsotg->regs + S3C_DCTL);
	}

	/* setup fifos */

	dev_info(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		 readl(hsotg->regs + S3C_GRXFSIZ),
		 readl(hsotg->regs + S3C_GNPTXFSIZ));

	s3c_hsotg_init_fifo(hsotg);

	/* set the PLL on, remove the HNP/SRP and set the PHY */
	writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10),
	       hsotg->regs + S3C_GUSBCFG);

	writel(using_dma(hsotg) ? S3C_GAHBCFG_DMAEn : 0x0,
	       hsotg->regs + S3C_GAHBCFG);

	/* check hardware configuration */

	/* NOTE(review): 0x50 appears to be a GHWCFG-style register and
	 * bit 25 the dedicated-fifo capability - confirm against the
	 * block's datasheet */
	cfg4 = readl(hsotg->regs + 0x50);
	hsotg->dedicated_fifos = (cfg4 >> 25) & 1;

	dev_info(hsotg->dev, "%s fifos\n",
		 hsotg->dedicated_fifos ? "dedicated" : "shared");
}

/* s3c_hsotg_dump - log the main core/endpoint register state for debug */
static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
{
	struct device *dev = hsotg->dev;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
		 readl(regs + S3C_DCFG), readl(regs + S3C_DCTL),
		 readl(regs + S3C_DIEPMSK));

	dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
		 readl(regs + S3C_GAHBCFG), readl(regs + 0x44));

	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		 readl(regs + S3C_GRXFSIZ), readl(regs + S3C_GNPTXFSIZ));

	/* show periodic fifo settings */

	for (idx = 1; idx <= 15; idx++) {
		val = readl(regs + S3C_DPTXFSIZn(idx));
		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
			 val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
			 val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
	}

	for (idx = 0; idx < 15; idx++) {
		dev_info(dev,
			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
			 readl(regs + S3C_DIEPCTL(idx)),
			 readl(regs + S3C_DIEPTSIZ(idx)),
			 readl(regs + S3C_DIEPDMA(idx)));

		val = readl(regs + S3C_DOEPCTL(idx));
		dev_info(dev,
			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
			 idx, readl(regs + S3C_DOEPCTL(idx)),
			 readl(regs + S3C_DOEPTSIZ(idx)),
			 readl(regs + S3C_DOEPDMA(idx)));

	}

	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
		 readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
}


/**
 * state_show - debugfs: show overall driver and device state.
 * @seq: The seq file to write to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the overall state of the hardware and
 * some general information about each of the endpoints available
 * to the system.
 */
static int state_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	int idx;

	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
		   readl(regs + S3C_DCFG),
		   readl(regs + S3C_DCTL),
		   readl(regs + S3C_DSTS));

	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
		   readl(regs + S3C_DIEPMSK), readl(regs + S3C_DOEPMSK));

	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
		   readl(regs + S3C_GINTMSK),
		   readl(regs + S3C_GINTSTS));

	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
		   readl(regs + S3C_DAINTMSK),
		   readl(regs + S3C_DAINT));

	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
		   readl(regs + S3C_GNPTXSTS),
		   readl(regs + S3C_GRXSTSR));

	seq_printf(seq, "\nEndpoint status:\n");

	for (idx = 0; idx < 15; idx++) {
		u32 in, out;

		in = readl(regs + S3C_DIEPCTL(idx));
		out = readl(regs + S3C_DOEPCTL(idx));

		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
			   idx, in, out);

		in = readl(regs + S3C_DIEPTSIZ(idx));
		out =
		      readl(regs + S3C_DOEPTSIZ(idx));

		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
			   in, out);

		seq_printf(seq, "\n");
	}

	return 0;
}

static int state_open(struct inode *inode, struct file *file)
{
	return single_open(file, state_show, inode->i_private);
}

static const struct file_operations state_fops = {
	.owner		= THIS_MODULE,
	.open		= state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/**
 * fifo_show - debugfs: show the fifo information
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * Show the FIFO information for the overall fifo and all the
 * periodic transmission FIFOs.
*/
static int fifo_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	seq_printf(seq, "Non-periodic FIFOs:\n");
	seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + S3C_GRXFSIZ));

	val = readl(regs + S3C_GNPTXFSIZ);
	seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
		   val >> S3C_GNPTXFSIZ_NPTxFDep_SHIFT,
		   val & S3C_GNPTXFSIZ_NPTxFStAddr_MASK);

	seq_printf(seq, "\nPeriodic TXFIFOs:\n");

	for (idx = 1; idx <= 15; idx++) {
		val = readl(regs + S3C_DPTXFSIZn(idx));

		seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
			   val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
			   val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
	}

	return 0;
}

static int fifo_open(struct inode *inode, struct file *file)
{
	return single_open(file, fifo_show, inode->i_private);
}

static const struct file_operations fifo_fops = {
	.owner		= THIS_MODULE,
	.open		= fifo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};


/* decode_direction - map the dir_in flag to a printable string */
static const char *decode_direction(int is_in)
{
	return is_in ? "in" : "out";
}

/**
 * ep_show - debugfs: show the state of an endpoint.
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the state of the given endpoint (one is
 * registered for each available).
*/
static int ep_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg_ep *ep = seq->private;
	struct s3c_hsotg *hsotg = ep->parent;
	struct s3c_hsotg_req *req;
	void __iomem *regs = hsotg->regs;
	int index = ep->index;
	int show_limit = 15;	/* cap on queued requests printed */
	unsigned long flags;

	seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
		   ep->index, ep->ep.name, decode_direction(ep->dir_in));

	/* first show the register state */

	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
		   readl(regs + S3C_DIEPCTL(index)),
		   readl(regs + S3C_DOEPCTL(index)));

	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
		   readl(regs + S3C_DIEPDMA(index)),
		   readl(regs + S3C_DOEPDMA(index)));

	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
		   readl(regs + S3C_DIEPINT(index)),
		   readl(regs + S3C_DOEPINT(index)));

	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
		   readl(regs + S3C_DIEPTSIZ(index)),
		   readl(regs + S3C_DOEPTSIZ(index)));

	seq_printf(seq, "\n");
	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
	seq_printf(seq, "total_data=%ld\n", ep->total_data);

	seq_printf(seq, "request list (%p,%p):\n",
		   ep->queue.next, ep->queue.prev);

	spin_lock_irqsave(&ep->lock, flags);

	list_for_each_entry(req, &ep->queue, queue) {
		if (--show_limit < 0) {
			seq_printf(seq, "not showing more requests...\n");
			break;
		}

		/* '*' marks the request currently loaded in hardware */
		seq_printf(seq, "%c req %p: %d bytes @%p, ",
			   req == ep->req ?
			   '*' : ' ',
			   req, req->req.length, req->req.buf);
		seq_printf(seq, "%d done, res %d\n",
			   req->req.actual, req->req.status);
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	return single_open(file, ep_show, inode->i_private);
}

static const struct file_operations ep_fops = {
	.owner		= THIS_MODULE,
	.open		= ep_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/**
 * s3c_hsotg_create_debug - create debugfs directory and files
 * @hsotg: The driver state
 *
 * Create the debugfs files to allow the user to get information
 * about the state of the system. The directory name is created
 * with the same name as the device itself, in case we end up
 * with multiple blocks in future systems.
*/
static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
{
	struct dentry *root;
	unsigned epidx;

	root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
	hsotg->debug_root = root;
	if (IS_ERR(root)) {
		dev_err(hsotg->dev, "cannot create debug root\n");
		return;
	}

	/* create general state file */

	hsotg->debug_file = debugfs_create_file("state", 0444, root,
						hsotg, &state_fops);

	if (IS_ERR(hsotg->debug_file))
		dev_err(hsotg->dev, "%s: failed to create state\n", __func__);

	hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
						hsotg, &fifo_fops);

	if (IS_ERR(hsotg->debug_fifo))
		dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);

	/* create one file for each endpoint */

	for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];

		ep->debugfs = debugfs_create_file(ep->name, 0444,
						  root, ep, &ep_fops);

		if (IS_ERR(ep->debugfs))
			dev_err(hsotg->dev, "failed to create %s debug file\n",
				ep->name);
	}
}

/**
 * s3c_hsotg_delete_debug - cleanup debugfs entries
 * @hsotg: The driver state
 *
 * Cleanup (remove) the debugfs files for use on module exit.
*/
static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
{
	unsigned epidx;

	for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
		debugfs_remove(ep->debugfs);
	}

	debugfs_remove(hsotg->debug_file);
	debugfs_remove(hsotg->debug_fifo);
	debugfs_remove(hsotg->debug_root);
}

/**
 * s3c_hsotg_gate - set the hardware gate for the block
 * @pdev: The device we bound to
 * @on: On or off.
 *
 * Set the hardware gate setting into the block. If we end up on
 * something other than an S3C64XX, then we might need to change this
 * to using a platform data callback, or some other mechanism.
 */
static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
{
	unsigned long flags;
	u32 others;

	/* read-modify-write of a shared SoC register; done with
	 * interrupts off to keep the update atomic on this CPU */
	local_irq_save(flags);

	others = __raw_readl(S3C64XX_OTHERS);
	if (on)
		others |= S3C64XX_OTHERS_USBMASK;
	else
		others &= ~S3C64XX_OTHERS_USBMASK;
	__raw_writel(others, S3C64XX_OTHERS);

	local_irq_restore(flags);
}

/* fallback platform data used when the board supplies none */
static struct s3c_hsotg_plat s3c_hsotg_default_pdata;

static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
{
	struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct s3c_hsotg *hsotg;
	struct resource *res;
	int epnum;
	int ret;

	if (!plat)
		plat = &s3c_hsotg_default_pdata;

	/* single allocation for the device state plus its endpoint array */
	hsotg = kzalloc(sizeof(struct s3c_hsotg) +
			sizeof(struct s3c_hsotg_ep) * S3C_HSOTG_EPS,
			GFP_KERNEL);
	if (!hsotg) {
		dev_err(dev, "cannot get memory\n");
		return -ENOMEM;
	}

3257 hsotg->dev = dev; 3258 hsotg->plat = plat; 3259 3260 hsotg->clk = clk_get(&pdev->dev, "otg"); 3261 if (IS_ERR(hsotg->clk)) { 3262 dev_err(dev, "cannot get otg clock\n"); 3263 ret = -EINVAL; 3264 goto err_mem; 3265 } 3266 3267 platform_set_drvdata(pdev, hsotg); 3268 3269 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3270 if (!res) { 3271 dev_err(dev, "cannot find register resource 0\n"); 3272 ret = -EINVAL; 3273 goto err_clk; 3274 } 3275 3276 hsotg->regs_res = request_mem_region(res->start, resource_size(res), 3277 dev_name(dev)); 3278 if (!hsotg->regs_res) { 3279 dev_err(dev, "cannot reserve registers\n"); 3280 ret = -ENOENT; 3281 goto err_clk; 3282 } 3283 3284 hsotg->regs = ioremap(res->start, resource_size(res)); 3285 if (!hsotg->regs) { 3286 dev_err(dev, "cannot map registers\n"); 3287 ret = -ENXIO; 3288 goto err_regs_res; 3289 } 3290 3291 ret = platform_get_irq(pdev, 0); 3292 if (ret < 0) { 3293 dev_err(dev, "cannot find IRQ\n"); 3294 goto err_regs; 3295 } 3296 3297 hsotg->irq = ret; 3298 3299 ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg); 3300 if (ret < 0) { 3301 dev_err(dev, "cannot claim IRQ\n"); 3302 goto err_regs; 3303 } 3304 3305 dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq); 3306 3307 device_initialize(&hsotg->gadget.dev); 3308 3309 dev_set_name(&hsotg->gadget.dev, "gadget"); 3310 3311 hsotg->gadget.is_dualspeed = 1; 3312 hsotg->gadget.ops = &s3c_hsotg_gadget_ops; 3313 hsotg->gadget.name = dev_name(dev); 3314 3315 hsotg->gadget.dev.parent = dev; 3316 hsotg->gadget.dev.dma_mask = dev->dma_mask; 3317 3318 /* setup endpoint information */ 3319 3320 INIT_LIST_HEAD(&hsotg->gadget.ep_list); 3321 hsotg->gadget.ep0 = &hsotg->eps[0].ep; 3322 3323 /* allocate EP0 request */ 3324 3325 hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep, 3326 GFP_KERNEL); 3327 if (!hsotg->ctrl_req) { 3328 dev_err(dev, "failed to allocate ctrl req\n"); 3329 goto err_regs; 3330 } 3331 3332 /* reset the system */ 3333 3334 
clk_enable(hsotg->clk); 3335 3336 s3c_hsotg_gate(pdev, true); 3337 3338 s3c_hsotg_otgreset(hsotg); 3339 s3c_hsotg_corereset(hsotg); 3340 s3c_hsotg_init(hsotg); 3341 3342 /* initialise the endpoints now the core has been initialised */ 3343 for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++) 3344 s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum); 3345 3346 s3c_hsotg_create_debug(hsotg); 3347 3348 s3c_hsotg_dump(hsotg); 3349 3350 our_hsotg = hsotg; 3351 return 0; 3352 3353err_regs: 3354 iounmap(hsotg->regs); 3355 3356err_regs_res: 3357 release_resource(hsotg->regs_res); 3358 kfree(hsotg->regs_res); 3359err_clk: 3360 clk_put(hsotg->clk); 3361err_mem: 3362 kfree(hsotg); 3363 return ret; 3364} 3365 3366static int __devexit s3c_hsotg_remove(struct platform_device *pdev) 3367{ 3368 struct s3c_hsotg *hsotg = platform_get_drvdata(pdev); 3369 3370 s3c_hsotg_delete_debug(hsotg); 3371 3372 usb_gadget_unregister_driver(hsotg->driver); 3373 3374 free_irq(hsotg->irq, hsotg); 3375 iounmap(hsotg->regs); 3376 3377 release_resource(hsotg->regs_res); 3378 kfree(hsotg->regs_res); 3379 3380 s3c_hsotg_gate(pdev, false); 3381 3382 clk_disable(hsotg->clk); 3383 clk_put(hsotg->clk); 3384 3385 kfree(hsotg); 3386 return 0; 3387} 3388 3389#define s3c_hsotg_suspend NULL 3390#define s3c_hsotg_resume NULL 3391 3392static struct platform_driver s3c_hsotg_driver = { 3393 .driver = { 3394 .name = "s3c-hsotg", 3395 .owner = THIS_MODULE, 3396 }, 3397 .probe = s3c_hsotg_probe, 3398 .remove = __devexit_p(s3c_hsotg_remove), 3399 .suspend = s3c_hsotg_suspend, 3400 .resume = s3c_hsotg_resume, 3401}; 3402 3403static int __init s3c_hsotg_modinit(void) 3404{ 3405 return platform_driver_register(&s3c_hsotg_driver); 3406} 3407 3408static void __exit s3c_hsotg_modexit(void) 3409{ 3410 platform_driver_unregister(&s3c_hsotg_driver); 3411} 3412 3413module_init(s3c_hsotg_modinit); 3414module_exit(s3c_hsotg_modexit); 3415 3416MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device"); 3417MODULE_AUTHOR("Ben Dooks 
<ben@simtec.co.uk>"); 3418MODULE_LICENSE("GPL"); 3419MODULE_ALIAS("platform:s3c-hsotg"); 3420