/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


/*
 * The i.MX21 USB hardware contains
 * * 32 transfer descriptors (called ETDs)
 * * 4Kb of Data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller)
 *
 * So setting up a transfer involves:
 * * Allocating an ETD
 * * Filling in the ETD with appropriate information
 * * Allocating data memory (and putting the offset in the ETD)
 * * Activating the ETD
 * * Getting an interrupt when done.
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and data memory) situations are handled differently for
 * isochronous and non-isochronous transactions:
 *
 * Non-ISOC transfers are queued if either ETDs or data memory are unavailable.
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and data memory during URB submission
 * (and fail if either is unavailable).
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "imx21-hcd.h"

#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif

static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
	return (struct imx21 *)hcd->hcd_priv;
}


/* =========================================== */
/* Hardware access helpers */
/* =========================================== */

static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) | mask, reg);
}

static inline void clear_register_bits(struct imx21 *imx21,
	u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) & ~mask, reg);
}

static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (readl(reg) & mask)
		writel(mask, reg);
}

static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (!(readl(reg) & mask))
		writel(mask, reg);
}

static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static inline int wrap_frame(int counter)
{
	return counter & 0xFFFF;
}

static inline int frame_after(int frame, int after)
{
	/* handle wrapping like jiffies time_after */
	return (s16)((s16)after - (s16)frame) < 0;
}

static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);

	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}


#include "imx21-dbg.c"

/* =========================================== */
/* ETD management */
/* =========================================== */

static int alloc_etd(struct imx21 *imx21)
{
	int i;
	struct etd_priv *etd = imx21->etd;

	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc == 0) {
			memset(etd, 0, sizeof(imx21->etd[0]));
			etd->alloc = 1;
			debug_etd_allocated(imx21);
			return i;
		}
	}
	return -1;
}

static void disactivate_etd(struct imx21 *imx21, int num)
{
	int etd_mask = (1 << num);
	struct etd_priv *etd = &imx21->etd[num];

	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

	etd->active_count = 0;

	DEBUG_LOG_FRAME(imx21, etd, disactivated);
}

static void reset_etd(struct imx21 *imx21, int num)
{
	struct etd_priv *etd = imx21->etd + num;
	int i;

	disactivate_etd(imx21, num);

	for (i = 0; i < 4; i++)
		etd_writel(imx21, num, i, 0);
	etd->urb = NULL;
	etd->ep = NULL;
	etd->td = NULL;
}

static void free_etd(struct imx21 *imx21, int num)
{
	if (num < 0)
		return;

	if (num >= USB_NUM_ETD) {
		dev_err(imx21->dev, "BAD etd=%d!\n", num);
		return;
	}
	if (imx21->etd[num].alloc == 0) {
		dev_err(imx21->dev, "ETD %d already free!\n", num);
		return;
	}

	debug_etd_freed(imx21);
	reset_etd(imx21, num);
	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}


static void setup_etd_dword0(struct imx21 *imx21,
	int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
	etd_writel(imx21, etd_num, 0,
		((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
		((u32) dir << DW0_DIRECT) |
		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
			1 : 0) << DW0_SPEED) |
		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
		((u32) maxpacket << DW0_MAXPKTSIZ));
}

static void activate_etd(struct imx21 *imx21,
	int etd_num, dma_addr_t dma, u8 dir)
{
	u32 etd_mask = 1 << etd_num;
	struct etd_priv *etd = &imx21->etd[etd_num];

	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

	if (dma) {
		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
		writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
	} else {
		if (dir != TD_DIR_IN) {
			/* need to set for ZLP */
			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
		}
	}

	DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
	if (!etd->active_count) {
		int i;
		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
		etd->disactivated_frame = -1;
		etd->last_int_frame = -1;
		etd->last_req_frame = -1;

		for (i = 0; i < 4; i++)
			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
	}
#endif

	etd->active_count = 1;
	writel(etd_mask, imx21->regs + USBH_ETDENSET);
}

/* =========================================== */
/* Data memory management */
/* =========================================== */

static int alloc_dmem(struct imx21 *imx21, unsigned int size,
		      struct usb_host_endpoint *ep)
{
	unsigned int offset = 0;
	struct imx21_dmem_area *area;
	struct imx21_dmem_area *tmp;

	size += (~size + 1) & 0x3; /* Round to 4 byte multiple */

	if (size > DMEM_SIZE) {
		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
			size, DMEM_SIZE);
		return -EINVAL;
	}

	list_for_each_entry(tmp, &imx21->dmem_list, list) {
		if ((size + offset) < offset)
			goto fail;
		if ((size + offset) <= tmp->offset)
			break;
		offset = tmp->size + tmp->offset;
		if ((offset + size) > DMEM_SIZE)
			goto fail;
	}

	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
	if (area == NULL)
		return -ENOMEM;

	area->ep = ep;
	area->offset = offset;
	area->size = size;
	list_add_tail(&area->list, &tmp->list);
	debug_dmem_allocated(imx21, size);
	return offset;

fail:
	return -ENOMEM;
}

/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
	struct etd_priv *etd, u32 dmem_offset)
{
	struct urb_priv *urb_priv = etd->urb->hcpriv;
	int etd_num = etd - &imx21->etd[0];
	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
		etd_num);
	etd_writel(imx21, etd_num, 1,
		((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

	urb_priv->active = 1;
	activate_etd(imx21, etd_num, etd->dma_handle, dir);
}

static void free_dmem(struct imx21 *imx21, int offset)
{
	struct imx21_dmem_area *area;
	struct etd_priv *etd, *tmp;
	int found = 0;

	list_for_each_entry(area, &imx21->dmem_list, list) {
		if (area->offset == offset) {
			debug_dmem_freed(imx21, area->size);
			list_del(&area->list);
			kfree(area);
			found = 1;
			break;
		}
	}

	if (!found) {
		dev_err(imx21->dev,
			"Trying to free unallocated DMEM %d\n", offset);
		return;
	}

	/* Try again to allocate memory for anything we've queued */
	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
		if (offset >= 0) {
			list_del(&etd->queue);
			activate_queued_etd(imx21, etd, (u32)offset);
		}
	}
}

static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct imx21_dmem_area *area, *tmp;

	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
		if (area->ep == ep) {
			dev_err(imx21->dev,
				"Active DMEM %d for disabled ep=%p\n",
				area->offset, ep);
			list_del(&area->list);
			kfree(area);
		}
	}
}


/* =========================================== */
/* Endpoint handling */
/* =========================================== */
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);

/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int etd_num;
	int i;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		etd_num = ep_priv->etd[i];
		if (etd_num < 0)
			continue;

		ep_priv->etd[i] = -1;
		if (list_empty(&imx21->queue_for_etd)) {
			free_etd(imx21, etd_num);
			continue;
		}

		dev_dbg(imx21->dev,
			"assigning idle etd %d for queued request\n", etd_num);
		ep_priv = list_first_entry(&imx21->queue_for_etd,
			struct ep_priv, queue);
		list_del(&ep_priv->queue);
		reset_etd(imx21, etd_num);
		ep_priv->waiting_etd = 0;
		ep_priv->etd[i] = etd_num;

		if (list_empty(&ep_priv->ep->urb_list)) {
			dev_err(imx21->dev, "No urb for queued ep!\n");
			continue;
		}
		schedule_nonisoc_etd(imx21, list_first_entry(
			&ep_priv->ep->urb_list, struct urb, urb_list));
	}
}

static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = urb->ep->hcpriv;
	struct urb_priv *urb_priv = urb->hcpriv;

	debug_urb_completed(imx21, urb, status);
	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

	kfree(urb_priv->isoc_td);
	kfree(urb->hcpriv);
	urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&imx21->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&imx21->lock);
	if (list_empty(&ep_priv->ep->urb_list))
		ep_idle(imx21, ep_priv);
}

/* =========================================== */
/* ISOC Handling ... */
/* =========================================== */

static void schedule_isoc_etds(struct usb_hcd *hcd,
	struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = ep->hcpriv;
	struct etd_priv *etd;
	struct urb_priv *urb_priv;
	struct td *td;
	int etd_num;
	int i;
	int cur_frame;
	u8 dir;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
		if (list_empty(&ep_priv->td_list))
			break;

		etd_num = ep_priv->etd[i];
		if (etd_num < 0)
			break;

		etd = &imx21->etd[etd_num];
		if (etd->urb)
			continue;

		td = list_entry(ep_priv->td_list.next, struct td, list);
		list_del(&td->list);
		urb_priv = td->urb->hcpriv;

		cur_frame = imx21_hc_get_frame(hcd);
		if (frame_after(cur_frame, td->frame)) {
			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
				cur_frame, td->frame);
			urb_priv->isoc_status = -EXDEV;
			td->urb->iso_frame_desc[
				td->isoc_index].actual_length = 0;
			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
			if (--urb_priv->isoc_remaining == 0)
				urb_done(hcd, td->urb, urb_priv->isoc_status);
			goto too_late;
		}

		urb_priv->active = 1;
		etd->td = td;
		etd->ep = td->ep;
		etd->urb = td->urb;
		etd->len = td->len;

		debug_isoc_submitted(imx21, cur_frame, td);

		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
		etd_writel(imx21, etd_num, 2,
			(TD_NOTACCESSED << DW2_COMPCODE) |
			((td->frame & 0xFFFF) << DW2_STARTFRM));
		etd_writel(imx21, etd_num, 3,
			(TD_NOTACCESSED << DW3_COMPCODE0) |
			(td->len << DW3_PKTLEN0));

		activate_etd(imx21, etd_num, td->data, dir);
	}
}

static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct etd_priv *etd = imx21->etd + etd_num;
	struct td *td = etd->td;
	struct usb_host_endpoint *ep = etd->ep;
	int isoc_index = td->isoc_index;
	unsigned int pipe = urb->pipe;
	int dir_in = usb_pipein(pipe);
	int cc;
	int bytes_xfrd;

	disactivate_etd(imx21, etd_num);

	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

	/* Input doesn't always fill the buffer, don't generate an error
	 * when this happens.
	 */
	if (dir_in && (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc == TD_NOTACCESSED)
		bytes_xfrd = 0;

	debug_isoc_completed(imx21,
		imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
	if (cc) {
		urb_priv->isoc_status = -EXDEV;
		dev_dbg(imx21->dev,
			"bad iso cc=0x%X frame=%d sched frame=%d "
			"cnt=%d len=%d urb=%p etd=%d index=%d\n",
			cc, imx21_hc_get_frame(hcd), td->frame,
			bytes_xfrd, td->len, urb, etd_num, isoc_index);
	}

	if (dir_in)
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);

	urb->actual_length += bytes_xfrd;
	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

	etd->td = NULL;
	etd->urb = NULL;
	etd->ep = NULL;

	if (--urb_priv->isoc_remaining == 0)
		urb_done(hcd, urb, urb_priv->isoc_status);

	schedule_isoc_etds(hcd, ep);
}

static struct ep_priv *alloc_isoc_ep(
	struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct ep_priv *ep_priv;
	int i;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (ep_priv == NULL)
		return NULL;

	/* Allocate the ETDs */
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		ep_priv->etd[i] = alloc_etd(imx21);
		if (ep_priv->etd[i] < 0) {
			int j;
			dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
			for (j = 0; j < i; j++)
				free_etd(imx21, ep_priv->etd[j]);
			goto alloc_etd_failed;
		}
		imx21->etd[ep_priv->etd[i]].ep = ep;
	}

	INIT_LIST_HEAD(&ep_priv->td_list);
	ep_priv->ep = ep;
	ep->hcpriv = ep_priv;
	return ep_priv;

alloc_etd_failed:
	kfree(ep_priv);
	return NULL;
}

static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
				     struct usb_host_endpoint *ep,
				     struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct urb_priv *urb_priv;
	unsigned long flags;
	struct ep_priv *ep_priv;
	struct td *td = NULL;
	int i;
	int ret;
	int cur_frame;
	u16 maxpacket;

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (urb_priv == NULL)
		return -ENOMEM;

	urb_priv->isoc_td = kzalloc(
		sizeof(struct td) * urb->number_of_packets, mem_flags);
	if (urb_priv->isoc_td == NULL) {
		ret = -ENOMEM;
		goto alloc_td_failed;
	}

	spin_lock_irqsave(&imx21->lock, flags);

	if (ep->hcpriv == NULL) {
		ep_priv = alloc_isoc_ep(imx21, ep);
		if (ep_priv == NULL) {
			ret = -ENOMEM;
			goto alloc_ep_failed;
		}
	} else {
		ep_priv = ep->hcpriv;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto link_failed;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	/* allocate data memory for largest packets if not already done */
	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
			/* not sure if this can really occur.... */
			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
				etd->dmem_size, maxpacket);
			ret = -EMSGSIZE;
			goto alloc_dmem_failed;
		}

		if (etd->dmem_size == 0) {
			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
			if (etd->dmem_offset < 0) {
				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
				ret = -EAGAIN;
				goto alloc_dmem_failed;
			}
			etd->dmem_size = maxpacket;
		}
	}

	/* calculate frame */
	cur_frame = imx21_hc_get_frame(hcd);
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&ep_priv->td_list))
			urb->start_frame = cur_frame + 5;
		else
			urb->start_frame = list_entry(
				ep_priv->td_list.prev,
				struct td, list)->frame + urb->interval;
	}
	urb->start_frame = wrap_frame(urb->start_frame);
	if (frame_after(cur_frame, urb->start_frame)) {
		dev_dbg(imx21->dev,
			"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
			urb->start_frame, cur_frame,
			(urb->transfer_flags & URB_ISO_ASAP) != 0);
		urb->start_frame = wrap_frame(cur_frame + 1);
	}

	/* set up transfers */
	td = urb_priv->isoc_td;
	for (i = 0; i < urb->number_of_packets; i++, td++) {
		td->ep = ep;
		td->urb = urb;
		td->len = urb->iso_frame_desc[i].length;
		td->isoc_index = i;
		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
		td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
		list_add_tail(&td->list, &ep_priv->td_list);
	}

	urb_priv->isoc_remaining = urb->number_of_packets;
	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
		urb->number_of_packets, urb->start_frame, td->frame);

	debug_urb_submitted(imx21, urb);
	schedule_isoc_etds(hcd, ep);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

alloc_dmem_failed:
	usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_ep_failed:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv->isoc_td);

alloc_td_failed:
	kfree(urb_priv);
	return ret;
}

static void dequeue_isoc_urb(struct imx21 *imx21,
	struct urb *urb, struct ep_priv *ep_priv)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td, *tmp;
	int i;

	if (urb_priv->active) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			int etd_num = ep_priv->etd[i];
			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
				struct etd_priv *etd = imx21->etd + etd_num;

				reset_etd(imx21, etd_num);
				if (etd->dmem_size)
					free_dmem(imx21, etd->dmem_offset);
				etd->dmem_size = 0;
			}
		}
	}

	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
		if (td->urb == urb) {
			dev_vdbg(imx21->dev, "removing td %p\n", td);
			list_del(&td->list);
		}
	}
}

/* =========================================== */
/* NON ISOC Handling ... */
/* =========================================== */

static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
	unsigned int pipe = urb->pipe;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
	int state = urb_priv->state;
	int etd_num = ep_priv->etd[0];
	struct etd_priv *etd;
	int dmem_offset;
	u32 count;
	u16 etd_buf_size;
	u16 maxpacket;
	u8 dir;
	u8 bufround;
	u8 datatoggle;
	u8 interval = 0;
	u8 relpolpos = 0;

	if (etd_num < 0) {
		dev_err(imx21->dev, "No valid ETD\n");
		return;
	}
	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

	etd = &imx21->etd[etd_num];
	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
	if (!maxpacket)
		maxpacket = 8;

	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
		if (state == US_CTRL_SETUP) {
			dir = TD_DIR_SETUP;
			etd->dma_handle = urb->setup_dma;
			bufround = 0;
			count = 8;
			datatoggle = TD_TOGGLE_DATA0;
		} else {	/* US_CTRL_ACK */
			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
			etd->dma_handle = urb->transfer_dma;
			bufround = 0;
			count = 0;
			datatoggle = TD_TOGGLE_DATA1;
		}
	} else {
		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
		bufround = (dir == TD_DIR_IN) ? 1 : 0;
		etd->dma_handle = urb->transfer_dma;
		if (usb_pipebulk(pipe) && (state == US_BULK0))
			count = 0;
		else
			count = urb->transfer_buffer_length;

		if (usb_pipecontrol(pipe)) {
			datatoggle = TD_TOGGLE_DATA1;
		} else {
			if (usb_gettoggle(
					urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe)))
				datatoggle = TD_TOGGLE_DATA1;
			else
				datatoggle = TD_TOGGLE_DATA0;
		}
	}

	etd->urb = urb;
	etd->ep = urb_priv->ep;
	etd->len = count;

	if (usb_pipeint(pipe)) {
		interval = urb->interval;
		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
	}

	/* Write ETD to device memory */
	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

	etd_writel(imx21, etd_num, 2,
		(u32) interval << DW2_POLINTERV |
		((u32) relpolpos << DW2_RELPOLPOS) |
		((u32) dir << DW2_DIRPID) |
		((u32) bufround << DW2_BUFROUND) |
		((u32) datatoggle << DW2_DATATOG) |
		((u32) TD_NOTACCESSED << DW2_COMPCODE));

	/* DMA will always transfer buffer size even if TOBYCNT in DWORD3
	   is smaller. Make sure we don't overrun the buffer!
	 */
	if (count && count < maxpacket)
		etd_buf_size = count;
	else
		etd_buf_size = maxpacket;

	etd_writel(imx21, etd_num, 3,
		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);

	if (!count)
		etd->dma_handle = 0;

	/* allocate x and y buffer space at once */
	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
	dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
	if (dmem_offset < 0) {
		/* Setup everything we can in HW and update when we get DMEM */
		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);

		dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
		debug_urb_queued_for_dmem(imx21, urb);
		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
		return;
	}

	etd_writel(imx21, etd_num, 1,
		(((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
		(u32) dmem_offset);

	urb_priv->active = 1;

	/* enable the ETD to kick off transfer */
	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
	activate_etd(imx21, etd_num, etd->dma_handle, dir);

}

static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct etd_priv *etd = &imx21->etd[etd_num];
	u32 etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	int dir;
	u16 xbufaddr;
	int cc;
	u32 bytes_xfrd;
	int etd_done;

	disactivate_etd(imx21, etd_num);

	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
	xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

	/* save toggle carry */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		      usb_pipeout(urb->pipe),
		      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

	if (dir == TD_DIR_IN) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
	}
	free_dmem(imx21, xbufaddr);

	urb->error_count = 0;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
		&& (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc != 0)
		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

	etd_done = (cc_to_error[cc] != 0);	/* stop if error */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		switch (urb_priv->state) {
		case US_CTRL_SETUP:
			if (urb->transfer_buffer_length > 0)
				urb_priv->state = US_CTRL_DATA;
			else
				urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_DATA:
			urb->actual_length += bytes_xfrd;
			urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_ACK:
			etd_done = 1;
			break;
		default:
			dev_err(imx21->dev,
				"Invalid pipe state %d\n", urb_priv->state);
			etd_done = 1;
			break;
		}
		break;

	case PIPE_BULK:
		urb->actual_length += bytes_xfrd;
		if ((urb_priv->state == US_BULK)
		    && (urb->transfer_flags & URB_ZERO_PACKET)
		    && urb->transfer_buffer_length > 0
		    && ((urb->transfer_buffer_length %
			 usb_maxpacket(urb->dev, urb->pipe,
				       usb_pipeout(urb->pipe))) == 0)) {
			/* need a 0-packet */
			urb_priv->state = US_BULK0;
		} else {
			etd_done = 1;
		}
		break;

	case PIPE_INTERRUPT:
		urb->actual_length += bytes_xfrd;
		etd_done = 1;
		break;
	}

	if (!etd_done) {
		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
		schedule_nonisoc_etd(imx21, urb);
	} else {
		struct usb_host_endpoint *ep = urb->ep;

		urb_done(hcd, urb, cc_to_error[cc]);
		etd->urb = NULL;

		if (!list_empty(&ep->urb_list)) {
			urb = list_first_entry(&ep->urb_list,
					       struct urb, urb_list);
			dev_vdbg(imx21->dev, "next URB %p\n", urb);
			schedule_nonisoc_etd(imx21, urb);
		}
	}
}

static struct ep_priv *alloc_ep(void)
{
	int i;
	struct ep_priv *ep_priv;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; ++i)
		ep_priv->etd[i] = -1;

	return ep_priv;
}

static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
				struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;
	struct ep_priv *ep_priv;
	struct etd_priv *etd;
	int ret;
	unsigned long flags;
	int new_ep = 0;

	dev_vdbg(imx21->dev,
		"enqueue urb=%p ep=%p len=%d "
		"buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
		urb, ep,
		urb->transfer_buffer_length,
		urb->transfer_buffer, urb->transfer_dma,
		urb->setup_packet, urb->setup_dma);

	if (usb_pipeisoc(urb->pipe))
		return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	spin_lock_irqsave(&imx21->lock, flags);

	ep_priv = ep->hcpriv;
	if (ep_priv == NULL) {
		ep_priv = alloc_ep();
		if (!ep_priv) {
			ret = -ENOMEM;
			goto failed_alloc_ep;
		}
		ep->hcpriv = ep_priv;
		ep_priv->ep = ep;
		new_ep = 1;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto failed_link;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb_priv->state = US_CTRL_SETUP;
		break;
	case PIPE_BULK:
		urb_priv->state = US_BULK;
		break;
	}

	debug_urb_submitted(imx21, urb);
	if (ep_priv->etd[0] < 0) {
		if (ep_priv->waiting_etd) {
			dev_dbg(imx21->dev,
				"no ETD available already queued %p\n",
				ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			goto out;
		}
		ep_priv->etd[0] = alloc_etd(imx21);
		if (ep_priv->etd[0] < 0) {
			dev_dbg(imx21->dev,
				"no ETD available queueing %p\n", ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
			ep_priv->waiting_etd = 1;
			goto out;
		}
	}

	/* Schedule if no URB already active for this endpoint */
	etd = &imx21->etd[ep_priv->etd[0]];
	if (etd->urb == NULL) {
		DEBUG_LOG_FRAME(imx21, etd, last_req);
		schedule_nonisoc_etd(imx21, urb);
	}

out:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

failed_link:
failed_alloc_ep:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv);
	return ret;
}

static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				int status)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct usb_host_endpoint *ep;
	struct ep_priv *ep_priv;
	struct urb_priv *urb_priv = urb->hcpriv;
	int ret = -EINVAL;

	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
		urb, usb_pipeisoc(urb->pipe), status);

	spin_lock_irqsave(&imx21->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto fail;
	ep = urb_priv->ep;
	ep_priv = ep->hcpriv;

	debug_urb_unlinked(imx21, urb);

	if (usb_pipeisoc(urb->pipe)) {
		dequeue_isoc_urb(imx21, urb, ep_priv);
		schedule_isoc_etds(hcd, ep);
	} else if (urb_priv->active) {
		int etd_num = ep_priv->etd[0];
		if (etd_num != -1) {
			disactivate_etd(imx21, etd_num);
			free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
			imx21->etd[etd_num].urb = NULL;
		}
	}

	urb_done(hcd, urb, status);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

fail:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return ret;
}

/* =========================================== */
/* Interrupt dispatch */
/* =========================================== */

static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
	int etd_num;
	int enable_sof_int = 0;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
		u32 etd_mask = 1 << etd_num;
		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
		struct etd_priv *etd = &imx21->etd[etd_num];


		if (done) {
			DEBUG_LOG_FRAME(imx21, etd, last_int);
		} else {
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait)
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (eg mass storage) even just test1 will hang without
 * the kludge.
 */
			u32 dword0;
			int cc;

			if (etd->active_count && !enabled)	/* suspicious... */
				enable_sof_int = 1;

			if (!sof || enabled || !etd->active_count)
				continue;

			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
			if (cc == TD_NOTACCESSED)
				continue;

			if (++etd->active_count < 10)
				continue;

			dword0 = etd_readl(imx21, etd_num, 0);
			dev_dbg(imx21->dev,
				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
				etd_num, dword0 & 0x7F,
				(dword0 >> DW0_ENDPNT) & 0x0F,
				cc);

#ifdef DEBUG
			dev_dbg(imx21->dev,
				"frame: act=%d disact=%d"
				" int=%d req=%d cur=%d\n",
				etd->activated_frame,
				etd->disactivated_frame,
				etd->last_int_frame,
				etd->last_req_frame,
				readl(imx21->regs + USBH_FRMNUB));
			imx21->debug_unblocks++;
#endif
			etd->active_count = 0;
/* End of kludge */
		}

		if (etd->ep == NULL || etd->urb == NULL) {
			dev_dbg(imx21->dev,
				"Interrupt for unexpected etd %d"
				" ep=%p urb=%p\n",
				etd_num, etd->ep, etd->urb);
			disactivate_etd(imx21, etd_num);
			continue;
		}

		if (usb_pipeisoc(etd->urb->pipe))
			isoc_etd_done(hcd, etd->urb, etd_num);
		else
			nonisoc_etd_done(hcd, etd->urb, etd_num);
	}

	/* only enable SOF interrupt if it may be needed for the kludge */
	if (enable_sof_int)
		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
	else
		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);


	spin_unlock_irqrestore(&imx21->lock, flags);
}

static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	u32 ints = readl(imx21->regs + USBH_SYSISR);

	if (ints & USBH_SYSIEN_HERRINT)
		dev_dbg(imx21->dev, "Scheduling error\n");

	if (ints & USBH_SYSIEN_SORINT)
		dev_dbg(imx21->dev, "Scheduling overrun\n");

	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

	writel(ints, imx21->regs + USBH_SYSISR);
	return IRQ_HANDLED;
}

static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
				      struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct ep_priv *ep_priv;
	int i;

	if (ep == NULL)
		return;

	spin_lock_irqsave(&imx21->lock, flags);
	ep_priv = ep->hcpriv;
	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

	if (!list_empty(&ep->urb_list))
		dev_dbg(imx21->dev, "ep's URB list is not empty\n");

	if (ep_priv != NULL) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			if (ep_priv->etd[i] > -1)
				dev_dbg(imx21->dev, "free etd %d for disable\n",
					ep_priv->etd[i]);

			free_etd(imx21, ep_priv->etd[i]);
		}
		kfree(ep_priv);
		ep->hcpriv = NULL;
	}

	for (i = 0; i < USB_NUM_ETD; i++) {
		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
			dev_err(imx21->dev,
				"Active etd %d for disabled ep=%p!\n", i, ep);
			free_etd(imx21, i);
		}
	}
	free_epdmem(imx21, ep);
	spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Hub handling */
/* =========================================== */

static int get_hub_descriptor(struct usb_hcd *hcd,
			      struct usb_hub_descriptor *desc)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	desc->bDescriptorType = 0x29;	/* HUB descriptor */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	desc->bDescLength = 9;
	desc->bPwrOn2PwrGood = 0;
	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
		0x0002 |	/* No power switching */
		0x0010 |	/* No over current protection */
		0);

	desc->bitmap[0] = 1 << 1;
	desc->bitmap[1] = ~0;
	return 0;
}

static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int ports;
	int changed = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);
	ports = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	if (ports > 7) {
		ports = 7;
		dev_err(imx21->dev, "ports %d > 7\n", ports);
	}
	for (i = 0; i < ports; i++) {
		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
			(USBH_PORTSTAT_CONNECTSC |
			 USBH_PORTSTAT_PRTENBLSC |
			 USBH_PORTSTAT_PRTSTATSC |
			 USBH_PORTSTAT_OVRCURIC |
			 USBH_PORTSTAT_PRTRSTSC)) {

			changed = 1;
			buf[0] |= 1 << (i + 1);
		}
	}
	spin_unlock_irqrestore(&imx21->lock, flags);

	if (changed)
		dev_info(imx21->dev, "Hub status changed\n");
	return changed;
}

static int imx21_hc_hub_control(struct usb_hcd *hcd,
				u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int rc = 0;
	u32 status_write = 0;

	switch (typeReq) {
	case ClearHubFeature:
		dev_dbg(imx21->dev, "ClearHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, " OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, " LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case ClearPortFeature:
		dev_dbg(imx21->dev, "ClearPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(imx21->dev, " ENABLE\n");
			status_write = USBH_PORTSTAT_CURCONST;
			break;
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, " SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTOVRCURI;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, " POWER\n");
			status_write = USBH_PORTSTAT_LSDEVCON;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			dev_dbg(imx21->dev, " C_ENABLE\n");
			status_write = USBH_PORTSTAT_PRTENBLSC;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			dev_dbg(imx21->dev, " C_SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSTATSC;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			dev_dbg(imx21->dev, " C_CONNECTION\n");
			status_write = USBH_PORTSTAT_CONNECTSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
			status_write = USBH_PORTSTAT_OVRCURIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			dev_dbg(imx21->dev, " C_RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTSC;
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}

		break;

	case GetHubDescriptor:
		dev_dbg(imx21->dev, "GetHubDescriptor\n");
		rc = get_hub_descriptor(hcd, (void *)buf);
		break;

	case GetHubStatus:
		dev_dbg(imx21->dev, " GetHubStatus\n");
		*(__le32 *) buf = 0;
		break;

	case GetPortStatus:
		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
			wIndex, USBH_PORTSTAT(wIndex - 1));
		*(__le32 *) buf = readl(imx21->regs +
			USBH_PORTSTAT(wIndex - 1));
		break;

	case SetHubFeature:
		dev_dbg(imx21->dev, "SetHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, " OVER_CURRENT\n");
			break;

		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, " LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}

		break;

	case SetPortFeature:
		dev_dbg(imx21->dev, "SetPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, " SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSUSPST;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, " POWER\n");
			status_write = USBH_PORTSTAT_PRTPWRST;
			break;
		case USB_PORT_FEAT_RESET:
			dev_dbg(imx21->dev, " RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTST;
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	default:
		dev_dbg(imx21->dev, " unknown\n");
		rc = -EINVAL;
		break;
	}

	if (status_write)
		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
	return rc;
}

/* =========================================== */
/* Host controller management */
/* =========================================== */

static int imx21_hc_reset(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long timeout;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	/* Reset the Host controller modules */
	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
		imx21->regs + USBOTG_RST_CTRL);

	/* Wait for reset to finish */
	timeout = jiffies + HZ;
	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
		if (time_after(jiffies, timeout)) {
			spin_unlock_irqrestore(&imx21->lock, flags);
			dev_err(imx21->dev, "timeout waiting for reset\n");
			return -ETIMEDOUT;
		}
		spin_unlock_irq(&imx21->lock);
		schedule_timeout_uninterruptible(1);
		spin_lock_irq(&imx21->lock);
	}
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;
}

static int __devinit imx21_hc_start(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	int i, j;
	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
	u32 usb_control = 0;

	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
			USBOTG_HWMODE_HOSTXCVR_MASK);
	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
			USBOTG_HWMODE_OTGXCVR_MASK);

	if (imx21->pdata->host1_txenoe)
		usb_control |= USBCTRL_HOST1_TXEN_OE;

	if (!imx21->pdata->host1_xcverless)
		usb_control |= USBCTRL_HOST1_BYP_TLL;

	if (imx21->pdata->otg_ext_xcvr)
		usb_control |= USBCTRL_OTC_RCV_RXDP;


	spin_lock_irqsave(&imx21->lock, flags);

	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
		imx21->regs + USBOTG_CLK_CTRL);
	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
	writel(usb_control, imx21->regs + USBCTRL);
	writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
		imx21->regs + USB_MISCCONTROL);

	/* Clear the ETDs */
	for (i = 0; i < USB_NUM_ETD; i++)
		for (j = 0; j < 4; j++)
			etd_writel(imx21, i, j, 0);

	/* Take the HC out of reset */
	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
		imx21->regs + USBH_HOST_CTRL);

	/* Enable ports */
	if (imx21->pdata->enable_otg_host)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(0));

	if (imx21->pdata->enable_host1)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(1));

	if (imx21->pdata->enable_host2)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(2));


	hcd->state = HC_STATE_RUNNING;

	/* Enable host controller interrupts */
	set_register_bits(imx21, USBH_SYSIEN,
		USBH_SYSIEN_HERRINT |
		USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}

static void imx21_hc_stop(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	writel(0, imx21->regs + USBH_SYSIEN);
	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
	clear_register_bits(imx21, USBOTG_CLK_CTRL,
		USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
	spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Driver glue */
/* =========================================== */

static struct hc_driver imx21_hc_driver = {
	.description = hcd_name,
	.product_desc = "IMX21 USB Host Controller",
	.hcd_priv_size = sizeof(struct imx21),

	.flags = HCD_USB11,
	.irq = imx21_irq,

	.reset = imx21_hc_reset,
	.start = imx21_hc_start,
	.stop = imx21_hc_stop,

	/* I/O requests */
	.urb_enqueue = imx21_hc_urb_enqueue,
	.urb_dequeue = imx21_hc_urb_dequeue,
	.endpoint_disable = imx21_hc_endpoint_disable,

	/* scheduling support */
	.get_frame_number = imx21_hc_get_frame,

	/* Root hub support */
	.hub_status_data = imx21_hc_hub_status_data,
	.hub_control = imx21_hc_hub_control,

};

static struct mx21_usbh_platform_data default_pdata = {
	.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.enable_host1 = 1,
	.enable_host2 = 1,
	.enable_otg_host = 1,

};

static int imx21_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	remove_debug_files(imx21);
	usb_remove_hcd(hcd);

	if (res != NULL) {
		clk_disable(imx21->clk);
		clk_put(imx21->clk);
		iounmap(imx21->regs);
		release_mem_region(res->start, resource_size(res));
	}

	usb_put_hcd(hcd);
	return 0;
}


static int imx21_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct imx21 *imx21;
	struct resource *res;
	int ret;
	int irq;

	printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	hcd = usb_create_hcd(&imx21_hc_driver,
			&pdev->dev, dev_name(&pdev->dev));
	if (hcd == NULL) {
		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
			dev_name(&pdev->dev));
		return -ENOMEM;
	}

	imx21 = hcd_to_imx21(hcd);
	imx21->dev = &pdev->dev;
	imx21->pdata = pdev->dev.platform_data;
	if (!imx21->pdata)
		imx21->pdata = &default_pdata;

	spin_lock_init(&imx21->lock);
	INIT_LIST_HEAD(&imx21->dmem_list);
	INIT_LIST_HEAD(&imx21->queue_for_etd);
	INIT_LIST_HEAD(&imx21->queue_for_dmem);
	create_debug_files(imx21);

	res = request_mem_region(res->start, resource_size(res), hcd_name);
	if (!res) {
		ret = -EBUSY;
		goto failed_request_mem;
	}

	imx21->regs = ioremap(res->start, resource_size(res));
	if (imx21->regs == NULL) {
		dev_err(imx21->dev, "Cannot map registers\n");
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	/* Enable clock source */
	imx21->clk = clk_get(imx21->dev, NULL);
	if (IS_ERR(imx21->clk)) {
		dev_err(imx21->dev, "no clock found\n");
		ret = PTR_ERR(imx21->clk);
		goto failed_clock_get;
	}

	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
	if (ret)
		goto failed_clock_set;
	ret = clk_enable(imx21->clk);
	if (ret)
		goto failed_clock_enable;

	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);

	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
	if (ret != 0) {
		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
		goto failed_add_hcd;
	}

	return 0;

failed_add_hcd:
	clk_disable(imx21->clk);
failed_clock_enable:
failed_clock_set:
	clk_put(imx21->clk);
failed_clock_get:
	iounmap(imx21->regs);
failed_ioremap:
	release_mem_region(res->start, resource_size(res));
failed_request_mem:
	remove_debug_files(imx21);
	usb_put_hcd(hcd);
	return ret;
}

static struct platform_driver imx21_hcd_driver = {
	.driver = {
		.name = (char *)hcd_name,
	},
	.probe = imx21_probe,
	.remove = imx21_remove,
	.suspend = NULL,
	.resume = NULL,
};

static int __init imx21_hcd_init(void)
{
	return platform_driver_register(&imx21_hcd_driver);
}

static void __exit imx21_hcd_cleanup(void)
{
	platform_driver_unregister(&imx21_hcd_driver);
}

module_init(imx21_hcd_init);
module_exit(imx21_hcd_cleanup);

MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx21-hcd");