Directory: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/host/

Lines Matching refs:oxu (two illustrative sketches of helpers that many of these matches reference follow the listing)

57 #define oxu_dbg(oxu, fmt, args...) \
58 dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
59 #define oxu_err(oxu, fmt, args...) \
60 dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
61 #define oxu_info(oxu, fmt, args...) \
62 dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
64 static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
66 return container_of((void *) oxu, struct usb_hcd, hcd_priv);
84 #define oxu_vdbg(oxu, fmt, args...) /* Nop */
203 #define dbg_status(oxu, label, status) { \
206 oxu_dbg(oxu, "%s\n", _buf); \
209 #define dbg_cmd(oxu, label, command) { \
212 oxu_dbg(oxu, "%s\n", _buf); \
215 #define dbg_port(oxu, label, port, status) { \
218 oxu_dbg(oxu, "%s\n", _buf); \
241 static void ehci_work(struct oxu_hcd *oxu);
261 static inline void timer_action_done(struct oxu_hcd *oxu,
264 clear_bit(action, &oxu->actions);
267 static inline void timer_action(struct oxu_hcd *oxu,
270 if (!test_and_set_bit(action, &oxu->actions)) {
295 && t > oxu->watchdog.expires
296 && timer_pending(&oxu->watchdog))
298 mod_timer(&oxu->watchdog, t);
319 static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
338 static int ehci_halt(struct oxu_hcd *oxu)
340 u32 temp = readl(&oxu->regs->status);
343 writel(0, &oxu->regs->intr_enable);
348 temp = readl(&oxu->regs->command);
350 writel(temp, &oxu->regs->command);
351 return handshake(oxu, &oxu->regs->status,
356 static void tdi_reset(struct oxu_hcd *oxu)
361 reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
368 static int ehci_reset(struct oxu_hcd *oxu)
371 u32 command = readl(&oxu->regs->command);
374 dbg_cmd(oxu, "reset", command);
375 writel(command, &oxu->regs->command);
376 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
377 oxu->next_statechange = jiffies;
378 retval = handshake(oxu, &oxu->regs->command,
384 tdi_reset(oxu);
390 static void ehci_quiesce(struct oxu_hcd *oxu)
395 if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
400 temp = readl(&oxu->regs->command) << 10;
402 if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
404 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
409 temp = readl(&oxu->regs->command);
411 writel(temp, &oxu->regs->command);
414 if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
416 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
421 static int check_reset_complete(struct oxu_hcd *oxu, int index,
425 oxu->reset_done[index] = 0;
431 oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
435 oxu_dbg(oxu, "port %d high speed\n", index + 1);
440 static void ehci_hub_descriptor(struct oxu_hcd *oxu,
443 int ports = HCS_N_PORTS(oxu->hcs_params);
447 desc->bPwrOn2PwrGood = 10; /* oxu 1.0, 2.3.9 says 20ms max */
459 if (HCS_PPC(oxu->hcs_params))
467 static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
475 oxu_err(oxu, "buffer too big (%d)\n", len);
479 spin_lock(&oxu->mem_lock);
490 i += max(a_blocks, (int)oxu->db_used[i])) {
494 if (oxu->db_used[i + j])
501 qtd->buffer = (void *) &oxu->mem->db_pool[i];
505 oxu->db_used[i] = a_blocks;
507 spin_unlock(&oxu->mem_lock);
514 spin_unlock(&oxu->mem_lock);
519 static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
523 spin_lock(&oxu->mem_lock);
525 index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
527 oxu->db_used[index] = 0;
532 spin_unlock(&oxu->mem_lock);
547 static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
552 oxu_buf_free(oxu, qtd);
554 spin_lock(&oxu->mem_lock);
556 index = qtd - &oxu->mem->qtd_pool[0];
557 oxu->qtd_used[index] = 0;
559 spin_unlock(&oxu->mem_lock);
564 static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
569 spin_lock(&oxu->mem_lock);
572 if (!oxu->qtd_used[i])
576 qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
586 oxu->qtd_used[i] = 1;
589 spin_unlock(&oxu->mem_lock);
594 static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
598 spin_lock(&oxu->mem_lock);
600 index = qh - &oxu->mem->qh_pool[0];
601 oxu->qh_used[index] = 0;
603 spin_unlock(&oxu->mem_lock);
611 struct oxu_hcd *oxu = qh->oxu;
615 oxu_dbg(oxu, "unused qh not empty!\n");
619 oxu_qtd_free(oxu, qh->dummy);
620 oxu_qh_free(oxu, qh);
623 static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
628 spin_lock(&oxu->mem_lock);
631 if (!oxu->qh_used[i])
635 qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
639 qh->oxu = oxu;
644 qh->dummy = ehci_qtd_alloc(oxu);
646 oxu_dbg(oxu, "no dummy td\n");
647 oxu->qh_used[i] = 0;
652 oxu->qh_used[i] = 1;
655 spin_unlock(&oxu->mem_lock);
672 static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
676 spin_lock(&oxu->mem_lock);
678 index = murb - &oxu->murb_pool[0];
679 oxu->murb_used[index] = 0;
681 spin_unlock(&oxu->mem_lock);
686 static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
692 spin_lock(&oxu->mem_lock);
695 if (!oxu->murb_used[i])
699 murb = &(oxu->murb_pool)[i];
701 oxu->murb_used[i] = 1;
704 spin_unlock(&oxu->mem_lock);
713 static void ehci_mem_cleanup(struct oxu_hcd *oxu)
715 kfree(oxu->murb_pool);
716 oxu->murb_pool = NULL;
718 if (oxu->async)
719 qh_put(oxu->async);
720 oxu->async = NULL;
722 del_timer(&oxu->urb_timer);
724 oxu->periodic = NULL;
727 kfree(oxu->pshadow);
728 oxu->pshadow = NULL;
733 static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
737 for (i = 0; i < oxu->periodic_size; i++)
738 oxu->mem->frame_list[i] = EHCI_LIST_END;
740 oxu->qh_used[i] = 0;
742 oxu->qtd_used[i] = 0;
744 oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
745 if (!oxu->murb_pool)
749 oxu->murb_used[i] = 0;
751 oxu->async = oxu_qh_alloc(oxu);
752 if (!oxu->async)
755 oxu->periodic = (__le32 *) &oxu->mem->frame_list;
756 oxu->periodic_dma = virt_to_phys(oxu->periodic);
758 for (i = 0; i < oxu->periodic_size; i++)
759 oxu->periodic[i] = EHCI_LIST_END;
762 oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
763 if (oxu->pshadow != NULL)
767 oxu_dbg(oxu, "couldn't init memory\n");
768 ehci_mem_cleanup(oxu);
812 static inline void qh_update(struct oxu_hcd *oxu,
846 static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
861 qh_update(oxu, qh, qtd);
864 static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
895 oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
907 oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
915 static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
916 __releases(oxu->lock)
917 __acquires(oxu->lock)
926 oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
947 oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
956 spin_unlock(&oxu->lock);
957 usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
958 spin_lock(&oxu->lock);
961 static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
962 static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
964 static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
965 static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
973 static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
1016 ehci_urb_done(oxu, last->urb);
1019 oxu_murb_free(oxu, murb);
1021 ehci_urb_done(oxu, last->urb);
1025 oxu_qtd_free(oxu, last);
1054 HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
1060 if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
1095 qtd_copy_status(oxu, urb->complete ?
1119 ehci_urb_done(oxu, last->urb);
1122 oxu_murb_free(oxu, murb);
1124 ehci_urb_done(oxu, last->urb);
1127 oxu_qtd_free(oxu, last);
1140 qh_refresh(oxu, qh);
1148 intr_deschedule(oxu, qh);
1149 (void) qh_schedule(oxu, qh);
1151 unlink_async(oxu, qh);
1168 static void qtd_list_free(struct oxu_hcd *oxu,
1178 oxu_qtd_free(oxu, qtd);
1184 static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
1200 qtd = ehci_qtd_alloc(oxu);
1217 ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
1229 qtd = ehci_qtd_alloc(oxu);
1245 ret = oxu_buf_alloc(oxu, qtd, len);
1275 qtd->hw_alt_next = oxu->async->hw_alt_next;
1285 qtd = ehci_qtd_alloc(oxu);
1289 ret = oxu_buf_alloc(oxu, qtd, len);
1323 qtd = ehci_qtd_alloc(oxu);
1340 qtd_list_free(oxu, urb, head);
1351 static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
1354 struct ehci_qh *qh = oxu_qh_alloc(oxu);
1480 qh_refresh(oxu, qh);
1486 static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
1492 head = oxu->async;
1493 timer_action_done(oxu, TIMER_ASYNC_OFF);
1495 u32 cmd = readl(&oxu->regs->command);
1499 (void)handshake(oxu, &oxu->regs->status,
1502 writel(cmd, &oxu->regs->command);
1503 oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
1510 qh_refresh(oxu, qh);
1532 static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
1540 /* can't sleep here, we have oxu->lock... */
1541 qh = qh_make(oxu, urb, GFP_ATOMIC);
1607 static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
1620 oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
1627 spin_lock_irqsave(&oxu->lock, flags);
1628 if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
1633 qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
1643 qh_link_async(oxu, qh_get(qh));
1645 spin_unlock_irqrestore(&oxu->lock, flags);
1647 qtd_list_free(oxu, urb, qtd_list);
1653 static void end_unlink_async(struct oxu_hcd *oxu)
1655 struct ehci_qh *qh = oxu->reclaim;
1658 timer_action_done(oxu, TIMER_IAA_WATCHDOG);
1666 oxu->reclaim = next;
1667 oxu->reclaim_ready = 0;
1670 qh_completions(oxu, qh);
1673 && HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
1674 qh_link_async(oxu, qh);
1681 if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
1682 && oxu->async->qh_next.qh == NULL)
1683 timer_action(oxu, TIMER_ASYNC_OFF);
1687 oxu->reclaim = NULL;
1688 start_unlink_async(oxu, next);
1693 /* caller must own oxu->lock */
1695 static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
1697 int cmd = readl(&oxu->regs->command);
1701 assert_spin_locked(&oxu->lock);
1702 if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
1708 if (unlikely(qh == oxu->async)) {
1710 if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
1711 && !oxu->reclaim) {
1713 writel(cmd & ~CMD_ASE, &oxu->regs->command);
1716 timer_action_done(oxu, TIMER_ASYNC_OFF);
1722 oxu->reclaim = qh = qh_get(qh);
1724 prev = oxu->async;
1732 if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
1736 end_unlink_async(oxu);
1740 oxu->reclaim_ready = 0;
1742 writel(cmd, &oxu->regs->command);
1743 (void) readl(&oxu->regs->command);
1744 timer_action(oxu, TIMER_IAA_WATCHDOG);
1747 static void scan_async(struct oxu_hcd *oxu)
1752 if (!++(oxu->stamp))
1753 oxu->stamp++;
1754 timer_action_done(oxu, TIMER_ASYNC_SHRINK);
1756 qh = oxu->async->qh_next.qh;
1761 && qh->stamp != oxu->stamp) {
1770 qh->stamp = oxu->stamp;
1771 temp = qh_completions(oxu, qh);
1784 if (qh->stamp == oxu->stamp)
1786 else if (!oxu->reclaim
1788 start_unlink_async(oxu, qh);
1795 timer_action(oxu, TIMER_ASYNC_SHRINK);
1813 /* caller must hold oxu->lock */
1814 static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
1816 union ehci_shadow *prev_p = &oxu->pshadow[frame];
1817 __le32 *hw_p = &oxu->periodic[frame];
1838 static unsigned short periodic_usecs(struct oxu_hcd *oxu,
1841 __le32 *hw_p = &oxu->periodic[frame];
1842 union ehci_shadow *q = &oxu->pshadow[frame];
1862 oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
1868 static int enable_periodic(struct oxu_hcd *oxu)
1876 status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
1878 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
1882 cmd = readl(&oxu->regs->command) | CMD_PSE;
1883 writel(cmd, &oxu->regs->command);
1885 oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
1888 oxu->next_uframe = readl(&oxu->regs->frame_index)
1889 % (oxu->periodic_size << 3);
1893 static int disable_periodic(struct oxu_hcd *oxu)
1901 status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
1903 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
1907 cmd = readl(&oxu->regs->command) & ~CMD_PSE;
1908 writel(cmd, &oxu->regs->command);
1911 oxu->next_uframe = -1;
1919 * no FSTN support (yet; oxu 0.96+)
1921 static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
1935 for (i = qh->start; i < oxu->periodic_size; i += period) {
1936 union ehci_shadow *prev = &oxu->pshadow[i];
1937 __le32 *hw_p = &oxu->periodic[i];
1975 oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
1980 if (!oxu->periodic_sched++)
1981 return enable_periodic(oxu);
1986 static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
1997 for (i = qh->start; i < oxu->periodic_size; i += period)
1998 periodic_unlink(oxu, i, qh);
2001 oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
2017 oxu->periodic_sched--;
2018 if (!oxu->periodic_sched)
2019 (void) disable_periodic(oxu);
2022 static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
2026 qh_unlink_periodic(oxu, qh);
2045 static int check_period(struct oxu_hcd *oxu,
2069 claimed = periodic_usecs(oxu, frame, uframe);
2073 } while ((frame += 1) < oxu->periodic_size);
2078 claimed = periodic_usecs(oxu, frame, uframe);
2081 } while ((frame += period) < oxu->periodic_size);
2087 static int check_intr_schedule(struct oxu_hcd *oxu,
2096 if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
2111 static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
2118 qh_refresh(oxu, qh);
2125 status = check_intr_schedule(oxu, frame, --uframe,
2142 status = check_intr_schedule(oxu,
2153 status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
2166 oxu_dbg(oxu, "reused qh %p schedule\n", qh);
2169 status = qh_link_periodic(oxu, qh);
2174 static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
2186 spin_lock_irqsave(&oxu->lock, flags);
2188 if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
2195 qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
2201 status = qh_schedule(oxu, qh);
2207 qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
2211 oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
2214 spin_unlock_irqrestore(&oxu->lock, flags);
2216 qtd_list_free(oxu, urb, qtd_list);
2221 static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
2224 oxu_dbg(oxu, "iso support is missing!\n");
2228 static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
2231 oxu_dbg(oxu, "split iso support is missing!\n");
2235 static void scan_periodic(struct oxu_hcd *oxu)
2240 mod = oxu->periodic_size << 3;
2247 now_uframe = oxu->next_uframe;
2248 if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
2249 clock = readl(&oxu->regs->frame_index);
2271 q_p = &oxu->pshadow[frame];
2272 hw_p = &oxu->periodic[frame];
2281 live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state);
2288 modified = qh_completions(oxu, temp.qh);
2290 intr_deschedule(oxu, temp.qh);
2311 if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
2313 oxu->next_uframe = now_uframe;
2314 now = readl(&oxu->regs->frame_index) % mod;
2331 static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
2333 int port = HCS_N_PORTS(oxu->hcs_params);
2336 writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
2339 static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
2343 if (!HCS_PPC(oxu->hcs_params))
2346 oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
2347 for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
2348 (void) oxu_hub_control(oxu_to_hcd(oxu),
2356 * It calls driver completion functions, after dropping oxu->lock.
2358 static void ehci_work(struct oxu_hcd *oxu)
2360 timer_action_done(oxu, TIMER_IO_WATCHDOG);
2361 if (oxu->reclaim_ready)
2362 end_unlink_async(oxu);
2364 /* another CPU may drop oxu->lock during a schedule scan while
2368 if (oxu->scanning)
2370 oxu->scanning = 1;
2371 scan_async(oxu);
2372 if (oxu->next_uframe != -1)
2373 scan_periodic(oxu);
2374 oxu->scanning = 0;
2380 if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
2381 (oxu->async->qh_next.ptr != NULL ||
2382 oxu->periodic_sched != 0))
2383 timer_action(oxu, TIMER_IO_WATCHDOG);
2386 static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
2390 && oxu->reclaim
2391 && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
2394 for (last = oxu->reclaim;
2402 } else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
2403 end_unlink_async(oxu);
2407 start_unlink_async(oxu, qh);
2416 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2420 spin_lock(&oxu->lock);
2422 status = readl(&oxu->regs->status);
2426 oxu_dbg(oxu, "device removed\n");
2432 spin_unlock(&oxu->lock);
2437 writel(status, &oxu->regs->status);
2438 readl(&oxu->regs->command); /* unblock posted write */
2443 dbg_status(oxu, "irq", status);
2454 oxu->reclaim_ready = 1;
2460 unsigned i = HCS_N_PORTS(oxu->hcs_params);
2464 if (!(readl(&oxu->regs->command) & CMD_RUN))
2468 int pstatus = readl(&oxu->regs->port_status[i]);
2473 || oxu->reset_done[i] != 0)
2480 oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
2481 oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
2482 mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
2489 status = readl(&oxu->regs->status);
2490 dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
2491 dbg_status(oxu, "fatal", status);
2493 oxu_err(oxu, "fatal error\n");
2495 ehci_reset(oxu);
2496 writel(0, &oxu->regs->configured_flag);
2505 ehci_work(oxu);
2506 spin_unlock(&oxu->lock);
2514 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2523 if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
2524 (!oxu->is_otg && (status & OXU_USBSPHI)))
2537 struct oxu_hcd *oxu = (struct oxu_hcd *) param;
2540 spin_lock_irqsave(&oxu->lock, flags);
2543 if (oxu->reclaim) {
2544 u32 status = readl(&oxu->regs->status);
2546 oxu_vdbg(oxu, "lost IAA\n");
2547 writel(STS_IAA, &oxu->regs->status);
2548 oxu->reclaim_ready = 1;
2553 if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
2554 start_unlink_async(oxu, oxu->async);
2556 /* oxu could run by timer, without IRQs ... */
2557 ehci_work(oxu);
2559 spin_unlock_irqrestore(&oxu->lock, flags);
2566 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2571 spin_lock_init(&oxu->lock);
2573 init_timer(&oxu->watchdog);
2574 oxu->watchdog.function = oxu_watchdog;
2575 oxu->watchdog.data = (unsigned long) oxu;
2581 oxu->periodic_size = DEFAULT_I_TDPS;
2582 retval = ehci_mem_init(oxu, GFP_KERNEL);
2587 hcc_params = readl(&oxu->caps->hcc_params);
2589 oxu->i_thresh = 8;
2591 oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
2593 oxu->reclaim = NULL;
2594 oxu->reclaim_ready = 0;
2595 oxu->next_uframe = -1;
2604 oxu->async->qh_next.qh = NULL;
2605 oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
2606 oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
2607 oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
2608 oxu->async->hw_qtd_next = EHCI_LIST_END;
2609 oxu->async->qh_state = QH_STATE_LINKED;
2610 oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
2629 oxu_dbg(oxu, "park %d\n", park);
2636 oxu->command = temp;
2645 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2648 spin_lock_init(&oxu->mem_lock);
2649 INIT_LIST_HEAD(&oxu->urb_list);
2650 oxu->urb_len = 0;
2655 if (oxu->is_otg) {
2656 oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
2657 oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
2658 HC_LENGTH(readl(&oxu->caps->hc_capbase));
2660 oxu->mem = hcd->regs + OXU_SPH_MEM;
2662 oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
2663 oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
2664 HC_LENGTH(readl(&oxu->caps->hc_capbase));
2666 oxu->mem = hcd->regs + OXU_OTG_MEM;
2669 oxu->hcs_params = readl(&oxu->caps->hcs_params);
2670 oxu->sbrn = 0x20;
2681 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2688 retval = ehci_reset(oxu);
2690 ehci_mem_cleanup(oxu);
2693 writel(oxu->periodic_dma, &oxu->regs->frame_list);
2694 writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
2696 /* hcc_params controls whether oxu->regs->segment must (!!!)
2707 hcc_params = readl(&oxu->caps->hcc_params);
2709 writel(0, &oxu->regs->segment);
2711 oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
2713 oxu->command |= CMD_RUN;
2714 writel(oxu->command, &oxu->regs->command);
2715 dbg_cmd(oxu, "init", oxu->command);
2724 writel(FLAG_CF, &oxu->regs->configured_flag);
2725 readl(&oxu->regs->command); /* unblock posted writes */
2727 temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
2728 oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
2729 ((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
2733 writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
2740 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2743 ehci_port_power(oxu, 0);
2746 del_timer_sync(&oxu->watchdog);
2748 spin_lock_irq(&oxu->lock);
2750 ehci_quiesce(oxu);
2752 ehci_reset(oxu);
2753 writel(0, &oxu->regs->intr_enable);
2754 spin_unlock_irq(&oxu->lock);
2757 writel(0, &oxu->regs->configured_flag);
2760 spin_lock_irq(&oxu->lock);
2761 if (oxu->async)
2762 ehci_work(oxu);
2763 spin_unlock_irq(&oxu->lock);
2764 ehci_mem_cleanup(oxu);
2766 dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
2775 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2777 (void) ehci_halt(oxu);
2778 ehci_turn_off_all_ports(oxu);
2781 writel(0, &oxu->regs->configured_flag);
2784 readl(&oxu->regs->configured_flag);
2801 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2810 if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
2812 return submit_async(oxu, urb, &qtd_list, mem_flags);
2815 if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
2817 return intr_submit(oxu, urb, &qtd_list, mem_flags);
2821 return itd_submit(oxu, urb, mem_flags);
2823 return sitd_submit(oxu, urb, mem_flags);
2833 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2863 murb = (struct urb *) oxu_murb_alloc(oxu);
2894 murb = (struct urb *) oxu_murb_alloc(oxu);
2925 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2929 spin_lock_irqsave(&oxu->lock, flags);
2937 unlink_async(oxu, qh);
2946 intr_deschedule(oxu, qh);
2949 qh_completions(oxu, qh);
2952 oxu_dbg(oxu, "bogus qh %p state %d\n",
2962 status = qh_schedule(oxu, qh);
2963 spin_unlock_irqrestore(&oxu->lock, flags);
2974 spin_unlock_irqrestore(&oxu->lock, flags);
2982 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
2990 spin_lock_irqsave(&oxu->lock, flags);
2999 oxu_vdbg(oxu, "iso delay\n");
3007 for (tmp = oxu->async->qh_next.qh;
3014 unlink_async(oxu, qh);
3018 spin_unlock_irqrestore(&oxu->lock, flags);
3032 oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
3039 spin_unlock_irqrestore(&oxu->lock, flags);
3045 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3047 return (readl(&oxu->regs->frame_index) >> 3) %
3048 oxu->periodic_size;
3054 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3065 ports = HCS_N_PORTS(oxu->hcs_params);
3085 spin_lock_irqsave(&oxu->lock, flags);
3087 temp = readl(&oxu->regs->port_status[i]);
3097 oxu->reset_done[i] = 0;
3099 time_after_eq(jiffies, oxu->reset_done[i]))) {
3107 spin_unlock_irqrestore(&oxu->lock, flags);
3112 static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
3130 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3131 int ports = HCS_N_PORTS(oxu->hcs_params);
3132 u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
3139 spin_lock_irqsave(&oxu->lock, flags);
3180 oxu->reset_done[wIndex] = jiffies
3188 if (HCS_PPC(oxu->hcs_params))
3204 readl(&oxu->regs->command); /* unblock posted write */
3207 ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
3233 if (!oxu->reset_done[wIndex]) {
3235 oxu->reset_done[wIndex] = jiffies
3238 mod_timer(&oxu_to_hcd(oxu)->rh_timer,
3239 oxu->reset_done[wIndex]);
3244 oxu->reset_done[wIndex])) {
3246 oxu->reset_done[wIndex] = 0;
3252 retval = handshake(oxu, status_reg,
3255 oxu_err(oxu,
3267 oxu->reset_done[wIndex])) {
3269 oxu->reset_done[wIndex] = 0;
3277 retval = handshake(oxu, status_reg,
3280 oxu_err(oxu, "port %d reset error %d\n",
3286 temp = check_reset_complete(oxu, wIndex, status_reg,
3292 test_bit(wIndex, &oxu->companion_ports)) {
3296 oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
3309 status |= oxu_port_speed(oxu, temp);
3325 dbg_port(oxu, "GetStatus", wIndex + 1, temp);
3359 if (HCS_PPC(oxu->hcs_params))
3369 oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
3377 oxu->reset_done[wIndex] = jiffies
3391 ehci_quiesce(oxu);
3392 ehci_halt(oxu);
3400 readl(&oxu->regs->command); /* unblock posted writes */
3408 spin_unlock_irqrestore(&oxu->lock, flags);
3416 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3420 oxu_dbg(oxu, "suspend root hub\n");
3422 if (time_before(jiffies, oxu->next_statechange))
3425 port = HCS_N_PORTS(oxu->hcs_params);
3426 spin_lock_irq(&oxu->lock);
3430 ehci_quiesce(oxu);
3433 oxu->command = readl(&oxu->regs->command);
3434 if (oxu->reclaim)
3435 oxu->reclaim_ready = 1;
3436 ehci_work(oxu);
3443 oxu->bus_suspended = 0;
3445 u32 __iomem *reg = &oxu->regs->port_status[port];
3453 set_bit(port, &oxu->bus_suspended);
3463 oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
3470 del_timer_sync(&oxu->watchdog);
3471 ehci_halt(oxu);
3478 writel(mask, &oxu->regs->intr_enable);
3479 readl(&oxu->regs->intr_enable);
3481 oxu->next_statechange = jiffies + msecs_to_jiffies(10);
3482 spin_unlock_irq(&oxu->lock);
3489 struct oxu_hcd *oxu = hcd_to_oxu(hcd);
3493 if (time_before(jiffies, oxu->next_statechange))
3495 spin_lock_irq(&oxu->lock);
3503 temp = readl(&oxu->regs->intr_enable);
3504 oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");
3509 writel(0, &oxu->regs->intr_enable);
3512 writel(0, &oxu->regs->segment);
3513 writel(oxu->periodic_dma, &oxu->regs->frame_list);
3514 writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
3517 writel(oxu->command, &oxu->regs->command);
3524 i = HCS_N_PORTS(oxu->hcs_params);
3526 temp = readl(&oxu->regs->port_status[i]);
3529 if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
3530 oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
3533 writel(temp, &oxu->regs->port_status[i]);
3535 i = HCS_N_PORTS(oxu->hcs_params);
3538 temp = readl(&oxu->regs->port_status[i]);
3539 if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
3541 writel(temp, &oxu->regs->port_status[i]);
3542 oxu_vdbg(oxu, "resumed port %d\n", i + 1);
3545 (void) readl(&oxu->regs->command);
3549 if (oxu->async->qh_next.qh)
3551 if (oxu->periodic_sched)
3554 oxu->command |= temp;
3555 writel(oxu->command, &oxu->regs->command);
3558 oxu->next_statechange = jiffies + msecs_to_jiffies(5);
3562 writel(INTR_MASK, &oxu->regs->intr_enable);
3564 spin_unlock_irq(&oxu->lock);
3689 struct oxu_hcd *oxu;
3708 oxu = hcd_to_oxu(hcd);
3709 oxu->is_otg = otg;
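
A large share of the matches above (lines 319, 351, 378, 402, 414, 1876, 1901, 3252, 3277) go through the handshake() helper declared at line 319. As a reading aid, here is a minimal sketch of the EHCI-style polling loop that signature implies: spin on a register until the bits selected by mask reach the value done, or the microsecond budget runs out. This is a reconstruction for illustration, not a verbatim copy of the driver's code.

static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
                     u32 mask, u32 done, int usec)
{
        u32 result;

        do {
                result = readl(ptr);            /* sample the register */
                if (result == ~(u32) 0)         /* all-ones read: controller gone */
                        return -ENODEV;
                result &= mask;                 /* keep only the bits of interest */
                if (result == done)             /* reached the expected state */
                        return 0;
                udelay(1);                      /* poll roughly once per microsecond */
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;                      /* never settled within the budget */
}

Call sites such as ehci_quiesce() (lines 402 and 414) wait on &oxu->regs->status with STS_ASS | STS_PSS, while enable_periodic() and disable_periodic() (lines 1876 and 1901) wait on STS_PSS, each with a timeout expressed in microseconds.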
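The matches at lines 467-655 all follow the same allocation pattern: each object type (qtd, qh, data buffer, murb) lives in a fixed pool, with a parallel *_used[] array guarded by oxu->mem_lock recording which slots are taken. The sketch below shows the shape of that pattern for the qtd pool; field initialisation is abridged, and the pool-size constant (QTD_NUM here) is assumed rather than taken from the matches.

static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
        int i;
        struct ehci_qtd *qtd = NULL;

        spin_lock(&oxu->mem_lock);

        for (i = 0; i < QTD_NUM; i++)           /* scan for a free slot */
                if (!oxu->qtd_used[i])
                        break;

        if (i < QTD_NUM) {
                qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
                /* ... clear the descriptor and record its DMA address ... */
                oxu->qtd_used[i] = 1;           /* claim the slot */
        }

        spin_unlock(&oxu->mem_lock);

        return qtd;
}

Freeing is the inverse: oxu_qtd_free() (lines 547-559) computes the slot index by pointer arithmetic against &oxu->mem->qtd_pool[0] and clears the corresponding *_used entry under the same lock.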