
Lines Matching refs:ohci in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/usb/host/

37  * PRECONDITION:  ohci lock held, irqs blocked.
40 finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
41 __releases(ohci->lock)
42 __acquires(ohci->lock)
46 urb_free_priv (ohci, urb->hcpriv);
52 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
53 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
54 if (quirk_amdiso(ohci))
56 if (quirk_amdprefetch(ohci))
57 sb800_prefetch(ohci, 0);
61 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
70 usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
71 spin_unlock (&ohci->lock);
72 usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
73 spin_lock (&ohci->lock);
76 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
77 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
78 ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
79 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
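
finish_urb() (source lines 37-79 above) must drop ohci->lock around usb_hcd_giveback_urb() so the URB's completion handler can resubmit without deadlocking, then retake it before touching the schedule again. A minimal userspace model of that give-back discipline, with hypothetical stand-in types and a pthread mutex in place of the kernel spinlock:

    #include <pthread.h>

    /* Hypothetical stand-ins for struct ohci_hcd / struct urb. */
    struct urb { void (*complete)(struct urb *, int); };
    struct hcd { pthread_mutex_t lock; };

    /* Caller holds hcd->lock, mirroring finish_urb()'s precondition.
     * The lock is dropped across the completion callback so the class
     * driver's handler may resubmit without deadlock, then retaken
     * before returning: the spin_unlock / giveback / spin_lock
     * sequence visible in the listing. */
    static void giveback_urb(struct hcd *hcd, struct urb *urb, int status)
    {
            pthread_mutex_unlock(&hcd->lock);
            urb->complete(urb, status);
            pthread_mutex_lock(&hcd->lock);
    }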
91 static int balance (struct ohci_hcd *ohci, int interval, int load)
103 if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
108 if ((ohci->load [j] + load) > 900)
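
balance() (source lines 91-108) picks the least-loaded branch of the periodic schedule whose every affected frame stays within the USB 1.1 budget of 900 us (90% of a 1 ms frame). A self-contained sketch of that search, reconstructed around the two comparisons visible above; NUM_INTS = 32 matches the driver's periodic table size:

    #include <errno.h>

    #define NUM_INTS 32     /* periodic schedule slots, as in the driver */

    /* Pick the least-loaded slot for an endpoint polled every
     * `interval` frames, refusing any slot that would push one of
     * its frames past 900 us.  Returns a slot index or -ENOSPC. */
    static int pick_branch(const int load[NUM_INTS], int interval, int new_load)
    {
            int i, branch = -ENOSPC;

            if (interval > NUM_INTS)
                    interval = NUM_INTS;

            for (i = 0; i < interval; i++) {
                    if (branch < 0 || load[branch] > load[i]) {
                            int j;

                            /* every frame this branch touches must fit */
                            for (j = i; j < NUM_INTS; j += interval)
                                    if (load[j] + new_load > 900)
                                            break;
                            if (j < NUM_INTS)
                                    continue;
                            branch = i;
                    }
            }
            return branch;
    }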
125 static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
129 ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
130 (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
134 struct ed **prev = &ohci->periodic [i];
135 __hc32 *prev_p = &ohci->hcca->int_table [i];
155 *prev_p = cpu_to_hc32(ohci, ed->dma);
158 ohci->load [i] += ed->load;
160 ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
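
periodic_link() (source lines 125-160) splices the ED into every interval-th slot of both the shadow periodic[] lists and the hardware int_table, and charges ed->load to each frame it touches. A simplified software-only reconstruction; byte-order conversion and memory barriers from the real driver are omitted, and field names are illustrative:

    #include <stdint.h>
    #include <stddef.h>

    #define NUM_INTS 32

    struct ed {
            struct ed *ed_next;
            uint32_t   dma;       /* bus address the controller follows */
            uint32_t   hw_next;   /* models hwNextED */
            int        interval;  /* polling period in frames */
            int        branch;    /* slot chosen by the balancer */
            int        load;      /* microseconds per visit */
    };

    struct sched {
            struct ed *periodic[NUM_INTS];  /* shadow lists per slot */
            uint32_t   int_table[NUM_INTS]; /* models hcca->int_table */
            int        load[NUM_INTS];
    };

    /* Link `ed` into each slot its interval touches, keeping every
     * list sorted with longer-interval EDs first so EDs of one period
     * share a common suffix across slots. */
    static void link_periodic(struct sched *s, struct ed *ed)
    {
            int i;

            for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                    struct ed **prev = &s->periodic[i];
                    uint32_t   *prev_p = &s->int_table[i];
                    struct ed  *here = *prev;

                    while (here && here != ed
                                    && ed->interval <= here->interval) {
                            prev = &here->ed_next;
                            prev_p = &here->hw_next;
                            here = *prev;
                    }
                    if (here != ed) {   /* not already in this slot */
                            ed->ed_next = here;
                            if (here)
                                    ed->hw_next = *prev_p;
                            *prev = ed;
                            *prev_p = ed->dma;  /* hardware sees it now */
                    }
                    s->load[i] += ed->load;
            }
    }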
165 static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
173 if (quirk_zfmicro(ohci)
175 && !(ohci->eds_scheduled++))
176 mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ));
185 * periodic schedule encodes a tree like figure 3-5 in the ohci
191 if (ohci->ed_controltail == NULL) {
192 WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
193 ohci_writel (ohci, ed->dma,
194 &ohci->regs->ed_controlhead);
196 ohci->ed_controltail->ed_next = ed;
197 ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
200 ed->ed_prev = ohci->ed_controltail;
201 if (!ohci->ed_controltail && !ohci->ed_rm_list) {
203 ohci->hc_control |= OHCI_CTRL_CLE;
204 ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
205 ohci_writel (ohci, ohci->hc_control,
206 &ohci->regs->control);
208 ohci->ed_controltail = ed;
212 if (ohci->ed_bulktail == NULL) {
213 WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
214 ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
216 ohci->ed_bulktail->ed_next = ed;
217 ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
220 ed->ed_prev = ohci->ed_bulktail;
221 if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
223 ohci->hc_control |= OHCI_CTRL_BLE;
224 ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
225 ohci_writel (ohci, ohci->hc_control,
226 &ohci->regs->control);
228 ohci->ed_bulktail = ed;
234 branch = balance (ohci, ed->interval, ed->load);
236 ohci_dbg (ohci,
242 periodic_link (ohci, ed);
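
For control and bulk EDs, ed_schedule() (source lines 165-242) appends to a tail-tracked singly linked list: an empty list gets its head register pointed at the new ED, otherwise the old tail chains to it, and list processing (CLE/BLE) is re-enabled when the list had gone idle. A minimal sketch of that append with hypothetical stand-in types; note the driver additionally holds off re-enabling while ed_rm_list is non-empty:

    #include <stdint.h>
    #include <stddef.h>

    struct ed { struct ed *ed_next, *ed_prev; uint32_t dma, hw_next; };

    struct async_list {
            struct ed *tail;     /* models ed_controltail / ed_bulktail */
            uint32_t   head_reg; /* models ED_CONTROLHEAD / ED_BULKHEAD */
            int        enabled;  /* models the CLE / BLE control bit */
    };

    static void async_append(struct async_list *l, struct ed *ed)
    {
            if (l->tail == NULL) {
                    l->head_reg = ed->dma;     /* first ED: set head */
            } else {
                    l->tail->ed_next = ed;     /* chain after old tail */
                    l->tail->hw_next = ed->dma;
            }
            ed->ed_prev = l->tail;
            if (!l->tail)          /* list was idle: resume processing */
                    l->enabled = 1;
            l->tail = ed;
    }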
254 static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
260 struct ed **prev = &ohci->periodic [i];
261 __hc32 *prev_p = &ohci->hcca->int_table [i];
271 ohci->load [i] -= ed->load;
273 ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
275 ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
276 (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
302 static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
304 ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
323 ohci->hc_control &= ~OHCI_CTRL_CLE;
324 ohci_writel (ohci, ohci->hc_control,
325 &ohci->regs->control);
328 ohci_writel (ohci,
329 hc32_to_cpup (ohci, &ed->hwNextED),
330 &ohci->regs->ed_controlhead);
336 if (ohci->ed_controltail == ed) {
337 ohci->ed_controltail = ed->ed_prev;
338 if (ohci->ed_controltail)
339 ohci->ed_controltail->ed_next = NULL;
349 ohci->hc_control &= ~OHCI_CTRL_BLE;
350 ohci_writel (ohci, ohci->hc_control,
351 &ohci->regs->control);
354 ohci_writel (ohci,
355 hc32_to_cpup (ohci, &ed->hwNextED),
356 &ohci->regs->ed_bulkhead);
362 if (ohci->ed_bulktail == ed) {
363 ohci->ed_bulktail = ed->ed_prev;
364 if (ohci->ed_bulktail)
365 ohci->ed_bulktail->ed_next = NULL;
374 periodic_unlink (ohci, ed);
386 struct ohci_hcd *ohci,
395 spin_lock_irqsave (&ohci->lock, flags);
402 ed = ed_alloc (ohci, GFP_ATOMIC);
409 td = td_alloc (ohci, GFP_ATOMIC);
412 ed_free (ohci, ed);
417 ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
447 ed->hwINFO = cpu_to_hc32(ohci, info);
453 spin_unlock_irqrestore (&ohci->lock, flags);
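
The ed_get() fragments (source lines 386-453) allocate each new ED together with one dummy TD and point hwTailP (and hwHeadP) at it: the controller treats head == tail as an empty queue, so real work can later be written into the current dummy and a fresh dummy appended, without the hardware ever seeing a half-built TD. A sketch of that setup, with plain calloc() standing in for the driver's DMA-pool allocators and hypothetical field names:

    #include <stdint.h>
    #include <stdlib.h>

    struct td { uint32_t hw_next; uint32_t td_dma; };
    struct ed { uint32_t hw_headp, hw_tailp; struct td *dummy; };

    static struct ed *ed_create(uint32_t td_dma)
    {
            struct ed *ed = calloc(1, sizeof *ed);
            struct td *td;

            if (!ed)
                    return NULL;
            td = calloc(1, sizeof *td);
            if (!td) {
                    free(ed);
                    return NULL;
            }
            td->td_dma = td_dma;
            ed->dummy = td;
            ed->hw_tailp = td_dma;  /* models ed->hwTailP = ... */
            ed->hw_headp = td_dma;  /* head == tail: queue is empty */
            return ed;
    }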
465 static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
467 ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
468 ed_deschedule (ohci, ed);
471 ed->ed_next = ohci->ed_rm_list;
473 ohci->ed_rm_list = ed;
476 ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
477 ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
479 (void) ohci_readl (ohci, &ohci->regs->control);
486 ed->tick = ohci_frame_no(ohci) + 1;
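
start_ed_unlink() (source lines 465-486) parks the ED on ed_rm_list, enables the start-of-frame interrupt, and stamps the ED with frame_no + 1, since the controller may still hold a cached copy of the ED through the current frame. The wraparound-safe tick comparison finish_unlinks() needs looks roughly like this (the OHCI frame counter is 16 bits wide):

    #include <stdint.h>

    struct ed { uint16_t tick; };

    /* Safe to reclaim once at least one full frame has elapsed past
     * the stamped tick; the signed cast handles counter wraparound. */
    static int safe_to_reclaim(const struct ed *ed, uint16_t frame_now)
    {
            return (int16_t)(frame_now - ed->tick) >= 0;
    }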
497 td_fill (struct ohci_hcd *ohci, u32 info,
538 td->hwINFO = cpu_to_hc32 (ohci, info);
540 td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
541 *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
545 td->hwCBP = cpu_to_hc32 (ohci, data);
548 td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
551 td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);
558 td->td_hash = ohci->td_hash [hash];
559 ohci->td_hash [hash] = td;
574 struct ohci_hcd *ohci,
592 urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
596 list_add (&urb_priv->pending, &ohci->pending);
614 periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
615 && ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
623 td_fill (ohci, info, data, 4096, urb, cnt);
631 td_fill (ohci, info, data, data_len, urb, cnt);
635 td_fill (ohci, info, 0, 0, urb, cnt);
641 ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
650 td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
655 td_fill (ohci, info, data, data_len, urb, cnt++);
660 td_fill (ohci, info, data, 0, urb, cnt++);
663 ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
678 td_fill (ohci, TD_CC | TD_ISO | frame,
682 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
683 if (quirk_amdiso(ohci))
685 if (quirk_amdprefetch(ohci))
686 sb800_prefetch(ohci, 1);
688 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
689 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
696 ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
697 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
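
The td_submit_urb() fragments (source lines 574-697) show bulk and control data being cut into TDs of at most 4096 bytes (an OHCI general TD can cross at most one page boundary, so 4 KB chunks are safe regardless of buffer alignment), with an optional trailing zero-length TD. A toy version of that chunking loop, printing the (offset, length) pairs td_fill() would receive; names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    static int fill_tds(uint32_t data_len, int zero_packet)
    {
            uint32_t data = 0;   /* offset into the transfer buffer */
            int cnt = 0;

            while (data_len > 4096) {        /* full 4 KB TDs first */
                    printf("td %d: offset %u len 4096\n",
                           cnt, (unsigned)data);
                    data += 4096;
                    data_len -= 4096;
                    cnt++;
            }
            printf("td %d: offset %u len %u\n",   /* short final TD */
                   cnt, (unsigned)data, (unsigned)data_len);
            cnt++;
            if (zero_packet)                 /* trailing ZLP if needed */
                    printf("td %d: ZLP\n", cnt++);
            return cnt;
    }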
708 static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
710 u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
718 u16 tdPSW = ohci_hwPSW(ohci, td, 0);
742 ohci_vdbg (ohci,
752 u32 tdBE = hc32_to_cpup (ohci, &td->hwBE);
769 hc32_to_cpup (ohci, &td->hwCBP)
774 ohci_vdbg (ohci,
785 static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
791 __hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);
796 ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
798 ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);
840 ohci_dbg (ohci,
845 hc32_to_cpu (ohci, td->hwINFO),
853 static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
859 td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
860 ohci->hcca->done_head = 0;
869 td = dma_to_td (ohci, td_dma);
871 ohci_err (ohci, "bad entry %8x\n", td_dma);
875 td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
876 cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));
883 && (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
884 ed_halted(ohci, td, cc);
888 td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
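
The controller retires TDs onto hcca->done_head as a LIFO chain, newest first; dl_reverse_done_list() (source lines 853-888) walks the DMA pointers and flips the chain so completions are processed in the order they occurred. Stripped of the dma-to-virtual lookup and error handling, the core is a plain singly-linked-list reversal:

    #include <stddef.h>

    struct td { struct td *next_dl_td; };

    static struct td *reverse_done_list(struct td *lifo)
    {
            struct td *rev = NULL;

            while (lifo) {
                    struct td *next = lifo->next_dl_td;
                    lifo->next_dl_td = rev;   /* re-point at the reversed list */
                    rev = lifo;
                    lifo = next;
            }
            return rev;   /* oldest completion first */
    }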
897 finish_unlinks (struct ohci_hcd *ohci, u16 tick)
902 for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
910 if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
923 head = hc32_to_cpu (ohci, ed->hwHeadP) &
928 if (ed == ohci->ed_to_check)
929 ohci->ed_to_check = NULL;
972 savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
980 tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
982 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
984 ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);
987 td_done (ohci, urb, td);
993 finish_urb(ohci, urb, 0);
1001 if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
1002 ohci->eds_scheduled--;
1003 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
1006 ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
1010 if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
1011 ed_schedule (ohci, ed);
1019 if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
1020 && ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
1021 && !ohci->ed_rm_list) {
1024 if (ohci->ed_controltail) {
1026 if (quirk_zfmicro(ohci))
1028 if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
1030 ohci_writel (ohci, 0,
1031 &ohci->regs->ed_controlcurrent);
1034 if (ohci->ed_bulktail) {
1036 if (quirk_zfmicro(ohci))
1038 if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
1040 ohci_writel (ohci, 0,
1041 &ohci->regs->ed_bulkcurrent);
1047 ohci->hc_control |= control;
1048 if (quirk_zfmicro(ohci))
1050 ohci_writel (ohci, ohci->hc_control,
1051 &ohci->regs->control);
1054 if (quirk_zfmicro(ohci))
1056 ohci_writel (ohci, command, &ohci->regs->cmdstatus);
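
finish_unlinks() (source lines 897-1056) rescans ed_rm_list each start-of-frame using the pointer-to-pointer idiom visible at source line 902: holding a struct ed **last means an ED can be spliced out of the singly linked list without a separate previous-node variable. A minimal illustration, with a hypothetical done flag standing in for the driver's per-ED completion checks:

    #include <stddef.h>

    struct ed { struct ed *ed_next; int done; };

    static void scan_rm_list(struct ed **rm_list)
    {
            struct ed **last, *ed;

            for (last = rm_list, ed = *last; ed != NULL; ed = *last) {
                    if (ed->done) {
                            *last = ed->ed_next;   /* splice this ED out */
                            ed->ed_next = NULL;
                    } else {
                            last = &ed->ed_next;   /* keep it, advance */
                    }
            }
    }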
1071 static void takeback_td(struct ohci_hcd *ohci, struct td *td)
1079 status = td_done(ohci, urb, td);
1084 finish_urb(ohci, urb, status);
1089 start_ed_unlink(ohci, ed);
1092 } else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
1093 == cpu_to_hc32(ohci, ED_SKIP)) {
1095 if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
1096 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
1100 ohci_writel(ohci, OHCI_CLF,
1101 &ohci->regs->cmdstatus);
1104 ohci_writel(ohci, OHCI_BLF,
1105 &ohci->regs->cmdstatus);
1122 dl_done_list (struct ohci_hcd *ohci)
1124 struct td *td = dl_reverse_done_list (ohci);
1128 takeback_td(ohci, td);
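
dl_done_list() (source lines 1122-1128) then simply drains the reversed chain, handing each TD to takeback_td(), which accounts it against its URB and gives the URB back once its final TD retires. The drain loop, sketched with the same hypothetical struct td shape as above:

    #include <stddef.h>

    struct td { struct td *next_dl_td; };

    static void drain_done_list(struct td *td /* from reverse_done_list() */)
    {
            while (td) {
                    struct td *next = td->next_dl_td;
                    /* takeback_td(ohci, td) would go here: update the
                     * URB's actual_length/status, give the URB back
                     * when its last TD is done, and start unlinking
                     * the ED if its queue has emptied. */
                    td = next;
            }
    }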