/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/host/

Lines Matching refs:uhci

28 static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
30 if (uhci->is_stopped)
31 mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
32 uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
35 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
37 uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
46 static void uhci_fsbr_on(struct uhci_hcd *uhci)
53 uhci->fsbr_is_on = 1;
54 lqh = list_entry(uhci->skel_async_qh->node.prev,
56 lqh->link = LINK_TO_QH(uhci->skel_term_qh);
59 static void uhci_fsbr_off(struct uhci_hcd *uhci)
65 uhci->fsbr_is_on = 0;
66 lqh = list_entry(uhci->skel_async_qh->node.prev,
71 static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
79 static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
82 uhci->fsbr_is_wanted = 1;
83 if (!uhci->fsbr_is_on)
84 uhci_fsbr_on(uhci);
85 else if (uhci->fsbr_expiring) {
86 uhci->fsbr_expiring = 0;
87 del_timer(&uhci->fsbr_timer);
94 struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
97 spin_lock_irqsave(&uhci->lock, flags);
98 if (uhci->fsbr_expiring) {
99 uhci->fsbr_expiring = 0;
100 uhci_fsbr_off(uhci);
102 spin_unlock_irqrestore(&uhci->lock, flags);
106 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
111 td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
124 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
127 dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
129 dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);
131 dma_pool_free(uhci->td_pool, td, td->dma_handle);
155 static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
163 if (uhci->frame_cpu[framenum]) {
166 ftd = uhci->frame_cpu[framenum];
175 td->link = uhci->frame[framenum];
177 uhci->frame[framenum] = LINK_TO_TD(td);
178 uhci->frame_cpu[framenum] = td;
182 static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
191 if (uhci->frame_cpu[td->frame] == td) {
193 uhci->frame[td->frame] = td->link;
194 uhci->frame_cpu[td->frame] = NULL;
199 uhci->frame[td->frame] = LINK_TO_TD(ntd);
200 uhci->frame_cpu[td->frame] = ntd;
213 static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
220 ftd = uhci->frame_cpu[framenum];
223 uhci->frame[framenum] = ltd->link;
224 uhci->frame_cpu[framenum] = NULL;
234 static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
240 uhci_remove_td_from_frame_list(uhci, td);
243 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
249 qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
265 qh->dummy_td = uhci_alloc_td(uhci);
267 dma_pool_free(uhci->qh_pool, qh, dma_handle);
291 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
295 dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);
301 uhci_free_td(uhci, qh->dummy_td);
303 dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
313 static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
325 ret = (uhci->frame_number + uhci->is_stopped !=
422 static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
424 list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
433 static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
437 list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
449 static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
457 list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
472 uhci->skel_term_qh->link = link_to_new_qh;
478 static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
503 if (qh == uhci->next_qh)
504 uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
509 link_iso(uhci, qh);
511 link_interrupt(uhci, qh);
513 link_async(uhci, qh);
519 static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
531 static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
542 uhci->skel_term_qh->link = link_to_next_qh;
549 static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
560 unlink_interrupt(uhci, qh);
562 unlink_async(uhci, qh);
564 uhci_get_current_frame_number(uhci);
565 qh->unlink_frame = uhci->frame_number;
568 if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
569 uhci_set_next_interrupt(uhci);
572 if (qh == uhci->next_qh)
573 uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
575 list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
584 static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
588 if (qh == uhci->next_qh)
589 uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
591 list_move(&qh->node, &uhci->idle_qh_list);
596 uhci_free_td(uhci, qh->post_td);
601 if (uhci->num_waiting)
602 wake_up_all(&uhci->waitqh);
608 static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
610 int highest_load = uhci->load[phase];
613 highest_load = max_t(int, highest_load, uhci->load[phase]);
621 static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
628 minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
634 minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
636 load = uhci_highest_load(uhci, phase, qh->period);
646 dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
657 static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
664 uhci->load[i] += load;
665 uhci->total_load += load;
667 uhci_to_hcd(uhci)->self.bandwidth_allocated =
668 uhci->total_load / MAX_PHASE;
671 ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
675 ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
680 dev_dbg(uhci_dev(uhci),
690 static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
697 uhci->load[i] -= load;
698 uhci->total_load -= load;
700 uhci_to_hcd(uhci)->self.bandwidth_allocated =
701 uhci->total_load / MAX_PHASE;
704 --uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
708 --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
713 dev_dbg(uhci_dev(uhci),
720 static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
738 static void uhci_free_urb_priv(struct uhci_hcd *uhci,
744 dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
749 uhci_free_td(uhci, td);
787 static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
842 td = uhci_alloc_td(uhci);
862 td = uhci_alloc_td(uhci);
879 td = uhci_alloc_td(uhci);
898 uhci_add_fsbr(uhci, urb);
913 static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
955 td = uhci_alloc_td(uhci);
983 td = uhci_alloc_td(uhci);
1009 td = uhci_alloc_td(uhci);
1029 static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
1040 ret = uhci_submit_common(uhci, urb, qh);
1042 uhci_add_fsbr(uhci, urb);
1046 static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
1076 ret = uhci_check_bandwidth(uhci, qh);
1083 ret = uhci_submit_common(uhci, urb, qh);
1087 uhci_reserve_bandwidth(uhci, qh);
1095 static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
1134 uhci_free_td(uhci, td);
1142 static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1173 uhci_show_qh(uhci, urbp->qh, errbuf,
1200 uhci_free_td(uhci, qh->post_td);
1219 ret = uhci_fixup_short_transfer(uhci, qh, urbp);
1226 static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1244 i = uhci_check_bandwidth(uhci, qh);
1249 uhci_get_current_frame_number(uhci);
1250 frame = uhci->frame_number + 10;
1257 i = urb->start_frame - uhci->last_iso_frame;
1261 i = uhci_check_bandwidth(uhci, qh);
1286 uhci_get_current_frame_number(uhci);
1287 if (uhci_frame_before_eq(frame, uhci->frame_number)) {
1288 frame = uhci->frame_number + 1;
1297 if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
1306 td = uhci_alloc_td(uhci);
1323 uhci_insert_td_in_frame_list(uhci, td, frame);
1334 uhci_reserve_bandwidth(uhci, qh);
1338 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1349 if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
1352 uhci_remove_tds_from_frame(uhci, qh->iso_frame);
1370 uhci_free_td(uhci, td);
1381 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1386 spin_lock_irqsave(&uhci->lock, flags);
1393 urbp = uhci_alloc_urb_priv(uhci, urb);
1400 qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
1408 ret = uhci_submit_control(uhci, urb, qh);
1411 ret = uhci_submit_bulk(uhci, urb, qh);
1414 ret = uhci_submit_interrupt(uhci, urb, qh);
1418 ret = uhci_submit_isochronous(uhci, urb, qh);
1432 uhci_activate_qh(uhci, qh);
1433 uhci_urbp_wants_fsbr(uhci, urbp);
1439 uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */
1441 uhci_free_urb_priv(uhci, urbp);
1446 spin_unlock_irqrestore(&uhci->lock, flags);
1452 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1457 spin_lock_irqsave(&uhci->lock, flags);
1466 uhci_unlink_isochronous_tds(uhci, urb);
1470 uhci_get_current_frame_number(uhci);
1471 if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
1472 qh->unlink_frame = uhci->frame_number;
1475 uhci_unlink_qh(uhci, qh);
1478 spin_unlock_irqrestore(&uhci->lock, flags);
1485 static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
1487 __releases(uhci->lock)
1488 __acquires(uhci->lock)
1521 uhci_free_urb_priv(uhci, urbp);
1522 usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
1524 spin_unlock(&uhci->lock);
1525 usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
1526 spin_lock(&uhci->lock);
1531 uhci_unlink_qh(uhci, qh);
1533 uhci_release_bandwidth(uhci, qh);
1542 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
1544 static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
1555 status = uhci_result_isochronous(uhci, urb);
1557 status = uhci_result_common(uhci, urb);
1570 uhci_giveback_urb(uhci, qh, urb, status);
1591 if (!uhci_cleanup_queue(uhci, qh, urb)) {
1595 uhci_giveback_urb(uhci, qh, urb, 0);
1618 uhci_activate_qh(uhci, qh);
1624 uhci_make_qh_idle(uhci, qh);
1637 static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
1670 ret = uhci->is_stopped;
1693 uhci_unlink_qh(uhci, qh);
1698 uhci_urbp_wants_fsbr(uhci, urbp);
1708 static void uhci_scan_schedule(struct uhci_hcd *uhci)
1714 if (uhci->scan_in_progress) {
1715 uhci->need_rescan = 1;
1718 uhci->scan_in_progress = 1;
1720 uhci->need_rescan = 0;
1721 uhci->fsbr_is_wanted = 0;
1723 uhci_clear_next_interrupt(uhci);
1724 uhci_get_current_frame_number(uhci);
1725 uhci->cur_iso_frame = uhci->frame_number;
1729 uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
1731 while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
1732 uhci->next_qh = list_entry(qh->node.next,
1735 if (uhci_advance_check(uhci, qh)) {
1736 uhci_scan_qh(uhci, qh);
1738 uhci_urbp_wants_fsbr(uhci,
1745 uhci->last_iso_frame = uhci->cur_iso_frame;
1746 if (uhci->need_rescan)
1748 uhci->scan_in_progress = 0;
1750 if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
1751 !uhci->fsbr_expiring) {
1752 uhci->fsbr_expiring = 1;
1753 mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
1756 if (list_empty(&uhci->skel_unlink_qh->node))
1757 uhci_clear_next_interrupt(uhci);
1759 uhci_set_next_interrupt(uhci);
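
For context, below is a minimal user-space sketch of the per-frame TD chain that the uhci_insert_td_in_frame_list() and uhci_remove_td_from_frame_list() entries above manipulate. All names and the frame count are illustrative, not the driver's; the real code keeps a hardware-visible frame[] array alongside the frame_cpu[] software copy and links the last TD back into the skeleton QHs, which this model replaces with a plain NULL terminator.

/*
 * Simplified model (hypothetical names) of the per-frame TD chain.
 * One array stands in for both uhci->frame[] and uhci->frame_cpu[].
 */
#include <stdio.h>

#define NUM_FRAMES 8			/* the real schedule has 1024 frames */

struct model_td {
	struct model_td *link;		/* next TD scheduled in the same frame */
	int id;
};

static struct model_td *frame[NUM_FRAMES];

/* Rough analogue of uhci_insert_td_in_frame_list(): append behind any TDs
 * already scheduled for this frame. */
static void insert_td_in_frame(struct model_td *td, unsigned int framenum)
{
	framenum &= NUM_FRAMES - 1;
	td->link = NULL;
	if (!frame[framenum]) {
		frame[framenum] = td;
	} else {
		struct model_td *last = frame[framenum];

		while (last->link)
			last = last->link;
		last->link = td;
	}
}

/* Rough analogue of uhci_remove_td_from_frame_list(): unlink one TD. */
static void remove_td_from_frame(struct model_td *td, unsigned int framenum)
{
	struct model_td **pp;

	framenum &= NUM_FRAMES - 1;
	for (pp = &frame[framenum]; *pp && *pp != td; pp = &(*pp)->link)
		;
	if (*pp)
		*pp = td->link;
}

int main(void)
{
	struct model_td a = { .id = 1 }, b = { .id = 2 };
	struct model_td *t;

	insert_td_in_frame(&a, 3);
	insert_td_in_frame(&b, 3);
	remove_td_from_frame(&a, 3);

	for (t = frame[3]; t; t = t->link)
		printf("frame 3 -> td %d\n", t->id);	/* prints "frame 3 -> td 2" */
	return 0;
}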
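
Similarly, here is a small standalone model of the periodic-bandwidth bookkeeping behind the uhci_highest_load(), uhci_check_bandwidth() and uhci_reserve_bandwidth() entries above: every endpoint charges its per-frame load to each schedule slot it occupies, and a new endpoint is admitted only while its busiest slot stays within a budget. The helper names and the budget constant below are assumptions for illustration; the driver's phase-selection logic and its updates to the hcd's bandwidth counters are not reproduced.

/*
 * Rough model of periodic-bandwidth accounting: load[] holds the
 * microseconds committed in each schedule slot, like uhci->load[].
 */
#include <stdio.h>

#define MAX_PHASE		32	/* schedule slots tracked */
#define MAX_PERIODIC_LOAD	900	/* assumed per-frame budget, in us */

static int load[MAX_PHASE];
static int total_load;

/* Worst-case slot load among the slots a phase/period endpoint would use. */
static int highest_load(int phase, int period)
{
	int highest = load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		if (load[phase] > highest)
			highest = load[phase];
	return highest;
}

/* Admit the endpoint only if its busiest slot stays within the budget. */
static int check_bandwidth(int phase, int period, int ep_load)
{
	return highest_load(phase, period) + ep_load <= MAX_PERIODIC_LOAD;
}

/* Charge every slot the endpoint occupies, as uhci_reserve_bandwidth() does. */
static void reserve_bandwidth(int phase, int period, int ep_load)
{
	int i;

	for (i = phase; i < MAX_PHASE; i += period) {
		load[i] += ep_load;
		total_load += ep_load;
	}
}

int main(void)
{
	if (check_bandwidth(0, 8, 300))
		reserve_bandwidth(0, 8, 300);
	printf("average committed load: %d us per frame\n",
			total_load / MAX_PHASE);
	return 0;
}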