Cross-reference listing for /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/c67x00/c67x00-sched.c

Lines Matching refs:c67x00

2  * c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
27 #include "c67x00.h"
28 #include "c67x00-hcd.h"
152 static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
154 struct device *dev = c67x00_hcd_dev(c67x00);
176 dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg) { }
183 static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
185 return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK;
222 static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
229 c67x00->urb_count--;
232 c67x00->urb_iso_count--;
233 if (c67x00->urb_iso_count == 0)
234 c67x00->max_frame_bw = MAX_FRAME_BW_STD;
242 list_for_each_entry(td, &c67x00->td_list, td_list)
255 c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
261 c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
266 if (frame_after(c67x00->current_frame, ep_data->next_frame))
268 frame_add(c67x00->current_frame, 1);
272 /* Allocate and initialize a new c67x00 endpoint data structure */
287 ep_data->next_frame = frame_add(c67x00->current_frame, 1);
293 list_add(&ep_data->node, &c67x00->list[type]);
297 list_for_each_entry(prev, &c67x00->list[type], node) {
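
The scheduling code above leans on wraparound-safe frame arithmetic: frame_add() and frame_after() (lines 266-268, 287), later frame_after_eq() (lines 756, 768), all masked with HOST_FRAME_MASK (line 185). A minimal standalone sketch of how such helpers typically behave, assuming an 11-bit USB frame counter; the bodies below are illustrative, not copied from the driver:

#include <stdint.h>
#include <stdio.h>

#define HOST_FRAME_MASK 0x07FF /* assumption: 11-bit frame counter */

static uint16_t frame_add(uint16_t a, uint16_t b)
{
        return (a + b) & HOST_FRAME_MASK;
}

/* "a is after b", tolerating counter wraparound */
static int frame_after(uint16_t a, uint16_t b)
{
        return ((HOST_FRAME_MASK + a - b) & HOST_FRAME_MASK) <
               (HOST_FRAME_MASK / 2);
}

int main(void)
{
        uint16_t next = frame_add(0x07FE, 3); /* wraps to 0x0001 */

        printf("0x%04x %d\n", next, frame_after(next, 0x07FE)); /* 1 */
        return 0;
}

Because the counter wraps, a plain < comparison would misorder frames across the wrap point; the masked-difference test stays correct as long as the two frames are less than half the counter range apart.
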
331 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
335 dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");
337 spin_lock_irqsave(&c67x00->lock, flags);
342 spin_unlock_irqrestore(&c67x00->lock, flags);
347 INIT_COMPLETION(c67x00->endpoint_disable);
348 c67x00_sched_kick(c67x00);
349 wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);
351 spin_lock_irqsave(&c67x00->lock, flags);
354 spin_unlock_irqrestore(&c67x00->lock, flags);
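
The endpoint-disable path above (lines 347-349) is a bounded completion handshake: re-arm the completion, kick the scheduler, then wait up to one second for the scheduler side to signal (the complete() call visible at line 1122) that the endpoint's TDs have drained. A kernel-style sketch of that 2.6-era pattern in isolation; everything except the completion primitives is illustrative, and the completion is assumed to have been set up with init_completion() earlier:

#include <linux/completion.h>
#include <linux/jiffies.h>

static struct completion ep_disable; /* init_completion() at setup time */

/* waiter side, process context */
static void wait_endpoint_drained(void)
{
        INIT_COMPLETION(ep_disable); /* re-arm before waiting */
        /* ... kick the scheduler tasklet here ... */
        wait_for_completion_timeout(&ep_disable, 1 * HZ);
}

/* scheduler side, once the endpoint has no TDs left */
static void signal_endpoint_drained(void)
{
        complete(&ep_disable);
}
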
372 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
375 spin_lock_irqsave(&c67x00->lock, flags);
398 urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);
421 if (c67x00->urb_iso_count == 0)
422 c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
423 c67x00->urb_iso_count++;
446 if (!c67x00->urb_count++)
447 c67x00_ll_hpi_enable_sofeop(c67x00->sie);
449 c67x00_sched_kick(c67x00);
450 spin_unlock_irqrestore(&c67x00->lock, flags);
459 spin_unlock_irqrestore(&c67x00->lock, flags);
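
Note how urb_enqueue switches modes on count transitions: the first isochronous URB raises max_frame_bw to MAX_FRAME_BW_ISO (lines 421-423), and `if (!c67x00->urb_count++)` at line 446 enables SOF/EOP interrupts exactly on the 0-to-1 transition, mirroring the disable once the count drains (lines 1133-1134). A tiny runnable sketch of that transition-gated idiom; the hook names are illustrative, and the decrement side is simplified relative to the driver:

#include <stdio.h>

static int urb_count;

static void enable_sofeop(void)  { puts("irq on");  }
static void disable_sofeop(void) { puts("irq off"); }

static void urb_added(void)
{
        if (!urb_count++)       /* true only on the 0 -> 1 transition */
                enable_sofeop();
}

static void urb_removed(void)
{
        if (!--urb_count)       /* true only on the 1 -> 0 transition */
                disable_sofeop();
}

int main(void)
{
        urb_added();    /* irq on  */
        urb_added();    /* nothing */
        urb_removed();  /* nothing */
        urb_removed();  /* irq off */
        return 0;
}
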
466 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
470 spin_lock_irqsave(&c67x00->lock, flags);
475 c67x00_release_urb(c67x00, urb);
478 spin_unlock(&c67x00->lock);
480 spin_lock(&c67x00->lock);
482 spin_unlock_irqrestore(&c67x00->lock, flags);
487 spin_unlock_irqrestore(&c67x00->lock, flags);
494 * pre: c67x00 locked, urb unlocked
497 c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
509 c67x00_release_urb(c67x00, urb);
510 usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
511 spin_unlock(&c67x00->lock);
512 usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status);
513 spin_lock(&c67x00->lock);
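
Note the unlock/relock bracket around usb_hcd_giveback_urb() (lines 511-513, and similarly lines 478-480 in the dequeue path): the URB completion callback may resubmit URBs and re-enter the HCD, so it must run with the driver lock dropped. A minimal userspace sketch of the same pattern, with pthreads standing in for the spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hcd_lock = PTHREAD_MUTEX_INITIALIZER;

static void resubmitting_callback(void)
{
        pthread_mutex_lock(&hcd_lock);   /* would deadlock if still held */
        /* ... enqueue a follow-up request ... */
        pthread_mutex_unlock(&hcd_lock);
}

static void give_back(void (*complete_cb)(void))
{
        /* caller holds hcd_lock */
        pthread_mutex_unlock(&hcd_lock); /* drop across the callback */
        complete_cb();                   /* may re-enter the driver */
        pthread_mutex_lock(&hcd_lock);   /* retake before returning */
}

int main(void)
{
        pthread_mutex_lock(&hcd_lock);
        give_back(resubmitting_callback);
        pthread_mutex_unlock(&hcd_lock);
        puts("completed without deadlock");
        return 0;
}

Without the drop, resubmitting_callback() would block forever trying to retake the lock its caller still holds.
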
518 static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
558 if (unlikely(bit_time + c67x00->bandwidth_allocated >=
559 c67x00->max_frame_bw))
562 if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
563 c67x00->td_base_addr + SIE_TD_SIZE))
566 if (unlikely(c67x00->next_buf_addr + len >=
567 c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
571 if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
572 MAX_PERIODIC_BW(c67x00->max_frame_bw)))
574 c67x00->periodic_bw_allocated += bit_time;
577 c67x00->bandwidth_allocated += bit_time;
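
c67x00_claim_frame_bw() admits a TD into the current frame only if three budgets hold: total bit time against the frame budget (lines 558-559), TD and buffer space against the fixed on-chip windows (lines 562-567), and a stricter cap for periodic traffic (lines 571-572). A standalone sketch of the bandwidth half of that accounting; the frame budget and the periodic share below are assumptions, not values taken from the driver:

#include <stdbool.h>
#include <stdio.h>

#define MAX_FRAME_BW        12000           /* assumed bit times per frame */
#define MAX_PERIODIC_BW(bw) ((bw) * 9 / 10) /* assumed 90% periodic share */

struct frame_budget {
        int allocated;          /* bit times claimed so far this frame */
        int periodic_allocated; /* subset claimed by iso/int transfers */
};

static bool claim_bw(struct frame_budget *b, int bit_time, bool periodic)
{
        if (bit_time + b->allocated >= MAX_FRAME_BW)
                return false;   /* frame is full */
        if (periodic &&
            bit_time + b->periodic_allocated >= MAX_PERIODIC_BW(MAX_FRAME_BW))
                return false;   /* periodic quota exhausted */
        if (periodic)
                b->periodic_allocated += bit_time;
        b->allocated += bit_time;
        return true;
}

int main(void)
{
        struct frame_budget b = { 0, 0 };
        int first = claim_bw(&b, 11000, false); /* fits */
        int second = claim_bw(&b, 2000, false); /* rejected: frame full */

        printf("%d %d\n", first, second);       /* 1 0 */
        return 0;
}
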
586 static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
596 if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
608 !(c67x00->low_speed_ports & (1 << urbp->port)))
633 td->td_addr = c67x00->next_td_addr;
634 c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;
637 td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
638 td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
647 td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);
654 c67x00->next_buf_addr += (len + 1) & ~0x01; /* properly align */
656 list_add_tail(&td->td_list, &c67x00->td_list);
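
Within c67x00_create_td(), TD and buffer memory come from simple bump allocators over fixed SIE memory windows (next_td_addr at lines 633-634, next_buf_addr at line 654), with `(len + 1) & ~0x01` rounding each buffer up to 16-bit alignment. A small runnable sketch of that allocation style; the window base and size here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define BUF_BASE 0x0600 /* illustrative window base */
#define BUF_SIZE 0x0400 /* illustrative window size */

static uint16_t next_buf = BUF_BASE;

/* returns the buffer address, or 0 when the window is exhausted */
static uint16_t buf_alloc(uint16_t len)
{
        uint16_t addr = next_buf;

        if (next_buf + len >= BUF_BASE + BUF_SIZE)
                return 0;
        next_buf += (len + 1) & ~0x01; /* keep the next buffer even */
        return addr;
}

int main(void)
{
        printf("0x%04x\n", (unsigned)buf_alloc(5)); /* 0x0600 */
        printf("0x%04x\n", (unsigned)buf_alloc(4)); /* 0x0606: 5 rounded to 6 */
        return 0;
}
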
668 static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
697 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
714 static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
722 ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
732 ret = c67x00_add_data_urb(c67x00, urb);
739 ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
752 static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
756 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
759 return c67x00_add_data_urb(c67x00, urb);
764 static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
768 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
779 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
786 c67x00_giveback_urb(c67x00, urb, 0);
798 static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
805 list_for_each_entry(ep_data, &c67x00->list[type], node) {
812 add(c67x00, urb);
817 static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
822 if (!list_empty(&c67x00->td_list)) {
823 dev_warn(c67x00_hcd_dev(c67x00),
825 list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
826 dbg_td(c67x00, td, "Unprocessed td");
832 c67x00->bandwidth_allocated = 0;
833 c67x00->periodic_bw_allocated = 0;
835 c67x00->next_td_addr = c67x00->td_base_addr;
836 c67x00->next_buf_addr = c67x00->buf_base_addr;
839 c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
840 c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
841 c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
842 c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
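
c67x00_fill_frame() rebuilds each frame from scratch: it warns about and drops any unprocessed TDs (lines 822-826), resets the bandwidth counters and both bump allocators (lines 832-836), then fills the frame in strict priority order (isochronous, interrupt, control, bulk, lines 839-842) by handing each pipe type's add routine to the shared walker c67x00_fill_from_list(). A runnable sketch of that function-pointer dispatch, with made-up types and a toy bandwidth check:

#include <stdio.h>

struct hcd { int bw_left; };
struct urb { int bit_time; };

typedef int (*add_fn)(struct hcd *, struct urb *);

static int add_bulk(struct hcd *h, struct urb *u)
{
        if (u->bit_time > h->bw_left)
                return -1;              /* frame full: stop the walk */
        h->bw_left -= u->bit_time;
        return 0;
}

/* one walker serves every pipe type, parameterized by "add" */
static void fill_from_list(struct hcd *h, struct urb *urbs, int n, add_fn add)
{
        for (int i = 0; i < n; i++)
                if (add(h, &urbs[i]))
                        break;
}

int main(void)
{
        struct hcd h = { .bw_left = 1000 };
        struct urb urbs[] = { { 600 }, { 600 }, { 100 } };

        fill_from_list(&h, urbs, 3, add_bulk);
        printf("%d\n", h.bw_left); /* 400: the second urb did not fit */
        return 0;
}
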
851 c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
853 c67x00_ll_read_mem_le16(c67x00->sie->dev,
857 c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
861 static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
864 dbg_td(c67x00, td, "ERROR_FLAG");
868 /* dbg_td(c67x00, td, "STALL"); */
872 dbg_td(c67x00, td, "TIMEOUT");
910 static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
916 while (td->td_list.next != &c67x00->td_list) {
928 static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
953 c67x00_clear_pipe(c67x00, td);
959 c67x00_giveback_urb(c67x00, urb, 0);
967 c67x00_clear_pipe(c67x00, td);
968 c67x00_giveback_urb(c67x00, urb, 0);
974 static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
990 urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
992 c67x00_giveback_urb(c67x00, urb, 0);
998 * c67x00_check_td_list - handle tds which have been processed by the c67x00
1001 static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
1008 list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
1010 c67x00_parse_td(c67x00, td);
1018 c67x00_handle_isoc(c67x00, td);
1026 c67x00_giveback_urb(c67x00, urb,
1027 c67x00_td_to_error(c67x00, td));
1041 c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
1047 c67x00_handle_successful_td(c67x00, td);
1051 c67x00_clear_pipe(c67x00, td);
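
c67x00_check_td_list() iterates with list_for_each_entry_safe() (line 1008) because TDs are freed, and whole pipes cleared, in the middle of the walk; the _safe variant caches each node's successor before the loop body runs. A kernel-style sketch of that idiom with an illustrative TD type:

#include <linux/list.h>
#include <linux/slab.h>

struct td {
        struct list_head td_list;
        /* ... */
};

/* frees every TD; _safe caches the successor so td may be deleted */
static void drain_td_list(struct list_head *head)
{
        struct td *td, *tmp;

        list_for_each_entry_safe(td, tmp, head, td_list) {
                list_del(&td->td_list);
                kfree(td);
        }
}
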
1064 static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
1069 return !c67x00_ll_husb_get_current_td(c67x00->sie);
1075 static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
1080 c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
1083 c67x00_ll_write_mem_le16(c67x00->sie->dev,
1087 static void c67x00_send_frame(struct c67x00_hcd *c67x00)
1091 if (list_empty(&c67x00->td_list))
1092 dev_warn(c67x00_hcd_dev(c67x00),
1096 list_for_each_entry(td, &c67x00->td_list, td_list) {
1097 if (td->td_list.next == &c67x00->td_list)
1100 c67x00_send_td(c67x00, td);
1103 c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
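
c67x00_send_frame() copies each TD (and any OUT data) into chip memory, and the comparison at line 1097 (`td->td_list.next == &c67x00->td_list`) spots the final entry, since kernel lists are circular with the list head as sentinel; line 1103 then points the SIE at the first TD to start the frame. A kernel-style sketch of that last-entry test; the chip-memory writes are only comments here:

#include <linux/list.h>

struct td {
        struct list_head td_list;
        /* ... */
};

static void send_frame(struct list_head *td_head)
{
        struct td *td;

        list_for_each_entry(td, td_head, td_list) {
                if (td->td_list.next == td_head) {
                        /* td is the last entry: the head is a sentinel,
                         * so next == head means nothing follows
                         * (list_is_last() expresses the same test) */
                }
                /* write td, and any OUT data, into chip memory here */
        }
}
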
1111 static void c67x00_do_work(struct c67x00_hcd *c67x00)
1113 spin_lock(&c67x00->lock);
1115 if (!c67x00_all_tds_processed(c67x00))
1118 c67x00_check_td_list(c67x00);
1122 complete(&c67x00->endpoint_disable);
1124 if (!list_empty(&c67x00->td_list))
1127 c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
1128 if (c67x00->current_frame == c67x00->last_frame)
1130 c67x00->last_frame = c67x00->current_frame;
1133 if (!c67x00->urb_count) {
1134 c67x00_ll_hpi_disable_sofeop(c67x00->sie);
1138 c67x00_fill_frame(c67x00);
1139 if (!list_empty(&c67x00->td_list))
1141 c67x00_send_frame(c67x00);
1144 spin_unlock(&c67x00->lock);
1151 struct c67x00_hcd *c67x00 = (struct c67x00_hcd *)__c67x00;
1152 c67x00_do_work(c67x00);
1155 void c67x00_sched_kick(struct c67x00_hcd *c67x00)
1157 tasklet_hi_schedule(&c67x00->tasklet);
1160 int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
1162 tasklet_init(&c67x00->tasklet, c67x00_sched_tasklet,
1163 (unsigned long)c67x00);
1167 void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
1169 tasklet_kill(&c67x00->tasklet);
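
The scheduler entry points above are a thin wrapper around a high-priority tasklet: tasklet_init() binds the handler (lines 1162-1163), tasklet_hi_schedule() queues it from any context including interrupts (line 1157), and tasklet_kill() waits out a pending run at teardown (line 1169). A minimal sketch of that lifecycle against the 2.6-era tasklet API, with illustrative wrapper names:

#include <linux/interrupt.h>

static void sched_tasklet_fn(unsigned long data)
{
        /* softirq context: this is where c67x00_do_work() would run */
}

static struct tasklet_struct sched_tasklet;

static void sched_start(unsigned long ctx)
{
        tasklet_init(&sched_tasklet, sched_tasklet_fn, ctx);
}

static void sched_kick(void)
{
        tasklet_hi_schedule(&sched_tasklet); /* high-priority softirq */
}

static void sched_stop(void)
{
        tasklet_kill(&sched_tasklet); /* flush any pending run */
}
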