Path: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/irda/

Lines Matching refs:self

44 static void __irttp_close_tsap(struct tsap_cb *self);
58 static void irttp_run_tx_queue(struct tsap_cb *self);
59 static void irttp_run_rx_queue(struct tsap_cb *self);
61 static void irttp_flush_queues(struct tsap_cb *self);
62 static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
63 static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
134 * Function irttp_start_todo_timer (self, timeout)
140 static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
143 mod_timer(&self->todo_timer, jiffies + timeout);
162 struct tsap_cb *self = (struct tsap_cb *) data;
165 if (!self || self->magic != TTP_TSAP_MAGIC)
168 IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);
171 irttp_run_rx_queue(self);
172 irttp_run_tx_queue(self);
175 if (test_bit(0, &self->disconnect_pend)) {
177 if (skb_queue_empty(&self->tx_queue)) {
179 clear_bit(0, &self->disconnect_pend); /* FALSE */
181 /* Note : self->disconnect_skb may be NULL */
182 irttp_disconnect_request(self, self->disconnect_skb,
184 self->disconnect_skb = NULL;
187 irttp_start_todo_timer(self, HZ/10);
195 if (self->close_pend)
197 irttp_close_tsap(self);
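
The two groups above show the TSAP "todo" timer: irttp_start_todo_timer() re-arms a kernel timer via mod_timer(), and the expiry handler validates the control block's magic cookie, retries queued rx/tx work, and completes a pending disconnect only once the tx queue has drained. A minimal userspace model of that retry decision, with all names invented for illustration:

    #include <stdbool.h>

    #define TSAP_MAGIC 0x5454u          /* sanity cookie (value invented) */

    struct tsap_model {
        unsigned magic;                 /* checked before every use */
        bool disconnect_pend;           /* disconnect requested earlier */
        int  tx_queued;                 /* frames still waiting to go out */
    };

    /* Returns true when the timer should be re-armed (work not done yet). */
    static bool todo_tick(struct tsap_model *self)
    {
        if (!self || self->magic != TSAP_MAGIC)
            return false;               /* stale or corrupted instance */
        if (self->disconnect_pend && self->tx_queued > 0)
            return true;                /* keep draining, retry later */
        self->disconnect_pend = false;  /* queue empty: disconnect now */
        return false;
    }
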
201 * Function irttp_flush_queues (self)
205 static void irttp_flush_queues(struct tsap_cb *self)
211 IRDA_ASSERT(self != NULL, return;);
212 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
215 while ((skb = skb_dequeue(&self->tx_queue)) != NULL)
219 while ((skb = skb_dequeue(&self->rx_queue)) != NULL)
223 while ((skb = skb_dequeue(&self->rx_fragments)) != NULL)
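
irttp_flush_queues() simply pops every sk_buff off the tx, rx, and rx_fragments queues and frees it. The same drain-and-free shape, sketched with a plain linked list standing in for struct sk_buff_head (illustrative only):

    #include <stdlib.h>

    struct buf { struct buf *next; };

    static struct buf *dequeue(struct buf **head)
    {
        struct buf *b = *head;
        if (b)
            *head = b->next;
        return b;
    }

    /* Same shape as the three while loops above: pop until empty, free each. */
    static void flush_queue(struct buf **head)
    {
        struct buf *b;
        while ((b = dequeue(head)) != NULL)
            free(b);
    }
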
228 * Function irttp_reassemble (self)
234 static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
239 IRDA_ASSERT(self != NULL, return NULL;);
240 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);
242 IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__,
243 self->rx_sdu_size);
245 skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
254 skb_put(skb, self->rx_sdu_size);
259 while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
268 __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
269 /* Note : irttp_run_rx_queue() calculates self->rx_sdu_size
271 * have n == self->rx_sdu_size, except in cases where we
272 * dropped the last fragment (when self->rx_sdu_size exceeds
273 * self->rx_max_sdu_size), where n < self->rx_sdu_size.
275 IRDA_ASSERT(n <= self->rx_sdu_size, n = self->rx_sdu_size;);
280 self->rx_sdu_size = 0;
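
irttp_reassemble_skb() allocates one skb sized to the accumulated rx_sdu_size and copies the queued fragments into it in arrival order; as the comment notes, the copied total falls short only when the last fragment was dropped for exceeding rx_max_sdu_size. A userspace sketch of that concatenation (names invented, plain buffers in place of sk_buffs):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    struct frag { struct frag *next; size_t len; unsigned char data[]; };

    static unsigned char *reassemble(struct frag **q, size_t rx_sdu_size)
    {
        unsigned char *sdu = malloc(rx_sdu_size);
        size_t n = 0;

        if (!sdu)
            return NULL;
        while (*q) {                    /* fragments leave in arrival order */
            struct frag *f = *q;
            *q = f->next;
            memcpy(sdu + n, f->data, f->len);
            n += f->len;
            free(f);
        }
        /* n == rx_sdu_size unless the last fragment was dropped for
         * exceeding rx_max_sdu_size, per the kernel comment above. */
        assert(n <= rx_sdu_size);
        return sdu;
    }
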
291 static inline void irttp_fragment_skb(struct tsap_cb *self,
299 IRDA_ASSERT(self != NULL, return;);
300 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
306 while (skb->len > self->max_seg_size) {
310 frag = alloc_skb(self->max_seg_size+self->max_header_size,
315 skb_reserve(frag, self->max_header_size);
318 skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size),
319 self->max_seg_size);
326 skb_pull(skb, self->max_seg_size);
329 skb_queue_tail(&self->tx_queue, frag);
338 skb_queue_tail(&self->tx_queue, skb);
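
irttp_fragment_skb() repeatedly carves max_seg_size bytes off the front of an oversized SDU (via skb_pull) and queues each piece, then queues the short remainder as the final fragment. The same loop over plain buffers, with the queue operation passed in as a callback so the sketch stays self-contained (a model, not the kernel code):

    #include <stddef.h>

    static void fragment_sdu(const unsigned char *sdu, size_t len,
                             size_t max_seg_size,
                             void (*enqueue)(const unsigned char *, size_t))
    {
        /* Queue full-size segments first... */
        while (len > max_seg_size) {
            enqueue(sdu, max_seg_size);
            sdu += max_seg_size;        /* advances like skb_pull() above */
            len -= max_seg_size;
        }
        /* ...then the remainder becomes the final, possibly short, fragment. */
        enqueue(sdu, len);
    }
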
342 * Function irttp_param_max_sdu_size (self, param)
351 struct tsap_cb *self;
353 self = (struct tsap_cb *) instance;
355 IRDA_ASSERT(self != NULL, return -1;);
356 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
359 param->pv.i = self->tx_max_sdu_size;
361 self->tx_max_sdu_size = param->pv.i;
393 struct tsap_cb *self;
408 self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
409 if (self == NULL) {
415 irttp_init_tsap(self);
418 self->todo_timer.data = (unsigned long) self;
419 self->todo_timer.function = &irttp_todo_expired;
431 ttp_notify.instance = self;
434 self->magic = TTP_TSAP_MAGIC;
435 self->connected = FALSE;
451 self->stsap_sel = lsap->slsap_sel;
452 IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);
454 self->notify = *notify;
455 self->lsap = lsap;
457 hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL);
460 self->initial_credit = TTP_RX_MAX_CREDIT;
462 self->initial_credit = credit;
464 return self;
475 static void __irttp_close_tsap(struct tsap_cb *self)
478 IRDA_ASSERT(self != NULL, return;);
479 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
481 irttp_flush_queues(self);
483 del_timer(&self->todo_timer);
487 if (self->disconnect_skb)
488 dev_kfree_skb(self->disconnect_skb);
490 self->connected = FALSE;
491 self->magic = ~TTP_TSAP_MAGIC;
493 kfree(self);
497 * Function irttp_close (self)
506 int irttp_close_tsap(struct tsap_cb *self)
512 IRDA_ASSERT(self != NULL, return -1;);
513 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
516 if (self->connected) {
518 if (!test_bit(0, &self->disconnect_pend)) {
521 irttp_disconnect_request(self, NULL, P_NORMAL);
523 self->close_pend = TRUE;
524 irttp_start_todo_timer(self, HZ/10);
529 tsap = hashbin_remove(irttp->tsaps, (long) self, NULL);
531 IRDA_ASSERT(tsap == self, return -1;);
534 if (self->lsap) {
535 irlmp_close_lsap(self->lsap);
536 self->lsap = NULL;
539 __irttp_close_tsap(self);
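
irttp_open_tsap() stamps TTP_TSAP_MAGIC into the new control block and __irttp_close_tsap() inverts it before kfree(), which is what every IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, ...) above is guarding against: use of stale or corrupted pointers. A compact model of that cookie lifecycle (names and cookie value invented):

    #include <stdlib.h>

    #define CB_MAGIC 0x4C54u            /* invented cookie value */

    struct cb { unsigned magic; /* ...rest of the state... */ };

    static struct cb *cb_open(void)
    {
        struct cb *self = calloc(1, sizeof(*self));
        if (self)
            self->magic = CB_MAGIC;     /* stamp on creation */
        return self;
    }

    static void cb_close(struct cb *self)
    {
        if (!self || self->magic != CB_MAGIC)
            return;                     /* reject stale/corrupt pointers */
        self->magic = ~CB_MAGIC;        /* poison before freeing */
        free(self);
    }
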
546 * Function irttp_udata_request (self, skb)
551 int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
553 IRDA_ASSERT(self != NULL, return -1;);
554 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
560 if ((skb->len == 0) || (!self->connected)) {
566 if (skb->len > self->max_seg_size) {
572 irlmp_udata_request(self->lsap, skb);
573 self->stats.tx_packets++;
590 int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
595 IRDA_ASSERT(self != NULL, return -1;);
596 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
600 skb_queue_len(&self->tx_queue));
603 if ((skb->len == 0) || (!self->connected)) {
613 if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
624 if ((self->tx_max_sdu_size != 0) &&
625 (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
626 (skb->len > self->tx_max_sdu_size))
636 if (skb_queue_len(&self->tx_queue) >= TTP_TX_MAX_QUEUE) {
640 irttp_run_tx_queue(self);
649 if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
655 skb_queue_tail(&self->tx_queue, skb);
663 irttp_fragment_skb(self, skb);
667 if ((!self->tx_sdu_busy) &&
668 (skb_queue_len(&self->tx_queue) > TTP_TX_HIGH_THRESHOLD)) {
670 if (self->notify.flow_indication) {
671 self->notify.flow_indication(self->notify.instance,
672 self, FLOW_STOP);
674 /* self->tx_sdu_busy is the state of the client.
681 self->tx_sdu_busy = TRUE;
685 irttp_run_tx_queue(self);
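
irttp_data_request() ends with hysteresis-based flow control: once the tx queue grows past TTP_TX_HIGH_THRESHOLD the client gets FLOW_STOP and tx_sdu_busy is set, and irttp_run_tx_queue() later sends FLOW_START when the queue falls below TTP_TX_LOW_THRESHOLD. The gap between the two watermarks prevents rapid stop/start oscillation around a single threshold. A minimal sketch of the pattern (threshold values are placeholders, not TTP's):

    #include <stdbool.h>

    #define TX_HIGH_WATER 14            /* placeholder thresholds */
    #define TX_LOW_WATER   5

    static bool tx_busy;                /* mirrors self->tx_sdu_busy */

    static void after_enqueue(int qlen, void (*flow)(int stop))
    {
        if (!tx_busy && qlen > TX_HIGH_WATER) {
            flow(1);                    /* FLOW_STOP: client must pause */
            tx_busy = true;
        }
    }

    static void after_dequeue(int qlen, void (*flow)(int stop))
    {
        if (tx_busy && qlen < TX_LOW_WATER) {
            flow(0);                    /* FLOW_START: client may resume */
            tx_busy = false;
        }
    }
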
696 * Function irttp_run_tx_queue (self)
701 static void irttp_run_tx_queue(struct tsap_cb *self)
709 self->send_credit, skb_queue_len(&self->tx_queue));
712 if (irda_lock(&self->tx_queue_lock) == FALSE)
718 while ((self->send_credit > 0) &&
719 (!irlmp_lap_tx_queue_full(self->lsap)) &&
720 (skb = skb_dequeue(&self->tx_queue)))
727 spin_lock_irqsave(&self->lock, flags);
729 n = self->avail_credit;
730 self->avail_credit = 0;
734 self->avail_credit = n - 127;
737 self->remote_credit += n;
738 self->send_credit--;
740 spin_unlock_irqrestore(&self->lock, flags);
751 * stored in IrLAP (self->wx_list). When we are within
773 irlmp_data_request(self->lsap, skb);
774 self->stats.tx_packets++;
783 if ((self->tx_sdu_busy) &&
784 (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
785 (!self->close_pend))
787 if (self->notify.flow_indication)
788 self->notify.flow_indication(self->notify.instance,
789 self, FLOW_START);
791 /* self->tx_sdu_busy is the state of the client.
794 self->tx_sdu_busy = FALSE;
798 self->tx_queue_lock = 0;
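
The middle of irttp_run_tx_queue() shows TTP's piggybacked credit grant: avail_credit is clamped to 127 because the data header's credit field is 7 bits wide, the excess is carried forward, remote_credit grows by what was granted, and send_credit pays for the frame itself. Sketched as a standalone function (struct and names are illustrative):

    struct credit_state {
        int send_credit;                /* frames we may still send */
        int avail_credit;               /* earned, not yet granted */
        int remote_credit;              /* credits the peer holds */
    };

    /* Returns the value to place in the data header's credit field. */
    static int take_frame_credits(struct credit_state *c)
    {
        int n = c->avail_credit;

        c->avail_credit = 0;
        if (n > 127) {                  /* the header field is 7 bits */
            c->avail_credit = n - 127;  /* carry the excess forward */
            n = 127;
        }
        c->remote_credit += n;          /* peer gains n credits */
        c->send_credit--;               /* this frame costs us one */
        return n;
    }
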
802 * Function irttp_give_credit (self)
807 static inline void irttp_give_credit(struct tsap_cb *self)
813 IRDA_ASSERT(self != NULL, return;);
814 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
818 self->send_credit, self->avail_credit, self->remote_credit);
833 spin_lock_irqsave(&self->lock, flags);
835 n = self->avail_credit;
836 self->avail_credit = 0;
840 self->avail_credit = n - 127;
843 self->remote_credit += n;
845 spin_unlock_irqrestore(&self->lock, flags);
850 irlmp_data_request(self->lsap, tx_skb);
851 self->stats.tx_packets++;
863 struct tsap_cb *self;
868 self = (struct tsap_cb *) instance;
870 IRDA_ASSERT(self != NULL, return -1;);
871 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
874 self->stats.rx_packets++;
877 if (self->notify.udata_indication) {
878 err = self->notify.udata_indication(self->notify.instance,
879 self, skb);
899 struct tsap_cb *self;
903 self = (struct tsap_cb *) instance;
907 self->stats.rx_packets++;
914 spin_lock_irqsave(&self->lock, flags);
915 self->send_credit += n;
917 self->remote_credit--;
918 spin_unlock_irqrestore(&self->lock, flags);
929 skb_queue_tail(&self->rx_queue, skb);
947 irttp_run_rx_queue(self);
961 if (self->send_credit == n) {
963 irttp_run_tx_queue(self);
973 * Function irttp_status_indication (self, reason)
981 struct tsap_cb *self;
985 self = (struct tsap_cb *) instance;
987 IRDA_ASSERT(self != NULL, return;);
988 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
991 if (self->close_pend)
997 if (self->notify.status_indication != NULL)
998 self->notify.status_indication(self->notify.instance,
1005 * Function irttp_flow_indication (self, reason)
1012 struct tsap_cb *self;
1014 self = (struct tsap_cb *) instance;
1016 IRDA_ASSERT(self != NULL, return;);
1017 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1019 IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);
1030 irttp_run_tx_queue(self);
1044 if (self->disconnect_pend)
1045 irttp_start_todo_timer(self, 0);
1049 * Function irttp_flow_request (self, command)
1055 void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
1059 IRDA_ASSERT(self != NULL, return;);
1060 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1065 self->rx_sdu_busy = TRUE;
1069 self->rx_sdu_busy = FALSE;
1073 irttp_run_rx_queue(self);
1083 * Function irttp_connect_request (self, dtsap_sel, daddr, qos)
1088 int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
1099 IRDA_ASSERT(self != NULL, return -EBADR;);
1100 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
1102 if (self->connected) {
1128 self->connected = FALSE;
1129 self->avail_credit = 0;
1130 self->rx_max_sdu_size = max_sdu_size;
1131 self->rx_sdu_size = 0;
1132 self->rx_sdu_busy = FALSE;
1133 self->dtsap_sel = dtsap_sel;
1135 n = self->initial_credit;
1137 self->remote_credit = 0;
1138 self->send_credit = 0;
1144 self->avail_credit = n - 127;
1148 self->remote_credit = n;
1174 return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
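
irttp_connect_request() applies the same 7-bit clamp to the initial credit: at most 127 can be announced in the connect frame, and any surplus waits in avail_credit to be granted once the connection is up. A sketch under those assumptions:

    /* Returns the credit value for the connect frame's 7-bit field;
     * *leftover receives whatever could not be announced yet. */
    static int initial_credit_field(int initial, int *leftover)
    {
        int n = initial;

        *leftover = 0;
        if (n > 127) {
            *leftover = n - 127;        /* granted later, after connect */
            n = 127;
        }
        return n;
    }
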
1189 struct tsap_cb *self;
1197 self = (struct tsap_cb *) instance;
1199 IRDA_ASSERT(self != NULL, return;);
1200 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1203 self->max_seg_size = max_seg_size - TTP_HEADER;
1204 self->max_header_size = max_header_size + TTP_HEADER;
1221 self->send_credit = n;
1222 self->tx_max_sdu_size = 0;
1223 self->connected = TRUE;
1233 ret = irda_param_extract_all(self, skb->data+1,
1251 self->send_credit, self->avail_credit, self->remote_credit);
1254 self->tx_max_sdu_size);
1256 if (self->notify.connect_confirm) {
1257 self->notify.connect_confirm(self->notify.instance, self, qos,
1258 self->tx_max_sdu_size,
1259 self->max_header_size, skb);
1274 struct tsap_cb *self;
1281 self = (struct tsap_cb *) instance;
1283 IRDA_ASSERT(self != NULL, return;);
1284 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1289 self->max_seg_size = max_seg_size - TTP_HEADER;
1290 self->max_header_size = max_header_size + TTP_HEADER;
1292 IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);
1295 self->dtsap_sel = lsap->dlsap_sel;
1299 self->send_credit = n;
1300 self->tx_max_sdu_size = 0;
1310 ret = irda_param_extract_all(self, skb->data+1,
1328 if (self->notify.connect_indication) {
1329 self->notify.connect_indication(self->notify.instance, self,
1330 qos, self->tx_max_sdu_size,
1331 self->max_header_size, skb);
1343 int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
1351 IRDA_ASSERT(self != NULL, return -1;);
1352 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
1355 self->stsap_sel);
1376 self->avail_credit = 0;
1377 self->remote_credit = 0;
1378 self->rx_max_sdu_size = max_sdu_size;
1379 self->rx_sdu_size = 0;
1380 self->rx_sdu_busy = FALSE;
1382 n = self->initial_credit;
1386 self->avail_credit = n - 127;
1390 self->remote_credit = n;
1391 self->connected = TRUE;
1404 /* irda_param_insert(self, IRTTP_MAX_SDU_SIZE, frame+1, */
1419 ret = irlmp_connect_response(self->lsap, tx_skb);
1426 * Function irttp_dup (self, instance)
1484 * Function irttp_disconnect_request (self)
1490 int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1495 IRDA_ASSERT(self != NULL, return -1;);
1496 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
1499 if (!self->connected) {
1511 if (test_and_set_bit(0, &self->disconnect_pend)) {
1518 irttp_run_tx_queue(self);
1525 if (!skb_queue_empty(&self->tx_queue)) {
1533 irttp_flush_queues(self);
1540 self->disconnect_skb = userdata; /* May be NULL */
1542 irttp_run_tx_queue(self);
1544 irttp_start_todo_timer(self, HZ/10);
1548 /* Note : we don't need to check if self->rx_queue is full and the
1549 * state of self->rx_sdu_busy because the disconnect response will
1554 self->connected = FALSE;
1569 ret = irlmp_disconnect_request(self->lsap, userdata);
1572 clear_bit(0, &self->disconnect_pend); /* FALSE */
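
irttp_disconnect_request() uses test_and_set_bit() so a second call becomes a no-op, and on a graceful (P_NORMAL) disconnect it parks the userdata in disconnect_skb and lets the todo timer finish the job once the tx queue drains. A simplified model of that sequencing (illustrative names):

    #include <stdbool.h>

    struct conn {
        bool disconnect_pend;           /* one-shot latch */
        int  tx_queued;
    };

    /* Returns true if the disconnect went out now, false if deferred
     * (or already in progress). */
    static bool request_disconnect(struct conn *c, bool graceful)
    {
        if (c->disconnect_pend)
            return false;               /* second call: no-op */
        c->disconnect_pend = true;

        if (graceful && c->tx_queued > 0)
            return false;               /* drain first; a timer retries */

        /* ...send the disconnect PDU here... */
        c->disconnect_pend = false;
        return true;
    }
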
1579 * Function irttp_disconnect_indication (self, reason)
1587 struct tsap_cb *self;
1591 self = (struct tsap_cb *) instance;
1593 IRDA_ASSERT(self != NULL, return;);
1594 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1597 self->connected = FALSE;
1600 if (self->close_pend) {
1605 irttp_close_tsap(self);
1612 * attempted to close the tsap and self->close_pend would be TRUE.
1616 if (self->notify.disconnect_indication)
1617 self->notify.disconnect_indication(self->notify.instance, self,
1625 * Function irttp_do_data_indication (self, skb)
1631 static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
1636 if (self->close_pend) {
1641 err = self->notify.data_indication(self->notify.instance, self, skb);
1652 self->rx_sdu_busy = TRUE;
1659 skb_queue_head(&self->rx_queue, skb);
1664 * Function irttp_run_rx_queue (self)
1669 static void irttp_run_rx_queue(struct tsap_cb *self)
1675 self->send_credit, self->avail_credit, self->remote_credit);
1678 if (irda_lock(&self->rx_queue_lock) == FALSE)
1684 while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
1692 self->rx_sdu_size += skb->len;
1700 if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
1701 irttp_do_data_indication(self, skb);
1702 self->rx_sdu_size = 0;
1713 if (self->rx_sdu_size <= self->rx_max_sdu_size) {
1716 skb_queue_tail(&self->rx_fragments, skb);
1726 if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
1727 (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
1735 if (!skb_queue_empty(&self->rx_fragments)) {
1736 skb_queue_tail(&self->rx_fragments,
1739 skb = irttp_reassemble_skb(self);
1743 irttp_do_data_indication(self, skb);
1751 skb = irttp_reassemble_skb(self);
1753 irttp_do_data_indication(self, skb);
1755 self->rx_sdu_size = 0;
1766 * No need to spinlock, write is atomic and self-correcting...
1769 self->avail_credit = (self->initial_credit -
1770 (self->remote_credit +
1771 skb_queue_len(&self->rx_queue) +
1772 skb_queue_len(&self->rx_fragments)));
1775 if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
1776 (self->avail_credit > 0)) {
1778 irttp_give_credit(self);
1796 self->rx_queue_lock = 0;
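
At the end of irttp_run_rx_queue() the available credit is recomputed as the initial pool minus credits already out at the peer and minus buffers still held locally, and a dataless credit frame goes out before the peer runs dry. The arithmetic and the threshold check, sketched standalone (names invented):

    /* avail = initial pool - (credits at the peer + buffers still held). */
    static int compute_avail_credit(int initial, int remote_credit,
                                    int rx_queued, int rx_frags)
    {
        return initial - (remote_credit + rx_queued + rx_frags);
    }

    /* Top the peer up before it stalls; give() stands in for a helper
     * that sends a dataless frame whose header carries the grant. */
    static void maybe_give_credit(int remote_credit, int avail_credit,
                                  int min_credit, void (*give)(void))
    {
        if (remote_credit <= min_credit && avail_credit > 0)
            give();
    }
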
1807 struct tsap_cb *self;
1813 for (self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
1814 self != NULL;
1815 self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps)) {
1821 return self;
1841 const struct tsap_cb *self = v;
1845 self->stsap_sel);
1847 self->dtsap_sel);
1849 self->connected ? "TRUE" : "FALSE");
1851 self->avail_credit);
1853 self->remote_credit);
1855 self->send_credit);
1857 self->stats.tx_packets);
1859 self->stats.rx_packets);
1861 skb_queue_len(&self->tx_queue));
1863 skb_queue_len(&self->rx_queue));
1865 self->tx_sdu_busy ? "TRUE" : "FALSE");
1867 self->rx_sdu_busy ? "TRUE" : "FALSE");
1869 self->max_seg_size);
1871 self->tx_max_sdu_size);
1873 self->rx_max_sdu_size);
1876 self->notify.name);