/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/net/tipc/

Lines Matching defs:l_ptr

97 static void link_handle_out_of_seq_msg(struct link *l_ptr,
99 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
100 static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
101 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
105 static void link_check_defragm_bufs(struct link *l_ptr);
106 static void link_state_event(struct link *l_ptr, u32 event);
107 static void link_reset_statistics(struct link *l_ptr);
108 static void link_print(struct link *l_ptr, struct print_buf *buf,
129 * - "l_ptr" must be valid when using dbg_link_XXX() macros
134 #define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
135 #define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) tipc_msg_print(&l_ptr->print_buf, msg, txt); } while(0)
136 #define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
139 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
140 tipc_printbuf_move(LOG, &l_ptr->print_buf); \
144 static void dbg_print_link(struct link *l_ptr, const char *str)
147 link_print(l_ptr, DBG_OUTPUT, str);
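
Illustrative sketch (not part of the match list): the dbg_link_XXX() macros above compile away when LINK_LOG_BUF_SIZE is zero and reference the identifier l_ptr directly, so they can only be used where a local struct link *l_ptr is in scope. A minimal hypothetical call site might look like:

    static void example_trace(struct link *l_ptr)
    {
            /* printf-style trace into the link's private log buffer */
            dbg_link("resetting link <%s>\n", l_ptr->name);

            /* dump the current link state via link_print() */
            dbg_link_state("before reset");
    }
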
171 static int link_working_working(struct link *l_ptr)
173 return (l_ptr->state == WORKING_WORKING);
176 static int link_working_unknown(struct link *l_ptr)
178 return (l_ptr->state == WORKING_UNKNOWN);
181 static int link_reset_unknown(struct link *l_ptr)
183 return (l_ptr->state == RESET_UNKNOWN);
186 static int link_reset_reset(struct link *l_ptr)
188 return (l_ptr->state == RESET_RESET);
191 static int link_blocked(struct link *l_ptr)
193 return (l_ptr->exp_msg_count || l_ptr->blocked);
196 static int link_congested(struct link *l_ptr)
198 return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
201 static u32 link_max_pkt(struct link *l_ptr)
203 return l_ptr->max_pkt;
206 static void link_init_max_pkt(struct link *l_ptr)
210 max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
214 l_ptr->max_pkt_target = max_pkt;
215 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
216 l_ptr->max_pkt = l_ptr->max_pkt_target;
218 l_ptr->max_pkt = MAX_PKT_DEFAULT;
220 l_ptr->max_pkt_probes = 0;
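
Illustrative sketch: link_init_max_pkt() rounds the bearer MTU down to a multiple of 4 and clamps the starting max_pkt to MAX_PKT_DEFAULT; the MTU value below is assumed, not taken from this build:

    /* Assumed example: bearer MTU = 1514
     *   max_pkt_target = 1514 & ~3 = 1512              (multiple of 4)
     *   max_pkt        = min(1512, MAX_PKT_DEFAULT)    (starting value;
     *                     later MTU probing can raise it toward the target)
     */
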
223 static u32 link_next_sent(struct link *l_ptr)
225 if (l_ptr->next_out)
226 return msg_seqno(buf_msg(l_ptr->next_out));
227 return mod(l_ptr->next_out_no);
230 static u32 link_last_sent(struct link *l_ptr)
232 return mod(link_next_sent(l_ptr) - 1);
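
Illustrative sketch: both helpers wrap sequence numbers with mod(). The definition below is an assumption about this TIPC version (link sequence numbers occupy 16 bits), shown only to make the wraparound explicit:

    /* Assumed helper, not from the match list: 16-bit sequence arithmetic */
    static inline u32 mod(u32 x)
    {
            return x & 0xffffu;
    }

    /* e.g. once next_out_no wraps, link_next_sent() may return 0 and
     * link_last_sent() then yields mod(0 - 1) == 0xffff */
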
239 int tipc_link_is_up(struct link *l_ptr)
241 if (!l_ptr)
243 return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
246 int tipc_link_is_active(struct link *l_ptr)
248 return ((l_ptr->owner->active_links[0] == l_ptr) ||
249 (l_ptr->owner->active_links[1] == l_ptr));
323 * @l_ptr: pointer to link
331 static void link_timeout(struct link *l_ptr)
333 tipc_node_lock(l_ptr->owner);
337 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
338 l_ptr->stats.queue_sz_counts++;
340 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
341 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
343 if (l_ptr->first_out) {
344 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
352 l_ptr->stats.msg_lengths_total += length;
353 l_ptr->stats.msg_length_counts++;
355 l_ptr->stats.msg_length_profile[0]++;
357 l_ptr->stats.msg_length_profile[1]++;
359 l_ptr->stats.msg_length_profile[2]++;
361 l_ptr->stats.msg_length_profile[3]++;
363 l_ptr->stats.msg_length_profile[4]++;
365 l_ptr->stats.msg_length_profile[5]++;
367 l_ptr->stats.msg_length_profile[6]++;
373 link_check_defragm_bufs(l_ptr);
375 link_state_event(l_ptr, TIMEOUT_EVT);
377 if (l_ptr->next_out)
378 tipc_link_push_queue(l_ptr);
380 tipc_node_unlock(l_ptr->owner);
383 static void link_set_timer(struct link *l_ptr, u32 time)
385 k_start_timer(&l_ptr->timer, time);
400 struct link *l_ptr;
404 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
405 if (!l_ptr) {
410 l_ptr->addr = peer;
412 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
418 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
419 k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
420 list_add_tail(&l_ptr->link_list, &b_ptr->links);
421 l_ptr->checkpoint = 1;
422 l_ptr->b_ptr = b_ptr;
423 link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
424 l_ptr->state = RESET_UNKNOWN;
426 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
427 msg = l_ptr->pmsg;
428 msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
429 msg_set_size(msg, sizeof(l_ptr->proto_msg));
434 l_ptr->priority = b_ptr->priority;
435 tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
437 link_init_max_pkt(l_ptr);
439 l_ptr->next_out_no = 1;
440 INIT_LIST_HEAD(&l_ptr->waiting_ports);
442 link_reset_statistics(l_ptr);
444 l_ptr->owner = tipc_node_attach_link(l_ptr);
445 if (!l_ptr->owner) {
446 kfree(l_ptr);
454 kfree(l_ptr);
458 tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
461 tipc_k_signal((Handler)tipc_link_start, (unsigned long)l_ptr);
464 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
466 return l_ptr;
471 * @l_ptr: pointer to link
478 void tipc_link_delete(struct link *l_ptr)
480 if (!l_ptr) {
487 k_cancel_timer(&l_ptr->timer);
489 tipc_node_lock(l_ptr->owner);
490 tipc_link_reset(l_ptr);
491 tipc_node_detach_link(l_ptr->owner, l_ptr);
492 tipc_link_stop(l_ptr);
493 list_del_init(&l_ptr->link_list);
495 kfree(l_ptr->print_buf.buf);
496 tipc_node_unlock(l_ptr->owner);
497 k_term_timer(&l_ptr->timer);
498 kfree(l_ptr);
501 void tipc_link_start(struct link *l_ptr)
503 dbg("tipc_link_start %x\n", l_ptr);
504 link_state_event(l_ptr, STARTING_EVT);
509 * @l_ptr: pointer to link
517 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
528 p_ptr->congested_link = l_ptr;
530 p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
531 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
532 l_ptr->stats.link_congs++;
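
Illustrative sketch: the waiting_pkts line in link_schedule_port() is the usual ceiling-division idiom; the byte count and packet size below are assumed values chosen only for the arithmetic:

    /* Assumed example: sz = 3000 bytes, link_max_pkt(l_ptr) = 1476
     *   waiting_pkts = 1 + (3000 - 1) / 1476 = 1 + 2 = 3
     * i.e. roughly three packets' worth of send window must free up
     * before the blocked port is woken.
     */
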
540 void tipc_link_wakeup_ports(struct link *l_ptr, int all)
544 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
552 if (link_congested(l_ptr))
554 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
573 * @l_ptr: pointer to link
576 static void link_release_outqueue(struct link *l_ptr)
578 struct sk_buff *buf = l_ptr->first_out;
586 l_ptr->first_out = NULL;
587 l_ptr->out_queue_size = 0;
592 * @l_ptr: pointer to link
595 void tipc_link_reset_fragments(struct link *l_ptr)
597 struct sk_buff *buf = l_ptr->defragm_buf;
605 l_ptr->defragm_buf = NULL;
610 * @l_ptr: pointer to link
613 void tipc_link_stop(struct link *l_ptr)
618 buf = l_ptr->oldest_deferred_in;
625 buf = l_ptr->first_out;
632 tipc_link_reset_fragments(l_ptr);
634 buf_discard(l_ptr->proto_msg_queue);
635 l_ptr->proto_msg_queue = NULL;
639 #define link_send_event(fcn, l_ptr, up) do { } while (0)
642 void tipc_link_reset(struct link *l_ptr)
645 u32 prev_state = l_ptr->state;
646 u32 checkpoint = l_ptr->next_in_no;
647 int was_active_link = tipc_link_is_active(l_ptr);
649 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
652 l_ptr->peer_session = 0;
655 link_init_max_pkt(l_ptr);
657 l_ptr->state = RESET_UNKNOWN;
663 tipc_node_link_down(l_ptr->owner, l_ptr);
664 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
665 if (was_active_link && tipc_node_has_active_links(l_ptr->owner) &&
666 l_ptr->owner->permit_changeover) {
667 l_ptr->reset_checkpoint = checkpoint;
668 l_ptr->exp_msg_count = START_CHANGEOVER;
673 link_release_outqueue(l_ptr);
674 buf_discard(l_ptr->proto_msg_queue);
675 l_ptr->proto_msg_queue = NULL;
676 buf = l_ptr->oldest_deferred_in;
682 if (!list_empty(&l_ptr->waiting_ports))
683 tipc_link_wakeup_ports(l_ptr, 1);
685 l_ptr->retransm_queue_head = 0;
686 l_ptr->retransm_queue_size = 0;
687 l_ptr->last_out = NULL;
688 l_ptr->first_out = NULL;
689 l_ptr->next_out = NULL;
690 l_ptr->unacked_window = 0;
691 l_ptr->checkpoint = 1;
692 l_ptr->next_out_no = 1;
693 l_ptr->deferred_inqueue_sz = 0;
694 l_ptr->oldest_deferred_in = NULL;
695 l_ptr->newest_deferred_in = NULL;
696 l_ptr->fsm_msg_cnt = 0;
697 l_ptr->stale_count = 0;
698 link_reset_statistics(l_ptr);
700 link_send_event(tipc_cfg_link_event, l_ptr, 0);
701 if (!in_own_cluster(l_ptr->addr))
702 link_send_event(tipc_disc_link_event, l_ptr, 0);
706 static void link_activate(struct link *l_ptr)
708 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
709 tipc_node_link_up(l_ptr->owner, l_ptr);
710 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
711 link_send_event(tipc_cfg_link_event, l_ptr, 1);
712 if (!in_own_cluster(l_ptr->addr))
713 link_send_event(tipc_disc_link_event, l_ptr, 1);
718 * @l_ptr: pointer to link
722 static void link_state_event(struct link *l_ptr, unsigned event)
725 u32 cont_intv = l_ptr->continuity_interval;
727 if (!l_ptr->started && (event != STARTING_EVT))
730 if (link_blocked(l_ptr)) {
732 link_set_timer(l_ptr, cont_intv);
736 dbg_link("STATE_EV: <%s> ", l_ptr->name);
738 switch (l_ptr->state) {
750 if (l_ptr->next_in_no != l_ptr->checkpoint) {
751 l_ptr->checkpoint = l_ptr->next_in_no;
752 if (tipc_bclink_acks_missing(l_ptr->owner)) {
753 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
755 l_ptr->fsm_msg_cnt++;
756 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
757 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
759 l_ptr->fsm_msg_cnt++;
761 link_set_timer(l_ptr, cont_intv);
765 l_ptr->state = WORKING_UNKNOWN;
766 l_ptr->fsm_msg_cnt = 0;
767 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
768 l_ptr->fsm_msg_cnt++;
769 link_set_timer(l_ptr, cont_intv / 4);
774 l_ptr->name);
775 tipc_link_reset(l_ptr);
776 l_ptr->state = RESET_RESET;
777 l_ptr->fsm_msg_cnt = 0;
778 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
779 l_ptr->fsm_msg_cnt++;
780 link_set_timer(l_ptr, cont_intv);
793 l_ptr->state = WORKING_WORKING;
794 l_ptr->fsm_msg_cnt = 0;
795 link_set_timer(l_ptr, cont_intv);
800 "while probing\n", l_ptr->name);
801 tipc_link_reset(l_ptr);
802 l_ptr->state = RESET_RESET;
803 l_ptr->fsm_msg_cnt = 0;
804 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
805 l_ptr->fsm_msg_cnt++;
806 link_set_timer(l_ptr, cont_intv);
810 if (l_ptr->next_in_no != l_ptr->checkpoint) {
812 l_ptr->state = WORKING_WORKING;
813 l_ptr->fsm_msg_cnt = 0;
814 l_ptr->checkpoint = l_ptr->next_in_no;
815 if (tipc_bclink_acks_missing(l_ptr->owner)) {
816 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
818 l_ptr->fsm_msg_cnt++;
820 link_set_timer(l_ptr, cont_intv);
821 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
823 l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
825 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
827 l_ptr->fsm_msg_cnt++;
828 link_set_timer(l_ptr, cont_intv / 4);
831 l_ptr->fsm_msg_cnt);
833 l_ptr->name);
834 tipc_link_reset(l_ptr);
835 l_ptr->state = RESET_UNKNOWN;
836 l_ptr->fsm_msg_cnt = 0;
837 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
839 l_ptr->fsm_msg_cnt++;
840 link_set_timer(l_ptr, cont_intv);
854 other = l_ptr->owner->active_links[0];
860 l_ptr->state = WORKING_WORKING;
861 l_ptr->fsm_msg_cnt = 0;
862 link_activate(l_ptr);
863 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
864 l_ptr->fsm_msg_cnt++;
865 link_set_timer(l_ptr, cont_intv);
870 l_ptr->state = RESET_RESET;
871 l_ptr->fsm_msg_cnt = 0;
872 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
873 l_ptr->fsm_msg_cnt++;
874 link_set_timer(l_ptr, cont_intv);
878 l_ptr->started = 1;
882 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
883 l_ptr->fsm_msg_cnt++;
884 link_set_timer(l_ptr, cont_intv);
897 other = l_ptr->owner->active_links[0];
903 l_ptr->state = WORKING_WORKING;
904 l_ptr->fsm_msg_cnt = 0;
905 link_activate(l_ptr);
906 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
907 l_ptr->fsm_msg_cnt++;
908 link_set_timer(l_ptr, cont_intv);
915 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
916 l_ptr->fsm_msg_cnt++;
917 link_set_timer(l_ptr, cont_intv);
918 dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
925 err("Unknown link state %u/%u\n", l_ptr->state, event);
934 static int link_bundle_buf(struct link *l_ptr,
951 if (link_max_pkt(l_ptr) < (to_pos + size))
962 l_ptr->stats.sent_bundled++;
966 static void link_add_to_outqueue(struct link *l_ptr,
970 u32 ack = mod(l_ptr->next_in_no - 1);
971 u32 seqno = mod(l_ptr->next_out_no++);
974 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
976 if (l_ptr->first_out) {
977 l_ptr->last_out->next = buf;
978 l_ptr->last_out = buf;
980 l_ptr->first_out = l_ptr->last_out = buf;
981 l_ptr->out_queue_size++;
990 int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
995 u32 queue_size = l_ptr->out_queue_size;
997 u32 queue_limit = l_ptr->queue_limit[imp];
998 u32 max_packet = link_max_pkt(l_ptr);
1006 return link_schedule_port(l_ptr, msg_origport(msg),
1012 warn("Resetting link <%s>, send queue full", l_ptr->name);
1013 tipc_link_reset(l_ptr);
1021 return tipc_link_send_long_buf(l_ptr, buf);
1025 if (queue_size > l_ptr->stats.max_queue_sz)
1026 l_ptr->stats.max_queue_sz = queue_size;
1028 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
1029 !link_congested(l_ptr))) {
1030 link_add_to_outqueue(l_ptr, buf, msg);
1032 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
1033 l_ptr->unacked_window = 0;
1035 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1036 l_ptr->stats.bearer_congs++;
1037 l_ptr->next_out = buf;
1048 if (l_ptr->next_out &&
1049 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
1050 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1062 TIPC_OK, INT_H_SIZE, l_ptr->addr);
1066 link_bundle_buf(l_ptr, bundler, buf);
1069 l_ptr->stats.sent_bundles++;
1073 if (!l_ptr->next_out)
1074 l_ptr->next_out = buf;
1075 link_add_to_outqueue(l_ptr, buf, msg);
1076 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1088 struct link *l_ptr;
1096 l_ptr = n_ptr->active_links[selector & 1];
1097 if (l_ptr) {
1098 dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
1099 res = tipc_link_send_buf(l_ptr, buf);
1122 static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1128 if (likely(!link_congested(l_ptr))) {
1129 if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
1130 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1131 link_add_to_outqueue(l_ptr, buf, msg);
1132 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1133 &l_ptr->media_addr))) {
1134 l_ptr->unacked_window = 0;
1139 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1140 l_ptr->stats.bearer_congs++;
1141 l_ptr->next_out = buf;
1146 *used_max_pkt = link_max_pkt(l_ptr);
1148 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
1159 struct link *l_ptr;
1172 l_ptr = n_ptr->active_links[selector];
1174 buf, l_ptr, destnode);
1175 if (likely(l_ptr)) {
1176 res = link_send_buf_fast(l_ptr, buf, &dummy);
1202 struct link *l_ptr;
1221 l_ptr = node->active_links[selector];
1222 if (likely(l_ptr)) {
1224 res = link_send_buf_fast(l_ptr, buf,
1241 if (link_congested(l_ptr) ||
1242 !list_empty(&l_ptr->b_ptr->cong_links)) {
1243 res = link_schedule_port(l_ptr,
1253 sender->max_pkt = link_max_pkt(l_ptr);
1297 struct link *l_ptr;
1411 l_ptr = node->active_links[sender->publ.ref & 1];
1412 if (!l_ptr) {
1416 if (link_max_pkt(l_ptr) < max_pkt) {
1417 sender->max_pkt = link_max_pkt(l_ptr);
1438 l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
1439 if (!l_ptr->next_out)
1440 l_ptr->next_out = buf_chain;
1441 l_ptr->stats.sent_fragmented++;
1446 l_ptr->stats.sent_fragments++;
1447 msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
1448 link_add_to_outqueue(l_ptr, buf, msg);
1455 tipc_link_push_queue(l_ptr);
1463 u32 tipc_link_push_packet(struct link *l_ptr)
1465 struct sk_buff *buf = l_ptr->first_out;
1466 u32 r_q_size = l_ptr->retransm_queue_size;
1467 u32 r_q_head = l_ptr->retransm_queue_head;
1474 link_last_sent(l_ptr));
1481 l_ptr->retransm_queue_head = r_q_head = first;
1482 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1488 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1489 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1490 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1492 l_ptr->retransm_queue_head = mod(++r_q_head);
1493 l_ptr->retransm_queue_size = --r_q_size;
1494 l_ptr->stats.retransmitted++;
1497 l_ptr->stats.bearer_congs++;
1505 buf = l_ptr->proto_msg_queue;
1507 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1508 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
1509 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1511 l_ptr->unacked_window = 0;
1513 l_ptr->proto_msg_queue = NULL;
1517 l_ptr->stats.bearer_congs++;
1524 buf = l_ptr->next_out;
1528 u32 first = msg_seqno(buf_msg(l_ptr->first_out));
1530 if (mod(next - first) < l_ptr->queue_limit[0]) {
1531 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1532 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1533 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1537 l_ptr->next_out = buf->next;
1541 l_ptr->stats.bearer_congs++;
1553 void tipc_link_push_queue(struct link *l_ptr)
1557 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1561 res = tipc_link_push_packet(l_ptr);
1565 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1598 static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1602 warn("Retransmission failure on link <%s>\n", l_ptr->name);
1605 if (l_ptr->addr) {
1609 link_print(l_ptr, TIPC_OUTPUT, "Resetting link\n");
1610 tipc_link_reset(l_ptr);
1623 n_ptr = l_ptr->owner->next;
1639 l_ptr->stale_count = 0;
1643 void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1653 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1655 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1658 dbg_print_link(l_ptr, " ");
1659 l_ptr->retransm_queue_head = msg_seqno(msg);
1660 l_ptr->retransm_queue_size = retransmits;
1668 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1669 if (++l_ptr->stale_count > 100) {
1670 link_retransmit_failure(l_ptr, buf);
1674 l_ptr->last_retransmitted = msg_seqno(msg);
1675 l_ptr->stale_count = 1;
1679 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
1681 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1682 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1683 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1687 l_ptr->stats.retransmitted++;
1689 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1690 l_ptr->stats.bearer_congs++;
1691 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1692 l_ptr->retransm_queue_size = retransmits;
1697 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1719 static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1724 if (l_ptr->oldest_deferred_in == NULL)
1727 seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1728 if (seq_no == mod(l_ptr->next_in_no)) {
1729 l_ptr->newest_deferred_in->next = buf;
1730 buf = l_ptr->oldest_deferred_in;
1731 l_ptr->oldest_deferred_in = NULL;
1732 l_ptr->deferred_inqueue_sz = 0;
1743 struct link *l_ptr;
1774 l_ptr = n_ptr->links[b_ptr->identity];
1775 if (unlikely(!l_ptr)) {
1787 crs = l_ptr->first_out;
1788 while ((crs != l_ptr->next_out) &&
1797 l_ptr->first_out = crs;
1798 l_ptr->out_queue_size -= released;
1800 if (unlikely(l_ptr->next_out))
1801 tipc_link_push_queue(l_ptr);
1802 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1803 tipc_link_wakeup_ports(l_ptr, 0);
1804 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1805 l_ptr->stats.sent_acks++;
1806 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1810 if (likely(link_working_working(l_ptr))) {
1811 if (likely(seq_no == mod(l_ptr->next_in_no))) {
1812 l_ptr->next_in_no++;
1813 if (unlikely(l_ptr->oldest_deferred_in))
1814 head = link_insert_deferred_queue(l_ptr,
1825 l_ptr->stats.recv_bundles++;
1826 l_ptr->stats.recv_bundled +=
1844 l_ptr->stats.recv_fragments++;
1845 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1847 l_ptr->stats.recv_fragmented++;
1853 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1869 link_handle_out_of_seq_msg(l_ptr, buf);
1870 head = link_insert_deferred_queue(l_ptr, head);
1876 link_recv_proto_msg(l_ptr, buf);
1877 head = link_insert_deferred_queue(l_ptr, head);
1882 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1884 if (link_working_working(l_ptr)) {
1958 static void link_handle_out_of_seq_msg(struct link *l_ptr,
1964 link_recv_proto_msg(l_ptr, buf);
1969 seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
1973 l_ptr->checkpoint--;
1980 if (less(seq_no, mod(l_ptr->next_in_no))) {
1981 l_ptr->stats.duplicates++;
1986 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1987 &l_ptr->newest_deferred_in, buf)) {
1988 l_ptr->deferred_inqueue_sz++;
1989 l_ptr->stats.deferred_recv++;
1990 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1991 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1993 l_ptr->stats.duplicates++;
1999 void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2003 struct tipc_msg *msg = l_ptr->pmsg;
2004 u32 msg_size = sizeof(l_ptr->proto_msg);
2006 if (link_blocked(l_ptr))
2009 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
2010 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
2014 u32 next_sent = mod(l_ptr->next_out_no);
2016 if (!tipc_link_is_up(l_ptr))
2018 if (l_ptr->next_out)
2019 next_sent = msg_seqno(buf_msg(l_ptr->next_out));
2021 if (l_ptr->oldest_deferred_in) {
2022 u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
2023 gap = mod(rec - mod(l_ptr->next_in_no));
2027 l_ptr->stats.sent_nacks++;
2031 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
2034 u32 mtu = l_ptr->max_pkt;
2036 if ((mtu < l_ptr->max_pkt_target) &&
2037 link_working_working(l_ptr) &&
2038 l_ptr->fsm_msg_cnt) {
2039 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2040 if (l_ptr->max_pkt_probes == 10) {
2041 l_ptr->max_pkt_target = (msg_size - 4);
2042 l_ptr->max_pkt_probes = 0;
2043 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2045 l_ptr->max_pkt_probes++;
2048 l_ptr->stats.sent_probes++;
2050 l_ptr->stats.sent_states++;
2052 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
2055 msg_set_link_tolerance(msg, l_ptr->tolerance);
2056 msg_set_linkprio(msg, l_ptr->priority);
2057 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
2060 if (tipc_node_has_redundant_links(l_ptr->owner)) {
2065 msg_set_linkprio(msg, l_ptr->priority);
2069 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
2073 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2074 if (!l_ptr->proto_msg_queue) {
2075 l_ptr->proto_msg_queue =
2076 buf_acquire(sizeof(l_ptr->proto_msg));
2078 buf = l_ptr->proto_msg_queue;
2081 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2094 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2097 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2098 l_ptr->unacked_window = 0;
2104 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2105 l_ptr->proto_msg_queue = buf;
2106 l_ptr->stats.bearer_congs++;
2115 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2125 if (link_blocked(l_ptr))
2130 l_ptr->checkpoint--;
2132 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2134 l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2136 l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2141 if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
2142 if (msg_session(msg) == l_ptr->peer_session) {
2144 msg_session(msg), l_ptr->peer_session);
2152 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2155 (msg_tol > l_ptr->tolerance))
2156 link_set_supervision_props(l_ptr, msg_tol);
2158 if (msg_linkprio(msg) > l_ptr->priority)
2159 l_ptr->priority = msg_linkprio(msg);
2163 if (max_pkt_info < l_ptr->max_pkt_target)
2164 l_ptr->max_pkt_target = max_pkt_info;
2165 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2166 l_ptr->max_pkt = l_ptr->max_pkt_target;
2168 l_ptr->max_pkt = l_ptr->max_pkt_target;
2170 l_ptr->owner->bclink.supported = (max_pkt_info != 0);
2172 link_state_event(l_ptr, msg_type(msg));
2174 l_ptr->peer_session = msg_session(msg);
2175 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2178 if (!tipc_node_has_redundant_links(l_ptr->owner)) {
2179 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2185 link_set_supervision_props(l_ptr, msg_tol);
2188 (msg_linkprio(msg) != l_ptr->priority)) {
2190 l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2191 l_ptr->priority = msg_linkprio(msg);
2192 tipc_link_reset(l_ptr); /* Enforce change to take effect */
2195 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2196 l_ptr->stats.recv_states++;
2197 if (link_reset_unknown(l_ptr))
2200 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2202 mod(l_ptr->next_in_no));
2206 if (max_pkt_ack > l_ptr->max_pkt) {
2208 l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
2209 l_ptr->max_pkt = max_pkt_ack;
2210 l_ptr->max_pkt_probes = 0;
2215 l_ptr->stats.recv_probes++;
2216 if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
2223 tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2226 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2231 l_ptr->stats.recv_nacks++;
2232 tipc_link_retransmit(l_ptr, l_ptr->first_out,
2248 void tipc_link_tunnel(struct link *l_ptr,
2257 tunnel = l_ptr->owner->active_links[selector & 1];
2272 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2284 void tipc_link_changeover(struct link *l_ptr)
2286 u32 msgcount = l_ptr->out_queue_size;
2287 struct sk_buff *crs = l_ptr->first_out;
2288 struct link *tunnel = l_ptr->owner->active_links[0];
2295 if (!l_ptr->owner->permit_changeover) {
2302 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2303 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2307 if (!l_ptr->first_out) {
2314 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2325 split_bundles = (l_ptr->owner->active_links[0] !=
2326 l_ptr->owner->active_links[1]);
2338 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2344 tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2351 void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2357 DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2358 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2359 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2360 iter = l_ptr->first_out;
2368 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
2369 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2380 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2384 if (!tipc_link_is_up(l_ptr))
2418 static int link_recv_changeover_msg(struct link **l_ptr,
2428 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2433 if (dest_link == *l_ptr) {
2435 (*l_ptr)->name);
2439 (*l_ptr)->b_ptr->net_plane);
2440 *l_ptr = dest_link;
2539 int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2547 u32 pack_sz = link_max_pkt(l_ptr);
2553 destaddr = l_ptr->addr;
2563 msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
2565 l_ptr->stats.sent_fragmented++;
2588 l_ptr->stats.sent_fragments++;
2589 tipc_link_send_buf(l_ptr, fragm);
2590 if (!tipc_link_is_up(l_ptr))
2731 * @l_ptr: pointer to link
2734 static void link_check_defragm_bufs(struct link *l_ptr)
2738 struct sk_buff *buf = l_ptr->defragm_buf;
2742 if (!link_working_working(l_ptr))
2754 dbg_print_link(l_ptr, "curr:");
2756 dbg_print_buf_chain(l_ptr->defragm_buf);
2760 l_ptr->defragm_buf = buf->next;
2769 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2771 l_ptr->tolerance = tolerance;
2772 l_ptr->continuity_interval =
2774 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
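
Illustrative sketch: link_set_supervision_props() derives the probe interval and abort threshold from the link tolerance. The right-hand side of continuity_interval does not appear in this match list, so the formula used below (tolerance/4, capped at 500 ms) is an assumption, and 1500 ms is only an example tolerance:

    /* Assumed example: tolerance = 1500 ms
     *   continuity_interval = min(1500 / 4, 500)  = 375 ms
     *   abort_limit         = 1500 / (375 / 4)    = 16
     * i.e. roughly 16 unanswered probe intervals before the link gives up.
     */
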
2778 void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2781 l_ptr->queue_limit[DATA_LOW] = window;
2782 l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4;
2783 l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5;
2784 l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6;
2786 l_ptr->queue_limit[DATA_LOW + 4] = 300;
2787 l_ptr->queue_limit[DATA_MEDIUM + 4] = 600;
2788 l_ptr->queue_limit[DATA_HIGH + 4] = 900;
2789 l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200;
2790 l_ptr->queue_limit[CONN_MANAGER] = 1200;
2791 l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
2792 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2793 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2795 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
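
Illustrative sketch: tipc_link_set_queue_limits() scales the four data-importance limits from the configured window; window = 50 below is an assumed value, and the divisions are integer divisions:

    /* Assumed example: window = 50
     *   queue_limit[DATA_LOW]      = 50
     *   queue_limit[DATA_MEDIUM]   = (50 / 3) * 4 = 64
     *   queue_limit[DATA_HIGH]     = (50 / 3) * 5 = 80
     *   queue_limit[DATA_CRITICAL] = (50 / 3) * 6 = 96
     */
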
2813 struct link *l_ptr;
2826 l_ptr = (*node)->links[b_ptr->identity];
2827 if (!l_ptr || strcmp(l_ptr->name, name))
2830 return l_ptr;
2838 struct link *l_ptr;
2857 l_ptr = link_find_link(args->name, &node);
2858 if (!l_ptr) {
2869 link_set_supervision_props(l_ptr, new_value);
2870 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2878 l_ptr->priority = new_value;
2879 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2887 tipc_link_set_queue_limits(l_ptr, new_value);
2903 * @l_ptr: pointer to link
2906 static void link_reset_statistics(struct link *l_ptr)
2908 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2909 l_ptr->stats.sent_info = l_ptr->next_out_no;
2910 l_ptr->stats.recv_info = l_ptr->next_in_no;
2916 struct link *l_ptr;
2930 l_ptr = link_find_link(link_name, &node);
2931 if (!l_ptr) {
2937 link_reset_statistics(l_ptr);
2964 struct link *l_ptr;
2975 l_ptr = link_find_link(name, &node);
2976 if (!l_ptr) {
2982 if (tipc_link_is_active(l_ptr))
2984 else if (tipc_link_is_up(l_ptr))
2991 l_ptr->name, status, link_max_pkt(l_ptr),
2992 l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
2994 l_ptr->next_in_no - l_ptr->stats.recv_info,
2995 l_ptr->stats.recv_fragments,
2996 l_ptr->stats.recv_fragmented,
2997 l_ptr->stats.recv_bundles,
2998 l_ptr->stats.recv_bundled);
3000 l_ptr->next_out_no - l_ptr->stats.sent_info,
3001 l_ptr->stats.sent_fragments,
3002 l_ptr->stats.sent_fragmented,
3003 l_ptr->stats.sent_bundles,
3004 l_ptr->stats.sent_bundled);
3005 profile_total = l_ptr->stats.msg_length_counts;
3011 l_ptr->stats.msg_length_counts,
3012 l_ptr->stats.msg_lengths_total / profile_total,
3013 percent(l_ptr->stats.msg_length_profile[0], profile_total),
3014 percent(l_ptr->stats.msg_length_profile[1], profile_total),
3015 percent(l_ptr->stats.msg_length_profile[2], profile_total),
3016 percent(l_ptr->stats.msg_length_profile[3], profile_total),
3017 percent(l_ptr->stats.msg_length_profile[4], profile_total),
3018 percent(l_ptr->stats.msg_length_profile[5], profile_total),
3019 percent(l_ptr->stats.msg_length_profile[6], profile_total));
3021 l_ptr->stats.recv_states,
3022 l_ptr->stats.recv_probes,
3023 l_ptr->stats.recv_nacks,
3024 l_ptr->stats.deferred_recv,
3025 l_ptr->stats.duplicates);
3027 l_ptr->stats.sent_states,
3028 l_ptr->stats.sent_probes,
3029 l_ptr->stats.sent_nacks,
3030 l_ptr->stats.sent_acks,
3031 l_ptr->stats.retransmitted);
3033 l_ptr->stats.bearer_congs,
3034 l_ptr->stats.link_congs,
3035 l_ptr->stats.max_queue_sz,
3036 l_ptr->stats.queue_sz_counts
3037 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
3087 struct link *l_ptr;
3097 l_ptr = n_ptr->active_links[selector & 1];
3098 if (l_ptr)
3099 res = link_max_pkt(l_ptr);
3107 static void link_dump_send_queue(struct link *l_ptr)
3109 if (l_ptr->next_out) {
3111 dbg_print_buf_chain(l_ptr->next_out);
3114 if (l_ptr->first_out) {
3115 dbg_print_buf_chain(l_ptr->first_out);
3120 static void link_print(struct link *l_ptr, struct print_buf *buf,
3124 if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3127 l_ptr->addr, l_ptr->b_ptr->publ.name);
3128 tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3129 tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3131 if (l_ptr->first_out) {
3132 tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
3133 if (l_ptr->next_out)
3135 msg_seqno(buf_msg(l_ptr->next_out)));
3138 (l_ptr->last_out)), l_ptr->out_queue_size);
3139 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
3140 msg_seqno(buf_msg(l_ptr->first_out)))
3141 != (l_ptr->out_queue_size - 1))
3142 || (l_ptr->last_out->next != 0)) {
3144 tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
3145 tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
3146 tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
3147 link_dump_send_queue(l_ptr);
3151 tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3152 if (l_ptr->oldest_deferred_in) {
3153 u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3154 u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3156 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3158 l_ptr->deferred_inqueue_sz);
3161 if (link_working_unknown(l_ptr))
3163 if (link_reset_reset(l_ptr))
3165 if (link_reset_unknown(l_ptr))
3167 if (link_working_working(l_ptr))