Lines matching refs:tp — cross-reference hits for the tp (struct tcpcb *) symbol, apparently in XNU's bsd/netinet/tcp_timer.c.

246 static void tcp_remove_timer(struct tcpcb *tp);
248 static uint32_t tcp_run_conn_timer(struct tcpcb *tp, uint16_t *next_index);
249 static void tcp_sched_timers(struct tcpcb *tp);
264 #define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)
267 void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
268 void add_to_time_wait(struct tcpcb *tp, uint32_t delay);
272 void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
281 LIST_REMOVE(tp->t_inpcb, inp_list);
283 /* if (tp->t_timer[TCPT_2MSL] <= 0)
284 tp->t_timer[TCPT_2MSL] = 1; */
293 tp->t_rcvtime = (tp->t_rcvtime / TCP_RETRANSHZ) * PR_SLOWHZ;
295 tp->t_rcvtime += timer & (N_TIME_WAIT_SLOTS - 1);
301 LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list);
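
The add_to_time_wait_locked() hits above (272-301) show the pcb being pulled off the main list, its timestamps rescaled from TCP_RETRANSHZ ticks to PR_SLOWHZ ticks, and the connection rehomed onto one of the time_wait_slots lists. Below is a minimal userland sketch of that slot arithmetic; the constant values and the wrap-around step are assumptions for illustration, not quotes from this source.

    /*
     * Standalone sketch of the TIME_WAIT slot arithmetic suggested by the
     * listing above.  All constants are illustrative assumptions.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define N_TIME_WAIT_SLOTS 128   /* assumed: must be a power of two       */
    #define TCP_RETRANSHZ     10    /* assumed: ticks/sec of the delay value */
    #define PR_SLOWHZ         2     /* assumed: slow-timeout ticks/sec       */

    /* Map a 2MSL delay (in TCP_RETRANSHZ ticks) onto a wheel slot relative
     * to the current slot, wrapping with a mask because the slot count is a
     * power of two. */
    static int time_wait_slot(uint32_t delay, int cur_tw_slot)
    {
            uint32_t timer = (delay / TCP_RETRANSHZ) * PR_SLOWHZ;
            int tw_slot = (int)(timer & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot;

            if (tw_slot >= N_TIME_WAIT_SLOTS)
                    tw_slot -= N_TIME_WAIT_SLOTS;
            return tw_slot;
    }

    int main(void)
    {
            printf("slot = %d\n", time_wait_slot(600, 120));
            return 0;
    }
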
304 void add_to_time_wait(struct tcpcb *tp, uint32_t delay)
309 tcp_unlock(tp->t_inpcb->inp_socket, 0, 0);
311 tcp_lock(tp->t_inpcb->inp_socket, 0, 0);
313 add_to_time_wait_locked(tp, delay);
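
add_to_time_wait() itself (304-313) mostly shows a locking dance: if the global pcb-list lock cannot be taken immediately, the socket lock is dropped, the list lock is acquired blocking, and the socket lock is retaken before calling the _locked variant. A hedged pthread sketch of that try-then-back-off pattern follows; it uses a plain mutex where the kernel uses lck_rw_* on pcbinfo->mtx, and every name here is invented.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for pcbinfo->mtx */

    /* Enter the list-locked section while already holding a per-socket lock. */
    static void with_list_lock(pthread_mutex_t *socket_lock)
    {
            if (pthread_mutex_trylock(&list_lock) != 0) {
                    /* Contended: drop the socket lock before blocking on the
                     * list lock, then retake the socket lock, so the
                     * "list lock before socket lock" order is never violated. */
                    pthread_mutex_unlock(socket_lock);
                    pthread_mutex_lock(&list_lock);
                    pthread_mutex_lock(socket_lock);
            }
            /* ... move the pcb onto the TIME_WAIT list here ... */
            pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
            pthread_mutex_t socket_lock = PTHREAD_MUTEX_INITIALIZER;

            pthread_mutex_lock(&socket_lock);
            with_list_lock(&socket_lock);
            pthread_mutex_unlock(&socket_lock);
            puts("done");
            return 0;
    }
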
321 struct tcpcb *tp;
324 tp = intotcpcb(inp);
353 ((tp != NULL) && (tp->t_state == TCPS_CLOSED) && (so->so_head != NULL)
387 struct tcpcb *, tp, int32_t, TCPS_CLOSED);
391 /* If this tp still happens to be on the timer list,
394 if (TIMER_IS_ON_LIST(tp)) {
395 tcp_remove_timer(tp);
416 struct tcpcb *tp;
469 tp = intotcpcb(inp);
470 if (tp == NULL) /* tp already closed, remove from list */
473 if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
474 tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
475 tp->t_rcvtime += N_TIME_WAIT_SLOTS;
478 tp->t_timer[TCPT_2MSL] = 0;
480 if (tp->t_timer[TCPT_2MSL] == 0) {
483 tcp_free_sackholes(tp);
484 tp = tcp_close(tp);
508 * Cancel all timers for TCP tp.
511 tcp_canceltimers(tp)
512 struct tcpcb *tp;
516 tcp_remove_timer(tp);
518 tp->t_timer[i] = 0;
519 tp->tentry.timer_start = tcp_now;
520 tp->tentry.index = TCPT_NONE;
535 tcp_timers(tp, timer)
536 register struct tcpcb *tp;
550 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
553 so = tp->t_inpcb->inp_socket;
554 idle_time = tcp_now - tp->t_rcvtime;
567 tcp_free_sackholes(tp);
568 if (tp->t_state != TCPS_TIME_WAIT &&
569 tp->t_state != TCPS_FIN_WAIT_2 &&
571 tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, (u_int32_t)tcp_keepintvl);
574 tp = tcp_close(tp);
575 return(tp);
592 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
593 (tp->rxt_conndroptime > 0 && tp->rxt_start > 0 &&
594 (tcp_now - tp->rxt_start) >= tp->rxt_conndroptime) ||
595 ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
596 (tp->t_flags & TF_SENTFIN) != 0 &&
597 tp->t_rxtshift >= 4)) {
599 if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
604 tp->t_rxtshift = TCP_MAXRXTSHIFT;
608 tp = tcp_drop(tp, tp->t_softerror ?
609 tp->t_softerror : ETIMEDOUT);
614 if (tp->t_rxtshift == 1) {
624 tp->snd_cwnd_prev = tp->snd_cwnd;
625 tp->snd_ssthresh_prev = tp->snd_ssthresh;
626 tp->snd_recover_prev = tp->snd_recover;
627 if (IN_FASTRECOVERY(tp))
628 tp->t_flags |= TF_WASFRECOVERY;
630 tp->t_flags &= ~TF_WASFRECOVERY;
631 tp->t_badrxtwin = tcp_now + (tp->t_srtt >> (TCP_RTT_SHIFT));
636 tp->rxt_start = tcp_now;
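
The rxtshift == 1 hits above (614-636) snapshot snd_cwnd, snd_ssthresh and snd_recover and open a short window (t_badrxtwin, roughly one smoothed RTT) during which an ACK of the original transmission would mark the timeout as spurious and allow the snapshot to be restored. A standalone sketch of that bookkeeping, with invented names and an assumed srtt scaling:

    #include <stdio.h>
    #include <stdint.h>

    #define TCP_RTT_SHIFT 5         /* assumed: srtt stored scaled by 32 */

    struct cc_state {
            uint32_t cwnd, ssthresh, recover;
    };

    struct rto_snapshot {
            struct cc_state saved;
            uint32_t badrxtwin;     /* timestamp until which the snapshot is usable */
    };

    /* Call on the first retransmission timeout only (rxtshift == 1). */
    static void snapshot_on_first_rto(struct rto_snapshot *s,
        const struct cc_state *cur, uint32_t now, uint32_t srtt_scaled)
    {
            s->saved = *cur;
            s->badrxtwin = now + (srtt_scaled >> TCP_RTT_SHIFT);
    }

    /* Call when an ACK that covers the original transmission arrives. */
    static int restore_if_spurious(const struct rto_snapshot *s,
        struct cc_state *cur, uint32_t now)
    {
            if (now < s->badrxtwin) {       /* timeout was premature */
                    *cur = s->saved;
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            struct cc_state cc = { 20000, 10000, 0 };
            struct rto_snapshot snap;
            int restored;

            snapshot_on_first_rto(&snap, &cc, 1000, 320 /* ~10 ticks of srtt */);
            cc.cwnd = 1460;                 /* the RTO collapsed the window */
            restored = restore_if_spurious(&snap, &cc, 1005);
            printf("restored: %d, cwnd %u\n", restored, cc.cwnd);
            return 0;
    }
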
640 if (tp->t_state == TCPS_SYN_SENT)
641 rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
643 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
644 TCPT_RANGESET(tp->t_rxtcur, rexmt,
645 tp->t_rttmin, TCPTV_REXMTMAX,
646 TCP_ADD_REXMTSLOP(tp));
647 tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
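
Lines 640-647 compute the backed-off retransmit timeout: the base RTO is multiplied by a per-shift backoff factor (a separate, steeper table is used while in SYN_SENT) and clamped by TCPT_RANGESET before being turned into a timer offset. The sketch below reproduces that calculation with an assumed backoff table and without the TCP_ADD_REXMTSLOP term:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative backoff table; index is the retransmit shift. */
    static const uint32_t tcp_backoff[] =
            { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

    /* Clamp like TCPT_RANGESET(): apply the floor, then the ceiling. */
    static uint32_t rangeset(uint32_t value, uint32_t tvmin, uint32_t tvmax)
    {
            if (value < tvmin)
                    value = tvmin;
            if (value > tvmax)
                    value = tvmax;
            return value;
    }

    static uint32_t backed_off_rto(uint32_t rexmtval, int rxtshift,
        uint32_t rttmin, uint32_t rexmtmax)
    {
            return rangeset(rexmtval * tcp_backoff[rxtshift], rttmin, rexmtmax);
    }

    int main(void)
    {
            for (int shift = 0; shift < 13; shift++)
                    printf("shift %2d -> rto %u ms\n", shift,
                        backed_off_rto(500, shift, 100, 64000));
            return 0;
    }
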
649 if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb))
652 tcp_free_sackholes(tp);
657 if (tcp_pmtud_black_hole_detect && (tp->t_state == TCPS_ESTABLISHED)) {
658 if (((tp->t_flags & (TF_PMTUD|TF_MAXSEGSNT)) == (TF_PMTUD|TF_MAXSEGSNT)) &&
659 (tp->t_rxtshift == 2)) {
666 tp->t_flags &= ~TF_PMTUD; /* Disable Path MTU Discovery for now */
667 tp->t_flags |= TF_BLACKHOLE; /* Record that we may have found a black hole */
668 optlen = tp->t_maxopd - tp->t_maxseg;
669 tp->t_pmtud_saved_maxopd = tp->t_maxopd; /* Keep track of previous MSS */
670 if (tp->t_maxopd > tcp_pmtud_black_hole_mss)
671 tp->t_maxopd = tcp_pmtud_black_hole_mss; /* Reduce the MSS to intermediary value */
673 tp->t_maxopd = /* use the default MSS */
679 tp->t_maxseg = tp->t_maxopd - optlen;
684 if (CC_ALGO(tp)->cwnd_init != NULL)
685 CC_ALGO(tp)->cwnd_init(tp);
694 if ((tp->t_flags & TF_BLACKHOLE) && (tp->t_rxtshift > 4)) {
695 tp->t_flags |= TF_PMTUD;
696 tp->t_flags &= ~TF_BLACKHOLE;
697 optlen = tp->t_maxopd - tp->t_maxseg;
698 tp->t_maxopd = tp->t_pmtud_saved_maxopd;
699 tp->t_maxseg = tp->t_maxopd - optlen;
703 if (CC_ALGO(tp)->cwnd_init != NULL)
704 CC_ALGO(tp)->cwnd_init(tp);
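
Lines 657-704 are the PMTU black-hole heuristics: after the second timeout with path-MTU discovery active the MSS is clamped (remembering the old t_maxopd) and PMTUD is switched off, and if retransmissions keep failing past four shifts the clamp is undone and PMTUD re-enabled. A simplified, self-contained sketch of that state machine; the struct, the thresholds and the clamp value are stand-ins:

    #include <stdio.h>
    #include <stdint.h>

    #define BLACKHOLE_MSS 1200      /* assumed stand-in for tcp_pmtud_black_hole_mss */

    struct conn {
            int      pmtud_enabled;
            int      blackhole_suspected;
            int      rxtshift;
            uint32_t maxopd;                /* MSS plus options              */
            uint32_t maxseg;                /* MSS actually used for payload */
            uint32_t pmtud_saved_maxopd;    /* value to restore on give-up   */
    };

    static void on_rexmt_timeout(struct conn *c)
    {
            uint32_t optlen = c->maxopd - c->maxseg;

            if (c->pmtud_enabled && c->rxtshift == 2) {
                    /* Suspect a black hole: stop PMTUD, clamp the MSS. */
                    c->pmtud_enabled = 0;
                    c->blackhole_suspected = 1;
                    c->pmtud_saved_maxopd = c->maxopd;
                    if (c->maxopd > BLACKHOLE_MSS)
                            c->maxopd = BLACKHOLE_MSS;
                    c->maxseg = c->maxopd - optlen;
            } else if (c->blackhole_suspected && c->rxtshift > 4) {
                    /* Smaller segments did not help either: undo the clamp. */
                    c->pmtud_enabled = 1;
                    c->blackhole_suspected = 0;
                    c->maxopd = c->pmtud_saved_maxopd;
                    c->maxseg = c->maxopd - optlen;
            }
    }

    int main(void)
    {
            struct conn c = { 1, 0, 0, 1460, 1448, 0 };

            for (c.rxtshift = 1; c.rxtshift <= 5; c.rxtshift++) {
                    on_rexmt_timeout(&c);
                    printf("shift %d: maxseg %u\n", c.rxtshift, c.maxseg);
            }
            return 0;
    }
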
717 if ((tp->t_state == TCPS_SYN_SENT) &&
718 (tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres))
719 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);
729 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
732 in6_losing(tp->t_inpcb);
735 in_losing(tp->t_inpcb);
736 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
737 tp->t_srtt = 0;
739 tp->snd_nxt = tp->snd_una;
744 tp->snd_recover = tp->snd_max;
748 tp->t_flags |= TF_ACKNOW;
752 tp->t_rtttime = 0;
754 if (CC_ALGO(tp)->after_timeout != NULL)
755 CC_ALGO(tp)->after_timeout(tp);
757 tp->t_dupacks = 0;
758 EXIT_FASTRECOVERY(tp);
763 if ((tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON) {
764 tp->ecn_flags |= TE_SENDCWR;
767 DTRACE_TCP5(cc, void, NULL, struct inpcb *, tp->t_inpcb,
768 struct tcpcb *, tp, struct tcphdr *, NULL,
771 (void) tcp_output(tp);
791 if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
793 idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
794 ((tp->t_persist_stop != 0) &&
795 TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
800 tp = tcp_drop(tp, ETIMEDOUT);
803 tcp_setpersist(tp);
804 tp->t_force = 1;
805 (void) tcp_output(tp);
806 tp->t_force = 0;
815 if (tp->t_state < TCPS_ESTABLISHED)
818 tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
819 (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
820 if (idle_time >= TCP_KEEPIDLE(tp) + (u_int32_t)tcp_maxidle)
828 * Using sequence number tp->snd_una-1
835 t_template = tcp_maketemplate(tp);
839 if (tp->t_inpcb->inp_flags & INP_BOUND_IF)
840 ifscope = tp->t_inpcb->inp_boundifp->if_index;
848 if (tp->t_inpcb->inp_flags & INP_NO_IFT_CELLULAR)
851 tcp_respond(tp, t_template->tt_ipgen,
853 tp->rcv_nxt, tp->snd_una - 1, 0, ifscope,
857 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, tcp_keepintvl);
859 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_KEEPIDLE(tp));
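
The keepalive hits above (815-859) show the decision the TCPT_KEEP timer makes: drop the connection once it has been idle past TCP_KEEPIDLE plus tcp_maxidle; otherwise send a probe built with sequence number snd_una - 1 (which forces the peer to ACK) and rearm with tcp_keepintvl; or, if keepalives do not apply yet, simply rearm with the idle threshold. A reduced sketch with assumed millisecond values:

    #include <stdio.h>
    #include <stdint.h>

    #define KEEPIDLE  7200000u      /* assumed: 2 hours before the first probe */
    #define KEEPINTVL   75000u      /* assumed: 75 s between probes            */
    #define MAXIDLE   (8 * KEEPINTVL)

    enum keep_action { KEEP_REARM_IDLE, KEEP_SEND_PROBE, KEEP_DROP };

    /* Decide what the keepalive timer should do for a connection that has
     * been idle for idle_ms, and report when it should fire next. */
    static enum keep_action keepalive_fire(uint32_t idle_ms, uint32_t *next_ms)
    {
            if (idle_ms >= KEEPIDLE + MAXIDLE)
                    return KEEP_DROP;               /* peer never answered  */
            if (idle_ms >= KEEPIDLE) {
                    *next_ms = KEEPINTVL;           /* probe, then retry    */
                    return KEEP_SEND_PROBE;
            }
            *next_ms = KEEPIDLE;                    /* not idle long enough */
            return KEEP_REARM_IDLE;
    }

    int main(void)
    {
            uint32_t next = 0;
            enum keep_action a = keepalive_fire(KEEPIDLE + 1000, &next);

            printf("action %d, next timeout in %u ms\n", a, next);
            return 0;
    }
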
862 if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
863 tp->t_flags &= ~TF_DELACK;
864 tp->t_timer[TCPT_DELACK] = 0;
865 tp->t_flags |= TF_ACKNOW;
870 if ((tp->t_flags & TF_STRETCHACK) != 0)
871 tcp_reset_stretch_ack(tp);
878 CLEAR_IAJ_STATE(tp);
881 (void) tcp_output(tp);
886 if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
887 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
895 tp = tcp_drop(tp, ETIMEDOUT);
898 return (tp);
903 tcp_remove_timer(struct tcpcb *tp)
907 lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
908 if (!(TIMER_IS_ON_LIST(tp))) {
914 if (!(TIMER_IS_ON_LIST(tp))) {
919 if (listp->next_te != NULL && listp->next_te == &tp->tentry)
920 listp->next_te = LIST_NEXT(&tp->tentry, le);
922 LIST_REMOVE(&tp->tentry, le);
923 tp->t_flags &= ~(TF_TIMER_ONLIST);
927 tp->tentry.le.le_next = NULL;
928 tp->tentry.le.le_prev = NULL;
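
tcp_remove_timer() (903-928) has one subtle step: if the entry being unlinked is the one the timer-list walker has cached in listp->next_te, that cursor is advanced before the LIST_REMOVE so the in-progress walk stays valid, and the stale links are cleared afterwards. A small <sys/queue.h> sketch of that cursor fix-up, with an invented entry type:

    #include <stdio.h>
    #include <sys/queue.h>

    struct tentry {
            LIST_ENTRY(tentry) le;
            int id;
    };
    LIST_HEAD(tlist, tentry);

    struct walker {
            struct tentry *next_te;         /* next element the walk will visit */
    };

    static void remove_entry(struct walker *w, struct tentry *te)
    {
            if (w->next_te != NULL && w->next_te == te)
                    w->next_te = LIST_NEXT(te, le); /* step the cursor past it */
            LIST_REMOVE(te, le);
            te->le.le_next = NULL;                  /* poison the stale links  */
            te->le.le_prev = NULL;
    }

    int main(void)
    {
            struct tlist head = LIST_HEAD_INITIALIZER(head);
            struct tentry a = { .id = 1 }, b = { .id = 2 };
            struct walker w;

            LIST_INSERT_HEAD(&head, &b, le);
            LIST_INSERT_HEAD(&head, &a, le);
            w.next_te = &b;
            remove_entry(&w, &b);                   /* cursor now NULL; list holds only a */
            printf("cursor %p, head id %d\n", (void *)w.next_te, LIST_FIRST(&head)->id);
            return 0;
    }
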
993 tcp_run_conn_timer(struct tcpcb *tp, uint16_t *next_index) {
1002 VERIFY(tp != NULL);
1005 tcp_lock(tp->t_inpcb->inp_socket, 1, 0);
1007 so = tp->t_inpcb->inp_socket;
1009 if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1) == WNT_STOPUSING) {
1010 if (TIMER_IS_ON_LIST(tp)) {
1011 tcp_remove_timer(tp);
1024 index = tp->tentry.index;
1025 timer_val = tp->t_timer[index];
1027 if (index == TCPT_NONE || tp->tentry.runtime == 0)
1030 diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
1032 if (tp->tentry.index != TCPT_NONE) {
1034 *(next_index) = tp->tentry.index;
1039 tp->t_timer[index] = 0;
1041 tp = tcp_timers(tp, index);
1042 if (tp == NULL)
1050 if (tp->t_timer[i] != 0) {
1051 diff = timer_diff(tp->tentry.timer_start, tp->t_timer[i], tcp_now, 0);
1053 tp->t_timer[i] = 0;
1057 tp->t_timer[i] = diff;
1067 tp->tentry.timer_start = tcp_now;
1068 tp->tentry.index = lo_index;
1070 tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[lo_index];
1072 tp->tentry.runtime = 0;
1079 tp->t_timer[i] = 0;
1080 tp = tcp_timers(tp, i);
1081 if (tp == NULL)
1085 tcp_set_lotimer_index(tp);
1088 if (tp->tentry.index < TCPT_NONE) {
1089 offset = tp->t_timer[tp->tentry.index];
1090 *(next_index) = tp->tentry.index;
1094 if (tp != NULL && tp->tentry.index == TCPT_NONE) {
1095 tcp_remove_timer(tp);
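
tcp_run_conn_timer() (993-1095) stores every timer as an offset from tentry.timer_start, so after one timer fires it re-derives each remaining offset as (timer_start + t_timer[i]) - tcp_now via timer_diff(), handles anything already due, and keeps the smallest positive remainder as the next wakeup (index and offset are reported back through next_index). The sketch below mimics that pass; timer_diff()'s exact signature and the timer count are assumptions:

    #include <stdio.h>
    #include <stdint.h>

    #define TCPT_NTIMERS 5
    #define TCPT_NONE    TCPT_NTIMERS

    /* Signed distance between two (base + offset) timestamps, tolerant of
     * 32-bit wraparound. */
    static int32_t timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
    {
            return (int32_t)((t1 + toff1) - (t2 + toff2));
    }

    /* Return the index of the soonest still-pending timer, or TCPT_NONE,
     * and its remaining offset from now. */
    static int reschedule(uint32_t timer_start, uint32_t now,
        uint32_t t_timer[TCPT_NTIMERS], uint32_t *offset_out)
    {
            int lo_index = TCPT_NONE;
            int32_t lo_diff = 0;

            for (int i = 0; i < TCPT_NTIMERS; i++) {
                    int32_t diff;

                    if (t_timer[i] == 0)
                            continue;
                    diff = timer_diff(timer_start, t_timer[i], now, 0);
                    if (diff <= 0) {
                            /* Already due: the real code runs tcp_timers()
                             * here; the sketch just clears the slot. */
                            t_timer[i] = 0;
                            continue;
                    }
                    if (lo_index == TCPT_NONE || diff < lo_diff) {
                            lo_index = i;
                            lo_diff = diff;
                    }
            }
            *offset_out = (lo_index == TCPT_NONE) ? 0 : (uint32_t)lo_diff;
            return lo_index;
    }

    int main(void)
    {
            uint32_t timers[TCPT_NTIMERS] = { 0, 500, 0, 200, 50 };
            uint32_t offset;
            int idx = reschedule(1000, 1100, timers, &offset);

            printf("next index %d in %u ticks\n", idx, offset);
            return 0;
    }
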
1108 struct tcpcb *tp;
1133 tp = TIMERENTRY_TO_TP(te);
1138 if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
1143 if (TIMER_IS_ON_LIST(tp)) {
1144 tp->t_flags &= ~(TF_TIMER_ONLIST);
1145 LIST_REMOVE(&tp->tentry, le);
1148 tp->tentry.le.le_next = NULL;
1149 tp->tentry.le.le_prev = NULL;
1160 VERIFY_NEXT_LINK(&tp->tentry, le);
1161 VERIFY_PREV_LINK(&tp->tentry, le);
1166 offset = tcp_run_conn_timer(tp, &index);
1233 tcp_sched_timers(struct tcpcb *tp)
1235 struct tcptimerentry *te = &tp->tentry;
1242 if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
1244 if (TIMER_IS_ON_LIST(tp)) {
1245 tcp_remove_timer(tp);
1251 tcp_remove_timer(tp);
1264 if (!TIMER_IS_ON_LIST(tp)) {
1271 tp->t_flags |= TF_TIMER_ONLIST;
1331 tcp_set_lotimer_index(struct tcpcb *tp) {
1335 if (tp->t_timer[i] != 0 &&
1336 (lo_timer == 0 || tp->t_timer[i] < lo_timer)) {
1337 lo_timer = tp->t_timer[i];
1341 tp->tentry.index = lo_index;
1343 tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[lo_index];
1345 tp->tentry.runtime = 0;
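
tcp_set_lotimer_index() (1331-1345) scans t_timer[] for the smallest nonzero relative value, records its index, and derives the absolute run time from timer_start (runtime stays 0 when nothing is pending). A direct standalone rendering of that scan, with an assumed timer count:

    #include <stdio.h>
    #include <stdint.h>

    #define TCPT_NTIMERS 5
    #define TCPT_NONE    TCPT_NTIMERS

    /* Pick the soonest pending timer and compute its absolute run time. */
    static int lowest_timer_index(const uint32_t t_timer[TCPT_NTIMERS],
        uint32_t timer_start, uint32_t *runtime)
    {
            uint32_t lo_timer = 0;
            int lo_index = TCPT_NONE;

            for (int i = 0; i < TCPT_NTIMERS; i++) {
                    if (t_timer[i] != 0 &&
                        (lo_timer == 0 || t_timer[i] < lo_timer)) {
                            lo_timer = t_timer[i];
                            lo_index = i;
                    }
            }
            *runtime = (lo_index != TCPT_NONE) ? timer_start + lo_timer : 0;
            return lo_index;
    }

    int main(void)
    {
            uint32_t timers[TCPT_NTIMERS] = { 0, 300, 0, 150, 0 };
            uint32_t runtime;
            int idx = lowest_timer_index(timers, 1000, &runtime);

            printf("index %d, runtime %u\n", idx, runtime);
            return 0;
    }
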
1350 tcp_check_timer_state(struct tcpcb *tp) {
1352 lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
1354 tcp_set_lotimer_index(tp);
1356 tcp_sched_timers(tp);