/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/net/ipv4/

Lines Matching refs:mss_now

577 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
579 if (skb->len <= mss_now || !sk_can_gso(sk)) {
589 factor = skb->len + (mss_now - 1);
590 factor /= mss_now;
592 skb_shinfo(skb)->gso_size = mss_now;
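
Note: the factor computation at lines 589-590 is a ceiling division: the skb payload is split into ceil(len / mss_now) GSO segments. A minimal stand-alone sketch; the name payload_len is illustrative, not a kernel identifier.

    #include <stdio.h>

    /* Ceiling division as at lines 589-590: adding (mss - 1) before
     * dividing makes any remainder cost one extra segment. */
    static unsigned int tso_factor(unsigned int payload_len, unsigned int mss_now)
    {
            unsigned int factor = payload_len + (mss_now - 1);
            factor /= mss_now;
            return factor;
    }

    int main(void)
    {
            /* 4000 bytes at an MSS of 1460 -> ceil(4000/1460) = 3 segments */
            printf("%u\n", tso_factor(4000, 1460));
            return 0;
    }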
602 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
668 tcp_set_skb_tso_segs(sk, skb, mss_now);
669 tcp_set_skb_tso_segs(sk, buff, mss_now);
777 int mss_now;
782 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
785 if (mss_now > tp->rx_opt.mss_clamp)
786 mss_now = tp->rx_opt.mss_clamp;
789 mss_now -= icsk->icsk_ext_hdr_len;
792 if (mss_now < 48)
793 mss_now = 48;
796 mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
798 return mss_now;
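
Note: taken together, lines 777-798 derive an MSS from a path MTU: subtract the network and base TCP headers, clamp to the peer's advertised MSS, deduct extension-header space, enforce the 48-byte floor, then pay for TCP options carried in tcp_header_len. A self-contained sketch under assumed header sizes; every parameter here is illustrative.

    #include <stdio.h>

    #define BASE_TCP_HDR 20 /* sizeof(struct tcphdr) without options */

    static int mtu_to_mss(int pmtu, int net_header_len, int ext_hdr_len,
                          int mss_clamp, int tcp_header_len)
    {
            /* Start from the PMTU minus network and base TCP headers. */
            int mss_now = pmtu - net_header_len - BASE_TCP_HDR;

            /* Honor the MSS the peer advertised. */
            if (mss_now > mss_clamp)
                    mss_now = mss_clamp;

            /* Subtract IP option / extension header space. */
            mss_now -= ext_hdr_len;

            /* Historical floor: never drop below 48 bytes. */
            if (mss_now < 48)
                    mss_now = 48;

            /* Pay for TCP options (e.g. timestamps) in tcp_header_len. */
            mss_now -= tcp_header_len - BASE_TCP_HDR;

            return mss_now;
    }

    int main(void)
    {
            /* IPv4, no IP options, timestamps on: 1500 - 20 - 20 - 12 = 1448 */
            printf("%d\n", mtu_to_mss(1500, 20, 0, 65535, BASE_TCP_HDR + 12));
            return 0;
    }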
855 int mss_now;
860 mss_now = tcp_mtu_to_mss(sk, pmtu);
863 if (tp->max_window && mss_now > (tp->max_window>>1))
864 mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);
869 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
870 tp->mss_cache = mss_now;
872 return mss_now;
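
Note: lines 863-864 keep the MSS sane against the peer's window: if the largest window the peer has ever advertised is small, the MSS is capped at half of it, with a floor of 68 minus the full TCP header length. A hedged sketch with illustrative values.

    #include <stdio.h>

    static unsigned int clamp_mss(unsigned int mss_now, unsigned int max_window,
                                  unsigned int tcp_header_len)
    {
            if (max_window && mss_now > (max_window >> 1)) {
                    unsigned int lo = 68U - tcp_header_len;
                    unsigned int half = max_window >> 1;
                    mss_now = half > lo ? half : lo; /* the max() at line 864 */
            }
            return mss_now;
    }

    int main(void)
    {
            /* peer only ever advertised 1024 bytes: MSS drops to 512 */
            printf("%u\n", clamp_mss(1460, 1024, 32));
            return 0;
    }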
886 u32 mss_now;
890 mss_now = tp->mss_cache;
898 mss_now = tcp_sync_mss(sk, mtu);
902 mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
907 mss_now -= TCPOLEN_MD5SIG_ALIGNED;
910 xmit_size_goal = mss_now;
923 xmit_size_goal -= (xmit_size_goal % mss_now);
927 return mss_now;
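
Note: in the block at lines 886-927 the cached MSS is refreshed, shrunk by SACK and MD5 option space when those options are in play (lines 902-907), and the transmit size goal is rounded down to a whole number of segments (line 923). A sketch of just the rounding step; tso_limit is an illustrative stand-in for the kernel's goal computation.

    #include <stdio.h>

    static unsigned int size_goal(unsigned int tso_limit, unsigned int mss_now)
    {
            unsigned int xmit_size_goal = mss_now;

            if (tso_limit > mss_now) {
                    xmit_size_goal = tso_limit;
                    /* Line 923: cut back to an exact multiple of the MSS so
                     * segmentation produces no runt final segment. */
                    xmit_size_goal -= (xmit_size_goal % mss_now);
            }
            return xmit_size_goal;
    }

    int main(void)
    {
            /* a 65535-byte TSO budget at MSS 1448: 45 full segments = 65160 */
            printf("%u\n", size_goal(65535, 1448));
            return 0;
    }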
952 static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
957 cwnd_len = mss_now * cwnd;
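
Note: line 957 converts the congestion window from segments to bytes; tcp_window_allows then returns the smaller of that and the receive-window budget. A simplified sketch: the real code derives window from snd_una, snd_wnd and the skb's starting sequence number, which is elided here.

    #include <stdio.h>

    static unsigned int window_allows(unsigned int window, unsigned int mss_now,
                                      unsigned int cwnd)
    {
            unsigned int cwnd_len = mss_now * cwnd; /* line 957 */
            return window < cwnd_len ? window : cwnd_len;
    }

    int main(void)
    {
            /* cwnd of 10 at MSS 1448 (14480 bytes) against an 8192-byte window */
            printf("%u\n", window_allows(8192, 1448, 10));
            return 0;
    }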
984 static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
990 tcp_skb_mss(skb) != mss_now)) {
991 tcp_set_skb_tso_segs(sk, skb, mss_now);
1013 unsigned mss_now, int nonagle)
1015 return (skb->len < mss_now &&
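
Note: lines 1013-1015 are the heart of the Nagle-style deferral: a segment shorter than mss_now is held back when the socket is corked, or when Nagle is enabled and small segments are already in flight. In this sketch small_in_flight stands in for the kernel's packets_out/minshall state, and the TCP_NAGLE_CORK value is restated here as an assumption.

    #include <stdio.h>

    #define TCP_NAGLE_CORK 2 /* assumed to match linux-2.6 include/net/tcp.h */

    static int nagle_defers(unsigned int skb_len, unsigned int mss_now,
                            int nonagle, int small_in_flight)
    {
            return skb_len < mss_now &&
                   ((nonagle & TCP_NAGLE_CORK) ||
                    (!nonagle && small_in_flight));
    }

    int main(void)
    {
            /* a 100-byte write, Nagle on, unacked small data pending: defer */
            printf("%d\n", nagle_defers(100, 1448, 0, 1));
            return 0;
    }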
1103 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1111 return tcp_fragment(sk, skb, len, mss_now);
1138 tcp_set_skb_tso_segs(sk, skb, mss_now);
1139 tcp_set_skb_tso_segs(sk, buff, mss_now);
1227 int mss_now;
1241 mss_now = tcp_current_mss(sk, 0);
1317 tcp_set_skb_tso_segs(sk, skb, mss_now);
1354 static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
1381 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1388 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1392 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1401 limit = mss_now;
1404 mss_now, cwnd_quota);
1407 unsigned int trim = skb->len % mss_now;
1415 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1428 tcp_minshall_update(tp, mss_now, skb);
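
Note: inside tcp_write_xmit (lines 1354-1428), limit starts at mss_now, may grow to the window allowance (lines 1401-1404), and is then trimmed so only whole MSS-sized segments go out, with tso_fragment splitting anything longer (lines 1407-1415); the identical trim recurs at lines 1479-1487. A sketch of the limit computation; window_budget is an illustrative stand-in for the tcp_window_allows result.

    #include <stdio.h>

    static unsigned int send_limit(unsigned int skb_len, unsigned int mss_now,
                                   unsigned int window_budget)
    {
            unsigned int limit = window_budget;

            if (skb_len < limit) {
                    unsigned int trim = skb_len % mss_now; /* line 1407 */
                    if (trim)
                            limit = skb_len - trim; /* whole segments only */
            }
            return limit; /* an skb longer than this gets tso_fragment()ed */
    }

    int main(void)
    {
            /* a 10000-byte skb at MSS 1448 in a 20000-byte budget: 8688 go out,
             * the 1312-byte tail waits for more data or an ACK */
            printf("%u\n", send_limit(10000, 1448, 20000));
            return 0;
    }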
1457 void tcp_push_one(struct sock *sk, unsigned int mss_now)
1463 BUG_ON(!skb || skb->len < mss_now);
1465 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1466 cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1473 limit = mss_now;
1476 mss_now, cwnd_quota);
1479 unsigned int trim = skb->len % mss_now;
1487 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1618 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
1643 ((skb_size + next_skb_size) > mss_now))
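
Note: line 1643 is the size guard for retransmit collapsing: two adjacent queued skbs are merged only when their combined payload still fits in one current-MSS segment. The predicate below inverts the bail-out condition for illustration.

    #include <stdio.h>

    static int can_collapse(unsigned int skb_size, unsigned int next_skb_size,
                            unsigned int mss_now)
    {
            return (skb_size + next_skb_size) <= mss_now;
    }

    int main(void)
    {
            /* two 500-byte retransmit candidates fit one 1448-byte MSS */
            printf("%d\n", can_collapse(500, 500, 1448));
            return 0;
    }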
1998 int mss_now;
2004 mss_now = tcp_current_mss(sk, 1);
2033 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);