Lines matching refs:mss_now in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/ipv4/

927 				 unsigned int mss_now)
929 if (skb->len <= mss_now || !sk_can_gso(sk) ||
938 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
939 skb_shinfo(skb)->gso_size = mss_now;
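The fragment at 927-939 marks an skb for TSO/GSO: when the payload spans more than one MSS, line 938 records how many on-the-wire segments the hardware will cut it into and line 939 records the per-segment size. A minimal standalone sketch of that arithmetic (plain C, not kernel code; the expansion of DIV_ROUND_UP is the only assumption):

#include <stdio.h>

/* Sketch of lines 938-939: a large skb is advertised to the GSO/TSO layer
 * as ceil(len / mss_now) segments of mss_now bytes each. */
static unsigned int gso_segment_count(unsigned int len, unsigned int mss_now)
{
	return (len + mss_now - 1) / mss_now;	/* DIV_ROUND_UP(len, mss_now) */
}

int main(void)
{
	/* e.g. a 4000-byte payload with a 1448-byte MSS becomes 3 segments */
	printf("%u\n", gso_segment_count(4000, 1448));
	return 0;
}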
995 unsigned int mss_now)
1061 tcp_set_skb_tso_segs(sk, skb, mss_now);
1062 tcp_set_skb_tso_segs(sk, buff, mss_now);
1147 int mss_now;
1152 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1155 if (mss_now > tp->rx_opt.mss_clamp)
1156 mss_now = tp->rx_opt.mss_clamp;
1159 mss_now -= icsk->icsk_ext_hdr_len;
1162 if (mss_now < 48)
1163 mss_now = 48;
1166 mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
1168 return mss_now;
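Lines 1147-1168 are evidently the MTU-to-MSS conversion (called as tcp_mtu_to_mss() at line 1231): the path MTU loses the fixed network and base TCP headers, is clamped to the peer's advertised mss_clamp, reduced by extension headers, floored at 48 bytes, and finally reduced by any TCP option space beyond the base header. A hedged, self-contained restatement of that arithmetic (the constant header sizes and example values are assumptions, not taken from the tree):

#include <stdio.h>

/* Sketch of the computation at lines 1152-1168. */
static int mtu_to_mss(int pmtu, int net_header_len, int tcp_header_len,
		      int ext_hdr_len, int mss_clamp)
{
	const int base_tcphdr = 20;			/* sizeof(struct tcphdr) */
	int mss_now = pmtu - net_header_len - base_tcphdr;

	if (mss_now > mss_clamp)			/* peer's advertised MSS  */
		mss_now = mss_clamp;

	mss_now -= ext_hdr_len;				/* extension headers      */

	if (mss_now < 48)				/* lower bound, line 1162 */
		mss_now = 48;

	mss_now -= tcp_header_len - base_tcphdr;	/* TCP options, line 1166 */
	return mss_now;
}

int main(void)
{
	/* IPv4, no IP options, timestamps on: 1500 - 20 - 20 - 12 = 1448 */
	printf("%d\n", mtu_to_mss(1500, 20, 32, 0, 65535));
	return 0;
}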
1226 int mss_now;
1231 mss_now = tcp_mtu_to_mss(sk, pmtu);
1232 mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1237 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1238 tp->mss_cache = mss_now;
1240 return mss_now;
1251 u32 mss_now;
1256 mss_now = tp->mss_cache;
1261 mss_now = tcp_sync_mss(sk, mtu);
1269 * we have to adjust mss_now correspondingly */
1272 mss_now -= delta;
1275 return mss_now;
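Lines 1226-1240 appear to be tcp_sync_mss() (the name shows up in the call at line 1261): it converts the current path MTU to an MSS, bounds it to half the send window, keeps it no larger than the MTU-probing search_low conversion, and caches the result in tp->mss_cache. Lines 1251-1275 are the per-send helper tcp_current_mss() (called at lines 1621 and 2313): it starts from that cache, re-syncs if the MTU changed, and subtracts whatever option bytes the next segment carries beyond what mss_cache assumed (the delta at line 1272). An illustration-only sketch of that cache-plus-delta relationship, with names local to the sketch:

#include <stdio.h>

/* Sketch of lines 1256-1272: the cached MSS assumes a fixed TCP header
 * size; a segment with a different header size shifts the usable MSS by
 * the difference. */
struct mss_state {
	unsigned int mss_cache;		/* result of the sync step          */
	unsigned int cached_hdr_len;	/* header size mss_cache assumed    */
};

static unsigned int current_mss(const struct mss_state *s, unsigned int hdr_len)
{
	unsigned int mss_now = s->mss_cache;

	if (hdr_len != s->cached_hdr_len)	/* cf. comment at line 1269 */
		mss_now -= hdr_len - s->cached_hdr_len;

	return mss_now;
}

int main(void)
{
	struct mss_state s = { .mss_cache = 1448, .cached_hdr_len = 32 };

	/* a segment carrying 12 extra option bytes loses 12 bytes of payload */
	printf("%u\n", current_mss(&s, 44));	/* prints 1436 */
	return 0;
}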
1300 * mss_now != mss_cache, we will request caller to create a small skb
1311 unsigned int mss_now, unsigned int cwnd)
1317 cwnd_len = mss_now * cwnd;
1327 return needed - needed % mss_now;
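Lines 1311-1327 compute the split point for an over-sized skb: the congestion window allows at most cwnd * mss_now bytes (line 1317), and the amount actually sent is rounded down to a whole number of MSS-sized segments (line 1327) so the burst does not end on a sub-MSS runt. A standalone restatement of that rounding; the single clamp against cwnd_len is a simplification of the kernel's full window/cwnd bookkeeping:

#include <stdio.h>

/* Sketch of lines 1317 and 1327: cap the chunk at the congestion-window
 * budget, then trim it to an exact multiple of mss_now. */
static unsigned int mss_split_point(unsigned int needed, unsigned int mss_now,
				    unsigned int cwnd)
{
	unsigned int cwnd_len = mss_now * cwnd;		/* line 1317 */

	if (needed > cwnd_len)				/* simplified clamp */
		needed = cwnd_len;

	return needed - needed % mss_now;		/* line 1327 */
}

int main(void)
{
	/* 10000 bytes queued, mss 1448, cwnd 4 -> 4 * 1448 = 5792 bytes now */
	printf("%u\n", mss_split_point(10000, 1448, 4));
	return 0;
}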
1355 unsigned int mss_now)
1359 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1360 tcp_set_skb_tso_segs(sk, skb, mss_now);
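Lines 1355-1360 re-validate an skb's TSO marking just before transmission: if the segment count was never set, or a multi-segment skb was sized for a different MSS than the one in force now, line 1360 re-runs tcp_set_skb_tso_segs() with the current mss_now. A minimal sketch of that staleness check, with field and helper names local to the sketch:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the condition at line 1359. */
struct tso_mark {
	unsigned int segs;	/* 0 means "not yet marked"     */
	unsigned int size;	/* MSS the marking was based on */
};

static bool tso_mark_is_stale(const struct tso_mark *m, unsigned int mss_now)
{
	return m->segs == 0 || (m->segs > 1 && m->size != mss_now);
}

int main(void)
{
	struct tso_mark m = { .segs = 3, .size = 1460 };

	/* the MSS shrank (e.g. after PMTU discovery), so the skb must be re-marked */
	printf("%d\n", tso_mark_is_stale(&m, 1448));	/* prints 1 */
	return 0;
}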
1382 unsigned mss_now, int nonagle)
1384 return (skb->len < mss_now &&
1471 unsigned int mss_now, gfp_t gfp)
1479 return tcp_fragment(sk, skb, len, mss_now);
1507 tcp_set_skb_tso_segs(sk, skb, mss_now);
1508 tcp_set_skb_tso_segs(sk, buff, mss_now);
1607 int mss_now;
1621 mss_now = tcp_current_mss(sk);
1689 tcp_set_skb_tso_segs(sk, skb, mss_now);
1731 static int BCMFASTPATH_HOST tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1755 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1762 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1766 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1775 limit = mss_now;
1777 limit = tcp_mss_split_point(sk, skb, mss_now,
1781 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
1794 tcp_minshall_update(tp, mss_now, skb);
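Lines 1731-1794 are the heart of tcp_write_xmit(): each queued skb is checked against the send window (line 1762) and Nagle (line 1766), the per-skb limit is either a single mss_now (line 1775) or the split point from tcp_mss_split_point() (line 1777), and anything past the limit is carved off by tso_fragment() (line 1781). The following sketch illustrates only the limit selection; the single cwnd-quota parameter stands in for tcp_mss_split_point()'s full bookkeeping and is an assumption:

#include <stdio.h>

/* Sketch of the per-skb limit logic at lines 1775-1781. */
static unsigned int send_limit(unsigned int skb_len, unsigned int mss_now,
			       unsigned int cwnd_quota_bytes)
{
	unsigned int limit = mss_now;			/* line 1775 */

	if (skb_len > mss_now) {			/* multi-segment (TSO) skb */
		limit = cwnd_quota_bytes;		/* cf. line 1777 */
		limit -= limit % mss_now;		/* whole segments only */
	}
	return limit;
}

int main(void)
{
	/* 9000-byte TSO skb, mss 1448, quota of 4 segments: 5792 bytes go out
	 * now; tso_fragment() (line 1781) would split off the remaining bytes */
	printf("limit=%u\n", send_limit(9000, 1448, 4 * 1448));
	return 0;
}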
1829 void tcp_push_one(struct sock *sk, unsigned int mss_now)
1833 BUG_ON(!skb || skb->len < mss_now);
1835 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2307 int mss_now;
2313 mss_now = tcp_current_mss(sk);
2336 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);