Searched refs:snd_una (Results 1 - 25 of 50) sorted by last modified time

/linux-master/net/ipv4/
tcp_ao.c
773 snd_basis = tcp_sk(sk)->snd_una;
819 sne = tcp_ao_compute_sne(READ_ONCE(ao->snd_sne), READ_ONCE(tp->snd_una),
tcp.c
631 answ = READ_ONCE(tp->write_seq) - tp->snd_una;
3982 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
4663 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una);
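
Both tcp.c hits above compute the same quantity: bytes the application has queued that are not yet cumulatively acknowledged, i.e. write_seq - snd_una. A minimal userspace sketch of that arithmetic, with a hypothetical toy_tcp_sock standing in for struct tcp_sock; the unsigned 32-bit subtraction stays correct across sequence-number wrap.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the two struct tcp_sock fields involved. */
struct toy_tcp_sock {
    uint32_t snd_una;   /* first byte we still want an ACK for */
    uint32_t write_seq; /* next sequence number the app will queue */
};

/* Unacked bytes in the send queue, modulo 2^32 (wrap-safe). */
uint32_t tcp_outq_bytes(const struct toy_tcp_sock *tp)
{
    return tp->write_seq - tp->snd_una;
}

int main(void)
{
    struct toy_tcp_sock tp = { .snd_una = 0xfffffff0u, .write_seq = 0x10u };

    /* Prints 32: the computation survives the wrap at 2^32. */
    printf("unacked bytes: %u\n", (unsigned)tcp_outq_bytes(&tp));
    return 0;
}
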
tcp_minisocks.c
541 newtp->snd_sml = newtp->snd_una = seq;
tcp_ipv4.c
432 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
485 u32 seq, snd_una; local
539 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
541 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
543 !between(seq, snd_una, tp->snd_nxt)) {
2902 READ_ONCE(tp->write_seq) - tp->snd_una,
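
The tcp_ipv4.c hits at 485-543 belong to ICMP error handling: an error is acted on only if the sequence number it quotes falls between the oldest unacknowledged byte and snd_nxt, with the Fast Open case substituting the SYN's ISN for snd_una. A rough userspace model of that range check; between() mirrors the kernel's wrap-safe "seq2 <= seq1 <= seq3" helper, and the other names are illustrative.

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe test that seq1 lies in [seq2, seq3] on the 32-bit circle. */
bool between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
    return seq3 - seq2 >= seq1 - seq2;
}

/* Accept an ICMP error only if it refers to data we actually have in
 * flight; fastopen_isn replaces snd_una while the TFO SYN is unacked. */
bool icmp_err_plausible(uint32_t quoted_seq, uint32_t snd_una,
                        uint32_t snd_nxt, bool fastopen, uint32_t fastopen_isn)
{
    uint32_t una = fastopen ? fastopen_isn : snd_una;

    return between(quoted_seq, una, snd_nxt);
}
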
tcp_input.c
99 #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
908 if (after(tp->snd_una, tp->rtt_seq)) {
1229 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
1234 * again, D-SACK block must not to go across snd_una (for the same reason as
1257 * start_seq == snd_una is non-sensical (see comments above)
1259 if (after(start_seq, tp->snd_una))
1265 /* ...Then it's D-SACK, and must reside below snd_una completely */
1266 if (after(end_seq, tp->snd_una))
1397 if (!after(end_seq, tp->snd_una))
1596 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
[all...]
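
The tcp_input.c comments quoted above spell out the D-SACK sanity rule: a SACK block claiming to report duplicate data must lie entirely below snd_una, while one starting above snd_una is an ordinary SACK block instead. A simplified sketch of just that shape test (the kernel additionally validates against undo_marker and the outstanding window); after() is the usual wrap-safe 32-bit comparison.

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "seq1 is newer than seq2" on 32-bit sequence numbers. */
bool after(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq2 - seq1) < 0;
}

/* True if [start_seq, end_seq) can only be a D-SACK block, i.e. it sits
 * completely in already-acknowledged space below snd_una. */
bool dsack_shaped(uint32_t start_seq, uint32_t end_seq, uint32_t snd_una)
{
    if (after(start_seq, snd_una))
        return false; /* starts in unacked space: a normal SACK block */
    if (after(end_seq, snd_una))
        return false; /* straddles snd_una: not a valid D-SACK either */
    return true;      /* resides below snd_una completely */
}
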
tcp_cubic.c
392 if (after(tp->snd_una, ca->end_seq))
tcp_dctcp.c
122 if (!before(tp->snd_una, ca->next_seq)) {
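
The tcp_cubic.c and tcp_dctcp.c hits share one idiom: record a target sequence number and perform some once-per-window work only after snd_una has advanced past it. A generic sketch of that pattern with hypothetical names; the real modules refresh HyStart state (CUBIC) or the ECN-fraction average (DCTCP) at this point.

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "seq1 is older than seq2". */
bool before(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) < 0;
}

struct toy_ca {
    uint32_t next_seq; /* end of the current observation window */
};

/* Called on every ACK: returns true once a full window of data has been
 * acknowledged, and re-arms the trigger for the next window. */
bool window_elapsed(struct toy_ca *ca, uint32_t snd_una, uint32_t snd_nxt)
{
    if (before(snd_una, ca->next_seq))
        return false;       /* still inside the current window */
    ca->next_seq = snd_nxt; /* observe the next window of data */
    return true;
}
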
tcp_diag.c
31 r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una;
tcp_timer.c
553 inet->inet_num, tp->snd_una, tp->snd_nxt,
561 inet->inet_num, tp->snd_una, tp->snd_nxt,
tcp_output.c
415 return tp->snd_una != tp->snd_up;
1386 /* The urg_mode check is necessary during a below snd_una win probe */
1917 if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
1957 return after(tp->snd_sml, tp->snd_una) &&
3297 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
3303 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
3307 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3323 if (TCP_SKB_CB(skb)->seq != tp->snd_una)
3888 tp->snd_una = tp->write_seq;
4269 tcp_init_nondata_skb(skb, tp->snd_una
[all...]
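
The tcp_output.c hits at 3297-3323 come from the retransmission path: when the cumulative ACK point has moved into the middle of a queued segment, only the bytes from snd_una onward still need to go back on the wire, so the already-acked head is trimmed first. A simplified take on that decision, with a toy segment descriptor instead of an skb.

#include <stdint.h>
#include <stdio.h>

struct toy_segment {
    uint32_t seq;     /* first sequence number carried by the segment */
    uint32_t end_seq; /* one past the last sequence number */
};

/* Wrap-safe "seq1 is older than seq2". */
static int before(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) < 0;
}

/* Bytes of already-acknowledged head to trim before retransmitting, or -1
 * if the whole segment is acked and needs no retransmission at all. */
static int32_t acked_head_len(const struct toy_segment *skb, uint32_t snd_una)
{
    if (!before(skb->seq, snd_una))
        return 0;                      /* nothing in it is acked yet */
    if (!before(snd_una, skb->end_seq))
        return -1;                     /* fully acknowledged already */
    return (int32_t)(snd_una - skb->seq);
}

int main(void)
{
    struct toy_segment skb = { .seq = 1000, .end_seq = 2000 };

    /* Prints 400: the first 400 bytes were acked, resend the remaining 600. */
    printf("trim %d bytes\n", (int)acked_head_len(&skb, 1400));
    return 0;
}
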
/linux-master/net/mptcp/
subflow.c
475 WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
sockopt.c
930 info->mptcpi_snd_una = msk->snd_una;
protocol.c
419 msk->write_seq == READ_ONCE(msk->snd_una);
994 u64 snd_una; local
996 snd_una = msk->snd_una;
998 if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
1013 if (dfrag && after64(snd_una, dfrag->data_seq)) {
1014 u64 delta = snd_una - dfrag->data_seq;
1034 if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
1038 if (snd_una == msk->snd_nxt && snd_una
1293 u64 snd_una = READ_ONCE(msk->snd_una); local
[all...]
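
The protocol.c hits around 994-1014 show how MPTCP reclaims its retransmit queue: a data fragment wholly covered by the 64-bit MPTCP-level snd_una is released, while a partially covered one is advanced past the acknowledged prefix. A sketch of that bookkeeping; toy_dfrag is a hypothetical stand-in for the kernel's data-fragment structure.

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "seq1 is newer than seq2" on 64-bit data sequence numbers. */
bool after64(uint64_t seq1, uint64_t seq2)
{
    return (int64_t)(seq1 - seq2) > 0;
}

struct toy_dfrag {
    uint64_t data_seq; /* first data sequence number in the fragment */
    uint32_t offset;   /* bytes of the fragment already consumed */
    uint32_t data_len; /* bytes still queued in the fragment */
};

/* Returns true if the fragment is fully acknowledged and may be freed;
 * otherwise drops any acknowledged prefix and keeps it queued. */
bool dfrag_clear_or_advance(struct toy_dfrag *dfrag, uint64_t snd_una)
{
    if (!after64(dfrag->data_seq + dfrag->data_len, snd_una))
        return true; /* nothing of it is still in flight */

    if (after64(snd_una, dfrag->data_seq)) {
        uint64_t delta = snd_una - dfrag->data_seq;

        dfrag->data_seq += delta; /* skip the acknowledged prefix */
        dfrag->offset   += (uint32_t)delta;
        dfrag->data_len -= (uint32_t)delta;
    }
    return false;
}
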
protocol.h
283 u64 snd_una; member in struct:mptcp_sock
423 if (msk->snd_una == msk->snd_nxt)
options.c
1033 msk->bytes_acked += new_snd_una - msk->snd_una;
1034 WRITE_ONCE(msk->snd_una, new_snd_una);
1051 old_snd_una = msk->snd_una;
1131 /* on fallback we just need to ignore the msk-level snd_una, as
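
The options.c hits show the msk-level ACK update: snd_una may only move forward, bytes_acked grows by the distance it advanced, and a fallback socket ignores the msk-level value entirely. A compact model of the forward-only update; the stale-ACK guard is the same wrap-safe 64-bit comparison idea as above.

#include <stdint.h>

struct toy_msk {
    uint64_t snd_una;     /* MPTCP-level cumulative ACK point */
    uint64_t bytes_acked; /* lifetime count of acknowledged payload bytes */
};

/* Apply a DATA_ACK: ignore anything old or duplicate, otherwise advance. */
void update_data_ack(struct toy_msk *msk, uint64_t new_snd_una)
{
    if ((int64_t)(new_snd_una - msk->snd_una) <= 0)
        return; /* stale or duplicate DATA_ACK */

    msk->bytes_acked += new_snd_una - msk->snd_una;
    msk->snd_una = new_snd_una;
}
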
/linux-master/net/core/
sock.c
920 atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
filter.c
7122 case offsetof(struct bpf_tcp_sock, snd_una):
7123 BPF_TCP_SOCK_GET_COMMON(snd_una);
10577 case offsetof(struct bpf_sock_ops, snd_una):
10578 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una);
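
filter.c wires snd_una through to BPF, so both struct bpf_tcp_sock and struct bpf_sock_ops expose it read-only. A hypothetical sockops program reading it (compiled with clang -target bpf against libbpf's bpf_helpers.h); it assumes the RTT callback has been enabled elsewhere via bpf_sock_ops_cb_flags_set().

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int log_unacked(struct bpf_sock_ops *skops)
{
    /* Bytes sent but not yet cumulatively acknowledged on this socket. */
    __u32 unacked = skops->snd_nxt - skops->snd_una;

    if (skops->op == BPF_SOCK_OPS_RTT_CB)
        bpf_printk("snd_una=%u unacked=%u", skops->snd_una, unacked);

    return 1; /* keep default kernel behaviour */
}

char _license[] SEC("license") = "GPL";
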
/linux-master/tools/include/uapi/linux/
bpf.h
6344 __u32 snd_una; /* First byte we want an ack for */ member in struct:bpf_tcp_sock
6371 * sum(delta(snd_una)), or how many bytes
6783 __u32 snd_una; member in struct:bpf_sock_ops
/linux-master/net/ipv6/
tcp_ipv6.c
383 __u32 seq, snd_una; local
433 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
435 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
437 !between(seq, snd_una, tp->snd_nxt)) {
2223 READ_ONCE(tp->write_seq) - tp->snd_una,
/linux-master/include/uapi/linux/
bpf.h
6344 __u32 snd_una; /* First byte we want an ack for */ member in struct:bpf_tcp_sock
6371 * sum(delta(snd_una)), or how many bytes
6783 __u32 snd_una; member in struct:bpf_sock_ops
/linux-master/include/trace/events/
tcp.h
255 __field(__u32, snd_una)
284 __entry->snd_una = tp->snd_una;
296 TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx skbaddr=%p skaddr=%p",
299 __entry->data_len, __entry->snd_nxt, __entry->snd_una,
/linux-master/include/net/
tcp.h
1365 return tp->snd_una + tp->snd_wnd;
2117 return tp->snd_una;
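
The include/net/tcp.h hit at 1365 is the classic end-of-send-window computation: the peer's advertised window of snd_wnd bytes starts at snd_una, so snd_una + snd_wnd is the first sequence number that may not be sent. A toy version that also derives the space still usable beyond snd_nxt.

#include <stdint.h>
#include <stdio.h>

struct toy_tp {
    uint32_t snd_una; /* oldest byte not yet acknowledged */
    uint32_t snd_nxt; /* next byte that will be sent */
    uint32_t snd_wnd; /* window advertised by the receiver */
};

/* First sequence number outside the receiver's window. */
uint32_t wnd_end(const struct toy_tp *tp)
{
    return tp->snd_una + tp->snd_wnd;
}

int main(void)
{
    struct toy_tp tp = { .snd_una = 1000, .snd_nxt = 1600, .snd_wnd = 4096 };

    /* Prints 3496: bytes that may still be sent before the window closes. */
    printf("usable window: %u\n", (unsigned)(wnd_end(&tp) - tp.snd_nxt));
    return 0;
}
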
/linux-master/include/linux/
tcp.h
289 u32 snd_una; /* First byte we want an ack for */ member in struct:tcp_sock
328 * sum(delta(snd_una)), or how many bytes
425 u32 undo_marker; /* snd_una upon a new recovery episode. */
/linux-master/drivers/scsi/cxgbi/
libcxgbi.c
833 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
999 csk->snd_una = snd_nxt - 1;
1024 unsigned int snd_una, int seq_chk)
1027 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
1029 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);
1064 if (unlikely(before(snd_una, csk->snd_una))) {
1065 pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
1066 csk, csk->state, csk->flags, csk->tid, snd_una,
1067 csk->snd_una);
1023 cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, unsigned int snd_una, int seq_chk) argument
[all...]
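
In libcxgbi.c the offload hardware reports its view of snd_una alongside returned work-request credits, and cxgbi_sock_rcv_wr_ack() warns when that value would move the acknowledged point backwards. A loose sketch of that consistency check (here the stale value is simply dropped); field and function names are illustrative.

#include <stdint.h>
#include <stdio.h>

struct toy_csk {
    uint32_t snd_una; /* driver's current view of the acked point */
};

/* Wrap-safe "seq1 is older than seq2". */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) < 0;
}

/* Handle a hardware WR-ACK carrying the device's snd_una snapshot. */
static void rcv_wr_ack(struct toy_csk *csk, uint32_t hw_snd_una)
{
    if (seq_before(hw_snd_una, csk->snd_una)) {
        fprintf(stderr, "stale snd_una %u < %u, ignoring\n",
                hw_snd_una, csk->snd_una);
        return;
    }
    csk->snd_una = hw_snd_una; /* advance the acknowledged point */
}

int main(void)
{
    struct toy_csk csk = { .snd_una = 5000 };

    rcv_wr_ack(&csk, 4000); /* warns and keeps 5000 */
    rcv_wr_ack(&csk, 6000); /* advances to 6000 */
    return 0;
}
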
/linux-master/drivers/infiniband/hw/irdma/
type.h
912 u32 snd_una; member in struct:irdma_tcp_offload_info

Completed in 535 milliseconds
