Lines matching references to the symbol `sk` (struct sock *) — cross-reference search results; each entry is the source line number followed by the matching line fragment.

22 static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
25 __skb_queue_tail(&sk->sk_receive_queue, skb);
26 skb_set_owner_r(skb, sk);
27 sk->sk_data_ready(sk);
30 static void dccp_fin(struct sock *sk, struct sk_buff *skb)
38 sk->sk_shutdown = SHUTDOWN_MASK;
39 sock_set_flag(sk, SOCK_DONE);
40 dccp_enqueue_skb(sk, skb);
43 static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
47 switch (sk->sk_state) {
65 if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
70 dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
71 dccp_done(sk);
77 dccp_fin(sk, skb);
78 dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
84 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
89 static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
99 if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
100 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
105 switch (sk->sk_state) {
107 dccp_send_close(sk, 0);
108 dccp_set_state(sk, DCCP_CLOSING);
114 dccp_fin(sk, skb);
115 dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
118 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
145 static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
149 sk->sk_err = err;
152 dccp_fin(sk, skb);
154 if (err && !sock_flag(sk, SOCK_DEAD))
155 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
156 dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
159 static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
161 struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
170 static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
172 const struct dccp_sock *dp = dccp_sk(sk);
175 if (!(sk->sk_shutdown & RCV_SHUTDOWN))
176 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
181 if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
182 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
185 static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
188 struct dccp_sock *dp = dccp_sk(sk);
208 dccp_update_gsr(sk, seqno);
237 dccp_update_gsr(sk, seqno);
276 dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
283 static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
286 struct dccp_sock *dp = dccp_sk(sk);
296 dccp_enqueue_skb(sk, skb);
309 dccp_rcv_reset(sk, skb);
312 if (dccp_rcv_closereq(sk, skb))
316 if (dccp_rcv_close(sk, skb))
341 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
346 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
364 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
367 if (dccp_check_seqno(sk, skb))
370 if (dccp_parse_options(sk, NULL, skb))
373 dccp_handle_ackvec_processing(sk, skb);
374 dccp_deliver_input_to_ccids(sk, skb);
376 return __dccp_rcv_established(sk, skb, dh, len);
384 static int dccp_rcv_request_sent_state_process(struct sock *sk,
401 const struct inet_connection_sock *icsk = inet_csk(sk);
402 struct dccp_sock *dp = dccp_sk(sk);
420 if (dccp_parse_options(sk, NULL, skb))
425 dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
429 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
430 WARN_ON(sk->sk_send_head == NULL);
431 kfree_skb(sk->sk_send_head);
432 sk->sk_send_head = NULL;
443 dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);
460 dccp_set_state(sk, DCCP_PARTOPEN);
468 if (dccp_feat_activate_values(sk, &dp->dccps_featneg))
472 icsk->icsk_af_ops->rebuild_header(sk);
474 if (!sock_flag(sk, SOCK_DEAD)) {
475 sk->sk_state_change(sk);
476 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
479 if (sk->sk_write_pending || inet_csk_in_pingpong_mode(sk) ||
497 dccp_send_ack(sk);
512 dccp_set_state(sk, DCCP_CLOSED);
513 sk->sk_err = ECOMM;
517 static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
522 struct dccp_sock *dp = dccp_sk(sk);
528 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
531 if (sk->sk_state == DCCP_RESPOND)
546 if (sk->sk_state == DCCP_PARTOPEN)
547 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
553 dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
557 dccp_set_state(sk, DCCP_OPEN);
561 __dccp_rcv_established(sk, skb, dh, len);
571 int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
574 struct dccp_sock *dp = dccp_sk(sk);
576 const int old_state = sk->sk_state;
602 if (sk->sk_state == DCCP_LISTEN) {
609 acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
623 } else if (sk->sk_state == DCCP_CLOSED) {
629 if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb))
644 (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) {
645 dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
650 if (dccp_parse_options(sk, NULL, skb))
662 dccp_rcv_reset(sk, skb);
665 if (dccp_rcv_closereq(sk, skb))
669 if (dccp_rcv_close(sk, skb))
674 switch (sk->sk_state) {
676 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
685 dccp_handle_ackvec_processing(sk, skb);
686 dccp_deliver_input_to_ccids(sk, skb);
689 queued = dccp_rcv_respond_partopen_state_process(sk, skb,
698 sk->sk_state_change(sk);
699 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
703 dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
718 * @sk: socket structure
724 u32 dccp_sample_rtt(struct sock *sk, long delta)
727 delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;