/*
 *  net/dccp/timer.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>

#include "dccp.h"

/* sysctl variables governing numbers of retransmission attempts */
int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_dccp_retries1	__read_mostly = TCP_RETR1;
int sysctl_dccp_retries2	__read_mostly = TCP_RETR2;

/*
 * dccp_write_err  -  give up retransmitting: report the error and abort
 *
 * Sets sk_err (preferring a pending soft error over ETIMEDOUT), notifies the
 * socket user via sk_error_report, sends a Reset with code "Aborted" to the
 * peer, moves the socket to the done state and bumps the abort-on-timeout
 * MIB counter.
 */
static void dccp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
	dccp_done(sk);
	DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
}

/*
 * A write timeout has occurred. Process the after effects.
 *
 * Decides from the retransmission count whether to keep trying or to give up
 * and abort via dccp_write_err().  Returns 1 if the connection was aborted,
 * 0 if the caller may retransmit again.
 *
 * While still connecting (REQUESTING/PARTOPEN) the limit is the user-set
 * icsk_syn_retries if non-zero, else sysctl_dccp_request_retries; established
 * sockets use sysctl_dccp_retries2 and, once sysctl_dccp_retries1 is reached,
 * hint the routing layer to look for a better route (dst_negative_advice).
 */
static int dccp_write_timeout(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;

	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
		if (icsk->icsk_retransmits != 0)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ?
			    : sysctl_dccp_request_retries;
	} else {
		if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
			/* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu
			   black hole detection. :-(

			   It is place to make it. It is not made. I do not want
			   to make it. It is disgusting. It does not work in any
			   case. Let me to cite the same draft, which requires for
			   us to implement this:

   "The one security concern raised by this memo is that ICMP black holes
   are often caused by over-zealous security administrators who block
   all ICMP messages.  It is vitally important that those who design and
   deploy security systems understand the impact of strict filtering on
   upper-layer protocols.  The safest web site in the world is worthless
   if most TCP implementations cannot transfer data from it.  It would
   be far nicer to have all of the black holes fixed rather than fixing
   all of the TCP implementations."

			   Golden words :-).
			 */

			dst_negative_advice(sk);
		}

		retry_until = sysctl_dccp_retries2;
	}

	if (icsk->icsk_retransmits >= retry_until) {
		/* Has it gone just too far? */
		dccp_write_err(sk);
		return 1;
	}
	return 0;
}

/*
 * The DCCP retransmit timer.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/*
	 * More than 4MSL (8 minutes) has passed, a RESET(aborted) was
	 * sent, no need to retransmit, this sock is dead.
	 */
	if (dccp_write_timeout(sk))
		return;

	/*
	 * We want to know the number of packets retransmitted, not the
	 * total number of retransmissions of clones of original packets.
	 */
	if (icsk->icsk_retransmits == 0)
		DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk) != 0) {
		/*
		 * Retransmission failed because of local congestion,
		 * do not backoff.
		 *
		 * NOTE(review): the decrement presumably compensates for an
		 * increment done inside dccp_retransmit_skb() — confirm
		 * against net/dccp/output.c.  The net effect is that the
		 * retransmit counter is held at >= 1 without growing, and
		 * the timer is re-armed with a short probe interval instead
		 * of the backed-off RTO.
		 */
		if (--icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		return;
	}

	icsk->icsk_backoff++;

	/* Exponential backoff of the RTO, capped at DCCP_RTO_MAX. */
	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
}

/*
 * Timer callback for the icsk retransmit timer; @data is the struct sock.
 *
 * Runs with the socket BH-locked.  If the socket is currently owned by the
 * user, or the deadline has not yet expired, the timer is simply re-armed
 * (HZ/20 resp. the remaining time).  Otherwise the pending event is consumed
 * from icsk_pending before dispatch, so a re-arm during handling is not lost.
 */
static void dccp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       jiffies + (HZ / 20));
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		dccp_retransmit_timer(sk);
		break;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 * Timer for listening sockets: prunes the queue of pending connection
 * requests (the DCCP counterpart of the TCP SYN-queue timer).
 */
static void dccp_response_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
				   DCCP_RTO_MAX);
}

/*
 * Keepalive timer callback; @data is the struct sock.
 *
 * For DCCP this currently only services listening sockets (request-queue
 * pruning via dccp_response_timer()); any other state is a no-op.
 */
static void dccp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ / 20);
		goto out;
	}

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_response_timer(sk);
		goto out;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
static void dccp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later: mark the ack path blocked so the slow
		 * path knows a delayed ACK was deferred, and re-arm.
		 */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		/* Not yet due: sleep until the recorded ACK deadline. */
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* Transmit-delay timer: used by the CCIDs to delay actual send time */
static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		/* Socket busy: retry on the next jiffy. */
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* Initialise dccps_xmit_timer with dccp_write_xmit_timer() as handler. */
static void dccp_init_write_xmit_timer(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
		    (unsigned long)sk);
}

/* Set up all transmit-side timers for a DCCP socket. */
void dccp_init_xmit_timers(struct sock *sk)
{
	dccp_init_write_xmit_timer(sk);
	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
				  &dccp_keepalive_timer);
}

/* Epoch against which dccp_timestamp() measures; set at init time. */
static ktime_t dccp_timestamp_seed;
/**
 * dccp_timestamp  -  10s of microseconds time source
 * Returns the number of 10s of microseconds since loading DCCP. This is native
 * DCCP time difference format (RFC 4340, sec. 13).
 * Please note: This will wrap around about circa every 11.9 hours.
 */
u32 dccp_timestamp(void)
{
	s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

	/* Convert microseconds to units of 10 microseconds. */
	do_div(delta, 10);
	return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);

/* Record the epoch used by dccp_timestamp(); runs once during boot/init. */
void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}