/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/* Must be called with locally disabled BHs. */
void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	struct inet_bind_bucket *tb;
	/* Unlink from established hashes. */
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash);

	write_lock(&ehead->lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(&ehead->lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(&ehead->lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	inet_twsk_put(tw);
}

EXPORT_SYMBOL_GPL(__inet_twsk_kill);
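/*
 * Illustrative only: a minimal sketch (not compiled) of how a caller in
 * process context could satisfy the locking contract above by disabling
 * BHs locally around __inet_twsk_kill().  The death-row handlers below do
 * not need this because they already run in BH context.  The helper name
 * is hypothetical.
 */
#if 0
static void example_kill_twsk(struct inet_timewait_sock *tw,
			      struct inet_hashinfo *hashinfo)
{
	local_bh_disable();		/* enter a BH-safe section */
	__inet_twsk_kill(tw, hashinfo);	/* unhashes tw and drops one ref */
	local_bh_enable();
}
#endif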
/*
 * Enter the time wait state.  This is called with locally disabled BHs.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	BUG_TRAP(icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(&ehead->lock);

	/* Step 2: Remove SK from established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);

	/* Step 3: Hash TW into TIMEWAIT chain. */
	inet_twsk_add_node(tw, &ehead->twchain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(&ehead->lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
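/*
 * Illustrative only: the usual calling sequence, loosely modeled on what
 * tcp_time_wait() does -- allocate the timewait bucket, then perform the
 * hashdance with BHs disabled.  Function and variable names here are
 * hypothetical; a real caller also copies protocol-private state into tw
 * and then parks it on the death row (see inet_twsk_schedule() below).
 */
#if 0
static void example_enter_timewait(struct sock *sk,
				   struct inet_hashinfo *hashinfo,
				   int substate)
{
	struct inet_timewait_sock *tw = inet_twsk_alloc(sk, substate);

	if (tw != NULL) {
		local_bh_disable();
		__inet_twsk_hashdance(tw, sk, hashinfo);
		local_bh_enable();
	}
}
#endif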
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->daddr;
		tw->tw_rcv_saddr    = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num	    = inet->num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->sport;
		tw->tw_dport	    = inet->dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_prot	    = sk->sk_prot_creator;
		atomic_set(&tw->tw_refcnt, 1);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);
/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version, where the lock
	 * was released after detaching the chain.  That was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

	return ret;
}
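/*
 * Worked example of the quota above, assuming INET_TWDR_TWKILL_QUOTA is
 * 100 as defined in inet_timewait_sock.h: if 5000 sockets expire in one
 * slot, the timer handler reaps roughly one quota's worth (the lock is
 * dropped and retaken around every kill), reports "quota exceeded" via
 * the non-zero return, and the caller defers the remainder of the slot
 * to the workqueue (see inet_twdr_hangman(), which sets the slot bit and
 * calls schedule_work()).
 */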
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
	}
	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);
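/*
 * Worked example of the slow wheel the hangman drives, assuming TCP's
 * setup (INET_TWDR_TWKILL_SLOTS == 8 and a period of
 * TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS, i.e. 60s / 8 = 7.5s): each
 * timer tick reaps one of the 8 cells and advances twdr->slot by one, so
 * a socket parked the maximum 7 cells ahead waits close to the full 60s
 * before its cell comes up.  The "& (INET_TWDR_TWKILL_SLOTS - 1)" works
 * because the slot count is a power of two, making the AND equivalent to
 * a modulo.
 */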
extern void twkill_slots_invalid(void);
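/*
 * twkill_slots_invalid() above is deliberately declared but never
 * defined: the sanity check in inet_twdr_twkill_work() below is a
 * compile-time constant, so when it is false the optimizer discards the
 * call, and when it is true the unresolved symbol breaks the final link.
 * A sketch of a roughly equivalent modern spelling, using the kernel's
 * BUILD_BUG_ON() compile-time assertion:
 */
#if 0
	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
		     sizeof(((struct inet_timewait_death_row *)NULL)->thread_slots) * 8);
#endif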
void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
		twkill_slots_invalid();

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);
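/*
 * Illustrative only: the typical early-kill pattern, loosely modeled on
 * TCP's handling of a new SYN that legitimately reuses a TIME_WAIT pair
 * (the TCP_TW_SYN path in tcp_v4_rcv()) -- the bucket is descheduled and
 * the caller's lookup reference is dropped.  Variable names here are
 * hypothetical.
 */
#if 0
	inet_twsk_deschedule(tw, twdr);	/* unlink from death row + unhash */
	inet_twsk_put(tw);		/* release the lookup reference */
#endif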
void inet_twsk_schedule(struct inet_timewait_sock *tw,
		       struct inet_timewait_death_row *twdr,
		       const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is probability to lose a single packet and
	 * time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so that we
	 * waited at least for one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement, waiting
	 *   only for 60sec; we should wait at least for 240 secs.
	 *   Well, 240 consumes too many resources 8)
	 * ]
	 * This interval is not reduced, so that we catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
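	/*
	 * The line above is ceiling division by the recycle tick, i.e.
	 * slot = DIV_ROUND_UP(timeo, 1 << INET_TWDR_RECYCLE_TICK).
	 * Worked example, assuming HZ=1000 (where the header definitions
	 * make INET_TWDR_RECYCLE_TICK come out as 7, a 128-jiffy tick):
	 * a 3.5*RTO timeout of 700ms is 700 jiffies, so
	 * slot = ceil(700 / 128) = 6, about 768ms of recycle-wheel time,
	 * comfortably inside the wheel's 32 * 128ms ~= 4s span.
	 */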
	spin_lock(&twdr->death_lock);

	/* Unlink it if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule on the slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = (timeo + twdr->period - 1) / twdr->period;
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);
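/*
 * Illustrative only: how a TCP-style caller might finish entering
 * TIME_WAIT once the bucket is hashed, loosely modeled on tcp_time_wait()
 * with TCP's death row and TCP_TIMEWAIT_LEN (60s).  Here "timeo" would be
 * 3.5*RTO when PAWS allows recycling, or TCP_TIMEWAIT_LEN otherwise.
 */
#if 0
	inet_twsk_schedule(tw, &tcp_death_row, timeo, TCP_TIMEWAIT_LEN);
	inet_twsk_put(tw);	/* drop our ref; the death row holds its own */
#endif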
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
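/*
 * Illustrative only: a sketch of how a protocol wires the two timers and
 * the work struct to the handlers in this file, loosely modeled on TCP's
 * tcp_death_row initializer in tcp_minisocks.c.  Beyond the handler
 * hookups, the field values (hashinfo, period, etc.) are assumptions.
 */
#if 0
static struct inet_timewait_death_row example_death_row = {
	.period		= 60 * HZ / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(example_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&example_death_row),
	.twkill_work	= __WORK_INITIALIZER(example_death_row.twkill_work,
					     inet_twdr_twkill_work),
	.twcal_hand	= -1,	/* recycle wheel idle until first use */
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&example_death_row),
};
#endif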