1/*
2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
3 *		operating system.  INET is implemented using the  BSD Socket
4 *		interface as the means of communication with the user level.
5 *
6 *		Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Authors:	Ross Biro
9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
11 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
12 *		Florian La Roche, <flla@stud.uni-sb.de>
13 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
14 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
15 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
16 *		Matthew Dillon, <dillon@apollo.west.oic.com>
17 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18 *		Jorge Cwik, <jorge@laser.satlink.net>
19 *
20 * Fixes:
21 *		Alan Cox	:	Numerous verify_area() calls
22 *		Alan Cox	:	Set the ACK bit on a reset
23 *		Alan Cox	:	Stopped it crashing if it closed while
24 *					sk->inuse=1 and was trying to connect
25 *					(tcp_err()).
26 *		Alan Cox	:	All icmp error handling was broken
27 *					pointers passed where wrong and the
28 *					socket was looked up backwards. Nobody
29 *					tested any icmp error code obviously.
30 *		Alan Cox	:	tcp_err() now handled properly. It
31 *					wakes people on errors. poll
32 *					behaves and the icmp error race
33 *					has gone by moving it into sock.c
34 *		Alan Cox	:	tcp_send_reset() fixed to work for
35 *					everything not just packets for
36 *					unknown sockets.
37 *		Alan Cox	:	tcp option processing.
38 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
39 *					syn rule wrong]
40 *		Herp Rosmanith  :	More reset fixes
41 *		Alan Cox	:	No longer acks invalid rst frames.
42 *					Acking any kind of RST is right out.
43 *		Alan Cox	:	Sets an ignore me flag on an rst
44 *					receive otherwise odd bits of prattle
45 *					escape still
46 *		Alan Cox	:	Fixed another acking RST frame bug.
47 *					Should stop LAN workplace lockups.
48 *		Alan Cox	: 	Some tidyups using the new skb list
49 *					facilities
50 *		Alan Cox	:	sk->keepopen now seems to work
51 *		Alan Cox	:	Pulls options out correctly on accepts
52 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
53 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
54 *					bit to skb ops.
55 *		Alan Cox	:	Tidied tcp_data to avoid a potential
56 *					nasty.
57 *		Alan Cox	:	Added some better commenting, as the
58 *					tcp is hard to follow
59 *		Alan Cox	:	Removed incorrect check for 20 * psh
60 *	Michael O'Reilly	:	ack < copied bug fix.
61 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
62 *		Alan Cox	:	FIN with no memory -> CRASH
63 *		Alan Cox	:	Added socket option proto entries.
64 *					Also added awareness of them to accept.
65 *		Alan Cox	:	Added TCP options (SOL_TCP)
66 *		Alan Cox	:	Switched wakeup calls to callbacks,
67 *					so the kernel can layer network
68 *					sockets.
69 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
70 *		Alan Cox	:	Handle FIN (more) properly (we hope).
71 *		Alan Cox	:	RST frames sent on unsynchronised
72 *					state ack error.
73 *		Alan Cox	:	Put in missing check for SYN bit.
74 *		Alan Cox	:	Added tcp_select_window() aka NET2E
75 *					window non shrink trick.
76 *		Alan Cox	:	Added a couple of small NET2E timer
77 *					fixes
78 *		Charles Hedrick :	TCP fixes
79 *		Toomas Tamm	:	TCP window fixes
80 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
81 *		Charles Hedrick	:	Rewrote most of it to actually work
82 *		Linus		:	Rewrote tcp_read() and URG handling
83 *					completely
84 *		Gerhard Koerting:	Fixed some missing timer handling
85 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
86 *		Gerhard Koerting:	PC/TCP workarounds
87 *		Adam Caldwell	:	Assorted timer/timing errors
88 *		Matthew Dillon	:	Fixed another RST bug
89 *		Alan Cox	:	Move to kernel side addressing changes.
90 *		Alan Cox	:	Beginning work on TCP fastpathing
91 *					(not yet usable)
92 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
93 *		Alan Cox	:	TCP fast path debugging
94 *		Alan Cox	:	Window clamping
95 *		Michael Riepe	:	Bug in tcp_check()
96 *		Matt Dillon	:	More TCP improvements and RST bug fixes
97 *		Matt Dillon	:	Yet more small nasties remove from the
98 *					TCP code (Be very nice to this man if
99 *					tcp finally works 100%) 8)
100 *		Alan Cox	:	BSD accept semantics.
101 *		Alan Cox	:	Reset on closedown bug.
102 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
103 *		Michael Pall	:	Handle poll() after URG properly in
104 *					all cases.
105 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
106 *					(multi URG PUSH broke rlogin).
107 *		Michael Pall	:	Fix the multi URG PUSH problem in
108 *					tcp_readable(), poll() after URG
109 *					works now.
110 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
111 *					BSD api.
112 *		Alan Cox	:	Changed the semantics of sk->socket to
113 *					fix a race and a signal problem with
114 *					accept() and async I/O.
115 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
116 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
117 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
118 *					clients/servers which listen in on
119 *					fixed ports.
120 *		Alan Cox	:	Cleaned the above up and shrank it to
121 *					a sensible code size.
122 *		Alan Cox	:	Self connect lockup fix.
123 *		Alan Cox	:	No connect to multicast.
124 *		Ross Biro	:	Close unaccepted children on master
125 *					socket close.
126 *		Alan Cox	:	Reset tracing code.
127 *		Alan Cox	:	Spurious resets on shutdown.
128 *		Alan Cox	:	Giant 15 minute/60 second timer error
129 *		Alan Cox	:	Small whoops in polling before an
130 *					accept.
131 *		Alan Cox	:	Kept the state trace facility since
132 *					it's handy for debugging.
133 *		Alan Cox	:	More reset handler fixes.
134 *		Alan Cox	:	Started rewriting the code based on
135 *					the RFC's for other useful protocol
136 *					references see: Comer, KA9Q NOS, and
137 *					for a reference on the difference
138 *					between specifications and how BSD
139 *					works see the 4.4lite source.
140 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
141 *					close.
142 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
143 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
144 *		Alan Cox	:	Reimplemented timers as per the RFC
145 *					and using multiple timers for sanity.
146 *		Alan Cox	:	Small bug fixes, and a lot of new
147 *					comments.
148 *		Alan Cox	:	Fixed dual reader crash by locking
149 *					the buffers (much like datagram.c)
150 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
151 *					now gets fed up of retrying without
152 *					(even a no space) answer.
153 *		Alan Cox	:	Extracted closing code better
154 *		Alan Cox	:	Fixed the closing state machine to
155 *					resemble the RFC.
156 *		Alan Cox	:	More 'per spec' fixes.
157 *		Jorge Cwik	:	Even faster checksumming.
158 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
159 *					only frames. At least one pc tcp stack
160 *					generates them.
161 *		Alan Cox	:	Cache last socket.
162 *		Alan Cox	:	Per route irtt.
163 *		Matt Day	:	poll()->select() match BSD precisely on error
164 *		Alan Cox	:	New buffers
165 *		Marc Tamsky	:	Various sk->prot->retransmits and
166 *					sk->retransmits misupdating fixed.
167 *					Fixed tcp_write_timeout: stuck close,
168 *					and TCP syn retries gets used now.
169 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
170 *					ack if state is TCP_CLOSED.
171 *		Alan Cox	:	Look up device on a retransmit - routes may
172 *					change. Doesn't yet cope with MSS shrink right
173 *					but it's a start!
174 *		Marc Tamsky	:	Closing in closing fixes.
175 *		Mike Shaver	:	RFC1122 verifications.
176 *		Alan Cox	:	rcv_saddr errors.
177 *		Alan Cox	:	Block double connect().
178 *		Alan Cox	:	Small hooks for enSKIP.
179 *		Alexey Kuznetsov:	Path MTU discovery.
180 *		Alan Cox	:	Support soft errors.
181 *		Alan Cox	:	Fix MTU discovery pathological case
182 *					when the remote claims no mtu!
183 *		Marc Tamsky	:	TCP_CLOSE fix.
184 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
185 *					window but wrong (fixes NT lpd problems)
186 *		Pedro Roque	:	Better TCP window handling, delayed ack.
187 *		Joerg Reuter	:	No modification of locked buffers in
188 *					tcp_do_retransmit()
189 *		Eric Schenk	:	Changed receiver side silly window
190 *					avoidance algorithm to BSD style
191 *					algorithm. This doubles throughput
192 *					against machines running Solaris,
193 *					and seems to result in general
194 *					improvement.
195 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
196 *	Willy Konynenberg	:	Transparent proxying support.
197 *	Mike McLagan		:	Routing by source
198 *		Keith Owens	:	Do proper merging with partial SKB's in
199 *					tcp_do_sendmsg to avoid burstiness.
200 *		Eric Schenk	:	Fix fast close down bug with
201 *					shutdown() followed by close().
202 *		Andi Kleen 	:	Make poll agree with SIGIO
203 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
204 *					lingertime == 0 (RFC 793 ABORT Call)
205 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
206 *					csum_and_copy_from_user() if possible.
207 *
208 *		This program is free software; you can redistribute it and/or
209 *		modify it under the terms of the GNU General Public License
210 *		as published by the Free Software Foundation; either version
211 *		2 of the License, or(at your option) any later version.
212 *
213 * Description of States:
214 *
215 *	TCP_SYN_SENT		sent a connection request, waiting for ack
216 *
217 *	TCP_SYN_RECV		received a connection request, sent ack,
218 *				waiting for final ack in three-way handshake.
219 *
220 *	TCP_ESTABLISHED		connection established
221 *
222 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
223 *				transmission of remaining buffered data
224 *
225 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
226 *				to shutdown
227 *
228 *	TCP_CLOSING		both sides have shutdown but we still have
229 *				data we have to finish sending
230 *
231 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
232 *				closed, can only be entered from FIN_WAIT2
233 *				or CLOSING.  Required because the other end
234 *				may not have gotten our last ACK causing it
235 *				to retransmit the data packet (which we ignore)
236 *
237 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
238 *				us to finish writing our data and to shutdown
239 *				(we have to close() to move on to LAST_ACK)
240 *
241 *	TCP_LAST_ACK		out side has shutdown after remote has
242 *				shutdown.  There may still be data in our
243 *				buffer that we have to finish sending
244 *
245 *	TCP_CLOSE		socket is finished
246 */
247
248#include <linux/kernel.h>
249#include <linux/module.h>
250#include <linux/types.h>
251#include <linux/fcntl.h>
252#include <linux/poll.h>
253#include <linux/init.h>
254#include <linux/fs.h>
255#include <linux/skbuff.h>
256#include <linux/scatterlist.h>
257#include <linux/splice.h>
258#include <linux/net.h>
259#include <linux/socket.h>
260#include <linux/random.h>
261#include <linux/bootmem.h>
262#include <linux/highmem.h>
263#include <linux/swap.h>
264#include <linux/cache.h>
265#include <linux/err.h>
266#include <linux/crypto.h>
267#include <linux/time.h>
268#include <linux/slab.h>
269
270#include <net/icmp.h>
271#include <net/tcp.h>
272#include <net/xfrm.h>
273#include <net/ip.h>
274#include <net/netdma.h>
275#include <net/sock.h>
276
277#include <asm/uaccess.h>
278#include <asm/ioctls.h>
279
280#include <typedefs.h>
281#include <bcmdefs.h>
282
283int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
284
285struct percpu_counter tcp_orphan_count;
286EXPORT_SYMBOL_GPL(tcp_orphan_count);
287
288int sysctl_tcp_mem[3] __read_mostly;
289int sysctl_tcp_wmem[3] __read_mostly;
290int sysctl_tcp_rmem[3] __read_mostly;
291
292EXPORT_SYMBOL(sysctl_tcp_mem);
293EXPORT_SYMBOL(sysctl_tcp_rmem);
294EXPORT_SYMBOL(sysctl_tcp_wmem);
295
296atomic_t tcp_memory_allocated;	/* Current allocated memory. */
297EXPORT_SYMBOL(tcp_memory_allocated);
298
299/*
300 * Current number of TCP sockets.
301 */
302struct percpu_counter tcp_sockets_allocated;
303EXPORT_SYMBOL(tcp_sockets_allocated);
304
305/*
306 * TCP splice context
307 */
308struct tcp_splice_state {
309	struct pipe_inode_info *pipe;
310	size_t len;
311	unsigned int flags;
312};
313
314/*
315 * Pressure flag: try to collapse.
316 * Technical note: it is used by multiple contexts non atomically.
317 * All the __sk_mem_schedule() is of this nature: accounting
318 * is strict, actions are advisory and have some latency.
319 */
320int tcp_memory_pressure __read_mostly;
321EXPORT_SYMBOL(tcp_memory_pressure);
322
323void tcp_enter_memory_pressure(struct sock *sk)
324{
325	if (!tcp_memory_pressure) {
326		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
327		tcp_memory_pressure = 1;
328	}
329}
330EXPORT_SYMBOL(tcp_enter_memory_pressure);
331
332/* Convert seconds to retransmits based on initial and max timeout */
333static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
334{
335	u8 res = 0;
336
337	if (seconds > 0) {
338		int period = timeout;
339
340		res = 1;
341		while (seconds > period && res < 255) {
342			res++;
343			timeout <<= 1;
344			if (timeout > rto_max)
345				timeout = rto_max;
346			period += timeout;
347		}
348	}
349	return res;
350}
351
352/* Convert retransmits to seconds based on initial and max timeout */
353static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
354{
355	int period = 0;
356
357	if (retrans > 0) {
358		period = timeout;
359		while (--retrans) {
360			timeout <<= 1;
361			if (timeout > rto_max)
362				timeout = rto_max;
363			period += timeout;
364		}
365	}
366	return period;
367}
368
369/*
370 *	Wait for a TCP event.
371 *
372 *	Note that we don't need to lock the socket, as the upper poll layers
373 *	take care of normal races (between the test and the event) and we don't
374 *	go look at any of the socket buffers directly.
375 */
376unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
377{
378	unsigned int mask;
379	struct sock *sk = sock->sk;
380	struct tcp_sock *tp = tcp_sk(sk);
381
382	sock_poll_wait(file, sk_sleep(sk), wait);
383	if (sk->sk_state == TCP_LISTEN)
384		return inet_csk_listen_poll(sk);
385
386	/* Socket is not locked. We are protected from async events
387	 * by poll logic and correct handling of state changes
388	 * made by other threads is impossible in any case.
389	 */
390
391	mask = 0;
392
393	/*
394	 * POLLHUP is certainly not done right. But poll() doesn't
395	 * have a notion of HUP in just one direction, and for a
396	 * socket the read side is more interesting.
397	 *
398	 * Some poll() documentation says that POLLHUP is incompatible
399	 * with the POLLOUT/POLLWR flags, so somebody should check this
400	 * all. But careful, it tends to be safer to return too many
401	 * bits than too few, and you can easily break real applications
402	 * if you don't tell them that something has hung up!
403	 *
404	 * Check-me.
405	 *
406	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
407	 * our fs/select.c). It means that after we received EOF,
408	 * poll always returns immediately, making impossible poll() on write()
409	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
410	 * if and only if shutdown has been made in both directions.
411	 * Actually, it is interesting to look how Solaris and DUX
412	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
413	 * then we could set it on SND_SHUTDOWN. BTW examples given
414	 * in Stevens' books assume exactly this behaviour, it explains
415	 * why POLLHUP is incompatible with POLLOUT.	--ANK
416	 *
417	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
418	 * blocking on fresh not-connected or disconnected socket. --ANK
419	 */
420	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
421		mask |= POLLHUP;
422	if (sk->sk_shutdown & RCV_SHUTDOWN)
423		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
424
425	/* Connected? */
426	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
427		int target = sock_rcvlowat(sk, 0, INT_MAX);
428
429		if (tp->urg_seq == tp->copied_seq &&
430		    !sock_flag(sk, SOCK_URGINLINE) &&
431		    tp->urg_data)
432			target++;
433
434		/* Potential race condition. If read of tp below will
435		 * escape above sk->sk_state, we can be illegally awaken
436		 * in SYN_* states. */
437		if (tp->rcv_nxt - tp->copied_seq >= target)
438			mask |= POLLIN | POLLRDNORM;
439
440		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
441			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
442				mask |= POLLOUT | POLLWRNORM;
443			} else {  /* send SIGIO later */
444				set_bit(SOCK_ASYNC_NOSPACE,
445					&sk->sk_socket->flags);
446				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
447
448				/* Race breaker. If space is freed after
449				 * wspace test but before the flags are set,
450				 * IO signal will be lost.
451				 */
452				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
453					mask |= POLLOUT | POLLWRNORM;
454			}
455		} else
456			mask |= POLLOUT | POLLWRNORM;
457
458		if (tp->urg_data & TCP_URG_VALID)
459			mask |= POLLPRI;
460	}
461	/* This barrier is coupled with smp_wmb() in tcp_reset() */
462	smp_rmb();
463	if (sk->sk_err)
464		mask |= POLLERR;
465
466	return mask;
467}
468EXPORT_SYMBOL(tcp_poll);
469
470int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
471{
472	struct tcp_sock *tp = tcp_sk(sk);
473	int answ;
474
475	switch (cmd) {
476	case SIOCINQ:
477		if (sk->sk_state == TCP_LISTEN)
478			return -EINVAL;
479
480		lock_sock(sk);
481		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
482			answ = 0;
483		else if (sock_flag(sk, SOCK_URGINLINE) ||
484			 !tp->urg_data ||
485			 before(tp->urg_seq, tp->copied_seq) ||
486			 !before(tp->urg_seq, tp->rcv_nxt)) {
487			struct sk_buff *skb;
488
489			answ = tp->rcv_nxt - tp->copied_seq;
490
491			/* Subtract 1, if FIN is in queue. */
492			skb = skb_peek_tail(&sk->sk_receive_queue);
493			if (answ && skb)
494				answ -= tcp_hdr(skb)->fin;
495		} else
496			answ = tp->urg_seq - tp->copied_seq;
497		release_sock(sk);
498		break;
499	case SIOCATMARK:
500		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
501		break;
502	case SIOCOUTQ:
503		if (sk->sk_state == TCP_LISTEN)
504			return -EINVAL;
505
506		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
507			answ = 0;
508		else
509			answ = tp->write_seq - tp->snd_una;
510		break;
511	default:
512		return -ENOIOCTLCMD;
513	}
514
515	return put_user(answ, (int __user *)arg);
516}
517EXPORT_SYMBOL(tcp_ioctl);
518
519static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
520{
521	TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
522	tp->pushed_seq = tp->write_seq;
523}
524
525static inline int forced_push(struct tcp_sock *tp)
526{
527	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
528}
529
530static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
531{
532	struct tcp_sock *tp = tcp_sk(sk);
533	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
534
535	skb->csum    = 0;
536	tcb->seq     = tcb->end_seq = tp->write_seq;
537	tcb->flags   = TCPHDR_ACK;
538	tcb->sacked  = 0;
539	skb_header_release(skb);
540	tcp_add_write_queue_tail(sk, skb);
541	sk->sk_wmem_queued += skb->truesize;
542	sk_mem_charge(sk, skb->truesize);
543	if (tp->nonagle & TCP_NAGLE_PUSH)
544		tp->nonagle &= ~TCP_NAGLE_PUSH;
545}
546
547static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
548{
549	if (flags & MSG_OOB)
550		tp->snd_up = tp->write_seq;
551}
552
553static inline void tcp_push(struct sock *sk, int flags, int mss_now,
554			    int nonagle)
555{
556	if (tcp_send_head(sk)) {
557		struct tcp_sock *tp = tcp_sk(sk);
558
559		if (!(flags & MSG_MORE) || forced_push(tp))
560			tcp_mark_push(tp, tcp_write_queue_tail(sk));
561
562		tcp_mark_urg(tp, flags);
563		__tcp_push_pending_frames(sk, mss_now,
564					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
565	}
566}
567
568static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
569				unsigned int offset, size_t len)
570{
571	struct tcp_splice_state *tss = rd_desc->arg.data;
572	int ret;
573
574	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
575			      tss->flags);
576	if (ret > 0)
577		rd_desc->count -= ret;
578	return ret;
579}
580
581static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
582{
583	/* Store TCP splice context information in read_descriptor_t. */
584	read_descriptor_t rd_desc = {
585		.arg.data = tss,
586		.count	  = tss->len,
587	};
588
589	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
590}
591
592/**
593 *  tcp_splice_read - splice data from TCP socket to a pipe
594 * @sock:	socket to splice from
595 * @ppos:	position (not valid)
596 * @pipe:	pipe to splice to
597 * @len:	number of bytes to splice
598 * @flags:	splice modifier flags
599 *
600 * Description:
601 *    Will read pages from given socket and fill them into a pipe.
602 *
603 **/
604ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
605			struct pipe_inode_info *pipe, size_t len,
606			unsigned int flags)
607{
608	struct sock *sk = sock->sk;
609	struct tcp_splice_state tss = {
610		.pipe = pipe,
611		.len = len,
612		.flags = flags,
613	};
614	long timeo;
615	ssize_t spliced;
616	int ret;
617
618	sock_rps_record_flow(sk);
619	/*
620	 * We can't seek on a socket input
621	 */
622	if (unlikely(*ppos))
623		return -ESPIPE;
624
625	ret = spliced = 0;
626
627	lock_sock(sk);
628
629	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
630	while (tss.len) {
631		ret = __tcp_splice_read(sk, &tss);
632		if (ret < 0)
633			break;
634		else if (!ret) {
635			if (spliced)
636				break;
637			if (sock_flag(sk, SOCK_DONE))
638				break;
639			if (sk->sk_err) {
640				ret = sock_error(sk);
641				break;
642			}
643			if (sk->sk_shutdown & RCV_SHUTDOWN)
644				break;
645			if (sk->sk_state == TCP_CLOSE) {
646				/*
647				 * This occurs when user tries to read
648				 * from never connected socket.
649				 */
650				if (!sock_flag(sk, SOCK_DONE))
651					ret = -ENOTCONN;
652				break;
653			}
654			if (!timeo) {
655				ret = -EAGAIN;
656				break;
657			}
658			sk_wait_data(sk, &timeo);
659			if (signal_pending(current)) {
660				ret = sock_intr_errno(timeo);
661				break;
662			}
663			continue;
664		}
665		tss.len -= ret;
666		spliced += ret;
667
668		if (!timeo)
669			break;
670		release_sock(sk);
671		lock_sock(sk);
672
673		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
674		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
675		    signal_pending(current))
676			break;
677	}
678
679	release_sock(sk);
680
681	if (spliced)
682		return spliced;
683
684	return ret;
685}
686EXPORT_SYMBOL(tcp_splice_read);
687
688struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
689{
690	struct sk_buff *skb;
691
692	/* The TCP header must be at least 32-bit aligned.  */
693	size = ALIGN(size, 4);
694
695	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
696	if (skb) {
697		if (sk_wmem_schedule(sk, skb->truesize)) {
698			/*
699			 * Make sure that we have exactly size bytes
700			 * available to the caller, no more, no less.
701			 */
702			skb_reserve(skb, skb_tailroom(skb) - size);
703			return skb;
704		}
705		__kfree_skb(skb);
706	} else {
707		sk->sk_prot->enter_memory_pressure(sk);
708		sk_stream_moderate_sndbuf(sk);
709	}
710	return NULL;
711}
712
713static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
714				       int large_allowed)
715{
716	struct tcp_sock *tp = tcp_sk(sk);
717	u32 xmit_size_goal, old_size_goal;
718
719	xmit_size_goal = mss_now;
720
721	if (large_allowed && sk_can_gso(sk)) {
722		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
723				  inet_csk(sk)->icsk_af_ops->net_header_len -
724				  inet_csk(sk)->icsk_ext_hdr_len -
725				  tp->tcp_header_len);
726
727		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
728
729		/* We try hard to avoid divides here */
730		old_size_goal = tp->xmit_size_goal_segs * mss_now;
731
732		if (likely(old_size_goal <= xmit_size_goal &&
733			   old_size_goal + mss_now > xmit_size_goal)) {
734			xmit_size_goal = old_size_goal;
735		} else {
736			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
737			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
738		}
739	}
740
741	return max(xmit_size_goal, mss_now);
742}
743
744static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
745{
746	int mss_now;
747
748	mss_now = tcp_current_mss(sk);
749	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
750
751	return mss_now;
752}
753
754static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
755			 size_t psize, int flags)
756{
757	struct tcp_sock *tp = tcp_sk(sk);
758	int mss_now, size_goal;
759	int err;
760	ssize_t copied;
761	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
762
763	/* Wait for a connection to finish. */
764	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
765		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
766			goto out_err;
767
768	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
769
770	mss_now = tcp_send_mss(sk, &size_goal, flags);
771	copied = 0;
772
773	err = -EPIPE;
774	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
775		goto out_err;
776
777	while (psize > 0) {
778		struct sk_buff *skb = tcp_write_queue_tail(sk);
779		struct page *page = pages[poffset / PAGE_SIZE];
780		int copy, i, can_coalesce;
781		int offset = poffset % PAGE_SIZE;
782		int size = min_t(size_t, psize, PAGE_SIZE - offset);
783
784		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
785new_segment:
786			if (!sk_stream_memory_free(sk))
787				goto wait_for_sndbuf;
788
789			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
790			if (!skb)
791				goto wait_for_memory;
792
793			skb_entail(sk, skb);
794			copy = size_goal;
795		}
796
797		if (copy > size)
798			copy = size;
799
800		i = skb_shinfo(skb)->nr_frags;
801		can_coalesce = skb_can_coalesce(skb, i, page, offset);
802		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
803			tcp_mark_push(tp, skb);
804			goto new_segment;
805		}
806		if (!sk_wmem_schedule(sk, copy))
807			goto wait_for_memory;
808
809		if (can_coalesce) {
810			skb_shinfo(skb)->frags[i - 1].size += copy;
811		} else {
812			get_page(page);
813			skb_fill_page_desc(skb, i, page, offset, copy);
814		}
815
816		skb->len += copy;
817		skb->data_len += copy;
818		skb->truesize += copy;
819		sk->sk_wmem_queued += copy;
820		sk_mem_charge(sk, copy);
821		skb->ip_summed = CHECKSUM_PARTIAL;
822		tp->write_seq += copy;
823		TCP_SKB_CB(skb)->end_seq += copy;
824		skb_shinfo(skb)->gso_segs = 0;
825
826		if (!copied)
827			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
828
829		copied += copy;
830		poffset += copy;
831		if (!(psize -= copy))
832			goto out;
833
834		if (skb->len < size_goal || (flags & MSG_OOB))
835			continue;
836
837		if (forced_push(tp)) {
838			tcp_mark_push(tp, skb);
839			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
840		} else if (skb == tcp_send_head(sk))
841			tcp_push_one(sk, mss_now);
842		continue;
843
844wait_for_sndbuf:
845		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
846wait_for_memory:
847		if (copied)
848			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
849
850		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
851			goto do_error;
852
853		mss_now = tcp_send_mss(sk, &size_goal, flags);
854	}
855
856out:
857#ifdef CONFIG_BCM47XX
858	if (copied && !(flags & MSG_MORE))
859#else
860	if (copied)
861#endif
862		tcp_push(sk, flags, mss_now, tp->nonagle);
863	return copied;
864
865do_error:
866	if (copied)
867		goto out;
868out_err:
869	return sk_stream_error(sk, flags, err);
870}
871
872int BCMFASTPATH_HOST tcp_sendpage(struct sock *sk, struct page *page, int offset,
873		 size_t size, int flags)
874{
875	ssize_t res;
876
877	if (!(sk->sk_route_caps & NETIF_F_SG) ||
878	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
879		return sock_no_sendpage(sk->sk_socket, page, offset, size,
880					flags);
881
882	lock_sock(sk);
883	TCP_CHECK_TIMER(sk);
884	res = do_tcp_sendpages(sk, &page, offset, size, flags);
885	TCP_CHECK_TIMER(sk);
886	release_sock(sk);
887	return res;
888}
889EXPORT_SYMBOL(tcp_sendpage);
890
891#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
892#define TCP_OFF(sk)	(sk->sk_sndmsg_off)
893
894static inline int select_size(struct sock *sk, int sg)
895{
896	struct tcp_sock *tp = tcp_sk(sk);
897	int tmp = tp->mss_cache;
898
899	if (sg) {
900		if (sk_can_gso(sk))
901			tmp = 0;
902		else {
903			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
904
905			if (tmp >= pgbreak &&
906			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
907				tmp = pgbreak;
908		}
909	}
910
911	return tmp;
912}
913
914int BCMFASTPATH_HOST tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
915		size_t size)
916{
917	struct iovec *iov;
918	struct tcp_sock *tp = tcp_sk(sk);
919	struct sk_buff *skb;
920	int iovlen, flags;
921	int mss_now, size_goal;
922	int sg, err, copied;
923	long timeo;
924
925	lock_sock(sk);
926	TCP_CHECK_TIMER(sk);
927
928	flags = msg->msg_flags;
929	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
930
931	/* Wait for a connection to finish. */
932	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
933		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
934			goto out_err;
935
936	/* This should be in poll */
937	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
938
939	mss_now = tcp_send_mss(sk, &size_goal, flags);
940
941	/* Ok commence sending. */
942	iovlen = msg->msg_iovlen;
943	iov = msg->msg_iov;
944	copied = 0;
945
946	err = -EPIPE;
947	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
948		goto out_err;
949
950	sg = sk->sk_route_caps & NETIF_F_SG;
951
952	while (--iovlen >= 0) {
953		size_t seglen = iov->iov_len;
954		unsigned char __user *from = iov->iov_base;
955
956		iov++;
957
958		while (seglen > 0) {
959			int copy = 0;
960			int max = size_goal;
961
962			skb = tcp_write_queue_tail(sk);
963			if (tcp_send_head(sk)) {
964				if (skb->ip_summed == CHECKSUM_NONE)
965					max = mss_now;
966				copy = max - skb->len;
967			}
968
969			if (copy <= 0) {
970new_segment:
971				/* Allocate new segment. If the interface is SG,
972				 * allocate skb fitting to single page.
973				 */
974				if (!sk_stream_memory_free(sk))
975					goto wait_for_sndbuf;
976
977				skb = sk_stream_alloc_skb(sk,
978							  select_size(sk, sg),
979							  sk->sk_allocation);
980				if (!skb)
981					goto wait_for_memory;
982
983				/*
984				 * Check whether we can use HW checksum.
985				 */
986				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
987					skb->ip_summed = CHECKSUM_PARTIAL;
988
989				skb_entail(sk, skb);
990				copy = size_goal;
991				max = size_goal;
992			}
993
994			/* Try to append data to the end of skb. */
995			if (copy > seglen)
996				copy = seglen;
997
998			/* Where to copy to? */
999			if (skb_tailroom(skb) > 0) {
1000				/* We have some space in skb head. Superb! */
1001				if (copy > skb_tailroom(skb))
1002					copy = skb_tailroom(skb);
1003				if ((err = skb_add_data(skb, from, copy)) != 0)
1004					goto do_fault;
1005			} else {
1006				int merge = 0;
1007				int i = skb_shinfo(skb)->nr_frags;
1008				struct page *page = TCP_PAGE(sk);
1009				int off = TCP_OFF(sk);
1010
1011				if (skb_can_coalesce(skb, i, page, off) &&
1012				    off != PAGE_SIZE) {
1013					/* We can extend the last page
1014					 * fragment. */
1015					merge = 1;
1016				} else if (i == MAX_SKB_FRAGS || !sg) {
1017					/* Need to add new fragment and cannot
1018					 * do this because interface is non-SG,
1019					 * or because all the page slots are
1020					 * busy. */
1021					tcp_mark_push(tp, skb);
1022					goto new_segment;
1023				} else if (page) {
1024					if (off == PAGE_SIZE) {
1025						put_page(page);
1026						TCP_PAGE(sk) = page = NULL;
1027						off = 0;
1028					}
1029				} else
1030					off = 0;
1031
1032				if (copy > PAGE_SIZE - off)
1033					copy = PAGE_SIZE - off;
1034
1035				if (!sk_wmem_schedule(sk, copy))
1036					goto wait_for_memory;
1037
1038				if (!page) {
1039					/* Allocate new cache page. */
1040					if (!(page = sk_stream_alloc_page(sk)))
1041						goto wait_for_memory;
1042				}
1043
1044				/* Time to copy data. We are close to
1045				 * the end! */
1046				err = skb_copy_to_page(sk, from, skb, page,
1047						       off, copy);
1048				if (err) {
1049					/* If this page was new, give it to the
1050					 * socket so it does not get leaked.
1051					 */
1052					if (!TCP_PAGE(sk)) {
1053						TCP_PAGE(sk) = page;
1054						TCP_OFF(sk) = 0;
1055					}
1056					goto do_error;
1057				}
1058
1059				/* Update the skb. */
1060				if (merge) {
1061					skb_shinfo(skb)->frags[i - 1].size +=
1062									copy;
1063				} else {
1064					skb_fill_page_desc(skb, i, page, off, copy);
1065					if (TCP_PAGE(sk)) {
1066						get_page(page);
1067					} else if (off + copy < PAGE_SIZE) {
1068						get_page(page);
1069						TCP_PAGE(sk) = page;
1070					}
1071				}
1072
1073				TCP_OFF(sk) = off + copy;
1074			}
1075
1076			if (!copied)
1077				TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
1078
1079			tp->write_seq += copy;
1080			TCP_SKB_CB(skb)->end_seq += copy;
1081			skb_shinfo(skb)->gso_segs = 0;
1082
1083			from += copy;
1084			copied += copy;
1085			if ((seglen -= copy) == 0 && iovlen == 0)
1086				goto out;
1087
1088			if (skb->len < max || (flags & MSG_OOB))
1089				continue;
1090
1091			if (forced_push(tp)) {
1092				tcp_mark_push(tp, skb);
1093				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1094			} else if (skb == tcp_send_head(sk))
1095				tcp_push_one(sk, mss_now);
1096			continue;
1097
1098wait_for_sndbuf:
1099			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1100wait_for_memory:
1101			if (copied)
1102				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
1103
1104			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1105				goto do_error;
1106
1107			mss_now = tcp_send_mss(sk, &size_goal, flags);
1108		}
1109	}
1110
1111out:
1112	if (copied)
1113		tcp_push(sk, flags, mss_now, tp->nonagle);
1114	TCP_CHECK_TIMER(sk);
1115	release_sock(sk);
1116	return copied;
1117
1118do_fault:
1119	if (!skb->len) {
1120		tcp_unlink_write_queue(skb, sk);
1121		/* It is the one place in all of TCP, except connection
1122		 * reset, where we can be unlinking the send_head.
1123		 */
1124		tcp_check_send_head(sk, skb);
1125		sk_wmem_free_skb(sk, skb);
1126	}
1127
1128do_error:
1129	if (copied)
1130		goto out;
1131out_err:
1132	err = sk_stream_error(sk, flags, err);
1133	TCP_CHECK_TIMER(sk);
1134	release_sock(sk);
1135	return err;
1136}
1137EXPORT_SYMBOL(tcp_sendmsg);
1138
1139/*
1140 *	Handle reading urgent data. BSD has very simple semantics for
1141 *	this, no blocking and very strange errors 8)
1142 */
1143
1144static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1145{
1146	struct tcp_sock *tp = tcp_sk(sk);
1147
1148	/* No URG data to read. */
1149	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1150	    tp->urg_data == TCP_URG_READ)
1151		return -EINVAL;	/* Yes this is right ! */
1152
1153	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1154		return -ENOTCONN;
1155
1156	if (tp->urg_data & TCP_URG_VALID) {
1157		int err = 0;
1158		char c = tp->urg_data;
1159
1160		if (!(flags & MSG_PEEK))
1161			tp->urg_data = TCP_URG_READ;
1162
1163		/* Read urgent data. */
1164		msg->msg_flags |= MSG_OOB;
1165
1166		if (len > 0) {
1167			if (!(flags & MSG_TRUNC))
1168				err = memcpy_toiovec(msg->msg_iov, &c, 1);
1169			len = 1;
1170		} else
1171			msg->msg_flags |= MSG_TRUNC;
1172
1173		return err ? -EFAULT : len;
1174	}
1175
1176	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1177		return 0;
1178
1179	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1180	 * the available implementations agree in this case:
1181	 * this call should never block, independent of the
1182	 * blocking state of the socket.
1183	 * Mike <pall@rz.uni-karlsruhe.de>
1184	 */
1185	return -EAGAIN;
1186}
1187
1188/* Clean up the receive buffer for full frames taken by the user,
1189 * then send an ACK if necessary.  COPIED is the number of bytes
1190 * tcp_recvmsg has given to the user so far, it speeds up the
1191 * calculation of whether or not we must ACK for the sake of
1192 * a window update.
1193 */
1194void tcp_cleanup_rbuf(struct sock *sk, int copied)
1195{
1196	struct tcp_sock *tp = tcp_sk(sk);
1197	int time_to_ack = 0;
1198
1199#if TCP_DEBUG
1200	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1201
1202	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1203	     KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1204	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1205#endif
1206
1207	if (inet_csk_ack_scheduled(sk)) {
1208		const struct inet_connection_sock *icsk = inet_csk(sk);
1209		   /* Delayed ACKs frequently hit locked sockets during bulk
1210		    * receive. */
1211		if (icsk->icsk_ack.blocked ||
1212		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
1213		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1214		    /*
1215		     * If this read emptied read buffer, we send ACK, if
1216		     * connection is not bidirectional, user drained
1217		     * receive buffer and there was a small segment
1218		     * in queue.
1219		     */
1220		    (copied > 0 &&
1221		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1222		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1223		       !icsk->icsk_ack.pingpong)) &&
1224		      !atomic_read(&sk->sk_rmem_alloc)))
1225			time_to_ack = 1;
1226	}
1227
1228	/* We send an ACK if we can now advertise a non-zero window
1229	 * which has been raised "significantly".
1230	 *
1231	 * Even if window raised up to infinity, do not send window open ACK
1232	 * in states, where we will not receive more. It is useless.
1233	 */
1234	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1235		__u32 rcv_window_now = tcp_receive_window(tp);
1236
1237		/* Optimize, __tcp_select_window() is not cheap. */
1238		if (2*rcv_window_now <= tp->window_clamp) {
1239			__u32 new_window = __tcp_select_window(sk);
1240
1241			/* Send ACK now, if this read freed lots of space
1242			 * in our buffer. Certainly, new_window is new window.
1243			 * We can advertise it now, if it is not less than current one.
1244			 * "Lots" means "at least twice" here.
1245			 */
1246			if (new_window && new_window >= 2 * rcv_window_now)
1247				time_to_ack = 1;
1248		}
1249	}
1250	if (time_to_ack)
1251		tcp_send_ack(sk);
1252}
1253
1254static void tcp_prequeue_process(struct sock *sk)
1255{
1256	struct sk_buff *skb;
1257	struct tcp_sock *tp = tcp_sk(sk);
1258
1259	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1260
1261	/* RX process wants to run with disabled BHs, though it is not
1262	 * necessary */
1263	local_bh_disable();
1264	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1265		sk_backlog_rcv(sk, skb);
1266	local_bh_enable();
1267
1268	/* Clear memory counter. */
1269	tp->ucopy.memory = 0;
1270}
1271
1272#ifdef CONFIG_NET_DMA
1273static void tcp_service_net_dma(struct sock *sk, bool wait)
1274{
1275	dma_cookie_t done, used;
1276	dma_cookie_t last_issued;
1277	struct tcp_sock *tp = tcp_sk(sk);
1278
1279	if (!tp->ucopy.dma_chan)
1280		return;
1281
1282	last_issued = tp->ucopy.dma_cookie;
1283	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1284
1285	do {
1286		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1287					      last_issued, &done,
1288					      &used) == DMA_SUCCESS) {
1289			/* Safe to free early-copied skbs now */
1290			__skb_queue_purge(&sk->sk_async_wait_queue);
1291			break;
1292		} else {
1293			struct sk_buff *skb;
1294			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1295			       (dma_async_is_complete(skb->dma_cookie, done,
1296						      used) == DMA_SUCCESS)) {
1297				__skb_dequeue(&sk->sk_async_wait_queue);
1298				kfree_skb(skb);
1299			}
1300		}
1301	} while (wait);
1302}
1303#endif
1304
1305static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1306{
1307	struct sk_buff *skb;
1308	u32 offset;
1309
1310	skb_queue_walk(&sk->sk_receive_queue, skb) {
1311		offset = seq - TCP_SKB_CB(skb)->seq;
1312		if (tcp_hdr(skb)->syn)
1313			offset--;
1314		if (offset < skb->len || tcp_hdr(skb)->fin) {
1315			*off = offset;
1316			return skb;
1317		}
1318	}
1319	return NULL;
1320}
1321
1322/*
1323 * This routine provides an alternative to tcp_recvmsg() for routines
1324 * that would like to handle copying from skbuffs directly in 'sendfile'
1325 * fashion.
1326 * Note:
1327 *	- It is assumed that the socket was locked by the caller.
1328 *	- The routine does not block.
1329 *	- At present, there is no support for reading OOB data
1330 *	  or for 'peeking' the socket using this routine
1331 *	  (although both would be easy to implement).
1332 */
1333int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1334		  sk_read_actor_t recv_actor)
1335{
1336	struct sk_buff *skb;
1337	struct tcp_sock *tp = tcp_sk(sk);
1338	u32 seq = tp->copied_seq;
1339	u32 offset;
1340	int copied = 0;
1341
1342	if (sk->sk_state == TCP_LISTEN)
1343		return -ENOTCONN;
1344	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1345		if (offset < skb->len) {
1346			int used;
1347			size_t len;
1348
1349			len = skb->len - offset;
1350			/* Stop reading if we hit a patch of urgent data */
1351			if (tp->urg_data) {
1352				u32 urg_offset = tp->urg_seq - seq;
1353				if (urg_offset < len)
1354					len = urg_offset;
1355				if (!len)
1356					break;
1357			}
1358			used = recv_actor(desc, skb, offset, len);
1359			if (used < 0) {
1360				if (!copied)
1361					copied = used;
1362				break;
1363			} else if (used <= len) {
1364				seq += used;
1365				copied += used;
1366				offset += used;
1367			}
1368			/*
1369			 * If recv_actor drops the lock (e.g. TCP splice
1370			 * receive) the skb pointer might be invalid when
1371			 * getting here: tcp_collapse might have deleted it
1372			 * while aggregating skbs from the socket queue.
1373			 */
1374			skb = tcp_recv_skb(sk, seq-1, &offset);
1375			if (!skb || (offset+1 != skb->len))
1376				break;
1377		}
1378		if (tcp_hdr(skb)->fin) {
1379			sk_eat_skb(sk, skb, 0);
1380			++seq;
1381			break;
1382		}
1383		sk_eat_skb(sk, skb, 0);
1384		if (!desc->count)
1385			break;
1386		tp->copied_seq = seq;
1387	}
1388	tp->copied_seq = seq;
1389
1390	tcp_rcv_space_adjust(sk);
1391
1392	/* Clean up data we have read: This will do ACK frames. */
1393	if (copied > 0)
1394		tcp_cleanup_rbuf(sk, copied);
1395	return copied;
1396}
1397EXPORT_SYMBOL(tcp_read_sock);
1398
1399/*
1400 *	This routine copies from a sock struct into the user buffer.
1401 *
1402 *	Technical note: in 2.3 we work on _locked_ socket, so that
1403 *	tricks with *seq access order and skb->users are not required.
1404 *	Probably, code can be easily improved even more.
1405 */
1406
1407int BCMFASTPATH_HOST tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1408		size_t len, int nonblock, int flags, int *addr_len)
1409{
1410	struct tcp_sock *tp = tcp_sk(sk);
1411	int copied = 0;
1412	u32 peek_seq;
1413	u32 *seq;
1414	unsigned long used;
1415	int err;
1416	int target;		/* Read at least this many bytes */
1417	long timeo;
1418	struct task_struct *user_recv = NULL;
1419	int copied_early = 0;
1420	struct sk_buff *skb;
1421	u32 urg_hole = 0;
1422
1423	lock_sock(sk);
1424
1425	TCP_CHECK_TIMER(sk);
1426
1427	err = -ENOTCONN;
1428	if (sk->sk_state == TCP_LISTEN)
1429		goto out;
1430
1431	timeo = sock_rcvtimeo(sk, nonblock);
1432
1433	/* Urgent data needs to be handled specially. */
1434	if (flags & MSG_OOB)
1435		goto recv_urg;
1436
1437	seq = &tp->copied_seq;
1438	if (flags & MSG_PEEK) {
1439		peek_seq = tp->copied_seq;
1440		seq = &peek_seq;
1441	}
1442
1443	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1444
1445#ifdef CONFIG_NET_DMA
1446	tp->ucopy.dma_chan = NULL;
1447	preempt_disable();
1448	skb = skb_peek_tail(&sk->sk_receive_queue);
1449	{
1450		int available = 0;
1451
1452		if (skb)
1453			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1454		if ((available < target) &&
1455		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1456		    !sysctl_tcp_low_latency &&
1457		    dma_find_channel(DMA_MEMCPY)) {
1458			preempt_enable_no_resched();
1459			tp->ucopy.pinned_list =
1460					dma_pin_iovec_pages(msg->msg_iov, len);
1461		} else {
1462			preempt_enable_no_resched();
1463		}
1464	}
1465#endif
1466
1467	do {
1468		u32 offset;
1469
1470		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1471		if (tp->urg_data && tp->urg_seq == *seq) {
1472			if (copied)
1473				break;
1474			if (signal_pending(current)) {
1475				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1476				break;
1477			}
1478		}
1479
1480		/* Next get a buffer. */
1481
1482		skb_queue_walk(&sk->sk_receive_queue, skb) {
1483			/* Now that we have two receive queues this
1484			 * shouldn't happen.
1485			 */
1486			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1487			     KERN_INFO "recvmsg bug: copied %X "
1488				       "seq %X rcvnxt %X fl %X\n", *seq,
1489				       TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1490				       flags))
1491				break;
1492
1493			offset = *seq - TCP_SKB_CB(skb)->seq;
1494			if (tcp_hdr(skb)->syn)
1495				offset--;
1496			if (offset < skb->len)
1497				goto found_ok_skb;
1498			if (tcp_hdr(skb)->fin)
1499				goto found_fin_ok;
1500			WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
1501					"copied %X seq %X rcvnxt %X fl %X\n",
1502					*seq, TCP_SKB_CB(skb)->seq,
1503					tp->rcv_nxt, flags);
1504		}
1505
1506		/* Well, if we have backlog, try to process it now yet. */
1507
1508		if (copied >= target && !sk->sk_backlog.tail)
1509			break;
1510
1511		if (copied) {
1512			if (sk->sk_err ||
1513			    sk->sk_state == TCP_CLOSE ||
1514			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1515			    !timeo ||
1516			    signal_pending(current))
1517				break;
1518		} else {
1519			if (sock_flag(sk, SOCK_DONE))
1520				break;
1521
1522			if (sk->sk_err) {
1523				copied = sock_error(sk);
1524				break;
1525			}
1526
1527			if (sk->sk_shutdown & RCV_SHUTDOWN)
1528				break;
1529
1530			if (sk->sk_state == TCP_CLOSE) {
1531				if (!sock_flag(sk, SOCK_DONE)) {
1532					/* This occurs when user tries to read
1533					 * from never connected socket.
1534					 */
1535					copied = -ENOTCONN;
1536					break;
1537				}
1538				break;
1539			}
1540
1541			if (!timeo) {
1542				copied = -EAGAIN;
1543				break;
1544			}
1545
1546			if (signal_pending(current)) {
1547				copied = sock_intr_errno(timeo);
1548				break;
1549			}
1550		}
1551
1552		tcp_cleanup_rbuf(sk, copied);
1553
1554		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1555			/* Install new reader */
1556			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1557				user_recv = current;
1558				tp->ucopy.task = user_recv;
1559				tp->ucopy.iov = msg->msg_iov;
1560			}
1561
1562			tp->ucopy.len = len;
1563
1564			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1565				!(flags & (MSG_PEEK | MSG_TRUNC)));
1566
1567			/* Ugly... If prequeue is not empty, we have to
1568			 * process it before releasing socket, otherwise
1569			 * order will be broken at second iteration.
1570			 * More elegant solution is required!!!
1571			 *
1572			 * Look: we have the following (pseudo)queues:
1573			 *
1574			 * 1. packets in flight
1575			 * 2. backlog
1576			 * 3. prequeue
1577			 * 4. receive_queue
1578			 *
1579			 * Each queue can be processed only if the next ones
1580			 * are empty. At this point we have empty receive_queue.
1581			 * But prequeue _can_ be not empty after 2nd iteration,
1582			 * when we jumped to start of loop because backlog
1583			 * processing added something to receive_queue.
1584			 * We cannot release_sock(), because backlog contains
1585			 * packets arrived _after_ prequeued ones.
1586			 *
1587			 * Shortly, algorithm is clear --- to process all
1588			 * the queues in order. We could make it more directly,
1589			 * requeueing packets from backlog to prequeue, if
1590			 * is not empty. It is more elegant, but eats cycles,
1591			 * unfortunately.
1592			 */
1593			if (!skb_queue_empty(&tp->ucopy.prequeue))
1594				goto do_prequeue;
1595
1596			/* __ Set realtime policy in scheduler __ */
1597		}
1598
1599#ifdef CONFIG_NET_DMA
1600		if (tp->ucopy.dma_chan)
1601			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1602#endif
1603		if (copied >= target) {
1604			/* Do not sleep, just process backlog. */
1605			release_sock(sk);
1606			lock_sock(sk);
1607		} else
1608			sk_wait_data(sk, &timeo);
1609
1610#ifdef CONFIG_NET_DMA
1611		tcp_service_net_dma(sk, false);  /* Don't block */
1612		tp->ucopy.wakeup = 0;
1613#endif
1614
1615		if (user_recv) {
1616			int chunk;
1617
1618			/* __ Restore normal policy in scheduler __ */
1619
1620			if ((chunk = len - tp->ucopy.len) != 0) {
1621				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1622				len -= chunk;
1623				copied += chunk;
1624			}
1625
1626			if (tp->rcv_nxt == tp->copied_seq &&
1627			    !skb_queue_empty(&tp->ucopy.prequeue)) {
1628do_prequeue:
1629				tcp_prequeue_process(sk);
1630
1631				if ((chunk = len - tp->ucopy.len) != 0) {
1632					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1633					len -= chunk;
1634					copied += chunk;
1635				}
1636			}
1637		}
1638		if ((flags & MSG_PEEK) &&
1639		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
1640			if (net_ratelimit())
1641				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1642				       current->comm, task_pid_nr(current));
1643			peek_seq = tp->copied_seq;
1644		}
1645		continue;
1646
1647	found_ok_skb:
1648		/* Ok so how much can we use? */
1649		used = skb->len - offset;
1650		if (len < used)
1651			used = len;
1652
1653		/* Do we have urgent data here? */
1654		if (tp->urg_data) {
1655			u32 urg_offset = tp->urg_seq - *seq;
1656			if (urg_offset < used) {
1657				if (!urg_offset) {
1658					if (!sock_flag(sk, SOCK_URGINLINE)) {
1659						++*seq;
1660						urg_hole++;
1661						offset++;
1662						used--;
1663						if (!used)
1664							goto skip_copy;
1665					}
1666				} else
1667					used = urg_offset;
1668			}
1669		}
1670
1671		if (!(flags & MSG_TRUNC)) {
1672#ifdef CONFIG_NET_DMA
1673			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1674				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1675
1676			if (tp->ucopy.dma_chan) {
1677				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1678					tp->ucopy.dma_chan, skb, offset,
1679					msg->msg_iov, used,
1680					tp->ucopy.pinned_list);
1681
1682				if (tp->ucopy.dma_cookie < 0) {
1683
1684					printk(KERN_ALERT "dma_cookie < 0\n");
1685
1686					/* Exception. Bailout! */
1687					if (!copied)
1688						copied = -EFAULT;
1689					break;
1690				}
1691
1692				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1693
1694				if ((offset + used) == skb->len)
1695					copied_early = 1;
1696
1697			} else
1698#endif
1699			{
1700				err = skb_copy_datagram_iovec(skb, offset,
1701						msg->msg_iov, used);
1702				if (err) {
1703					/* Exception. Bailout! */
1704					if (!copied)
1705						copied = -EFAULT;
1706					break;
1707				}
1708			}
1709		}
1710
1711		*seq += used;
1712		copied += used;
1713		len -= used;
1714
1715		tcp_rcv_space_adjust(sk);
1716
1717skip_copy:
1718		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1719			tp->urg_data = 0;
1720			tcp_fast_path_check(sk);
1721		}
1722		if (used + offset < skb->len)
1723			continue;
1724
1725		if (tcp_hdr(skb)->fin)
1726			goto found_fin_ok;
1727		if (!(flags & MSG_PEEK)) {
1728			sk_eat_skb(sk, skb, copied_early);
1729			copied_early = 0;
1730		}
1731		continue;
1732
1733	found_fin_ok:
1734		/* Process the FIN. */
1735		++*seq;
1736		if (!(flags & MSG_PEEK)) {
1737			sk_eat_skb(sk, skb, copied_early);
1738			copied_early = 0;
1739		}
1740		break;
1741	} while (len > 0);
1742
1743	if (user_recv) {
1744		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1745			int chunk;
1746
1747			tp->ucopy.len = copied > 0 ? len : 0;
1748
1749			tcp_prequeue_process(sk);
1750
1751			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1752				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1753				len -= chunk;
1754				copied += chunk;
1755			}
1756		}
1757
1758		tp->ucopy.task = NULL;
1759		tp->ucopy.len = 0;
1760	}
1761
1762#ifdef CONFIG_NET_DMA
1763	tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
1764	tp->ucopy.dma_chan = NULL;
1765
1766	if (tp->ucopy.pinned_list) {
1767		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1768		tp->ucopy.pinned_list = NULL;
1769	}
1770#endif
1771
1772	/* According to UNIX98, msg_name/msg_namelen are ignored
1773	 * on connected socket. I was just happy when found this 8) --ANK
1774	 */
1775
1776	/* Clean up data we have read: This will do ACK frames. */
1777	tcp_cleanup_rbuf(sk, copied);
1778
1779	TCP_CHECK_TIMER(sk);
1780	release_sock(sk);
1781	return copied;
1782
1783out:
1784	TCP_CHECK_TIMER(sk);
1785	release_sock(sk);
1786	return err;
1787
1788recv_urg:
1789	err = tcp_recv_urg(sk, msg, len, flags);
1790	goto out;
1791}
1792EXPORT_SYMBOL(tcp_recvmsg);
1793
1794void tcp_set_state(struct sock *sk, int state)
1795{
1796	int oldstate = sk->sk_state;
1797
1798	switch (state) {
1799	case TCP_ESTABLISHED:
1800		if (oldstate != TCP_ESTABLISHED)
1801			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1802		break;
1803
1804	case TCP_CLOSE:
1805		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1806			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
1807
1808		sk->sk_prot->unhash(sk);
1809		if (inet_csk(sk)->icsk_bind_hash &&
1810		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1811			inet_put_port(sk);
1812		/* fall through */
1813	default:
1814		if (oldstate == TCP_ESTABLISHED)
1815			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1816	}
1817
1818	/* Change state AFTER socket is unhashed to avoid closed
1819	 * socket sitting in hash tables.
1820	 */
1821	sk->sk_state = state;
1822
1823#ifdef STATE_TRACE
1824	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
1825#endif
1826}
1827EXPORT_SYMBOL_GPL(tcp_set_state);
1828
1829/*
1830 *	State processing on a close. This implements the state shift for
1831 *	sending our FIN frame. Note that we only send a FIN for some
1832 *	states. A shutdown() may have already sent the FIN, or we may be
1833 *	closed.
1834 */
1835
1836static const unsigned char new_state[16] = {
1837  /* current state:        new state:      action:	*/
1838  /* (Invalid)		*/ TCP_CLOSE,
1839  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1840  /* TCP_SYN_SENT	*/ TCP_CLOSE,
1841  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1842  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
1843  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
1844  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
1845  /* TCP_CLOSE		*/ TCP_CLOSE,
1846  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
1847  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
1848  /* TCP_LISTEN		*/ TCP_CLOSE,
1849  /* TCP_CLOSING	*/ TCP_CLOSING,
1850};
1851
1852static int tcp_close_state(struct sock *sk)
1853{
1854	int next = (int)new_state[sk->sk_state];
1855	int ns = next & TCP_STATE_MASK;
1856
1857	tcp_set_state(sk, ns);
1858
1859	return next & TCP_ACTION_FIN;
1860}
1861
1862/*
1863 *	Shutdown the sending side of a connection. Much like close except
1864 *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
1865 */
1866
1867void tcp_shutdown(struct sock *sk, int how)
1868{
1869	/*	We need to grab some memory, and put together a FIN,
1870	 *	and then put it into the queue to be sent.
1871	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1872	 */
1873	if (!(how & SEND_SHUTDOWN))
1874		return;
1875
1876	/* If we've already sent a FIN, or it's a closed state, skip this. */
1877	if ((1 << sk->sk_state) &
1878	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1879	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1880		/* Clear out any half completed packets.  FIN if needed. */
1881		if (tcp_close_state(sk))
1882			tcp_send_fin(sk);
1883	}
1884}
1885EXPORT_SYMBOL(tcp_shutdown);
1886
1887void tcp_close(struct sock *sk, long timeout)
1888{
1889	struct sk_buff *skb;
1890	int data_was_unread = 0;
1891	int state;
1892
1893	lock_sock(sk);
1894	sk->sk_shutdown = SHUTDOWN_MASK;
1895
1896	if (sk->sk_state == TCP_LISTEN) {
1897		tcp_set_state(sk, TCP_CLOSE);
1898
1899		/* Special case. */
1900		inet_csk_listen_stop(sk);
1901
1902		goto adjudge_to_death;
1903	}
1904
1905	/*  We need to flush the recv. buffs.  We do this only on the
1906	 *  descriptor close, not protocol-sourced closes, because the
1907	 *  reader process may not have drained the data yet!
1908	 */
1909	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1910		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1911			  tcp_hdr(skb)->fin;
1912		data_was_unread += len;
1913		__kfree_skb(skb);
1914	}
1915
1916	sk_mem_reclaim(sk);
1917
1918	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
1919	if (sk->sk_state == TCP_CLOSE)
1920		goto adjudge_to_death;
1921
1922	/* As outlined in RFC 2525, section 2.17, we send a RST here because
1923	 * data was lost. To witness the awful effects of the old behavior of
1924	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1925	 * GET in an FTP client, suspend the process, wait for the client to
1926	 * advertise a zero window, then kill -9 the FTP client, wheee...
1927	 * Note: timeout is always zero in such a case.
1928	 */
1929	if (data_was_unread) {
1930		/* Unread data was tossed, zap the connection. */
1931		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
1932		tcp_set_state(sk, TCP_CLOSE);
1933		tcp_send_active_reset(sk, sk->sk_allocation);
1934	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1935		/* Check zero linger _after_ checking for unread data. */
1936		sk->sk_prot->disconnect(sk, 0);
1937		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
1938	} else if (tcp_close_state(sk)) {
1939		/* We FIN if the application ate all the data before
1940		 * zapping the connection.
1941		 */
1942
1943		/* RED-PEN. Formally speaking, we have broken TCP state
1944		 * machine. State transitions:
1945		 *
1946		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1947		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
1948		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1949		 *
1950		 * are legal only when FIN has been sent (i.e. in window),
1951		 * rather than queued out of window. Purists blame.
1952		 *
1953		 * F.e. "RFC state" is ESTABLISHED,
1954		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1955		 *
1956		 * The visible declinations are that sometimes
1957		 * we enter time-wait state, when it is not required really
1958		 * (harmless), do not send active resets, when they are
1959		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1960		 * they look as CLOSING or LAST_ACK for Linux)
1961		 * Probably, I missed some more holelets.
1962		 * 						--ANK
1963		 */
1964		tcp_send_fin(sk);
1965	}
1966
1967	sk_stream_wait_close(sk, timeout);
1968
1969adjudge_to_death:
1970	state = sk->sk_state;
1971	sock_hold(sk);
1972	sock_orphan(sk);
1973
1974	/* It is the last release_sock in its life. It will remove backlog. */
1975	release_sock(sk);
1976
1977
1978	/* Now socket is owned by kernel and we acquire BH lock
1979	   to finish close. No need to check for user refs.
1980	 */
1981	local_bh_disable();
1982	bh_lock_sock(sk);
1983	WARN_ON(sock_owned_by_user(sk));
1984
1985	percpu_counter_inc(sk->sk_prot->orphan_count);
1986
1987	/* Have we already been destroyed by a softirq or backlog? */
1988	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1989		goto out;
1990
1991	/*	This is a (useful) BSD violating of the RFC. There is a
1992	 *	problem with TCP as specified in that the other end could
1993	 *	keep a socket open forever with no application left this end.
1994	 *	We use a 3 minute timeout (about the same as BSD) then kill
1995	 *	our end. If they send after that then tough - BUT: long enough
1996	 *	that we won't make the old 4*rto = almost no time - whoops
1997	 *	reset mistake.
1998	 *
1999	 *	Nope, it was not mistake. It is really desired behaviour
2000	 *	f.e. on http servers, when such sockets are useless, but
2001	 *	consume significant resources. Let's do it with special
2002	 *	linger2	option.					--ANK
2003	 */
2004
2005	if (sk->sk_state == TCP_FIN_WAIT2) {
2006		struct tcp_sock *tp = tcp_sk(sk);
2007		if (tp->linger2 < 0) {
2008			tcp_set_state(sk, TCP_CLOSE);
2009			tcp_send_active_reset(sk, GFP_ATOMIC);
2010			NET_INC_STATS_BH(sock_net(sk),
2011					LINUX_MIB_TCPABORTONLINGER);
2012		} else {
2013			const int tmo = tcp_fin_time(sk);
2014
2015			if (tmo > TCP_TIMEWAIT_LEN) {
2016				inet_csk_reset_keepalive_timer(sk,
2017						tmo - TCP_TIMEWAIT_LEN);
2018			} else {
2019				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2020				goto out;
2021			}
2022		}
2023	}
2024	if (sk->sk_state != TCP_CLOSE) {
2025		sk_mem_reclaim(sk);
2026		if (tcp_too_many_orphans(sk, 0)) {
2027			if (net_ratelimit())
2028				printk(KERN_INFO "TCP: too many of orphaned "
2029				       "sockets\n");
2030			tcp_set_state(sk, TCP_CLOSE);
2031			tcp_send_active_reset(sk, GFP_ATOMIC);
2032			NET_INC_STATS_BH(sock_net(sk),
2033					LINUX_MIB_TCPABORTONMEMORY);
2034		}
2035	}
2036
2037	if (sk->sk_state == TCP_CLOSE)
2038		inet_csk_destroy_sock(sk);
2039	/* Otherwise, socket is reprieved until protocol close. */
2040
2041out:
2042	bh_unlock_sock(sk);
2043	local_bh_enable();
2044	sock_put(sk);
2045}
2046EXPORT_SYMBOL(tcp_close);
2047
2048/* These states need RST on ABORT according to RFC793 */
2049
2050static inline int tcp_need_reset(int state)
2051{
2052	return (1 << state) &
2053	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2054		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2055}
2056
2057int tcp_disconnect(struct sock *sk, int flags)
2058{
2059	struct inet_sock *inet = inet_sk(sk);
2060	struct inet_connection_sock *icsk = inet_csk(sk);
2061	struct tcp_sock *tp = tcp_sk(sk);
2062	int err = 0;
2063	int old_state = sk->sk_state;
2064
2065	if (old_state != TCP_CLOSE)
2066		tcp_set_state(sk, TCP_CLOSE);
2067
2068	/* ABORT function of RFC793 */
2069	if (old_state == TCP_LISTEN) {
2070		inet_csk_listen_stop(sk);
2071	} else if (tcp_need_reset(old_state) ||
2072		   (tp->snd_nxt != tp->write_seq &&
2073		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2074		/* The last check adjusts for discrepancy of Linux wrt. RFC
2075		 * states
2076		 */
2077		tcp_send_active_reset(sk, gfp_any());
2078		sk->sk_err = ECONNRESET;
2079	} else if (old_state == TCP_SYN_SENT)
2080		sk->sk_err = ECONNRESET;
2081
2082	tcp_clear_xmit_timers(sk);
2083	__skb_queue_purge(&sk->sk_receive_queue);
2084	tcp_write_queue_purge(sk);
2085	__skb_queue_purge(&tp->out_of_order_queue);
2086#ifdef CONFIG_NET_DMA
2087	__skb_queue_purge(&sk->sk_async_wait_queue);
2088#endif
2089
2090	inet->inet_dport = 0;
2091
2092	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2093		inet_reset_saddr(sk);
2094
2095	sk->sk_shutdown = 0;
2096	sock_reset_flag(sk, SOCK_DONE);
2097	tp->srtt = 0;
2098	if ((tp->write_seq += tp->max_window + 2) == 0)
2099		tp->write_seq = 1;
2100	icsk->icsk_backoff = 0;
2101	tp->snd_cwnd = 2;
2102	icsk->icsk_probes_out = 0;
2103	tp->packets_out = 0;
2104	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2105	tp->snd_cwnd_cnt = 0;
2106	tp->bytes_acked = 0;
2107	tp->window_clamp = 0;
2108	tcp_set_ca_state(sk, TCP_CA_Open);
2109	tcp_clear_retrans(tp);
2110	inet_csk_delack_init(sk);
2111	tcp_init_send_head(sk);
2112	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2113	__sk_dst_reset(sk);
2114
2115	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2116
2117	sk->sk_error_report(sk);
2118	return err;
2119}
2120EXPORT_SYMBOL(tcp_disconnect);
2121
2122/*
2123 *	Socket option code for TCP.
2124 */
2125static int do_tcp_setsockopt(struct sock *sk, int level,
2126		int optname, char __user *optval, unsigned int optlen)
2127{
2128	struct tcp_sock *tp = tcp_sk(sk);
2129	struct inet_connection_sock *icsk = inet_csk(sk);
2130	int val;
2131	int err = 0;
2132
2133	/* These are data/string values, all the others are ints */
2134	switch (optname) {
2135	case TCP_CONGESTION: {
2136		char name[TCP_CA_NAME_MAX];
2137
2138		if (optlen < 1)
2139			return -EINVAL;
2140
2141		val = strncpy_from_user(name, optval,
2142					min_t(long, TCP_CA_NAME_MAX-1, optlen));
2143		if (val < 0)
2144			return -EFAULT;
2145		name[val] = 0;
2146
2147		lock_sock(sk);
2148		err = tcp_set_congestion_control(sk, name);
2149		release_sock(sk);
2150		return err;
2151	}
2152	case TCP_COOKIE_TRANSACTIONS: {
2153		struct tcp_cookie_transactions ctd;
2154		struct tcp_cookie_values *cvp = NULL;
2155
2156		if (sizeof(ctd) > optlen)
2157			return -EINVAL;
2158		if (copy_from_user(&ctd, optval, sizeof(ctd)))
2159			return -EFAULT;
2160
2161		if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
2162		    ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
2163			return -EINVAL;
2164
2165		if (ctd.tcpct_cookie_desired == 0) {
2166			/* default to global value */
2167		} else if ((0x1 & ctd.tcpct_cookie_desired) ||
2168			   ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
2169			   ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
2170			return -EINVAL;
2171		}
2172
2173		if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
2174			/* Supercedes all other values */
2175			lock_sock(sk);
2176			if (tp->cookie_values != NULL) {
2177				kref_put(&tp->cookie_values->kref,
2178					 tcp_cookie_values_release);
2179				tp->cookie_values = NULL;
2180			}
2181			tp->rx_opt.cookie_in_always = 0; /* false */
2182			tp->rx_opt.cookie_out_never = 1; /* true */
2183			release_sock(sk);
2184			return err;
2185		}
2186
2187		/* Allocate ancillary memory before locking.
2188		 */
2189		if (ctd.tcpct_used > 0 ||
2190		    (tp->cookie_values == NULL &&
2191		     (sysctl_tcp_cookie_size > 0 ||
2192		      ctd.tcpct_cookie_desired > 0 ||
2193		      ctd.tcpct_s_data_desired > 0))) {
2194			cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
2195				      GFP_KERNEL);
2196			if (cvp == NULL)
2197				return -ENOMEM;
2198
2199			kref_init(&cvp->kref);
2200		}
2201		lock_sock(sk);
2202		tp->rx_opt.cookie_in_always =
2203			(TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
2204		tp->rx_opt.cookie_out_never = 0; /* false */
2205
2206		if (tp->cookie_values != NULL) {
2207			if (cvp != NULL) {
2208				/* Changed values are recorded by a changed
2209				 * pointer, ensuring the cookie will differ,
2210				 * without separately hashing each value later.
2211				 */
2212				kref_put(&tp->cookie_values->kref,
2213					 tcp_cookie_values_release);
2214			} else {
2215				cvp = tp->cookie_values;
2216			}
2217		}
2218
2219		if (cvp != NULL) {
2220			cvp->cookie_desired = ctd.tcpct_cookie_desired;
2221
2222			if (ctd.tcpct_used > 0) {
2223				memcpy(cvp->s_data_payload, ctd.tcpct_value,
2224				       ctd.tcpct_used);
2225				cvp->s_data_desired = ctd.tcpct_used;
2226				cvp->s_data_constant = 1; /* true */
2227			} else {
2228				/* No constant payload data. */
2229				cvp->s_data_desired = ctd.tcpct_s_data_desired;
2230				cvp->s_data_constant = 0; /* false */
2231			}
2232
2233			tp->cookie_values = cvp;
2234		}
2235		release_sock(sk);
2236		return err;
2237	}
2238	default:
2239		/* fallthru */
2240		break;
2241	}
2242
2243	if (optlen < sizeof(int))
2244		return -EINVAL;
2245
2246	if (get_user(val, (int __user *)optval))
2247		return -EFAULT;
2248
2249	lock_sock(sk);
2250
2251	switch (optname) {
2252	case TCP_MAXSEG:
2253		/* Values greater than interface MTU won't take effect. However
2254		 * at the point when this call is done we typically don't yet
2255		 * know which interface is going to be used */
2256		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2257			err = -EINVAL;
2258			break;
2259		}
2260		tp->rx_opt.user_mss = val;
2261		break;
2262
2263	case TCP_NODELAY:
2264		if (val) {
2265			/* TCP_NODELAY is weaker than TCP_CORK, so that
2266			 * this option on corked socket is remembered, but
2267			 * it is not activated until cork is cleared.
2268			 *
2269			 * However, when TCP_NODELAY is set we make
2270			 * an explicit push, which overrides even TCP_CORK
2271			 * for currently queued segments.
2272			 */
2273			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2274			tcp_push_pending_frames(sk);
2275		} else {
2276			tp->nonagle &= ~TCP_NAGLE_OFF;
2277		}
2278		break;
2279
2280	case TCP_THIN_LINEAR_TIMEOUTS:
2281		if (val < 0 || val > 1)
2282			err = -EINVAL;
2283		else
2284			tp->thin_lto = val;
2285		break;
2286
2287	case TCP_THIN_DUPACK:
2288		if (val < 0 || val > 1)
2289			err = -EINVAL;
2290		else
2291			tp->thin_dupack = val;
2292		break;
2293
2294	case TCP_CORK:
2295		/* When set indicates to always queue non-full frames.
2296		 * Later the user clears this option and we transmit
2297		 * any pending partial frames in the queue.  This is
2298		 * meant to be used alongside sendfile() to get properly
2299		 * filled frames when the user (for example) must write
2300		 * out headers with a write() call first and then use
2301		 * sendfile to send out the data parts.
2302		 *
2303		 * TCP_CORK can be set together with TCP_NODELAY and it is
2304		 * stronger than TCP_NODELAY.
2305		 */
2306		if (val) {
2307			tp->nonagle |= TCP_NAGLE_CORK;
2308		} else {
2309			tp->nonagle &= ~TCP_NAGLE_CORK;
2310			if (tp->nonagle&TCP_NAGLE_OFF)
2311				tp->nonagle |= TCP_NAGLE_PUSH;
2312			tcp_push_pending_frames(sk);
2313		}
2314		break;
2315
2316	case TCP_KEEPIDLE:
2317		if (val < 1 || val > MAX_TCP_KEEPIDLE)
2318			err = -EINVAL;
2319		else {
2320			tp->keepalive_time = val * HZ;
2321			if (sock_flag(sk, SOCK_KEEPOPEN) &&
2322			    !((1 << sk->sk_state) &
2323			      (TCPF_CLOSE | TCPF_LISTEN))) {
2324				u32 elapsed = keepalive_time_elapsed(tp);
2325				if (tp->keepalive_time > elapsed)
2326					elapsed = tp->keepalive_time - elapsed;
2327				else
2328					elapsed = 0;
2329				inet_csk_reset_keepalive_timer(sk, elapsed);
2330			}
2331		}
2332		break;
2333	case TCP_KEEPINTVL:
2334		if (val < 1 || val > MAX_TCP_KEEPINTVL)
2335			err = -EINVAL;
2336		else
2337			tp->keepalive_intvl = val * HZ;
2338		break;
2339	case TCP_KEEPCNT:
2340		if (val < 1 || val > MAX_TCP_KEEPCNT)
2341			err = -EINVAL;
2342		else
2343			tp->keepalive_probes = val;
2344		break;
2345	case TCP_SYNCNT:
2346		if (val < 1 || val > MAX_TCP_SYNCNT)
2347			err = -EINVAL;
2348		else
2349			icsk->icsk_syn_retries = val;
2350		break;
2351
2352	case TCP_LINGER2:
2353		if (val < 0)
2354			tp->linger2 = -1;
2355		else if (val > sysctl_tcp_fin_timeout / HZ)
2356			tp->linger2 = 0;
2357		else
2358			tp->linger2 = val * HZ;
2359		break;
2360
2361	case TCP_DEFER_ACCEPT:
2362		/* Translate value in seconds to number of retransmits */
2363		icsk->icsk_accept_queue.rskq_defer_accept =
2364			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2365					TCP_RTO_MAX / HZ);
2366		break;
2367
2368	case TCP_WINDOW_CLAMP:
2369		if (!val) {
2370			if (sk->sk_state != TCP_CLOSE) {
2371				err = -EINVAL;
2372				break;
2373			}
2374			tp->window_clamp = 0;
2375		} else
2376			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2377						SOCK_MIN_RCVBUF / 2 : val;
2378		break;
2379
2380	case TCP_QUICKACK:
2381		if (!val) {
2382			icsk->icsk_ack.pingpong = 1;
2383		} else {
2384			icsk->icsk_ack.pingpong = 0;
2385			if ((1 << sk->sk_state) &
2386			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2387			    inet_csk_ack_scheduled(sk)) {
2388				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2389				tcp_cleanup_rbuf(sk, 1);
2390				if (!(val & 1))
2391					icsk->icsk_ack.pingpong = 1;
2392			}
2393		}
2394		break;
2395
2396#ifdef CONFIG_TCP_MD5SIG
2397	case TCP_MD5SIG:
2398		/* Read the IP->Key mappings from userspace */
2399		err = tp->af_specific->md5_parse(sk, optval, optlen);
2400		break;
2401#endif
2402
2403	default:
2404		err = -ENOPROTOOPT;
2405		break;
2406	}
2407
2408	release_sock(sk);
2409	return err;
2410}
2411
2412int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2413		   unsigned int optlen)
2414{
2415	struct inet_connection_sock *icsk = inet_csk(sk);
2416
2417	if (level != SOL_TCP)
2418		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2419						     optval, optlen);
2420	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2421}
2422EXPORT_SYMBOL(tcp_setsockopt);
2423
2424#ifdef CONFIG_COMPAT
2425int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2426			  char __user *optval, unsigned int optlen)
2427{
2428	if (level != SOL_TCP)
2429		return inet_csk_compat_setsockopt(sk, level, optname,
2430						  optval, optlen);
2431	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2432}
2433EXPORT_SYMBOL(compat_tcp_setsockopt);
2434#endif
2435
2436/* Return information about state of tcp endpoint in API format. */
2437void tcp_get_info(struct sock *sk, struct tcp_info *info)
2438{
2439	struct tcp_sock *tp = tcp_sk(sk);
2440	const struct inet_connection_sock *icsk = inet_csk(sk);
2441	u32 now = tcp_time_stamp;
2442
2443	memset(info, 0, sizeof(*info));
2444
2445	info->tcpi_state = sk->sk_state;
2446	info->tcpi_ca_state = icsk->icsk_ca_state;
2447	info->tcpi_retransmits = icsk->icsk_retransmits;
2448	info->tcpi_probes = icsk->icsk_probes_out;
2449	info->tcpi_backoff = icsk->icsk_backoff;
2450
2451	if (tp->rx_opt.tstamp_ok)
2452		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2453	if (tcp_is_sack(tp))
2454		info->tcpi_options |= TCPI_OPT_SACK;
2455	if (tp->rx_opt.wscale_ok) {
2456		info->tcpi_options |= TCPI_OPT_WSCALE;
2457		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2458		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2459	}
2460
2461	if (tp->ecn_flags&TCP_ECN_OK)
2462		info->tcpi_options |= TCPI_OPT_ECN;
2463
2464	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2465	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2466	info->tcpi_snd_mss = tp->mss_cache;
2467	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2468
2469	if (sk->sk_state == TCP_LISTEN) {
2470		info->tcpi_unacked = sk->sk_ack_backlog;
2471		info->tcpi_sacked = sk->sk_max_ack_backlog;
2472	} else {
2473		info->tcpi_unacked = tp->packets_out;
2474		info->tcpi_sacked = tp->sacked_out;
2475	}
2476	info->tcpi_lost = tp->lost_out;
2477	info->tcpi_retrans = tp->retrans_out;
2478	info->tcpi_fackets = tp->fackets_out;
2479
2480	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2481	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2482	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2483
2484	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2485	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2486	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2487	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2488	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2489	info->tcpi_snd_cwnd = tp->snd_cwnd;
2490	info->tcpi_advmss = tp->advmss;
2491	info->tcpi_reordering = tp->reordering;
2492
2493	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2494	info->tcpi_rcv_space = tp->rcvq_space.space;
2495
2496	info->tcpi_total_retrans = tp->total_retrans;
2497}
2498EXPORT_SYMBOL_GPL(tcp_get_info);
2499
2500static int do_tcp_getsockopt(struct sock *sk, int level,
2501		int optname, char __user *optval, int __user *optlen)
2502{
2503	struct inet_connection_sock *icsk = inet_csk(sk);
2504	struct tcp_sock *tp = tcp_sk(sk);
2505	int val, len;
2506
2507	if (get_user(len, optlen))
2508		return -EFAULT;
2509
2510	len = min_t(unsigned int, len, sizeof(int));
2511
2512	if (len < 0)
2513		return -EINVAL;
2514
2515	switch (optname) {
2516	case TCP_MAXSEG:
2517		val = tp->mss_cache;
2518		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2519			val = tp->rx_opt.user_mss;
2520		break;
2521	case TCP_NODELAY:
2522		val = !!(tp->nonagle&TCP_NAGLE_OFF);
2523		break;
2524	case TCP_CORK:
2525		val = !!(tp->nonagle&TCP_NAGLE_CORK);
2526		break;
2527	case TCP_KEEPIDLE:
2528		val = keepalive_time_when(tp) / HZ;
2529		break;
2530	case TCP_KEEPINTVL:
2531		val = keepalive_intvl_when(tp) / HZ;
2532		break;
2533	case TCP_KEEPCNT:
2534		val = keepalive_probes(tp);
2535		break;
2536	case TCP_SYNCNT:
2537		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2538		break;
2539	case TCP_LINGER2:
2540		val = tp->linger2;
2541		if (val >= 0)
2542			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2543		break;
2544	case TCP_DEFER_ACCEPT:
2545		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2546				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2547		break;
2548	case TCP_WINDOW_CLAMP:
2549		val = tp->window_clamp;
2550		break;
2551	case TCP_INFO: {
2552		struct tcp_info info;
2553
2554		if (get_user(len, optlen))
2555			return -EFAULT;
2556
2557		tcp_get_info(sk, &info);
2558
2559		len = min_t(unsigned int, len, sizeof(info));
2560		if (put_user(len, optlen))
2561			return -EFAULT;
2562		if (copy_to_user(optval, &info, len))
2563			return -EFAULT;
2564		return 0;
2565	}
2566	case TCP_QUICKACK:
2567		val = !icsk->icsk_ack.pingpong;
2568		break;
2569
2570	case TCP_CONGESTION:
2571		if (get_user(len, optlen))
2572			return -EFAULT;
2573		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2574		if (put_user(len, optlen))
2575			return -EFAULT;
2576		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2577			return -EFAULT;
2578		return 0;
2579
2580	case TCP_COOKIE_TRANSACTIONS: {
2581		struct tcp_cookie_transactions ctd;
2582		struct tcp_cookie_values *cvp = tp->cookie_values;
2583
2584		if (get_user(len, optlen))
2585			return -EFAULT;
2586		if (len < sizeof(ctd))
2587			return -EINVAL;
2588
2589		memset(&ctd, 0, sizeof(ctd));
2590		ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
2591				   TCP_COOKIE_IN_ALWAYS : 0)
2592				| (tp->rx_opt.cookie_out_never ?
2593				   TCP_COOKIE_OUT_NEVER : 0);
2594
2595		if (cvp != NULL) {
2596			ctd.tcpct_flags |= (cvp->s_data_in ?
2597					    TCP_S_DATA_IN : 0)
2598					 | (cvp->s_data_out ?
2599					    TCP_S_DATA_OUT : 0);
2600
2601			ctd.tcpct_cookie_desired = cvp->cookie_desired;
2602			ctd.tcpct_s_data_desired = cvp->s_data_desired;
2603
2604			memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
2605			       cvp->cookie_pair_size);
2606			ctd.tcpct_used = cvp->cookie_pair_size;
2607		}
2608
2609		if (put_user(sizeof(ctd), optlen))
2610			return -EFAULT;
2611		if (copy_to_user(optval, &ctd, sizeof(ctd)))
2612			return -EFAULT;
2613		return 0;
2614	}
2615	case TCP_THIN_LINEAR_TIMEOUTS:
2616		val = tp->thin_lto;
2617		break;
2618	case TCP_THIN_DUPACK:
2619		val = tp->thin_dupack;
2620		break;
2621	default:
2622		return -ENOPROTOOPT;
2623	}
2624
2625	if (put_user(len, optlen))
2626		return -EFAULT;
2627	if (copy_to_user(optval, &val, len))
2628		return -EFAULT;
2629	return 0;
2630}
2631
2632int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2633		   int __user *optlen)
2634{
2635	struct inet_connection_sock *icsk = inet_csk(sk);
2636
2637	if (level != SOL_TCP)
2638		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2639						     optval, optlen);
2640	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2641}
2642EXPORT_SYMBOL(tcp_getsockopt);
2643
2644#ifdef CONFIG_COMPAT
2645int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2646			  char __user *optval, int __user *optlen)
2647{
2648	if (level != SOL_TCP)
2649		return inet_csk_compat_getsockopt(sk, level, optname,
2650						  optval, optlen);
2651	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2652}
2653EXPORT_SYMBOL(compat_tcp_getsockopt);
2654#endif
2655
2656struct sk_buff BCMFASTPATH_HOST *tcp_tso_segment(struct sk_buff *skb, int features)
2657{
2658	struct sk_buff *segs = ERR_PTR(-EINVAL);
2659	struct tcphdr *th;
2660	unsigned thlen;
2661	unsigned int seq;
2662	__be32 delta;
2663	unsigned int oldlen;
2664	unsigned int mss;
2665
2666	if (!pskb_may_pull(skb, sizeof(*th)))
2667		goto out;
2668
2669	th = tcp_hdr(skb);
2670	thlen = th->doff * 4;
2671	if (thlen < sizeof(*th))
2672		goto out;
2673
2674	if (!pskb_may_pull(skb, thlen))
2675		goto out;
2676
2677	oldlen = (u16)~skb->len;
2678	__skb_pull(skb, thlen);
2679
2680	mss = skb_shinfo(skb)->gso_size;
2681	if (unlikely(skb->len <= mss))
2682		goto out;
2683
2684	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2685		/* Packet is from an untrusted source, reset gso_segs. */
2686		int type = skb_shinfo(skb)->gso_type;
2687
2688		if (unlikely(type &
2689			     ~(SKB_GSO_TCPV4 |
2690			       SKB_GSO_DODGY |
2691			       SKB_GSO_TCP_ECN |
2692			       SKB_GSO_TCPV6 |
2693			       0) ||
2694			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2695			goto out;
2696
2697		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
2698
2699		segs = NULL;
2700		goto out;
2701	}
2702
2703	/*
2704	 * For normal optimized packet handling, it calls skb_tcp_segmented().
2705	 * However, packets marked for Netfilter needs to be segmented using
2706	 * the old method since the packets are passed up to the application
2707	 * layer.
2708	 */
2709	if (!skb->tcpf_nf && (skb->protocol != htons(ETH_P_IPV6))) {
2710		return skb_tcp_segment(skb, features, oldlen, thlen);
2711	}
2712
2713	/* Old method */
2714	skb->tcpf_nf = 0;
2715	segs = skb_segment(skb, features);
2716	if (IS_ERR(segs))
2717		goto out;
2718
2719	delta = htonl(oldlen + (thlen + mss));
2720
2721	skb = segs;
2722	th = tcp_hdr(skb);
2723	seq = ntohl(th->seq);
2724
2725	do {
2726		th->fin = th->psh = 0;
2727
2728		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2729				       (__force u32)delta));
2730		if (skb->ip_summed != CHECKSUM_PARTIAL)
2731			th->check =
2732			     csum_fold(csum_partial(skb_transport_header(skb),
2733						    thlen, skb->csum));
2734
2735		seq += mss;
2736		skb = skb->next;
2737		th = tcp_hdr(skb);
2738
2739		th->seq = htonl(seq);
2740		th->cwr = 0;
2741	} while (skb->next);
2742
2743	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
2744		      skb->data_len);
2745	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2746				(__force u32)delta));
2747	if (skb->ip_summed != CHECKSUM_PARTIAL)
2748		th->check = csum_fold(csum_partial(skb_transport_header(skb),
2749						   thlen, skb->csum));
2750
2751out:
2752	return segs;
2753}
2754EXPORT_SYMBOL(tcp_tso_segment);
2755
2756struct sk_buff ** BCMFASTPATH_HOST tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2757{
2758	struct sk_buff **pp = NULL;
2759	struct sk_buff *p;
2760	struct tcphdr *th;
2761	struct tcphdr *th2;
2762	unsigned int len;
2763	unsigned int thlen;
2764	__be32 flags;
2765	unsigned int mss = 1;
2766	unsigned int hlen;
2767	unsigned int off;
2768	int flush = 1;
2769	int i;
2770
2771	off = skb_gro_offset(skb);
2772	hlen = off + sizeof(*th);
2773	th = skb_gro_header_fast(skb, off);
2774	if (skb_gro_header_hard(skb, hlen)) {
2775		th = skb_gro_header_slow(skb, hlen, off);
2776		if (unlikely(!th))
2777			goto out;
2778	}
2779
2780	thlen = th->doff * 4;
2781	if (thlen < sizeof(*th))
2782		goto out;
2783
2784	hlen = off + thlen;
2785	if (skb_gro_header_hard(skb, hlen)) {
2786		th = skb_gro_header_slow(skb, hlen, off);
2787		if (unlikely(!th))
2788			goto out;
2789	}
2790
2791	skb_gro_pull(skb, thlen);
2792
2793	len = skb_gro_len(skb);
2794	flags = tcp_flag_word(th);
2795
2796	for (; (p = *head); head = &p->next) {
2797		if (!NAPI_GRO_CB(p)->same_flow)
2798			continue;
2799
2800		th2 = tcp_hdr(p);
2801
2802		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
2803			NAPI_GRO_CB(p)->same_flow = 0;
2804			continue;
2805		}
2806
2807		goto found;
2808	}
2809
2810	goto out_check_final;
2811
2812found:
2813	flush = NAPI_GRO_CB(p)->flush;
2814	flush |= (__force int)(flags & TCP_FLAG_CWR);
2815	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
2816		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
2817	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
2818	for (i = sizeof(*th); i < thlen; i += 4)
2819		flush |= *(u32 *)((u8 *)th + i) ^
2820			 *(u32 *)((u8 *)th2 + i);
2821
2822	mss = skb_shinfo(p)->gso_size;
2823
2824	flush |= (len - 1) >= mss;
2825	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
2826
2827	if (flush || skb_gro_receive(head, skb)) {
2828		mss = 1;
2829		goto out_check_final;
2830	}
2831
2832	p = *head;
2833	th2 = tcp_hdr(p);
2834	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
2835
2836out_check_final:
2837	flush = len < mss;
2838	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
2839					TCP_FLAG_RST | TCP_FLAG_SYN |
2840					TCP_FLAG_FIN));
2841
2842	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
2843		pp = head;
2844
2845out:
2846	NAPI_GRO_CB(skb)->flush |= flush;
2847
2848	return pp;
2849}
2850EXPORT_SYMBOL(tcp_gro_receive);
2851
2852int BCMFASTPATH_HOST tcp_gro_complete(struct sk_buff *skb)
2853{
2854	struct tcphdr *th = tcp_hdr(skb);
2855
2856	skb->csum_start = skb_transport_header(skb) - skb->head;
2857	skb->csum_offset = offsetof(struct tcphdr, check);
2858	skb->ip_summed = CHECKSUM_PARTIAL;
2859
2860	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
2861
2862	if (th->cwr)
2863		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
2864
2865	return 0;
2866}
2867EXPORT_SYMBOL(tcp_gro_complete);
2868
2869#ifdef CONFIG_TCP_MD5SIG
2870static unsigned long tcp_md5sig_users;
2871static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
2872static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2873
2874static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
2875{
2876	int cpu;
2877	for_each_possible_cpu(cpu) {
2878		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2879		if (p) {
2880			if (p->md5_desc.tfm)
2881				crypto_free_hash(p->md5_desc.tfm);
2882			kfree(p);
2883		}
2884	}
2885	free_percpu(pool);
2886}
2887
2888void tcp_free_md5sig_pool(void)
2889{
2890	struct tcp_md5sig_pool * __percpu *pool = NULL;
2891
2892	spin_lock_bh(&tcp_md5sig_pool_lock);
2893	if (--tcp_md5sig_users == 0) {
2894		pool = tcp_md5sig_pool;
2895		tcp_md5sig_pool = NULL;
2896	}
2897	spin_unlock_bh(&tcp_md5sig_pool_lock);
2898	if (pool)
2899		__tcp_free_md5sig_pool(pool);
2900}
2901EXPORT_SYMBOL(tcp_free_md5sig_pool);
2902
2903static struct tcp_md5sig_pool * __percpu *
2904__tcp_alloc_md5sig_pool(struct sock *sk)
2905{
2906	int cpu;
2907	struct tcp_md5sig_pool * __percpu *pool;
2908
2909	pool = alloc_percpu(struct tcp_md5sig_pool *);
2910	if (!pool)
2911		return NULL;
2912
2913	for_each_possible_cpu(cpu) {
2914		struct tcp_md5sig_pool *p;
2915		struct crypto_hash *hash;
2916
2917		p = kzalloc(sizeof(*p), sk->sk_allocation);
2918		if (!p)
2919			goto out_free;
2920		*per_cpu_ptr(pool, cpu) = p;
2921
2922		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2923		if (!hash || IS_ERR(hash))
2924			goto out_free;
2925
2926		p->md5_desc.tfm = hash;
2927	}
2928	return pool;
2929out_free:
2930	__tcp_free_md5sig_pool(pool);
2931	return NULL;
2932}
2933
2934struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
2935{
2936	struct tcp_md5sig_pool * __percpu *pool;
2937	int alloc = 0;
2938
2939retry:
2940	spin_lock_bh(&tcp_md5sig_pool_lock);
2941	pool = tcp_md5sig_pool;
2942	if (tcp_md5sig_users++ == 0) {
2943		alloc = 1;
2944		spin_unlock_bh(&tcp_md5sig_pool_lock);
2945	} else if (!pool) {
2946		tcp_md5sig_users--;
2947		spin_unlock_bh(&tcp_md5sig_pool_lock);
2948		cpu_relax();
2949		goto retry;
2950	} else
2951		spin_unlock_bh(&tcp_md5sig_pool_lock);
2952
2953	if (alloc) {
2954		/* we cannot hold spinlock here because this may sleep. */
2955		struct tcp_md5sig_pool * __percpu *p;
2956
2957		p = __tcp_alloc_md5sig_pool(sk);
2958		spin_lock_bh(&tcp_md5sig_pool_lock);
2959		if (!p) {
2960			tcp_md5sig_users--;
2961			spin_unlock_bh(&tcp_md5sig_pool_lock);
2962			return NULL;
2963		}
2964		pool = tcp_md5sig_pool;
2965		if (pool) {
2966			/* oops, it has already been assigned. */
2967			spin_unlock_bh(&tcp_md5sig_pool_lock);
2968			__tcp_free_md5sig_pool(p);
2969		} else {
2970			tcp_md5sig_pool = pool = p;
2971			spin_unlock_bh(&tcp_md5sig_pool_lock);
2972		}
2973	}
2974	return pool;
2975}
2976EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2977
2978
2979/**
2980 *	tcp_get_md5sig_pool - get md5sig_pool for this user
2981 *
2982 *	We use percpu structure, so if we succeed, we exit with preemption
2983 *	and BH disabled, to make sure another thread or softirq handling
2984 *	wont try to get same context.
2985 */
2986struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2987{
2988	struct tcp_md5sig_pool * __percpu *p;
2989
2990	local_bh_disable();
2991
2992	spin_lock(&tcp_md5sig_pool_lock);
2993	p = tcp_md5sig_pool;
2994	if (p)
2995		tcp_md5sig_users++;
2996	spin_unlock(&tcp_md5sig_pool_lock);
2997
2998	if (p)
2999		return *this_cpu_ptr(p);
3000
3001	local_bh_enable();
3002	return NULL;
3003}
3004EXPORT_SYMBOL(tcp_get_md5sig_pool);
3005
3006void tcp_put_md5sig_pool(void)
3007{
3008	local_bh_enable();
3009	tcp_free_md5sig_pool();
3010}
3011EXPORT_SYMBOL(tcp_put_md5sig_pool);
3012
3013int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3014			struct tcphdr *th)
3015{
3016	struct scatterlist sg;
3017	int err;
3018
3019	__sum16 old_checksum = th->check;
3020	th->check = 0;
3021	/* options aren't included in the hash */
3022	sg_init_one(&sg, th, sizeof(struct tcphdr));
3023	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
3024	th->check = old_checksum;
3025	return err;
3026}
3027EXPORT_SYMBOL(tcp_md5_hash_header);
3028
3029int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3030			  struct sk_buff *skb, unsigned header_len)
3031{
3032	struct scatterlist sg;
3033	const struct tcphdr *tp = tcp_hdr(skb);
3034	struct hash_desc *desc = &hp->md5_desc;
3035	unsigned i;
3036	const unsigned head_data_len = skb_headlen(skb) > header_len ?
3037				       skb_headlen(skb) - header_len : 0;
3038	const struct skb_shared_info *shi = skb_shinfo(skb);
3039	struct sk_buff *frag_iter;
3040
3041	sg_init_table(&sg, 1);
3042
3043	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3044	if (crypto_hash_update(desc, &sg, head_data_len))
3045		return 1;
3046
3047	for (i = 0; i < shi->nr_frags; ++i) {
3048		const struct skb_frag_struct *f = &shi->frags[i];
3049		sg_set_page(&sg, f->page, f->size, f->page_offset);
3050		if (crypto_hash_update(desc, &sg, f->size))
3051			return 1;
3052	}
3053
3054	skb_walk_frags(skb, frag_iter)
3055		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3056			return 1;
3057
3058	return 0;
3059}
3060EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3061
3062int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
3063{
3064	struct scatterlist sg;
3065
3066	sg_init_one(&sg, key->key, key->keylen);
3067	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3068}
3069EXPORT_SYMBOL(tcp_md5_hash_key);
3070
3071#endif
3072
3073/**
3074 * Each Responder maintains up to two secret values concurrently for
3075 * efficient secret rollover.  Each secret value has 4 states:
3076 *
3077 * Generating.  (tcp_secret_generating != tcp_secret_primary)
3078 *    Generates new Responder-Cookies, but not yet used for primary
3079 *    verification.  This is a short-term state, typically lasting only
3080 *    one round trip time (RTT).
3081 *
3082 * Primary.  (tcp_secret_generating == tcp_secret_primary)
3083 *    Used both for generation and primary verification.
3084 *
3085 * Retiring.  (tcp_secret_retiring != tcp_secret_secondary)
3086 *    Used for verification, until the first failure that can be
3087 *    verified by the newer Generating secret.  At that time, this
3088 *    cookie's state is changed to Secondary, and the Generating
3089 *    cookie's state is changed to Primary.  This is a short-term state,
3090 *    typically lasting only one round trip time (RTT).
3091 *
3092 * Secondary.  (tcp_secret_retiring == tcp_secret_secondary)
3093 *    Used for secondary verification, after primary verification
3094 *    failures.  This state lasts no more than twice the Maximum Segment
3095 *    Lifetime (2MSL).  Then, the secret is discarded.
3096 */
3097struct tcp_cookie_secret {
3098	/* The secret is divided into two parts.  The digest part is the
3099	 * equivalent of previously hashing a secret and saving the state,
3100	 * and serves as an initialization vector (IV).  The message part
3101	 * serves as the trailing secret.
3102	 */
3103	u32				secrets[COOKIE_WORKSPACE_WORDS];
3104	unsigned long			expires;
3105};
3106
3107#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
3108#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
3109#define TCP_SECRET_LIFE (HZ * 600)
3110
3111static struct tcp_cookie_secret tcp_secret_one;
3112static struct tcp_cookie_secret tcp_secret_two;
3113
3114/* Essentially a circular list, without dynamic allocation. */
3115static struct tcp_cookie_secret *tcp_secret_generating;
3116static struct tcp_cookie_secret *tcp_secret_primary;
3117static struct tcp_cookie_secret *tcp_secret_retiring;
3118static struct tcp_cookie_secret *tcp_secret_secondary;
3119
3120static DEFINE_SPINLOCK(tcp_secret_locker);
3121
3122/* Select a pseudo-random word in the cookie workspace.
3123 */
3124static inline u32 tcp_cookie_work(const u32 *ws, const int n)
3125{
3126	return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
3127}
3128
3129/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
3130 * Called in softirq context.
3131 * Returns: 0 for success.
3132 */
3133int tcp_cookie_generator(u32 *bakery)
3134{
3135	unsigned long jiffy = jiffies;
3136
3137	if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
3138		spin_lock_bh(&tcp_secret_locker);
3139		if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
3140			/* refreshed by another */
3141			memcpy(bakery,
3142			       &tcp_secret_generating->secrets[0],
3143			       COOKIE_WORKSPACE_WORDS);
3144		} else {
3145			/* still needs refreshing */
3146			get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);
3147
3148			/* The first time, paranoia assumes that the
3149			 * randomization function isn't as strong.  But,
3150			 * this secret initialization is delayed until
3151			 * the last possible moment (packet arrival).
3152			 * Although that time is observable, it is
3153			 * unpredictably variable.  Mash in the most
3154			 * volatile clock bits available, and expire the
3155			 * secret extra quickly.
3156			 */
3157			if (unlikely(tcp_secret_primary->expires ==
3158				     tcp_secret_secondary->expires)) {
3159				struct timespec tv;
3160
3161				getnstimeofday(&tv);
3162				bakery[COOKIE_DIGEST_WORDS+0] ^=
3163					(u32)tv.tv_nsec;
3164
3165				tcp_secret_secondary->expires = jiffy
3166					+ TCP_SECRET_1MSL
3167					+ (0x0f & tcp_cookie_work(bakery, 0));
3168			} else {
3169				tcp_secret_secondary->expires = jiffy
3170					+ TCP_SECRET_LIFE
3171					+ (0xff & tcp_cookie_work(bakery, 1));
3172				tcp_secret_primary->expires = jiffy
3173					+ TCP_SECRET_2MSL
3174					+ (0x1f & tcp_cookie_work(bakery, 2));
3175			}
3176			memcpy(&tcp_secret_secondary->secrets[0],
3177			       bakery, COOKIE_WORKSPACE_WORDS);
3178
3179			rcu_assign_pointer(tcp_secret_generating,
3180					   tcp_secret_secondary);
3181			rcu_assign_pointer(tcp_secret_retiring,
3182					   tcp_secret_primary);
3183			/*
3184			 * Neither call_rcu() nor synchronize_rcu() needed.
3185			 * Retiring data is not freed.  It is replaced after
3186			 * further (locked) pointer updates, and a quiet time
3187			 * (minimum 1MSL, maximum LIFE - 2MSL).
3188			 */
3189		}
3190		spin_unlock_bh(&tcp_secret_locker);
3191	} else {
3192		rcu_read_lock_bh();
3193		memcpy(bakery,
3194		       &rcu_dereference(tcp_secret_generating)->secrets[0],
3195		       COOKIE_WORKSPACE_WORDS);
3196		rcu_read_unlock_bh();
3197	}
3198	return 0;
3199}
3200EXPORT_SYMBOL(tcp_cookie_generator);
3201
3202void tcp_done(struct sock *sk)
3203{
3204	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3205		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3206
3207	tcp_set_state(sk, TCP_CLOSE);
3208	tcp_clear_xmit_timers(sk);
3209
3210	sk->sk_shutdown = SHUTDOWN_MASK;
3211
3212	if (!sock_flag(sk, SOCK_DEAD))
3213		sk->sk_state_change(sk);
3214	else
3215		inet_csk_destroy_sock(sk);
3216}
3217EXPORT_SYMBOL_GPL(tcp_done);
3218
3219extern struct tcp_congestion_ops tcp_reno;
3220
3221static __initdata unsigned long thash_entries;
3222static int __init set_thash_entries(char *str)
3223{
3224	if (!str)
3225		return 0;
3226	thash_entries = simple_strtoul(str, &str, 0);
3227	return 1;
3228}
3229__setup("thash_entries=", set_thash_entries);
3230
3231void __init tcp_init(void)
3232{
3233	struct sk_buff *skb = NULL;
3234	unsigned long nr_pages, limit;
3235	int i, max_share, cnt;
3236	unsigned long jiffy = jiffies;
3237
3238	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
3239
3240	percpu_counter_init(&tcp_sockets_allocated, 0);
3241	percpu_counter_init(&tcp_orphan_count, 0);
3242	tcp_hashinfo.bind_bucket_cachep =
3243		kmem_cache_create("tcp_bind_bucket",
3244				  sizeof(struct inet_bind_bucket), 0,
3245				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3246
3247	/* Size and allocate the main established and bind bucket
3248	 * hash tables.
3249	 *
3250	 * The methodology is similar to that of the buffer cache.
3251	 */
3252	tcp_hashinfo.ehash =
3253		alloc_large_system_hash("TCP established",
3254					sizeof(struct inet_ehash_bucket),
3255					thash_entries,
3256					(totalram_pages >= 128 * 1024) ?
3257					13 : 15,
3258					0,
3259					NULL,
3260					&tcp_hashinfo.ehash_mask,
3261					thash_entries ? 0 : 512 * 1024);
3262	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
3263		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3264		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
3265	}
3266	if (inet_ehash_locks_alloc(&tcp_hashinfo))
3267		panic("TCP: failed to alloc ehash_locks");
3268	tcp_hashinfo.bhash =
3269		alloc_large_system_hash("TCP bind",
3270					sizeof(struct inet_bind_hashbucket),
3271					tcp_hashinfo.ehash_mask + 1,
3272					(totalram_pages >= 128 * 1024) ?
3273					13 : 15,
3274					0,
3275					&tcp_hashinfo.bhash_size,
3276					NULL,
3277					64 * 1024);
3278	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
3279	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3280		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3281		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3282	}
3283
3284
3285	cnt = tcp_hashinfo.ehash_mask + 1;
3286
3287	tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3288	sysctl_tcp_max_orphans = cnt / 2;
3289	sysctl_max_syn_backlog = max(128, cnt / 256);
3290
3291	/* Set the pressure threshold to be a fraction of global memory that
3292	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
3293	 * memory, with a floor of 128 pages.
3294	 */
3295	nr_pages = totalram_pages - totalhigh_pages;
3296	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
3297	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
3298	limit = max(limit, 128UL);
3299	sysctl_tcp_mem[0] = limit / 4 * 3;
3300	sysctl_tcp_mem[1] = limit;
3301	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
3302
3303	/* Set per-socket limits to no more than 1/128 the pressure threshold */
3304	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
3305	max_share = min(4UL*1024*1024, limit);
3306
3307	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3308	sysctl_tcp_wmem[1] = 16*1024;
3309	sysctl_tcp_wmem[2] = max(64*1024, max_share);
3310
3311	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3312	sysctl_tcp_rmem[1] = 87380;
3313	sysctl_tcp_rmem[2] = max(87380, max_share);
3314
3315	printk(KERN_INFO "TCP: Hash tables configured "
3316	       "(established %u bind %u)\n",
3317	       tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3318
3319	tcp_register_congestion_control(&tcp_reno);
3320
3321	memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
3322	memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
3323	tcp_secret_one.expires = jiffy; /* past due */
3324	tcp_secret_two.expires = jiffy; /* past due */
3325	tcp_secret_generating = &tcp_secret_one;
3326	tcp_secret_primary = &tcp_secret_one;
3327	tcp_secret_retiring = &tcp_secret_two;
3328	tcp_secret_secondary = &tcp_secret_two;
3329}
3330