1/*
2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
3 *		operating system.  INET is implemented using the  BSD Socket
4 *		interface as the means of communication with the user level.
5 *
6 *		Definitions for the AF_INET socket handler.
7 *
8 * Version:	@(#)sock.h	1.0.4	05/13/93
9 *
10 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
11 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
13 *		Florian La Roche <flla@stud.uni-sb.de>
14 *
15 * Fixes:
16 *		Alan Cox	:	Volatiles in skbuff pointers. See
17 *					skbuff comments. May be overdone,
18 *					better to prove they can be removed
19 *					than the reverse.
20 *		Alan Cox	:	Added a zapped field for tcp to note
21 *					a socket is reset and must stay shut up
22 *		Alan Cox	:	New fields for options
23 *	Pauline Middelink	:	identd support
24 *		Alan Cox	:	Eliminate low level recv/recvfrom
25 *		David S. Miller	:	New socket lookup architecture.
26 *              Steve Whitehouse:       Default routines for sock_ops
27 *
28 *		This program is free software; you can redistribute it and/or
29 *		modify it under the terms of the GNU General Public License
30 *		as published by the Free Software Foundation; either version
31 *		2 of the License, or (at your option) any later version.
32 */
33#ifndef _SOCK_H
34#define _SOCK_H
35
36#include <linux/config.h>
37#include <linux/timer.h>
38#include <linux/cache.h>
39#include <linux/in.h>		/* struct sockaddr_in */
40
41#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
42#include <linux/in6.h>		/* struct sockaddr_in6 */
43#include <linux/ipv6.h>		/* dest_cache, inet6_options */
44#include <linux/icmpv6.h>
45#include <net/if_inet6.h>	/* struct ipv6_mc_socklist */
46#endif
47
48#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
49#include <linux/icmp.h>
50#endif
51#include <linux/tcp.h>		/* struct tcphdr */
52
53#include <linux/netdevice.h>
54#include <linux/skbuff.h>	/* struct sk_buff */
55#include <net/protocol.h>		/* struct inet_protocol */
56#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
57#include <net/x25.h>
58#endif
59#if defined(CONFIG_WAN_ROUTER) || defined(CONFIG_WAN_ROUTER_MODULE)
60#include <linux/if_wanpipe.h>
61#endif
62
63#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
64#include <net/ax25.h>
65#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
66#include <net/netrom.h>
67#endif
68#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
69#include <net/rose.h>
70#endif
71#endif
72
73#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE) \
74 || defined(CONFIG_PPPOL2TP) || defined(CONFIG_PPPOL2TP_MODULE) \
75 || defined(CONFIG_PPTP) || defined(CONFIG_PPTP_MODULE)
76#include <linux/if_pppox.h>
77#include <linux/ppp_channel.h>   /* struct ppp_channel */
78#endif
79
80#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
81#if defined(CONFIG_SPX) || defined(CONFIG_SPX_MODULE)
82#include <net/spx.h>
83#else
84#include <net/ipx.h>
85#endif /* CONFIG_SPX */
86#endif /* CONFIG_IPX */
87
88#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
89#include <linux/atalk.h>
90#endif
91
92#if defined(CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
93#include <net/dn.h>
94#endif
95
96#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
97#include <net/irda/irda.h>
98#endif
99
100#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
101struct atm_vcc;
102#endif
103
104#ifdef CONFIG_FILTER
105#include <linux/filter.h>
106#endif
107
108#include <asm/atomic.h>
109#include <net/dst.h>
110
111
112/* The AF_UNIX specific socket options */
113struct unix_opt {
114	struct unix_address	*addr;
115	struct dentry *		dentry;
116	struct vfsmount *	mnt;
117	struct semaphore	readsem;
118	struct sock *		other;
119	struct sock **		list;
120	struct sock *		gc_tree;
121	atomic_t		inflight;
122	rwlock_t		lock;
123	wait_queue_head_t	peer_wait;
124};
125
126
127/* Once the IPX ncpd patches are in these are going into protinfo. */
128#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
129struct ipx_opt {
130	ipx_address		dest_addr;
131	ipx_interface		*intrfc;
132	unsigned short		port;
133#ifdef CONFIG_IPX_INTERN
134	unsigned char           node[IPX_NODE_LEN];
135#endif
136	unsigned short		type;
137/*
138 * To handle special ncp connection-handling sockets for mars_nwe,
139 * the connection number must be stored in the socket.
140 */
141	unsigned short		ipx_ncp_conn;
142};
143#endif
144
145#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
146struct ipv6_pinfo {
147	struct in6_addr 	saddr;
148	struct in6_addr 	rcv_saddr;
149	struct in6_addr		daddr;
150	struct in6_addr		*daddr_cache;
151
152	__u32			flow_label;
153	__u32			frag_size;
154	int			hop_limit;
155	int			mcast_hops;
156	int			mcast_oif;
157
158	/* pktoption flags */
159	union {
160		struct {
161			__u8	srcrt:2,
162			        rxinfo:1,
163				rxhlim:1,
164				hopopts:1,
165				dstopts:1,
166                                authhdr:1,
167                                rxflow:1;
168		} bits;
169		__u8		all;
170	} rxopt;
171
172	/* sockopt flags */
173	__u8			mc_loop:1,
174	                        recverr:1,
175	                        sndflow:1,
176	                        pmtudisc:2;
177
178	struct ipv6_mc_socklist	*ipv6_mc_list;
179	struct ipv6_fl_socklist *ipv6_fl_list;
180	__u32			dst_cookie;
181
182	struct ipv6_txoptions	*opt;
183	struct sk_buff		*pktoptions;
184};
185
186struct raw6_opt {
187	__u32			checksum;	/* perform checksum */
188	__u32			offset;		/* checksum offset  */
189
190	struct icmp6_filter	filter;
191};
192
193#endif /* IPV6 */
194
195#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
196struct raw_opt {
197	struct icmp_filter	filter;
198};
199#endif
200
201#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
202struct inet_opt
203{
204	int			ttl;			/* TTL setting */
205	int			tos;			/* TOS */
206	unsigned	   	cmsg_flags;
207	struct ip_options	*opt;
208	unsigned char		hdrincl;		/* Include headers ? */
209	__u8			mc_ttl;			/* Multicasting TTL */
210	__u8			mc_loop;		/* Loopback */
211	unsigned		recverr : 1,
212				freebind : 1;
213	__u16			id;			/* ID counter for DF pkts */
214	__u8			pmtudisc;
215	int			mc_index;		/* Multicast device index */
216	__u32			mc_addr;
217	struct ip_mc_socklist	*mc_list;		/* Group array */
218};
219#endif
220
221#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
222#ifndef PPPOE_OPT_STRUCTURE
223#define PPPOE_OPT_STRUCTURE
224struct pppoe_opt
225{
226	struct net_device      *dev;	  /* device associated with socket*/
227	struct pppoe_addr	pa;	  /* what this socket is bound to*/
228	struct sockaddr_pppox	relay;	  /* what socket data will be
229					     relayed to (PPPoE relaying) */
230};
231#endif /* PPPOE_OPT_STRUCTURE */
232#define pppoe_dev	proto.pppoe.dev
233#define pppoe_pa	proto.pppoe.pa
234#define pppoe_relay	proto.pppoe.relay
235#endif
236
237#if defined(CONFIG_PPTP) || defined(CONFIG_PPTP_MODULE)
238struct pptp_opt {
239	struct pptp_addr	src_addr;
240	struct pptp_addr	dst_addr;
241	__u32 ack_sent, ack_recv;
242	__u32 seq_sent, seq_recv;
243	__u32 first_seq;
244	int ppp_flags;
245};
246#endif
247
248#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE) \
249 || defined(CONFIG_PPPOL2TP) || defined(CONFIG_PPPOL2TP_MODULE) \
250 || defined(CONFIG_PPTP) || defined(CONFIG_PPTP_MODULE)
251struct pppox_opt
252{
253	struct ppp_channel	chan;
254	struct sock		*sk;
255	struct pppox_opt	*next;	  /* for hash table */
256	union {
257#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
258		struct pppoe_opt pppoe;
259#endif
260#if defined(CONFIG_PPTP) || defined(CONFIG_PPTP_MODULE)
261		struct pptp_opt  pptp;
262#endif
263	} proto;
264};
265
266struct pppox_sock {
	/* pppox_sock must stay layout-compatible with pppox_opt; see pppox_sk() */
268	struct ppp_channel	chan;
269	struct sock		*sk;
270	struct pppox_sock	*next;	  /* for hash table */
271	union {
272#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
273		struct pppoe_opt pppoe;
274#endif
275#if defined(CONFIG_PPTP) || defined(CONFIG_PPTP_MODULE)
276		struct pptp_opt  pptp;
277#endif
278	} proto;
279	unsigned short		num;
280};
281#endif
282
283/* This defines a selective acknowledgement block. */
284struct tcp_sack_block {
285	__u32	start_seq;
286	__u32	end_seq;
287};
288
#if 1 /* Oleg: UDP encapsulation support */
#define UDP_OPT_IN_SOCK 1
#define UDP_ENCAP_L2TPINUDP 3	/* rfc2661 */
struct udp_opt {
	__u16	esp_in_udp;
	__u16	encap_type;
	int	(*encap_rcv)(struct sock *sk, struct sk_buff *skb);
};
297#endif
298
299
300struct tcp_opt {
301	int	tcp_header_len;	/* Bytes of tcp header to send		*/
302
303/*
304 *	Header prediction flags
305 *	0x5?10 << 16 + snd_wnd in net byte order
306 */
307	__u32	pred_flags;
308
309/*
310 *	RFC793 variables by their proper names. This means you can
311 *	read the code and the spec side by side (and laugh ...)
312 *	See RFC793 and RFC1122. The RFC writes these in capitals.
313 */
314 	__u32	rcv_nxt;	/* What we want to receive next 	*/
315 	__u32	snd_nxt;	/* Next sequence we send		*/
316
317 	__u32	snd_una;	/* First byte we want an ack for	*/
318 	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
319	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
320	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
321
322	/* Delayed ACK control data */
323	struct {
324		__u8	pending;	/* ACK is pending */
325		__u8	quick;		/* Scheduled number of quick acks	*/
326		__u8	pingpong;	/* The session is interactive		*/
327		__u8	blocked;	/* Delayed ACK was blocked by socket lock*/
328		__u32	ato;		/* Predicted tick of soft clock		*/
329		unsigned long timeout;	/* Currently scheduled timeout		*/
330		__u32	lrcvtime;	/* timestamp of last received data packet*/
331		__u16	last_seg_size;	/* Size of last incoming segment	*/
332		__u16	rcv_mss;	/* MSS used for delayed ACK decisions	*/
333	} ack;
334
335	/* Data for direct copy to user */
336	struct {
337		struct sk_buff_head	prequeue;
338		struct task_struct	*task;
339		struct iovec		*iov;
340		int			memory;
341		int			len;
342	} ucopy;
343
344	__u32	snd_wl1;	/* Sequence for window update		*/
345	__u32	snd_wnd;	/* The window we expect to receive	*/
346	__u32	max_window;	/* Maximal window ever seen from peer	*/
347	__u32	pmtu_cookie;	/* Last pmtu seen by socket		*/
348	__u16	mss_cache;	/* Cached effective mss, not including SACKS */
349	__u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
350	__u16	ext_header_len;	/* Network protocol overhead (IP/IPv6 options) */
351	__u8	ca_state;	/* State of fast-retransmit machine 	*/
352	__u8	retransmits;	/* Number of unrecovered RTO timeouts.	*/
353
354	__u8	reordering;	/* Packet reordering metric.		*/
355	__u8	queue_shrunk;	/* Write queue has been shrunk recently.*/
356	__u8	defer_accept;	/* User waits for some data after accept() */
357
358/* RTT measurement */
359	__u8	backoff;	/* backoff				*/
	__u32	srtt;		/* smoothed round trip time << 3	*/
	__u32	mdev;		/* mean deviation			*/
362	__u32	mdev_max;	/* maximal mdev for the last rtt period	*/
363	__u32	rttvar;		/* smoothed mdev_max			*/
364	__u32	rtt_seq;	/* sequence number to update rttvar	*/
365	__u32	rto;		/* retransmit timeout			*/
366
367	__u32	packets_out;	/* Packets which are "in flight"	*/
	__u32	left_out;	/* Packets which have left the network	*/
369	__u32	retrans_out;	/* Retransmitted packets out		*/
370
371
372/*
373 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
374 */
375 	__u32	snd_ssthresh;	/* Slow start size threshold		*/
376 	__u32	snd_cwnd;	/* Sending congestion window		*/
377 	__u16	snd_cwnd_cnt;	/* Linear increase counter		*/
378	__u16	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
379	__u32	snd_cwnd_used;
380	__u32	snd_cwnd_stamp;
381
382	/* Two commonly used timers in both sender and receiver paths. */
383	unsigned long		timeout;
384 	struct timer_list	retransmit_timer;	/* Resend (no ack)	*/
385 	struct timer_list	delack_timer;		/* Ack delay 		*/
386
387	struct sk_buff_head	out_of_order_queue; /* Out of order segments go here */
388
389	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific	*/
390	struct sk_buff		*send_head;	/* Front of stuff to transmit			*/
391	struct page		*sndmsg_page;	/* Cached page for sendmsg			*/
392	u32			sndmsg_off;	/* Cached offset for sendmsg			*/
393
394 	__u32	rcv_wnd;	/* Current receiver window		*/
395	__u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
396	__u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
397	__u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
398	__u32	copied_seq;	/* Head of yet unread data		*/
399/*
400 *      Options received (usually on last packet, some only on SYN packets).
401 */
402	char	tstamp_ok,	/* TIMESTAMP seen on SYN packet		*/
403		wscale_ok,	/* Wscale seen on SYN packet		*/
404		sack_ok;	/* SACK seen on SYN packet		*/
405	char	saw_tstamp;	/* Saw TIMESTAMP on last packet		*/
406        __u8	snd_wscale;	/* Window scaling received from sender	*/
407        __u8	rcv_wscale;	/* Window scaling to send to receiver	*/
408	__u8	nonagle;	/* Disable Nagle algorithm?             */
409	__u8	keepalive_probes; /* num of allowed keep alive probes	*/
410
411/*	PAWS/RTTM data	*/
412        __u32	rcv_tsval;	/* Time stamp value             	*/
413        __u32	rcv_tsecr;	/* Time stamp echo reply        	*/
414        __u32	ts_recent;	/* Time stamp to echo next		*/
415        long	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
416
417/*	SACKs data	*/
418	__u16	user_mss;  	/* mss requested by user in ioctl */
419	__u8	dsack;		/* D-SACK is scheduled			*/
420	__u8	eff_sacks;	/* Size of SACK array to send with next packet */
421	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
422	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
423
424	__u32	window_clamp;	/* Maximal window to advertise		*/
425	__u32	rcv_ssthresh;	/* Current window clamp			*/
426	__u8	probes_out;	/* unanswered 0 window probes		*/
427	__u8	num_sacks;	/* Number of SACK blocks		*/
428	__u16	advmss;		/* Advertised MSS			*/
429
430	__u8	syn_retries;	/* num of allowed syn retries */
431	__u8	ecn_flags;	/* ECN status bits.			*/
432	__u16	prior_ssthresh; /* ssthresh saved at recovery start	*/
433	__u32	lost_out;	/* Lost packets				*/
434	__u32	sacked_out;	/* SACK'd packets			*/
435	__u32	fackets_out;	/* FACK'd packets			*/
436	__u32	high_seq;	/* snd_nxt at onset of congestion	*/
437
438	__u32	retrans_stamp;	/* Timestamp of the last retransmit,
439				 * also used in SYN-SENT to remember stamp of
440				 * the first SYN. */
441	__u32	undo_marker;	/* tracking retrans started here. */
442	int	undo_retrans;	/* number of undoable retransmissions. */
443	__u32	urg_seq;	/* Seq of received urgent pointer */
444	__u16	urg_data;	/* Saved octet of OOB data and control flags */
445	__u8	pending;	/* Scheduled timer event	*/
446	__u8	urg_mode;	/* In urgent mode		*/
447	__u32	snd_up;		/* Urgent pointer		*/
448
	/* The syn_wait_lock is necessary only to avoid tcp_get_info having
	 * to grab the main sock lock while browsing the listening hash
	 * (otherwise it's deadlock prone).
	 * This lock is acquired in read mode only from tcp_get_info() and
	 * it's acquired in write mode _only_ from code that is actively
	 * changing the syn_wait_queue. Readers that already hold
	 * the master sock lock don't need to grab this lock in read mode,
	 * as syn_wait_queue writes are always protected by
	 * the main sock lock.
458	 */
459	rwlock_t		syn_wait_lock;
460	struct tcp_listen_opt	*listen_opt;
461
462	/* FIFO of established children */
463	struct open_request	*accept_queue;
464	struct open_request	*accept_queue_tail;
465
466	int			write_pending;	/* A write to socket waits to start. */
467
468	unsigned int		keepalive_time;	  /* time before keep alive takes place */
469	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
470	int			linger2;
471
472	unsigned long last_synq_overflow;
473};
474
475
476/*
477 * This structure really needs to be cleaned up.
478 * Most of it is for TCP, and not used by any of
479 * the other protocols.
480 */
481
482/*
 * The idea is to start moving to a newer struct gradually
484 *
485 * IMHO the newer struct should have the following format:
486 *
487 *	struct sock {
488 *		sockmem [mem, proto, callbacks]
489 *
490 *		union or struct {
491 *			ax25;
492 *		} ll_pinfo;
493 *
494 *		union {
495 *			ipv4;
496 *			ipv6;
497 *			ipx;
498 *			netrom;
499 *			rose;
500 * 			x25;
501 *		} net_pinfo;
502 *
503 *		union {
504 *			tcp;
505 *			udp;
506 *			spx;
507 *			netrom;
508 *		} tp_pinfo;
509 *
510 *	}
511 *
 * The idea failed because the IPv6 transition assumes dual IP/IPv6 sockets.
 * So net_pinfo is really IPv6-only, and protinfo unifies all the other
 * private areas.
515 */
516
517/* Define this to get the sk->debug debugging facility. */
518#define SOCK_DEBUGGING
519#ifdef SOCK_DEBUGGING
520#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
521#else
522#define SOCK_DEBUG(sk, msg...) do { } while (0)
523#endif
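
/*
 * Example (a minimal sketch): SOCK_DEBUG() expands to nothing unless
 * SOCK_DEBUGGING is defined and sk->debug is set (normally via the SO_DEBUG
 * socket option), so it can be sprinkled into hot paths; skb here is just a
 * hypothetical local in the calling function:
 *
 *	SOCK_DEBUG(sk, "queued skb %p, len %u\n", skb, skb->len);
 */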
524
525/* This is the per-socket lock.  The spinlock provides a synchronization
526 * between user contexts and software interrupt processing, whereas the
527 * mini-semaphore synchronizes multiple users amongst themselves.
528 */
529typedef struct {
530	spinlock_t		slock;
531	unsigned int		users;
532	wait_queue_head_t	wq;
533} socket_lock_t;
534
535#define sock_lock_init(__sk) \
536do {	spin_lock_init(&((__sk)->lock.slock)); \
537	(__sk)->lock.users = 0; \
538	init_waitqueue_head(&((__sk)->lock.wq)); \
539} while(0)
540
541struct sock {
542	/* Socket demultiplex comparisons on incoming packets. */
543	__u32			daddr;		/* Foreign IPv4 addr			*/
544	__u32			rcv_saddr;	/* Bound local IPv4 addr		*/
545	__u16			dport;		/* Destination port			*/
546	unsigned short		num;		/* Local port				*/
547	int			bound_dev_if;	/* Bound device index if != 0		*/
548
549	/* Main hash linkage for various protocol lookup tables. */
550	struct sock		*next;
551	struct sock		**pprev;
552	struct sock		*bind_next;
553	struct sock		**bind_pprev;
554
555	volatile unsigned char	state,		/* Connection state			*/
556				zapped;		/* In ax25 & ipx means not linked	*/
557	__u16			sport;		/* Source port				*/
558
559	unsigned short		family;		/* Address family			*/
560	unsigned char		reuse;		/* SO_REUSEADDR setting			*/
561	unsigned char		shutdown;
562	atomic_t		refcnt;		/* Reference count			*/
563
564	socket_lock_t		lock;		/* Synchronizer...			*/
565	int			rcvbuf;		/* Size of receive buffer in bytes	*/
566
567	wait_queue_head_t	*sleep;		/* Sock wait queue			*/
568	struct dst_entry	*dst_cache;	/* Destination cache			*/
569	rwlock_t		dst_lock;
570	atomic_t		rmem_alloc;	/* Receive queue bytes committed	*/
571	struct sk_buff_head	receive_queue;	/* Incoming packets			*/
572	atomic_t		wmem_alloc;	/* Transmit queue bytes committed	*/
573	struct sk_buff_head	write_queue;	/* Packet sending queue			*/
574	atomic_t		omem_alloc;	/* "o" is "option" or "other" */
575	int			wmem_queued;	/* Persistent queue size */
576	int			forward_alloc;	/* Space allocated forward. */
577	__u32			saddr;		/* Sending source			*/
578	unsigned int		allocation;	/* Allocation mode			*/
579	int			sndbuf;		/* Size of send buffer in bytes		*/
580	struct sock		*prev;
581
582	volatile char		dead,
583				done,
584				urginline,
585				keepopen,
586				linger,
587				destroy,
588				no_check,
589				broadcast,
590				bsdism;
591	unsigned char		debug;
592	unsigned char		rcvtstamp;
593	unsigned char		use_write_queue;
594	unsigned char		userlocks;
595	/* Hole of 3 bytes. Try to pack. */
596	int			route_caps;
597	int			proc;
598	unsigned long	        lingertime;
599
600	int			hashent;
601	struct sock		*pair;
602
603	/* The backlog queue is special, it is always used with
604	 * the per-socket spinlock held and requires low latency
	 * access.  Therefore we special case its implementation.
606	 */
607	struct {
608		struct sk_buff *head;
609		struct sk_buff *tail;
610	} backlog;
611
612	rwlock_t		callback_lock;
613
614	/* Error queue, rarely used. */
615	struct sk_buff_head	error_queue;
616
617	struct proto		*prot;
618
619#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
620	union {
621		struct ipv6_pinfo	af_inet6;
622	} net_pinfo;
623#endif
624
625	union {
626		struct tcp_opt		af_tcp;
#if 1 /* Oleg */
628		struct udp_opt          af_udp;
629#endif
630
631#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
632		struct raw_opt		tp_raw4;
633#endif
634#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
635		struct raw6_opt		tp_raw;
636#endif /* CONFIG_IPV6 */
637#if defined(CONFIG_SPX) || defined(CONFIG_SPX_MODULE)
638		struct spx_opt		af_spx;
639#endif /* CONFIG_SPX */
640
641	} tp_pinfo;
642
643	int			err, err_soft;	/* Soft holds errors that don't
644						   cause failure but are the cause
645						   of a persistent failure not just
646						   'timed out' */
647	unsigned short		ack_backlog;
648	unsigned short		max_ack_backlog;
649	__u32			priority;
650	unsigned short		type;
651	unsigned char		localroute;	/* Route locally only */
652	unsigned char		protocol;
653	struct ucred		peercred;
654	int			rcvlowat;
655	long			rcvtimeo;
656	long			sndtimeo;
657
658#ifdef CONFIG_FILTER
659	/* Socket Filtering Instructions */
660	struct sk_filter      	*filter;
661#endif /* CONFIG_FILTER */
662
663	/* This is where all the private (optional) areas that don't
664	 * overlap will eventually live.
665	 */
666	union {
667		void *destruct_hook;
668	  	struct unix_opt	af_unix;
669#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
670		struct inet_opt af_inet;
671#endif
672#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
673		struct atalk_sock	af_at;
674#endif
675#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
676		struct ipx_opt		af_ipx;
677#endif
678#if defined(CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
679		struct dn_scp           dn;
680#endif
681#if defined(CONFIG_PACKET) || defined(CONFIG_PACKET_MODULE)
682		struct packet_opt	*af_packet;
683#endif
684#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
685		x25_cb			*x25;
686#endif
687#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
688		ax25_cb			*ax25;
689#endif
690#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
691		nr_cb			*nr;
692#endif
693#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
694		rose_cb			*rose;
695#endif
696#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE) \
697 || defined(CONFIG_PPPOL2TP) || defined(CONFIG_PPPOL2TP_MODULE) \
698 || defined(CONFIG_PPTP) || defined(CONFIG_PPTP_MODULE)
699		struct pppox_opt	*pppox;
700#endif
701		struct netlink_opt	*af_netlink;
702#if defined(CONFIG_ECONET) || defined(CONFIG_ECONET_MODULE)
703		struct econet_opt	*af_econet;
704#endif
705#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
706		struct atm_vcc		*af_atm;
707#endif
708#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
709		struct irda_sock        *irda;
710#endif
711#if defined(CONFIG_WAN_ROUTER) || defined(CONFIG_WAN_ROUTER_MODULE)
712               struct wanpipe_opt      *af_wanpipe;
713#endif
714	} protinfo;
715
716
717	/* This part is used for the timeout functions. */
718	struct timer_list	timer;		/* This is the sock cleanup timer. */
719	struct timeval		stamp;
720
721	/* Identd and reporting IO signals */
722	struct socket		*socket;
723
724	/* RPC layer private data */
725	void			*user_data;
726
727	/* Callbacks */
728	void			(*state_change)(struct sock *sk);
729	void			(*data_ready)(struct sock *sk,int bytes);
730	void			(*write_space)(struct sock *sk);
731	void			(*error_report)(struct sock *sk);
732
733  	int			(*backlog_rcv) (struct sock *sk,
734						struct sk_buff *skb);
735	void                    (*destruct)(struct sock *sk);
736};
737
738#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE) \
739 || defined(CONFIG_PPPOL2TP) || defined(CONFIG_PPPOL2TP_MODULE) \
740 || defined(CONFIG_PPTP) || defined(CONFIG_PPTP_MODULE)
741static inline struct pppox_sock *pppox_sk(struct sock *sk)
742{
743	return (struct pppox_sock *)sk->protinfo.pppox;
744}
745
746static inline struct sock *sk_pppox(struct pppox_sock *po)
747{
748	return po->sk;
749}
750#endif
751
752/* The per-socket spinlock must be held here. */
753#define sk_add_backlog(__sk, __skb)			\
754do {	if((__sk)->backlog.tail == NULL) {		\
755		(__sk)->backlog.head =			\
756		     (__sk)->backlog.tail = (__skb);	\
757	} else {					\
758		((__sk)->backlog.tail)->next = (__skb);	\
759		(__sk)->backlog.tail = (__skb);		\
760	}						\
761	(__skb)->next = NULL;				\
762} while(0)
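
/*
 * A minimal sketch of how a protocol receive handler uses the backlog,
 * modelled loosely on the tcp_v4_rcv() pattern; my_do_rcv() is a
 * hypothetical per-protocol handler, not something declared in this header.
 * Packets queued this way are later fed to sk->backlog_rcv by
 * __release_sock() when the lock owner releases the socket:
 *
 *	bh_lock_sock(sk);
 *	if (sk->lock.users == 0)
 *		ret = my_do_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */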
763
764/* IP protocol blocks we attach to sockets.
765 * socket layer -> transport layer interface
766 * transport -> network interface is defined by struct inet_proto
767 */
768struct proto {
769	void			(*close)(struct sock *sk,
770					long timeout);
771	int			(*connect)(struct sock *sk,
772				        struct sockaddr *uaddr,
773					int addr_len);
774	int			(*disconnect)(struct sock *sk, int flags);
775
776	struct sock *		(*accept) (struct sock *sk, int flags, int *err);
777
778	int			(*ioctl)(struct sock *sk, int cmd,
779					 unsigned long arg);
780	int			(*init)(struct sock *sk);
781	int			(*destroy)(struct sock *sk);
782	void			(*shutdown)(struct sock *sk, int how);
783	int			(*setsockopt)(struct sock *sk, int level,
784					int optname, char *optval, int optlen);
785	int			(*getsockopt)(struct sock *sk, int level,
786					int optname, char *optval,
787					int *option);
788	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
789					   int len);
790	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
791					int len, int noblock, int flags,
792					int *addr_len);
793	int			(*bind)(struct sock *sk,
794					struct sockaddr *uaddr, int addr_len);
795
796	int			(*backlog_rcv) (struct sock *sk,
797						struct sk_buff *skb);
798
799	/* Keeping track of sk's, looking them up, and port selection methods. */
800	void			(*hash)(struct sock *sk);
801	void			(*unhash)(struct sock *sk);
802	int			(*get_port)(struct sock *sk, unsigned short snum);
803
804	char			name[32];
805
806	struct {
807		int inuse;
808		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
809	} stats[NR_CPUS];
810};
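
/*
 * A hedged sketch of how a transport protocol fills in struct proto (the
 * field subset is modelled on the in-tree IPv4 protocols; the my_* names
 * are purely illustrative):
 *
 *	struct proto my_prot = {
 *		name:		"MYPROTO",
 *		close:		my_close,
 *		connect:	my_connect,
 *		disconnect:	my_disconnect,
 *		sendmsg:	my_sendmsg,
 *		recvmsg:	my_recvmsg,
 *		backlog_rcv:	my_backlog_rcv,
 *		hash:		my_hash,
 *		unhash:		my_unhash,
 *		get_port:	my_get_port,
 *	};
 */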
811
812/* Called with local bh disabled */
813static __inline__ void sock_prot_inc_use(struct proto *prot)
814{
815	prot->stats[smp_processor_id()].inuse++;
816}
817
818static __inline__ void sock_prot_dec_use(struct proto *prot)
819{
820	prot->stats[smp_processor_id()].inuse--;
821}
822
823/* About 10 seconds */
824#define SOCK_DESTROY_TIME (10*HZ)
825
826/* Sockets 0-1023 can't be bound to unless you are superuser */
827#define PROT_SOCK	1024
828
829#define SHUTDOWN_MASK	3
830#define RCV_SHUTDOWN	1
831#define SEND_SHUTDOWN	2
832
833#define SOCK_SNDBUF_LOCK	1
834#define SOCK_RCVBUF_LOCK	2
835#define SOCK_BINDADDR_LOCK	4
836#define SOCK_BINDPORT_LOCK	8
837
838
839/* Used by processes to "lock" a socket state, so that
840 * interrupts and bottom half handlers won't change it
841 * from under us. It essentially blocks any incoming
842 * packets, so that we won't get any new data or any
843 * packets that change the state of the socket.
844 *
845 * While locked, BH processing will add new packets to
846 * the backlog queue.  This queue is processed by the
847 * owner of the socket lock right before it is released.
848 *
 * Since ~2.3.5 it has also been an exclusive sleep lock serializing
 * accesses from user process context.
851 */
852extern void __lock_sock(struct sock *sk);
853extern void __release_sock(struct sock *sk);
854#define lock_sock(__sk) \
855do {	spin_lock_bh(&((__sk)->lock.slock)); \
856	if ((__sk)->lock.users != 0) \
857		__lock_sock(__sk); \
858	(__sk)->lock.users = 1; \
859	spin_unlock_bh(&((__sk)->lock.slock)); \
860} while(0)
861
862#define release_sock(__sk) \
863do {	spin_lock_bh(&((__sk)->lock.slock)); \
864	if ((__sk)->backlog.tail != NULL) \
865		__release_sock(__sk); \
866	(__sk)->lock.users = 0; \
867        if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
868	spin_unlock_bh(&((__sk)->lock.slock)); \
869} while(0)
870
871/* BH context may only use the following locking interface. */
872#define bh_lock_sock(__sk)	spin_lock(&((__sk)->lock.slock))
873#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->lock.slock))
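
/*
 * Usage sketch: process context (e.g. a setsockopt or sendmsg path) takes
 * the sleeping lock, while softirq/BH context must only ever use the
 * bh_lock_sock()/bh_unlock_sock() spinlock variants above:
 *
 *	lock_sock(sk);		(process context, may sleep)
 *	...
 *	release_sock(sk);	(processes the backlog, wakes other lockers)
 *
 *	bh_lock_sock(sk);	(softirq context, never sleeps)
 *	...
 *	bh_unlock_sock(sk);
 */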
874
875extern struct sock *		sk_alloc(int family, int priority, int zero_it);
876extern void			sk_free(struct sock *sk);
877
878extern struct sk_buff		*sock_wmalloc(struct sock *sk,
879					      unsigned long size, int force,
880					      int priority);
881extern struct sk_buff		*sock_rmalloc(struct sock *sk,
882					      unsigned long size, int force,
883					      int priority);
884extern void			sock_wfree(struct sk_buff *skb);
885extern void			sock_rfree(struct sk_buff *skb);
886
887extern int			sock_setsockopt(struct socket *sock, int level,
888						int op, char *optval,
889						int optlen);
890
891extern int			sock_getsockopt(struct socket *sock, int level,
892						int op, char *optval,
893						int *optlen);
894extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
895						     unsigned long size,
896						     int noblock,
897						     int *errcode);
898extern struct sk_buff 		*sock_alloc_send_pskb(struct sock *sk,
899						      unsigned long header_len,
900						      unsigned long data_len,
901						      int noblock,
902						      int *errcode);
903extern void *sock_kmalloc(struct sock *sk, int size, int priority);
904extern void sock_kfree_s(struct sock *sk, void *mem, int size);
905
906/*
907 * Functions to fill in entries in struct proto_ops when a protocol
908 * does not implement a particular function.
909 */
910extern int                      sock_no_release(struct socket *);
911extern int                      sock_no_bind(struct socket *,
912					     struct sockaddr *, int);
913extern int                      sock_no_connect(struct socket *,
914						struct sockaddr *, int, int);
915extern int                      sock_no_socketpair(struct socket *,
916						   struct socket *);
917extern int                      sock_no_accept(struct socket *,
918					       struct socket *, int);
919extern int                      sock_no_getname(struct socket *,
920						struct sockaddr *, int *, int);
921extern unsigned int             sock_no_poll(struct file *, struct socket *,
922					     struct poll_table_struct *);
923extern int                      sock_no_ioctl(struct socket *, unsigned int,
924					      unsigned long);
925extern int			sock_no_listen(struct socket *, int);
926extern int                      sock_no_shutdown(struct socket *, int);
927extern int			sock_no_getsockopt(struct socket *, int , int,
928						   char *, int *);
929extern int			sock_no_setsockopt(struct socket *, int, int,
930						   char *, int);
931extern int 			sock_no_fcntl(struct socket *,
932					      unsigned int, unsigned long);
933extern int                      sock_no_sendmsg(struct socket *,
934						struct msghdr *, int,
935						struct scm_cookie *);
936extern int                      sock_no_recvmsg(struct socket *,
937						struct msghdr *, int, int,
938						struct scm_cookie *);
939extern int			sock_no_mmap(struct file *file,
940					     struct socket *sock,
941					     struct vm_area_struct *vma);
942extern ssize_t			sock_no_sendpage(struct socket *sock,
943						struct page *page,
944						int offset, size_t size,
945						int flags);
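
/*
 * Protocols point their struct proto_ops entries at these stubs for
 * operations they do not implement. A hedged sketch (the field subset and
 * the my_* and PF_MYPROTO names are illustrative only):
 *
 *	struct proto_ops my_proto_ops = {
 *		family:		PF_MYPROTO,
 *		release:	my_release,
 *		bind:		my_bind,
 *		connect:	sock_no_connect,
 *		socketpair:	sock_no_socketpair,
 *		accept:		sock_no_accept,
 *		ioctl:		sock_no_ioctl,
 *		mmap:		sock_no_mmap,
 *		sendpage:	sock_no_sendpage,
 *	};
 */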
946
947/*
948 *	Default socket callbacks and setup code
949 */
950
951extern void sock_def_destruct(struct sock *);
952
953/* Initialise core socket variables */
954extern void sock_init_data(struct socket *sock, struct sock *sk);
955
956extern void sklist_remove_socket(struct sock **list, struct sock *sk);
957extern void sklist_insert_socket(struct sock **list, struct sock *sk);
958extern void sklist_destroy_socket(struct sock **list, struct sock *sk);
959
960#ifdef CONFIG_FILTER
961
962/**
963 *	sk_filter - run a packet through a socket filter
964 *	@skb: buffer to filter
965 *	@filter: filter to apply
966 *
 * Run the filter code and then trim skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper for sk_run_filter. It returns 0 if the packet should
 * be accepted or 1 if the packet should be tossed.
972 */
973
974static inline int sk_filter(struct sk_buff *skb, struct sk_filter *filter)
975{
976	int pkt_len;
977
978        pkt_len = sk_run_filter(skb, filter->insns, filter->len);
979        if(!pkt_len)
980                return 1;	/* Toss Packet */
981        else
982                skb_trim(skb, pkt_len);
983
984	return 0;
985}
986
987/**
988 *	sk_filter_release: Release a socket filter
989 *	@sk: socket
990 *	@fp: filter to remove
991 *
992 *	Remove a filter from a socket and release its resources.
993 */
994
995static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
996{
997	unsigned int size = sk_filter_len(fp);
998
999	atomic_sub(size, &sk->omem_alloc);
1000
1001	if (atomic_dec_and_test(&fp->refcnt))
1002		kfree(fp);
1003}
1004
1005static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
1006{
1007	atomic_inc(&fp->refcnt);
1008	atomic_add(sk_filter_len(fp), &sk->omem_alloc);
1009}
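
/*
 * A sketch of the charge/release pairing when swapping in a new filter,
 * loosely following what an SO_ATTACH_FILTER-style path does (new_fp and
 * old_fp are hypothetical locals; locking against concurrent readers is
 * omitted here):
 *
 *	sk_filter_charge(sk, new_fp);
 *	old_fp = sk->filter;
 *	sk->filter = new_fp;
 *	if (old_fp)
 *		sk_filter_release(sk, old_fp);
 */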
1010
1011#endif /* CONFIG_FILTER */
1012
1013/*
1014 * Socket reference counting postulates.
1015 *
 * * Each user of a socket SHOULD hold a reference count.
 * * Each access point to a socket (a hash table bucket, a reference from a
 *   list, a running timer, an skb in flight) MUST hold a reference count.
 * * When the reference count hits 0, it will never increase again.
 * * When the reference count hits 0, no references to this socket exist
 *   from outside; the current process on the current CPU is the last
 *   user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, the socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but by
 *   the time it is called the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets delivered from outside (from the network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab a reference count
 *   while they sit in a queue. Otherwise packets leak into a hole when a
 *   socket is looked up by one CPU and unhashing is done by another CPU.
 *   This is true for udp/raw, netlink (leak to receive and error queues) and
 *   tcp (leak to backlog). The packet socket does all of its processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they avoid this race as well.
1036 */
1037
/* Grab a socket reference count. This operation is valid only
   when sk has ALREADY been grabbed, e.g. it was found in a hash table
   or a list and the lookup was made under a lock preventing hash table
   modifications.
1042 */
1043
1044static inline void sock_hold(struct sock *sk)
1045{
1046	atomic_inc(&sk->refcnt);
1047}
1048
/* Ungrab a socket in a context which guarantees that the socket refcnt
   cannot hit zero, e.g. this is true in the context of any socketcall.
1051 */
1052static inline void __sock_put(struct sock *sk)
1053{
1054	atomic_dec(&sk->refcnt);
1055}
1056
/* Ungrab the socket and destroy it if that was the last reference. */
1058static inline void sock_put(struct sock *sk)
1059{
1060	if (atomic_dec_and_test(&sk->refcnt))
1061		sk_free(sk);
1062}
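
/*
 * Typical lookup pattern, sketched under the assumption that a protocol
 * keeps its sockets in some hash table guarded by a lock (my_hash_lock and
 * my_lookup() are hypothetical): the reference is taken while the table
 * lock still prevents unhashing, and dropped with sock_put() when done.
 *
 *	read_lock(&my_hash_lock);
 *	sk = my_lookup(daddr, dport);
 *	if (sk)
 *		sock_hold(sk);
 *	read_unlock(&my_hash_lock);
 *	...
 *	if (sk)
 *		sock_put(sk);
 */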
1063
/* Detach a socket from process context.
 * Announce the socket dead, detach it from its wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants some additional cleanup or even to continue
 * working with this socket (TCP).
1070 */
1071static inline void sock_orphan(struct sock *sk)
1072{
1073	write_lock_bh(&sk->callback_lock);
1074	sk->dead = 1;
1075	sk->socket = NULL;
1076	sk->sleep = NULL;
1077	write_unlock_bh(&sk->callback_lock);
1078}
1079
1080static inline void sock_graft(struct sock *sk, struct socket *parent)
1081{
1082	write_lock_bh(&sk->callback_lock);
1083	sk->sleep = &parent->wait;
1084	parent->sk = sk;
1085	sk->socket = parent;
1086	write_unlock_bh(&sk->callback_lock);
1087}
1088
1089static inline int sock_i_uid(struct sock *sk)
1090{
1091	int uid;
1092
1093	read_lock(&sk->callback_lock);
1094	uid = sk->socket ? sk->socket->inode->i_uid : 0;
1095	read_unlock(&sk->callback_lock);
1096	return uid;
1097}
1098
1099static inline unsigned long sock_i_ino(struct sock *sk)
1100{
1101	unsigned long ino;
1102
1103	read_lock(&sk->callback_lock);
1104	ino = sk->socket ? sk->socket->inode->i_ino : 0;
1105	read_unlock(&sk->callback_lock);
1106	return ino;
1107}
1108
1109static inline struct dst_entry *
1110__sk_dst_get(struct sock *sk)
1111{
1112	return sk->dst_cache;
1113}
1114
1115static inline struct dst_entry *
1116sk_dst_get(struct sock *sk)
1117{
1118	struct dst_entry *dst;
1119
1120	read_lock(&sk->dst_lock);
1121	dst = sk->dst_cache;
1122	if (dst)
1123		dst_hold(dst);
1124	read_unlock(&sk->dst_lock);
1125	return dst;
1126}
1127
1128static inline void
1129__sk_dst_set(struct sock *sk, struct dst_entry *dst)
1130{
1131	struct dst_entry *old_dst;
1132
1133	old_dst = sk->dst_cache;
1134	sk->dst_cache = dst;
1135	dst_release(old_dst);
1136}
1137
1138static inline void
1139sk_dst_set(struct sock *sk, struct dst_entry *dst)
1140{
1141	write_lock(&sk->dst_lock);
1142	__sk_dst_set(sk, dst);
1143	write_unlock(&sk->dst_lock);
1144}
1145
1146static inline void
1147__sk_dst_reset(struct sock *sk)
1148{
1149	struct dst_entry *old_dst;
1150
1151	old_dst = sk->dst_cache;
1152	sk->dst_cache = NULL;
1153	dst_release(old_dst);
1154}
1155
1156static inline void
1157sk_dst_reset(struct sock *sk)
1158{
1159	write_lock(&sk->dst_lock);
1160	__sk_dst_reset(sk);
1161	write_unlock(&sk->dst_lock);
1162}
1163
1164static inline struct dst_entry *
1165__sk_dst_check(struct sock *sk, u32 cookie)
1166{
1167	struct dst_entry *dst = sk->dst_cache;
1168
1169	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
1170		sk->dst_cache = NULL;
1171		return NULL;
1172	}
1173
1174	return dst;
1175}
1176
1177static inline struct dst_entry *
1178sk_dst_check(struct sock *sk, u32 cookie)
1179{
1180	struct dst_entry *dst = sk_dst_get(sk);
1181
1182	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
1183		sk_dst_reset(sk);
1184		return NULL;
1185	}
1186
1187	return dst;
1188}
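
/*
 * A minimal transmit-path sketch, assuming IPv4 routing: revalidate the
 * cached route and re-route when it has gone stale. ip_route_output(), rt
 * and tos are illustrative names from the IPv4 side, not declared here:
 *
 *	dst = sk_dst_check(sk, 0);
 *	if (dst == NULL) {
 *		if (ip_route_output(&rt, sk->daddr, sk->saddr, tos,
 *				    sk->bound_dev_if))
 *			goto no_route;
 *		dst = &rt->u.dst;
 *		sk_dst_set(sk, dst_clone(dst));
 *	}
 */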
1189
1190
1191/*
1192 * 	Queue a received datagram if it will fit. Stream and sequenced
1193 *	protocols can't normally use this as they need to fit buffers in
1194 *	and play with them.
1195 *
1196 * 	Inlined as it's very short and called for pretty much every
1197 *	packet ever received.
1198 */
1199
1200static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1201{
1202	sock_hold(sk);
1203	skb->sk = sk;
1204	skb->destructor = sock_wfree;
1205	atomic_add(skb->truesize, &sk->wmem_alloc);
1206}
1207
1208static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
1209{
1210	skb->sk = sk;
1211	skb->destructor = sock_rfree;
1212	atomic_add(skb->truesize, &sk->rmem_alloc);
1213}
1214
1215static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1216{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
1219	 */
1220	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
1221                return -ENOMEM;
1222
1223#ifdef CONFIG_FILTER
1224	if (sk->filter) {
1225		int err = 0;
1226		struct sk_filter *filter;
1227
		/* It would deadlock if sock_queue_rcv_skb were used
		   with the socket lock held! We assume that callers of this
		   function do not hold the socket lock.
1231		 */
1232		bh_lock_sock(sk);
1233		if ((filter = sk->filter) != NULL && sk_filter(skb, filter))
1234			err = -EPERM;
1235		bh_unlock_sock(sk);
1236		if (err)
1237			return err;	/* Toss packet */
1238	}
1239#endif /* CONFIG_FILTER */
1240
1241	skb->dev = NULL;
1242	skb_set_owner_r(skb, sk);
1243	skb_queue_tail(&sk->receive_queue, skb);
1244	if (!sk->dead)
1245		sk->data_ready(sk,skb->len);
1246	return 0;
1247}
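
/*
 * Sketch of the usual call site in a datagram protocol's rcv handler
 * (error accounting trimmed); on failure the caller still owns the skb and
 * must free it:
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return -1;
 *	}
 *	return 0;
 */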
1248
1249static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
1250{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
1253	 */
1254	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
1255		return -ENOMEM;
1256	skb_set_owner_r(skb, sk);
1257	skb_queue_tail(&sk->error_queue,skb);
1258	if (!sk->dead)
1259		sk->data_ready(sk,skb->len);
1260	return 0;
1261}
1262
1263/*
1264 *	Recover an error report and clear atomically
1265 */
1266
1267static inline int sock_error(struct sock *sk)
1268{
1269	int err=xchg(&sk->err,0);
1270	return -err;
1271}
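
/*
 * Typical use near the top of a sendmsg/recvmsg implementation, as a sketch:
 * any pending asynchronous error is reported (and cleared) before doing work.
 *
 *	err = sock_error(sk);
 *	if (err)
 *		goto out_err;
 */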
1272
1273static inline unsigned long sock_wspace(struct sock *sk)
1274{
1275	int amt = 0;
1276
1277	if (!(sk->shutdown & SEND_SHUTDOWN)) {
1278		amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
1279		if (amt < 0)
1280			amt = 0;
1281	}
1282	return amt;
1283}
1284
1285static inline void sk_wake_async(struct sock *sk, int how, int band)
1286{
1287	if (sk->socket && sk->socket->fasync_list)
1288		sock_wake_async(sk->socket, how, band);
1289}
1290
1291#define SOCK_MIN_SNDBUF 2048
1292#define SOCK_MIN_RCVBUF 256
1293
1294/*
1295 *	Default write policy as shown to user space via poll/select/SIGIO
1296 */
1297static inline int sock_writeable(struct sock *sk)
1298{
1299	return atomic_read(&sk->wmem_alloc) < (sk->sndbuf / 2);
1300}
1301
1302static inline int gfp_any(void)
1303{
1304	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
1305}
1306
1307static inline long sock_rcvtimeo(struct sock *sk, int noblock)
1308{
1309	return noblock ? 0 : sk->rcvtimeo;
1310}
1311
1312static inline long sock_sndtimeo(struct sock *sk, int noblock)
1313{
1314	return noblock ? 0 : sk->sndtimeo;
1315}
1316
1317static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
1318{
1319	return (waitall ? len : min_t(int, sk->rcvlowat, len)) ? : 1;
1320}
1321
/* Alas, socket operations with a timeout are not restartable.
1323 * Compare this to poll().
1324 */
1325static inline int sock_intr_errno(long timeo)
1326{
1327	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
1328}
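
/*
 * How the timeout helpers combine in a blocking receive, sketched loosely
 * on the datagram receive path (the actual wait loop lives elsewhere and is
 * elided here):
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	... sleep until data arrives, timeo expires or a signal is pending ...
 *	if (signal_pending(current))
 *		return sock_intr_errno(timeo);
 */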
1329
1330static __inline__ void
1331sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
1332{
1333	if (sk->rcvtstamp)
1334		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(skb->stamp), &skb->stamp);
1335	else
1336		sk->stamp = skb->stamp;
1337}
1338
1339/*
1340 *	Enable debug/info messages
1341 */
1342
1343#define NETDEBUG(x)	do { x; } while (0)
1344
1345/*
1346 * Macros for sleeping on a socket. Use them like this:
1347 *
1348 * SOCK_SLEEP_PRE(sk)
1349 * if (condition)
1350 * 	schedule();
1351 * SOCK_SLEEP_POST(sk)
1352 *
1353 */
1354
1355#define SOCK_SLEEP_PRE(sk) 	{ struct task_struct *tsk = current; \
1356				DECLARE_WAITQUEUE(wait, tsk); \
1357				tsk->state = TASK_INTERRUPTIBLE; \
1358				add_wait_queue((sk)->sleep, &wait); \
1359				release_sock(sk);
1360
1361#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
1362				remove_wait_queue((sk)->sleep, &wait); \
1363				lock_sock(sk); \
1364				}
1365
1366extern __u32 sysctl_wmem_max;
1367extern __u32 sysctl_rmem_max;
1368
1369#endif	/* _SOCK_H */
1370