/*
 * ip_vs_proto_tcp.c:	TCP load balancing support for IPVS
 *
 * Version:     $Id: ip_vs_proto_tcp.c,v 1.1.1.1 2007/08/03 18:53:52 Exp $
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *
 */

#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h>                  /* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h>                    /* for csum_tcpudp_magic */
#include <linux/netfilter_ipv4.h>

#include <net/ip_vs.h>


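/*
 * Connection-table lookups.  The TCP header begins with the 16-bit
 * source and destination ports, so one 4-byte skb_header_pointer()
 * read fetches both: pptr[0] is the source port, pptr[1] the
 * destination.  The inverse flag swaps the tuple so the same helper
 * serves reply-direction lookups.
 */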
static struct ip_vs_conn *
tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
		const struct iphdr *iph, unsigned int proto_off, int inverse)
{
	__be16 _ports[2], *pptr;

	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
	if (pptr == NULL)
		return NULL;

	if (likely(!inverse)) {
		return ip_vs_conn_in_get(iph->protocol,
					 iph->saddr, pptr[0],
					 iph->daddr, pptr[1]);
	} else {
		return ip_vs_conn_in_get(iph->protocol,
					 iph->daddr, pptr[1],
					 iph->saddr, pptr[0]);
	}
}

static struct ip_vs_conn *
tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
		 const struct iphdr *iph, unsigned int proto_off, int inverse)
{
	__be16 _ports[2], *pptr;

	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
	if (pptr == NULL)
		return NULL;

	if (likely(!inverse)) {
		return ip_vs_conn_out_get(iph->protocol,
					  iph->saddr, pptr[0],
					  iph->daddr, pptr[1]);
	} else {
		return ip_vs_conn_out_get(iph->protocol,
					  iph->daddr, pptr[1],
					  iph->saddr, pptr[0]);
	}
}


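/*
 * Decide whether IPVS should handle this packet.  Only SYN segments
 * addressed to a configured virtual service are scheduled.  A return
 * of 0 tells the caller to use *verdict as the netfilter result; 1
 * means continue normal processing.  Note that ip_vs_leave() drops
 * the service reference itself, so the early return after it is not
 * a leak.
 */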
static int
tcp_conn_schedule(struct sk_buff *skb,
		  struct ip_vs_protocol *pp,
		  int *verdict, struct ip_vs_conn **cpp)
{
	struct ip_vs_service *svc;
	struct tcphdr _tcph, *th;

	th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
	if (th == NULL) {
		*verdict = NF_DROP;
		return 0;
	}

	if (th->syn &&
	    (svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol,
				     ip_hdr(skb)->daddr, th->dest))) {
		if (ip_vs_todrop()) {
			/*
			 * It seems that we are very loaded.
			 * We have to drop this packet :(
			 */
			ip_vs_service_put(svc);
			*verdict = NF_DROP;
			return 0;
		}

		/*
		 * Let the virtual server select a real server for the
		 * incoming connection, and create a connection entry.
		 */
		*cpp = ip_vs_schedule(svc, skb);
		if (!*cpp) {
			*verdict = ip_vs_leave(svc, skb, pp);
			return 0;
		}
		ip_vs_service_put(svc);
	}
	return 1;
}


static inline void
tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip,
		     __be16 oldport, __be16 newport)
{
	tcph->check =
		csum_fold(ip_vs_check_diff4(oldip, newip,
				 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(tcph->check))));
}
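
/*
 * The helper above performs an RFC 1624 style incremental checksum
 * update: rather than re-summing the whole segment, only the
 * difference between old and new address/port is folded into the
 * existing checksum.  Sketch in one's-complement arithmetic:
 *
 *	check' = ~(~check + ~oldip + newip + ~oldport + newport)
 *
 * which is what ip_vs_check_diff4()/ip_vs_check_diff2() accumulate
 * before csum_fold() compresses the result back to 16 bits.
 */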


static int
tcp_snat_handler(struct sk_buff **pskb,
		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	struct tcphdr *tcph;
	const unsigned int tcphoff = ip_hdrlen(*pskb);

	/* csum_check requires unshared skb */
	if (!ip_vs_make_skb_writable(pskb, tcphoff+sizeof(*tcph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(*pskb, pp))
			return 0;

		/* Call application helper if needed */
		if (!ip_vs_app_pkt_out(cp, pskb))
			return 0;
	}

	tcph = (void *)ip_hdr(*pskb) + tcphoff;
	tcph->source = cp->vport;

	/* Adjust TCP checksums */
	if (!cp->app) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr,
				     cp->dport, cp->vport);
		if ((*pskb)->ip_summed == CHECKSUM_COMPLETE)
			(*pskb)->ip_summed = CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		tcph->check = 0;
		(*pskb)->csum = skb_checksum(*pskb, tcphoff,
					     (*pskb)->len - tcphoff, 0);
		tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr,
						(*pskb)->len - tcphoff,
						cp->protocol,
						(*pskb)->csum);
		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
			  pp->name, tcph->check,
			  (char*)&(tcph->check) - (char*)tcph);
	}
	return 1;
}


static int
tcp_dnat_handler(struct sk_buff **pskb,
		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	struct tcphdr *tcph;
	const unsigned int tcphoff = ip_hdrlen(*pskb);

	/* csum_check requires unshared skb */
	if (!ip_vs_make_skb_writable(pskb, tcphoff+sizeof(*tcph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(*pskb, pp))
			return 0;

		/*
		 *	Attempt ip_vs_app call.
		 *	It will fix ip_vs_conn and iph ack_seq stuff
		 */
		if (!ip_vs_app_pkt_in(cp, pskb))
			return 0;
	}

	tcph = (void *)ip_hdr(*pskb) + tcphoff;
	tcph->dest = cp->dport;

	/*
	 *	Adjust TCP checksums
	 */
	if (!cp->app) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr,
				     cp->vport, cp->dport);
		if ((*pskb)->ip_summed == CHECKSUM_COMPLETE)
			(*pskb)->ip_summed = CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		tcph->check = 0;
		(*pskb)->csum = skb_checksum(*pskb, tcphoff,
					     (*pskb)->len - tcphoff, 0);
		tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr,
						(*pskb)->len - tcphoff,
						cp->protocol,
						(*pskb)->csum);
		(*pskb)->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return 1;
}


static int
tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
{
	const unsigned int tcphoff = ip_hdrlen(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
		/* fall through: verify the freshly computed sum below */
	case CHECKSUM_COMPLETE:
		if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				      skb->len - tcphoff,
				      ip_hdr(skb)->protocol, skb->csum)) {
			IP_VS_DBG_RL_PKT(0, pp, skb, 0,
					 "Failed checksum for");
			return 0;
		}
		break;
	default:
		/* No need to checksum. */
		break;
	}

	return 1;
}


#define TCP_DIR_INPUT		0
#define TCP_DIR_OUTPUT		4
#define TCP_DIR_INPUT_ONLY	8

static const int tcp_state_off[IP_VS_DIR_LAST] = {
	[IP_VS_DIR_INPUT]		=	TCP_DIR_INPUT,
	[IP_VS_DIR_OUTPUT]		=	TCP_DIR_OUTPUT,
	[IP_VS_DIR_INPUT_ONLY]		=	TCP_DIR_INPUT_ONLY,
};

/*
 *	Timeout table[state]
 */
static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
	[IP_VS_TCP_S_NONE]		=	2*HZ,
	[IP_VS_TCP_S_ESTABLISHED]	=	15*60*HZ,
	[IP_VS_TCP_S_SYN_SENT]		=	2*60*HZ,
	[IP_VS_TCP_S_SYN_RECV]		=	1*60*HZ,
	[IP_VS_TCP_S_FIN_WAIT]		=	2*60*HZ,
	[IP_VS_TCP_S_TIME_WAIT]		=	2*60*HZ,
	[IP_VS_TCP_S_CLOSE]		=	10*HZ,
	[IP_VS_TCP_S_CLOSE_WAIT]	=	60*HZ,
	[IP_VS_TCP_S_LAST_ACK]		=	30*HZ,
	[IP_VS_TCP_S_LISTEN]		=	2*60*HZ,
	[IP_VS_TCP_S_SYNACK]		=	120*HZ,
	[IP_VS_TCP_S_LAST]		=	2*HZ,
};
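
/*
 * All entries are in jiffies; scaling by HZ keeps the wall-clock
 * values independent of the kernel tick rate.  For example, with
 * HZ == 1000 the ESTABLISHED entry 15*60*HZ is 900000 jiffies, i.e.
 * an idle established connection expires after 15 minutes.
 */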

static char *tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
	[IP_VS_TCP_S_NONE]		=	"NONE",
	[IP_VS_TCP_S_ESTABLISHED]	=	"ESTABLISHED",
	[IP_VS_TCP_S_SYN_SENT]		=	"SYN_SENT",
	[IP_VS_TCP_S_SYN_RECV]		=	"SYN_RECV",
	[IP_VS_TCP_S_FIN_WAIT]		=	"FIN_WAIT",
	[IP_VS_TCP_S_TIME_WAIT]		=	"TIME_WAIT",
	[IP_VS_TCP_S_CLOSE]		=	"CLOSE",
	[IP_VS_TCP_S_CLOSE_WAIT]	=	"CLOSE_WAIT",
	[IP_VS_TCP_S_LAST_ACK]		=	"LAST_ACK",
	[IP_VS_TCP_S_LISTEN]		=	"LISTEN",
	[IP_VS_TCP_S_SYNACK]		=	"SYNACK",
	[IP_VS_TCP_S_LAST]		=	"BUG!",
};

#define sNO IP_VS_TCP_S_NONE
#define sES IP_VS_TCP_S_ESTABLISHED
#define sSS IP_VS_TCP_S_SYN_SENT
#define sSR IP_VS_TCP_S_SYN_RECV
#define sFW IP_VS_TCP_S_FIN_WAIT
#define sTW IP_VS_TCP_S_TIME_WAIT
#define sCL IP_VS_TCP_S_CLOSE
#define sCW IP_VS_TCP_S_CLOSE_WAIT
#define sLA IP_VS_TCP_S_LAST_ACK
#define sLI IP_VS_TCP_S_LISTEN
#define sSA IP_VS_TCP_S_SYNACK

struct tcp_states_t {
	int next_state[IP_VS_TCP_S_LAST];
};

static const char *tcp_state_name(int state)
{
	if (state >= IP_VS_TCP_S_LAST)
		return "ERR!";
	return tcp_state_name_table[state] ? tcp_state_name_table[state] : "?";
}

static struct tcp_states_t tcp_states[] = {
/*	INPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }},

/*	OUTPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},

/*	INPUT-ONLY */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
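
/*
 * Worked example of a normal handshake through the table above,
 * where the row is tcp_state_off[direction] + tcp_state_idx(th) and
 * the column is the current cp->state:
 *
 *	client SYN     (INPUT,  syn row): sNO -> sSR
 *	server SYN+ACK (OUTPUT, syn row): sSR -> sSR
 *	client ACK     (INPUT,  ack row): sSR -> sES
 */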

static struct tcp_states_t tcp_states_dos[] = {
/*	INPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }},
/*ack*/ {{sCL, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},

/*	OUTPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},

/*	INPUT-ONLY */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};

static struct tcp_states_t *tcp_state_table = tcp_states;


static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
{
	int on = (flags & 1);		/* secure_tcp */

	tcp_state_table = (on ? tcp_states_dos : tcp_states);
}
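
/*
 * Bit 0 of the flags mirrors the secure_tcp sysctl
 * (/proc/sys/net/ipv4/vs/secure_tcp).  When enabled, the DoS-hardened
 * table routes half-open handshakes through the extra SYNACK state,
 * so spoofed SYN floods cannot park entries in states with long
 * timeouts.
 */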

static int
tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
{
	return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST,
				       tcp_state_name_table, sname, to);
}

static inline int tcp_state_idx(struct tcphdr *th)
{
	if (th->rst)
		return 3;
	if (th->syn)
		return 0;
	if (th->fin)
		return 1;
	if (th->ack)
		return 2;
	return -1;
}
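
/*
 * Flag priority, not combination, selects the row: RST wins over
 * everything, then SYN, FIN and ACK.  A SYN+ACK segment therefore
 * maps to index 0 (the syn rows), and a segment with none of the
 * four flags yields -1, which set_tcp_state() treats as a transition
 * to CLOSE.
 */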

static inline void
set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
	      int direction, struct tcphdr *th)
{
	int state_idx;
	int new_state = IP_VS_TCP_S_CLOSE;
	int state_off = tcp_state_off[direction];

	/*
	 *    Update state offset to INPUT_ONLY if necessary
	 *    or delete NO_OUTPUT flag if output packet detected
	 */
	if (cp->flags & IP_VS_CONN_F_NOOUTPUT) {
		if (state_off == TCP_DIR_OUTPUT)
			cp->flags &= ~IP_VS_CONN_F_NOOUTPUT;
		else
			state_off = TCP_DIR_INPUT_ONLY;
	}

	if ((state_idx = tcp_state_idx(th)) < 0) {
		IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx);
		goto tcp_state_out;
	}

	new_state = tcp_state_table[state_off+state_idx].next_state[cp->state];

  tcp_state_out:
	if (new_state != cp->state) {
		struct ip_vs_dest *dest = cp->dest;

		IP_VS_DBG(8, "%s %s [%c%c%c%c] %u.%u.%u.%u:%d->"
			  "%u.%u.%u.%u:%d state: %s->%s conn->refcnt:%d\n",
			  pp->name,
			  (state_off==TCP_DIR_OUTPUT)?"output ":"input ",
			  th->syn? 'S' : '.',
			  th->fin? 'F' : '.',
			  th->ack? 'A' : '.',
			  th->rst? 'R' : '.',
			  NIPQUAD(cp->daddr), ntohs(cp->dport),
			  NIPQUAD(cp->caddr), ntohs(cp->cport),
			  tcp_state_name(cp->state),
			  tcp_state_name(new_state),
			  atomic_read(&cp->refcnt));
		if (dest) {
			if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
			    (new_state != IP_VS_TCP_S_ESTABLISHED)) {
				atomic_dec(&dest->activeconns);
				atomic_inc(&dest->inactconns);
				cp->flags |= IP_VS_CONN_F_INACTIVE;
			} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
				   (new_state == IP_VS_TCP_S_ESTABLISHED)) {
				atomic_inc(&dest->activeconns);
				atomic_dec(&dest->inactconns);
				cp->flags &= ~IP_VS_CONN_F_INACTIVE;
			}
		}
	}

	cp->timeout = pp->timeout_table[cp->state = new_state];
}


/*
 *	Handle state transitions
 */
static int
tcp_state_transition(struct ip_vs_conn *cp, int direction,
		     const struct sk_buff *skb,
		     struct ip_vs_protocol *pp)
{
	struct tcphdr _tcph, *th;

	th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
	if (th == NULL)
		return 0;

	spin_lock(&cp->lock);
	set_tcp_state(pp, cp, direction, th);
	spin_unlock(&cp->lock);

	return 1;
}


/*
 *	Hash table for TCP application incarnations
 */
#define	TCP_APP_TAB_BITS	4
#define	TCP_APP_TAB_SIZE	(1 << TCP_APP_TAB_BITS)
#define	TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)

static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
static DEFINE_SPINLOCK(tcp_app_lock);

static inline __u16 tcp_app_hashkey(__be16 port)
{
	return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
		& TCP_APP_TAB_MASK;
}
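
/*
 * The hash folds the high bits of the raw network-order port value
 * into the low TCP_APP_TAB_BITS.  Worked example for port 21 on a
 * little-endian host, where the __be16 value is 0x1500:
 *
 *	((0x1500 >> 4) ^ 0x1500) & 0x0f
 *	  = (0x0150 ^ 0x1500) & 0x0f
 *	  = 0x1450 & 0x0f
 *	  = 0
 */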


static int tcp_register_app(struct ip_vs_app *inc)
{
	struct ip_vs_app *i;
	__u16 hash;
	__be16 port = inc->port;
	int ret = 0;

	hash = tcp_app_hashkey(port);

	spin_lock_bh(&tcp_app_lock);
	list_for_each_entry(i, &tcp_apps[hash], p_list) {
		if (i->port == port) {
			ret = -EEXIST;
			goto out;
		}
	}
	list_add(&inc->p_list, &tcp_apps[hash]);
	atomic_inc(&ip_vs_protocol_tcp.appcnt);

  out:
	spin_unlock_bh(&tcp_app_lock);
	return ret;
}


static void
tcp_unregister_app(struct ip_vs_app *inc)
{
	spin_lock_bh(&tcp_app_lock);
	atomic_dec(&ip_vs_protocol_tcp.appcnt);
	list_del(&inc->p_list);
	spin_unlock_bh(&tcp_app_lock);
}
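
/*
 * These two are reached through pp->register_app/pp->unregister_app
 * when an application helper creates or removes a per-port
 * incarnation; the in-tree user is ip_vs_ftp, which by default
 * registers on port 21.
 */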


static int
tcp_app_conn_bind(struct ip_vs_conn *cp)
{
	int hash;
	struct ip_vs_app *inc;
	int result = 0;

	/* Default binding: bind app only for NAT */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
		return 0;

	/* Lookup application incarnations and bind the right one */
	hash = tcp_app_hashkey(cp->vport);

	spin_lock(&tcp_app_lock);
	list_for_each_entry(inc, &tcp_apps[hash], p_list) {
		if (inc->port == cp->vport) {
			if (unlikely(!ip_vs_app_inc_get(inc)))
				break;
			spin_unlock(&tcp_app_lock);

			IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->"
				  "%u.%u.%u.%u:%u to app %s on port %u\n",
				  __FUNCTION__,
				  NIPQUAD(cp->caddr), ntohs(cp->cport),
				  NIPQUAD(cp->vaddr), ntohs(cp->vport),
				  inc->name, ntohs(inc->port));
			cp->app = inc;
			if (inc->init_conn)
				result = inc->init_conn(inc, cp);
			goto out;
		}
	}
	spin_unlock(&tcp_app_lock);

  out:
	return result;
}


/*
 *	Set LISTEN timeout. (ip_vs_conn_put will setup timer)
 */
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
{
	spin_lock(&cp->lock);
	cp->state = IP_VS_TCP_S_LISTEN;
	cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN];
	spin_unlock(&cp->lock);
}
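
/*
 * Exported for application helpers: ip_vs_ftp parks the expected
 * passive-mode data connection in LISTEN with this, and
 * ip_vs_conn_put() then arms the expiry timer with the timeout set
 * here.
 */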


static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
{
	IP_VS_INIT_HASH_TABLE(tcp_apps);
	pp->timeout_table = tcp_timeouts;
}


static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
{
}


struct ip_vs_protocol ip_vs_protocol_tcp = {
	.name =			"TCP",
	.protocol =		IPPROTO_TCP,
	.dont_defrag =		0,
	.appcnt =		ATOMIC_INIT(0),
	.init =			ip_vs_tcp_init,
	.exit =			ip_vs_tcp_exit,
	.register_app =		tcp_register_app,
	.unregister_app =	tcp_unregister_app,
	.conn_schedule =	tcp_conn_schedule,
	.conn_in_get =		tcp_conn_in_get,
	.conn_out_get =		tcp_conn_out_get,
	.snat_handler =		tcp_snat_handler,
	.dnat_handler =		tcp_dnat_handler,
	.csum_check =		tcp_csum_check,
	.state_name =		tcp_state_name,
	.state_transition =	tcp_state_transition,
	.app_conn_bind =	tcp_app_conn_bind,
	.debug_packet =		ip_vs_tcpudp_debug_packet,
	.timeout_change =	tcp_timeout_change,
	.set_state_timeout =	tcp_set_state_timeout,
};
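
/*
 * ip_vs_protocol_tcp is registered with the IPVS core at boot (see
 * ip_vs_protocol_init() in ip_vs_proto.c); .init above runs at that
 * point and wires up the timeout table consumed by set_tcp_state().
 */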