/*-
 * Copyright (c) 2001 McAfee, Inc.
 * Copyright (c) 2006 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and McAfee Research, the Security Research Division of McAfee, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/netinet/tcp_syncache.c 159722 2006-06-18 11:48:03Z andre $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_sack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mac.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#ifdef TCPDEBUG
#include <netinet/tcpip.h>
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <vm/uma.h>

static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
static int	 syncache_respond(struct syncache *, struct mbuf *);
static struct	 socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *m);
static void	 syncache_timer(void *);
static void	 syncookie_init(void);
static u_int32_t syncookie_generate(struct syncache *, u_int32_t *);
static struct syncache
		 *syncookie_lookup(struct in_conninfo *, struct tcphdr *,
		    struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits correspond to a timeout of (1 + 2 + 4 + 8 == 15) seconds;
 * by then the odds are that the user has given up attempting to connect.
 */
#define SYNCACHE_MAXREXMTS		3

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

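/*
 * Global syncache state: the hash table of bucket rows, the UMA zone
 * backing the entries, and the tunable limits exported via sysctl below.
 */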
struct tcp_syncache {
	struct	syncache_head *hashbase;
	uma_zone_t zone;
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_count;		/* XXX: unprotected */
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
};
static struct tcp_syncache tcp_syncache;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
     &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
     &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
     &tcp_syncache.cache_count, 0, "Current number of entries in syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
     &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
     &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

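/*
 * Bucket hash: mix the randomly seeded hash_secret with the foreign
 * address and both ports, then mask down to the table size.  Note that
 * the local address does not contribute to the hash.
 */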
#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)

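/*
 * Arm the retransmit state for an entry: the RTO grows along tcp_backoff[]
 * (exponential backoff in units of TCPTV_RTOBASE), and sch_nextc is pulled
 * forward if this entry now has the earliest deadline in the bucket.  The
 * per-bucket callout is rescheduled only when "co" is zero and the bucket
 * is non-empty.
 */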
#define SYNCACHE_TIMEOUT(sc, sch, co) do {				\
	(sc)->sc_rxmits++;						\
	(sc)->sc_rxttime = ticks +					\
		TCPTV_RTOBASE * tcp_backoff[(sc)->sc_rxmits - 1];	\
	if ((sch)->sch_nextc > (sc)->sc_rxttime)			\
		(sch)->sch_nextc = (sc)->sc_rxttime;			\
	if (!TAILQ_EMPTY(&(sch)->sch_bucket) && !(co))			\
		callout_reset(&(sch)->sch_timer,			\
			(sch)->sch_nextc - ticks,			\
			syncache_timer, (void *)(sch));			\
} while (0)

#define	SCH_LOCK(sch)		mtx_lock(&(sch)->sch_mtx)
#define	SCH_UNLOCK(sch)		mtx_unlock(&(sch)->sch_mtx)
#define	SCH_LOCK_ASSERT(sch)	mtx_assert(&(sch)->sch_mtx, MA_OWNED)

/*
 * Requires the syncache entry to be already removed from the bucket list.
 */
static void
syncache_free(struct syncache *sc)
{
	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);

	uma_zfree(tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize) || tcp_syncache.hashsize == 0) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	/* Set limits. */
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		mtx_init(&tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
			 NULL, MTX_DEF);
		callout_init_mtx(&tcp_syncache.hashbase[i].sch_timer,
			 &tcp_syncache.hashbase[i].sch_mtx, 0);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	syncookie_init();

	/* Create the syncache entry zone. */
	tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
}

/*
 * Inserts a syncache entry into the specified bucket row.
 * Locks and unlocks the syncache_head autonomously.
 */
static void
syncache_insert(struct syncache *sc, struct syncache_head *sch)
{
	struct syncache *sc2;

	SCH_LOCK(sch);

	/*
	 * Make sure that we don't overflow the per-bucket limit.
	 * If the bucket is full, toss the oldest element.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
			("sch->sch_length incorrect"));
		sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	}

	/* Put it into the bucket. */
	TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;

	/* Reinitialize the bucket row's timer. */
	SYNCACHE_TIMEOUT(sc, sch, 1);

	SCH_UNLOCK(sch);

	tcp_syncache.cache_count++;
	tcpstat.tcps_sc_added++;
}

/*
 * Remove and free entry from syncache bucket row.
 * Expects locked syncache head.
 */
static void
syncache_drop(struct syncache *sc, struct syncache_head *sch)
{

	SCH_LOCK_ASSERT(sch);

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;

	syncache_free(sc);
	tcp_syncache.cache_count--;
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 * One separate timer for each bucket row.
 */
static void
syncache_timer(void *xsch)
{
	struct syncache_head *sch = (struct syncache_head *)xsch;
	struct syncache *sc, *nsc;
	int tick = ticks;

	/* NB: syncache_head has already been locked by the callout. */
	SCH_LOCK_ASSERT(sch);

	TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
		/*
		 * We do not check whether the listen socket still
		 * exists, and accept that it may be gone by the time
		 * we resend the SYN,ACK.  We do not expect this to
		 * happen often.  If it does, a RST will be sent by
		 * the time the remote host completes the
		 * SYN,ACK->ACK exchange.
		 */
		if (sc->sc_rxttime >= tick) {
			if (sc->sc_rxttime < sch->sch_nextc)
				sch->sch_nextc = sc->sc_rxttime;
			continue;
		}

		if (sc->sc_rxmits > tcp_syncache.rexmt_limit) {
			syncache_drop(sc, sch);
			tcpstat.tcps_sc_stale++;
			continue;
		}

		(void) syncache_respond(sc, NULL);
		tcpstat.tcps_sc_retransmitted++;
		SYNCACHE_TIMEOUT(sc, sch, 0);
	}
	if (!TAILQ_EMPTY(&(sch)->sch_bucket))
		callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
			syncache_timer, (void *)(sch));
}

/*
 * Find an entry in the syncache.
 * Always returns with the syncache_head locked, plus either a matching
 * entry or NULL.
 */
struct syncache *
syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
{
	struct syncache *sc;
	struct syncache_head *sch;

#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;

		SCH_LOCK(sch);

		/* Circle through bucket row to find matching entry. */
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	} else
#endif
	{
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;

		SCH_LOCK(sch);

		/* Circle through bucket row to find matching entry. */
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	SCH_LOCK_ASSERT(*schp);
	return (NULL);			/* always returns with locked sch */
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc == NULL)
		goto done;

	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
done:
	SCH_UNLOCK(sch);
}

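/*
 * The caller determined that an ACK matching this embryonic connection
 * was unacceptable; drop the syncache entry and account for it.
 */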
void
syncache_badack(struct in_conninfo *inc)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
	SCH_UNLOCK(sch);
}

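/*
 * An ICMP unreachable was received for an embryonic connection.  Drop the
 * syncache entry, unless damped by the SCF_UNREACH logic below.
 */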
void
syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc == NULL)
		goto done;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		goto done;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
		sc->sc_flags |= SCF_UNREACH;
		goto done;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
done:
	SCH_UNLOCK(sch);
}

/*
 * Build a new TCP socket structure from a syncache entry.
 */
static struct socket *
syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
{
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;

	NET_ASSERT_GIANT();
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, SS_ISCONNECTED);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort2;
	}
#ifdef MAC
	SOCK_LOCK(so);
	mac_set_socket_peer_from_mbuf(m, so);
	SOCK_UNLOCK(so);
#endif

	inp = sotoinpcb(so);
	INP_LOCK(inp);

	/* Insert new socket into PCB hash list. */
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif
	inp->inp_lport = sc->sc_inc.inc_lport;
	if (in_pcbinshash(inp) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_isipv6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		goto abort;
	}
#ifdef IPSEC
	/* Copy old policy into new socket's. */
	if (ipsec_copy_pcbpolicy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef FAST_IPSEC
	/* Copy old policy into new socket's. */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);

		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6.sin6_port = sc->sc_inc.inc_fport;
		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, (struct sockaddr *)&sin6,
		    thread0.td_ucred)) {
			inp->in6p_laddr = laddr6;
			goto abort;
		}
		/* Override flowlabel from in6_pcbconnect. */
		inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
		inp->in6p_flowinfo |= sc->sc_flowlabel;
	} else
#endif
	{
		struct in_addr laddr;
		struct sockaddr_in sin;

		inp->inp_options = ip_srcroute(m);
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}

		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
		sin.sin_addr = sc->sc_inc.inc_faddr;
		sin.sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, (struct sockaddr *)&sin,
		    thread0.td_ucred)) {
			inp->inp_laddr = laddr;
			goto abort;
		}
	}
	tp = intotcpcb(inp);
	tp->t_state = TCPS_SYN_RECEIVED;
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
		tp->snd_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
#ifdef TCP_SIGNATURE
	if (sc->sc_flags & SCF_SIGNATURE)
		tp->t_flags |= TF_SIGNATURE;
#endif
	if (sc->sc_flags & SCF_SACK) {
		tp->sack_enable = 1;
		tp->t_flags |= TF_SACK_PERMIT;
	}

	/*
	 * Set up MSS and get cached values from tcp_hostcache.
	 * This might overwrite some of the defaults we just set.
	 */
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
	 */
	if (sc->sc_rxmits > 1)
		tp->snd_cwnd = tp->t_maxseg;
	callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);

	INP_UNLOCK(inp);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	INP_UNLOCK(inp);
abort2:
	if (so != NULL)
		soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(struct in_conninfo *inc, struct tcphdr *th,
    struct socket **lsop, struct mbuf *m)
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	/*
	 * Global TCP locks are held because we manipulate the PCB lists
	 * and create a new socket.
	 */
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		SCH_UNLOCK(sch);
		sch = NULL;

		if (!tcp_syncookies)
			goto failed;
		sc = syncookie_lookup(inc, th, *lsop);
		if (sc == NULL)
			goto failed;
		tcpstat.tcps_sc_recvcookie++;
	} else {
		/* Pull out the entry to unlock the bucket row. */
		TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
		sch->sch_length--;
		SCH_UNLOCK(sch);
	}

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		goto failed;

	so = syncache_socket(sc, *lsop, m);

	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		(void) tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
#endif
		m_freem(m);			/* XXX: only needed for above */
		tcpstat.tcps_sc_aborted++;
		if (sch != NULL) {
			syncache_insert(sc, sch);  /* try again later */
			sc = NULL;
		}
		goto failed;
	} else
		tcpstat.tcps_sc_completed++;
	*lsop = so;

	syncache_free(sc);
	return (1);
failed:
	if (sc != NULL)
		syncache_free(sc);
	return (0);
}

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct inpcb *inp, struct socket **lsop, struct mbuf *m)
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	u_int32_t flowtmp;
	int win, sb_hiwat, ip_ttl, ip_tos, noopt = 0;
#ifdef INET6
	int autoflowlabel = 0;
#endif

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(inp);			/* listen socket */

	/*
	 * Combine all so/tp operations very early to drop the INP lock as
	 * soon as possible.
	 */
	so = *lsop;
	tp = sototcpcb(so);

#ifdef INET6
	if (inc->inc_isipv6 &&
	    (inp->in6p_flags & IN6P_AUTOFLOWLABEL))
		autoflowlabel = 1;
#endif
	ip_ttl = inp->inp_ip_ttl;
	ip_tos = inp->inp_ip_tos;
	win = sbspace(&so->so_rcv);
	sb_hiwat = so->so_rcv.sb_hiwat;
	/* sc is not allocated until later; just note TF_NOOPT for now. */
	if (tp->t_flags & TF_NOOPT)
		noopt = 1;

	so = NULL;
	tp = NULL;

	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&tcbinfo);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute(m);

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX: should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options?)
	 */
	sc = syncache_lookup(inc, &sch);	/* returns locked entry */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;
		if (syncache_respond(sc, m) == 0) {
			SYNCACHE_TIMEOUT(sc, sch, 1);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		SCH_UNLOCK(sch);
		goto done;
	}

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		tcpstat.tcps_sc_zonefail++;
		sc = TAILQ_LAST(&sch->sch_bucket, sch_head);
		syncache_drop(sc, sch);
		SCH_UNLOCK(sch);
		sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
		if (sc == NULL) {
			if (ipopts)
				(void) m_free(ipopts);
			goto done;
		}
	} else
		SCH_UNLOCK(sch);

	/*
	 * Fill in the syncache values.
	 */
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_ip_tos = ip_tos;
		sc->sc_ip_ttl = ip_ttl;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = noopt ? SCF_NOOPT : 0;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	sc->sc_flowlabel = 0;
	if (tcp_syncookies) {
		sc->sc_iss = syncookie_generate(sc, &flowtmp);
#ifdef INET6
		if (autoflowlabel)
			sc->sc_flowlabel = flowtmp & IPV6_FLOWLABEL_MASK;
#endif
	} else {
		sc->sc_iss = arc4random();
#ifdef INET6
		if (autoflowlabel)
			sc->sc_flowlabel =
			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
#endif
	}

	/*
	 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
	 * win was derived from socket earlier in the function.
	 */
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < sb_hiwat)
				wscale++;
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
#ifdef TCP_SIGNATURE
	/*
	 * If listening socket requested TCP digests, and received SYN
	 * contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 * XXX: Currently we always record the option by default and will
	 * attempt to use it in syncache_respond().
	 */
	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;
#endif

	if (to->to_flags & TOF_SACK)
		sc->sc_flags |= SCF_SACK;

	/*
	 * Do a standard 3-way handshake.
	 */
	if (syncache_respond(sc, m) == 0) {
		syncache_insert(sc, sch);	/* locks and unlocks sch */
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}

done:
	*lsop = NULL;
	return (1);
}

static int
syncache_respond(struct syncache *sc, struct mbuf *m)
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif
#ifdef MAC
	struct inpcb *inp = NULL;
#endif

	hlen =
#ifdef INET6
	       (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) :
#endif
		sizeof(struct ip);

	KASSERT(sc != NULL, ("syncache_respond with NULL syncache pointer"));

	/* Determine MSS we advertise to the other end of the connection. */
	mssopt = tcp_mssopt(&sc->sc_inc);

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0);
#ifdef TCP_SIGNATURE
		if (sc->sc_flags & SCF_SIGNATURE)
			optlen += TCPOLEN_SIGNATURE;
#endif
		if (sc->sc_flags & SCF_SACK)
			optlen += TCPOLEN_SACK_PERMITTED;
		optlen = roundup2(optlen, 4);
	}
	tlen = hlen + sizeof(struct tcphdr) + optlen;

	/*
	 * XXX: Assume that the entire packet will fit in a header mbuf.
	 */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));

	/* Create the IP+TCP header from scratch. */
	if (m)
		m_freem(m);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;

#ifdef MAC
	/*
	 * For MAC look up the inpcb to get access to the label information.
	 * We don't store the inpcb pointer in struct syncache to make locking
	 * less complicated and to save locking operations.  However for MAC
	 * this gives a slight overhead as we have to do a full pcblookup here.
	 */
	INP_INFO_RLOCK(&tcbinfo);
	if (inp == NULL) {
#ifdef INET6 /* && MAC */
		if (sc->sc_inc.inc_isipv6)
			inp = in6_pcblookup_hash(&tcbinfo,
				&sc->sc_inc.inc6_laddr, sc->sc_inc.inc_lport,
				&sc->sc_inc.inc6_faddr, sc->sc_inc.inc_fport,
				1, NULL);
		else
#endif /* INET6 */
			inp = in_pcblookup_hash(&tcbinfo,
				sc->sc_inc.inc_laddr, sc->sc_inc.inc_lport,
				sc->sc_inc.inc_faddr, sc->sc_inc.inc_fport,
				1, NULL);
		if (inp == NULL) {
			m_freem(m);
			INP_INFO_RUNLOCK(&tcbinfo);
			return (ESHUTDOWN);
		}
	}
	INP_LOCK(inp);
	if ((inp->inp_socket->so_options & SO_ACCEPTCONN) == 0) {
		m_freem(m);
		INP_UNLOCK(inp);
		INP_INFO_RUNLOCK(&tcbinfo);
		return (ESHUTDOWN);
	}
	mac_create_mbuf_from_inpcb(inp, m);
	INP_UNLOCK(inp);
	INP_INFO_RUNLOCK(&tcbinfo);
#endif /* MAC */

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
		ip6->ip6_flow |= sc->sc_flowlabel;

		th = (struct tcphdr *)(ip6 + 1);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = tlen;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_ip_ttl;
		ip->ip_tos = sc->sc_ip_tos;

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
		       ip->ip_off |= IP_DF;

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	/* Tack on the TCP options. */
	if (optlen != 0) {
		optp = (u_int8_t *)(th + 1);
		*optp++ = TCPOPT_MAXSEG;
		*optp++ = TCPOLEN_MAXSEG;
		*optp++ = (mssopt >> 8) & 0xff;
		*optp++ = mssopt & 0xff;

		if (sc->sc_flags & SCF_WINSCALE) {
			*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
			    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
			    sc->sc_request_r_scale);
			optp += 4;
		}

		if (sc->sc_flags & SCF_TIMESTAMP) {
			u_int32_t *lp = (u_int32_t *)(optp);

			/* Form timestamp option per appendix A of RFC 1323. */
			*lp++ = htonl(TCPOPT_TSTAMP_HDR);
			*lp++ = htonl(ticks);
			*lp   = htonl(sc->sc_tsrecent);
			optp += TCPOLEN_TSTAMP_APPA;
		}

#ifdef TCP_SIGNATURE
		/*
		 * Handle TCP-MD5 passive opener response.
		 */
		if (sc->sc_flags & SCF_SIGNATURE) {
			u_int8_t *bp = optp;
			int i;

			*bp++ = TCPOPT_SIGNATURE;
			*bp++ = TCPOLEN_SIGNATURE;
			for (i = 0; i < TCP_SIGLEN; i++)
				*bp++ = 0;
			tcp_signature_compute(m, sizeof(struct ip), 0, optlen,
			    optp + 2, IPSEC_DIR_OUTBOUND);
			optp += TCPOLEN_SIGNATURE;
		}
#endif /* TCP_SIGNATURE */

		if (sc->sc_flags & SCF_SACK) {
			*optp++ = TCPOPT_SACK_PERMITTED;
			*optp++ = TCPOLEN_SACK_PERMITTED;
		}

		{
			/* Pad TCP options to a 4 byte boundary */
			int padlen = optlen - (optp - (u_int8_t *)(th + 1));
			while (padlen-- > 0)
				*optp++ = TCPOPT_EOL;
		}
	}

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
	}
	return (error);
}

/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 * (A): peer mss index
 */
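
/*
 * Encoding, as implemented below: the low SYNCOOKIE_WNDBITS bits carry the
 * secret (window) index, the next two bits the tcp_msstab index, and the
 * remaining 25 bits come from the MD5 hash; the whole word is then XORed
 * with the peer's ISS.  On return, syncookie_lookup() recovers iss^irs
 * from the ACK, recomputes the hash, and requires every bit outside
 * SYNCOOKIE_DATAMASK to match, so a blindly guessed cookie is accepted
 * with probability of roughly 2^-25.
 */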

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
    (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
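/* (1 << 5) / (1 << 1) == 16, i.e. sixteen seconds' worth of ticks. */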
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)

#define SYNCOOKIE_RLOCK(ts)	(rw_rlock(&(ts).ts_rwmtx))
#define SYNCOOKIE_RUNLOCK(ts)	(rw_runlock(&(ts).ts_rwmtx))
#define SYNCOOKIE_TRY_UPGRADE(ts)  (rw_try_upgrade(&(ts).ts_rwmtx))
#define SYNCOOKIE_DOWNGRADE(ts)	(rw_downgrade(&(ts).ts_rwmtx))

static struct {
	struct rwlock	ts_rwmtx;
	u_int		ts_expire;	/* ticks */
	u_int32_t	ts_secbits[4];
} tcp_secret[SYNCOOKIE_NSECRETS];

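/*
 * MSS values encodable in the two cookie data bits: 536 is the default
 * IPv4 MSS, 1460 matches Ethernet, 8960 jumbo frames.  The largest entry
 * not exceeding the peer's advertised MSS is used.
 */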
static int tcp_msstab[] = { 0, 536, 1460, 8960 };

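/*
 * XXX: a single shared MD5 context.  Nothing visible here serializes
 * concurrent cookie generation/lookup, so interleaved MD5Add calls
 * could yield bogus digests.
 */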
static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

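/*
 * Fixed-layout block hashed into the cookie: 2 x 4 address bytes,
 * 4 x 4 secret bytes and 2 x 2 port bytes == 28, which the CTASSERT
 * below depends on (no compiler padding).
 */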
struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */

static void
syncookie_init(void) {
	int idx;

	for (idx = 0; idx < SYNCOOKIE_NSECRETS; idx++) {
		rw_init(&(tcp_secret[idx].ts_rwmtx), "tcp_secret");
	}
}

static u_int32_t
syncookie_generate(struct syncache *sc, u_int32_t *flowid)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	SYNCOOKIE_RLOCK(tcp_secret[idx]);
	if (tcp_secret[idx].ts_expire < ticks &&
	    SYNCOOKIE_TRY_UPGRADE(tcp_secret[idx]) ) {
		/* need write access */
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = arc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
		SYNCOOKIE_DOWNGRADE(tcp_secret[idx]);
	}
	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;				/* peer's iss */
	MD5Init(&syn_ctx);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	SYNCOOKIE_RUNLOCK(tcp_secret[idx]);
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	*flowid = md5_buffer[1];
	return (data);
}

static struct syncache *
syncookie_lookup(struct in_conninfo *inc, struct tcphdr *th, struct socket *so)
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	SYNCOOKIE_RLOCK(tcp_secret[idx]);
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks) {
		SYNCOOKIE_RUNLOCK(tcp_secret[idx]);
		return (NULL);
	}
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	SYNCOOKIE_RUNLOCK(tcp_secret[idx]);
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
	if (sc == NULL)
		return (NULL);
	/*
	 * Fill in the syncache values.
	 * XXX: duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		if (sotoinpcb(so)->in6p_flags & IN6P_AUTOFLOWLABEL)
			sc->sc_flowlabel = md5_buffer[1] & IPV6_FLOWLABEL_MASK;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
		sc->sc_ip_ttl = sotoinpcb(so)->inp_ip_ttl;
		sc->sc_ip_tos = sotoinpcb(so)->inp_ip_tos;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxmits = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}