tcp_syncache.c revision 150131
/*-
 * Copyright (c) 2001 McAfee, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and McAfee Research, the Security Research Division of McAfee, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/netinet/tcp_syncache.c 150131 2005-09-14 15:06:22Z andre $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_sack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mac.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#ifdef TCPDEBUG
#include <netinet/tcpip.h>
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <vm/uma.h>

static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
#ifdef TCPDEBUG
static int	 syncache_respond(struct syncache *, struct mbuf *, struct socket *);
#else
static int	 syncache_respond(struct syncache *, struct mbuf *);
#endif
static struct	 socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *m);
static void	 syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *, u_int32_t *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
		    struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits correspond to a timeout of (1 + 2 + 4 + 8 == 15) seconds;
 * the odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		3

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

struct tcp_syncache {
	struct	syncache_head *hashbase;
	uma_zone_t zone;
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_count;
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
	TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
	struct	callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
};
static struct tcp_syncache tcp_syncache;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
     &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
     &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
     &tcp_syncache.cache_count, 0, "Current number of entries in syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
     &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
     &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

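/*
 * Example (illustrative values, not from this file) loader.conf settings
 * for the CTLFLAG_RDTUN tunables above, whose names are fetched in
 * syncache_init(); the CTLFLAG_RW ones can instead be changed at runtime
 * with sysctl(8):
 *
 *	net.inet.tcp.syncache.hashsize=1024
 *	net.inet.tcp.syncache.bucketlimit=60
 */
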
static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

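/*
 * The hash macros below mix a random per-boot secret with the foreign
 * address and the port pair, then mask the result down to the table size
 * (kept a power of two).  Only part of the 4-tuple is hashed; the full
 * endpoints are compared by ENDPTS_EQ()/ENDPTS6_EQ() while walking a
 * bucket.  For IPv6 only the first and last 32-bit words of the foreign
 * address are mixed in.
 */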
#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)

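/*
 * (Re)arm the retransmit timer for a syncache entry.  Each backoff slot
 * has its own queue and callout; every entry on a given queue shares the
 * same retransmit interval (TCPTV_RTOBASE scaled by tcp_backoff[slot]),
 * so appending at the tail keeps the queue sorted by expiry time and the
 * callout only needs to be started when it is not already running.
 */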
#define SYNCACHE_TIMEOUT(sc, slot) do {				\
	sc->sc_rxtslot = (slot);					\
	sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[(slot)];	\
	TAILQ_INSERT_TAIL(&tcp_syncache.timerq[(slot)], sc, sc_timerq);	\
	if (!callout_active(&tcp_syncache.tt_timerq[(slot)]))		\
		callout_reset(&tcp_syncache.tt_timerq[(slot)],		\
		    TCPTV_RTOBASE * tcp_backoff[(slot)],		\
		    syncache_timer, (void *)((intptr_t)(slot)));	\
} while (0)

static void
syncache_free(struct syncache *sc)
{
	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);

	uma_zfree(tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize) || tcp_syncache.hashsize == 0) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	/* Initialize the timer queues. */
	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
		TAILQ_INIT(&tcp_syncache.timerq[i]);
		callout_init(&tcp_syncache.tt_timerq[i], NET_CALLOUT_MPSAFE);
	}

	/*
	 * Allocate the syncache entries.  Allow the zone to allocate one
	 * more entry than cache limit, so a new entry can bump out an
	 * older one.
	 */
	tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
	tcp_syncache.cache_limit -= 1;
}

static void
syncache_insert(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	struct syncache *sc2;
	int i;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * Make sure that we don't overflow the per-bucket
	 * limit or the total cache size limit.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		/*
		 * The bucket is full, toss the oldest element.
		 */
		sc2 = TAILQ_FIRST(&sch->sch_bucket);
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	} else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) {
		/*
		 * The cache is full.  Toss the oldest entry in the
		 * entire cache.  This is the front entry in the
		 * first non-empty timer queue with the largest
		 * timeout value.
		 */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc2 != NULL)
				break;
		}
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, NULL);
		tcpstat.tcps_sc_cacheoverflow++;
	}

	/* Initialize the entry's timer. */
	SYNCACHE_TIMEOUT(sc, 0);

	/* Put it into the bucket. */
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	tcp_syncache.cache_count++;
	tcpstat.tcps_sc_added++;
}

static void
syncache_drop(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	if (sch == NULL) {
#ifdef INET6
		if (sc->sc_inc.inc_isipv6) {
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else
#endif
		{
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	tcp_syncache.cache_count--;

	TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
	if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
		callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);

	syncache_free(sc);
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 */
static void
syncache_timer(xslot)
	void *xslot;
{
	intptr_t slot = (intptr_t)xslot;
	struct syncache *sc, *nsc;
	struct inpcb *inp;

	INP_INFO_WLOCK(&tcbinfo);
	if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
	    !callout_active(&tcp_syncache.tt_timerq[slot])) {
		/* XXX can this happen? */
		INP_INFO_WUNLOCK(&tcbinfo);
		return;
	}
	callout_deactivate(&tcp_syncache.tt_timerq[slot]);

	nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
	while (nsc != NULL) {
		if (ticks < nsc->sc_rxttime)
			break;
		sc = nsc;
		inp = sc->sc_tp->t_inpcb;
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    inp == NULL || inp->inp_gencnt != sc->sc_inp_gencnt) {
			nsc = TAILQ_NEXT(sc, sc_timerq);
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache
		 * to modify another entry, so do not obtain the next
		 * entry on the timer chain until it has completed.
		 */
#ifdef TCPDEBUG
		(void) syncache_respond(sc, NULL, NULL);
#else
		(void) syncache_respond(sc, NULL);
#endif
		nsc = TAILQ_NEXT(sc, sc_timerq);
		tcpstat.tcps_sc_retransmitted++;
		TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
		SYNCACHE_TIMEOUT(sc, slot + 1);
	}
	if (nsc != NULL)
		callout_reset(&tcp_syncache.tt_timerq[slot],
		    nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
	INP_INFO_WUNLOCK(&tcbinfo);
}

/*
 * Find an entry in the syncache.
 */
struct syncache *
syncache_lookup(inc, schp)
	struct in_conninfo *inc;
	struct syncache_head **schp;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	} else
#endif
	{
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	return (NULL);
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;
	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
}

void
syncache_badack(inc)
	struct in_conninfo *inc;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
}

void
syncache_unreach(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		return;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
		sc->sc_flags |= SCF_UNREACH;
		return;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
}

/*
 * Build a new TCP socket structure from a syncache entry.
 */
static struct socket *
syncache_socket(sc, lso, m)
	struct syncache *sc;
	struct socket *lso;
	struct mbuf *m;
{
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;

	NET_ASSERT_GIANT();
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * OK, create the full-blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, SS_ISCONNECTED);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort2;
	}
#ifdef MAC
	SOCK_LOCK(so);
	mac_set_socket_peer_from_mbuf(m, so);
	SOCK_UNLOCK(so);
#endif

	inp = sotoinpcb(so);
	INP_LOCK(inp);

	/*
	 * Insert new socket into hash list.
	 */
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif
	inp->inp_lport = sc->sc_inc.inc_lport;
	if (in_pcbinshash(inp) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_isipv6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		goto abort;
	}
#ifdef IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_pcbpolicy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef FAST_IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts is not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different from the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);

		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6.sin6_port = sc->sc_inc.inc_fport;
		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, (struct sockaddr *)&sin6,
		    thread0.td_ucred)) {
			inp->in6p_laddr = laddr6;
			goto abort;
		}
		/* Override flowlabel from in6_pcbconnect. */
		inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
		inp->in6p_flowinfo |= sc->sc_flowlabel;
	} else
#endif
	{
		struct in_addr laddr;
		struct sockaddr_in sin;

		inp->inp_options = ip_srcroute(m);
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}

		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
		sin.sin_addr = sc->sc_inc.inc_faddr;
		sin.sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, (struct sockaddr *)&sin,
		    thread0.td_ucred)) {
			inp->inp_laddr = laddr;
			goto abort;
		}
	}

	tp = intotcpcb(inp);
	tp->t_state = TCPS_SYN_RECEIVED;
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
		tp->requested_s_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
#ifdef TCP_SIGNATURE
	if (sc->sc_flags & SCF_SIGNATURE)
		tp->t_flags |= TF_SIGNATURE;
#endif
	if (sc->sc_flags & SCF_SACK) {
		tp->sack_enable = 1;
		tp->t_flags |= TF_SACK_PERMIT;
	}
	/*
	 * Set up MSS and get cached values from tcp_hostcache.
	 * This might overwrite some of the defaults we just set.
	 */
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
	 */
	if (sc->sc_rxtslot != 0)
		tp->snd_cwnd = tp->t_maxseg;
	callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);

	INP_UNLOCK(inp);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	INP_UNLOCK(inp);
abort2:
	if (so != NULL)
		(void) soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(inc, th, sop, m)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. Check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!tcp_syncookies)
			return (0);
		sc = syncookie_lookup(inc, th, *sop);
		if (sc == NULL)
			return (0);
		sch = NULL;
		tcpstat.tcps_sc_recvcookie++;
	}

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		return (0);

	so = syncache_socket(sc, *sop, m);
	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		(void) tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
#endif
		m_freem(m);			/* XXX only needed for above */
		tcpstat.tcps_sc_aborted++;
	} else
		tcpstat.tcps_sc_completed++;

	if (sch == NULL)
		syncache_free(sc);
	else
		syncache_drop(sc, sch);
	*sop = so;
	return (1);
}

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(inc, to, th, sop, m)
	struct in_conninfo *inc;
	struct tcpopt *to;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	u_int32_t flowtmp;
	int i, win;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	so = *sop;
	tp = sototcpcb(so);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute(m);

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * Should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options)?
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;
		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
#ifdef TCPDEBUG
		if (syncache_respond(sc, m, so) == 0) {
#else
		if (syncache_respond(sc, m) == 0) {
#endif
			/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
			TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
			    sc, sc_timerq);
			SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		*sop = NULL;
		return (1);
	}

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc != NULL)
				break;
		}
		sc->sc_tp->ts_recent = ticks;
		syncache_drop(sc, NULL);
		tcpstat.tcps_sc_zonefail++;
		sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
		if (sc == NULL) {
			if (ipopts)
				(void) m_free(ipopts);
			return (0);
		}
	}

	/*
	 * Fill in the syncache values.
	 */
	bzero(sc, sizeof(*sc));
	sc->sc_tp = tp;
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = 0;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	sc->sc_flowlabel = 0;
	if (tcp_syncookies) {
		sc->sc_iss = syncookie_generate(sc, &flowtmp);
#ifdef INET6
		if (inc->inc_isipv6 &&
		    (sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)) {
			sc->sc_flowlabel = flowtmp & IPV6_FLOWLABEL_MASK;
		}
#endif
	} else {
		sc->sc_iss = arc4random();
#ifdef INET6
		if (inc->inc_isipv6 &&
		    (sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)) {
			sc->sc_flowlabel =
			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
		}
#endif
	}

	/* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
	win = sbspace(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
				wscale++;
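			/*
			 * Illustrative example: with a 128 kB receive
			 * buffer, 65535 << 1 == 131070 is still below
			 * sb_hiwat (131072) but 65535 << 2 is not, so
			 * the loop stops with wscale == 2.
			 */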
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags = SCF_NOOPT;
#ifdef TCP_SIGNATURE
	/*
	 * If listening socket requested TCP digests, and received SYN
	 * contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 * XXX Currently we always record the option by default and will
	 * attempt to use it in syncache_respond().
	 */
	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;
#endif

	if (to->to_flags & TOF_SACK)
		sc->sc_flags |= SCF_SACK;

	/*
	 * Do a standard 3-way handshake.
	 */
#ifdef TCPDEBUG
	if (syncache_respond(sc, m, so) == 0) {
#else
	if (syncache_respond(sc, m) == 0) {
#endif
		syncache_insert(sc, sch);
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
	*sop = NULL;
	return (1);
}

#ifdef TCPDEBUG
static int
syncache_respond(sc, m, so)
	struct syncache *sc;
	struct mbuf *m;
	struct socket *so;
#else
static int
syncache_respond(sc, m)
	struct syncache *sc;
	struct mbuf *m;
#endif
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct tcphdr *th;
	struct inpcb *inp;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif

	hlen =
#ifdef INET6
	       (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) :
#endif
		sizeof(struct ip);

	KASSERT(sc != NULL, ("syncache_respond with NULL syncache pointer"));

	/* Determine the MSS we advertise to the other end of the connection. */
	mssopt = tcp_mssopt(&sc->sc_inc);

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0);
#ifdef TCP_SIGNATURE
		if (sc->sc_flags & SCF_SIGNATURE)
			optlen += TCPOLEN_SIGNATURE;
#endif
		if (sc->sc_flags & SCF_SACK)
			optlen += TCPOLEN_SACK_PERMITTED;
		optlen = roundup2(optlen, 4);
	}
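	/*
	 * Example: a connection negotiating MSS, window scaling and
	 * timestamps needs 4 + 4 + 12 == 20 option bytes, already a
	 * multiple of 4, so roundup2() leaves it unchanged.
	 */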
	tlen = hlen + sizeof(struct tcphdr) + optlen;

	/*
	 * XXX
	 * assume that the entire packet will fit in a header mbuf
	 */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));

	/*
	 * XXX shouldn't this reuse the mbuf if possible?
	 * Create the IP+TCP header from scratch.
	 */
	if (m)
		m_freem(m);

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	inp = sc->sc_tp->t_inpcb;
	INP_LOCK(inp);
#ifdef MAC
	mac_create_mbuf_from_inpcb(inp, m);
#endif

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
		ip6->ip6_flow |= sc->sc_flowlabel;

		th = (struct tcphdr *)(ip6 + 1);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = tlen;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = inp->inp_ip_ttl;   /* XXX */
		ip->ip_tos = inp->inp_ip_tos;   /* XXX */

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if either:
		 *
		 *	1) path_mtu_discovery is disabled, or
		 *	2) the SCF_UNREACH flag has been set.
		 */
		if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
		       ip->ip_off |= IP_DF;

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	/* Tack on the TCP options. */
	if (optlen != 0) {
		optp = (u_int8_t *)(th + 1);
		*optp++ = TCPOPT_MAXSEG;
		*optp++ = TCPOLEN_MAXSEG;
		*optp++ = (mssopt >> 8) & 0xff;
		*optp++ = mssopt & 0xff;

		if (sc->sc_flags & SCF_WINSCALE) {
			*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
			    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
			    sc->sc_request_r_scale);
			optp += 4;
		}

		if (sc->sc_flags & SCF_TIMESTAMP) {
			u_int32_t *lp = (u_int32_t *)(optp);

			/* Form timestamp option per appendix A of RFC 1323. */
			*lp++ = htonl(TCPOPT_TSTAMP_HDR);
			*lp++ = htonl(ticks);
			*lp   = htonl(sc->sc_tsrecent);
			optp += TCPOLEN_TSTAMP_APPA;
		}

#ifdef TCP_SIGNATURE
		/*
		 * Handle TCP-MD5 passive opener response.
		 */
		if (sc->sc_flags & SCF_SIGNATURE) {
			u_int8_t *bp = optp;
			int i;

			*bp++ = TCPOPT_SIGNATURE;
			*bp++ = TCPOLEN_SIGNATURE;
			for (i = 0; i < TCP_SIGLEN; i++)
				*bp++ = 0;
			tcp_signature_compute(m, sizeof(struct ip), 0, optlen,
			    optp + 2, IPSEC_DIR_OUTBOUND);
			optp += TCPOLEN_SIGNATURE;
		}
#endif /* TCP_SIGNATURE */

		if (sc->sc_flags & SCF_SACK) {
			*optp++ = TCPOPT_SACK_PERMITTED;
			*optp++ = TCPOLEN_SACK_PERMITTED;
		}

		{
			/* Pad TCP options to a 4 byte boundary */
			int padlen = optlen - (optp - (u_int8_t *)(th + 1));
			while (padlen-- > 0)
				*optp++ = TCPOPT_EOL;
		}
	}

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef TCPDEBUG
		/*
		 * Trace.
		 */
		if (so != NULL && so->so_options & SO_DEBUG) {
			struct tcpcb *tp = sototcpcb(so);
			tcp_trace(TA_OUTPUT, tp->t_state, tp,
			    mtod(m, void *), th, 0);
		}
#endif
		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, inp);
	}
	INP_UNLOCK(inp);
	return (error);
}

/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 * (A): peer mss index
 */

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as to provide roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
    (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
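
/*
 * With the values above, SYNCOOKIE_TIMEOUT evaluates to
 * hz * 32 / 2 == 16 * hz ticks, i.e. the roughly 16 second cookie
 * lifetime mentioned above, spread across SYNCOOKIE_NSECRETS == 32
 * rotating secrets.
 */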

static struct {
	u_int32_t	ts_secbits[4];
	u_int		ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

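/*
 * The four MSS classes encodable in the cookie's two mss-index bits:
 * unknown (0), the classic 536 default (576 - 40), and what appear to
 * be Ethernet (1500 - 40) and 9000-byte jumbo frames (9000 - 40).
 */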
static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

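/*
 * struct md5_add must pack to exactly 28 bytes (4 + 4 + 16 + 2 + 2)
 * with no compiler padding, or MD5Add(add) would hash indeterminate
 * bytes and cookies would fail to validate; the CTASSERT below
 * enforces this where available.
 */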
#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */

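/*
 * Cookie construction, following the layer diagram above: the low
 * SYNCOOKIE_WNDBITS bits hold the secret-table index and the next two
 * bits hold the tcp_msstab[] index.  All bits above the window index
 * are then XORed with MD5 over the connection's addresses, ports and
 * the per-window secret, and the whole word is XORed with the peer's
 * ISS.  syncookie_lookup() strips the ISS using the sequence numbers
 * echoed in the returning ACK and checks that the bits above
 * SYNCOOKIE_DATAMASK cancel to zero.
 */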
static u_int32_t
syncookie_generate(struct syncache *sc, u_int32_t *flowid)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;

	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = arc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
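	/* Encode the peer's MSS as the largest tcp_msstab[] entry <= it. */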
	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;				/* peer's iss */
	MD5Init(&syn_ctx);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	*flowid = md5_buffer[1];
	return (data);
}

static struct syncache *
syncookie_lookup(inc, th, so)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket *so;
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;
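	/* data is now the recovered tcp_msstab[] index. */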

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
	if (sc == NULL)
		return (NULL);
	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
	sc->sc_tp = sototcpcb(so);
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		if (sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)
			sc->sc_flowlabel = md5_buffer[1] & IPV6_FLOWLABEL_MASK;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}
1407