tcp.c revision 11042:2d6e217af1b4
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26/* Copyright (c) 1990 Mentat Inc. */
27
28#include <sys/types.h>
29#include <sys/stream.h>
30#include <sys/strsun.h>
31#include <sys/strsubr.h>
32#include <sys/stropts.h>
33#include <sys/strlog.h>
34#define	_SUN_TPI_VERSION 2
35#include <sys/tihdr.h>
36#include <sys/timod.h>
37#include <sys/ddi.h>
38#include <sys/sunddi.h>
39#include <sys/suntpi.h>
40#include <sys/xti_inet.h>
41#include <sys/cmn_err.h>
42#include <sys/debug.h>
43#include <sys/sdt.h>
44#include <sys/vtrace.h>
45#include <sys/kmem.h>
46#include <sys/ethernet.h>
47#include <sys/cpuvar.h>
48#include <sys/dlpi.h>
49#include <sys/pattr.h>
50#include <sys/policy.h>
51#include <sys/priv.h>
52#include <sys/zone.h>
53#include <sys/sunldi.h>
54
55#include <sys/errno.h>
56#include <sys/signal.h>
57#include <sys/socket.h>
58#include <sys/socketvar.h>
59#include <sys/sockio.h>
60#include <sys/isa_defs.h>
61#include <sys/md5.h>
62#include <sys/random.h>
63#include <sys/uio.h>
64#include <sys/systm.h>
65#include <netinet/in.h>
66#include <netinet/tcp.h>
67#include <netinet/ip6.h>
68#include <netinet/icmp6.h>
69#include <net/if.h>
70#include <net/route.h>
71#include <inet/ipsec_impl.h>
72
73#include <inet/common.h>
74#include <inet/ip.h>
75#include <inet/ip_impl.h>
76#include <inet/ip6.h>
77#include <inet/ip_ndp.h>
78#include <inet/proto_set.h>
79#include <inet/mib2.h>
80#include <inet/nd.h>
81#include <inet/optcom.h>
82#include <inet/snmpcom.h>
83#include <inet/kstatcom.h>
84#include <inet/tcp.h>
85#include <inet/tcp_impl.h>
86#include <inet/udp_impl.h>
87#include <net/pfkeyv2.h>
88#include <inet/ipdrop.h>
89
90#include <inet/ipclassifier.h>
91#include <inet/ip_ire.h>
92#include <inet/ip_ftable.h>
93#include <inet/ip_if.h>
94#include <inet/ipp_common.h>
95#include <inet/ip_rts.h>
96#include <inet/ip_netinfo.h>
97#include <sys/squeue_impl.h>
98#include <sys/squeue.h>
99#include <inet/kssl/ksslapi.h>
100#include <sys/tsol/label.h>
101#include <sys/tsol/tnet.h>
102#include <rpc/pmap_prot.h>
103#include <sys/callo.h>
104
105/*
106 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
107 *
108 * (Read the detailed design doc in PSARC case directory)
109 *
110 * The entire tcp state is contained in tcp_t and conn_t structure
111 * which are allocated in tandem using ipcl_conn_create() and passing
112 * IPCL_TCPCONN as a flag. We use 'conn_ref' and 'conn_lock' to protect
113 * the references on the tcp_t. The tcp_t structure is never compressed
114 * and packets always land on the correct TCP perimeter from the time
115 * eager is created till the time tcp_t dies (as such the old mentat
116 * TCP global queue is not used for detached state and no IPSEC checking
117 * is required). The global queue is still allocated to send out resets
118 * for connection which have no listeners and IP directly calls
119 * tcp_xmit_listeners_reset() which does any policy check.
120 *
121 * Protection and Synchronisation mechanism:
122 *
123 * The tcp data structure does not use any kind of lock for protecting
124 * its state but instead uses 'squeues' for mutual exclusion from various
125 * read and write side threads. To access a tcp member, the thread should
126 * always be behind squeue (via squeue_enter with flags as SQ_FILL, SQ_PROCESS,
127 * or SQ_NODRAIN). Since the squeues allow a direct function call, caller
128 * can pass any tcp function having prototype of edesc_t as argument
129 * (different from traditional STREAMs model where packets come in only
130 * designated entry points). The list of functions that can be directly
131 * called via squeue are listed before the usual function prototype.
132 *
133 * Referencing:
134 *
135 * TCP is MT-Hot and we use a reference based scheme to make sure that the
136 * tcp structure doesn't disappear when its needed. When the application
137 * creates an outgoing connection or accepts an incoming connection, we
138 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
139 * The IP reference is just a symbolic reference since ip_tcpclose()
140 * looks at tcp structure after tcp_close_output() returns which could
141 * have dropped the last TCP reference. So as long as the connection is
142 * in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
143 * conn_t. The classifier puts its own reference when the connection is
144 * inserted in listen or connected hash. Anytime a thread needs to enter
145 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
146 * on write side or by doing a classify on read side and then puts a
147 * reference on the conn before doing squeue_enter/tryenter/fill. For
148 * read side, the classifier itself puts the reference under fanout lock
149 * to make sure that tcp can't disappear before it gets processed. The
150 * squeue will drop this reference automatically so the called function
151 * doesn't have to do a DEC_REF.
152 *
153 * Opening a new connection:
154 *
155 * The outgoing connection open is pretty simple. tcp_open() does the
156 * work in creating the conn/tcp structure and initializing it. The
157 * squeue assignment is done based on the CPU the application
158 * is running on. So for outbound connections, processing is always done
159 * on application CPU which might be different from the incoming CPU
160 * being interrupted by the NIC. An optimal way would be to figure out
161 * the NIC <-> CPU binding at listen time, and assign the outgoing
162 * connection to the squeue attached to the CPU that will be interrupted
163 * for incoming packets (we know the NIC based on the bind IP address).
164 * This might seem like a problem if more data is going out but the
165 * fact is that in most cases the transmit is ACK driven transmit where
166 * the outgoing data normally sits on TCP's xmit queue waiting to be
167 * transmitted.
168 *
169 * Accepting a connection:
170 *
171 * This is a more interesting case because of various races involved in
172 * establishing a eager in its own perimeter. Read the meta comment on
173 * top of tcp_input_listener(). But briefly, the squeue is picked by
174 * ip_fanout based on the ring or the sender (if loopback).
175 *
176 * Closing a connection:
177 *
178 * The close is fairly straight forward. tcp_close() calls tcp_close_output()
179 * via squeue to do the close and mark the tcp as detached if the connection
180 * was in state TCPS_ESTABLISHED or greater. In the later case, TCP keep its
181 * reference but tcp_close() drop IP's reference always. So if tcp was
182 * not killed, it is sitting in time_wait list with 2 reference - 1 for TCP
183 * and 1 because it is in classifier's connected hash. This is the condition
184 * we use to determine that its OK to clean up the tcp outside of squeue
185 * when time wait expires (check the ref under fanout and conn_lock and
186 * if it is 2, remove it from fanout hash and kill it).
187 *
188 * Although close just drops the necessary references and marks the
189 * tcp_detached state, tcp_close needs to know the tcp_detached has been
190 * set (under squeue) before letting the STREAM go away (because a
191 * inbound packet might attempt to go up the STREAM while the close
192 * has happened and tcp_detached is not set). So a special lock and
193 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
194 * and tcp_closecv) to signal tcp_close that tcp_close_out() has marked
195 * tcp_detached.
196 *
197 * Special provisions and fast paths:
198 *
199 * We make special provisions for sockfs by marking tcp_issocket
200 * whenever we have only sockfs on top of TCP. This allows us to skip
201 * putting the tcp in acceptor hash since a sockfs listener can never
202 * become acceptor and also avoid allocating a tcp_t for acceptor STREAM
203 * since eager has already been allocated and the accept now happens
204 * on acceptor STREAM. There is a big blob of comment on top of
205 * tcp_input_listener explaining the new accept. When socket is POP'd,
206 * sockfs sends us an ioctl to mark the fact and we go back to old
207 * behaviour. Once tcp_issocket is unset, its never set for the
208 * life of that connection.
209 *
210 * IPsec notes :
211 *
212 * Since a packet is always executed on the correct TCP perimeter
213 * all IPsec processing is defered to IP including checking new
214 * connections and setting IPSEC policies for new connection. The
215 * only exception is tcp_xmit_listeners_reset() which is called
216 * directly from IP and needs to policy check to see if TH_RST
217 * can be sent out.
218 */
219
220/*
221 * Values for squeue switch:
222 * 1: SQ_NODRAIN
223 * 2: SQ_PROCESS
224 * 3: SQ_FILL
225 */
226int tcp_squeue_wput = 2;	/* /etc/systems */
227int tcp_squeue_flag;
228
229/*
230 * This controls how tiny a write must be before we try to copy it
231 * into the mblk on the tail of the transmit queue.  Not much
232 * speedup is observed for values larger than sixteen.  Zero will
233 * disable the optimisation.
234 */
235int tcp_tx_pull_len = 16;
236
237/*
238 * TCP Statistics.
239 *
240 * How TCP statistics work.
241 *
242 * There are two types of statistics invoked by two macros.
243 *
244 * TCP_STAT(name) does non-atomic increment of a named stat counter. It is
245 * supposed to be used in non MT-hot paths of the code.
246 *
247 * TCP_DBGSTAT(name) does atomic increment of a named stat counter. It is
248 * supposed to be used for DEBUG purposes and may be used on a hot path.
249 *
250 * Both TCP_STAT and TCP_DBGSTAT counters are available using kstat
251 * (use "kstat tcp" to get them).
252 *
253 * There is also additional debugging facility that marks tcp_clean_death()
254 * instances and saves them in tcp_t structure. It is triggered by
255 * TCP_TAG_CLEAN_DEATH define. Also, there is a global array of counters for
256 * tcp_clean_death() calls that counts the number of times each tag was hit. It
257 * is triggered by TCP_CLD_COUNTERS define.
258 *
259 * How to add new counters.
260 *
261 * 1) Add a field in the tcp_stat structure describing your counter.
262 * 2) Add a line in the template in tcp_kstat2_init() with the name
263 *    of the counter.
264 *
265 *    IMPORTANT!! - make sure that both are in sync !!
266 * 3) Use either TCP_STAT or TCP_DBGSTAT with the name.
267 *
268 * Please avoid using private counters which are not kstat-exported.
269 *
270 * TCP_TAG_CLEAN_DEATH set to 1 enables tagging of tcp_clean_death() instances
271 * in tcp_t structure.
272 *
273 * TCP_MAX_CLEAN_DEATH_TAG is the maximum number of possible clean death tags.
274 */
275
276#ifndef TCP_DEBUG_COUNTER
277#ifdef DEBUG
278#define	TCP_DEBUG_COUNTER 1
279#else
280#define	TCP_DEBUG_COUNTER 0
281#endif
282#endif
283
284#define	TCP_CLD_COUNTERS 0
285
286#define	TCP_TAG_CLEAN_DEATH 1
287#define	TCP_MAX_CLEAN_DEATH_TAG 32
288
289#ifdef lint
290static int _lint_dummy_;
291#endif
292
293#if TCP_CLD_COUNTERS
294static uint_t tcp_clean_death_stat[TCP_MAX_CLEAN_DEATH_TAG];
295#define	TCP_CLD_STAT(x) tcp_clean_death_stat[x]++
296#elif defined(lint)
297#define	TCP_CLD_STAT(x) ASSERT(_lint_dummy_ == 0);
298#else
299#define	TCP_CLD_STAT(x)
300#endif
301
302#if TCP_DEBUG_COUNTER
303#define	TCP_DBGSTAT(tcps, x)	\
304	atomic_add_64(&((tcps)->tcps_statistics.x.value.ui64), 1)
305#define	TCP_G_DBGSTAT(x)	\
306	atomic_add_64(&(tcp_g_statistics.x.value.ui64), 1)
307#elif defined(lint)
308#define	TCP_DBGSTAT(tcps, x) ASSERT(_lint_dummy_ == 0);
309#define	TCP_G_DBGSTAT(x) ASSERT(_lint_dummy_ == 0);
310#else
311#define	TCP_DBGSTAT(tcps, x)
312#define	TCP_G_DBGSTAT(x)
313#endif
314
315#define	TCP_G_STAT(x)	(tcp_g_statistics.x.value.ui64++)
316
317tcp_g_stat_t	tcp_g_statistics;
318kstat_t		*tcp_g_kstat;
319
320/* Macros for timestamp comparisons */
321#define	TSTMP_GEQ(a, b)	((int32_t)((a)-(b)) >= 0)
322#define	TSTMP_LT(a, b)	((int32_t)((a)-(b)) < 0)
323
324/*
325 * Parameters for TCP Initial Send Sequence number (ISS) generation.  When
326 * tcp_strong_iss is set to 1, which is the default, the ISS is calculated
327 * by adding three components: a time component which grows by 1 every 4096
328 * nanoseconds (versus every 4 microseconds suggested by RFC 793, page 27);
329 * a per-connection component which grows by 125000 for every new connection;
330 * and an "extra" component that grows by a random amount centered
331 * approximately on 64000.  This causes the ISS generator to cycle every
332 * 4.89 hours if no TCP connections are made, and faster if connections are
333 * made.
334 *
335 * When tcp_strong_iss is set to 0, ISS is calculated by adding two
336 * components: a time component which grows by 250000 every second; and
337 * a per-connection component which grows by 125000 for every new connections.
338 *
339 * A third method, when tcp_strong_iss is set to 2, for generating ISS is
340 * prescribed by Steve Bellovin.  This involves adding time, the 125000 per
341 * connection, and a one-way hash (MD5) of the connection ID <sport, dport,
342 * src, dst>, a "truly" random (per RFC 1750) number, and a console-entered
343 * password.
344 */
345#define	ISS_INCR	250000
346#define	ISS_NSEC_SHT	12
347
348static sin_t	sin_null;	/* Zero address for quick clears */
349static sin6_t	sin6_null;	/* Zero address for quick clears */
350
351/*
352 * This implementation follows the 4.3BSD interpretation of the urgent
353 * pointer and not RFC 1122. Switching to RFC 1122 behavior would cause
354 * incompatible changes in protocols like telnet and rlogin.
355 */
356#define	TCP_OLD_URP_INTERPRETATION	1
357
358/*
359 * Since tcp_listener is not cleared atomically with tcp_detached
360 * being cleared we need this extra bit to tell a detached connection
361 * apart from one that is in the process of being accepted.
362 */
363#define	TCP_IS_DETACHED_NONEAGER(tcp)	\
364	(TCP_IS_DETACHED(tcp) &&	\
365	    (!(tcp)->tcp_hard_binding))
366
367/*
368 * TCP reassembly macros.  We hide starting and ending sequence numbers in
369 * b_next and b_prev of messages on the reassembly queue.  The messages are
370 * chained using b_cont.  These macros are used in tcp_reass() so we don't
371 * have to see the ugly casts and assignments.
372 */
373#define	TCP_REASS_SEQ(mp)		((uint32_t)(uintptr_t)((mp)->b_next))
374#define	TCP_REASS_SET_SEQ(mp, u)	((mp)->b_next = \
375					(mblk_t *)(uintptr_t)(u))
376#define	TCP_REASS_END(mp)		((uint32_t)(uintptr_t)((mp)->b_prev))
377#define	TCP_REASS_SET_END(mp, u)	((mp)->b_prev = \
378					(mblk_t *)(uintptr_t)(u))
379
380/*
381 * Implementation of TCP Timers.
382 * =============================
383 *
384 * INTERFACE:
385 *
386 * There are two basic functions dealing with tcp timers:
387 *
388 *	timeout_id_t	tcp_timeout(connp, func, time)
389 * 	clock_t		tcp_timeout_cancel(connp, timeout_id)
390 *	TCP_TIMER_RESTART(tcp, intvl)
391 *
392 * tcp_timeout() starts a timer for the 'tcp' instance arranging to call 'func'
393 * after 'time' ticks passed. The function called by timeout() must adhere to
394 * the same restrictions as a driver soft interrupt handler - it must not sleep
395 * or call other functions that might sleep. The value returned is the opaque
396 * non-zero timeout identifier that can be passed to tcp_timeout_cancel() to
397 * cancel the request. The call to tcp_timeout() may fail in which case it
398 * returns zero. This is different from the timeout(9F) function which never
399 * fails.
400 *
401 * The call-back function 'func' always receives 'connp' as its single
402 * argument. It is always executed in the squeue corresponding to the tcp
403 * structure. The tcp structure is guaranteed to be present at the time the
404 * call-back is called.
405 *
406 * NOTE: The call-back function 'func' is never called if tcp is in
407 * 	the TCPS_CLOSED state.
408 *
409 * tcp_timeout_cancel() attempts to cancel a pending tcp_timeout()
410 * request. locks acquired by the call-back routine should not be held across
411 * the call to tcp_timeout_cancel() or a deadlock may result.
412 *
413 * tcp_timeout_cancel() returns -1 if it can not cancel the timeout request.
414 * Otherwise, it returns an integer value greater than or equal to 0. In
415 * particular, if the call-back function is already placed on the squeue, it can
416 * not be canceled.
417 *
418 * NOTE: both tcp_timeout() and tcp_timeout_cancel() should always be called
419 * 	within squeue context corresponding to the tcp instance. Since the
420 *	call-back is also called via the same squeue, there are no race
421 *	conditions described in untimeout(9F) manual page since all calls are
422 *	strictly serialized.
423 *
424 *      TCP_TIMER_RESTART() is a macro that attempts to cancel a pending timeout
425 *	stored in tcp_timer_tid and starts a new one using
426 *	MSEC_TO_TICK(intvl). It always uses tcp_timer() function as a call-back
427 *	and stores the return value of tcp_timeout() in the tcp->tcp_timer_tid
428 *	field.
429 *
430 * NOTE: since the timeout cancellation is not guaranteed, the cancelled
431 *	call-back may still be called, so it is possible tcp_timer() will be
432 *	called several times. This should not be a problem since tcp_timer()
433 *	should always check the tcp instance state.
434 *
435 *
436 * IMPLEMENTATION:
437 *
438 * TCP timers are implemented using three-stage process. The call to
439 * tcp_timeout() uses timeout(9F) function to call tcp_timer_callback() function
440 * when the timer expires. The tcp_timer_callback() arranges the call of the
441 * tcp_timer_handler() function via squeue corresponding to the tcp
442 * instance. The tcp_timer_handler() calls actual requested timeout call-back
443 * and passes tcp instance as an argument to it. Information is passed between
444 * stages using the tcp_timer_t structure which contains the connp pointer, the
445 * tcp call-back to call and the timeout id returned by the timeout(9F).
446 *
447 * The tcp_timer_t structure is not used directly, it is embedded in an mblk_t -
448 * like structure that is used to enter an squeue. The mp->b_rptr of this pseudo
449 * mblk points to the beginning of tcp_timer_t structure. The tcp_timeout()
450 * returns the pointer to this mblk.
451 *
452 * The pseudo mblk is allocated from a special tcp_timer_cache kmem cache. It
453 * looks like a normal mblk without actual dblk attached to it.
454 *
455 * To optimize performance each tcp instance holds a small cache of timer
456 * mblocks. In the current implementation it caches up to two timer mblocks per
457 * tcp instance. The cache is preserved over tcp frees and is only freed when
458 * the whole tcp structure is destroyed by its kmem destructor. Since all tcp
459 * timer processing happens on a corresponding squeue, the cache manipulation
460 * does not require any locks. Experiments show that majority of timer mblocks
461 * allocations are satisfied from the tcp cache and do not involve kmem calls.
462 *
463 * The tcp_timeout() places a refhold on the connp instance which guarantees
464 * that it will be present at the time the call-back function fires. The
465 * tcp_timer_handler() drops the reference after calling the call-back, so the
466 * call-back function does not need to manipulate the references explicitly.
467 */
468
469typedef struct tcp_timer_s {
470	conn_t	*connp;
471	void 	(*tcpt_proc)(void *);
472	callout_id_t   tcpt_tid;
473} tcp_timer_t;
474
475static kmem_cache_t *tcp_timercache;
476kmem_cache_t	*tcp_sack_info_cache;
477
478/*
479 * For scalability, we must not run a timer for every TCP connection
480 * in TIME_WAIT state.  To see why, consider (for time wait interval of
481 * 4 minutes):
482 *	1000 connections/sec * 240 seconds/time wait = 240,000 active conn's
483 *
484 * This list is ordered by time, so you need only delete from the head
485 * until you get to entries which aren't old enough to delete yet.
486 * The list consists of only the detached TIME_WAIT connections.
487 *
488 * Note that the timer (tcp_time_wait_expire) is started when the tcp_t
489 * becomes detached TIME_WAIT (either by changing the state and already
490 * being detached or the other way around). This means that the TIME_WAIT
491 * state can be extended (up to doubled) if the connection doesn't become
492 * detached for a long time.
493 *
494 * The list manipulations (including tcp_time_wait_next/prev)
495 * are protected by the tcp_time_wait_lock. The content of the
496 * detached TIME_WAIT connections is protected by the normal perimeters.
497 *
498 * This list is per squeue and squeues are shared across the tcp_stack_t's.
499 * Things on tcp_time_wait_head remain associated with the tcp_stack_t
500 * and conn_netstack.
501 * The tcp_t's that are added to tcp_free_list are disassociated and
502 * have NULL tcp_tcps and conn_netstack pointers.
503 */
504typedef struct tcp_squeue_priv_s {
505	kmutex_t	tcp_time_wait_lock;
506	callout_id_t	tcp_time_wait_tid;
507	tcp_t		*tcp_time_wait_head;
508	tcp_t		*tcp_time_wait_tail;
509	tcp_t		*tcp_free_list;
510	uint_t		tcp_free_list_cnt;
511} tcp_squeue_priv_t;
512
513/*
514 * TCP_TIME_WAIT_DELAY governs how often the time_wait_collector runs.
515 * Running it every 5 seconds seems to give the best results.
516 */
517#define	TCP_TIME_WAIT_DELAY drv_usectohz(5000000)
518
519/*
520 * To prevent memory hog, limit the number of entries in tcp_free_list
521 * to 1% of available memory / number of cpus
522 */
523uint_t tcp_free_list_max_cnt = 0;
524
525#define	TCP_XMIT_LOWATER	4096
526#define	TCP_XMIT_HIWATER	49152
527#define	TCP_RECV_LOWATER	2048
528#define	TCP_RECV_HIWATER	49152
529
530/*
531 *  PAWS needs a timer for 24 days.  This is the number of ticks in 24 days
532 */
533#define	PAWS_TIMEOUT	((clock_t)(24*24*60*60*hz))
534
535#define	TIDUSZ	4096	/* transport interface data unit size */
536
537/*
538 * Bind hash list size and has function.  It has to be a power of 2 for
539 * hashing.
540 */
541#define	TCP_BIND_FANOUT_SIZE	512
542#define	TCP_BIND_HASH(lport) (ntohs(lport) & (TCP_BIND_FANOUT_SIZE - 1))
543/*
544 * Size of listen and acceptor hash list.  It has to be a power of 2 for
545 * hashing.
546 */
547#define	TCP_FANOUT_SIZE		256
548
549#ifdef	_ILP32
550#define	TCP_ACCEPTOR_HASH(accid)					\
551		(((uint_t)(accid) >> 8) & (TCP_FANOUT_SIZE - 1))
552#else
553#define	TCP_ACCEPTOR_HASH(accid)					\
554		((uint_t)(accid) & (TCP_FANOUT_SIZE - 1))
555#endif	/* _ILP32 */
556
557#define	IP_ADDR_CACHE_SIZE	2048
558#define	IP_ADDR_CACHE_HASH(faddr)					\
559	(ntohl(faddr) & (IP_ADDR_CACHE_SIZE -1))
560
561/*
562 * TCP options struct returned from tcp_parse_options.
563 */
564typedef struct tcp_opt_s {
565	uint32_t	tcp_opt_mss;
566	uint32_t	tcp_opt_wscale;
567	uint32_t	tcp_opt_ts_val;
568	uint32_t	tcp_opt_ts_ecr;
569	tcp_t		*tcp;
570} tcp_opt_t;
571
572/*
573 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing
574 */
575
576#ifdef _BIG_ENDIAN
577#define	TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
578	(TCPOPT_TSTAMP << 8) | 10)
579#else
580#define	TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
581	(TCPOPT_NOP << 8) | TCPOPT_NOP)
582#endif
583
584/*
585 * Flags returned from tcp_parse_options.
586 */
587#define	TCP_OPT_MSS_PRESENT	1
588#define	TCP_OPT_WSCALE_PRESENT	2
589#define	TCP_OPT_TSTAMP_PRESENT	4
590#define	TCP_OPT_SACK_OK_PRESENT	8
591#define	TCP_OPT_SACK_PRESENT	16
592
593/* TCP option length */
594#define	TCPOPT_NOP_LEN		1
595#define	TCPOPT_MAXSEG_LEN	4
596#define	TCPOPT_WS_LEN		3
597#define	TCPOPT_REAL_WS_LEN	(TCPOPT_WS_LEN+1)
598#define	TCPOPT_TSTAMP_LEN	10
599#define	TCPOPT_REAL_TS_LEN	(TCPOPT_TSTAMP_LEN+2)
600#define	TCPOPT_SACK_OK_LEN	2
601#define	TCPOPT_REAL_SACK_OK_LEN	(TCPOPT_SACK_OK_LEN+2)
602#define	TCPOPT_REAL_SACK_LEN	4
603#define	TCPOPT_MAX_SACK_LEN	36
604#define	TCPOPT_HEADER_LEN	2
605
606/* TCP cwnd burst factor. */
607#define	TCP_CWND_INFINITE	65535
608#define	TCP_CWND_SS		3
609#define	TCP_CWND_NORMAL		5
610
611/* Maximum TCP initial cwin (start/restart). */
612#define	TCP_MAX_INIT_CWND	8
613
614/*
615 * Initialize cwnd according to RFC 3390.  def_max_init_cwnd is
616 * either tcp_slow_start_initial or tcp_slow_start_after idle
617 * depending on the caller.  If the upper layer has not used the
618 * TCP_INIT_CWND option to change the initial cwnd, tcp_init_cwnd
619 * should be 0 and we use the formula in RFC 3390 to set tcp_cwnd.
620 * If the upper layer has changed set the tcp_init_cwnd, just use
621 * it to calculate the tcp_cwnd.
622 */
623#define	SET_TCP_INIT_CWND(tcp, mss, def_max_init_cwnd)			\
624{									\
625	if ((tcp)->tcp_init_cwnd == 0) {				\
626		(tcp)->tcp_cwnd = MIN(def_max_init_cwnd * (mss),	\
627		    MIN(4 * (mss), MAX(2 * (mss), 4380 / (mss) * (mss)))); \
628	} else {							\
629		(tcp)->tcp_cwnd = (tcp)->tcp_init_cwnd * (mss);		\
630	}								\
631	tcp->tcp_cwnd_cnt = 0;						\
632}
633
634/* TCP Timer control structure */
635typedef struct tcpt_s {
636	pfv_t	tcpt_pfv;	/* The routine we are to call */
637	tcp_t	*tcpt_tcp;	/* The parameter we are to pass in */
638} tcpt_t;
639
640/*
641 * Functions called directly via squeue having a prototype of edesc_t.
642 */
643void		tcp_input_listener(void *arg, mblk_t *mp, void *arg2,
644    ip_recv_attr_t *ira);
645static void	tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2,
646    ip_recv_attr_t *dummy);
647void		tcp_accept_finish(void *arg, mblk_t *mp, void *arg2,
648    ip_recv_attr_t *dummy);
649static void	tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2,
650    ip_recv_attr_t *dummy);
651static void	tcp_wput_proto(void *arg, mblk_t *mp, void *arg2,
652    ip_recv_attr_t *dummy);
653void		tcp_input_data(void *arg, mblk_t *mp, void *arg2,
654    ip_recv_attr_t *ira);
655static void	tcp_close_output(void *arg, mblk_t *mp, void *arg2,
656    ip_recv_attr_t *dummy);
657void		tcp_output(void *arg, mblk_t *mp, void *arg2,
658    ip_recv_attr_t *dummy);
659void		tcp_output_urgent(void *arg, mblk_t *mp, void *arg2,
660    ip_recv_attr_t *dummy);
661static void	tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2,
662    ip_recv_attr_t *dummy);
663static void	tcp_timer_handler(void *arg, mblk_t *mp, void *arg2,
664    ip_recv_attr_t *dummy);
665static void	tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2,
666    ip_recv_attr_t *dummy);
667
668
669/* Prototype for TCP functions */
670static void	tcp_random_init(void);
671int		tcp_random(void);
672static void	tcp_tli_accept(tcp_t *tcp, mblk_t *mp);
673static void	tcp_accept_swap(tcp_t *listener, tcp_t *acceptor,
674		    tcp_t *eager);
675static int	tcp_set_destination(tcp_t *tcp);
676static in_port_t tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr,
677    int reuseaddr, boolean_t quick_connect, boolean_t bind_to_req_port_only,
678    boolean_t user_specified);
679static void	tcp_closei_local(tcp_t *tcp);
680static void	tcp_close_detached(tcp_t *tcp);
681static boolean_t tcp_conn_con(tcp_t *tcp, uchar_t *iphdr,
682		    mblk_t *idmp, mblk_t **defermp, ip_recv_attr_t *ira);
683static void	tcp_tpi_connect(tcp_t *tcp, mblk_t *mp);
684static int	tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp,
685		    in_port_t dstport, uint_t srcid);
686static int	tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp,
687		    in_port_t dstport, uint32_t flowinfo,
688		    uint_t srcid, uint32_t scope_id);
689static int	tcp_clean_death(tcp_t *tcp, int err, uint8_t tag);
690static void	tcp_disconnect(tcp_t *tcp, mblk_t *mp);
691static char	*tcp_display(tcp_t *tcp, char *, char);
692static boolean_t tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum);
693static void	tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only);
694static void	tcp_eager_unlink(tcp_t *tcp);
695static void	tcp_err_ack(tcp_t *tcp, mblk_t *mp, int tlierr,
696		    int unixerr);
697static void	tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive,
698		    int tlierr, int unixerr);
699static int	tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp,
700		    cred_t *cr);
701static int	tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp,
702		    char *value, caddr_t cp, cred_t *cr);
703static int	tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp,
704		    char *value, caddr_t cp, cred_t *cr);
705static int	tcp_tpistate(tcp_t *tcp);
706static void	tcp_bind_hash_insert(tf_t *tf, tcp_t *tcp,
707    int caller_holds_lock);
708static void	tcp_bind_hash_remove(tcp_t *tcp);
709static tcp_t	*tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *);
710void		tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp);
711static void	tcp_acceptor_hash_remove(tcp_t *tcp);
712static void	tcp_capability_req(tcp_t *tcp, mblk_t *mp);
713static void	tcp_info_req(tcp_t *tcp, mblk_t *mp);
714static void	tcp_addr_req(tcp_t *tcp, mblk_t *mp);
715static void	tcp_init_values(tcp_t *tcp);
716static void	tcp_ip_notify(tcp_t *tcp);
717static void	tcp_iss_init(tcp_t *tcp);
718static void	tcp_keepalive_killer(void *arg);
719static int	tcp_parse_options(tcpha_t *tcpha, tcp_opt_t *tcpopt);
720static void	tcp_mss_set(tcp_t *tcp, uint32_t size);
721static int	tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp,
722		    int *do_disconnectp, int *t_errorp, int *sys_errorp);
723static boolean_t tcp_allow_connopt_set(int level, int name);
724int		tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr);
725static int	tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr);
726static boolean_t tcp_param_register(IDP *ndp, tcpparam_t *tcppa, int cnt,
727    tcp_stack_t *);
728static int	tcp_param_set(queue_t *q, mblk_t *mp, char *value,
729		    caddr_t cp, cred_t *cr);
730static int	tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value,
731		    caddr_t cp, cred_t *cr);
732static void	tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *);
733static int	tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value,
734		    caddr_t cp, cred_t *cr);
735static void	tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_cnt);
736static void	tcp_update_xmit_tail(tcp_t *tcp, uint32_t snxt);
737static mblk_t	*tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start);
738static void	tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp);
739static void	tcp_reinit(tcp_t *tcp);
740static void	tcp_reinit_values(tcp_t *tcp);
741
742static uint_t	tcp_rwnd_reopen(tcp_t *tcp);
743static uint_t	tcp_rcv_drain(tcp_t *tcp);
744static void	tcp_sack_rxmit(tcp_t *tcp, uint_t *flags);
745static boolean_t tcp_send_rst_chk(tcp_stack_t *);
746static void	tcp_ss_rexmit(tcp_t *tcp);
747static mblk_t	*tcp_input_add_ancillary(tcp_t *tcp, mblk_t *mp, ip_pkt_t *ipp,
748    ip_recv_attr_t *);
749static void	tcp_process_options(tcp_t *, tcpha_t *);
750static void	tcp_rsrv(queue_t *q);
751static int	tcp_snmp_state(tcp_t *tcp);
752static void	tcp_timer(void *arg);
753static void	tcp_timer_callback(void *);
754static in_port_t tcp_update_next_port(in_port_t port, const tcp_t *tcp,
755    boolean_t random);
756static in_port_t tcp_get_next_priv_port(const tcp_t *);
757static void	tcp_wput_sock(queue_t *q, mblk_t *mp);
758static void	tcp_wput_fallback(queue_t *q, mblk_t *mp);
759void		tcp_tpi_accept(queue_t *q, mblk_t *mp);
760static void	tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent);
761static void	tcp_wput_flush(tcp_t *tcp, mblk_t *mp);
762static void	tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp);
763static int	tcp_send(tcp_t *tcp, const int mss,
764		    const int total_hdr_len, const int tcp_hdr_len,
765		    const int num_sack_blk, int *usable, uint_t *snxt,
766		    int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time);
767static void	tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now,
768		    int num_sack_blk);
769static void	tcp_wsrv(queue_t *q);
770static int	tcp_xmit_end(tcp_t *tcp);
771static void	tcp_ack_timer(void *arg);
772static mblk_t	*tcp_ack_mp(tcp_t *tcp);
773static void	tcp_xmit_early_reset(char *str, mblk_t *mp,
774		    uint32_t seq, uint32_t ack, int ctl, ip_recv_attr_t *,
775		    ip_stack_t *, conn_t *);
776static void	tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq,
777		    uint32_t ack, int ctl);
778static void	tcp_set_rto(tcp_t *, time_t);
779static void	tcp_icmp_input(void *, mblk_t *, void *, ip_recv_attr_t *);
780static void	tcp_icmp_error_ipv6(tcp_t *, mblk_t *, ip_recv_attr_t *);
781static boolean_t tcp_verifyicmp(conn_t *, void *, icmph_t *, icmp6_t *,
782    ip_recv_attr_t *);
783static int	tcp_build_hdrs(tcp_t *);
784static void	tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp,
785    uint32_t seg_seq, uint32_t seg_ack, int seg_len, tcpha_t *tcpha,
786    ip_recv_attr_t *ira);
787boolean_t	tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp);
788static boolean_t tcp_zcopy_check(tcp_t *);
789static void	tcp_zcopy_notify(tcp_t *);
790static mblk_t	*tcp_zcopy_backoff(tcp_t *, mblk_t *, boolean_t);
791static void	tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa);
792static void	tcp_update_pmtu(tcp_t *tcp, boolean_t decrease_only);
793static void	tcp_update_zcopy(tcp_t *tcp);
794static void	tcp_notify(void *, ip_xmit_attr_t *, ixa_notify_type_t,
795    ixa_notify_arg_t);
796static void	tcp_rexmit_after_error(tcp_t *tcp);
797static void	tcp_send_data(tcp_t *, mblk_t *);
798extern mblk_t	*tcp_timermp_alloc(int);
799extern void	tcp_timermp_free(tcp_t *);
800static void	tcp_timer_free(tcp_t *tcp, mblk_t *mp);
801static void	tcp_stop_lingering(tcp_t *tcp);
802static void	tcp_close_linger_timeout(void *arg);
803static void	*tcp_stack_init(netstackid_t stackid, netstack_t *ns);
804static void	tcp_stack_fini(netstackid_t stackid, void *arg);
805static void	*tcp_g_kstat_init(tcp_g_stat_t *);
806static void	tcp_g_kstat_fini(kstat_t *);
807static void	*tcp_kstat_init(netstackid_t, tcp_stack_t *);
808static void	tcp_kstat_fini(netstackid_t, kstat_t *);
809static void	*tcp_kstat2_init(netstackid_t, tcp_stat_t *);
810static void	tcp_kstat2_fini(netstackid_t, kstat_t *);
811static int	tcp_kstat_update(kstat_t *kp, int rw);
812static mblk_t	*tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
813    ip_recv_attr_t *ira);
814static mblk_t	*tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, mblk_t *mp,
815    ip_recv_attr_t *ira);
816static int	tcp_squeue_switch(int);
817
818static int	tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
819static int	tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
820static int	tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);
821static int	tcp_tpi_close(queue_t *, int);
822static int	tcp_tpi_close_accept(queue_t *);
823
824static void	tcp_squeue_add(squeue_t *);
825static void	tcp_setcred_data(mblk_t *, ip_recv_attr_t *);
826
827extern void	tcp_kssl_input(tcp_t *, mblk_t *, cred_t *);
828
829void tcp_eager_kill(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy);
830void tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2,
831    ip_recv_attr_t *dummy);
832
833static int tcp_accept(sock_lower_handle_t, sock_lower_handle_t,
834	    sock_upper_handle_t, cred_t *);
835static int tcp_listen(sock_lower_handle_t, int, cred_t *);
836static int tcp_do_listen(conn_t *, struct sockaddr *, socklen_t, int, cred_t *,
837    boolean_t);
838static int tcp_do_connect(conn_t *, const struct sockaddr *, socklen_t,
839    cred_t *, pid_t);
840static int tcp_do_bind(conn_t *, struct sockaddr *, socklen_t, cred_t *,
841    boolean_t);
842static int tcp_do_unbind(conn_t *);
843static int tcp_bind_check(conn_t *, struct sockaddr *, socklen_t, cred_t *,
844    boolean_t);
845
846static void tcp_ulp_newconn(conn_t *, conn_t *, mblk_t *);
847
848/*
849 * Routines related to the TCP_IOC_ABORT_CONN ioctl command.
850 *
851 * TCP_IOC_ABORT_CONN is a non-transparent ioctl command used for aborting
852 * TCP connections. To invoke this ioctl, a tcp_ioc_abort_conn_t structure
853 * (defined in tcp.h) needs to be filled in and passed into the kernel
854 * via an I_STR ioctl command (see streamio(7I)). The tcp_ioc_abort_conn_t
855 * structure contains the four-tuple of a TCP connection and a range of TCP
856 * states (specified by ac_start and ac_end). The use of wildcard addresses
857 * and ports is allowed. Connections with a matching four tuple and a state
858 * within the specified range will be aborted. The valid states for the
859 * ac_start and ac_end fields are in the range TCPS_SYN_SENT to TCPS_TIME_WAIT,
860 * inclusive.
861 *
862 * An application which has its connection aborted by this ioctl will receive
863 * an error that is dependent on the connection state at the time of the abort.
864 * If the connection state is < TCPS_TIME_WAIT, an application should behave as
865 * though a RST packet has been received.  If the connection state is equal to
866 * TCPS_TIME_WAIT, the 2MSL timeout will immediately be canceled by the kernel
867 * and all resources associated with the connection will be freed.
868 */
869static mblk_t	*tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *, tcp_t *);
870static void	tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *);
871static void	tcp_ioctl_abort_handler(void *arg, mblk_t *mp, void *arg2,
872    ip_recv_attr_t *dummy);
873static int	tcp_ioctl_abort(tcp_ioc_abort_conn_t *, tcp_stack_t *tcps);
874static void	tcp_ioctl_abort_conn(queue_t *, mblk_t *);
875static int	tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *, int, int *,
876    boolean_t, tcp_stack_t *);
877
878static struct module_info tcp_rinfo =  {
879	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
880};
881
882static struct module_info tcp_winfo =  {
883	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
884};
885
886/*
887 * Entry points for TCP as a device. The normal case which supports
888 * the TCP functionality.
889 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
890 */
891struct qinit tcp_rinitv4 = {
892	NULL, (pfi_t)tcp_rsrv, tcp_openv4, tcp_tpi_close, NULL, &tcp_rinfo
893};
894
895struct qinit tcp_rinitv6 = {
896	NULL, (pfi_t)tcp_rsrv, tcp_openv6, tcp_tpi_close, NULL, &tcp_rinfo
897};
898
899struct qinit tcp_winit = {
900	(pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
901};
902
903/* Initial entry point for TCP in socket mode. */
904struct qinit tcp_sock_winit = {
905	(pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
906};
907
908/* TCP entry point during fallback */
909struct qinit tcp_fallback_sock_winit = {
910	(pfi_t)tcp_wput_fallback, NULL, NULL, NULL, NULL, &tcp_winfo
911};
912
913/*
914 * Entry points for TCP as a acceptor STREAM opened by sockfs when doing
915 * an accept. Avoid allocating data structures since eager has already
916 * been created.
917 */
918struct qinit tcp_acceptor_rinit = {
919	NULL, (pfi_t)tcp_rsrv, NULL, tcp_tpi_close_accept, NULL, &tcp_winfo
920};
921
922struct qinit tcp_acceptor_winit = {
923	(pfi_t)tcp_tpi_accept, NULL, NULL, NULL, NULL, &tcp_winfo
924};
925
926/* For AF_INET aka /dev/tcp */
927struct streamtab tcpinfov4 = {
928	&tcp_rinitv4, &tcp_winit
929};
930
931/* For AF_INET6 aka /dev/tcp6 */
932struct streamtab tcpinfov6 = {
933	&tcp_rinitv6, &tcp_winit
934};
935
936sock_downcalls_t sock_tcp_downcalls;
937
938/* Setable only in /etc/system. Move to ndd? */
939boolean_t tcp_icmp_source_quench = B_FALSE;
940
941/*
942 * Following assumes TPI alignment requirements stay along 32 bit
943 * boundaries
944 */
945#define	ROUNDUP32(x) \
946	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
947
948/* Template for response to info request. */
949static struct T_info_ack tcp_g_t_info_ack = {
950	T_INFO_ACK,		/* PRIM_type */
951	0,			/* TSDU_size */
952	T_INFINITE,		/* ETSDU_size */
953	T_INVALID,		/* CDATA_size */
954	T_INVALID,		/* DDATA_size */
955	sizeof (sin_t),		/* ADDR_size */
956	0,			/* OPT_size - not initialized here */
957	TIDUSZ,			/* TIDU_size */
958	T_COTS_ORD,		/* SERV_type */
959	TCPS_IDLE,		/* CURRENT_state */
960	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
961};
962
963static struct T_info_ack tcp_g_t_info_ack_v6 = {
964	T_INFO_ACK,		/* PRIM_type */
965	0,			/* TSDU_size */
966	T_INFINITE,		/* ETSDU_size */
967	T_INVALID,		/* CDATA_size */
968	T_INVALID,		/* DDATA_size */
969	sizeof (sin6_t),	/* ADDR_size */
970	0,			/* OPT_size - not initialized here */
971	TIDUSZ,		/* TIDU_size */
972	T_COTS_ORD,		/* SERV_type */
973	TCPS_IDLE,		/* CURRENT_state */
974	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
975};
976
977#define	MS	1L
978#define	SECONDS	(1000 * MS)
979#define	MINUTES	(60 * SECONDS)
980#define	HOURS	(60 * MINUTES)
981#define	DAYS	(24 * HOURS)
982
983#define	PARAM_MAX (~(uint32_t)0)
984
985/* Max size IP datagram is 64k - 1 */
986#define	TCP_MSS_MAX_IPV4 (IP_MAXPACKET - (sizeof (ipha_t) + sizeof (tcpha_t)))
987#define	TCP_MSS_MAX_IPV6 (IP_MAXPACKET - (sizeof (ip6_t) + sizeof (tcpha_t)))
988/* Max of the above */
989#define	TCP_MSS_MAX	TCP_MSS_MAX_IPV4
990
991/* Largest TCP port number */
992#define	TCP_MAX_PORT	(64 * 1024 - 1)
993
994/*
995 * tcp_wroff_xtra is the extra space in front of TCP/IP header for link
996 * layer header.  It has to be a multiple of 4.
997 */
998static tcpparam_t lcl_tcp_wroff_xtra_param = { 0, 256, 32, "tcp_wroff_xtra" };
999#define	tcps_wroff_xtra	tcps_wroff_xtra_param->tcp_param_val
1000
1001/*
1002 * All of these are alterable, within the min/max values given, at run time.
1003 * Note that the default value of "tcp_time_wait_interval" is four minutes,
1004 * per the TCP spec.
1005 */
1006/* BEGIN CSTYLED */
1007static tcpparam_t	lcl_tcp_param_arr[] = {
1008 /*min		max		value		name */
1009 { 1*SECONDS,	10*MINUTES,	1*MINUTES,	"tcp_time_wait_interval"},
1010 { 1,		PARAM_MAX,	128,		"tcp_conn_req_max_q" },
1011 { 0,		PARAM_MAX,	1024,		"tcp_conn_req_max_q0" },
1012 { 1,		1024,		1,		"tcp_conn_req_min" },
1013 { 0*MS,	20*SECONDS,	0*MS,		"tcp_conn_grace_period" },
1014 { 128,		(1<<30),	1024*1024,	"tcp_cwnd_max" },
1015 { 0,		10,		0,		"tcp_debug" },
1016 { 1024,	(32*1024),	1024,		"tcp_smallest_nonpriv_port"},
1017 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_cinterval"},
1018 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_linterval"},
1019 { 500*MS,	PARAM_MAX,	8*MINUTES,	"tcp_ip_abort_interval"},
1020 { 1*SECONDS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_cinterval"},
1021 { 500*MS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_interval"},
1022 { 1,		255,		64,		"tcp_ipv4_ttl"},
1023 { 10*SECONDS,	10*DAYS,	2*HOURS,	"tcp_keepalive_interval"},
1024 { 0,		100,		10,		"tcp_maxpsz_multiplier" },
1025 { 1,		TCP_MSS_MAX_IPV4, 536,		"tcp_mss_def_ipv4"},
1026 { 1,		TCP_MSS_MAX_IPV4, TCP_MSS_MAX_IPV4, "tcp_mss_max_ipv4"},
1027 { 1,		TCP_MSS_MAX,	108,		"tcp_mss_min"},
1028 { 1,		(64*1024)-1,	(4*1024)-1,	"tcp_naglim_def"},
1029 { 1*MS,	20*SECONDS,	3*SECONDS,	"tcp_rexmit_interval_initial"},
1030 { 1*MS,	2*HOURS,	60*SECONDS,	"tcp_rexmit_interval_max"},
1031 { 1*MS,	2*HOURS,	400*MS,		"tcp_rexmit_interval_min"},
1032 { 1*MS,	1*MINUTES,	100*MS,		"tcp_deferred_ack_interval" },
1033 { 0,		16,		0,		"tcp_snd_lowat_fraction" },
1034 { 0,		128000,		0,		"tcp_sth_rcv_hiwat" },
1035 { 0,		128000,		0,		"tcp_sth_rcv_lowat" },
1036 { 1,		10000,		3,		"tcp_dupack_fast_retransmit" },
1037 { 0,		1,		0,		"tcp_ignore_path_mtu" },
1038 { 1024,	TCP_MAX_PORT,	32*1024,	"tcp_smallest_anon_port"},
1039 { 1024,	TCP_MAX_PORT,	TCP_MAX_PORT,	"tcp_largest_anon_port"},
1040 { TCP_XMIT_LOWATER, (1<<30), TCP_XMIT_HIWATER,"tcp_xmit_hiwat"},
1041 { TCP_XMIT_LOWATER, (1<<30), TCP_XMIT_LOWATER,"tcp_xmit_lowat"},
1042 { TCP_RECV_LOWATER, (1<<30), TCP_RECV_HIWATER,"tcp_recv_hiwat"},
1043 { 1,		65536,		4,		"tcp_recv_hiwat_minmss"},
1044 { 1*SECONDS,	PARAM_MAX,	675*SECONDS,	"tcp_fin_wait_2_flush_interval"},
1045 { 8192,	(1<<30),	1024*1024,	"tcp_max_buf"},
1046/*
1047 * Question:  What default value should I set for tcp_strong_iss?
1048 */
1049 { 0,		2,		1,		"tcp_strong_iss"},
1050 { 0,		65536,		20,		"tcp_rtt_updates"},
1051 { 0,		1,		1,		"tcp_wscale_always"},
1052 { 0,		1,		0,		"tcp_tstamp_always"},
1053 { 0,		1,		1,		"tcp_tstamp_if_wscale"},
1054 { 0*MS,	2*HOURS,	0*MS,		"tcp_rexmit_interval_extra"},
1055 { 0,		16,		2,		"tcp_deferred_acks_max"},
1056 { 1,		16384,		4,		"tcp_slow_start_after_idle"},
1057 { 1,		4,		4,		"tcp_slow_start_initial"},
1058 { 0,		2,		2,		"tcp_sack_permitted"},
1059 { 0,		1,		1,		"tcp_compression_enabled"},
1060 { 0,		IPV6_MAX_HOPS,	IPV6_DEFAULT_HOPS,	"tcp_ipv6_hoplimit"},
1061 { 1,		TCP_MSS_MAX_IPV6, 1220,		"tcp_mss_def_ipv6"},
1062 { 1,		TCP_MSS_MAX_IPV6, TCP_MSS_MAX_IPV6, "tcp_mss_max_ipv6"},
1063 { 0,		1,		0,		"tcp_rev_src_routes"},
1064 { 10*MS,	500*MS,		50*MS,		"tcp_local_dack_interval"},
1065 { 0,		16,		8,		"tcp_local_dacks_max"},
1066 { 0,		2,		1,		"tcp_ecn_permitted"},
1067 { 0,		1,		1,		"tcp_rst_sent_rate_enabled"},
1068 { 0,		PARAM_MAX,	40,		"tcp_rst_sent_rate"},
1069 { 0,		100*MS,		50*MS,		"tcp_push_timer_interval"},
1070 { 0,		1,		0,		"tcp_use_smss_as_mss_opt"},
1071 { 0,		PARAM_MAX,	8*MINUTES,	"tcp_keepalive_abort_interval"},
1072 { 0,		1,		0,		"tcp_dev_flow_ctl"},
1073};
1074/* END CSTYLED */
1075
1076/* Round up the value to the nearest mss. */
1077#define	MSS_ROUNDUP(value, mss)		((((value) - 1) / (mss) + 1) * (mss))
1078
1079/*
1080 * Set ECN capable transport (ECT) code point in IP header.
1081 *
1082 * Note that there are 2 ECT code points '01' and '10', which are called
1083 * ECT(1) and ECT(0) respectively.  Here we follow the original ECT code
1084 * point ECT(0) for TCP as described in RFC 2481.
1085 */
1086#define	SET_ECT(tcp, iph) \
1087	if ((tcp)->tcp_connp->conn_ipversion == IPV4_VERSION) { \
1088		/* We need to clear the code point first. */ \
1089		((ipha_t *)(iph))->ipha_type_of_service &= 0xFC; \
1090		((ipha_t *)(iph))->ipha_type_of_service |= IPH_ECN_ECT0; \
1091	} else { \
1092		((ip6_t *)(iph))->ip6_vcf &= htonl(0xFFCFFFFF); \
1093		((ip6_t *)(iph))->ip6_vcf |= htonl(IPH_ECN_ECT0 << 20); \
1094	}
1095
1096/*
1097 * The format argument to pass to tcp_display().
1098 * DISP_PORT_ONLY means that the returned string has only port info.
1099 * DISP_ADDR_AND_PORT means that the returned string also contains the
1100 * remote and local IP address.
1101 */
1102#define	DISP_PORT_ONLY		1
1103#define	DISP_ADDR_AND_PORT	2
1104
1105#define	IS_VMLOANED_MBLK(mp) \
1106	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)
1107
1108uint32_t do_tcpzcopy = 1;		/* 0: disable, 1: enable, 2: force */
1109
1110/*
1111 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
1112 * tunable settable via NDD.  Otherwise, the per-connection behavior is
1113 * determined dynamically during tcp_set_destination(), which is the default.
1114 */
1115boolean_t tcp_static_maxpsz = B_FALSE;
1116
1117/* Setable in /etc/system */
1118/* If set to 0, pick ephemeral port sequentially; otherwise randomly. */
1119uint32_t tcp_random_anon_port = 1;
1120
1121/*
1122 * To reach to an eager in Q0 which can be dropped due to an incoming
1123 * new SYN request when Q0 is full, a new doubly linked list is
1124 * introduced. This list allows to select an eager from Q0 in O(1) time.
1125 * This is needed to avoid spending too much time walking through the
1126 * long list of eagers in Q0 when tcp_drop_q0() is called. Each member of
1127 * this new list has to be a member of Q0.
1128 * This list is headed by listener's tcp_t. When the list is empty,
1129 * both the pointers - tcp_eager_next_drop_q0 and tcp_eager_prev_drop_q0,
1130 * of listener's tcp_t point to listener's tcp_t itself.
1131 *
1132 * Given an eager in Q0 and a listener, MAKE_DROPPABLE() puts the eager
1133 * in the list. MAKE_UNDROPPABLE() takes the eager out of the list.
1134 * These macros do not affect the eager's membership to Q0.
1135 */
1136
1137
1138#define	MAKE_DROPPABLE(listener, eager)					\
1139	if ((eager)->tcp_eager_next_drop_q0 == NULL) {			\
1140		(listener)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0\
1141		    = (eager);						\
1142		(eager)->tcp_eager_prev_drop_q0 = (listener);		\
1143		(eager)->tcp_eager_next_drop_q0 =			\
1144		    (listener)->tcp_eager_next_drop_q0;			\
1145		(listener)->tcp_eager_next_drop_q0 = (eager);		\
1146	}
1147
1148#define	MAKE_UNDROPPABLE(eager)						\
1149	if ((eager)->tcp_eager_next_drop_q0 != NULL) {			\
1150		(eager)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0	\
1151		    = (eager)->tcp_eager_prev_drop_q0;			\
1152		(eager)->tcp_eager_prev_drop_q0->tcp_eager_next_drop_q0	\
1153		    = (eager)->tcp_eager_next_drop_q0;			\
1154		(eager)->tcp_eager_prev_drop_q0 = NULL;			\
1155		(eager)->tcp_eager_next_drop_q0 = NULL;			\
1156	}
1157
1158/*
1159 * If tcp_drop_ack_unsent_cnt is greater than 0, when TCP receives more
1160 * than tcp_drop_ack_unsent_cnt number of ACKs which acknowledge unsent
1161 * data, TCP will not respond with an ACK.  RFC 793 requires that
1162 * TCP responds with an ACK for such a bogus ACK.  By not following
1163 * the RFC, we prevent TCP from getting into an ACK storm if somehow
1164 * an attacker successfully spoofs an acceptable segment to our
1165 * peer; or when our peer is "confused."
1166 */
1167uint32_t tcp_drop_ack_unsent_cnt = 10;
1168
1169/*
1170 * Hook functions to enable cluster networking
1171 * On non-clustered systems these vectors must always be NULL.
1172 */
1173
1174void (*cl_inet_listen)(netstackid_t stack_id, uint8_t protocol,
1175			    sa_family_t addr_family, uint8_t *laddrp,
1176			    in_port_t lport, void *args) = NULL;
1177void (*cl_inet_unlisten)(netstackid_t stack_id, uint8_t protocol,
1178			    sa_family_t addr_family, uint8_t *laddrp,
1179			    in_port_t lport, void *args) = NULL;
1180
1181int (*cl_inet_connect2)(netstackid_t stack_id, uint8_t protocol,
1182			    boolean_t is_outgoing,
1183			    sa_family_t addr_family,
1184			    uint8_t *laddrp, in_port_t lport,
1185			    uint8_t *faddrp, in_port_t fport,
1186			    void *args) = NULL;
1187void (*cl_inet_disconnect)(netstackid_t stack_id, uint8_t protocol,
1188			    sa_family_t addr_family, uint8_t *laddrp,
1189			    in_port_t lport, uint8_t *faddrp,
1190			    in_port_t fport, void *args) = NULL;
1191
1192
1193/*
1194 * int CL_INET_CONNECT(conn_t *cp, tcp_t *tcp, boolean_t is_outgoing, int err)
1195 */
1196#define	CL_INET_CONNECT(connp, is_outgoing, err) {		\
1197	(err) = 0;						\
1198	if (cl_inet_connect2 != NULL) {				\
1199		/*						\
1200		 * Running in cluster mode - register active connection	\
1201		 * information						\
1202		 */							\
1203		if ((connp)->conn_ipversion == IPV4_VERSION) {		\
1204			if ((connp)->conn_laddr_v4 != 0) {		\
1205				(err) = (*cl_inet_connect2)(		\
1206				    (connp)->conn_netstack->netstack_stackid,\
1207				    IPPROTO_TCP, is_outgoing, AF_INET,	\
1208				    (uint8_t *)(&((connp)->conn_laddr_v4)),\
1209				    (in_port_t)(connp)->conn_lport,	\
1210				    (uint8_t *)(&((connp)->conn_faddr_v4)),\
1211				    (in_port_t)(connp)->conn_fport, NULL); \
1212			}						\
1213		} else {						\
1214			if (!IN6_IS_ADDR_UNSPECIFIED(			\
1215			    &(connp)->conn_laddr_v6)) {			\
1216				(err) = (*cl_inet_connect2)(		\
1217				    (connp)->conn_netstack->netstack_stackid,\
1218				    IPPROTO_TCP, is_outgoing, AF_INET6,	\
1219				    (uint8_t *)(&((connp)->conn_laddr_v6)),\
1220				    (in_port_t)(connp)->conn_lport,	\
1221				    (uint8_t *)(&((connp)->conn_faddr_v6)), \
1222				    (in_port_t)(connp)->conn_fport, NULL); \
1223			}						\
1224		}							\
1225	}								\
1226}
1227
1228#define	CL_INET_DISCONNECT(connp)	{				\
1229	if (cl_inet_disconnect != NULL) {				\
1230		/*							\
1231		 * Running in cluster mode - deregister active		\
1232		 * connection information				\
1233		 */							\
1234		if ((connp)->conn_ipversion == IPV4_VERSION) {		\
1235			if ((connp)->conn_laddr_v4 != 0) {		\
1236				(*cl_inet_disconnect)(			\
1237				    (connp)->conn_netstack->netstack_stackid,\
1238				    IPPROTO_TCP, AF_INET,		\
1239				    (uint8_t *)(&((connp)->conn_laddr_v4)),\
1240				    (in_port_t)(connp)->conn_lport,	\
1241				    (uint8_t *)(&((connp)->conn_faddr_v4)),\
1242				    (in_port_t)(connp)->conn_fport, NULL); \
1243			}						\
1244		} else {						\
1245			if (!IN6_IS_ADDR_UNSPECIFIED(			\
1246			    &(connp)->conn_laddr_v6)) {			\
1247				(*cl_inet_disconnect)(			\
1248				    (connp)->conn_netstack->netstack_stackid,\
1249				    IPPROTO_TCP, AF_INET6,		\
1250				    (uint8_t *)(&((connp)->conn_laddr_v6)),\
1251				    (in_port_t)(connp)->conn_lport,	\
1252				    (uint8_t *)(&((connp)->conn_faddr_v6)), \
1253				    (in_port_t)(connp)->conn_fport, NULL); \
1254			}						\
1255		}							\
1256	}								\
1257}
1258
1259/*
1260 * Cluster networking hook for traversing current connection list.
1261 * This routine is used to extract the current list of live connections
1262 * which must continue to to be dispatched to this node.
1263 */
1264int cl_tcp_walk_list(netstackid_t stack_id,
1265    int (*callback)(cl_tcp_info_t *, void *), void *arg);
1266
1267static int cl_tcp_walk_list_stack(int (*callback)(cl_tcp_info_t *, void *),
1268    void *arg, tcp_stack_t *tcps);
1269
1270static void
1271tcp_set_recv_threshold(tcp_t *tcp, uint32_t new_rcvthresh)
1272{
1273	uint32_t default_threshold = SOCKET_RECVHIWATER >> 3;
1274
1275	if (IPCL_IS_NONSTR(tcp->tcp_connp)) {
1276		conn_t *connp = tcp->tcp_connp;
1277		struct sock_proto_props sopp;
1278
1279		/*
1280		 * only increase rcvthresh upto default_threshold
1281		 */
1282		if (new_rcvthresh > default_threshold)
1283			new_rcvthresh = default_threshold;
1284
1285		sopp.sopp_flags = SOCKOPT_RCVTHRESH;
1286		sopp.sopp_rcvthresh = new_rcvthresh;
1287
1288		(*connp->conn_upcalls->su_set_proto_props)
1289		    (connp->conn_upper_handle, &sopp);
1290	}
1291}
1292/*
1293 * Figure out the value of window scale opton.  Note that the rwnd is
1294 * ASSUMED to be rounded up to the nearest MSS before the calculation.
1295 * We cannot find the scale value and then do a round up of tcp_rwnd
1296 * because the scale value may not be correct after that.
1297 *
1298 * Set the compiler flag to make this function inline.
1299 */
1300static void
1301tcp_set_ws_value(tcp_t *tcp)
1302{
1303	int i;
1304	uint32_t rwnd = tcp->tcp_rwnd;
1305
1306	for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
1307	    i++, rwnd >>= 1)
1308		;
1309	tcp->tcp_rcv_ws = i;
1310}
1311
1312/*
1313 * Remove a connection from the list of detached TIME_WAIT connections.
1314 * It returns B_FALSE if it can't remove the connection from the list
1315 * as the connection has already been removed from the list due to an
1316 * earlier call to tcp_time_wait_remove(); otherwise it returns B_TRUE.
1317 */
1318static boolean_t
1319tcp_time_wait_remove(tcp_t *tcp, tcp_squeue_priv_t *tcp_time_wait)
1320{
1321	boolean_t	locked = B_FALSE;
1322
1323	if (tcp_time_wait == NULL) {
1324		tcp_time_wait = *((tcp_squeue_priv_t **)
1325		    squeue_getprivate(tcp->tcp_connp->conn_sqp, SQPRIVATE_TCP));
1326		mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
1327		locked = B_TRUE;
1328	} else {
1329		ASSERT(MUTEX_HELD(&tcp_time_wait->tcp_time_wait_lock));
1330	}
1331
1332	if (tcp->tcp_time_wait_expire == 0) {
1333		ASSERT(tcp->tcp_time_wait_next == NULL);
1334		ASSERT(tcp->tcp_time_wait_prev == NULL);
1335		if (locked)
1336			mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1337		return (B_FALSE);
1338	}
1339	ASSERT(TCP_IS_DETACHED(tcp));
1340	ASSERT(tcp->tcp_state == TCPS_TIME_WAIT);
1341
1342	if (tcp == tcp_time_wait->tcp_time_wait_head) {
1343		ASSERT(tcp->tcp_time_wait_prev == NULL);
1344		tcp_time_wait->tcp_time_wait_head = tcp->tcp_time_wait_next;
1345		if (tcp_time_wait->tcp_time_wait_head != NULL) {
1346			tcp_time_wait->tcp_time_wait_head->tcp_time_wait_prev =
1347			    NULL;
1348		} else {
1349			tcp_time_wait->tcp_time_wait_tail = NULL;
1350		}
1351	} else if (tcp == tcp_time_wait->tcp_time_wait_tail) {
1352		ASSERT(tcp != tcp_time_wait->tcp_time_wait_head);
1353		ASSERT(tcp->tcp_time_wait_next == NULL);
1354		tcp_time_wait->tcp_time_wait_tail = tcp->tcp_time_wait_prev;
1355		ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL);
1356		tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = NULL;
1357	} else {
1358		ASSERT(tcp->tcp_time_wait_prev->tcp_time_wait_next == tcp);
1359		ASSERT(tcp->tcp_time_wait_next->tcp_time_wait_prev == tcp);
1360		tcp->tcp_time_wait_prev->tcp_time_wait_next =
1361		    tcp->tcp_time_wait_next;
1362		tcp->tcp_time_wait_next->tcp_time_wait_prev =
1363		    tcp->tcp_time_wait_prev;
1364	}
1365	tcp->tcp_time_wait_next = NULL;
1366	tcp->tcp_time_wait_prev = NULL;
1367	tcp->tcp_time_wait_expire = 0;
1368
1369	if (locked)
1370		mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1371	return (B_TRUE);
1372}
1373
1374/*
1375 * Add a connection to the list of detached TIME_WAIT connections
1376 * and set its time to expire.
1377 */
1378static void
1379tcp_time_wait_append(tcp_t *tcp)
1380{
1381	tcp_stack_t	*tcps = tcp->tcp_tcps;
1382	tcp_squeue_priv_t *tcp_time_wait =
1383	    *((tcp_squeue_priv_t **)squeue_getprivate(tcp->tcp_connp->conn_sqp,
1384	    SQPRIVATE_TCP));
1385
1386	tcp_timers_stop(tcp);
1387
1388	/* Freed above */
1389	ASSERT(tcp->tcp_timer_tid == 0);
1390	ASSERT(tcp->tcp_ack_tid == 0);
1391
1392	/* must have happened at the time of detaching the tcp */
1393	ASSERT(tcp->tcp_ptpahn == NULL);
1394	ASSERT(tcp->tcp_flow_stopped == 0);
1395	ASSERT(tcp->tcp_time_wait_next == NULL);
1396	ASSERT(tcp->tcp_time_wait_prev == NULL);
1397	ASSERT(tcp->tcp_time_wait_expire == NULL);
1398	ASSERT(tcp->tcp_listener == NULL);
1399
1400	tcp->tcp_time_wait_expire = ddi_get_lbolt();
1401	/*
1402	 * The value computed below in tcp->tcp_time_wait_expire may
1403	 * appear negative or wrap around. That is ok since our
1404	 * interest is only in the difference between the current lbolt
1405	 * value and tcp->tcp_time_wait_expire. But the value should not
1406	 * be zero, since it means the tcp is not in the TIME_WAIT list.
1407	 * The corresponding comparison in tcp_time_wait_collector() uses
1408	 * modular arithmetic.
1409	 */
1410	tcp->tcp_time_wait_expire +=
1411	    drv_usectohz(tcps->tcps_time_wait_interval * 1000);
1412	if (tcp->tcp_time_wait_expire == 0)
1413		tcp->tcp_time_wait_expire = 1;
1414
1415	ASSERT(TCP_IS_DETACHED(tcp));
1416	ASSERT(tcp->tcp_state == TCPS_TIME_WAIT);
1417	ASSERT(tcp->tcp_time_wait_next == NULL);
1418	ASSERT(tcp->tcp_time_wait_prev == NULL);
1419	TCP_DBGSTAT(tcps, tcp_time_wait);
1420
1421	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
1422	if (tcp_time_wait->tcp_time_wait_head == NULL) {
1423		ASSERT(tcp_time_wait->tcp_time_wait_tail == NULL);
1424		tcp_time_wait->tcp_time_wait_head = tcp;
1425	} else {
1426		ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL);
1427		ASSERT(tcp_time_wait->tcp_time_wait_tail->tcp_state ==
1428		    TCPS_TIME_WAIT);
1429		tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = tcp;
1430		tcp->tcp_time_wait_prev = tcp_time_wait->tcp_time_wait_tail;
1431	}
1432	tcp_time_wait->tcp_time_wait_tail = tcp;
1433	mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1434}
1435
1436/* ARGSUSED */
1437void
1438tcp_timewait_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
1439{
1440	conn_t	*connp = (conn_t *)arg;
1441	tcp_t	*tcp = connp->conn_tcp;
1442	tcp_stack_t	*tcps = tcp->tcp_tcps;
1443
1444	ASSERT(tcp != NULL);
1445	if (tcp->tcp_state == TCPS_CLOSED) {
1446		return;
1447	}
1448
1449	ASSERT((connp->conn_family == AF_INET &&
1450	    connp->conn_ipversion == IPV4_VERSION) ||
1451	    (connp->conn_family == AF_INET6 &&
1452	    (connp->conn_ipversion == IPV4_VERSION ||
1453	    connp->conn_ipversion == IPV6_VERSION)));
1454	ASSERT(!tcp->tcp_listener);
1455
1456	TCP_STAT(tcps, tcp_time_wait_reap);
1457	ASSERT(TCP_IS_DETACHED(tcp));
1458
1459	/*
1460	 * Because they have no upstream client to rebind or tcp_close()
1461	 * them later, we axe the connection here and now.
1462	 */
1463	tcp_close_detached(tcp);
1464}
1465
1466/*
1467 * Remove cached/latched IPsec references.
1468 */
1469void
1470tcp_ipsec_cleanup(tcp_t *tcp)
1471{
1472	conn_t		*connp = tcp->tcp_connp;
1473
1474	ASSERT(connp->conn_flags & IPCL_TCPCONN);
1475
1476	if (connp->conn_latch != NULL) {
1477		IPLATCH_REFRELE(connp->conn_latch);
1478		connp->conn_latch = NULL;
1479	}
1480	if (connp->conn_latch_in_policy != NULL) {
1481		IPPOL_REFRELE(connp->conn_latch_in_policy);
1482		connp->conn_latch_in_policy = NULL;
1483	}
1484	if (connp->conn_latch_in_action != NULL) {
1485		IPACT_REFRELE(connp->conn_latch_in_action);
1486		connp->conn_latch_in_action = NULL;
1487	}
1488	if (connp->conn_policy != NULL) {
1489		IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
1490		connp->conn_policy = NULL;
1491	}
1492}
1493
1494/*
1495 * Cleaup before placing on free list.
1496 * Disassociate from the netstack/tcp_stack_t since the freelist
1497 * is per squeue and not per netstack.
1498 */
1499void
1500tcp_cleanup(tcp_t *tcp)
1501{
1502	mblk_t		*mp;
1503	tcp_sack_info_t	*tcp_sack_info;
1504	conn_t		*connp = tcp->tcp_connp;
1505	tcp_stack_t	*tcps = tcp->tcp_tcps;
1506	netstack_t	*ns = tcps->tcps_netstack;
1507	mblk_t		*tcp_rsrv_mp;
1508
1509	tcp_bind_hash_remove(tcp);
1510
1511	/* Cleanup that which needs the netstack first */
1512	tcp_ipsec_cleanup(tcp);
1513	ixa_cleanup(connp->conn_ixa);
1514
1515	if (connp->conn_ht_iphc != NULL) {
1516		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
1517		connp->conn_ht_iphc = NULL;
1518		connp->conn_ht_iphc_allocated = 0;
1519		connp->conn_ht_iphc_len = 0;
1520		connp->conn_ht_ulp = NULL;
1521		connp->conn_ht_ulp_len = 0;
1522		tcp->tcp_ipha = NULL;
1523		tcp->tcp_ip6h = NULL;
1524		tcp->tcp_tcpha = NULL;
1525	}
1526
1527	/* We clear any IP_OPTIONS and extension headers */
1528	ip_pkt_free(&connp->conn_xmit_ipp);
1529
1530	tcp_free(tcp);
1531
1532	/* Release any SSL context */
1533	if (tcp->tcp_kssl_ent != NULL) {
1534		kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY);
1535		tcp->tcp_kssl_ent = NULL;
1536	}
1537
1538	if (tcp->tcp_kssl_ctx != NULL) {
1539		kssl_release_ctx(tcp->tcp_kssl_ctx);
1540		tcp->tcp_kssl_ctx = NULL;
1541	}
1542	tcp->tcp_kssl_pending = B_FALSE;
1543
1544	/*
1545	 * Since we will bzero the entire structure, we need to
1546	 * remove it and reinsert it in global hash list. We
1547	 * know the walkers can't get to this conn because we
1548	 * had set CONDEMNED flag earlier and checked reference
1549	 * under conn_lock so walker won't pick it and when we
1550	 * go the ipcl_globalhash_remove() below, no walker
1551	 * can get to it.
1552	 */
1553	ipcl_globalhash_remove(connp);
1554
1555	/* Save some state */
1556	mp = tcp->tcp_timercache;
1557
1558	tcp_sack_info = tcp->tcp_sack_info;
1559	tcp_rsrv_mp = tcp->tcp_rsrv_mp;
1560
1561	if (connp->conn_cred != NULL) {
1562		crfree(connp->conn_cred);
1563		connp->conn_cred = NULL;
1564	}
1565	ipcl_conn_cleanup(connp);
1566	connp->conn_flags = IPCL_TCPCONN;
1567
1568	/*
1569	 * Now it is safe to decrement the reference counts.
1570	 * This might be the last reference on the netstack
1571	 * in which case it will cause the freeing of the IP Instance.
1572	 */
1573	connp->conn_netstack = NULL;
1574	connp->conn_ixa->ixa_ipst = NULL;
1575	netstack_rele(ns);
1576	ASSERT(tcps != NULL);
1577	tcp->tcp_tcps = NULL;
1578
1579	bzero(tcp, sizeof (tcp_t));
1580
1581	/* restore the state */
1582	tcp->tcp_timercache = mp;
1583
1584	tcp->tcp_sack_info = tcp_sack_info;
1585	tcp->tcp_rsrv_mp = tcp_rsrv_mp;
1586
1587	tcp->tcp_connp = connp;
1588
1589	ASSERT(connp->conn_tcp == tcp);
1590	ASSERT(connp->conn_flags & IPCL_TCPCONN);
1591	connp->conn_state_flags = CONN_INCIPIENT;
1592	ASSERT(connp->conn_proto == IPPROTO_TCP);
1593	ASSERT(connp->conn_ref == 1);
1594}
1595
1596/*
1597 * Blows away all tcps whose TIME_WAIT has expired. List traversal
1598 * is done forwards from the head.
1599 * This walks all stack instances since
1600 * tcp_time_wait remains global across all stacks.
1601 */
1602/* ARGSUSED */
1603void
1604tcp_time_wait_collector(void *arg)
1605{
1606	tcp_t *tcp;
1607	clock_t now;
1608	mblk_t *mp;
1609	conn_t *connp;
1610	kmutex_t *lock;
1611	boolean_t removed;
1612
1613	squeue_t *sqp = (squeue_t *)arg;
1614	tcp_squeue_priv_t *tcp_time_wait =
1615	    *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));
1616
1617	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
1618	tcp_time_wait->tcp_time_wait_tid = 0;
1619
1620	if (tcp_time_wait->tcp_free_list != NULL &&
1621	    tcp_time_wait->tcp_free_list->tcp_in_free_list == B_TRUE) {
1622		TCP_G_STAT(tcp_freelist_cleanup);
1623		while ((tcp = tcp_time_wait->tcp_free_list) != NULL) {
1624			tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
1625			tcp->tcp_time_wait_next = NULL;
1626			tcp_time_wait->tcp_free_list_cnt--;
1627			ASSERT(tcp->tcp_tcps == NULL);
1628			CONN_DEC_REF(tcp->tcp_connp);
1629		}
1630		ASSERT(tcp_time_wait->tcp_free_list_cnt == 0);
1631	}
1632
1633	/*
1634	 * In order to reap time waits reliably, we should use a
1635	 * source of time that is not adjustable by the user -- hence
1636	 * the call to ddi_get_lbolt().
1637	 */
1638	now = ddi_get_lbolt();
1639	while ((tcp = tcp_time_wait->tcp_time_wait_head) != NULL) {
1640		/*
1641		 * Compare times using modular arithmetic, since
1642		 * lbolt can wrapover.
1643		 */
1644		if ((now - tcp->tcp_time_wait_expire) < 0) {
1645			break;
1646		}
1647
1648		removed = tcp_time_wait_remove(tcp, tcp_time_wait);
1649		ASSERT(removed);
1650
1651		connp = tcp->tcp_connp;
1652		ASSERT(connp->conn_fanout != NULL);
1653		lock = &connp->conn_fanout->connf_lock;
1654		/*
1655		 * This is essentially a TW reclaim fast path optimization for
1656		 * performance where the timewait collector checks under the
1657		 * fanout lock (so that no one else can get access to the
1658		 * conn_t) that the refcnt is 2 i.e. one for TCP and one for
1659		 * the classifier hash list. If ref count is indeed 2, we can
1660		 * just remove the conn under the fanout lock and avoid
1661		 * cleaning up the conn under the squeue, provided that
1662		 * clustering callbacks are not enabled. If clustering is
1663		 * enabled, we need to make the clustering callback before
1664		 * setting the CONDEMNED flag and after dropping all locks and
1665		 * so we forego this optimization and fall back to the slow
1666		 * path. Also please see the comments in tcp_closei_local
1667		 * regarding the refcnt logic.
1668		 *
1669		 * Since we are holding the tcp_time_wait_lock, its better
1670		 * not to block on the fanout_lock because other connections
1671		 * can't add themselves to time_wait list. So we do a
1672		 * tryenter instead of mutex_enter.
1673		 */
1674		if (mutex_tryenter(lock)) {
1675			mutex_enter(&connp->conn_lock);
1676			if ((connp->conn_ref == 2) &&
1677			    (cl_inet_disconnect == NULL)) {
1678				ipcl_hash_remove_locked(connp,
1679				    connp->conn_fanout);
1680				/*
1681				 * Set the CONDEMNED flag now itself so that
1682				 * the refcnt cannot increase due to any
1683				 * walker.
1684				 */
1685				connp->conn_state_flags |= CONN_CONDEMNED;
1686				mutex_exit(lock);
1687				mutex_exit(&connp->conn_lock);
1688				if (tcp_time_wait->tcp_free_list_cnt <
1689				    tcp_free_list_max_cnt) {
1690					/* Add to head of tcp_free_list */
1691					mutex_exit(
1692					    &tcp_time_wait->tcp_time_wait_lock);
1693					tcp_cleanup(tcp);
1694					ASSERT(connp->conn_latch == NULL);
1695					ASSERT(connp->conn_policy == NULL);
1696					ASSERT(tcp->tcp_tcps == NULL);
1697					ASSERT(connp->conn_netstack == NULL);
1698
1699					mutex_enter(
1700					    &tcp_time_wait->tcp_time_wait_lock);
1701					tcp->tcp_time_wait_next =
1702					    tcp_time_wait->tcp_free_list;
1703					tcp_time_wait->tcp_free_list = tcp;
1704					tcp_time_wait->tcp_free_list_cnt++;
1705					continue;
1706				} else {
1707					/* Do not add to tcp_free_list */
1708					mutex_exit(
1709					    &tcp_time_wait->tcp_time_wait_lock);
1710					tcp_bind_hash_remove(tcp);
1711					ixa_cleanup(tcp->tcp_connp->conn_ixa);
1712					tcp_ipsec_cleanup(tcp);
1713					CONN_DEC_REF(tcp->tcp_connp);
1714				}
1715			} else {
1716				CONN_INC_REF_LOCKED(connp);
1717				mutex_exit(lock);
1718				mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1719				mutex_exit(&connp->conn_lock);
1720				/*
1721				 * We can reuse the closemp here since conn has
1722				 * detached (otherwise we wouldn't even be in
1723				 * time_wait list). tcp_closemp_used can safely
1724				 * be changed without taking a lock as no other
1725				 * thread can concurrently access it at this
1726				 * point in the connection lifecycle.
1727				 */
1728
1729				if (tcp->tcp_closemp.b_prev == NULL)
1730					tcp->tcp_closemp_used = B_TRUE;
1731				else
1732					cmn_err(CE_PANIC,
1733					    "tcp_timewait_collector: "
1734					    "concurrent use of tcp_closemp: "
1735					    "connp %p tcp %p\n", (void *)connp,
1736					    (void *)tcp);
1737
1738				TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);
1739				mp = &tcp->tcp_closemp;
1740				SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
1741				    tcp_timewait_output, connp, NULL,
1742				    SQ_FILL, SQTAG_TCP_TIMEWAIT);
1743			}
1744		} else {
1745			mutex_enter(&connp->conn_lock);
1746			CONN_INC_REF_LOCKED(connp);
1747			mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1748			mutex_exit(&connp->conn_lock);
1749			/*
1750			 * We can reuse the closemp here since conn has
1751			 * detached (otherwise we wouldn't even be in
1752			 * time_wait list). tcp_closemp_used can safely
1753			 * be changed without taking a lock as no other
1754			 * thread can concurrently access it at this
1755			 * point in the connection lifecycle.
1756			 */
1757
1758			if (tcp->tcp_closemp.b_prev == NULL)
1759				tcp->tcp_closemp_used = B_TRUE;
1760			else
1761				cmn_err(CE_PANIC, "tcp_timewait_collector: "
1762				    "concurrent use of tcp_closemp: "
1763				    "connp %p tcp %p\n", (void *)connp,
1764				    (void *)tcp);
1765
1766			TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);
1767			mp = &tcp->tcp_closemp;
1768			SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
1769			    tcp_timewait_output, connp, NULL,
1770			    SQ_FILL, SQTAG_TCP_TIMEWAIT);
1771		}
1772		mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
1773	}
1774
1775	if (tcp_time_wait->tcp_free_list != NULL)
1776		tcp_time_wait->tcp_free_list->tcp_in_free_list = B_TRUE;
1777
1778	tcp_time_wait->tcp_time_wait_tid =
1779	    timeout_generic(CALLOUT_NORMAL, tcp_time_wait_collector, sqp,
1780	    TICK_TO_NSEC(TCP_TIME_WAIT_DELAY), CALLOUT_TCP_RESOLUTION,
1781	    CALLOUT_FLAG_ROUNDUP);
1782	mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
1783}
1784
1785/*
1786 * Reply to a clients T_CONN_RES TPI message. This function
1787 * is used only for TLI/XTI listener. Sockfs sends T_CONN_RES
1788 * on the acceptor STREAM and processed in tcp_accept_common().
1789 * Read the block comment on top of tcp_input_listener().
1790 */
1791static void
1792tcp_tli_accept(tcp_t *listener, mblk_t *mp)
1793{
1794	tcp_t		*acceptor;
1795	tcp_t		*eager;
1796	tcp_t   	*tcp;
1797	struct T_conn_res	*tcr;
1798	t_uscalar_t	acceptor_id;
1799	t_scalar_t	seqnum;
1800	mblk_t		*discon_mp = NULL;
1801	mblk_t		*ok_mp;
1802	mblk_t		*mp1;
1803	tcp_stack_t	*tcps = listener->tcp_tcps;
1804	conn_t		*econnp;
1805
1806	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) {
1807		tcp_err_ack(listener, mp, TPROTO, 0);
1808		return;
1809	}
1810	tcr = (struct T_conn_res *)mp->b_rptr;
1811
1812	/*
1813	 * Under ILP32 the stream head points tcr->ACCEPTOR_id at the
1814	 * read side queue of the streams device underneath us i.e. the
1815	 * read side queue of 'ip'. Since we can't deference QUEUE_ptr we
1816	 * look it up in the queue_hash.  Under LP64 it sends down the
1817	 * minor_t of the accepting endpoint.
1818	 *
1819	 * Once the acceptor/eager are modified (in tcp_accept_swap) the
1820	 * fanout hash lock is held.
1821	 * This prevents any thread from entering the acceptor queue from
1822	 * below (since it has not been hard bound yet i.e. any inbound
1823	 * packets will arrive on the listener conn_t and
1824	 * go through the classifier).
1825	 * The CONN_INC_REF will prevent the acceptor from closing.
1826	 *
1827	 * XXX It is still possible for a tli application to send down data
1828	 * on the accepting stream while another thread calls t_accept.
1829	 * This should not be a problem for well-behaved applications since
1830	 * the T_OK_ACK is sent after the queue swapping is completed.
1831	 *
1832	 * If the accepting fd is the same as the listening fd, avoid
1833	 * queue hash lookup since that will return an eager listener in a
1834	 * already established state.
1835	 */
1836	acceptor_id = tcr->ACCEPTOR_id;
1837	mutex_enter(&listener->tcp_eager_lock);
1838	if (listener->tcp_acceptor_id == acceptor_id) {
1839		eager = listener->tcp_eager_next_q;
1840		/* only count how many T_CONN_INDs so don't count q0 */
1841		if ((listener->tcp_conn_req_cnt_q != 1) ||
1842		    (eager->tcp_conn_req_seqnum != tcr->SEQ_number)) {
1843			mutex_exit(&listener->tcp_eager_lock);
1844			tcp_err_ack(listener, mp, TBADF, 0);
1845			return;
1846		}
1847		if (listener->tcp_conn_req_cnt_q0 != 0) {
1848			/* Throw away all the eagers on q0. */
1849			tcp_eager_cleanup(listener, 1);
1850		}
1851		if (listener->tcp_syn_defense) {
1852			listener->tcp_syn_defense = B_FALSE;
1853			if (listener->tcp_ip_addr_cache != NULL) {
1854				kmem_free(listener->tcp_ip_addr_cache,
1855				    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
1856				listener->tcp_ip_addr_cache = NULL;
1857			}
1858		}
1859		/*
1860		 * Transfer tcp_conn_req_max to the eager so that when
1861		 * a disconnect occurs we can revert the endpoint to the
1862		 * listen state.
1863		 */
1864		eager->tcp_conn_req_max = listener->tcp_conn_req_max;
1865		ASSERT(listener->tcp_conn_req_cnt_q0 == 0);
1866		/*
1867		 * Get a reference on the acceptor just like the
1868		 * tcp_acceptor_hash_lookup below.
1869		 */
1870		acceptor = listener;
1871		CONN_INC_REF(acceptor->tcp_connp);
1872	} else {
1873		acceptor = tcp_acceptor_hash_lookup(acceptor_id, tcps);
1874		if (acceptor == NULL) {
1875			if (listener->tcp_connp->conn_debug) {
1876				(void) strlog(TCP_MOD_ID, 0, 1,
1877				    SL_ERROR|SL_TRACE,
1878				    "tcp_accept: did not find acceptor 0x%x\n",
1879				    acceptor_id);
1880			}
1881			mutex_exit(&listener->tcp_eager_lock);
1882			tcp_err_ack(listener, mp, TPROVMISMATCH, 0);
1883			return;
1884		}
1885		/*
1886		 * Verify acceptor state. The acceptable states for an acceptor
1887		 * include TCPS_IDLE and TCPS_BOUND.
1888		 */
1889		switch (acceptor->tcp_state) {
1890		case TCPS_IDLE:
1891			/* FALLTHRU */
1892		case TCPS_BOUND:
1893			break;
1894		default:
1895			CONN_DEC_REF(acceptor->tcp_connp);
1896			mutex_exit(&listener->tcp_eager_lock);
1897			tcp_err_ack(listener, mp, TOUTSTATE, 0);
1898			return;
1899		}
1900	}
1901
1902	/* The listener must be in TCPS_LISTEN */
1903	if (listener->tcp_state != TCPS_LISTEN) {
1904		CONN_DEC_REF(acceptor->tcp_connp);
1905		mutex_exit(&listener->tcp_eager_lock);
1906		tcp_err_ack(listener, mp, TOUTSTATE, 0);
1907		return;
1908	}
1909
1910	/*
1911	 * Rendezvous with an eager connection request packet hanging off
1912	 * 'tcp' that has the 'seqnum' tag.  We tagged the detached open
1913	 * tcp structure when the connection packet arrived in
1914	 * tcp_input_listener().
1915	 */
1916	seqnum = tcr->SEQ_number;
1917	eager = listener;
1918	do {
1919		eager = eager->tcp_eager_next_q;
1920		if (eager == NULL) {
1921			CONN_DEC_REF(acceptor->tcp_connp);
1922			mutex_exit(&listener->tcp_eager_lock);
1923			tcp_err_ack(listener, mp, TBADSEQ, 0);
1924			return;
1925		}
1926	} while (eager->tcp_conn_req_seqnum != seqnum);
1927	mutex_exit(&listener->tcp_eager_lock);
1928
1929	/*
1930	 * At this point, both acceptor and listener have 2 ref
1931	 * that they begin with. Acceptor has one additional ref
1932	 * we placed in lookup while listener has 3 additional
1933	 * ref for being behind the squeue (tcp_accept() is
1934	 * done on listener's squeue); being in classifier hash;
1935	 * and eager's ref on listener.
1936	 */
1937	ASSERT(listener->tcp_connp->conn_ref >= 5);
1938	ASSERT(acceptor->tcp_connp->conn_ref >= 3);
1939
1940	/*
1941	 * The eager at this point is set in its own squeue and
1942	 * could easily have been killed (tcp_accept_finish will
1943	 * deal with that) because of a TH_RST so we can only
1944	 * ASSERT for a single ref.
1945	 */
1946	ASSERT(eager->tcp_connp->conn_ref >= 1);
1947
1948	/*
1949	 * Pre allocate the discon_ind mblk also. tcp_accept_finish will
1950	 * use it if something failed.
1951	 */
1952	discon_mp = allocb(MAX(sizeof (struct T_discon_ind),
1953	    sizeof (struct stroptions)), BPRI_HI);
1954	if (discon_mp == NULL) {
1955		CONN_DEC_REF(acceptor->tcp_connp);
1956		CONN_DEC_REF(eager->tcp_connp);
1957		tcp_err_ack(listener, mp, TSYSERR, ENOMEM);
1958		return;
1959	}
1960
1961	econnp = eager->tcp_connp;
1962
1963	/* Hold a copy of mp, in case reallocb fails */
1964	if ((mp1 = copymsg(mp)) == NULL) {
1965		CONN_DEC_REF(acceptor->tcp_connp);
1966		CONN_DEC_REF(eager->tcp_connp);
1967		freemsg(discon_mp);
1968		tcp_err_ack(listener, mp, TSYSERR, ENOMEM);
1969		return;
1970	}
1971
1972	tcr = (struct T_conn_res *)mp1->b_rptr;
1973
1974	/*
1975	 * This is an expanded version of mi_tpi_ok_ack_alloc()
1976	 * which allocates a larger mblk and appends the new
1977	 * local address to the ok_ack.  The address is copied by
1978	 * soaccept() for getsockname().
1979	 */
1980	{
1981		int extra;
1982
1983		extra = (econnp->conn_family == AF_INET) ?
1984		    sizeof (sin_t) : sizeof (sin6_t);
1985
1986		/*
1987		 * Try to re-use mp, if possible.  Otherwise, allocate
1988		 * an mblk and return it as ok_mp.  In any case, mp
1989		 * is no longer usable upon return.
1990		 */
1991		if ((ok_mp = mi_tpi_ok_ack_alloc_extra(mp, extra)) == NULL) {
1992			CONN_DEC_REF(acceptor->tcp_connp);
1993			CONN_DEC_REF(eager->tcp_connp);
1994			freemsg(discon_mp);
1995			/* Original mp has been freed by now, so use mp1 */
1996			tcp_err_ack(listener, mp1, TSYSERR, ENOMEM);
1997			return;
1998		}
1999
2000		mp = NULL;	/* We should never use mp after this point */
2001
2002		switch (extra) {
2003		case sizeof (sin_t): {
2004			sin_t *sin = (sin_t *)ok_mp->b_wptr;
2005
2006			ok_mp->b_wptr += extra;
2007			sin->sin_family = AF_INET;
2008			sin->sin_port = econnp->conn_lport;
2009			sin->sin_addr.s_addr = econnp->conn_laddr_v4;
2010			break;
2011		}
2012		case sizeof (sin6_t): {
2013			sin6_t *sin6 = (sin6_t *)ok_mp->b_wptr;
2014
2015			ok_mp->b_wptr += extra;
2016			sin6->sin6_family = AF_INET6;
2017			sin6->sin6_port = econnp->conn_lport;
2018			sin6->sin6_addr = econnp->conn_laddr_v6;
2019			sin6->sin6_flowinfo = econnp->conn_flowinfo;
2020			if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6) &&
2021			    (econnp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)) {
2022				sin6->sin6_scope_id =
2023				    econnp->conn_ixa->ixa_scopeid;
2024			} else {
2025				sin6->sin6_scope_id = 0;
2026			}
2027			sin6->__sin6_src_id = 0;
2028			break;
2029		}
2030		default:
2031			break;
2032		}
2033		ASSERT(ok_mp->b_wptr <= ok_mp->b_datap->db_lim);
2034	}
2035
2036	/*
2037	 * If there are no options we know that the T_CONN_RES will
2038	 * succeed. However, we can't send the T_OK_ACK upstream until
2039	 * the tcp_accept_swap is done since it would be dangerous to
2040	 * let the application start using the new fd prior to the swap.
2041	 */
2042	tcp_accept_swap(listener, acceptor, eager);
2043
2044	/*
2045	 * tcp_accept_swap unlinks eager from listener but does not drop
2046	 * the eager's reference on the listener.
2047	 */
2048	ASSERT(eager->tcp_listener == NULL);
2049	ASSERT(listener->tcp_connp->conn_ref >= 5);
2050
2051	/*
2052	 * The eager is now associated with its own queue. Insert in
2053	 * the hash so that the connection can be reused for a future
2054	 * T_CONN_RES.
2055	 */
2056	tcp_acceptor_hash_insert(acceptor_id, eager);
2057
2058	/*
2059	 * We now do the processing of options with T_CONN_RES.
2060	 * We delay till now since we wanted to have queue to pass to
2061	 * option processing routines that points back to the right
2062	 * instance structure which does not happen until after
2063	 * tcp_accept_swap().
2064	 *
2065	 * Note:
2066	 * The sanity of the logic here assumes that whatever options
2067	 * are appropriate to inherit from listner=>eager are done
2068	 * before this point, and whatever were to be overridden (or not)
2069	 * in transfer logic from eager=>acceptor in tcp_accept_swap().
2070	 * [ Warning: acceptor endpoint can have T_OPTMGMT_REQ done to it
2071	 *   before its ACCEPTOR_id comes down in T_CONN_RES ]
2072	 * This may not be true at this point in time but can be fixed
2073	 * independently. This option processing code starts with
2074	 * the instantiated acceptor instance and the final queue at
2075	 * this point.
2076	 */
2077
2078	if (tcr->OPT_length != 0) {
2079		/* Options to process */
2080		int t_error = 0;
2081		int sys_error = 0;
2082		int do_disconnect = 0;
2083
2084		if (tcp_conprim_opt_process(eager, mp1,
2085		    &do_disconnect, &t_error, &sys_error) < 0) {
2086			eager->tcp_accept_error = 1;
2087			if (do_disconnect) {
2088				/*
2089				 * An option failed which does not allow
2090				 * connection to be accepted.
2091				 *
2092				 * We allow T_CONN_RES to succeed and
2093				 * put a T_DISCON_IND on the eager queue.
2094				 */
2095				ASSERT(t_error == 0 && sys_error == 0);
2096				eager->tcp_send_discon_ind = 1;
2097			} else {
2098				ASSERT(t_error != 0);
2099				freemsg(ok_mp);
2100				/*
2101				 * Original mp was either freed or set
2102				 * to ok_mp above, so use mp1 instead.
2103				 */
2104				tcp_err_ack(listener, mp1, t_error, sys_error);
2105				goto finish;
2106			}
2107		}
2108		/*
2109		 * Most likely success in setting options (except if
2110		 * eager->tcp_send_discon_ind set).
2111		 * mp1 option buffer represented by OPT_length/offset
2112		 * potentially modified and contains results of setting
2113		 * options at this point
2114		 */
2115	}
2116
2117	/* We no longer need mp1, since all options processing has passed */
2118	freemsg(mp1);
2119
2120	putnext(listener->tcp_connp->conn_rq, ok_mp);
2121
2122	mutex_enter(&listener->tcp_eager_lock);
2123	if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) {
2124		tcp_t	*tail;
2125		mblk_t	*conn_ind;
2126
2127		/*
2128		 * This path should not be executed if listener and
2129		 * acceptor streams are the same.
2130		 */
2131		ASSERT(listener != acceptor);
2132
2133		tcp = listener->tcp_eager_prev_q0;
2134		/*
2135		 * listener->tcp_eager_prev_q0 points to the TAIL of the
2136		 * deferred T_conn_ind queue. We need to get to the head of
2137		 * the queue in order to send up T_conn_ind the same order as
2138		 * how the 3WHS is completed.
2139		 */
2140		while (tcp != listener) {
2141			if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0)
2142				break;
2143			else
2144				tcp = tcp->tcp_eager_prev_q0;
2145		}
2146		ASSERT(tcp != listener);
2147		conn_ind = tcp->tcp_conn.tcp_eager_conn_ind;
2148		ASSERT(conn_ind != NULL);
2149		tcp->tcp_conn.tcp_eager_conn_ind = NULL;
2150
2151		/* Move from q0 to q */
2152		ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
2153		listener->tcp_conn_req_cnt_q0--;
2154		listener->tcp_conn_req_cnt_q++;
2155		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
2156		    tcp->tcp_eager_prev_q0;
2157		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
2158		    tcp->tcp_eager_next_q0;
2159		tcp->tcp_eager_prev_q0 = NULL;
2160		tcp->tcp_eager_next_q0 = NULL;
2161		tcp->tcp_conn_def_q0 = B_FALSE;
2162
2163		/* Make sure the tcp isn't in the list of droppables */
2164		ASSERT(tcp->tcp_eager_next_drop_q0 == NULL &&
2165		    tcp->tcp_eager_prev_drop_q0 == NULL);
2166
2167		/*
2168		 * Insert at end of the queue because sockfs sends
2169		 * down T_CONN_RES in chronological order. Leaving
2170		 * the older conn indications at front of the queue
2171		 * helps reducing search time.
2172		 */
2173		tail = listener->tcp_eager_last_q;
2174		if (tail != NULL)
2175			tail->tcp_eager_next_q = tcp;
2176		else
2177			listener->tcp_eager_next_q = tcp;
2178		listener->tcp_eager_last_q = tcp;
2179		tcp->tcp_eager_next_q = NULL;
2180		mutex_exit(&listener->tcp_eager_lock);
2181		putnext(tcp->tcp_connp->conn_rq, conn_ind);
2182	} else {
2183		mutex_exit(&listener->tcp_eager_lock);
2184	}
2185
2186	/*
2187	 * Done with the acceptor - free it
2188	 *
2189	 * Note: from this point on, no access to listener should be made
2190	 * as listener can be equal to acceptor.
2191	 */
2192finish:
2193	ASSERT(acceptor->tcp_detached);
2194	acceptor->tcp_connp->conn_rq = NULL;
2195	ASSERT(!IPCL_IS_NONSTR(acceptor->tcp_connp));
2196	acceptor->tcp_connp->conn_wq = NULL;
2197	(void) tcp_clean_death(acceptor, 0, 2);
2198	CONN_DEC_REF(acceptor->tcp_connp);
2199
2200	/*
2201	 * We pass discon_mp to tcp_accept_finish to get on the right squeue.
2202	 *
2203	 * It will update the setting for sockfs/stream head and also take
2204	 * care of any data that arrived before accept() wad called.
2205	 * In case we already received a FIN then tcp_accept_finish will send up
2206	 * the ordrel. It will also send up a window update if the window
2207	 * has opened up.
2208	 */
2209
2210	/*
2211	 * XXX: we currently have a problem if XTI application closes the
2212	 * acceptor stream in between. This problem exists in on10-gate also
2213	 * and is well know but nothing can be done short of major rewrite
2214	 * to fix it. Now it is possible to take care of it by assigning TLI/XTI
2215	 * eager same squeue as listener (we can distinguish non socket
2216	 * listeners at the time of handling a SYN in tcp_input_listener)
2217	 * and do most of the work that tcp_accept_finish does here itself
2218	 * and then get behind the acceptor squeue to access the acceptor
2219	 * queue.
2220	 */
2221	/*
2222	 * We already have a ref on tcp so no need to do one before squeue_enter
2223	 */
2224	SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, discon_mp,
2225	    tcp_accept_finish, eager->tcp_connp, NULL, SQ_FILL,
2226	    SQTAG_TCP_ACCEPT_FINISH);
2227}
2228
2229/*
2230 * Swap information between the eager and acceptor for a TLI/XTI client.
2231 * The sockfs accept is done on the acceptor stream and control goes
2232 * through tcp_tli_accept() and tcp_accept()/tcp_accept_swap() is not
2233 * called. In either case, both the eager and listener are in their own
2234 * perimeter (squeue) and the code has to deal with potential race.
2235 *
2236 * See the block comment on top of tcp_accept() and tcp_tli_accept().
2237 */
2238static void
2239tcp_accept_swap(tcp_t *listener, tcp_t *acceptor, tcp_t *eager)
2240{
2241	conn_t	*econnp, *aconnp;
2242
2243	ASSERT(eager->tcp_connp->conn_rq == listener->tcp_connp->conn_rq);
2244	ASSERT(eager->tcp_detached && !acceptor->tcp_detached);
2245	ASSERT(!TCP_IS_SOCKET(acceptor));
2246	ASSERT(!TCP_IS_SOCKET(eager));
2247	ASSERT(!TCP_IS_SOCKET(listener));
2248
2249	/*
2250	 * Trusted Extensions may need to use a security label that is
2251	 * different from the acceptor's label on MLP and MAC-Exempt
2252	 * sockets. If this is the case, the required security label
2253	 * already exists in econnp->conn_ixa->ixa_tsl. Since we make the
2254	 * acceptor stream refer to econnp we atomatically get that label.
2255	 */
2256
2257	acceptor->tcp_detached = B_TRUE;
2258	/*
2259	 * To permit stream re-use by TLI/XTI, the eager needs a copy of
2260	 * the acceptor id.
2261	 */
2262	eager->tcp_acceptor_id = acceptor->tcp_acceptor_id;
2263
2264	/* remove eager from listen list... */
2265	mutex_enter(&listener->tcp_eager_lock);
2266	tcp_eager_unlink(eager);
2267	ASSERT(eager->tcp_eager_next_q == NULL &&
2268	    eager->tcp_eager_last_q == NULL);
2269	ASSERT(eager->tcp_eager_next_q0 == NULL &&
2270	    eager->tcp_eager_prev_q0 == NULL);
2271	mutex_exit(&listener->tcp_eager_lock);
2272
2273	econnp = eager->tcp_connp;
2274	aconnp = acceptor->tcp_connp;
2275	econnp->conn_rq = aconnp->conn_rq;
2276	econnp->conn_wq = aconnp->conn_wq;
2277	econnp->conn_rq->q_ptr = econnp;
2278	econnp->conn_wq->q_ptr = econnp;
2279
2280	/*
2281	 * In the TLI/XTI loopback case, we are inside the listener's squeue,
2282	 * which might be a different squeue from our peer TCP instance.
2283	 * For TCP Fusion, the peer expects that whenever tcp_detached is
2284	 * clear, our TCP queues point to the acceptor's queues.  Thus, use
2285	 * membar_producer() to ensure that the assignments of conn_rq/conn_wq
2286	 * above reach global visibility prior to the clearing of tcp_detached.
2287	 */
2288	membar_producer();
2289	eager->tcp_detached = B_FALSE;
2290
2291	ASSERT(eager->tcp_ack_tid == 0);
2292
2293	econnp->conn_dev = aconnp->conn_dev;
2294	econnp->conn_minor_arena = aconnp->conn_minor_arena;
2295
2296	ASSERT(econnp->conn_minor_arena != NULL);
2297	if (econnp->conn_cred != NULL)
2298		crfree(econnp->conn_cred);
2299	econnp->conn_cred = aconnp->conn_cred;
2300	aconnp->conn_cred = NULL;
2301	econnp->conn_cpid = aconnp->conn_cpid;
2302	ASSERT(econnp->conn_netstack == aconnp->conn_netstack);
2303	ASSERT(eager->tcp_tcps == acceptor->tcp_tcps);
2304
2305	econnp->conn_zoneid = aconnp->conn_zoneid;
2306	econnp->conn_allzones = aconnp->conn_allzones;
2307	econnp->conn_ixa->ixa_zoneid = aconnp->conn_ixa->ixa_zoneid;
2308
2309	econnp->conn_mac_mode = aconnp->conn_mac_mode;
2310	econnp->conn_zone_is_global = aconnp->conn_zone_is_global;
2311	aconnp->conn_mac_mode = CONN_MAC_DEFAULT;
2312
2313	/* Do the IPC initialization */
2314	CONN_INC_REF(econnp);
2315
2316	econnp->conn_family = aconnp->conn_family;
2317	econnp->conn_ipversion = aconnp->conn_ipversion;
2318
2319	/* Done with old IPC. Drop its ref on its connp */
2320	CONN_DEC_REF(aconnp);
2321}
2322
2323
2324/*
2325 * Adapt to the information, such as rtt and rtt_sd, provided from the
2326 * DCE and IRE maintained by IP.
2327 *
2328 * Checks for multicast and broadcast destination address.
2329 * Returns zero if ok; an errno on failure.
2330 *
2331 * Note that the MSS calculation here is based on the info given in
2332 * the DCE and IRE.  We do not do any calculation based on TCP options.  They
2333 * will be handled in tcp_input_data() when TCP knows which options to use.
2334 *
2335 * Note on how TCP gets its parameters for a connection.
2336 *
2337 * When a tcp_t structure is allocated, it gets all the default parameters.
2338 * In tcp_set_destination(), it gets those metric parameters, like rtt, rtt_sd,
2339 * spipe, rpipe, ... from the route metrics.  Route metric overrides the
2340 * default.
2341 *
2342 * An incoming SYN with a multicast or broadcast destination address is dropped
2343 * in ip_fanout_v4/v6.
2344 *
2345 * An incoming SYN with a multicast or broadcast source address is always
2346 * dropped in tcp_set_destination, since IPDF_ALLOW_MCBC is not set in
2347 * conn_connect.
2348 * The same logic in tcp_set_destination also serves to
2349 * reject an attempt to connect to a broadcast or multicast (destination)
2350 * address.
2351 */
2352static int
2353tcp_set_destination(tcp_t *tcp)
2354{
2355	uint32_t	mss_max;
2356	uint32_t	mss;
2357	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
2358	conn_t		*connp = tcp->tcp_connp;
2359	tcp_stack_t	*tcps = tcp->tcp_tcps;
2360	iulp_t		uinfo;
2361	int		error;
2362	uint32_t	flags;
2363
2364	flags = IPDF_LSO | IPDF_ZCOPY;
2365	/*
2366	 * Make sure we have a dce for the destination to avoid dce_ident
2367	 * contention for connected sockets.
2368	 */
2369	flags |= IPDF_UNIQUE_DCE;
2370
2371	if (!tcps->tcps_ignore_path_mtu)
2372		connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY;
2373
2374	/* Use conn_lock to satify ASSERT; tcp is already serialized */
2375	mutex_enter(&connp->conn_lock);
2376	error = conn_connect(connp, &uinfo, flags);
2377	mutex_exit(&connp->conn_lock);
2378	if (error != 0)
2379		return (error);
2380
2381	error = tcp_build_hdrs(tcp);
2382	if (error != 0)
2383		return (error);
2384
2385	tcp->tcp_localnet = uinfo.iulp_localnet;
2386
2387	if (uinfo.iulp_rtt != 0) {
2388		clock_t	rto;
2389
2390		tcp->tcp_rtt_sa = uinfo.iulp_rtt;
2391		tcp->tcp_rtt_sd = uinfo.iulp_rtt_sd;
2392		rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
2393		    tcps->tcps_rexmit_interval_extra +
2394		    (tcp->tcp_rtt_sa >> 5);
2395
2396		if (rto > tcps->tcps_rexmit_interval_max) {
2397			tcp->tcp_rto = tcps->tcps_rexmit_interval_max;
2398		} else if (rto < tcps->tcps_rexmit_interval_min) {
2399			tcp->tcp_rto = tcps->tcps_rexmit_interval_min;
2400		} else {
2401			tcp->tcp_rto = rto;
2402		}
2403	}
2404	if (uinfo.iulp_ssthresh != 0)
2405		tcp->tcp_cwnd_ssthresh = uinfo.iulp_ssthresh;
2406	else
2407		tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
2408	if (uinfo.iulp_spipe > 0) {
2409		connp->conn_sndbuf = MIN(uinfo.iulp_spipe,
2410		    tcps->tcps_max_buf);
2411		if (tcps->tcps_snd_lowat_fraction != 0) {
2412			connp->conn_sndlowat = connp->conn_sndbuf /
2413			    tcps->tcps_snd_lowat_fraction;
2414		}
2415		(void) tcp_maxpsz_set(tcp, B_TRUE);
2416	}
2417	/*
2418	 * Note that up till now, acceptor always inherits receive
2419	 * window from the listener.  But if there is a metrics
2420	 * associated with a host, we should use that instead of
2421	 * inheriting it from listener. Thus we need to pass this
2422	 * info back to the caller.
2423	 */
2424	if (uinfo.iulp_rpipe > 0) {
2425		tcp->tcp_rwnd = MIN(uinfo.iulp_rpipe,
2426		    tcps->tcps_max_buf);
2427	}
2428
2429	if (uinfo.iulp_rtomax > 0) {
2430		tcp->tcp_second_timer_threshold =
2431		    uinfo.iulp_rtomax;
2432	}
2433
2434	/*
2435	 * Use the metric option settings, iulp_tstamp_ok and
2436	 * iulp_wscale_ok, only for active open. What this means
2437	 * is that if the other side uses timestamp or window
2438	 * scale option, TCP will also use those options. That
2439	 * is for passive open.  If the application sets a
2440	 * large window, window scale is enabled regardless of
2441	 * the value in iulp_wscale_ok.  This is the behavior
2442	 * since 2.6.  So we keep it.
2443	 * The only case left in passive open processing is the
2444	 * check for SACK.
2445	 * For ECN, it should probably be like SACK.  But the
2446	 * current value is binary, so we treat it like the other
2447	 * cases.  The metric only controls active open.For passive
2448	 * open, the ndd param, tcp_ecn_permitted, controls the
2449	 * behavior.
2450	 */
2451	if (!tcp_detached) {
2452		/*
2453		 * The if check means that the following can only
2454		 * be turned on by the metrics only IRE, but not off.
2455		 */
2456		if (uinfo.iulp_tstamp_ok)
2457			tcp->tcp_snd_ts_ok = B_TRUE;
2458		if (uinfo.iulp_wscale_ok)
2459			tcp->tcp_snd_ws_ok = B_TRUE;
2460		if (uinfo.iulp_sack == 2)
2461			tcp->tcp_snd_sack_ok = B_TRUE;
2462		if (uinfo.iulp_ecn_ok)
2463			tcp->tcp_ecn_ok = B_TRUE;
2464	} else {
2465		/*
2466		 * Passive open.
2467		 *
2468		 * As above, the if check means that SACK can only be
2469		 * turned on by the metric only IRE.
2470		 */
2471		if (uinfo.iulp_sack > 0) {
2472			tcp->tcp_snd_sack_ok = B_TRUE;
2473		}
2474	}
2475
2476	/*
2477	 * XXX Note that currently, iulp_mtu can be as small as 68
2478	 * because of PMTUd.  So tcp_mss may go to negative if combined
2479	 * length of all those options exceeds 28 bytes.  But because
2480	 * of the tcp_mss_min check below, we may not have a problem if
2481	 * tcp_mss_min is of a reasonable value.  The default is 1 so
2482	 * the negative problem still exists.  And the check defeats PMTUd.
2483	 * In fact, if PMTUd finds that the MSS should be smaller than
2484	 * tcp_mss_min, TCP should turn off PMUTd and use the tcp_mss_min
2485	 * value.
2486	 *
2487	 * We do not deal with that now.  All those problems related to
2488	 * PMTUd will be fixed later.
2489	 */
2490	ASSERT(uinfo.iulp_mtu != 0);
2491	mss = tcp->tcp_initial_pmtu = uinfo.iulp_mtu;
2492
2493	/* Sanity check for MSS value. */
2494	if (connp->conn_ipversion == IPV4_VERSION)
2495		mss_max = tcps->tcps_mss_max_ipv4;
2496	else
2497		mss_max = tcps->tcps_mss_max_ipv6;
2498
2499	if (tcp->tcp_ipsec_overhead == 0)
2500		tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);
2501
2502	mss -= tcp->tcp_ipsec_overhead;
2503
2504	if (mss < tcps->tcps_mss_min)
2505		mss = tcps->tcps_mss_min;
2506	if (mss > mss_max)
2507		mss = mss_max;
2508
2509	/* Note that this is the maximum MSS, excluding all options. */
2510	tcp->tcp_mss = mss;
2511
2512	/*
2513	 * Update the tcp connection with LSO capability.
2514	 */
2515	tcp_update_lso(tcp, connp->conn_ixa);
2516
2517	/*
2518	 * Initialize the ISS here now that we have the full connection ID.
2519	 * The RFC 1948 method of initial sequence number generation requires
2520	 * knowledge of the full connection ID before setting the ISS.
2521	 */
2522	tcp_iss_init(tcp);
2523
2524	tcp->tcp_loopback = (uinfo.iulp_loopback | uinfo.iulp_local);
2525
2526	/*
2527	 * Make sure that conn is not marked incipient
2528	 * for incoming connections. A blind
2529	 * removal of incipient flag is cheaper than
2530	 * check and removal.
2531	 */
2532	mutex_enter(&connp->conn_lock);
2533	connp->conn_state_flags &= ~CONN_INCIPIENT;
2534	mutex_exit(&connp->conn_lock);
2535	return (0);
2536}
2537
2538static void
2539tcp_tpi_bind(tcp_t *tcp, mblk_t *mp)
2540{
2541	int	error;
2542	conn_t	*connp = tcp->tcp_connp;
2543	struct sockaddr	*sa;
2544	mblk_t  *mp1;
2545	struct T_bind_req *tbr;
2546	int	backlog;
2547	socklen_t	len;
2548	sin_t	*sin;
2549	sin6_t	*sin6;
2550	cred_t		*cr;
2551
2552	/*
2553	 * All Solaris components should pass a db_credp
2554	 * for this TPI message, hence we ASSERT.
2555	 * But in case there is some other M_PROTO that looks
2556	 * like a TPI message sent by some other kernel
2557	 * component, we check and return an error.
2558	 */
2559	cr = msg_getcred(mp, NULL);
2560	ASSERT(cr != NULL);
2561	if (cr == NULL) {
2562		tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
2563		return;
2564	}
2565
2566	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
2567	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) {
2568		if (connp->conn_debug) {
2569			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
2570			    "tcp_tpi_bind: bad req, len %u",
2571			    (uint_t)(mp->b_wptr - mp->b_rptr));
2572		}
2573		tcp_err_ack(tcp, mp, TPROTO, 0);
2574		return;
2575	}
2576	/* Make sure the largest address fits */
2577	mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t), 1);
2578	if (mp1 == NULL) {
2579		tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
2580		return;
2581	}
2582	mp = mp1;
2583	tbr = (struct T_bind_req *)mp->b_rptr;
2584
2585	backlog = tbr->CONIND_number;
2586	len = tbr->ADDR_length;
2587
2588	switch (len) {
2589	case 0:		/* request for a generic port */
2590		tbr->ADDR_offset = sizeof (struct T_bind_req);
2591		if (connp->conn_family == AF_INET) {
2592			tbr->ADDR_length = sizeof (sin_t);
2593			sin = (sin_t *)&tbr[1];
2594			*sin = sin_null;
2595			sin->sin_family = AF_INET;
2596			sa = (struct sockaddr *)sin;
2597			len = sizeof (sin_t);
2598			mp->b_wptr = (uchar_t *)&sin[1];
2599		} else {
2600			ASSERT(connp->conn_family == AF_INET6);
2601			tbr->ADDR_length = sizeof (sin6_t);
2602			sin6 = (sin6_t *)&tbr[1];
2603			*sin6 = sin6_null;
2604			sin6->sin6_family = AF_INET6;
2605			sa = (struct sockaddr *)sin6;
2606			len = sizeof (sin6_t);
2607			mp->b_wptr = (uchar_t *)&sin6[1];
2608		}
2609		break;
2610
2611	case sizeof (sin_t):    /* Complete IPv4 address */
2612		sa = (struct sockaddr *)mi_offset_param(mp, tbr->ADDR_offset,
2613		    sizeof (sin_t));
2614		break;
2615
2616	case sizeof (sin6_t): /* Complete IPv6 address */
2617		sa = (struct sockaddr *)mi_offset_param(mp,
2618		    tbr->ADDR_offset, sizeof (sin6_t));
2619		break;
2620
2621	default:
2622		if (connp->conn_debug) {
2623			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
2624			    "tcp_tpi_bind: bad address length, %d",
2625			    tbr->ADDR_length);
2626		}
2627		tcp_err_ack(tcp, mp, TBADADDR, 0);
2628		return;
2629	}
2630
2631	if (backlog > 0) {
2632		error = tcp_do_listen(connp, sa, len, backlog, DB_CRED(mp),
2633		    tbr->PRIM_type != O_T_BIND_REQ);
2634	} else {
2635		error = tcp_do_bind(connp, sa, len, DB_CRED(mp),
2636		    tbr->PRIM_type != O_T_BIND_REQ);
2637	}
2638done:
2639	if (error > 0) {
2640		tcp_err_ack(tcp, mp, TSYSERR, error);
2641	} else if (error < 0) {
2642		tcp_err_ack(tcp, mp, -error, 0);
2643	} else {
2644		/*
2645		 * Update port information as sockfs/tpi needs it for checking
2646		 */
2647		if (connp->conn_family == AF_INET) {
2648			sin = (sin_t *)sa;
2649			sin->sin_port = connp->conn_lport;
2650		} else {
2651			sin6 = (sin6_t *)sa;
2652			sin6->sin6_port = connp->conn_lport;
2653		}
2654		mp->b_datap->db_type = M_PCPROTO;
2655		tbr->PRIM_type = T_BIND_ACK;
2656		putnext(connp->conn_rq, mp);
2657	}
2658}
2659
2660/*
2661 * If the "bind_to_req_port_only" parameter is set, if the requested port
2662 * number is available, return it, If not return 0
2663 *
2664 * If "bind_to_req_port_only" parameter is not set and
2665 * If the requested port number is available, return it.  If not, return
2666 * the first anonymous port we happen across.  If no anonymous ports are
2667 * available, return 0. addr is the requested local address, if any.
2668 *
2669 * In either case, when succeeding update the tcp_t to record the port number
2670 * and insert it in the bind hash table.
2671 *
2672 * Note that TCP over IPv4 and IPv6 sockets can use the same port number
2673 * without setting SO_REUSEADDR. This is needed so that they
2674 * can be viewed as two independent transport protocols.
2675 */
2676static in_port_t
2677tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr,
2678    int reuseaddr, boolean_t quick_connect,
2679    boolean_t bind_to_req_port_only, boolean_t user_specified)
2680{
2681	/* number of times we have run around the loop */
2682	int count = 0;
2683	/* maximum number of times to run around the loop */
2684	int loopmax;
2685	conn_t *connp = tcp->tcp_connp;
2686	tcp_stack_t	*tcps = tcp->tcp_tcps;
2687
2688	/*
2689	 * Lookup for free addresses is done in a loop and "loopmax"
2690	 * influences how long we spin in the loop
2691	 */
2692	if (bind_to_req_port_only) {
2693		/*
2694		 * If the requested port is busy, don't bother to look
2695		 * for a new one. Setting loop maximum count to 1 has
2696		 * that effect.
2697		 */
2698		loopmax = 1;
2699	} else {
2700		/*
2701		 * If the requested port is busy, look for a free one
2702		 * in the anonymous port range.
2703		 * Set loopmax appropriately so that one does not look
2704		 * forever in the case all of the anonymous ports are in use.
2705		 */
2706		if (connp->conn_anon_priv_bind) {
2707			/*
2708			 * loopmax =
2709			 * 	(IPPORT_RESERVED-1) - tcp_min_anonpriv_port + 1
2710			 */
2711			loopmax = IPPORT_RESERVED -
2712			    tcps->tcps_min_anonpriv_port;
2713		} else {
2714			loopmax = (tcps->tcps_largest_anon_port -
2715			    tcps->tcps_smallest_anon_port + 1);
2716		}
2717	}
2718	do {
2719		uint16_t	lport;
2720		tf_t		*tbf;
2721		tcp_t		*ltcp;
2722		conn_t		*lconnp;
2723
2724		lport = htons(port);
2725
2726		/*
2727		 * Ensure that the tcp_t is not currently in the bind hash.
2728		 * Hold the lock on the hash bucket to ensure that
2729		 * the duplicate check plus the insertion is an atomic
2730		 * operation.
2731		 *
2732		 * This function does an inline lookup on the bind hash list
2733		 * Make sure that we access only members of tcp_t
2734		 * and that we don't look at tcp_tcp, since we are not
2735		 * doing a CONN_INC_REF.
2736		 */
2737		tcp_bind_hash_remove(tcp);
2738		tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(lport)];
2739		mutex_enter(&tbf->tf_lock);
2740		for (ltcp = tbf->tf_tcp; ltcp != NULL;
2741		    ltcp = ltcp->tcp_bind_hash) {
2742			if (lport == ltcp->tcp_connp->conn_lport)
2743				break;
2744		}
2745
2746		for (; ltcp != NULL; ltcp = ltcp->tcp_bind_hash_port) {
2747			boolean_t not_socket;
2748			boolean_t exclbind;
2749
2750			lconnp = ltcp->tcp_connp;
2751
2752			/*
2753			 * On a labeled system, we must treat bindings to ports
2754			 * on shared IP addresses by sockets with MAC exemption
2755			 * privilege as being in all zones, as there's
2756			 * otherwise no way to identify the right receiver.
2757			 */
2758			if (!IPCL_BIND_ZONE_MATCH(lconnp, connp))
2759				continue;
2760
2761			/*
2762			 * If TCP_EXCLBIND is set for either the bound or
2763			 * binding endpoint, the semantics of bind
2764			 * is changed according to the following.
2765			 *
2766			 * spec = specified address (v4 or v6)
2767			 * unspec = unspecified address (v4 or v6)
2768			 * A = specified addresses are different for endpoints
2769			 *
2770			 * bound	bind to		allowed
2771			 * -------------------------------------
2772			 * unspec	unspec		no
2773			 * unspec	spec		no
2774			 * spec		unspec		no
2775			 * spec		spec		yes if A
2776			 *
2777			 * For labeled systems, SO_MAC_EXEMPT behaves the same
2778			 * as TCP_EXCLBIND, except that zoneid is ignored.
2779			 *
2780			 * Note:
2781			 *
2782			 * 1. Because of TLI semantics, an endpoint can go
2783			 * back from, say TCP_ESTABLISHED to TCPS_LISTEN or
2784			 * TCPS_BOUND, depending on whether it is originally
2785			 * a listener or not.  That is why we need to check
2786			 * for states greater than or equal to TCPS_BOUND
2787			 * here.
2788			 *
2789			 * 2. Ideally, we should only check for state equals
2790			 * to TCPS_LISTEN. And the following check should be
2791			 * added.
2792			 *
2793			 * if (ltcp->tcp_state == TCPS_LISTEN ||
2794			 *	!reuseaddr || !lconnp->conn_reuseaddr) {
2795			 *		...
2796			 * }
2797			 *
2798			 * The semantics will be changed to this.  If the
2799			 * endpoint on the list is in state not equal to
2800			 * TCPS_LISTEN and both endpoints have SO_REUSEADDR
2801			 * set, let the bind succeed.
2802			 *
2803			 * Because of (1), we cannot do that for TLI
2804			 * endpoints.  But we can do that for socket endpoints.
2805			 * If in future, we can change this going back
2806			 * semantics, we can use the above check for TLI also.
2807			 */
2808			not_socket = !(TCP_IS_SOCKET(ltcp) &&
2809			    TCP_IS_SOCKET(tcp));
2810			exclbind = lconnp->conn_exclbind ||
2811			    connp->conn_exclbind;
2812
2813			if ((lconnp->conn_mac_mode != CONN_MAC_DEFAULT) ||
2814			    (connp->conn_mac_mode != CONN_MAC_DEFAULT) ||
2815			    (exclbind && (not_socket ||
2816			    ltcp->tcp_state <= TCPS_ESTABLISHED))) {
2817				if (V6_OR_V4_INADDR_ANY(
2818				    lconnp->conn_bound_addr_v6) ||
2819				    V6_OR_V4_INADDR_ANY(*laddr) ||
2820				    IN6_ARE_ADDR_EQUAL(laddr,
2821				    &lconnp->conn_bound_addr_v6)) {
2822					break;
2823				}
2824				continue;
2825			}
2826
2827			/*
2828			 * Check ipversion to allow IPv4 and IPv6 sockets to
2829			 * have disjoint port number spaces, if *_EXCLBIND
2830			 * is not set and only if the application binds to a
2831			 * specific port. We use the same autoassigned port
2832			 * number space for IPv4 and IPv6 sockets.
2833			 */
2834			if (connp->conn_ipversion != lconnp->conn_ipversion &&
2835			    bind_to_req_port_only)
2836				continue;
2837
2838			/*
2839			 * Ideally, we should make sure that the source
2840			 * address, remote address, and remote port in the
2841			 * four tuple for this tcp-connection is unique.
2842			 * However, trying to find out the local source
2843			 * address would require too much code duplication
2844			 * with IP, since IP needs needs to have that code
2845			 * to support userland TCP implementations.
2846			 */
2847			if (quick_connect &&
2848			    (ltcp->tcp_state > TCPS_LISTEN) &&
2849			    ((connp->conn_fport != lconnp->conn_fport) ||
2850			    !IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6,
2851			    &lconnp->conn_faddr_v6)))
2852				continue;
2853
2854			if (!reuseaddr) {
2855				/*
2856				 * No socket option SO_REUSEADDR.
2857				 * If existing port is bound to
2858				 * a non-wildcard IP address
2859				 * and the requesting stream is
2860				 * bound to a distinct
2861				 * different IP addresses
2862				 * (non-wildcard, also), keep
2863				 * going.
2864				 */
2865				if (!V6_OR_V4_INADDR_ANY(*laddr) &&
2866				    !V6_OR_V4_INADDR_ANY(
2867				    lconnp->conn_bound_addr_v6) &&
2868				    !IN6_ARE_ADDR_EQUAL(laddr,
2869				    &lconnp->conn_bound_addr_v6))
2870					continue;
2871				if (ltcp->tcp_state >= TCPS_BOUND) {
2872					/*
2873					 * This port is being used and
2874					 * its state is >= TCPS_BOUND,
2875					 * so we can't bind to it.
2876					 */
2877					break;
2878				}
2879			} else {
2880				/*
2881				 * socket option SO_REUSEADDR is set on the
2882				 * binding tcp_t.
2883				 *
2884				 * If two streams are bound to
2885				 * same IP address or both addr
2886				 * and bound source are wildcards
2887				 * (INADDR_ANY), we want to stop
2888				 * searching.
2889				 * We have found a match of IP source
2890				 * address and source port, which is
2891				 * refused regardless of the
2892				 * SO_REUSEADDR setting, so we break.
2893				 */
2894				if (IN6_ARE_ADDR_EQUAL(laddr,
2895				    &lconnp->conn_bound_addr_v6) &&
2896				    (ltcp->tcp_state == TCPS_LISTEN ||
2897				    ltcp->tcp_state == TCPS_BOUND))
2898					break;
2899			}
2900		}
2901		if (ltcp != NULL) {
2902			/* The port number is busy */
2903			mutex_exit(&tbf->tf_lock);
2904		} else {
2905			/*
2906			 * This port is ours. Insert in fanout and mark as
2907			 * bound to prevent others from getting the port
2908			 * number.
2909			 */
2910			tcp->tcp_state = TCPS_BOUND;
2911			connp->conn_lport = htons(port);
2912
2913			ASSERT(&tcps->tcps_bind_fanout[TCP_BIND_HASH(
2914			    connp->conn_lport)] == tbf);
2915			tcp_bind_hash_insert(tbf, tcp, 1);
2916
2917			mutex_exit(&tbf->tf_lock);
2918
2919			/*
2920			 * We don't want tcp_next_port_to_try to "inherit"
2921			 * a port number supplied by the user in a bind.
2922			 */
2923			if (user_specified)
2924				return (port);
2925
2926			/*
2927			 * This is the only place where tcp_next_port_to_try
2928			 * is updated. After the update, it may or may not
2929			 * be in the valid range.
2930			 */
2931			if (!connp->conn_anon_priv_bind)
2932				tcps->tcps_next_port_to_try = port + 1;
2933			return (port);
2934		}
2935
2936		if (connp->conn_anon_priv_bind) {
2937			port = tcp_get_next_priv_port(tcp);
2938		} else {
2939			if (count == 0 && user_specified) {
2940				/*
2941				 * We may have to return an anonymous port. So
2942				 * get one to start with.
2943				 */
2944				port =
2945				    tcp_update_next_port(
2946				    tcps->tcps_next_port_to_try,
2947				    tcp, B_TRUE);
2948				user_specified = B_FALSE;
2949			} else {
2950				port = tcp_update_next_port(port + 1, tcp,
2951				    B_FALSE);
2952			}
2953		}
2954		if (port == 0)
2955			break;
2956
2957		/*
2958		 * Don't let this loop run forever in the case where
2959		 * all of the anonymous ports are in use.
2960		 */
2961	} while (++count < loopmax);
2962	return (0);
2963}
2964
2965/*
2966 * tcp_clean_death / tcp_close_detached must not be called more than once
2967 * on a tcp. Thus every function that potentially calls tcp_clean_death
2968 * must check for the tcp state before calling tcp_clean_death.
2969 * Eg. tcp_input_data, tcp_eager_kill, tcp_clean_death_wrapper,
2970 * tcp_timer_handler, all check for the tcp state.
2971 */
2972/* ARGSUSED */
2973void
2974tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2,
2975    ip_recv_attr_t *dummy)
2976{
2977	tcp_t	*tcp = ((conn_t *)arg)->conn_tcp;
2978
2979	freemsg(mp);
2980	if (tcp->tcp_state > TCPS_BOUND)
2981		(void) tcp_clean_death(((conn_t *)arg)->conn_tcp,
2982		    ETIMEDOUT, 5);
2983}
2984
2985/*
2986 * We are dying for some reason.  Try to do it gracefully.  (May be called
2987 * as writer.)
2988 *
2989 * Return -1 if the structure was not cleaned up (if the cleanup had to be
2990 * done by a service procedure).
2991 * TBD - Should the return value distinguish between the tcp_t being
2992 * freed and it being reinitialized?
2993 */
2994static int
2995tcp_clean_death(tcp_t *tcp, int err, uint8_t tag)
2996{
2997	mblk_t	*mp;
2998	queue_t	*q;
2999	conn_t	*connp = tcp->tcp_connp;
3000	tcp_stack_t	*tcps = tcp->tcp_tcps;
3001
3002	TCP_CLD_STAT(tag);
3003
3004#if TCP_TAG_CLEAN_DEATH
3005	tcp->tcp_cleandeathtag = tag;
3006#endif
3007
3008	if (tcp->tcp_fused)
3009		tcp_unfuse(tcp);
3010
3011	if (tcp->tcp_linger_tid != 0 &&
3012	    TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
3013		tcp_stop_lingering(tcp);
3014	}
3015
3016	ASSERT(tcp != NULL);
3017	ASSERT((connp->conn_family == AF_INET &&
3018	    connp->conn_ipversion == IPV4_VERSION) ||
3019	    (connp->conn_family == AF_INET6 &&
3020	    (connp->conn_ipversion == IPV4_VERSION ||
3021	    connp->conn_ipversion == IPV6_VERSION)));
3022
3023	if (TCP_IS_DETACHED(tcp)) {
3024		if (tcp->tcp_hard_binding) {
3025			/*
3026			 * Its an eager that we are dealing with. We close the
3027			 * eager but in case a conn_ind has already gone to the
3028			 * listener, let tcp_accept_finish() send a discon_ind
3029			 * to the listener and drop the last reference. If the
3030			 * listener doesn't even know about the eager i.e. the
3031			 * conn_ind hasn't gone up, blow away the eager and drop
3032			 * the last reference as well. If the conn_ind has gone
3033			 * up, state should be BOUND. tcp_accept_finish
3034			 * will figure out that the connection has received a
3035			 * RST and will send a DISCON_IND to the application.
3036			 */
3037			tcp_closei_local(tcp);
3038			if (!tcp->tcp_tconnind_started) {
3039				CONN_DEC_REF(connp);
3040			} else {
3041				tcp->tcp_state = TCPS_BOUND;
3042			}
3043		} else {
3044			tcp_close_detached(tcp);
3045		}
3046		return (0);
3047	}
3048
3049	TCP_STAT(tcps, tcp_clean_death_nondetached);
3050
3051	q = connp->conn_rq;
3052
3053	/* Trash all inbound data */
3054	if (!IPCL_IS_NONSTR(connp)) {
3055		ASSERT(q != NULL);
3056		flushq(q, FLUSHALL);
3057	}
3058
3059	/*
3060	 * If we are at least part way open and there is error
3061	 * (err==0 implies no error)
3062	 * notify our client by a T_DISCON_IND.
3063	 */
3064	if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) {
3065		if (tcp->tcp_state >= TCPS_ESTABLISHED &&
3066		    !TCP_IS_SOCKET(tcp)) {
3067			/*
3068			 * Send M_FLUSH according to TPI. Because sockets will
3069			 * (and must) ignore FLUSHR we do that only for TPI
3070			 * endpoints and sockets in STREAMS mode.
3071			 */
3072			(void) putnextctl1(q, M_FLUSH, FLUSHR);
3073		}
3074		if (connp->conn_debug) {
3075			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
3076			    "tcp_clean_death: discon err %d", err);
3077		}
3078		if (IPCL_IS_NONSTR(connp)) {
3079			/* Direct socket, use upcall */
3080			(*connp->conn_upcalls->su_disconnected)(
3081			    connp->conn_upper_handle, tcp->tcp_connid, err);
3082		} else {
3083			mp = mi_tpi_discon_ind(NULL, err, 0);
3084			if (mp != NULL) {
3085				putnext(q, mp);
3086			} else {
3087				if (connp->conn_debug) {
3088					(void) strlog(TCP_MOD_ID, 0, 1,
3089					    SL_ERROR|SL_TRACE,
3090					    "tcp_clean_death, sending M_ERROR");
3091				}
3092				(void) putnextctl1(q, M_ERROR, EPROTO);
3093			}
3094		}
3095		if (tcp->tcp_state <= TCPS_SYN_RCVD) {
3096			/* SYN_SENT or SYN_RCVD */
3097			BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails);
3098		} else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) {
3099			/* ESTABLISHED or CLOSE_WAIT */
3100			BUMP_MIB(&tcps->tcps_mib, tcpEstabResets);
3101		}
3102	}
3103
3104	tcp_reinit(tcp);
3105	if (IPCL_IS_NONSTR(connp))
3106		(void) tcp_do_unbind(connp);
3107
3108	return (-1);
3109}
3110
3111/*
3112 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout
3113 * to expire, stop the wait and finish the close.
3114 */
3115static void
3116tcp_stop_lingering(tcp_t *tcp)
3117{
3118	clock_t	delta = 0;
3119	tcp_stack_t	*tcps = tcp->tcp_tcps;
3120	conn_t		*connp = tcp->tcp_connp;
3121
3122	tcp->tcp_linger_tid = 0;
3123	if (tcp->tcp_state > TCPS_LISTEN) {
3124		tcp_acceptor_hash_remove(tcp);
3125		mutex_enter(&tcp->tcp_non_sq_lock);
3126		if (tcp->tcp_flow_stopped) {
3127			tcp_clrqfull(tcp);
3128		}
3129		mutex_exit(&tcp->tcp_non_sq_lock);
3130
3131		if (tcp->tcp_timer_tid != 0) {
3132			delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
3133			tcp->tcp_timer_tid = 0;
3134		}
3135		/*
3136		 * Need to cancel those timers which will not be used when
3137		 * TCP is detached.  This has to be done before the conn_wq
3138		 * is cleared.
3139		 */
3140		tcp_timers_stop(tcp);
3141
3142		tcp->tcp_detached = B_TRUE;
3143		connp->conn_rq = NULL;
3144		connp->conn_wq = NULL;
3145
3146		if (tcp->tcp_state == TCPS_TIME_WAIT) {
3147			tcp_time_wait_append(tcp);
3148			TCP_DBGSTAT(tcps, tcp_detach_time_wait);
3149			goto finish;
3150		}
3151
3152		/*
3153		 * If delta is zero the timer event wasn't executed and was
3154		 * successfully canceled. In this case we need to restart it
3155		 * with the minimal delta possible.
3156		 */
3157		if (delta >= 0) {
3158			tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
3159			    delta ? delta : 1);
3160		}
3161	} else {
3162		tcp_closei_local(tcp);
3163		CONN_DEC_REF(connp);
3164	}
3165finish:
3166	/* Signal closing thread that it can complete close */
3167	mutex_enter(&tcp->tcp_closelock);
3168	tcp->tcp_detached = B_TRUE;
3169	connp->conn_rq = NULL;
3170	connp->conn_wq = NULL;
3171
3172	tcp->tcp_closed = 1;
3173	cv_signal(&tcp->tcp_closecv);
3174	mutex_exit(&tcp->tcp_closelock);
3175}
3176
3177/*
3178 * Handle lingering timeouts. This function is called when the SO_LINGER timeout
3179 * expires.
3180 */
3181static void
3182tcp_close_linger_timeout(void *arg)
3183{
3184	conn_t	*connp = (conn_t *)arg;
3185	tcp_t 	*tcp = connp->conn_tcp;
3186
3187	tcp->tcp_client_errno = ETIMEDOUT;
3188	tcp_stop_lingering(tcp);
3189}
3190
3191static void
3192tcp_close_common(conn_t *connp, int flags)
3193{
3194	tcp_t		*tcp = connp->conn_tcp;
3195	mblk_t 		*mp = &tcp->tcp_closemp;
3196	boolean_t	conn_ioctl_cleanup_reqd = B_FALSE;
3197	mblk_t		*bp;
3198
3199	ASSERT(connp->conn_ref >= 2);
3200
3201	/*
3202	 * Mark the conn as closing. ipsq_pending_mp_add will not
3203	 * add any mp to the pending mp list, after this conn has
3204	 * started closing.
3205	 */
3206	mutex_enter(&connp->conn_lock);
3207	connp->conn_state_flags |= CONN_CLOSING;
3208	if (connp->conn_oper_pending_ill != NULL)
3209		conn_ioctl_cleanup_reqd = B_TRUE;
3210	CONN_INC_REF_LOCKED(connp);
3211	mutex_exit(&connp->conn_lock);
3212	tcp->tcp_closeflags = (uint8_t)flags;
3213	ASSERT(connp->conn_ref >= 3);
3214
3215	/*
3216	 * tcp_closemp_used is used below without any protection of a lock
3217	 * as we don't expect any one else to use it concurrently at this
3218	 * point otherwise it would be a major defect.
3219	 */
3220
3221	if (mp->b_prev == NULL)
3222		tcp->tcp_closemp_used = B_TRUE;
3223	else
3224		cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: "
3225		    "connp %p tcp %p\n", (void *)connp, (void *)tcp);
3226
3227	TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);
3228
3229	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_close_output, connp,
3230	    NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
3231
3232	mutex_enter(&tcp->tcp_closelock);
3233	while (!tcp->tcp_closed) {
3234		if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) {
3235			/*
3236			 * The cv_wait_sig() was interrupted. We now do the
3237			 * following:
3238			 *
3239			 * 1) If the endpoint was lingering, we allow this
3240			 * to be interrupted by cancelling the linger timeout
3241			 * and closing normally.
3242			 *
3243			 * 2) Revert to calling cv_wait()
3244			 *
3245			 * We revert to using cv_wait() to avoid an
3246			 * infinite loop which can occur if the calling
3247			 * thread is higher priority than the squeue worker
3248			 * thread and is bound to the same cpu.
3249			 */
3250			if (connp->conn_linger && connp->conn_lingertime > 0) {
3251				mutex_exit(&tcp->tcp_closelock);
3252				/* Entering squeue, bump ref count. */
3253				CONN_INC_REF(connp);
3254				bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL);
3255				SQUEUE_ENTER_ONE(connp->conn_sqp, bp,
3256				    tcp_linger_interrupted, connp, NULL,
3257				    tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
3258				mutex_enter(&tcp->tcp_closelock);
3259			}
3260			break;
3261		}
3262	}
3263	while (!tcp->tcp_closed)
3264		cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock);
3265	mutex_exit(&tcp->tcp_closelock);
3266
3267	/*
3268	 * In the case of listener streams that have eagers in the q or q0
3269	 * we wait for the eagers to drop their reference to us. conn_rq and
3270	 * conn_wq of the eagers point to our queues. By waiting for the
3271	 * refcnt to drop to 1, we are sure that the eagers have cleaned
3272	 * up their queue pointers and also dropped their references to us.
3273	 */
3274	if (tcp->tcp_wait_for_eagers) {
3275		mutex_enter(&connp->conn_lock);
3276		while (connp->conn_ref != 1) {
3277			cv_wait(&connp->conn_cv, &connp->conn_lock);
3278		}
3279		mutex_exit(&connp->conn_lock);
3280	}
3281	/*
3282	 * ioctl cleanup. The mp is queued in the ipx_pending_mp.
3283	 */
3284	if (conn_ioctl_cleanup_reqd)
3285		conn_ioctl_cleanup(connp);
3286
3287	connp->conn_cpid = NOPID;
3288}
3289
3290static int
3291tcp_tpi_close(queue_t *q, int flags)
3292{
3293	conn_t		*connp;
3294
3295	ASSERT(WR(q)->q_next == NULL);
3296
3297	if (flags & SO_FALLBACK) {
3298		/*
3299		 * stream is being closed while in fallback
3300		 * simply free the resources that were allocated
3301		 */
3302		inet_minor_free(WR(q)->q_ptr, (dev_t)(RD(q)->q_ptr));
3303		qprocsoff(q);
3304		goto done;
3305	}
3306
3307	connp = Q_TO_CONN(q);
3308	/*
3309	 * We are being closed as /dev/tcp or /dev/tcp6.
3310	 */
3311	tcp_close_common(connp, flags);
3312
3313	qprocsoff(q);
3314	inet_minor_free(connp->conn_minor_arena, connp->conn_dev);
3315
3316	/*
3317	 * Drop IP's reference on the conn. This is the last reference
3318	 * on the connp if the state was less than established. If the
3319	 * connection has gone into timewait state, then we will have
3320	 * one ref for the TCP and one more ref (total of two) for the
3321	 * classifier connected hash list (a timewait connections stays
3322	 * in connected hash till closed).
3323	 *
3324	 * We can't assert the references because there might be other
3325	 * transient reference places because of some walkers or queued
3326	 * packets in squeue for the timewait state.
3327	 */
3328	CONN_DEC_REF(connp);
3329done:
3330	q->q_ptr = WR(q)->q_ptr = NULL;
3331	return (0);
3332}
3333
3334static int
3335tcp_tpi_close_accept(queue_t *q)
3336{
3337	vmem_t	*minor_arena;
3338	dev_t	conn_dev;
3339
3340	ASSERT(WR(q)->q_qinfo == &tcp_acceptor_winit);
3341
3342	/*
3343	 * We had opened an acceptor STREAM for sockfs which is
3344	 * now being closed due to some error.
3345	 */
3346	qprocsoff(q);
3347
3348	minor_arena = (vmem_t *)WR(q)->q_ptr;
3349	conn_dev = (dev_t)RD(q)->q_ptr;
3350	ASSERT(minor_arena != NULL);
3351	ASSERT(conn_dev != 0);
3352	inet_minor_free(minor_arena, conn_dev);
3353	q->q_ptr = WR(q)->q_ptr = NULL;
3354	return (0);
3355}
3356
3357/*
3358 * Called by tcp_close() routine via squeue when lingering is
3359 * interrupted by a signal.
3360 */
3361
3362/* ARGSUSED */
3363static void
3364tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
3365{
3366	conn_t	*connp = (conn_t *)arg;
3367	tcp_t	*tcp = connp->conn_tcp;
3368
3369	freeb(mp);
3370	if (tcp->tcp_linger_tid != 0 &&
3371	    TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
3372		tcp_stop_lingering(tcp);
3373		tcp->tcp_client_errno = EINTR;
3374	}
3375}
3376
3377/*
3378 * Called by streams close routine via squeues when our client blows off her
3379 * descriptor, we take this to mean: "close the stream state NOW, close the tcp
3380 * connection politely" When SO_LINGER is set (with a non-zero linger time and
3381 * it is not a nonblocking socket) then this routine sleeps until the FIN is
3382 * acked.
3383 *
3384 * NOTE: tcp_close potentially returns error when lingering.
3385 * However, the stream head currently does not pass these errors
3386 * to the application. 4.4BSD only returns EINTR and EWOULDBLOCK
3387 * errors to the application (from tsleep()) and not errors
3388 * like ECONNRESET caused by receiving a reset packet.
3389 */
3390
3391/* ARGSUSED */
3392static void
3393tcp_close_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
3394{
3395	char	*msg;
3396	conn_t	*connp = (conn_t *)arg;
3397	tcp_t	*tcp = connp->conn_tcp;
3398	clock_t	delta = 0;
3399	tcp_stack_t	*tcps = tcp->tcp_tcps;
3400
3401	ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
3402	    (connp->conn_fanout == NULL && connp->conn_ref >= 3));
3403
3404	mutex_enter(&tcp->tcp_eager_lock);
3405	if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
3406		/* Cleanup for listener */
3407		tcp_eager_cleanup(tcp, 0);
3408		tcp->tcp_wait_for_eagers = 1;
3409	}
3410	mutex_exit(&tcp->tcp_eager_lock);
3411
3412	tcp->tcp_lso = B_FALSE;
3413
3414	msg = NULL;
3415	switch (tcp->tcp_state) {
3416	case TCPS_CLOSED:
3417	case TCPS_IDLE:
3418	case TCPS_BOUND:
3419	case TCPS_LISTEN:
3420		break;
3421	case TCPS_SYN_SENT:
3422		msg = "tcp_close, during connect";
3423		break;
3424	case TCPS_SYN_RCVD:
3425		/*
3426		 * Close during the connect 3-way handshake
3427		 * but here there may or may not be pending data
3428		 * already on queue. Process almost same as in
3429		 * the ESTABLISHED state.
3430		 */
3431		/* FALLTHRU */
3432	default:
3433		if (tcp->tcp_fused)
3434			tcp_unfuse(tcp);
3435
3436		/*
3437		 * If SO_LINGER has set a zero linger time, abort the
3438		 * connection with a reset.
3439		 */
3440		if (connp->conn_linger && connp->conn_lingertime == 0) {
3441			msg = "tcp_close, zero lingertime";
3442			break;
3443		}
3444
3445		/*
3446		 * Abort connection if there is unread data queued.
3447		 */
3448		if (tcp->tcp_rcv_list || tcp->tcp_reass_head) {
3449			msg = "tcp_close, unread data";
3450			break;
3451		}
3452		/*
3453		 * We have done a qwait() above which could have possibly
3454		 * drained more messages in turn causing transition to a
3455		 * different state. Check whether we have to do the rest
3456		 * of the processing or not.
3457		 */
3458		if (tcp->tcp_state <= TCPS_LISTEN)
3459			break;
3460
3461		/*
3462		 * Transmit the FIN before detaching the tcp_t.
3463		 * After tcp_detach returns this queue/perimeter
3464		 * no longer owns the tcp_t thus others can modify it.
3465		 */
3466		(void) tcp_xmit_end(tcp);
3467
3468		/*
3469		 * If lingering on close then wait until the fin is acked,
3470		 * the SO_LINGER time passes, or a reset is sent/received.
3471		 */
3472		if (connp->conn_linger && connp->conn_lingertime > 0 &&
3473		    !(tcp->tcp_fin_acked) &&
3474		    tcp->tcp_state >= TCPS_ESTABLISHED) {
3475			if (tcp->tcp_closeflags & (FNDELAY|FNONBLOCK)) {
3476				tcp->tcp_client_errno = EWOULDBLOCK;
3477			} else if (tcp->tcp_client_errno == 0) {
3478
3479				ASSERT(tcp->tcp_linger_tid == 0);
3480
3481				tcp->tcp_linger_tid = TCP_TIMER(tcp,
3482				    tcp_close_linger_timeout,
3483				    connp->conn_lingertime * hz);
3484
3485				/* tcp_close_linger_timeout will finish close */
3486				if (tcp->tcp_linger_tid == 0)
3487					tcp->tcp_client_errno = ENOSR;
3488				else
3489					return;
3490			}
3491
3492			/*
3493			 * Check if we need to detach or just close
3494			 * the instance.
3495			 */
3496			if (tcp->tcp_state <= TCPS_LISTEN)
3497				break;
3498		}
3499
3500		/*
3501		 * Make sure that no other thread will access the conn_rq of
3502		 * this instance (through lookups etc.) as conn_rq will go
3503		 * away shortly.
3504		 */
3505		tcp_acceptor_hash_remove(tcp);
3506
3507		mutex_enter(&tcp->tcp_non_sq_lock);
3508		if (tcp->tcp_flow_stopped) {
3509			tcp_clrqfull(tcp);
3510		}
3511		mutex_exit(&tcp->tcp_non_sq_lock);
3512
3513		if (tcp->tcp_timer_tid != 0) {
3514			delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
3515			tcp->tcp_timer_tid = 0;
3516		}
3517		/*
3518		 * Need to cancel those timers which will not be used when
3519		 * TCP is detached.  This has to be done before the conn_wq
3520		 * is set to NULL.
3521		 */
3522		tcp_timers_stop(tcp);
3523
3524		tcp->tcp_detached = B_TRUE;
3525		if (tcp->tcp_state == TCPS_TIME_WAIT) {
3526			tcp_time_wait_append(tcp);
3527			TCP_DBGSTAT(tcps, tcp_detach_time_wait);
3528			ASSERT(connp->conn_ref >= 3);
3529			goto finish;
3530		}
3531
3532		/*
3533		 * If delta is zero the timer event wasn't executed and was
3534		 * successfully canceled. In this case we need to restart it
3535		 * with the minimal delta possible.
3536		 */
3537		if (delta >= 0)
3538			tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
3539			    delta ? delta : 1);
3540
3541		ASSERT(connp->conn_ref >= 3);
3542		goto finish;
3543	}
3544
3545	/* Detach did not complete. Still need to remove q from stream. */
3546	if (msg) {
3547		if (tcp->tcp_state == TCPS_ESTABLISHED ||
3548		    tcp->tcp_state == TCPS_CLOSE_WAIT)
3549			BUMP_MIB(&tcps->tcps_mib, tcpEstabResets);
3550		if (tcp->tcp_state == TCPS_SYN_SENT ||
3551		    tcp->tcp_state == TCPS_SYN_RCVD)
3552			BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails);
3553		tcp_xmit_ctl(msg, tcp,  tcp->tcp_snxt, 0, TH_RST);
3554	}
3555
3556	tcp_closei_local(tcp);
3557	CONN_DEC_REF(connp);
3558	ASSERT(connp->conn_ref >= 2);
3559
3560finish:
3561	mutex_enter(&tcp->tcp_closelock);
3562	/*
3563	 * Don't change the queues in the case of a listener that has
3564	 * eagers in its q or q0. It could surprise the eagers.
3565	 * Instead wait for the eagers outside the squeue.
3566	 */
3567	if (!tcp->tcp_wait_for_eagers) {
3568		tcp->tcp_detached = B_TRUE;
3569		connp->conn_rq = NULL;
3570		connp->conn_wq = NULL;
3571	}
3572
3573	/* Signal tcp_close() to finish closing. */
3574	tcp->tcp_closed = 1;
3575	cv_signal(&tcp->tcp_closecv);
3576	mutex_exit(&tcp->tcp_closelock);
3577}
3578
3579/*
3580 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
3581 * Some stream heads get upset if they see these later on as anything but NULL.
3582 */
3583static void
3584tcp_close_mpp(mblk_t **mpp)
3585{
3586	mblk_t	*mp;
3587
3588	if ((mp = *mpp) != NULL) {
3589		do {
3590			mp->b_next = NULL;
3591			mp->b_prev = NULL;
3592		} while ((mp = mp->b_cont) != NULL);
3593
3594		mp = *mpp;
3595		*mpp = NULL;
3596		freemsg(mp);
3597	}
3598}
3599
3600/* Do detached close. */
3601static void
3602tcp_close_detached(tcp_t *tcp)
3603{
3604	if (tcp->tcp_fused)
3605		tcp_unfuse(tcp);
3606
3607	/*
3608	 * Clustering code serializes TCP disconnect callbacks and
3609	 * cluster tcp list walks by blocking a TCP disconnect callback
3610	 * if a cluster tcp list walk is in progress. This ensures
3611	 * accurate accounting of TCPs in the cluster code even though
3612	 * the TCP list walk itself is not atomic.
3613	 */
3614	tcp_closei_local(tcp);
3615	CONN_DEC_REF(tcp->tcp_connp);
3616}
3617
3618/*
3619 * Stop all TCP timers, and free the timer mblks if requested.
3620 */
3621void
3622tcp_timers_stop(tcp_t *tcp)
3623{
3624	if (tcp->tcp_timer_tid != 0) {
3625		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
3626		tcp->tcp_timer_tid = 0;
3627	}
3628	if (tcp->tcp_ka_tid != 0) {
3629		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ka_tid);
3630		tcp->tcp_ka_tid = 0;
3631	}
3632	if (tcp->tcp_ack_tid != 0) {
3633		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid);
3634		tcp->tcp_ack_tid = 0;
3635	}
3636	if (tcp->tcp_push_tid != 0) {
3637		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
3638		tcp->tcp_push_tid = 0;
3639	}
3640}
3641
3642/*
3643 * The tcp_t is going away. Remove it from all lists and set it
3644 * to TCPS_CLOSED. The freeing up of memory is deferred until
3645 * tcp_inactive. This is needed since a thread in tcp_rput might have
3646 * done a CONN_INC_REF on this structure before it was removed from the
3647 * hashes.
3648 */
3649static void
3650tcp_closei_local(tcp_t *tcp)
3651{
3652	conn_t		*connp = tcp->tcp_connp;
3653	tcp_stack_t	*tcps = tcp->tcp_tcps;
3654
3655	if (!TCP_IS_SOCKET(tcp))
3656		tcp_acceptor_hash_remove(tcp);
3657
3658	UPDATE_MIB(&tcps->tcps_mib, tcpHCInSegs, tcp->tcp_ibsegs);
3659	tcp->tcp_ibsegs = 0;
3660	UPDATE_MIB(&tcps->tcps_mib, tcpHCOutSegs, tcp->tcp_obsegs);
3661	tcp->tcp_obsegs = 0;
3662
3663	/*
3664	 * If we are an eager connection hanging off a listener that
3665	 * hasn't formally accepted the connection yet, get off his
3666	 * list and blow off any data that we have accumulated.
3667	 */
3668	if (tcp->tcp_listener != NULL) {
3669		tcp_t	*listener = tcp->tcp_listener;
3670		mutex_enter(&listener->tcp_eager_lock);
3671		/*
3672		 * tcp_tconnind_started == B_TRUE means that the
3673		 * conn_ind has already gone to listener. At
3674		 * this point, eager will be closed but we
3675		 * leave it in listeners eager list so that
3676		 * if listener decides to close without doing
3677		 * accept, we can clean this up. In tcp_tli_accept
3678		 * we take care of the case of accept on closed
3679		 * eager.
3680		 */
3681		if (!tcp->tcp_tconnind_started) {
3682			tcp_eager_unlink(tcp);
3683			mutex_exit(&listener->tcp_eager_lock);
3684			/*
3685			 * We don't want to have any pointers to the
3686			 * listener queue, after we have released our
3687			 * reference on the listener
3688			 */
3689			ASSERT(tcp->tcp_detached);
3690			connp->conn_rq = NULL;
3691			connp->conn_wq = NULL;
3692			CONN_DEC_REF(listener->tcp_connp);
3693		} else {
3694			mutex_exit(&listener->tcp_eager_lock);
3695		}
3696	}
3697
3698	/* Stop all the timers */
3699	tcp_timers_stop(tcp);
3700
3701	if (tcp->tcp_state == TCPS_LISTEN) {
3702		if (tcp->tcp_ip_addr_cache) {
3703			kmem_free((void *)tcp->tcp_ip_addr_cache,
3704			    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
3705			tcp->tcp_ip_addr_cache = NULL;
3706		}
3707	}
3708	mutex_enter(&tcp->tcp_non_sq_lock);
3709	if (tcp->tcp_flow_stopped)
3710		tcp_clrqfull(tcp);
3711	mutex_exit(&tcp->tcp_non_sq_lock);
3712
3713	tcp_bind_hash_remove(tcp);
3714	/*
3715	 * If the tcp_time_wait_collector (which runs outside the squeue)
3716	 * is trying to remove this tcp from the time wait list, we will
3717	 * block in tcp_time_wait_remove while trying to acquire the
3718	 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also
3719	 * requires the ipcl_hash_remove to be ordered after the
3720	 * tcp_time_wait_remove for the refcnt checks to work correctly.
3721	 */
3722	if (tcp->tcp_state == TCPS_TIME_WAIT)
3723		(void) tcp_time_wait_remove(tcp, NULL);
3724	CL_INET_DISCONNECT(connp);
3725	ipcl_hash_remove(connp);
3726	ixa_cleanup(connp->conn_ixa);
3727
3728	/*
3729	 * Mark the conn as CONDEMNED
3730	 */
3731	mutex_enter(&connp->conn_lock);
3732	connp->conn_state_flags |= CONN_CONDEMNED;
3733	mutex_exit(&connp->conn_lock);
3734
3735	/* Need to cleanup any pending ioctls */
3736	ASSERT(tcp->tcp_time_wait_next == NULL);
3737	ASSERT(tcp->tcp_time_wait_prev == NULL);
3738	ASSERT(tcp->tcp_time_wait_expire == 0);
3739	tcp->tcp_state = TCPS_CLOSED;
3740
3741	/* Release any SSL context */
3742	if (tcp->tcp_kssl_ent != NULL) {
3743		kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY);
3744		tcp->tcp_kssl_ent = NULL;
3745	}
3746	if (tcp->tcp_kssl_ctx != NULL) {
3747		kssl_release_ctx(tcp->tcp_kssl_ctx);
3748		tcp->tcp_kssl_ctx = NULL;
3749	}
3750	tcp->tcp_kssl_pending = B_FALSE;
3751
3752	tcp_ipsec_cleanup(tcp);
3753}
3754
3755/*
3756 * tcp is dying (called from ipcl_conn_destroy and error cases).
3757 * Free the tcp_t in either case.
3758 */
3759void
3760tcp_free(tcp_t *tcp)
3761{
3762	mblk_t		*mp;
3763	conn_t		*connp = tcp->tcp_connp;
3764
3765	ASSERT(tcp != NULL);
3766	ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL);
3767
3768	connp->conn_rq = NULL;
3769	connp->conn_wq = NULL;
3770
3771	tcp_close_mpp(&tcp->tcp_xmit_head);
3772	tcp_close_mpp(&tcp->tcp_reass_head);
3773	if (tcp->tcp_rcv_list != NULL) {
3774		/* Free b_next chain */
3775		tcp_close_mpp(&tcp->tcp_rcv_list);
3776	}
3777	if ((mp = tcp->tcp_urp_mp) != NULL) {
3778		freemsg(mp);
3779	}
3780	if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
3781		freemsg(mp);
3782	}
3783
3784	if (tcp->tcp_fused_sigurg_mp != NULL) {
3785		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
3786		freeb(tcp->tcp_fused_sigurg_mp);
3787		tcp->tcp_fused_sigurg_mp = NULL;
3788	}
3789
3790	if (tcp->tcp_ordrel_mp != NULL) {
3791		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
3792		freeb(tcp->tcp_ordrel_mp);
3793		tcp->tcp_ordrel_mp = NULL;
3794	}
3795
3796	if (tcp->tcp_sack_info != NULL) {
3797		if (tcp->tcp_notsack_list != NULL) {
3798			TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list,
3799			    tcp);
3800		}
3801		bzero(tcp->tcp_sack_info, sizeof (tcp_sack_info_t));
3802	}
3803
3804	if (tcp->tcp_hopopts != NULL) {
3805		mi_free(tcp->tcp_hopopts);
3806		tcp->tcp_hopopts = NULL;
3807		tcp->tcp_hopoptslen = 0;
3808	}
3809	ASSERT(tcp->tcp_hopoptslen == 0);
3810	if (tcp->tcp_dstopts != NULL) {
3811		mi_free(tcp->tcp_dstopts);
3812		tcp->tcp_dstopts = NULL;
3813		tcp->tcp_dstoptslen = 0;
3814	}
3815	ASSERT(tcp->tcp_dstoptslen == 0);
3816	if (tcp->tcp_rthdrdstopts != NULL) {
3817		mi_free(tcp->tcp_rthdrdstopts);
3818		tcp->tcp_rthdrdstopts = NULL;
3819		tcp->tcp_rthdrdstoptslen = 0;
3820	}
3821	ASSERT(tcp->tcp_rthdrdstoptslen == 0);
3822	if (tcp->tcp_rthdr != NULL) {
3823		mi_free(tcp->tcp_rthdr);
3824		tcp->tcp_rthdr = NULL;
3825		tcp->tcp_rthdrlen = 0;
3826	}
3827	ASSERT(tcp->tcp_rthdrlen == 0);
3828
3829	/*
3830	 * Following is really a blowing away a union.
3831	 * It happens to have exactly two members of identical size
3832	 * the following code is enough.
3833	 */
3834	tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);
3835}
3836
3837
3838/*
3839 * Put a connection confirmation message upstream built from the
3840 * address/flowid information with the conn and iph. Report our success or
3841 * failure.
3842 */
3843static boolean_t
3844tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, mblk_t *idmp,
3845    mblk_t **defermp, ip_recv_attr_t *ira)
3846{
3847	sin_t	sin;
3848	sin6_t	sin6;
3849	mblk_t	*mp;
3850	char	*optp = NULL;
3851	int	optlen = 0;
3852	conn_t	*connp = tcp->tcp_connp;
3853
3854	if (defermp != NULL)
3855		*defermp = NULL;
3856
3857	if (tcp->tcp_conn.tcp_opts_conn_req != NULL) {
3858		/*
3859		 * Return in T_CONN_CON results of option negotiation through
3860		 * the T_CONN_REQ. Note: If there is an real end-to-end option
3861		 * negotiation, then what is received from remote end needs
3862		 * to be taken into account but there is no such thing (yet?)
3863		 * in our TCP/IP.
3864		 * Note: We do not use mi_offset_param() here as
3865		 * tcp_opts_conn_req contents do not directly come from
3866		 * an application and are either generated in kernel or
3867		 * from user input that was already verified.
3868		 */
3869		mp = tcp->tcp_conn.tcp_opts_conn_req;
3870		optp = (char *)(mp->b_rptr +
3871		    ((struct T_conn_req *)mp->b_rptr)->OPT_offset);
3872		optlen = (int)
3873		    ((struct T_conn_req *)mp->b_rptr)->OPT_length;
3874	}
3875
3876	if (IPH_HDR_VERSION(iphdr) == IPV4_VERSION) {
3877
3878		/* packet is IPv4 */
3879		if (connp->conn_family == AF_INET) {
3880			sin = sin_null;
3881			sin.sin_addr.s_addr = connp->conn_faddr_v4;
3882			sin.sin_port = connp->conn_fport;
3883			sin.sin_family = AF_INET;
3884			mp = mi_tpi_conn_con(NULL, (char *)&sin,
3885			    (int)sizeof (sin_t), optp, optlen);
3886		} else {
3887			sin6 = sin6_null;
3888			sin6.sin6_addr = connp->conn_faddr_v6;
3889			sin6.sin6_port = connp->conn_fport;
3890			sin6.sin6_family = AF_INET6;
3891			mp = mi_tpi_conn_con(NULL, (char *)&sin6,
3892			    (int)sizeof (sin6_t), optp, optlen);
3893
3894		}
3895	} else {
3896		ip6_t	*ip6h = (ip6_t *)iphdr;
3897
3898		ASSERT(IPH_HDR_VERSION(iphdr) == IPV6_VERSION);
3899		ASSERT(connp->conn_family == AF_INET6);
3900		sin6 = sin6_null;
3901		sin6.sin6_addr = connp->conn_faddr_v6;
3902		sin6.sin6_port = connp->conn_fport;
3903		sin6.sin6_family = AF_INET6;
3904		sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
3905		mp = mi_tpi_conn_con(NULL, (char *)&sin6,
3906		    (int)sizeof (sin6_t), optp, optlen);
3907	}
3908
3909	if (!mp)
3910		return (B_FALSE);
3911
3912	mblk_copycred(mp, idmp);
3913
3914	if (defermp == NULL) {
3915		conn_t *connp = tcp->tcp_connp;
3916		if (IPCL_IS_NONSTR(connp)) {
3917			(*connp->conn_upcalls->su_connected)
3918			    (connp->conn_upper_handle, tcp->tcp_connid,
3919			    ira->ira_cred, ira->ira_cpid);
3920			freemsg(mp);
3921		} else {
3922			if (ira->ira_cred != NULL) {
3923				/* So that getpeerucred works for TPI sockfs */
3924				mblk_setcred(mp, ira->ira_cred, ira->ira_cpid);
3925			}
3926			putnext(connp->conn_rq, mp);
3927		}
3928	} else {
3929		*defermp = mp;
3930	}
3931
3932	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
3933		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
3934	return (B_TRUE);
3935}
3936
3937/*
3938 * Defense for the SYN attack -
3939 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest
3940 *    one from the list of droppable eagers. This list is a subset of q0.
3941 *    see comments before the definition of MAKE_DROPPABLE().
3942 * 2. Don't drop a SYN request before its first timeout. This gives every
3943 *    request at least til the first timeout to complete its 3-way handshake.
3944 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many
3945 *    requests currently on the queue that has timed out. This will be used
3946 *    as an indicator of whether an attack is under way, so that appropriate
3947 *    actions can be taken. (It's incremented in tcp_timer() and decremented
3948 *    either when eager goes into ESTABLISHED, or gets freed up.)
3949 * 4. The current threshold is - # of timeout > q0len/4 => SYN alert on
3950 *    # of timeout drops back to <= q0len/32 => SYN alert off
3951 */
3952static boolean_t
3953tcp_drop_q0(tcp_t *tcp)
3954{
3955	tcp_t	*eager;
3956	mblk_t	*mp;
3957	tcp_stack_t	*tcps = tcp->tcp_tcps;
3958
3959	ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock));
3960	ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0);
3961
3962	/* Pick oldest eager from the list of droppable eagers */
3963	eager = tcp->tcp_eager_prev_drop_q0;
3964
3965	/* If list is empty. return B_FALSE */
3966	if (eager == tcp) {
3967		return (B_FALSE);
3968	}
3969
3970	/* If allocated, the mp will be freed in tcp_clean_death_wrapper() */
3971	if ((mp = allocb(0, BPRI_HI)) == NULL)
3972		return (B_FALSE);
3973
3974	/*
3975	 * Take this eager out from the list of droppable eagers since we are
3976	 * going to drop it.
3977	 */
3978	MAKE_UNDROPPABLE(eager);
3979
3980	if (tcp->tcp_connp->conn_debug) {
3981		(void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
3982		    "tcp_drop_q0: listen half-open queue (max=%d) overflow"
3983		    " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0,
3984		    tcp->tcp_conn_req_cnt_q0,
3985		    tcp_display(tcp, NULL, DISP_PORT_ONLY));
3986	}
3987
3988	BUMP_MIB(&tcps->tcps_mib, tcpHalfOpenDrop);
3989
3990	/* Put a reference on the conn as we are enqueueing it in the sqeue */
3991	CONN_INC_REF(eager->tcp_connp);
3992
3993	SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
3994	    tcp_clean_death_wrapper, eager->tcp_connp, NULL,
3995	    SQ_FILL, SQTAG_TCP_DROP_Q0);
3996
3997	return (B_TRUE);
3998}
3999
4000/*
4001 * Handle a SYN on an AF_INET6 socket; can be either IPv4 or IPv6
4002 */
4003static mblk_t *
4004tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
4005    ip_recv_attr_t *ira)
4006{
4007	tcp_t 		*ltcp = lconnp->conn_tcp;
4008	tcp_t		*tcp = connp->conn_tcp;
4009	mblk_t		*tpi_mp;
4010	ipha_t		*ipha;
4011	ip6_t		*ip6h;
4012	sin6_t 		sin6;
4013	uint_t		ifindex = ira->ira_ruifindex;
4014	tcp_stack_t	*tcps = tcp->tcp_tcps;
4015
4016	if (ira->ira_flags & IRAF_IS_IPV4) {
4017		ipha = (ipha_t *)mp->b_rptr;
4018
4019		connp->conn_ipversion = IPV4_VERSION;
4020		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
4021		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
4022		connp->conn_saddr_v6 = connp->conn_laddr_v6;
4023
4024		sin6 = sin6_null;
4025		sin6.sin6_addr = connp->conn_faddr_v6;
4026		sin6.sin6_port = connp->conn_fport;
4027		sin6.sin6_family = AF_INET6;
4028		sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
4029		    IPCL_ZONEID(lconnp), tcps->tcps_netstack);
4030
4031		if (connp->conn_recv_ancillary.crb_recvdstaddr) {
4032			sin6_t	sin6d;
4033
4034			sin6d = sin6_null;
4035			sin6d.sin6_addr = connp->conn_laddr_v6;
4036			sin6d.sin6_port = connp->conn_lport;
4037			sin6d.sin6_family = AF_INET;
4038			tpi_mp = mi_tpi_extconn_ind(NULL,
4039			    (char *)&sin6d, sizeof (sin6_t),
4040			    (char *)&tcp,
4041			    (t_scalar_t)sizeof (intptr_t),
4042			    (char *)&sin6d, sizeof (sin6_t),
4043			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
4044		} else {
4045			tpi_mp = mi_tpi_conn_ind(NULL,
4046			    (char *)&sin6, sizeof (sin6_t),
4047			    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
4048			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
4049		}
4050	} else {
4051		ip6h = (ip6_t *)mp->b_rptr;
4052
4053		connp->conn_ipversion = IPV6_VERSION;
4054		connp->conn_laddr_v6 = ip6h->ip6_dst;
4055		connp->conn_faddr_v6 = ip6h->ip6_src;
4056		connp->conn_saddr_v6 = connp->conn_laddr_v6;
4057
4058		sin6 = sin6_null;
4059		sin6.sin6_addr = connp->conn_faddr_v6;
4060		sin6.sin6_port = connp->conn_fport;
4061		sin6.sin6_family = AF_INET6;
4062		sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
4063		sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
4064		    IPCL_ZONEID(lconnp), tcps->tcps_netstack);
4065
4066		if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) {
4067			/* Pass up the scope_id of remote addr */
4068			sin6.sin6_scope_id = ifindex;
4069		} else {
4070			sin6.sin6_scope_id = 0;
4071		}
4072		if (connp->conn_recv_ancillary.crb_recvdstaddr) {
4073			sin6_t	sin6d;
4074
4075			sin6d = sin6_null;
4076			sin6.sin6_addr = connp->conn_laddr_v6;
4077			sin6d.sin6_port = connp->conn_lport;
4078			sin6d.sin6_family = AF_INET6;
4079			if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_laddr_v6))
4080				sin6d.sin6_scope_id = ifindex;
4081
4082			tpi_mp = mi_tpi_extconn_ind(NULL,
4083			    (char *)&sin6d, sizeof (sin6_t),
4084			    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
4085			    (char *)&sin6d, sizeof (sin6_t),
4086			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
4087		} else {
4088			tpi_mp = mi_tpi_conn_ind(NULL,
4089			    (char *)&sin6, sizeof (sin6_t),
4090			    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
4091			    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
4092		}
4093	}
4094
4095	tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
4096	return (tpi_mp);
4097}
4098
4099/* Handle a SYN on an AF_INET socket */
4100mblk_t *
4101tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, mblk_t *mp,
4102    ip_recv_attr_t *ira)
4103{
4104	tcp_t 		*ltcp = lconnp->conn_tcp;
4105	tcp_t		*tcp = connp->conn_tcp;
4106	sin_t		sin;
4107	mblk_t		*tpi_mp = NULL;
4108	tcp_stack_t	*tcps = tcp->tcp_tcps;
4109	ipha_t		*ipha;
4110
4111	ASSERT(ira->ira_flags & IRAF_IS_IPV4);
4112	ipha = (ipha_t *)mp->b_rptr;
4113
4114	connp->conn_ipversion = IPV4_VERSION;
4115	IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
4116	IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
4117	connp->conn_saddr_v6 = connp->conn_laddr_v6;
4118
4119	sin = sin_null;
4120	sin.sin_addr.s_addr = connp->conn_faddr_v4;
4121	sin.sin_port = connp->conn_fport;
4122	sin.sin_family = AF_INET;
4123	if (lconnp->conn_recv_ancillary.crb_recvdstaddr) {
4124		sin_t	sind;
4125
4126		sind = sin_null;
4127		sind.sin_addr.s_addr = connp->conn_laddr_v4;
4128		sind.sin_port = connp->conn_lport;
4129		sind.sin_family = AF_INET;
4130		tpi_mp = mi_tpi_extconn_ind(NULL,
4131		    (char *)&sind, sizeof (sin_t), (char *)&tcp,
4132		    (t_scalar_t)sizeof (intptr_t), (char *)&sind,
4133		    sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum);
4134	} else {
4135		tpi_mp = mi_tpi_conn_ind(NULL,
4136		    (char *)&sin, sizeof (sin_t),
4137		    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
4138		    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
4139	}
4140
4141	tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
4142	return (tpi_mp);
4143}
4144
4145/*
4146 * tcp_get_conn/tcp_free_conn
4147 *
4148 * tcp_get_conn is used to get a clean tcp connection structure.
4149 * It tries to reuse the connections put on the freelist by the
4150 * time_wait_collector failing which it goes to kmem_cache. This
4151 * way has two benefits compared to just allocating from and
4152 * freeing to kmem_cache.
4153 * 1) The time_wait_collector can free (which includes the cleanup)
4154 * outside the squeue. So when the interrupt comes, we have a clean
4155 * connection sitting in the freelist. Obviously, this buys us
4156 * performance.
4157 *
4158 * 2) Defence against DOS attack. Allocating a tcp/conn in tcp_input_listener
4159 * has multiple disadvantages - tying up the squeue during alloc.
4160 * But allocating the conn/tcp in IP land is also not the best since
4161 * we can't check the 'q' and 'q0' which are protected by squeue and
4162 * blindly allocate memory which might have to be freed here if we are
4163 * not allowed to accept the connection. By using the freelist and
4164 * putting the conn/tcp back in freelist, we don't pay a penalty for
4165 * allocating memory without checking 'q/q0' and freeing it if we can't
4166 * accept the connection.
4167 *
4168 * Care should be taken to put the conn back in the same squeue's freelist
4169 * from which it was allocated. Best results are obtained if conn is
4170 * allocated from listener's squeue and freed to the same. Time wait
4171 * collector will free up the freelist is the connection ends up sitting
4172 * there for too long.
4173 */
4174void *
4175tcp_get_conn(void *arg, tcp_stack_t *tcps)
4176{
4177	tcp_t			*tcp = NULL;
4178	conn_t			*connp = NULL;
4179	squeue_t		*sqp = (squeue_t *)arg;
4180	tcp_squeue_priv_t 	*tcp_time_wait;
4181	netstack_t		*ns;
4182	mblk_t			*tcp_rsrv_mp = NULL;
4183
4184	tcp_time_wait =
4185	    *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));
4186
4187	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
4188	tcp = tcp_time_wait->tcp_free_list;
4189	ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0));
4190	if (tcp != NULL) {
4191		tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
4192		tcp_time_wait->tcp_free_list_cnt--;
4193		mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
4194		tcp->tcp_time_wait_next = NULL;
4195		connp = tcp->tcp_connp;
4196		connp->conn_flags |= IPCL_REUSED;
4197
4198		ASSERT(tcp->tcp_tcps == NULL);
4199		ASSERT(connp->conn_netstack == NULL);
4200		ASSERT(tcp->tcp_rsrv_mp != NULL);
4201		ns = tcps->tcps_netstack;
4202		netstack_hold(ns);
4203		connp->conn_netstack = ns;
4204		connp->conn_ixa->ixa_ipst = ns->netstack_ip;
4205		tcp->tcp_tcps = tcps;
4206		ipcl_globalhash_insert(connp);
4207
4208		connp->conn_ixa->ixa_notify_cookie = tcp;
4209		ASSERT(connp->conn_ixa->ixa_notify == tcp_notify);
4210		connp->conn_recv = tcp_input_data;
4211		ASSERT(connp->conn_recvicmp == tcp_icmp_input);
4212		ASSERT(connp->conn_verifyicmp == tcp_verifyicmp);
4213		return ((void *)connp);
4214	}
4215	mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
4216	/*
4217	 * Pre-allocate the tcp_rsrv_mp. This mblk will not be freed until
4218	 * this conn_t/tcp_t is freed at ipcl_conn_destroy().
4219	 */
4220	tcp_rsrv_mp = allocb(0, BPRI_HI);
4221	if (tcp_rsrv_mp == NULL)
4222		return (NULL);
4223
4224	if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP,
4225	    tcps->tcps_netstack)) == NULL) {
4226		freeb(tcp_rsrv_mp);
4227		return (NULL);
4228	}
4229
4230	tcp = connp->conn_tcp;
4231	tcp->tcp_rsrv_mp = tcp_rsrv_mp;
4232	mutex_init(&tcp->tcp_rsrv_mp_lock, NULL, MUTEX_DEFAULT, NULL);
4233
4234	tcp->tcp_tcps = tcps;
4235
4236	connp->conn_recv = tcp_input_data;
4237	connp->conn_recvicmp = tcp_icmp_input;
4238	connp->conn_verifyicmp = tcp_verifyicmp;
4239
4240	/*
4241	 * Register tcp_notify to listen to capability changes detected by IP.
4242	 * This upcall is made in the context of the call to conn_ip_output
4243	 * thus it is inside the squeue.
4244	 */
4245	connp->conn_ixa->ixa_notify = tcp_notify;
4246	connp->conn_ixa->ixa_notify_cookie = tcp;
4247
4248	return ((void *)connp);
4249}
4250
4251/* BEGIN CSTYLED */
4252/*
4253 *
4254 * The sockfs ACCEPT path:
4255 * =======================
4256 *
4257 * The eager is now established in its own perimeter as soon as SYN is
4258 * received in tcp_input_listener(). When sockfs receives conn_ind, it
4259 * completes the accept processing on the acceptor STREAM. The sending
4260 * of conn_ind part is common for both sockfs listener and a TLI/XTI
4261 * listener but a TLI/XTI listener completes the accept processing
4262 * on the listener perimeter.
4263 *
4264 * Common control flow for 3 way handshake:
4265 * ----------------------------------------
4266 *
4267 * incoming SYN (listener perimeter)	-> tcp_input_listener()
4268 *
4269 * incoming SYN-ACK-ACK (eager perim) 	-> tcp_input_data()
4270 * send T_CONN_IND (listener perim)	-> tcp_send_conn_ind()
4271 *
4272 * Sockfs ACCEPT Path:
4273 * -------------------
4274 *
4275 * open acceptor stream (tcp_open allocates tcp_tli_accept()
4276 * as STREAM entry point)
4277 *
4278 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_tli_accept()
4279 *
4280 * tcp_tli_accept() extracts the eager and makes the q->q_ptr <-> eager
4281 * association (we are not behind eager's squeue but sockfs is protecting us
4282 * and no one knows about this stream yet. The STREAMS entry point q->q_info
4283 * is changed to point at tcp_wput().
4284 *
4285 * tcp_accept_common() sends any deferred eagers via tcp_send_pending() to
4286 * listener (done on listener's perimeter).
4287 *
4288 * tcp_tli_accept() calls tcp_accept_finish() on eagers perimeter to finish
4289 * accept.
4290 *
4291 * TLI/XTI client ACCEPT path:
4292 * ---------------------------
4293 *
4294 * soaccept() sends T_CONN_RES on the listener STREAM.
4295 *
4296 * tcp_tli_accept() -> tcp_accept_swap() complete the processing and send
4297 * a M_SETOPS mblk to eager perimeter to finish accept (tcp_accept_finish()).
4298 *
4299 * Locks:
4300 * ======
4301 *
4302 * listener->tcp_eager_lock protects the listeners->tcp_eager_next_q0 and
4303 * and listeners->tcp_eager_next_q.
4304 *
4305 * Referencing:
4306 * ============
4307 *
4308 * 1) We start out in tcp_input_listener by eager placing a ref on
4309 * listener and listener adding eager to listeners->tcp_eager_next_q0.
4310 *
4311 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to listener. Before
4312 * doing so we place a ref on the eager. This ref is finally dropped at the
4313 * end of tcp_accept_finish() while unwinding from the squeue, i.e. the
4314 * reference is dropped by the squeue framework.
4315 *
4316 * 3) The ref on listener placed in 1 above is dropped in tcp_accept_finish
4317 *
4318 * The reference must be released by the same entity that added the reference
4319 * In the above scheme, the eager is the entity that adds and releases the
4320 * references. Note that tcp_accept_finish executes in the squeue of the eager
4321 * (albeit after it is attached to the acceptor stream). Though 1. executes
4322 * in the listener's squeue, the eager is nascent at this point and the
4323 * reference can be considered to have been added on behalf of the eager.
4324 *
4325 * Eager getting a Reset or listener closing:
4326 * ==========================================
4327 *
4328 * Once the listener and eager are linked, the listener never does the unlink.
4329 * If the listener needs to close, tcp_eager_cleanup() is called which queues
4330 * a message on all eager perimeter. The eager then does the unlink, clears
4331 * any pointers to the listener's queue and drops the reference to the
4332 * listener. The listener waits in tcp_close outside the squeue until its
4333 * refcount has dropped to 1. This ensures that the listener has waited for
4334 * all eagers to clear their association with the listener.
4335 *
4336 * Similarly, if eager decides to go away, it can unlink itself and close.
4337 * When the T_CONN_RES comes down, we check if eager has closed. Note that
4338 * the reference to eager is still valid because of the extra ref we put
4339 * in tcp_send_conn_ind.
4340 *
4341 * Listener can always locate the eager under the protection
4342 * of the listener->tcp_eager_lock, and then do a refhold
4343 * on the eager during the accept processing.
4344 *
4345 * The acceptor stream accesses the eager in the accept processing
4346 * based on the ref placed on eager before sending T_conn_ind.
4347 * The only entity that can negate this refhold is a listener close
4348 * which is mutually exclusive with an active acceptor stream.
4349 *
4350 * Eager's reference on the listener
4351 * ===================================
4352 *
4353 * If the accept happens (even on a closed eager) the eager drops its
4354 * reference on the listener at the start of tcp_accept_finish. If the
4355 * eager is killed due to an incoming RST before the T_conn_ind is sent up,
4356 * the reference is dropped in tcp_closei_local. If the listener closes,
4357 * the reference is dropped in tcp_eager_kill. In all cases the reference
4358 * is dropped while executing in the eager's context (squeue).
4359 */
4360/* END CSTYLED */
4361
4362/* Process the SYN packet, mp, directed at the listener 'tcp' */
4363
4364/*
4365 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN.
4366 * tcp_input_data will not see any packets for listeners since the listener
4367 * has conn_recv set to tcp_input_listener.
4368 */
4369/* ARGSUSED */
4370void
4371tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
4372{
4373	tcpha_t		*tcpha;
4374	uint32_t	seg_seq;
4375	tcp_t		*eager;
4376	int		err;
4377	conn_t		*econnp = NULL;
4378	squeue_t	*new_sqp;
4379	mblk_t		*mp1;
4380	uint_t 		ip_hdr_len;
4381	conn_t		*lconnp = (conn_t *)arg;
4382	tcp_t		*listener = lconnp->conn_tcp;
4383	tcp_stack_t	*tcps = listener->tcp_tcps;
4384	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;
4385	uint_t		flags;
4386	mblk_t		*tpi_mp;
4387	uint_t		ifindex = ira->ira_ruifindex;
4388
4389	ip_hdr_len = ira->ira_ip_hdr_length;
4390	tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
4391	flags = (unsigned int)tcpha->tha_flags & 0xFF;
4392
4393	if (!(flags & TH_SYN)) {
4394		if ((flags & TH_RST) || (flags & TH_URG)) {
4395			freemsg(mp);
4396			return;
4397		}
4398		if (flags & TH_ACK) {
4399			/* Note this executes in listener's squeue */
4400			tcp_xmit_listeners_reset(mp, ira, ipst, lconnp);
4401			return;
4402		}
4403
4404		freemsg(mp);
4405		return;
4406	}
4407
4408	if (listener->tcp_state != TCPS_LISTEN)
4409		goto error2;
4410
4411	ASSERT(IPCL_IS_BOUND(lconnp));
4412
4413	mutex_enter(&listener->tcp_eager_lock);
4414	if (listener->tcp_conn_req_cnt_q >= listener->tcp_conn_req_max) {
4415		mutex_exit(&listener->tcp_eager_lock);
4416		TCP_STAT(tcps, tcp_listendrop);
4417		BUMP_MIB(&tcps->tcps_mib, tcpListenDrop);
4418		if (lconnp->conn_debug) {
4419			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
4420			    "tcp_input_listener: listen backlog (max=%d) "
4421			    "overflow (%d pending) on %s",
4422			    listener->tcp_conn_req_max,
4423			    listener->tcp_conn_req_cnt_q,
4424			    tcp_display(listener, NULL, DISP_PORT_ONLY));
4425		}
4426		goto error2;
4427	}
4428
4429	if (listener->tcp_conn_req_cnt_q0 >=
4430	    listener->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) {
4431		/*
4432		 * Q0 is full. Drop a pending half-open req from the queue
4433		 * to make room for the new SYN req. Also mark the time we
4434		 * drop a SYN.
4435		 *
4436		 * A more aggressive defense against SYN attack will
4437		 * be to set the "tcp_syn_defense" flag now.
4438		 */
4439		TCP_STAT(tcps, tcp_listendropq0);
4440		listener->tcp_last_rcv_lbolt = lbolt64;
4441		if (!tcp_drop_q0(listener)) {
4442			mutex_exit(&listener->tcp_eager_lock);
4443			BUMP_MIB(&tcps->tcps_mib, tcpListenDropQ0);
4444			if (lconnp->conn_debug) {
4445				(void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
4446				    "tcp_input_listener: listen half-open "
4447				    "queue (max=%d) full (%d pending) on %s",
4448				    tcps->tcps_conn_req_max_q0,
4449				    listener->tcp_conn_req_cnt_q0,
4450				    tcp_display(listener, NULL,
4451				    DISP_PORT_ONLY));
4452			}
4453			goto error2;
4454		}
4455	}
4456	mutex_exit(&listener->tcp_eager_lock);
4457
4458	/*
4459	 * IP sets ira_sqp to either the senders conn_sqp (for loopback)
4460	 * or based on the ring (for packets from GLD). Otherwise it is
4461	 * set based on lbolt i.e., a somewhat random number.
4462	 */
4463	ASSERT(ira->ira_sqp != NULL);
4464	new_sqp = ira->ira_sqp;
4465
4466	econnp = (conn_t *)tcp_get_conn(arg2, tcps);
4467	if (econnp == NULL)
4468		goto error2;
4469
4470	ASSERT(econnp->conn_netstack == lconnp->conn_netstack);
4471	econnp->conn_sqp = new_sqp;
4472	econnp->conn_initial_sqp = new_sqp;
4473	econnp->conn_ixa->ixa_sqp = new_sqp;
4474
4475	econnp->conn_fport = tcpha->tha_lport;
4476	econnp->conn_lport = tcpha->tha_fport;
4477
4478	err = conn_inherit_parent(lconnp, econnp);
4479	if (err != 0)
4480		goto error3;
4481
4482	ASSERT(OK_32PTR(mp->b_rptr));
4483	ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION ||
4484	    IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);
4485
4486	if (lconnp->conn_family == AF_INET) {
4487		ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
4488		tpi_mp = tcp_conn_create_v4(lconnp, econnp, mp, ira);
4489	} else {
4490		tpi_mp = tcp_conn_create_v6(lconnp, econnp, mp, ira);
4491	}
4492
4493	if (tpi_mp == NULL)
4494		goto error3;
4495
4496	eager = econnp->conn_tcp;
4497	eager->tcp_detached = B_TRUE;
4498	SOCK_CONNID_INIT(eager->tcp_connid);
4499
4500	tcp_init_values(eager);
4501
4502	ASSERT((econnp->conn_ixa->ixa_flags &
4503	    (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
4504	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)) ==
4505	    (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
4506	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO));
4507
4508	if (!tcps->tcps_dev_flow_ctl)
4509		econnp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;
4510
4511	/* Prepare for diffing against previous packets */
4512	eager->tcp_recvifindex = 0;
4513	eager->tcp_recvhops = 0xffffffffU;
4514
4515	if (!(ira->ira_flags & IRAF_IS_IPV4) && econnp->conn_bound_if == 0) {
4516		if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_faddr_v6) ||
4517		    IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6)) {
4518			econnp->conn_incoming_ifindex = ifindex;
4519			econnp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
4520			econnp->conn_ixa->ixa_scopeid = ifindex;
4521		}
4522	}
4523
4524	if ((ira->ira_flags & (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS)) ==
4525	    (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS) &&
4526	    tcps->tcps_rev_src_routes) {
4527		ipha_t *ipha = (ipha_t *)mp->b_rptr;
4528		ip_pkt_t *ipp = &econnp->conn_xmit_ipp;
4529
4530		/* Source routing option copyover (reverse it) */
4531		err = ip_find_hdr_v4(ipha, ipp, B_TRUE);
4532		if (err != 0) {
4533			freemsg(tpi_mp);
4534			goto error3;
4535		}
4536		ip_pkt_source_route_reverse_v4(ipp);
4537	}
4538
4539	ASSERT(eager->tcp_conn.tcp_eager_conn_ind == NULL);
4540	ASSERT(!eager->tcp_tconnind_started);
4541	/*
4542	 * If the SYN came with a credential, it's a loopback packet or a
4543	 * labeled packet; attach the credential to the TPI message.
4544	 */
4545	if (ira->ira_cred != NULL)
4546		mblk_setcred(tpi_mp, ira->ira_cred, ira->ira_cpid);
4547
4548	eager->tcp_conn.tcp_eager_conn_ind = tpi_mp;
4549
4550	/* Inherit the listener's SSL protection state */
4551	if ((eager->tcp_kssl_ent = listener->tcp_kssl_ent) != NULL) {
4552		kssl_hold_ent(eager->tcp_kssl_ent);
4553		eager->tcp_kssl_pending = B_TRUE;
4554	}
4555
4556	/* Inherit the listener's non-STREAMS flag */
4557	if (IPCL_IS_NONSTR(lconnp)) {
4558		econnp->conn_flags |= IPCL_NONSTR;
4559	}
4560
4561	ASSERT(eager->tcp_ordrel_mp == NULL);
4562
4563	if (!IPCL_IS_NONSTR(econnp)) {
4564		/*
4565		 * Pre-allocate the T_ordrel_ind mblk for TPI socket so that
4566		 * at close time, we will always have that to send up.
4567		 * Otherwise, we need to do special handling in case the
4568		 * allocation fails at that time.
4569		 */
4570		if ((eager->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL)
4571			goto error3;
4572	}
4573	/*
4574	 * Now that the IP addresses and ports are setup in econnp we
4575	 * can do the IPsec policy work.
4576	 */
4577	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
4578		if (lconnp->conn_policy != NULL) {
4579			/*
4580			 * Inherit the policy from the listener; use
4581			 * actions from ira
4582			 */
4583			if (!ip_ipsec_policy_inherit(econnp, lconnp, ira)) {
4584				CONN_DEC_REF(econnp);
4585				freemsg(mp);
4586				goto error3;
4587			}
4588		}
4589	}
4590
4591	/* Inherit various TCP parameters from the listener */
4592	eager->tcp_naglim = listener->tcp_naglim;
4593	eager->tcp_first_timer_threshold = listener->tcp_first_timer_threshold;
4594	eager->tcp_second_timer_threshold =
4595	    listener->tcp_second_timer_threshold;
4596	eager->tcp_first_ctimer_threshold =
4597	    listener->tcp_first_ctimer_threshold;
4598	eager->tcp_second_ctimer_threshold =
4599	    listener->tcp_second_ctimer_threshold;
4600
4601	/*
4602	 * tcp_set_destination() may set tcp_rwnd according to the route
4603	 * metrics. If it does not, the eager's receive window will be set
4604	 * to the listener's receive window later in this function.
4605	 */
4606	eager->tcp_rwnd = 0;
4607
4608	/*
4609	 * Inherit listener's tcp_init_cwnd.  Need to do this before
4610	 * calling tcp_process_options() which set the initial cwnd.
4611	 */
4612	eager->tcp_init_cwnd = listener->tcp_init_cwnd;
4613
4614	if (is_system_labeled()) {
4615		ip_xmit_attr_t *ixa = econnp->conn_ixa;
4616
4617		ASSERT(ira->ira_tsl != NULL);
4618		/* Discard any old label */
4619		if (ixa->ixa_free_flags & IXA_FREE_TSL) {
4620			ASSERT(ixa->ixa_tsl != NULL);
4621			label_rele(ixa->ixa_tsl);
4622			ixa->ixa_free_flags &= ~IXA_FREE_TSL;
4623			ixa->ixa_tsl = NULL;
4624		}
4625		if ((lconnp->conn_mlp_type != mlptSingle ||
4626		    lconnp->conn_mac_mode != CONN_MAC_DEFAULT) &&
4627		    ira->ira_tsl != NULL) {
4628			/*
4629			 * If this is an MLP connection or a MAC-Exempt
4630			 * connection with an unlabeled node, packets are to be
4631			 * exchanged using the security label of the received
4632			 * SYN packet instead of the server application's label.
4633			 * tsol_check_dest called from ip_set_destination
4634			 * might later update TSF_UNLABELED by replacing
4635			 * ixa_tsl with a new label.
4636			 */
4637			label_hold(ira->ira_tsl);
4638			ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl);
4639			DTRACE_PROBE2(mlp_syn_accept, conn_t *,
4640			    econnp, ts_label_t *, ixa->ixa_tsl)
4641		} else {
4642			ixa->ixa_tsl = crgetlabel(econnp->conn_cred);
4643			DTRACE_PROBE2(syn_accept, conn_t *,
4644			    econnp, ts_label_t *, ixa->ixa_tsl)
4645		}
4646		/*
4647		 * conn_connect() called from tcp_set_destination will verify
4648		 * the destination is allowed to receive packets at the
4649		 * security label of the SYN-ACK we are generating. As part of
4650		 * that, tsol_check_dest() may create a new effective label for
4651		 * this connection.
4652		 * Finally conn_connect() will call conn_update_label.
4653		 * All that remains for TCP to do is to call
4654		 * conn_build_hdr_template which is done as part of
4655		 * tcp_set_destination.
4656		 */
4657	}
4658
4659	/*
4660	 * Since we will clear tcp_listener before we clear tcp_detached
4661	 * in the accept code we need tcp_hard_binding aka tcp_accept_inprogress
4662	 * so we can tell a TCP_DETACHED_NONEAGER apart.
4663	 */
4664	eager->tcp_hard_binding = B_TRUE;
4665
4666	tcp_bind_hash_insert(&tcps->tcps_bind_fanout[
4667	    TCP_BIND_HASH(econnp->conn_lport)], eager, 0);
4668
4669	CL_INET_CONNECT(econnp, B_FALSE, err);
4670	if (err != 0) {
4671		tcp_bind_hash_remove(eager);
4672		goto error3;
4673	}
4674
4675	/*
4676	 * No need to check for multicast destination since ip will only pass
4677	 * up multicasts to those that have expressed interest
4678	 * TODO: what about rejecting broadcasts?
4679	 * Also check that source is not a multicast or broadcast address.
4680	 */
4681	eager->tcp_state = TCPS_SYN_RCVD;
4682	SOCK_CONNID_BUMP(eager->tcp_connid);
4683
4684	/*
4685	 * Adapt our mss, ttl, ... based on the remote address.
4686	 */
4687
4688	if (tcp_set_destination(eager) != 0) {
4689		BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails);
4690		/* Undo the bind_hash_insert */
4691		tcp_bind_hash_remove(eager);
4692		goto error3;
4693	}
4694
4695	/* Process all TCP options. */
4696	tcp_process_options(eager, tcpha);
4697
4698	/* Is the other end ECN capable? */
4699	if (tcps->tcps_ecn_permitted >= 1 &&
4700	    (tcpha->tha_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) {
4701		eager->tcp_ecn_ok = B_TRUE;
4702	}
4703
4704	/*
4705	 * The listener's conn_rcvbuf should be the default window size or a
4706	 * window size changed via SO_RCVBUF option. First round up the
4707	 * eager's tcp_rwnd to the nearest MSS. Then find out the window
4708	 * scale option value if needed. Call tcp_rwnd_set() to finish the
4709	 * setting.
4710	 *
4711	 * Note if there is a rpipe metric associated with the remote host,
4712	 * we should not inherit receive window size from listener.
4713	 */
4714	eager->tcp_rwnd = MSS_ROUNDUP(
4715	    (eager->tcp_rwnd == 0 ? econnp->conn_rcvbuf :
4716	    eager->tcp_rwnd), eager->tcp_mss);
4717	if (eager->tcp_snd_ws_ok)
4718		tcp_set_ws_value(eager);
4719	/*
4720	 * Note that this is the only place tcp_rwnd_set() is called for
4721	 * accepting a connection.  We need to call it here instead of
4722	 * after the 3-way handshake because we need to tell the other
4723	 * side our rwnd in the SYN-ACK segment.
4724	 */
4725	(void) tcp_rwnd_set(eager, eager->tcp_rwnd);
4726
4727	ASSERT(eager->tcp_connp->conn_rcvbuf != 0 &&
4728	    eager->tcp_connp->conn_rcvbuf == eager->tcp_rwnd);
4729
4730	ASSERT(econnp->conn_rcvbuf != 0 &&
4731	    econnp->conn_rcvbuf == eager->tcp_rwnd);
4732
4733	/* Put a ref on the listener for the eager. */
4734	CONN_INC_REF(lconnp);
4735	mutex_enter(&listener->tcp_eager_lock);
4736	listener->tcp_eager_next_q0->tcp_eager_prev_q0 = eager;
4737	eager->tcp_eager_next_q0 = listener->tcp_eager_next_q0;
4738	listener->tcp_eager_next_q0 = eager;
4739	eager->tcp_eager_prev_q0 = listener;
4740
4741	/* Set tcp_listener before adding it to tcp_conn_fanout */
4742	eager->tcp_listener = listener;
4743	eager->tcp_saved_listener = listener;
4744
4745	/*
4746	 * Tag this detached tcp vector for later retrieval
4747	 * by our listener client in tcp_accept().
4748	 */
4749	eager->tcp_conn_req_seqnum = listener->tcp_conn_req_seqnum;
4750	listener->tcp_conn_req_cnt_q0++;
4751	if (++listener->tcp_conn_req_seqnum == -1) {
4752		/*
4753		 * -1 is "special" and defined in TPI as something
4754		 * that should never be used in T_CONN_IND
4755		 */
4756		++listener->tcp_conn_req_seqnum;
4757	}
4758	mutex_exit(&listener->tcp_eager_lock);
4759
4760	if (listener->tcp_syn_defense) {
4761		/* Don't drop the SYN that comes from a good IP source */
4762		ipaddr_t *addr_cache;
4763
4764		addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache);
4765		if (addr_cache != NULL && econnp->conn_faddr_v4 ==
4766		    addr_cache[IP_ADDR_CACHE_HASH(econnp->conn_faddr_v4)]) {
4767			eager->tcp_dontdrop = B_TRUE;
4768		}
4769	}
4770
4771	/*
4772	 * We need to insert the eager in its own perimeter but as soon
4773	 * as we do that, we expose the eager to the classifier and
4774	 * should not touch any field outside the eager's perimeter.
4775	 * So do all the work necessary before inserting the eager
4776	 * in its own perimeter. Be optimistic that conn_connect()
4777	 * will succeed but undo everything if it fails.
4778	 */
4779	seg_seq = ntohl(tcpha->tha_seq);
4780	eager->tcp_irs = seg_seq;
4781	eager->tcp_rack = seg_seq;
4782	eager->tcp_rnxt = seg_seq + 1;
4783	eager->tcp_tcpha->tha_ack = htonl(eager->tcp_rnxt);
4784	BUMP_MIB(&tcps->tcps_mib, tcpPassiveOpens);
4785	eager->tcp_state = TCPS_SYN_RCVD;
4786	mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss,
4787	    NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE);
4788	if (mp1 == NULL) {
4789		/*
4790		 * Increment the ref count as we are going to
4791		 * enqueueing an mp in squeue
4792		 */
4793		CONN_INC_REF(econnp);
4794		goto error;
4795	}
4796
4797	/*
4798	 * We need to start the rto timer. In normal case, we start
4799	 * the timer after sending the packet on the wire (or at
4800	 * least believing that packet was sent by waiting for
4801	 * conn_ip_output() to return). Since this is the first packet
4802	 * being sent on the wire for the eager, our initial tcp_rto
4803	 * is at least tcp_rexmit_interval_min which is a fairly
4804	 * large value to allow the algorithm to adjust slowly to large
4805	 * fluctuations of RTT during first few transmissions.
4806	 *
4807	 * Starting the timer first and then sending the packet in this
4808	 * case shouldn't make much difference since tcp_rexmit_interval_min
4809	 * is of the order of several 100ms and starting the timer
4810	 * first and then sending the packet will result in difference
4811	 * of few micro seconds.
4812	 *
4813	 * Without this optimization, we are forced to hold the fanout
4814	 * lock across the ipcl_bind_insert() and sending the packet
4815	 * so that we don't race against an incoming packet (maybe RST)
4816	 * for this eager.
4817	 *
4818	 * It is necessary to acquire an extra reference on the eager
4819	 * at this point and hold it until after tcp_send_data() to
4820	 * ensure against an eager close race.
4821	 */
4822
4823	CONN_INC_REF(econnp);
4824
4825	TCP_TIMER_RESTART(eager, eager->tcp_rto);
4826
4827	/*
4828	 * Insert the eager in its own perimeter now. We are ready to deal
4829	 * with any packets on eager.
4830	 */
4831	if (ipcl_conn_insert(econnp) != 0)
4832		goto error;
4833
4834	/*
4835	 * Send the SYN-ACK. Can't use tcp_send_data since we can't update
4836	 * pmtu etc; we are not on the eager's squeue
4837	 */
4838	ASSERT(econnp->conn_ixa->ixa_notify_cookie == econnp->conn_tcp);
4839	(void) conn_ip_output(mp1, econnp->conn_ixa);
4840	CONN_DEC_REF(econnp);
4841	freemsg(mp);
4842
4843	return;
4844error:
4845	freemsg(mp1);
4846	eager->tcp_closemp_used = B_TRUE;
4847	TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
4848	mp1 = &eager->tcp_closemp;
4849	SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_eager_kill,
4850	    econnp, NULL, SQ_FILL, SQTAG_TCP_CONN_REQ_2);
4851
4852	/*
4853	 * If a connection already exists, send the mp to that connections so
4854	 * that it can be appropriately dealt with.
4855	 */
4856	ipst = tcps->tcps_netstack->netstack_ip;
4857
4858	if ((econnp = ipcl_classify(mp, ira, ipst)) != NULL) {
4859		if (!IPCL_IS_CONNECTED(econnp)) {
4860			/*
4861			 * Something bad happened. ipcl_conn_insert()
4862			 * failed because a connection already existed
4863			 * in connected hash but we can't find it
4864			 * anymore (someone blew it away). Just
4865			 * free this message and hopefully remote
4866			 * will retransmit at which time the SYN can be
4867			 * treated as a new connection or dealth with
4868			 * a TH_RST if a connection already exists.
4869			 */
4870			CONN_DEC_REF(econnp);
4871			freemsg(mp);
4872		} else {
4873			SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data,
4874			    econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1);
4875		}
4876	} else {
4877		/* Nobody wants this packet */
4878		freemsg(mp);
4879	}
4880	return;
4881error3:
4882	CONN_DEC_REF(econnp);
4883error2:
4884	freemsg(mp);
4885}
4886
4887/*
4888 * In an ideal case of vertical partition in NUMA architecture, its
4889 * beneficial to have the listener and all the incoming connections
4890 * tied to the same squeue. The other constraint is that incoming
4891 * connections should be tied to the squeue attached to interrupted
4892 * CPU for obvious locality reason so this leaves the listener to
4893 * be tied to the same squeue. Our only problem is that when listener
4894 * is binding, the CPU that will get interrupted by the NIC whose
4895 * IP address the listener is binding to is not even known. So
4896 * the code below allows us to change that binding at the time the
4897 * CPU is interrupted by virtue of incoming connection's squeue.
4898 *
4899 * This is usefull only in case of a listener bound to a specific IP
4900 * address. For other kind of listeners, they get bound the
4901 * very first time and there is no attempt to rebind them.
4902 */
4903void
4904tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2,
4905    ip_recv_attr_t *ira)
4906{
4907	conn_t		*connp = (conn_t *)arg;
4908	squeue_t	*sqp = (squeue_t *)arg2;
4909	squeue_t	*new_sqp;
4910	uint32_t	conn_flags;
4911
4912	/*
4913	 * IP sets ira_sqp to either the senders conn_sqp (for loopback)
4914	 * or based on the ring (for packets from GLD). Otherwise it is
4915	 * set based on lbolt i.e., a somewhat random number.
4916	 */
4917	ASSERT(ira->ira_sqp != NULL);
4918	new_sqp = ira->ira_sqp;
4919
4920	if (connp->conn_fanout == NULL)
4921		goto done;
4922
4923	if (!(connp->conn_flags & IPCL_FULLY_BOUND)) {
4924		mutex_enter(&connp->conn_fanout->connf_lock);
4925		mutex_enter(&connp->conn_lock);
4926		/*
4927		 * No one from read or write side can access us now
4928		 * except for already queued packets on this squeue.
4929		 * But since we haven't changed the squeue yet, they
4930		 * can't execute. If they are processed after we have
4931		 * changed the squeue, they are sent back to the
4932		 * correct squeue down below.
4933		 * But a listner close can race with processing of
4934		 * incoming SYN. If incoming SYN processing changes
4935		 * the squeue then the listener close which is waiting
4936		 * to enter the squeue would operate on the wrong
4937		 * squeue. Hence we don't change the squeue here unless
4938		 * the refcount is exactly the minimum refcount. The
4939		 * minimum refcount of 4 is counted as - 1 each for
4940		 * TCP and IP, 1 for being in the classifier hash, and
4941		 * 1 for the mblk being processed.
4942		 */
4943
4944		if (connp->conn_ref != 4 ||
4945		    connp->conn_tcp->tcp_state != TCPS_LISTEN) {
4946			mutex_exit(&connp->conn_lock);
4947			mutex_exit(&connp->conn_fanout->connf_lock);
4948			goto done;
4949		}
4950		if (connp->conn_sqp != new_sqp) {
4951			while (connp->conn_sqp != new_sqp)
4952				(void) casptr(&connp->conn_sqp, sqp, new_sqp);
4953			/* No special MT issues for outbound ixa_sqp hint */
4954			connp->conn_ixa->ixa_sqp = new_sqp;
4955		}
4956
4957		do {
4958			conn_flags = connp->conn_flags;
4959			conn_flags |= IPCL_FULLY_BOUND;
4960			(void) cas32(&connp->conn_flags, connp->conn_flags,
4961			    conn_flags);
4962		} while (!(connp->conn_flags & IPCL_FULLY_BOUND));
4963
4964		mutex_exit(&connp->conn_fanout->connf_lock);
4965		mutex_exit(&connp->conn_lock);
4966
4967		/*
4968		 * Assume we have picked a good squeue for the listener. Make
4969		 * subsequent SYNs not try to change the squeue.
4970		 */
4971		connp->conn_recv = tcp_input_listener;
4972	}
4973
4974done:
4975	if (connp->conn_sqp != sqp) {
4976		CONN_INC_REF(connp);
4977		SQUEUE_ENTER_ONE(connp->conn_sqp, mp, connp->conn_recv, connp,
4978		    ira, SQ_FILL, SQTAG_TCP_CONN_REQ_UNBOUND);
4979	} else {
4980		tcp_input_listener(connp, mp, sqp, ira);
4981	}
4982}
4983
4984/*
4985 * Successful connect request processing begins when our client passes
4986 * a T_CONN_REQ message into tcp_wput(), which performs function calls into
4987 * IP and the passes a T_OK_ACK (or T_ERROR_ACK upstream).
4988 *
4989 * After various error checks are completed, tcp_tpi_connect() lays
4990 * the target address and port into the composite header template.
4991 * Then we ask IP for information, including a source address if we didn't
4992 * already have one. Finally we prepare to send the SYN packet, and then
4993 * send up the T_OK_ACK reply message.
4994 */
4995static void
4996tcp_tpi_connect(tcp_t *tcp, mblk_t *mp)
4997{
4998	sin_t		*sin;
4999	struct T_conn_req	*tcr;
5000	struct sockaddr	*sa;
5001	socklen_t	len;
5002	int		error;
5003	cred_t		*cr;
5004	pid_t		cpid;
5005	conn_t		*connp = tcp->tcp_connp;
5006	queue_t		*q = connp->conn_wq;
5007
5008	/*
5009	 * All Solaris components should pass a db_credp
5010	 * for this TPI message, hence we ASSERT.
5011	 * But in case there is some other M_PROTO that looks
5012	 * like a TPI message sent by some other kernel
5013	 * component, we check and return an error.
5014	 */
5015	cr = msg_getcred(mp, &cpid);
5016	ASSERT(cr != NULL);
5017	if (cr == NULL) {
5018		tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
5019		return;
5020	}
5021
5022	tcr = (struct T_conn_req *)mp->b_rptr;
5023
5024	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
5025	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) {
5026		tcp_err_ack(tcp, mp, TPROTO, 0);
5027		return;
5028	}
5029
5030	/*
5031	 * Pre-allocate the T_ordrel_ind mblk so that at close time, we
5032	 * will always have that to send up.  Otherwise, we need to do
5033	 * special handling in case the allocation fails at that time.
5034	 * If the end point is TPI, the tcp_t can be reused and the
5035	 * tcp_ordrel_mp may be allocated already.
5036	 */
5037	if (tcp->tcp_ordrel_mp == NULL) {
5038		if ((tcp->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL) {
5039			tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
5040			return;
5041		}
5042	}
5043
5044	/*
5045	 * Determine packet type based on type of address passed in
5046	 * the request should contain an IPv4 or IPv6 address.
5047	 * Make sure that address family matches the type of
5048	 * family of the address passed down.
5049	 */
5050	switch (tcr->DEST_length) {
5051	default:
5052		tcp_err_ack(tcp, mp, TBADADDR, 0);
5053		return;
5054
5055	case (sizeof (sin_t) - sizeof (sin->sin_zero)): {
5056		/*
5057		 * XXX: The check for valid DEST_length was not there
5058		 * in earlier releases and some buggy
5059		 * TLI apps (e.g Sybase) got away with not feeding
5060		 * in sin_zero part of address.
5061		 * We allow that bug to keep those buggy apps humming.
5062		 * Test suites require the check on DEST_length.
5063		 * We construct a new mblk with valid DEST_length
5064		 * free the original so the rest of the code does
5065		 * not have to keep track of this special shorter
5066		 * length address case.
5067		 */
5068		mblk_t *nmp;
5069		struct T_conn_req *ntcr;
5070		sin_t *nsin;
5071
5072		nmp = allocb(sizeof (struct T_conn_req) + sizeof (sin_t) +
5073		    tcr->OPT_length, BPRI_HI);
5074		if (nmp == NULL) {
5075			tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
5076			return;
5077		}
5078		ntcr = (struct T_conn_req *)nmp->b_rptr;
5079		bzero(ntcr, sizeof (struct T_conn_req)); /* zero fill */
5080		ntcr->PRIM_type = T_CONN_REQ;
5081		ntcr->DEST_length = sizeof (sin_t);
5082		ntcr->DEST_offset = sizeof (struct T_conn_req);
5083
5084		nsin = (sin_t *)((uchar_t *)ntcr + ntcr->DEST_offset);
5085		*nsin = sin_null;
5086		/* Get pointer to shorter address to copy from original mp */
5087		sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset,
5088		    tcr->DEST_length); /* extract DEST_length worth of sin_t */
5089		if (sin == NULL || !OK_32PTR((char *)sin)) {
5090			freemsg(nmp);
5091			tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
5092			return;
5093		}
5094		nsin->sin_family = sin->sin_family;
5095		nsin->sin_port = sin->sin_port;
5096		nsin->sin_addr = sin->sin_addr;
5097		/* Note:nsin->sin_zero zero-fill with sin_null assign above */
5098		nmp->b_wptr = (uchar_t *)&nsin[1];
5099		if (tcr->OPT_length != 0) {
5100			ntcr->OPT_length = tcr->OPT_length;
5101			ntcr->OPT_offset = nmp->b_wptr - nmp->b_rptr;
5102			bcopy((uchar_t *)tcr + tcr->OPT_offset,
5103			    (uchar_t *)ntcr + ntcr->OPT_offset,
5104			    tcr->OPT_length);
5105			nmp->b_wptr += tcr->OPT_length;
5106		}
5107		freemsg(mp);	/* original mp freed */
5108		mp = nmp;	/* re-initialize original variables */
5109		tcr = ntcr;
5110	}
5111	/* FALLTHRU */
5112
5113	case sizeof (sin_t):
5114		sa = (struct sockaddr *)mi_offset_param(mp, tcr->DEST_offset,
5115		    sizeof (sin_t));
5116		len = sizeof (sin_t);
5117		break;
5118
5119	case sizeof (sin6_t):
5120		sa = (struct sockaddr *)mi_offset_param(mp, tcr->DEST_offset,
5121		    sizeof (sin6_t));
5122		len = sizeof (sin6_t);
5123		break;
5124	}
5125
5126	error = proto_verify_ip_addr(connp->conn_family, sa, len);
5127	if (error != 0) {
5128		tcp_err_ack(tcp, mp, TSYSERR, error);
5129		return;
5130	}
5131
5132	/*
5133	 * TODO: If someone in TCPS_TIME_WAIT has this dst/port we
5134	 * should key on their sequence number and cut them loose.
5135	 */
5136
5137	/*
5138	 * If options passed in, feed it for verification and handling
5139	 */
5140	if (tcr->OPT_length != 0) {
5141		mblk_t	*ok_mp;
5142		mblk_t	*discon_mp;
5143		mblk_t  *conn_opts_mp;
5144		int t_error, sys_error, do_disconnect;
5145
5146		conn_opts_mp = NULL;
5147
5148		if (tcp_conprim_opt_process(tcp, mp,
5149		    &do_disconnect, &t_error, &sys_error) < 0) {
5150			if (do_disconnect) {
5151				ASSERT(t_error == 0 && sys_error == 0);
5152				discon_mp = mi_tpi_discon_ind(NULL,
5153				    ECONNREFUSED, 0);
5154				if (!discon_mp) {
5155					tcp_err_ack_prim(tcp, mp, T_CONN_REQ,
5156					    TSYSERR, ENOMEM);
5157					return;
5158				}
5159				ok_mp = mi_tpi_ok_ack_alloc(mp);
5160				if (!ok_mp) {
5161					tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
5162					    TSYSERR, ENOMEM);
5163					return;
5164				}
5165				qreply(q, ok_mp);
5166				qreply(q, discon_mp); /* no flush! */
5167			} else {
5168				ASSERT(t_error != 0);
5169				tcp_err_ack_prim(tcp, mp, T_CONN_REQ, t_error,
5170				    sys_error);
5171			}
5172			return;
5173		}
5174		/*
5175		 * Success in setting options, the mp option buffer represented
5176		 * by OPT_length/offset has been potentially modified and
5177		 * contains results of option processing. We copy it in
5178		 * another mp to save it for potentially influencing returning
5179		 * it in T_CONN_CONN.
5180		 */
5181		if (tcr->OPT_length != 0) { /* there are resulting options */
5182			conn_opts_mp = copyb(mp);
5183			if (!conn_opts_mp) {
5184				tcp_err_ack_prim(tcp, mp, T_CONN_REQ,
5185				    TSYSERR, ENOMEM);
5186				return;
5187			}
5188			ASSERT(tcp->tcp_conn.tcp_opts_conn_req == NULL);
5189			tcp->tcp_conn.tcp_opts_conn_req = conn_opts_mp;
5190			/*
5191			 * Note:
5192			 * These resulting option negotiation can include any
5193			 * end-to-end negotiation options but there no such
5194			 * thing (yet?) in our TCP/IP.
5195			 */
5196		}
5197	}
5198
5199	/* call the non-TPI version */
5200	error = tcp_do_connect(tcp->tcp_connp, sa, len, cr, cpid);
5201	if (error < 0) {
5202		mp = mi_tpi_err_ack_alloc(mp, -error, 0);
5203	} else if (error > 0) {
5204		mp = mi_tpi_err_ack_alloc(mp, TSYSERR, error);
5205	} else {
5206		mp = mi_tpi_ok_ack_alloc(mp);
5207	}
5208
5209	/*
5210	 * Note: Code below is the "failure" case
5211	 */
5212	/* return error ack and blow away saved option results if any */
5213connect_failed:
5214	if (mp != NULL)
5215		putnext(connp->conn_rq, mp);
5216	else {
5217		tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
5218		    TSYSERR, ENOMEM);
5219	}
5220}
5221
5222/*
5223 * Handle connect to IPv4 destinations, including connections for AF_INET6
5224 * sockets connecting to IPv4 mapped IPv6 destinations.
5225 * Returns zero if OK, a positive errno, or a negative TLI error.
5226 */
5227static int
5228tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp, in_port_t dstport,
5229    uint_t srcid)
5230{
5231	ipaddr_t 	dstaddr = *dstaddrp;
5232	uint16_t 	lport;
5233	conn_t		*connp = tcp->tcp_connp;
5234	tcp_stack_t	*tcps = tcp->tcp_tcps;
5235	int		error;
5236
5237	ASSERT(connp->conn_ipversion == IPV4_VERSION);
5238
5239	/* Check for attempt to connect to INADDR_ANY */
5240	if (dstaddr == INADDR_ANY)  {
5241		/*
5242		 * SunOS 4.x and 4.3 BSD allow an application
5243		 * to connect a TCP socket to INADDR_ANY.
5244		 * When they do this, the kernel picks the
5245		 * address of one interface and uses it
5246		 * instead.  The kernel usually ends up
5247		 * picking the address of the loopback
5248		 * interface.  This is an undocumented feature.
5249		 * However, we provide the same thing here
5250		 * in order to have source and binary
5251		 * compatibility with SunOS 4.x.
5252		 * Update the T_CONN_REQ (sin/sin6) since it is used to
5253		 * generate the T_CONN_CON.
5254		 */
5255		dstaddr = htonl(INADDR_LOOPBACK);
5256		*dstaddrp = dstaddr;
5257	}
5258
5259	/* Handle __sin6_src_id if socket not bound to an IP address */
5260	if (srcid != 0 && connp->conn_laddr_v4 == INADDR_ANY) {
5261		ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
5262		    IPCL_ZONEID(connp), tcps->tcps_netstack);
5263		connp->conn_saddr_v6 = connp->conn_laddr_v6;
5264	}
5265
5266	IN6_IPADDR_TO_V4MAPPED(dstaddr, &connp->conn_faddr_v6);
5267	connp->conn_fport = dstport;
5268
5269	/*
5270	 * At this point the remote destination address and remote port fields
5271	 * in the tcp-four-tuple have been filled in the tcp structure. Now we
5272	 * have to see which state tcp was in so we can take appropriate action.
5273	 */
5274	if (tcp->tcp_state == TCPS_IDLE) {
5275		/*
5276		 * We support a quick connect capability here, allowing
5277		 * clients to transition directly from IDLE to SYN_SENT
5278		 * tcp_bindi will pick an unused port, insert the connection
5279		 * in the bind hash and transition to BOUND state.
5280		 */
5281		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
5282		    tcp, B_TRUE);
5283		lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
5284		    B_FALSE, B_FALSE);
5285		if (lport == 0)
5286			return (-TNOADDR);
5287	}
5288
5289	/*
5290	 * Lookup the route to determine a source address and the uinfo.
5291	 * If there was a source route we have tcp_ipha->ipha_dst as the first
5292	 * hop.
5293	 * Setup TCP parameters based on the metrics/DCE.
5294	 */
5295	error = tcp_set_destination(tcp);
5296	if (error != 0)
5297		return (error);
5298
5299	/*
5300	 * Don't let an endpoint connect to itself.
5301	 */
5302	if (connp->conn_faddr_v4 == connp->conn_laddr_v4 &&
5303	    connp->conn_fport == connp->conn_lport)
5304		return (-TBADADDR);
5305
5306	tcp->tcp_state = TCPS_SYN_SENT;
5307
5308	return (ipcl_conn_insert_v4(connp));
5309}
5310
5311/*
5312 * Handle connect to IPv6 destinations.
5313 * Returns zero if OK, a positive errno, or a negative TLI error.
5314 */
5315static int
5316tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp, in_port_t dstport,
5317    uint32_t flowinfo, uint_t srcid, uint32_t scope_id)
5318{
5319	uint16_t 	lport;
5320	conn_t		*connp = tcp->tcp_connp;
5321	tcp_stack_t	*tcps = tcp->tcp_tcps;
5322	int		error;
5323
5324	ASSERT(connp->conn_family == AF_INET6);
5325
5326	/*
5327	 * If we're here, it means that the destination address is a native
5328	 * IPv6 address.  Return an error if conn_ipversion is not IPv6.  A
5329	 * reason why it might not be IPv6 is if the socket was bound to an
5330	 * IPv4-mapped IPv6 address.
5331	 */
5332	if (connp->conn_ipversion != IPV6_VERSION)
5333		return (-TBADADDR);
5334
5335	/*
5336	 * Interpret a zero destination to mean loopback.
5337	 * Update the T_CONN_REQ (sin/sin6) since it is used to
5338	 * generate the T_CONN_CON.
5339	 */
5340	if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp))
5341		*dstaddrp = ipv6_loopback;
5342
5343	/* Handle __sin6_src_id if socket not bound to an IP address */
5344	if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6)) {
5345		ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
5346		    IPCL_ZONEID(connp), tcps->tcps_netstack);
5347		connp->conn_saddr_v6 = connp->conn_laddr_v6;
5348	}
5349
5350	/*
5351	 * Take care of the scope_id now.
5352	 */
5353	if (scope_id != 0 && IN6_IS_ADDR_LINKSCOPE(dstaddrp)) {
5354		connp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
5355		connp->conn_ixa->ixa_scopeid = scope_id;
5356	} else {
5357		connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
5358	}
5359
5360	connp->conn_flowinfo = flowinfo;
5361	connp->conn_faddr_v6 = *dstaddrp;
5362	connp->conn_fport = dstport;
5363
5364	/*
5365	 * At this point the remote destination address and remote port fields
5366	 * in the tcp-four-tuple have been filled in the tcp structure. Now we
5367	 * have to see which state tcp was in so we can take appropriate action.
5368	 */
5369	if (tcp->tcp_state == TCPS_IDLE) {
5370		/*
5371		 * We support a quick connect capability here, allowing
5372		 * clients to transition directly from IDLE to SYN_SENT
5373		 * tcp_bindi will pick an unused port, insert the connection
5374		 * in the bind hash and transition to BOUND state.
5375		 */
5376		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
5377		    tcp, B_TRUE);
5378		lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
5379		    B_FALSE, B_FALSE);
5380		if (lport == 0)
5381			return (-TNOADDR);
5382	}
5383
5384	/*
5385	 * Lookup the route to determine a source address and the uinfo.
5386	 * If there was a source route we have tcp_ip6h->ip6_dst as the first
5387	 * hop.
5388	 * Setup TCP parameters based on the metrics/DCE.
5389	 */
5390	error = tcp_set_destination(tcp);
5391	if (error != 0)
5392		return (error);
5393
5394	/*
5395	 * Don't let an endpoint connect to itself.
5396	 */
5397	if (IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6, &connp->conn_laddr_v6) &&
5398	    connp->conn_fport == connp->conn_lport)
5399		return (-TBADADDR);
5400
5401	tcp->tcp_state = TCPS_SYN_SENT;
5402
5403	return (ipcl_conn_insert_v6(connp));
5404}
5405
5406/*
5407 * Disconnect
5408 * Note that unlike other functions this returns a positive tli error
5409 * when it fails; it never returns an errno.
5410 */
5411static int
5412tcp_disconnect_common(tcp_t *tcp, t_scalar_t seqnum)
5413{
5414	tcp_t	*ltcp = NULL;
5415	conn_t		*lconnp;
5416	tcp_stack_t	*tcps = tcp->tcp_tcps;
5417	conn_t		*connp = tcp->tcp_connp;
5418
5419	/*
5420	 * Right now, upper modules pass down a T_DISCON_REQ to TCP,
5421	 * when the stream is in BOUND state. Do not send a reset,
5422	 * since the destination IP address is not valid, and it can
5423	 * be the initialized value of all zeros (broadcast address).
5424	 */
5425	if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_hard_binding) {
5426		if (connp->conn_debug) {
5427			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
5428			    "tcp_disconnect: bad state, %d", tcp->tcp_state);
5429		}
5430		return (TOUTSTATE);
5431	}
5432
5433
5434	if (seqnum == -1 || tcp->tcp_conn_req_max == 0) {
5435
5436		/*
5437		 * According to TPI, for non-listeners, ignore seqnum
5438		 * and disconnect.
5439		 * Following interpretation of -1 seqnum is historical
5440		 * and implied TPI ? (TPI only states that for T_CONN_IND,
5441		 * a valid seqnum should not be -1).
5442		 *
5443		 *	-1 means disconnect everything
5444		 *	regardless even on a listener.
5445		 */
5446
5447		int old_state = tcp->tcp_state;
5448		ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
5449
5450		/*
5451		 * The connection can't be on the tcp_time_wait_head list
5452		 * since it is not detached.
5453		 */
5454		ASSERT(tcp->tcp_time_wait_next == NULL);
5455		ASSERT(tcp->tcp_time_wait_prev == NULL);
5456		ASSERT(tcp->tcp_time_wait_expire == 0);
5457		ltcp = NULL;
5458		/*
5459		 * If it used to be a listener, check to make sure no one else
5460		 * has taken the port before switching back to LISTEN state.
5461		 */
5462		if (connp->conn_ipversion == IPV4_VERSION) {
5463			lconnp = ipcl_lookup_listener_v4(connp->conn_lport,
5464			    connp->conn_laddr_v4, IPCL_ZONEID(connp), ipst);
5465			if (lconnp != NULL)
5466				ltcp = lconnp->conn_tcp;
5467		} else {
5468			uint_t ifindex = 0;
5469
5470			if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)
5471				ifindex = connp->conn_ixa->ixa_scopeid;
5472
5473			/* Allow conn_bound_if listeners? */
5474			lconnp = ipcl_lookup_listener_v6(connp->conn_lport,
5475			    &connp->conn_laddr_v6, ifindex, IPCL_ZONEID(connp),
5476			    ipst);
5477			if (lconnp != NULL)
5478				ltcp = lconnp->conn_tcp;
5479		}
5480		if (tcp->tcp_conn_req_max && ltcp == NULL) {
5481			tcp->tcp_state = TCPS_LISTEN;
5482		} else if (old_state > TCPS_BOUND) {
5483			tcp->tcp_conn_req_max = 0;
5484			tcp->tcp_state = TCPS_BOUND;
5485		}
5486		if (ltcp != NULL)
5487			CONN_DEC_REF(lconnp);
5488		if (old_state == TCPS_SYN_SENT || old_state == TCPS_SYN_RCVD) {
5489			BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails);
5490		} else if (old_state == TCPS_ESTABLISHED ||
5491		    old_state == TCPS_CLOSE_WAIT) {
5492			BUMP_MIB(&tcps->tcps_mib, tcpEstabResets);
5493		}
5494
5495		if (tcp->tcp_fused)
5496			tcp_unfuse(tcp);
5497
5498		mutex_enter(&tcp->tcp_eager_lock);
5499		if ((tcp->tcp_conn_req_cnt_q0 != 0) ||
5500		    (tcp->tcp_conn_req_cnt_q != 0)) {
5501			tcp_eager_cleanup(tcp, 0);
5502		}
5503		mutex_exit(&tcp->tcp_eager_lock);
5504
5505		tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt,
5506		    tcp->tcp_rnxt, TH_RST | TH_ACK);
5507
5508		tcp_reinit(tcp);
5509
5510		return (0);
5511	} else if (!tcp_eager_blowoff(tcp, seqnum)) {
5512		return (TBADSEQ);
5513	}
5514	return (0);
5515}
5516
5517/*
5518 * Our client hereby directs us to reject the connection request
5519 * that tcp_input_listener() marked with 'seqnum'.  Rejection consists
5520 * of sending the appropriate RST, not an ICMP error.
5521 */
5522static void
5523tcp_disconnect(tcp_t *tcp, mblk_t *mp)
5524{
5525	t_scalar_t seqnum;
5526	int	error;
5527	conn_t	*connp = tcp->tcp_connp;
5528
5529	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
5530	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) {
5531		tcp_err_ack(tcp, mp, TPROTO, 0);
5532		return;
5533	}
5534	seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number;
5535	error = tcp_disconnect_common(tcp, seqnum);
5536	if (error != 0)
5537		tcp_err_ack(tcp, mp, error, 0);
5538	else {
5539		if (tcp->tcp_state >= TCPS_ESTABLISHED) {
5540			/* Send M_FLUSH according to TPI */
5541			(void) putnextctl1(connp->conn_rq, M_FLUSH, FLUSHRW);
5542		}
5543		mp = mi_tpi_ok_ack_alloc(mp);
5544		if (mp != NULL)
5545			putnext(connp->conn_rq, mp);
5546	}
5547}
5548
5549/*
5550 * Diagnostic routine used to return a string associated with the tcp state.
5551 * Note that if the caller does not supply a buffer, it will use an internal
5552 * static string.  This means that if multiple threads call this function at
5553 * the same time, output can be corrupted...  Note also that this function
5554 * does not check the size of the supplied buffer.  The caller has to make
5555 * sure that it is big enough.
5556 */
5557static char *
5558tcp_display(tcp_t *tcp, char *sup_buf, char format)
5559{
5560	char		buf1[30];
5561	static char	priv_buf[INET6_ADDRSTRLEN * 2 + 80];
5562	char		*buf;
5563	char		*cp;
5564	in6_addr_t	local, remote;
5565	char		local_addrbuf[INET6_ADDRSTRLEN];
5566	char		remote_addrbuf[INET6_ADDRSTRLEN];
5567	conn_t		*connp;
5568
5569	if (sup_buf != NULL)
5570		buf = sup_buf;
5571	else
5572		buf = priv_buf;
5573
5574	if (tcp == NULL)
5575		return ("NULL_TCP");
5576
5577	connp = tcp->tcp_connp;
5578	switch (tcp->tcp_state) {
5579	case TCPS_CLOSED:
5580		cp = "TCP_CLOSED";
5581		break;
5582	case TCPS_IDLE:
5583		cp = "TCP_IDLE";
5584		break;
5585	case TCPS_BOUND:
5586		cp = "TCP_BOUND";
5587		break;
5588	case TCPS_LISTEN:
5589		cp = "TCP_LISTEN";
5590		break;
5591	case TCPS_SYN_SENT:
5592		cp = "TCP_SYN_SENT";
5593		break;
5594	case TCPS_SYN_RCVD:
5595		cp = "TCP_SYN_RCVD";
5596		break;
5597	case TCPS_ESTABLISHED:
5598		cp = "TCP_ESTABLISHED";
5599		break;
5600	case TCPS_CLOSE_WAIT:
5601		cp = "TCP_CLOSE_WAIT";
5602		break;
5603	case TCPS_FIN_WAIT_1:
5604		cp = "TCP_FIN_WAIT_1";
5605		break;
5606	case TCPS_CLOSING:
5607		cp = "TCP_CLOSING";
5608		break;
5609	case TCPS_LAST_ACK:
5610		cp = "TCP_LAST_ACK";
5611		break;
5612	case TCPS_FIN_WAIT_2:
5613		cp = "TCP_FIN_WAIT_2";
5614		break;
5615	case TCPS_TIME_WAIT:
5616		cp = "TCP_TIME_WAIT";
5617		break;
5618	default:
5619		(void) mi_sprintf(buf1, "TCPUnkState(%d)", tcp->tcp_state);
5620		cp = buf1;
5621		break;
5622	}
5623	switch (format) {
5624	case DISP_ADDR_AND_PORT:
5625		if (connp->conn_ipversion == IPV4_VERSION) {
5626			/*
5627			 * Note that we use the remote address in the tcp_b
5628			 * structure.  This means that it will print out
5629			 * the real destination address, not the next hop's
5630			 * address if source routing is used.
5631			 */
5632			IN6_IPADDR_TO_V4MAPPED(connp->conn_laddr_v4, &local);
5633			IN6_IPADDR_TO_V4MAPPED(connp->conn_faddr_v4, &remote);
5634
5635		} else {
5636			local = connp->conn_laddr_v6;
5637			remote = connp->conn_faddr_v6;
5638		}
5639		(void) inet_ntop(AF_INET6, &local, local_addrbuf,
5640		    sizeof (local_addrbuf));
5641		(void) inet_ntop(AF_INET6, &remote, remote_addrbuf,
5642		    sizeof (remote_addrbuf));
5643		(void) mi_sprintf(buf, "[%s.%u, %s.%u] %s",
5644		    local_addrbuf, ntohs(connp->conn_lport), remote_addrbuf,
5645		    ntohs(connp->conn_fport), cp);
5646		break;
5647	case DISP_PORT_ONLY:
5648	default:
5649		(void) mi_sprintf(buf, "[%u, %u] %s",
5650		    ntohs(connp->conn_lport), ntohs(connp->conn_fport), cp);
5651		break;
5652	}
5653
5654	return (buf);
5655}
5656
5657/*
5658 * Called via squeue to get on to eager's perimeter. It sends a
5659 * TH_RST if eager is in the fanout table. The listener wants the
5660 * eager to disappear either by means of tcp_eager_blowoff() or
5661 * tcp_eager_cleanup() being called. tcp_eager_kill() can also be
5662 * called (via squeue) if the eager cannot be inserted in the
5663 * fanout table in tcp_input_listener().
5664 */
5665/* ARGSUSED */
5666void
5667tcp_eager_kill(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
5668{
5669	conn_t	*econnp = (conn_t *)arg;
5670	tcp_t	*eager = econnp->conn_tcp;
5671	tcp_t	*listener = eager->tcp_listener;
5672
5673	/*
5674	 * We could be called because listener is closing. Since
5675	 * the eager was using listener's queue's, we avoid
5676	 * using the listeners queues from now on.
5677	 */
5678	ASSERT(eager->tcp_detached);
5679	econnp->conn_rq = NULL;
5680	econnp->conn_wq = NULL;
5681
5682	/*
5683	 * An eager's conn_fanout will be NULL if it's a duplicate
5684	 * for an existing 4-tuples in the conn fanout table.
5685	 * We don't want to send an RST out in such case.
5686	 */
5687	if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) {
5688		tcp_xmit_ctl("tcp_eager_kill, can't wait",
5689		    eager, eager->tcp_snxt, 0, TH_RST);
5690	}
5691
5692	/* We are here because listener wants this eager gone */
5693	if (listener != NULL) {
5694		mutex_enter(&listener->tcp_eager_lock);
5695		tcp_eager_unlink(eager);
5696		if (eager->tcp_tconnind_started) {
5697			/*
5698			 * The eager has sent a conn_ind up to the
5699			 * listener but listener decides to close
5700			 * instead. We need to drop the extra ref
5701			 * placed on eager in tcp_input_data() before
5702			 * sending the conn_ind to listener.
5703			 */
5704			CONN_DEC_REF(econnp);
5705		}
5706		mutex_exit(&listener->tcp_eager_lock);
5707		CONN_DEC_REF(listener->tcp_connp);
5708	}
5709
5710	if (eager->tcp_state != TCPS_CLOSED)
5711		tcp_close_detached(eager);
5712}
5713
5714/*
5715 * Reset any eager connection hanging off this listener marked
5716 * with 'seqnum' and then reclaim it's resources.
5717 */
5718static boolean_t
5719tcp_eager_blowoff(tcp_t	*listener, t_scalar_t seqnum)
5720{
5721	tcp_t	*eager;
5722	mblk_t 	*mp;
5723	tcp_stack_t	*tcps = listener->tcp_tcps;
5724
5725	TCP_STAT(tcps, tcp_eager_blowoff_calls);
5726	eager = listener;
5727	mutex_enter(&listener->tcp_eager_lock);
5728	do {
5729		eager = eager->tcp_eager_next_q;
5730		if (eager == NULL) {
5731			mutex_exit(&listener->tcp_eager_lock);
5732			return (B_FALSE);
5733		}
5734	} while (eager->tcp_conn_req_seqnum != seqnum);
5735
5736	if (eager->tcp_closemp_used) {
5737		mutex_exit(&listener->tcp_eager_lock);
5738		return (B_TRUE);
5739	}
5740	eager->tcp_closemp_used = B_TRUE;
5741	TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
5742	CONN_INC_REF(eager->tcp_connp);
5743	mutex_exit(&listener->tcp_eager_lock);
5744	mp = &eager->tcp_closemp;
5745	SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill,
5746	    eager->tcp_connp, NULL, SQ_FILL, SQTAG_TCP_EAGER_BLOWOFF);
5747	return (B_TRUE);
5748}
5749
5750/*
5751 * Reset any eager connection hanging off this listener
5752 * and then reclaim it's resources.
5753 */
5754static void
5755tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only)
5756{
5757	tcp_t	*eager;
5758	mblk_t	*mp;
5759	tcp_stack_t	*tcps = listener->tcp_tcps;
5760
5761	ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));
5762
5763	if (!q0_only) {
5764		/* First cleanup q */
5765		TCP_STAT(tcps, tcp_eager_blowoff_q);
5766		eager = listener->tcp_eager_next_q;
5767		while (eager != NULL) {
5768			if (!eager->tcp_closemp_used) {
5769				eager->tcp_closemp_used = B_TRUE;
5770				TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
5771				CONN_INC_REF(eager->tcp_connp);
5772				mp = &eager->tcp_closemp;
5773				SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
5774				    tcp_eager_kill, eager->tcp_connp, NULL,
5775				    SQ_FILL, SQTAG_TCP_EAGER_CLEANUP);
5776			}
5777			eager = eager->tcp_eager_next_q;
5778		}
5779	}
5780	/* Then cleanup q0 */
5781	TCP_STAT(tcps, tcp_eager_blowoff_q0);
5782	eager = listener->tcp_eager_next_q0;
5783	while (eager != listener) {
5784		if (!eager->tcp_closemp_used) {
5785			eager->tcp_closemp_used = B_TRUE;
5786			TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
5787			CONN_INC_REF(eager->tcp_connp);
5788			mp = &eager->tcp_closemp;
5789			SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
5790			    tcp_eager_kill, eager->tcp_connp, NULL, SQ_FILL,
5791			    SQTAG_TCP_EAGER_CLEANUP_Q0);
5792		}
5793		eager = eager->tcp_eager_next_q0;
5794	}
5795}
5796
5797/*
5798 * If we are an eager connection hanging off a listener that hasn't
5799 * formally accepted the connection yet, get off his list and blow off
5800 * any data that we have accumulated.
5801 */
5802static void
5803tcp_eager_unlink(tcp_t *tcp)
5804{
5805	tcp_t	*listener = tcp->tcp_listener;
5806
5807	ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));
5808	ASSERT(listener != NULL);
5809	if (tcp->tcp_eager_next_q0 != NULL) {
5810		ASSERT(tcp->tcp_eager_prev_q0 != NULL);
5811
5812		/* Remove the eager tcp from q0 */
5813		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
5814		    tcp->tcp_eager_prev_q0;
5815		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
5816		    tcp->tcp_eager_next_q0;
5817		ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
5818		listener->tcp_conn_req_cnt_q0--;
5819
5820		tcp->tcp_eager_next_q0 = NULL;
5821		tcp->tcp_eager_prev_q0 = NULL;
5822
5823		/*
5824		 * Take the eager out, if it is in the list of droppable
5825		 * eagers.
5826		 */
5827		MAKE_UNDROPPABLE(tcp);
5828
5829		if (tcp->tcp_syn_rcvd_timeout != 0) {
5830			/* we have timed out before */
5831			ASSERT(listener->tcp_syn_rcvd_timeout > 0);
5832			listener->tcp_syn_rcvd_timeout--;
5833		}
5834	} else {
5835		tcp_t   **tcpp = &listener->tcp_eager_next_q;
5836		tcp_t	*prev = NULL;
5837
5838		for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) {
5839			if (tcpp[0] == tcp) {
5840				if (listener->tcp_eager_last_q == tcp) {
5841					/*
5842					 * If we are unlinking the last
5843					 * element on the list, adjust
5844					 * tail pointer. Set tail pointer
5845					 * to nil when list is empty.
5846					 */
5847					ASSERT(tcp->tcp_eager_next_q == NULL);
5848					if (listener->tcp_eager_last_q ==
5849					    listener->tcp_eager_next_q) {
5850						listener->tcp_eager_last_q =
5851						    NULL;
5852					} else {
5853						/*
5854						 * We won't get here if there
5855						 * is only one eager in the
5856						 * list.
5857						 */
5858						ASSERT(prev != NULL);
5859						listener->tcp_eager_last_q =
5860						    prev;
5861					}
5862				}
5863				tcpp[0] = tcp->tcp_eager_next_q;
5864				tcp->tcp_eager_next_q = NULL;
5865				tcp->tcp_eager_last_q = NULL;
5866				ASSERT(listener->tcp_conn_req_cnt_q > 0);
5867				listener->tcp_conn_req_cnt_q--;
5868				break;
5869			}
5870			prev = tcpp[0];
5871		}
5872	}
5873	tcp->tcp_listener = NULL;
5874}
5875
5876/* Shorthand to generate and send TPI error acks to our client */
5877static void
5878tcp_err_ack(tcp_t *tcp, mblk_t *mp, int t_error, int sys_error)
5879{
5880	if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
5881		putnext(tcp->tcp_connp->conn_rq, mp);
5882}
5883
5884/* Shorthand to generate and send TPI error acks to our client */
5885static void
5886tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive,
5887    int t_error, int sys_error)
5888{
5889	struct T_error_ack	*teackp;
5890
5891	if ((mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack),
5892	    M_PCPROTO, T_ERROR_ACK)) != NULL) {
5893		teackp = (struct T_error_ack *)mp->b_rptr;
5894		teackp->ERROR_prim = primitive;
5895		teackp->TLI_error = t_error;
5896		teackp->UNIX_error = sys_error;
5897		putnext(tcp->tcp_connp->conn_rq, mp);
5898	}
5899}
5900
5901/*
5902 * Note: No locks are held when inspecting tcp_g_*epriv_ports
5903 * but instead the code relies on:
5904 * - the fact that the address of the array and its size never changes
5905 * - the atomic assignment of the elements of the array
5906 */
5907/* ARGSUSED */
5908static int
5909tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
5910{
5911	int i;
5912	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;
5913
5914	for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
5915		if (tcps->tcps_g_epriv_ports[i] != 0)
5916			(void) mi_mpprintf(mp, "%d ",
5917			    tcps->tcps_g_epriv_ports[i]);
5918	}
5919	return (0);
5920}
5921
5922/*
5923 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple
5924 * threads from changing it at the same time.
5925 */
5926/* ARGSUSED */
5927static int
5928tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
5929    cred_t *cr)
5930{
5931	long	new_value;
5932	int	i;
5933	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;
5934
5935	/*
5936	 * Fail the request if the new value does not lie within the
5937	 * port number limits.
5938	 */
5939	if (ddi_strtol(value, NULL, 10, &new_value) != 0 ||
5940	    new_value <= 0 || new_value >= 65536) {
5941		return (EINVAL);
5942	}
5943
5944	mutex_enter(&tcps->tcps_epriv_port_lock);
5945	/* Check if the value is already in the list */
5946	for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
5947		if (new_value == tcps->tcps_g_epriv_ports[i]) {
5948			mutex_exit(&tcps->tcps_epriv_port_lock);
5949			return (EEXIST);
5950		}
5951	}
5952	/* Find an empty slot */
5953	for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
5954		if (tcps->tcps_g_epriv_ports[i] == 0)
5955			break;
5956	}
5957	if (i == tcps->tcps_g_num_epriv_ports) {
5958		mutex_exit(&tcps->tcps_epriv_port_lock);
5959		return (EOVERFLOW);
5960	}
5961	/* Set the new value */
5962	tcps->tcps_g_epriv_ports[i] = (uint16_t)new_value;
5963	mutex_exit(&tcps->tcps_epriv_port_lock);
5964	return (0);
5965}
5966
5967/*
5968 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple
5969 * threads from changing it at the same time.
5970 */
5971/* ARGSUSED */
5972static int
5973tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
5974    cred_t *cr)
5975{
5976	long	new_value;
5977	int	i;
5978	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;
5979
5980	/*
5981	 * Fail the request if the new value does not lie within the
5982	 * port number limits.
5983	 */
5984	if (ddi_strtol(value, NULL, 10, &new_value) != 0 || new_value <= 0 ||
5985	    new_value >= 65536) {
5986		return (EINVAL);
5987	}
5988
5989	mutex_enter(&tcps->tcps_epriv_port_lock);
5990	/* Check that the value is already in the list */
5991	for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
5992		if (tcps->tcps_g_epriv_ports[i] == new_value)
5993			break;
5994	}
5995	if (i == tcps->tcps_g_num_epriv_ports) {
5996		mutex_exit(&tcps->tcps_epriv_port_lock);
5997		return (ESRCH);
5998	}
5999	/* Clear the value */
6000	tcps->tcps_g_epriv_ports[i] = 0;
6001	mutex_exit(&tcps->tcps_epriv_port_lock);
6002	return (0);
6003}
6004
6005/* Return the TPI/TLI equivalent of our current tcp_state */
6006static int
6007tcp_tpistate(tcp_t *tcp)
6008{
6009	switch (tcp->tcp_state) {
6010	case TCPS_IDLE:
6011		return (TS_UNBND);
6012	case TCPS_LISTEN:
6013		/*
6014		 * Return whether there are outstanding T_CONN_IND waiting
6015		 * for the matching T_CONN_RES. Therefore don't count q0.
6016		 */
6017		if (tcp->tcp_conn_req_cnt_q > 0)
6018			return (TS_WRES_CIND);
6019		else
6020			return (TS_IDLE);
6021	case TCPS_BOUND:
6022		return (TS_IDLE);
6023	case TCPS_SYN_SENT:
6024		return (TS_WCON_CREQ);
6025	case TCPS_SYN_RCVD:
6026		/*
6027		 * Note: assumption: this has to the active open SYN_RCVD.
6028		 * The passive instance is detached in SYN_RCVD stage of
6029		 * incoming connection processing so we cannot get request
6030		 * for T_info_ack on it.
6031		 */
6032		return (TS_WACK_CRES);
6033	case TCPS_ESTABLISHED:
6034		return (TS_DATA_XFER);
6035	case TCPS_CLOSE_WAIT:
6036		return (TS_WREQ_ORDREL);
6037	case TCPS_FIN_WAIT_1:
6038		return (TS_WIND_ORDREL);
6039	case TCPS_FIN_WAIT_2:
6040		return (TS_WIND_ORDREL);
6041
6042	case TCPS_CLOSING:
6043	case TCPS_LAST_ACK:
6044	case TCPS_TIME_WAIT:
6045	case TCPS_CLOSED:
6046		/*
6047		 * Following TS_WACK_DREQ7 is a rendition of "not
6048		 * yet TS_IDLE" TPI state. There is no best match to any
6049		 * TPI state for TCPS_{CLOSING, LAST_ACK, TIME_WAIT} but we
6050		 * choose a value chosen that will map to TLI/XTI level
6051		 * state of TSTATECHNG (state is process of changing) which
6052		 * captures what this dummy state represents.
6053		 */
6054		return (TS_WACK_DREQ7);
6055	default:
6056		cmn_err(CE_WARN, "tcp_tpistate: strange state (%d) %s",
6057		    tcp->tcp_state, tcp_display(tcp, NULL,
6058		    DISP_PORT_ONLY));
6059		return (TS_UNBND);
6060	}
6061}
6062
6063static void
6064tcp_copy_info(struct T_info_ack *tia, tcp_t *tcp)
6065{
6066	tcp_stack_t	*tcps = tcp->tcp_tcps;
6067	conn_t		*connp = tcp->tcp_connp;
6068
6069	if (connp->conn_family == AF_INET6)
6070		*tia = tcp_g_t_info_ack_v6;
6071	else
6072		*tia = tcp_g_t_info_ack;
6073	tia->CURRENT_state = tcp_tpistate(tcp);
6074	tia->OPT_size = tcp_max_optsize;
6075	if (tcp->tcp_mss == 0) {
6076		/* Not yet set - tcp_open does not set mss */
6077		if (connp->conn_ipversion == IPV4_VERSION)
6078			tia->TIDU_size = tcps->tcps_mss_def_ipv4;
6079		else
6080			tia->TIDU_size = tcps->tcps_mss_def_ipv6;
6081	} else {
6082		tia->TIDU_size = tcp->tcp_mss;
6083	}
6084	/* TODO: Default ETSDU is 1.  Is that correct for tcp? */
6085}
6086
6087static void
6088tcp_do_capability_ack(tcp_t *tcp, struct T_capability_ack *tcap,
6089    t_uscalar_t cap_bits1)
6090{
6091	tcap->CAP_bits1 = 0;
6092
6093	if (cap_bits1 & TC1_INFO) {
6094		tcp_copy_info(&tcap->INFO_ack, tcp);
6095		tcap->CAP_bits1 |= TC1_INFO;
6096	}
6097
6098	if (cap_bits1 & TC1_ACCEPTOR_ID) {
6099		tcap->ACCEPTOR_id = tcp->tcp_acceptor_id;
6100		tcap->CAP_bits1 |= TC1_ACCEPTOR_ID;
6101	}
6102
6103}
6104
6105/*
6106 * This routine responds to T_CAPABILITY_REQ messages.  It is called by
6107 * tcp_wput.  Much of the T_CAPABILITY_ACK information is copied from
6108 * tcp_g_t_info_ack.  The current state of the stream is copied from
6109 * tcp_state.
6110 */
6111static void
6112tcp_capability_req(tcp_t *tcp, mblk_t *mp)
6113{
6114	t_uscalar_t		cap_bits1;
6115	struct T_capability_ack	*tcap;
6116
6117	if (MBLKL(mp) < sizeof (struct T_capability_req)) {
6118		freemsg(mp);
6119		return;
6120	}
6121
6122	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
6123
6124	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
6125	    mp->b_datap->db_type, T_CAPABILITY_ACK);
6126	if (mp == NULL)
6127		return;
6128
6129	tcap = (struct T_capability_ack *)mp->b_rptr;
6130	tcp_do_capability_ack(tcp, tcap, cap_bits1);
6131
6132	putnext(tcp->tcp_connp->conn_rq, mp);
6133}
6134
6135/*
6136 * This routine responds to T_INFO_REQ messages.  It is called by tcp_wput.
6137 * Most of the T_INFO_ACK information is copied from tcp_g_t_info_ack.
6138 * The current state of the stream is copied from tcp_state.
6139 */
6140static void
6141tcp_info_req(tcp_t *tcp, mblk_t *mp)
6142{
6143	mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
6144	    T_INFO_ACK);
6145	if (!mp) {
6146		tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
6147		return;
6148	}
6149	tcp_copy_info((struct T_info_ack *)mp->b_rptr, tcp);
6150	putnext(tcp->tcp_connp->conn_rq, mp);
6151}
6152
6153/* Respond to the TPI addr request */
6154static void
6155tcp_addr_req(tcp_t *tcp, mblk_t *mp)
6156{
6157	struct sockaddr *sa;
6158	mblk_t	*ackmp;
6159	struct T_addr_ack *taa;
6160	conn_t	*connp = tcp->tcp_connp;
6161	uint_t	addrlen;
6162
6163	/* Make it large enough for worst case */
6164	ackmp = reallocb(mp, sizeof (struct T_addr_ack) +
6165	    2 * sizeof (sin6_t), 1);
6166	if (ackmp == NULL) {
6167		tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
6168		return;
6169	}
6170
6171	taa = (struct T_addr_ack *)ackmp->b_rptr;
6172
6173	bzero(taa, sizeof (struct T_addr_ack));
6174	ackmp->b_wptr = (uchar_t *)&taa[1];
6175
6176	taa->PRIM_type = T_ADDR_ACK;
6177	ackmp->b_datap->db_type = M_PCPROTO;
6178
6179	if (connp->conn_family == AF_INET)
6180		addrlen = sizeof (sin_t);
6181	else
6182		addrlen = sizeof (sin6_t);
6183
6184	/*
6185	 * Note: Following code assumes 32 bit alignment of basic
6186	 * data structures like sin_t and struct T_addr_ack.
6187	 */
6188	if (tcp->tcp_state >= TCPS_BOUND) {
6189		/*
6190		 * Fill in local address first
6191		 */
6192		taa->LOCADDR_offset = sizeof (*taa);
6193		taa->LOCADDR_length = addrlen;
6194		sa = (struct sockaddr *)&taa[1];
6195		(void) conn_getsockname(connp, sa, &addrlen);
6196		ackmp->b_wptr += addrlen;
6197	}
6198	if (tcp->tcp_state >= TCPS_SYN_RCVD) {
6199		/*
6200		 * Fill in Remote address
6201		 */
6202		taa->REMADDR_length = addrlen;
6203		/* assumed 32-bit alignment */
6204		taa->REMADDR_offset = taa->LOCADDR_offset + taa->LOCADDR_length;
6205		sa = (struct sockaddr *)(ackmp->b_rptr + taa->REMADDR_offset);
6206		(void) conn_getpeername(connp, sa, &addrlen);
6207		ackmp->b_wptr += addrlen;
6208	}
6209	ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
6210	putnext(tcp->tcp_connp->conn_rq, ackmp);
6211}
6212
6213/*
6214 * Handle reinitialization of a tcp structure.
6215 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
6216 */
6217static void
6218tcp_reinit(tcp_t *tcp)
6219{
6220	mblk_t		*mp;
6221	tcp_stack_t	*tcps = tcp->tcp_tcps;
6222	conn_t		*connp  = tcp->tcp_connp;
6223
6224	TCP_STAT(tcps, tcp_reinit_calls);
6225
6226	/* tcp_reinit should never be called for detached tcp_t's */
6227	ASSERT(tcp->tcp_listener == NULL);
6228	ASSERT((connp->conn_family == AF_INET &&
6229	    connp->conn_ipversion == IPV4_VERSION) ||
6230	    (connp->conn_family == AF_INET6 &&
6231	    (connp->conn_ipversion == IPV4_VERSION ||
6232	    connp->conn_ipversion == IPV6_VERSION)));
6233
6234	/* Cancel outstanding timers */
6235	tcp_timers_stop(tcp);
6236
6237	/*
6238	 * Reset everything in the state vector, after updating global
6239	 * MIB data from instance counters.
6240	 */
6241	UPDATE_MIB(&tcps->tcps_mib, tcpHCInSegs, tcp->tcp_ibsegs);
6242	tcp->tcp_ibsegs = 0;
6243	UPDATE_MIB(&tcps->tcps_mib, tcpHCOutSegs, tcp->tcp_obsegs);
6244	tcp->tcp_obsegs = 0;
6245
6246	tcp_close_mpp(&tcp->tcp_xmit_head);
6247	if (tcp->tcp_snd_zcopy_aware)
6248		tcp_zcopy_notify(tcp);
6249	tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL;
6250	tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0;
6251	mutex_enter(&tcp->tcp_non_sq_lock);
6252	if (tcp->tcp_flow_stopped &&
6253	    TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
6254		tcp_clrqfull(tcp);
6255	}
6256	mutex_exit(&tcp->tcp_non_sq_lock);
6257	tcp_close_mpp(&tcp->tcp_reass_head);
6258	tcp->tcp_reass_tail = NULL;
6259	if (tcp->tcp_rcv_list != NULL) {
6260		/* Free b_next chain */
6261		tcp_close_mpp(&tcp->tcp_rcv_list);
6262		tcp->tcp_rcv_last_head = NULL;
6263		tcp->tcp_rcv_last_tail = NULL;
6264		tcp->tcp_rcv_cnt = 0;
6265	}
6266	tcp->tcp_rcv_last_tail = NULL;
6267
6268	if ((mp = tcp->tcp_urp_mp) != NULL) {
6269		freemsg(mp);
6270		tcp->tcp_urp_mp = NULL;
6271	}
6272	if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
6273		freemsg(mp);
6274		tcp->tcp_urp_mark_mp = NULL;
6275	}
6276	if (tcp->tcp_fused_sigurg_mp != NULL) {
6277		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
6278		freeb(tcp->tcp_fused_sigurg_mp);
6279		tcp->tcp_fused_sigurg_mp = NULL;
6280	}
6281	if (tcp->tcp_ordrel_mp != NULL) {
6282		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
6283		freeb(tcp->tcp_ordrel_mp);
6284		tcp->tcp_ordrel_mp = NULL;
6285	}
6286
6287	/*
6288	 * Following is a union with two members which are
6289	 * identical types and size so the following cleanup
6290	 * is enough.
6291	 */
6292	tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);
6293
6294	CL_INET_DISCONNECT(connp);
6295
6296	/*
6297	 * The connection can't be on the tcp_time_wait_head list
6298	 * since it is not detached.
6299	 */
6300	ASSERT(tcp->tcp_time_wait_next == NULL);
6301	ASSERT(tcp->tcp_time_wait_prev == NULL);
6302	ASSERT(tcp->tcp_time_wait_expire == 0);
6303
6304	if (tcp->tcp_kssl_pending) {
6305		tcp->tcp_kssl_pending = B_FALSE;
6306
6307		/* Don't reset if the initialized by bind. */
6308		if (tcp->tcp_kssl_ent != NULL) {
6309			kssl_release_ent(tcp->tcp_kssl_ent, NULL,
6310			    KSSL_NO_PROXY);
6311		}
6312	}
6313	if (tcp->tcp_kssl_ctx != NULL) {
6314		kssl_release_ctx(tcp->tcp_kssl_ctx);
6315		tcp->tcp_kssl_ctx = NULL;
6316	}
6317
6318	/*
6319	 * Reset/preserve other values
6320	 */
6321	tcp_reinit_values(tcp);
6322	ipcl_hash_remove(connp);
6323	ixa_cleanup(connp->conn_ixa);
6324	tcp_ipsec_cleanup(tcp);
6325
6326	connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
6327	connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
6328
6329	if (tcp->tcp_conn_req_max != 0) {
6330		/*
6331		 * This is the case when a TLI program uses the same
6332		 * transport end point to accept a connection.  This
6333		 * makes the TCP both a listener and acceptor.  When
6334		 * this connection is closed, we need to set the state
6335		 * back to TCPS_LISTEN.  Make sure that the eager list
6336		 * is reinitialized.
6337		 *
6338		 * Note that this stream is still bound to the four
6339		 * tuples of the previous connection in IP.  If a new
6340		 * SYN with different foreign address comes in, IP will
6341		 * not find it and will send it to the global queue.  In
6342		 * the global queue, TCP will do a tcp_lookup_listener()
6343		 * to find this stream.  This works because this stream
6344		 * is only removed from connected hash.
6345		 *
6346		 */
6347		tcp->tcp_state = TCPS_LISTEN;
6348		tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
6349		tcp->tcp_eager_next_drop_q0 = tcp;
6350		tcp->tcp_eager_prev_drop_q0 = tcp;
6351		/*
6352		 * Initially set conn_recv to tcp_input_listener_unbound to try
6353		 * to pick a good squeue for the listener when the first SYN
6354		 * arrives. tcp_input_listener_unbound sets it to
6355		 * tcp_input_listener on that first SYN.
6356		 */
6357		connp->conn_recv = tcp_input_listener_unbound;
6358
6359		connp->conn_proto = IPPROTO_TCP;
6360		connp->conn_faddr_v6 = ipv6_all_zeros;
6361		connp->conn_fport = 0;
6362
6363		(void) ipcl_bind_insert(connp);
6364	} else {
6365		tcp->tcp_state = TCPS_BOUND;
6366	}
6367
6368	/*
6369	 * Initialize to default values
6370	 */
6371	tcp_init_values(tcp);
6372
6373	ASSERT(tcp->tcp_ptpbhn != NULL);
6374	tcp->tcp_rwnd = connp->conn_rcvbuf;
6375	tcp->tcp_mss = connp->conn_ipversion != IPV4_VERSION ?
6376	    tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4;
6377}
6378
6379/*
6380 * Force values to zero that need be zero.
6381 * Do not touch values asociated with the BOUND or LISTEN state
6382 * since the connection will end up in that state after the reinit.
6383 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
6384 * structure!
6385 */
6386static void
6387tcp_reinit_values(tcp)
6388	tcp_t *tcp;
6389{
6390	tcp_stack_t	*tcps = tcp->tcp_tcps;
6391	conn_t		*connp = tcp->tcp_connp;
6392
6393#ifndef	lint
6394#define	DONTCARE(x)
6395#define	PRESERVE(x)
6396#else
6397#define	DONTCARE(x)	((x) = (x))
6398#define	PRESERVE(x)	((x) = (x))
6399#endif	/* lint */
6400
6401	PRESERVE(tcp->tcp_bind_hash_port);
6402	PRESERVE(tcp->tcp_bind_hash);
6403	PRESERVE(tcp->tcp_ptpbhn);
6404	PRESERVE(tcp->tcp_acceptor_hash);
6405	PRESERVE(tcp->tcp_ptpahn);
6406
6407	/* Should be ASSERT NULL on these with new code! */
6408	ASSERT(tcp->tcp_time_wait_next == NULL);
6409	ASSERT(tcp->tcp_time_wait_prev == NULL);
6410	ASSERT(tcp->tcp_time_wait_expire == 0);
6411	PRESERVE(tcp->tcp_state);
6412	PRESERVE(connp->conn_rq);
6413	PRESERVE(connp->conn_wq);
6414
6415	ASSERT(tcp->tcp_xmit_head == NULL);
6416	ASSERT(tcp->tcp_xmit_last == NULL);
6417	ASSERT(tcp->tcp_unsent == 0);
6418	ASSERT(tcp->tcp_xmit_tail == NULL);
6419	ASSERT(tcp->tcp_xmit_tail_unsent == 0);
6420
6421	tcp->tcp_snxt = 0;			/* Displayed in mib */
6422	tcp->tcp_suna = 0;			/* Displayed in mib */
6423	tcp->tcp_swnd = 0;
6424	DONTCARE(tcp->tcp_cwnd);	/* Init in tcp_process_options */
6425
6426	ASSERT(tcp->tcp_ibsegs == 0);
6427	ASSERT(tcp->tcp_obsegs == 0);
6428
6429	if (connp->conn_ht_iphc != NULL) {
6430		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
6431		connp->conn_ht_iphc = NULL;
6432		connp->conn_ht_iphc_allocated = 0;
6433		connp->conn_ht_iphc_len = 0;
6434		connp->conn_ht_ulp = NULL;
6435		connp->conn_ht_ulp_len = 0;
6436		tcp->tcp_ipha = NULL;
6437		tcp->tcp_ip6h = NULL;
6438		tcp->tcp_tcpha = NULL;
6439	}
6440
6441	/* We clear any IP_OPTIONS and extension headers */
6442	ip_pkt_free(&connp->conn_xmit_ipp);
6443
6444	DONTCARE(tcp->tcp_naglim);		/* Init in tcp_init_values */
6445	DONTCARE(tcp->tcp_ipha);
6446	DONTCARE(tcp->tcp_ip6h);
6447	DONTCARE(tcp->tcp_tcpha);
6448	tcp->tcp_valid_bits = 0;
6449
6450	DONTCARE(tcp->tcp_timer_backoff);	/* Init in tcp_init_values */
6451	DONTCARE(tcp->tcp_last_recv_time);	/* Init in tcp_init_values */
6452	tcp->tcp_last_rcv_lbolt = 0;
6453
6454	tcp->tcp_init_cwnd = 0;
6455
6456	tcp->tcp_urp_last_valid = 0;
6457	tcp->tcp_hard_binding = 0;
6458
6459	tcp->tcp_fin_acked = 0;
6460	tcp->tcp_fin_rcvd = 0;
6461	tcp->tcp_fin_sent = 0;
6462	tcp->tcp_ordrel_done = 0;
6463
6464	tcp->tcp_detached = 0;
6465
6466	tcp->tcp_snd_ws_ok = B_FALSE;
6467	tcp->tcp_snd_ts_ok = B_FALSE;
6468	tcp->tcp_zero_win_probe = 0;
6469
6470	tcp->tcp_loopback = 0;
6471	tcp->tcp_localnet = 0;
6472	tcp->tcp_syn_defense = 0;
6473	tcp->tcp_set_timer = 0;
6474
6475	tcp->tcp_active_open = 0;
6476	tcp->tcp_rexmit = B_FALSE;
6477	tcp->tcp_xmit_zc_clean = B_FALSE;
6478
6479	tcp->tcp_snd_sack_ok = B_FALSE;
6480	tcp->tcp_hwcksum = B_FALSE;
6481
6482	DONTCARE(tcp->tcp_maxpsz_multiplier);	/* Init in tcp_init_values */
6483
6484	tcp->tcp_conn_def_q0 = 0;
6485	tcp->tcp_ip_forward_progress = B_FALSE;
6486	tcp->tcp_ecn_ok = B_FALSE;
6487
6488	tcp->tcp_cwr = B_FALSE;
6489	tcp->tcp_ecn_echo_on = B_FALSE;
6490	tcp->tcp_is_wnd_shrnk = B_FALSE;
6491
6492	if (tcp->tcp_sack_info != NULL) {
6493		if (tcp->tcp_notsack_list != NULL) {
6494			TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list,
6495			    tcp);
6496		}
6497		kmem_cache_free(tcp_sack_info_cache, tcp->tcp_sack_info);
6498		tcp->tcp_sack_info = NULL;
6499	}
6500
6501	tcp->tcp_rcv_ws = 0;
6502	tcp->tcp_snd_ws = 0;
6503	tcp->tcp_ts_recent = 0;
6504	tcp->tcp_rnxt = 0;			/* Displayed in mib */
6505	DONTCARE(tcp->tcp_rwnd);		/* Set in tcp_reinit() */
6506	tcp->tcp_initial_pmtu = 0;
6507
6508	ASSERT(tcp->tcp_reass_head == NULL);
6509	ASSERT(tcp->tcp_reass_tail == NULL);
6510
6511	tcp->tcp_cwnd_cnt = 0;
6512
6513	ASSERT(tcp->tcp_rcv_list == NULL);
6514	ASSERT(tcp->tcp_rcv_last_head == NULL);
6515	ASSERT(tcp->tcp_rcv_last_tail == NULL);
6516	ASSERT(tcp->tcp_rcv_cnt == 0);
6517
6518	DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_set_destination */
6519	DONTCARE(tcp->tcp_cwnd_max);		/* Init in tcp_init_values */
6520	tcp->tcp_csuna = 0;
6521
6522	tcp->tcp_rto = 0;			/* Displayed in MIB */
6523	DONTCARE(tcp->tcp_rtt_sa);		/* Init in tcp_init_values */
6524	DONTCARE(tcp->tcp_rtt_sd);		/* Init in tcp_init_values */
6525	tcp->tcp_rtt_update = 0;
6526
6527	DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
6528	DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
6529
6530	tcp->tcp_rack = 0;			/* Displayed in mib */
6531	tcp->tcp_rack_cnt = 0;
6532	tcp->tcp_rack_cur_max = 0;
6533	tcp->tcp_rack_abs_max = 0;
6534
6535	tcp->tcp_max_swnd = 0;
6536
6537	ASSERT(tcp->tcp_listener == NULL);
6538
6539	DONTCARE(tcp->tcp_irs);			/* tcp_valid_bits cleared */
6540	DONTCARE(tcp->tcp_iss);			/* tcp_valid_bits cleared */
6541	DONTCARE(tcp->tcp_fss);			/* tcp_valid_bits cleared */
6542	DONTCARE(tcp->tcp_urg);			/* tcp_valid_bits cleared */
6543
6544	ASSERT(tcp->tcp_conn_req_cnt_q == 0);
6545	ASSERT(tcp->tcp_conn_req_cnt_q0 == 0);
6546	PRESERVE(tcp->tcp_conn_req_max);
6547	PRESERVE(tcp->tcp_conn_req_seqnum);
6548
6549	DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */
6550	DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */
6551	DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */
6552	DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */
6553
6554	DONTCARE(tcp->tcp_urp_last);	/* tcp_urp_last_valid is cleared */
6555	ASSERT(tcp->tcp_urp_mp == NULL);
6556	ASSERT(tcp->tcp_urp_mark_mp == NULL);
6557	ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
6558
6559	ASSERT(tcp->tcp_eager_next_q == NULL);
6560	ASSERT(tcp->tcp_eager_last_q == NULL);
6561	ASSERT((tcp->tcp_eager_next_q0 == NULL &&
6562	    tcp->tcp_eager_prev_q0 == NULL) ||
6563	    tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0);
6564	ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL);
6565
6566	ASSERT((tcp->tcp_eager_next_drop_q0 == NULL &&
6567	    tcp->tcp_eager_prev_drop_q0 == NULL) ||
6568	    tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0);
6569
6570	tcp->tcp_client_errno = 0;
6571
6572	DONTCARE(connp->conn_sum);		/* Init in tcp_init_values */
6573
6574	connp->conn_faddr_v6 = ipv6_all_zeros;	/* Displayed in MIB */
6575
6576	PRESERVE(connp->conn_bound_addr_v6);
6577	tcp->tcp_last_sent_len = 0;
6578	tcp->tcp_dupack_cnt = 0;
6579
6580	connp->conn_fport = 0;			/* Displayed in MIB */
6581	PRESERVE(connp->conn_lport);
6582
6583	PRESERVE(tcp->tcp_acceptor_lockp);
6584
6585	ASSERT(tcp->tcp_ordrel_mp == NULL);
6586	PRESERVE(tcp->tcp_acceptor_id);
6587	DONTCARE(tcp->tcp_ipsec_overhead);
6588
6589	PRESERVE(connp->conn_family);
6590	/* Remove any remnants of mapped address binding */
6591	if (connp->conn_family == AF_INET6) {
6592		connp->conn_ipversion = IPV6_VERSION;
6593		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
6594	} else {
6595		connp->conn_ipversion = IPV4_VERSION;
6596		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
6597	}
6598
6599	connp->conn_bound_if = 0;
6600	connp->conn_recv_ancillary.crb_all = 0;
6601	tcp->tcp_recvifindex = 0;
6602	tcp->tcp_recvhops = 0;
6603	tcp->tcp_closed = 0;
6604	tcp->tcp_cleandeathtag = 0;
6605	if (tcp->tcp_hopopts != NULL) {
6606		mi_free(tcp->tcp_hopopts);
6607		tcp->tcp_hopopts = NULL;
6608		tcp->tcp_hopoptslen = 0;
6609	}
6610	ASSERT(tcp->tcp_hopoptslen == 0);
6611	if (tcp->tcp_dstopts != NULL) {
6612		mi_free(tcp->tcp_dstopts);
6613		tcp->tcp_dstopts = NULL;
6614		tcp->tcp_dstoptslen = 0;
6615	}
6616	ASSERT(tcp->tcp_dstoptslen == 0);
6617	if (tcp->tcp_rthdrdstopts != NULL) {
6618		mi_free(tcp->tcp_rthdrdstopts);
6619		tcp->tcp_rthdrdstopts = NULL;
6620		tcp->tcp_rthdrdstoptslen = 0;
6621	}
6622	ASSERT(tcp->tcp_rthdrdstoptslen == 0);
6623	if (tcp->tcp_rthdr != NULL) {
6624		mi_free(tcp->tcp_rthdr);
6625		tcp->tcp_rthdr = NULL;
6626		tcp->tcp_rthdrlen = 0;
6627	}
6628	ASSERT(tcp->tcp_rthdrlen == 0);
6629
6630	/* Reset fusion-related fields */
6631	tcp->tcp_fused = B_FALSE;
6632	tcp->tcp_unfusable = B_FALSE;
6633	tcp->tcp_fused_sigurg = B_FALSE;
6634	tcp->tcp_loopback_peer = NULL;
6635
6636	tcp->tcp_lso = B_FALSE;
6637
6638	tcp->tcp_in_ack_unsent = 0;
6639	tcp->tcp_cork = B_FALSE;
6640	tcp->tcp_tconnind_started = B_FALSE;
6641
6642	PRESERVE(tcp->tcp_squeue_bytes);
6643
6644	ASSERT(tcp->tcp_kssl_ctx == NULL);
6645	ASSERT(!tcp->tcp_kssl_pending);
6646	PRESERVE(tcp->tcp_kssl_ent);
6647
6648	tcp->tcp_closemp_used = B_FALSE;
6649
6650	PRESERVE(tcp->tcp_rsrv_mp);
6651	PRESERVE(tcp->tcp_rsrv_mp_lock);
6652
6653#ifdef DEBUG
6654	DONTCARE(tcp->tcmp_stk[0]);
6655#endif
6656
6657	PRESERVE(tcp->tcp_connid);
6658
6659
6660#undef	DONTCARE
6661#undef	PRESERVE
6662}
6663
6664static void
6665tcp_init_values(tcp_t *tcp)
6666{
6667	tcp_stack_t	*tcps = tcp->tcp_tcps;
6668	conn_t		*connp = tcp->tcp_connp;
6669
6670	ASSERT((connp->conn_family == AF_INET &&
6671	    connp->conn_ipversion == IPV4_VERSION) ||
6672	    (connp->conn_family == AF_INET6 &&
6673	    (connp->conn_ipversion == IPV4_VERSION ||
6674	    connp->conn_ipversion == IPV6_VERSION)));
6675
6676	/*
6677	 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
6678	 * will be close to tcp_rexmit_interval_initial.  By doing this, we
6679	 * allow the algorithm to adjust slowly to large fluctuations of RTT
6680	 * during first few transmissions of a connection as seen in slow
6681	 * links.
6682	 */
6683	tcp->tcp_rtt_sa = tcps->tcps_rexmit_interval_initial << 2;
6684	tcp->tcp_rtt_sd = tcps->tcps_rexmit_interval_initial >> 1;
6685	tcp->tcp_rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
6686	    tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) +
6687	    tcps->tcps_conn_grace_period;
6688	if (tcp->tcp_rto < tcps->tcps_rexmit_interval_min)
6689		tcp->tcp_rto = tcps->tcps_rexmit_interval_min;
6690	tcp->tcp_timer_backoff = 0;
6691	tcp->tcp_ms_we_have_waited = 0;
6692	tcp->tcp_last_recv_time = lbolt;
6693	tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_;
6694	tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
6695	tcp->tcp_snd_burst = TCP_CWND_INFINITE;
6696
6697	tcp->tcp_maxpsz_multiplier = tcps->tcps_maxpsz_multiplier;
6698
6699	tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval;
6700	tcp->tcp_first_ctimer_threshold = tcps->tcps_ip_notify_cinterval;
6701	tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval;
6702	/*
6703	 * Fix it to tcp_ip_abort_linterval later if it turns out to be a
6704	 * passive open.
6705	 */
6706	tcp->tcp_second_ctimer_threshold = tcps->tcps_ip_abort_cinterval;
6707
6708	tcp->tcp_naglim = tcps->tcps_naglim_def;
6709
6710	/* NOTE:  ISS is now set in tcp_set_destination(). */
6711
6712	/* Reset fusion-related fields */
6713	tcp->tcp_fused = B_FALSE;
6714	tcp->tcp_unfusable = B_FALSE;
6715	tcp->tcp_fused_sigurg = B_FALSE;
6716	tcp->tcp_loopback_peer = NULL;
6717
6718	/* We rebuild the header template on the next connect/conn_request */
6719
6720	connp->conn_mlp_type = mlptSingle;
6721
6722	/*
6723	 * Init the window scale to the max so tcp_rwnd_set() won't pare
6724	 * down tcp_rwnd. tcp_set_destination() will set the right value later.
6725	 */
6726	tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT;
6727	tcp->tcp_rwnd = connp->conn_rcvbuf;
6728
6729	tcp->tcp_cork = B_FALSE;
6730	/*
6731	 * Init the tcp_debug option if it wasn't already set.  This value
6732	 * determines whether TCP
6733	 * calls strlog() to print out debug messages.  Doing this
6734	 * initialization here means that this value is not inherited thru
6735	 * tcp_reinit().
6736	 */
6737	if (!connp->conn_debug)
6738		connp->conn_debug = tcps->tcps_dbg;
6739
6740	tcp->tcp_ka_interval = tcps->tcps_keepalive_interval;
6741	tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval;
6742}
6743
6744/* At minimum we need 8 bytes in the TCP header for the lookup */
6745#define	ICMP_MIN_TCP_HDR	8
6746
6747/*
6748 * tcp_icmp_input is called as conn_recvicmp to process ICMP error messages
6749 * passed up by IP. The message is always received on the correct tcp_t.
6750 * Assumes that IP has pulled up everything up to and including the ICMP header.
6751 */
6752/* ARGSUSED2 */
6753static void
6754tcp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
6755{
6756	conn_t		*connp = (conn_t *)arg1;
6757	icmph_t		*icmph;
6758	ipha_t		*ipha;
6759	int		iph_hdr_length;
6760	tcpha_t		*tcpha;
6761	uint32_t	seg_seq;
6762	tcp_t		*tcp = connp->conn_tcp;
6763
6764	/* Assume IP provides aligned packets */
6765	ASSERT(OK_32PTR(mp->b_rptr));
6766	ASSERT((MBLKL(mp) >= sizeof (ipha_t)));
6767
6768	/*
6769	 * Verify IP version. Anything other than IPv4 or IPv6 packet is sent
6770	 * upstream. ICMPv6 is handled in tcp_icmp_error_ipv6.
6771	 */
6772	if (!(ira->ira_flags & IRAF_IS_IPV4)) {
6773		tcp_icmp_error_ipv6(tcp, mp, ira);
6774		return;
6775	}
6776
6777	/* Skip past the outer IP and ICMP headers */
6778	iph_hdr_length = ira->ira_ip_hdr_length;
6779	icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
6780	/*
6781	 * If we don't have the correct outer IP header length
6782	 * or if we don't have a complete inner IP header
6783	 * drop it.
6784	 */
6785	if (iph_hdr_length < sizeof (ipha_t) ||
6786	    (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) {
6787noticmpv4:
6788		freemsg(mp);
6789		return;
6790	}
6791	ipha = (ipha_t *)&icmph[1];
6792
6793	/* Skip past the inner IP and find the ULP header */
6794	iph_hdr_length = IPH_HDR_LENGTH(ipha);
6795	tcpha = (tcpha_t *)((char *)ipha + iph_hdr_length);
6796	/*
6797	 * If we don't have the correct inner IP header length or if the ULP
6798	 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR
6799	 * bytes of TCP header, drop it.
6800	 */
6801	if (iph_hdr_length < sizeof (ipha_t) ||
6802	    ipha->ipha_protocol != IPPROTO_TCP ||
6803	    (uchar_t *)tcpha + ICMP_MIN_TCP_HDR > mp->b_wptr) {
6804		goto noticmpv4;
6805	}
6806
6807	seg_seq = ntohl(tcpha->tha_seq);
6808	switch (icmph->icmph_type) {
6809	case ICMP_DEST_UNREACHABLE:
6810		switch (icmph->icmph_code) {
6811		case ICMP_FRAGMENTATION_NEEDED:
6812			/*
6813			 * Update Path MTU, then try to send something out.
6814			 */
6815			tcp_update_pmtu(tcp, B_TRUE);
6816			tcp_rexmit_after_error(tcp);
6817			break;
6818		case ICMP_PORT_UNREACHABLE:
6819		case ICMP_PROTOCOL_UNREACHABLE:
6820			switch (tcp->tcp_state) {
6821			case TCPS_SYN_SENT:
6822			case TCPS_SYN_RCVD:
6823				/*
6824				 * ICMP can snipe away incipient
6825				 * TCP connections as long as
6826				 * seq number is same as initial
6827				 * send seq number.
6828				 */
6829				if (seg_seq == tcp->tcp_iss) {
6830					(void) tcp_clean_death(tcp,
6831					    ECONNREFUSED, 6);
6832				}
6833				break;
6834			}
6835			break;
6836		case ICMP_HOST_UNREACHABLE:
6837		case ICMP_NET_UNREACHABLE:
6838			/* Record the error in case we finally time out. */
6839			if (icmph->icmph_code == ICMP_HOST_UNREACHABLE)
6840				tcp->tcp_client_errno = EHOSTUNREACH;
6841			else
6842				tcp->tcp_client_errno = ENETUNREACH;
6843			if (tcp->tcp_state == TCPS_SYN_RCVD) {
6844				if (tcp->tcp_listener != NULL &&
6845				    tcp->tcp_listener->tcp_syn_defense) {
6846					/*
6847					 * Ditch the half-open connection if we
6848					 * suspect a SYN attack is under way.
6849					 */
6850					(void) tcp_clean_death(tcp,
6851					    tcp->tcp_client_errno, 7);
6852				}
6853			}
6854			break;
6855		default:
6856			break;
6857		}
6858		break;
6859	case ICMP_SOURCE_QUENCH: {
6860		/*
6861		 * use a global boolean to control
6862		 * whether TCP should respond to ICMP_SOURCE_QUENCH.
6863		 * The default is false.
6864		 */
6865		if (tcp_icmp_source_quench) {
6866			/*
6867			 * Reduce the sending rate as if we got a
6868			 * retransmit timeout
6869			 */
6870			uint32_t npkt;
6871
6872			npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) /
6873			    tcp->tcp_mss;
6874			tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss;
6875			tcp->tcp_cwnd = tcp->tcp_mss;
6876			tcp->tcp_cwnd_cnt = 0;
6877		}
6878		break;
6879	}
6880	}
6881	freemsg(mp);
6882}
6883
6884/*
6885 * CALLED OUTSIDE OF SQUEUE! It can not follow any pointers that tcp might
6886 * change. But it can refer to fields like tcp_suna and tcp_snxt.
6887 *
6888 * Function tcp_verifyicmp is called as conn_verifyicmp to verify the ICMP
6889 * error messages received by IP. The message is always received on the correct
6890 * tcp_t.
6891 */
6892/* ARGSUSED */
6893static boolean_t
6894tcp_verifyicmp(conn_t *connp, void *arg2, icmph_t *icmph, icmp6_t *icmp6,
6895    ip_recv_attr_t *ira)
6896{
6897	tcpha_t		*tcpha = (tcpha_t *)arg2;
6898	uint32_t	seq = ntohl(tcpha->tha_seq);
6899	tcp_t		*tcp = connp->conn_tcp;
6900
6901	/*
6902	 * TCP sequence number contained in payload of the ICMP error message
6903	 * should be within the range SND.UNA <= SEG.SEQ < SND.NXT. Otherwise,
6904	 * the message is either a stale ICMP error, or an attack from the
6905	 * network. Fail the verification.
6906	 */
6907	if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt))
6908		return (B_FALSE);
6909
6910	/* For "too big" we also check the ignore flag */
6911	if (ira->ira_flags & IRAF_IS_IPV4) {
6912		ASSERT(icmph != NULL);
6913		if (icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
6914		    icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED &&
6915		    tcp->tcp_tcps->tcps_ignore_path_mtu)
6916			return (B_FALSE);
6917	} else {
6918		ASSERT(icmp6 != NULL);
6919		if (icmp6->icmp6_type == ICMP6_PACKET_TOO_BIG &&
6920		    tcp->tcp_tcps->tcps_ignore_path_mtu)
6921			return (B_FALSE);
6922	}
6923	return (B_TRUE);
6924}
6925
6926/*
6927 * Update the TCP connection according to change of PMTU.
6928 *
6929 * Path MTU might have changed by either increase or decrease, so need to
6930 * adjust the MSS based on the value of ixa_pmtu. No need to handle tiny
6931 * or negative MSS, since tcp_mss_set() will do it.
6932 */
6933static void
6934tcp_update_pmtu(tcp_t *tcp, boolean_t decrease_only)
6935{
6936	uint32_t	pmtu;
6937	int32_t		mss;
6938	conn_t		*connp = tcp->tcp_connp;
6939	ip_xmit_attr_t	*ixa = connp->conn_ixa;
6940	iaflags_t	ixaflags;
6941
6942	if (tcp->tcp_tcps->tcps_ignore_path_mtu)
6943		return;
6944
6945	if (tcp->tcp_state < TCPS_ESTABLISHED)
6946		return;
6947
6948	/*
6949	 * Always call ip_get_pmtu() to make sure that IP has updated
6950	 * ixa_flags properly.
6951	 */
6952	pmtu = ip_get_pmtu(ixa);
6953	ixaflags = ixa->ixa_flags;
6954
6955	/*
6956	 * Calculate the MSS by decreasing the PMTU by conn_ht_iphc_len and
6957	 * IPsec overhead if applied. Make sure to use the most recent
6958	 * IPsec information.
6959	 */
6960	mss = pmtu - connp->conn_ht_iphc_len - conn_ipsec_length(connp);
6961
6962	/*
6963	 * Nothing to change, so just return.
6964	 */
6965	if (mss == tcp->tcp_mss)
6966		return;
6967
6968	/*
6969	 * Currently, for ICMP errors, only PMTU decrease is handled.
6970	 */
6971	if (mss > tcp->tcp_mss && decrease_only)
6972		return;
6973
6974	DTRACE_PROBE2(tcp_update_pmtu, int32_t, tcp->tcp_mss, uint32_t, mss);
6975
6976	/*
6977	 * Update ixa_fragsize and ixa_pmtu.
6978	 */
6979	ixa->ixa_fragsize = ixa->ixa_pmtu = pmtu;
6980
6981	/*
6982	 * Adjust MSS and all relevant variables.
6983	 */
6984	tcp_mss_set(tcp, mss);
6985
6986	/*
6987	 * If the PMTU is below the min size maintained by IP, then ip_get_pmtu
6988	 * has set IXAF_PMTU_TOO_SMALL and cleared IXAF_PMTU_IPV4_DF. Since TCP
6989	 * has a (potentially different) min size we do the same. Make sure to
6990	 * clear IXAF_DONTFRAG, which is used by IP to decide whether to
6991	 * fragment the packet.
6992	 *
6993	 * LSO over IPv6 can not be fragmented. So need to disable LSO
6994	 * when IPv6 fragmentation is needed.
6995	 */
6996	if (mss < tcp->tcp_tcps->tcps_mss_min)
6997		ixaflags |= IXAF_PMTU_TOO_SMALL;
6998
6999	if (ixaflags & IXAF_PMTU_TOO_SMALL)
7000		ixaflags &= ~(IXAF_DONTFRAG | IXAF_PMTU_IPV4_DF);
7001
7002	if ((connp->conn_ipversion == IPV4_VERSION) &&
7003	    !(ixaflags & IXAF_PMTU_IPV4_DF)) {
7004		tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0;
7005	}
7006	ixa->ixa_flags = ixaflags;
7007}
7008
7009/*
7010 * Do slow start retransmission after ICMP errors of PMTU changes.
7011 */
7012static void
7013tcp_rexmit_after_error(tcp_t *tcp)
7014{
7015	/*
7016	 * All sent data has been acknowledged or no data left to send, just
7017	 * to return.
7018	 */
7019	if (!SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) ||
7020	    (tcp->tcp_xmit_head == NULL))
7021		return;
7022
7023	if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && (tcp->tcp_unsent == 0))
7024		tcp->tcp_rexmit_max = tcp->tcp_fss;
7025	else
7026		tcp->tcp_rexmit_max = tcp->tcp_snxt;
7027
7028	tcp->tcp_rexmit_nxt = tcp->tcp_suna;
7029	tcp->tcp_rexmit = B_TRUE;
7030	tcp->tcp_dupack_cnt = 0;
7031	tcp->tcp_snd_burst = TCP_CWND_SS;
7032	tcp_ss_rexmit(tcp);
7033}
7034
7035/*
7036 * tcp_icmp_error_ipv6 is called from tcp_icmp_input to process ICMPv6
7037 * error messages passed up by IP.
7038 * Assumes that IP has pulled up all the extension headers as well
7039 * as the ICMPv6 header.
7040 */
7041static void
7042tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, ip_recv_attr_t *ira)
7043{
7044	icmp6_t		*icmp6;
7045	ip6_t		*ip6h;
7046	uint16_t	iph_hdr_length = ira->ira_ip_hdr_length;
7047	tcpha_t		*tcpha;
7048	uint8_t		*nexthdrp;
7049	uint32_t	seg_seq;
7050
7051	/*
7052	 * Verify that we have a complete IP header.
7053	 */
7054	ASSERT((MBLKL(mp) >= sizeof (ip6_t)));
7055
7056	icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
7057	ip6h = (ip6_t *)&icmp6[1];
7058	/*
7059	 * Verify if we have a complete ICMP and inner IP header.
7060	 */
7061	if ((uchar_t *)&ip6h[1] > mp->b_wptr) {
7062noticmpv6:
7063		freemsg(mp);
7064		return;
7065	}
7066
7067	if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp))
7068		goto noticmpv6;
7069	tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length);
7070	/*
7071	 * Validate inner header. If the ULP is not IPPROTO_TCP or if we don't
7072	 * have at least ICMP_MIN_TCP_HDR bytes of  TCP header drop the
7073	 * packet.
7074	 */
7075	if ((*nexthdrp != IPPROTO_TCP) ||
7076	    ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) {
7077		goto noticmpv6;
7078	}
7079
7080	seg_seq = ntohl(tcpha->tha_seq);
7081	switch (icmp6->icmp6_type) {
7082	case ICMP6_PACKET_TOO_BIG:
7083		/*
7084		 * Update Path MTU, then try to send something out.
7085		 */
7086		tcp_update_pmtu(tcp, B_TRUE);
7087		tcp_rexmit_after_error(tcp);
7088		break;
7089	case ICMP6_DST_UNREACH:
7090		switch (icmp6->icmp6_code) {
7091		case ICMP6_DST_UNREACH_NOPORT:
7092			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
7093			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
7094			    (seg_seq == tcp->tcp_iss)) {
7095				(void) tcp_clean_death(tcp,
7096				    ECONNREFUSED, 8);
7097			}
7098			break;
7099		case ICMP6_DST_UNREACH_ADMIN:
7100		case ICMP6_DST_UNREACH_NOROUTE:
7101		case ICMP6_DST_UNREACH_BEYONDSCOPE:
7102		case ICMP6_DST_UNREACH_ADDR:
7103			/* Record the error in case we finally time out. */
7104			tcp->tcp_client_errno = EHOSTUNREACH;
7105			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
7106			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
7107			    (seg_seq == tcp->tcp_iss)) {
7108				if (tcp->tcp_listener != NULL &&
7109				    tcp->tcp_listener->tcp_syn_defense) {
7110					/*
7111					 * Ditch the half-open connection if we
7112					 * suspect a SYN attack is under way.
7113					 */
7114					(void) tcp_clean_death(tcp,
7115					    tcp->tcp_client_errno, 9);
7116				}
7117			}
7118
7119
7120			break;
7121		default:
7122			break;
7123		}
7124		break;
7125	case ICMP6_PARAM_PROB:
7126		/* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
7127		if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
7128		    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
7129		    (uchar_t *)nexthdrp) {
7130			if (tcp->tcp_state == TCPS_SYN_SENT ||
7131			    tcp->tcp_state == TCPS_SYN_RCVD) {
7132				(void) tcp_clean_death(tcp,
7133				    ECONNREFUSED, 10);
7134			}
7135			break;
7136		}
7137		break;
7138
7139	case ICMP6_TIME_EXCEEDED:
7140	default:
7141		break;
7142	}
7143	freemsg(mp);
7144}
7145
7146/*
7147 * Notify IP that we are having trouble with this connection.  IP should
7148 * make note so it can potentially use a different IRE.
7149 */
7150static void
7151tcp_ip_notify(tcp_t *tcp)
7152{
7153	conn_t		*connp = tcp->tcp_connp;
7154	ire_t		*ire;
7155
7156	/*
7157	 * Note: in the case of source routing we want to blow away the
7158	 * route to the first source route hop.
7159	 */
7160	ire = connp->conn_ixa->ixa_ire;
7161	if (ire != NULL && !(ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
7162		if (ire->ire_ipversion == IPV4_VERSION) {
7163			/*
7164			 * As per RFC 1122, we send an RTM_LOSING to inform
7165			 * routing protocols.
7166			 */
7167			ip_rts_change(RTM_LOSING, ire->ire_addr,
7168			    ire->ire_gateway_addr, ire->ire_mask,
7169			    connp->conn_laddr_v4,  0, 0, 0,
7170			    (RTA_DST | RTA_GATEWAY | RTA_NETMASK | RTA_IFA),
7171			    ire->ire_ipst);
7172		}
7173		(void) ire_no_good(ire);
7174	}
7175}
7176
7177#pragma inline(tcp_send_data)
7178
7179/*
7180 * Timer callback routine for keepalive probe.  We do a fake resend of
7181 * last ACKed byte.  Then set a timer using RTO.  When the timer expires,
7182 * check to see if we have heard anything from the other end for the last
7183 * RTO period.  If we have, set the timer to expire for another
7184 * tcp_keepalive_intrvl and check again.  If we have not, set a timer using
7185 * RTO << 1 and check again when it expires.  Keep exponentially increasing
7186 * the timeout if we have not heard from the other side.  If for more than
7187 * (tcp_ka_interval + tcp_ka_abort_thres) we have not heard anything,
7188 * kill the connection unless the keepalive abort threshold is 0.  In
7189 * that case, we will probe "forever."
7190 */
7191static void
7192tcp_keepalive_killer(void *arg)
7193{
7194	mblk_t	*mp;
7195	conn_t	*connp = (conn_t *)arg;
7196	tcp_t  	*tcp = connp->conn_tcp;
7197	int32_t	firetime;
7198	int32_t	idletime;
7199	int32_t	ka_intrvl;
7200	tcp_stack_t	*tcps = tcp->tcp_tcps;
7201
7202	tcp->tcp_ka_tid = 0;
7203
7204	if (tcp->tcp_fused)
7205		return;
7206
7207	BUMP_MIB(&tcps->tcps_mib, tcpTimKeepalive);
7208	ka_intrvl = tcp->tcp_ka_interval;
7209
7210	/*
7211	 * Keepalive probe should only be sent if the application has not
7212	 * done a close on the connection.
7213	 */
7214	if (tcp->tcp_state > TCPS_CLOSE_WAIT) {
7215		return;
7216	}
7217	/* Timer fired too early, restart it. */
7218	if (tcp->tcp_state < TCPS_ESTABLISHED) {
7219		tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer,
7220		    MSEC_TO_TICK(ka_intrvl));
7221		return;
7222	}
7223
7224	idletime = TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time);
7225	/*
7226	 * If we have not heard from the other side for a long
7227	 * time, kill the connection unless the keepalive abort
7228	 * threshold is 0.  In that case, we will probe "forever."
7229	 */
7230	if (tcp->tcp_ka_abort_thres != 0 &&
7231	    idletime > (ka_intrvl + tcp->tcp_ka_abort_thres)) {
7232		BUMP_MIB(&tcps->tcps_mib, tcpTimKeepaliveDrop);
7233		(void) tcp_clean_death(tcp, tcp->tcp_client_errno ?
7234		    tcp->tcp_client_errno : ETIMEDOUT, 11);
7235		return;
7236	}
7237
7238	if (tcp->tcp_snxt == tcp->tcp_suna &&
7239	    idletime >= ka_intrvl) {
7240		/* Fake resend of last ACKed byte. */
7241		mblk_t	*mp1 = allocb(1, BPRI_LO);
7242
7243		if (mp1 != NULL) {
7244			*mp1->b_wptr++ = '\0';
7245			mp = tcp_xmit_mp(tcp, mp1, 1, NULL, NULL,
7246			    tcp->tcp_suna - 1, B_FALSE, NULL, B_TRUE);
7247			freeb(mp1);
7248			/*
7249			 * if allocation failed, fall through to start the
7250			 * timer back.
7251			 */
7252			if (mp != NULL) {
7253				tcp_send_data(tcp, mp);
7254				BUMP_MIB(&tcps->tcps_mib,
7255				    tcpTimKeepaliveProbe);
7256				if (tcp->tcp_ka_last_intrvl != 0) {
7257					int max;
7258					/*
7259					 * We should probe again at least
7260					 * in ka_intrvl, but not more than
7261					 * tcp_rexmit_interval_max.
7262					 */
7263					max = tcps->tcps_rexmit_interval_max;
7264					firetime = MIN(ka_intrvl - 1,
7265					    tcp->tcp_ka_last_intrvl << 1);
7266					if (firetime > max)
7267						firetime = max;
7268				} else {
7269					firetime = tcp->tcp_rto;
7270				}
7271				tcp->tcp_ka_tid = TCP_TIMER(tcp,
7272				    tcp_keepalive_killer,
7273				    MSEC_TO_TICK(firetime));
7274				tcp->tcp_ka_last_intrvl = firetime;
7275				return;
7276			}
7277		}
7278	} else {
7279		tcp->tcp_ka_last_intrvl = 0;
7280	}
7281
7282	/* firetime can be negative if (mp1 == NULL || mp == NULL) */
7283	if ((firetime = ka_intrvl - idletime) < 0) {
7284		firetime = ka_intrvl;
7285	}
7286	tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer,
7287	    MSEC_TO_TICK(firetime));
7288}
7289
7290int
7291tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
7292{
7293	conn_t	*connp = tcp->tcp_connp;
7294	queue_t	*q = connp->conn_rq;
7295	int32_t	mss = tcp->tcp_mss;
7296	int	maxpsz;
7297
7298	if (TCP_IS_DETACHED(tcp))
7299		return (mss);
7300	if (tcp->tcp_fused) {
7301		maxpsz = tcp_fuse_maxpsz(tcp);
7302		mss = INFPSZ;
7303	} else if (tcp->tcp_maxpsz_multiplier == 0) {
7304		/*
7305		 * Set the sd_qn_maxpsz according to the socket send buffer
7306		 * size, and sd_maxblk to INFPSZ (-1).  This will essentially
7307		 * instruct the stream head to copyin user data into contiguous
7308		 * kernel-allocated buffers without breaking it up into smaller
7309		 * chunks.  We round up the buffer size to the nearest SMSS.
7310		 */
7311		maxpsz = MSS_ROUNDUP(connp->conn_sndbuf, mss);
7312		if (tcp->tcp_kssl_ctx == NULL)
7313			mss = INFPSZ;
7314		else
7315			mss = SSL3_MAX_RECORD_LEN;
7316	} else {
7317		/*
7318		 * Set sd_qn_maxpsz to approx half the (receivers) buffer
7319		 * (and a multiple of the mss).  This instructs the stream
7320		 * head to break down larger than SMSS writes into SMSS-
7321		 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
7322		 */
7323		maxpsz = tcp->tcp_maxpsz_multiplier * mss;
7324		if (maxpsz > connp->conn_sndbuf / 2) {
7325			maxpsz = connp->conn_sndbuf / 2;
7326			/* Round up to nearest mss */
7327			maxpsz = MSS_ROUNDUP(maxpsz, mss);
7328		}
7329	}
7330
7331	(void) proto_set_maxpsz(q, connp, maxpsz);
7332	if (!(IPCL_IS_NONSTR(connp)))
7333		connp->conn_wq->q_maxpsz = maxpsz;
7334	if (set_maxblk)
7335		(void) proto_set_tx_maxblk(q, connp, mss);
7336	return (mss);
7337}
7338
7339/*
7340 * Extract option values from a tcp header.  We put any found values into the
7341 * tcpopt struct and return a bitmask saying which options were found.
7342 */
7343static int
7344tcp_parse_options(tcpha_t *tcpha, tcp_opt_t *tcpopt)
7345{
7346	uchar_t		*endp;
7347	int		len;
7348	uint32_t	mss;
7349	uchar_t		*up = (uchar_t *)tcpha;
7350	int		found = 0;
7351	int32_t		sack_len;
7352	tcp_seq		sack_begin, sack_end;
7353	tcp_t		*tcp;
7354
7355	endp = up + TCP_HDR_LENGTH(tcpha);
7356	up += TCP_MIN_HEADER_LENGTH;
7357	while (up < endp) {
7358		len = endp - up;
7359		switch (*up) {
7360		case TCPOPT_EOL:
7361			break;
7362
7363		case TCPOPT_NOP:
7364			up++;
7365			continue;
7366
7367		case TCPOPT_MAXSEG:
7368			if (len < TCPOPT_MAXSEG_LEN ||
7369			    up[1] != TCPOPT_MAXSEG_LEN)
7370				break;
7371
7372			mss = BE16_TO_U16(up+2);
7373			/* Caller must handle tcp_mss_min and tcp_mss_max_* */
7374			tcpopt->tcp_opt_mss = mss;
7375			found |= TCP_OPT_MSS_PRESENT;
7376
7377			up += TCPOPT_MAXSEG_LEN;
7378			continue;
7379
7380		case TCPOPT_WSCALE:
7381			if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN)
7382				break;
7383
7384			if (up[2] > TCP_MAX_WINSHIFT)
7385				tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT;
7386			else
7387				tcpopt->tcp_opt_wscale = up[2];
7388			found |= TCP_OPT_WSCALE_PRESENT;
7389
7390			up += TCPOPT_WS_LEN;
7391			continue;
7392
7393		case TCPOPT_SACK_PERMITTED:
7394			if (len < TCPOPT_SACK_OK_LEN ||
7395			    up[1] != TCPOPT_SACK_OK_LEN)
7396				break;
7397			found |= TCP_OPT_SACK_OK_PRESENT;
7398			up += TCPOPT_SACK_OK_LEN;
7399			continue;
7400
7401		case TCPOPT_SACK:
7402			if (len <= 2 || up[1] <= 2 || len < up[1])
7403				break;
7404
7405			/* If TCP is not interested in SACK blks... */
7406			if ((tcp = tcpopt->tcp) == NULL) {
7407				up += up[1];
7408				continue;
7409			}
7410			sack_len = up[1] - TCPOPT_HEADER_LEN;
7411			up += TCPOPT_HEADER_LEN;
7412
7413			/*
7414			 * If the list is empty, allocate one and assume
7415			 * nothing is sack'ed.
7416			 */
7417			ASSERT(tcp->tcp_sack_info != NULL);
7418			if (tcp->tcp_notsack_list == NULL) {
7419				tcp_notsack_update(&(tcp->tcp_notsack_list),
7420				    tcp->tcp_suna, tcp->tcp_snxt,
7421				    &(tcp->tcp_num_notsack_blk),
7422				    &(tcp->tcp_cnt_notsack_list));
7423
7424				/*
7425				 * Make sure tcp_notsack_list is not NULL.
7426				 * This happens when kmem_alloc(KM_NOSLEEP)
7427				 * returns NULL.
7428				 */
7429				if (tcp->tcp_notsack_list == NULL) {
7430					up += sack_len;
7431					continue;
7432				}
7433				tcp->tcp_fack = tcp->tcp_suna;
7434			}
7435
7436			while (sack_len > 0) {
7437				if (up + 8 > endp) {
7438					up = endp;
7439					break;
7440				}
7441				sack_begin = BE32_TO_U32(up);
7442				up += 4;
7443				sack_end = BE32_TO_U32(up);
7444				up += 4;
7445				sack_len -= 8;
7446				/*
7447				 * Bounds checking.  Make sure the SACK
7448				 * info is within tcp_suna and tcp_snxt.
7449				 * If this SACK blk is out of bound, ignore
7450				 * it but continue to parse the following
7451				 * blks.
7452				 */
7453				if (SEQ_LEQ(sack_end, sack_begin) ||
7454				    SEQ_LT(sack_begin, tcp->tcp_suna) ||
7455				    SEQ_GT(sack_end, tcp->tcp_snxt)) {
7456					continue;
7457				}
7458				tcp_notsack_insert(&(tcp->tcp_notsack_list),
7459				    sack_begin, sack_end,
7460				    &(tcp->tcp_num_notsack_blk),
7461				    &(tcp->tcp_cnt_notsack_list));
7462				if (SEQ_GT(sack_end, tcp->tcp_fack)) {
7463					tcp->tcp_fack = sack_end;
7464				}
7465			}
7466			found |= TCP_OPT_SACK_PRESENT;
7467			continue;
7468
7469		case TCPOPT_TSTAMP:
7470			if (len < TCPOPT_TSTAMP_LEN ||
7471			    up[1] != TCPOPT_TSTAMP_LEN)
7472				break;
7473
7474			tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2);
7475			tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6);
7476
7477			found |= TCP_OPT_TSTAMP_PRESENT;
7478
7479			up += TCPOPT_TSTAMP_LEN;
7480			continue;
7481
7482		default:
7483			if (len <= 1 || len < (int)up[1] || up[1] == 0)
7484				break;
7485			up += up[1];
7486			continue;
7487		}
7488		break;
7489	}
7490	return (found);
7491}
7492
7493/*
7494 * Set the MSS associated with a particular tcp based on its current value,
7495 * and a new one passed in. Observe minimums and maximums, and reset other
7496 * state variables that we want to view as multiples of MSS.
7497 *
7498 * The value of MSS could be either increased or descreased.
7499 */
7500static void
7501tcp_mss_set(tcp_t *tcp, uint32_t mss)
7502{
7503	uint32_t	mss_max;
7504	tcp_stack_t	*tcps = tcp->tcp_tcps;
7505	conn_t		*connp = tcp->tcp_connp;
7506
7507	if (connp->conn_ipversion == IPV4_VERSION)
7508		mss_max = tcps->tcps_mss_max_ipv4;
7509	else
7510		mss_max = tcps->tcps_mss_max_ipv6;
7511
7512	if (mss < tcps->tcps_mss_min)
7513		mss = tcps->tcps_mss_min;
7514	if (mss > mss_max)
7515		mss = mss_max;
7516	/*
7517	 * Unless naglim has been set by our client to
7518	 * a non-mss value, force naglim to track mss.
7519	 * This can help to aggregate small writes.
7520	 */
7521	if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim)
7522		tcp->tcp_naglim = mss;
7523	/*
7524	 * TCP should be able to buffer at least 4 MSS data for obvious
7525	 * performance reason.
7526	 */
7527	if ((mss << 2) > connp->conn_sndbuf)
7528		connp->conn_sndbuf = mss << 2;
7529
7530	/*
7531	 * Set the send lowater to at least twice of MSS.
7532	 */
7533	if ((mss << 1) > connp->conn_sndlowat)
7534		connp->conn_sndlowat = mss << 1;
7535
7536	/*
7537	 * Update tcp_cwnd according to the new value of MSS. Keep the
7538	 * previous ratio to preserve the transmit rate.
7539	 */
7540	tcp->tcp_cwnd = (tcp->tcp_cwnd / tcp->tcp_mss) * mss;
7541	tcp->tcp_cwnd_cnt = 0;
7542
7543	tcp->tcp_mss = mss;
7544	(void) tcp_maxpsz_set(tcp, B_TRUE);
7545}
7546
7547/* For /dev/tcp aka AF_INET open */
7548static int
7549tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
7550{
7551	return (tcp_open(q, devp, flag, sflag, credp, B_FALSE));
7552}
7553
7554/* For /dev/tcp6 aka AF_INET6 open */
7555static int
7556tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
7557{
7558	return (tcp_open(q, devp, flag, sflag, credp, B_TRUE));
7559}
7560
7561static conn_t *
7562tcp_create_common(cred_t *credp, boolean_t isv6, boolean_t issocket,
7563    int *errorp)
7564{
7565	tcp_t		*tcp = NULL;
7566	conn_t		*connp;
7567	zoneid_t	zoneid;
7568	tcp_stack_t	*tcps;
7569	squeue_t	*sqp;
7570
7571	ASSERT(errorp != NULL);
7572	/*
7573	 * Find the proper zoneid and netstack.
7574	 */
7575	/*
7576	 * Special case for install: miniroot needs to be able to
7577	 * access files via NFS as though it were always in the
7578	 * global zone.
7579	 */
7580	if (credp == kcred && nfs_global_client_only != 0) {
7581		zoneid = GLOBAL_ZONEID;
7582		tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->
7583		    netstack_tcp;
7584		ASSERT(tcps != NULL);
7585	} else {
7586		netstack_t *ns;
7587
7588		ns = netstack_find_by_cred(credp);
7589		ASSERT(ns != NULL);
7590		tcps = ns->netstack_tcp;
7591		ASSERT(tcps != NULL);
7592
7593		/*
7594		 * For exclusive stacks we set the zoneid to zero
7595		 * to make TCP operate as if in the global zone.
7596		 */
7597		if (tcps->tcps_netstack->netstack_stackid !=
7598		    GLOBAL_NETSTACKID)
7599			zoneid = GLOBAL_ZONEID;
7600		else
7601			zoneid = crgetzoneid(credp);
7602	}
7603
7604	sqp = IP_SQUEUE_GET((uint_t)gethrtime());
7605	connp = (conn_t *)tcp_get_conn(sqp, tcps);
7606	/*
7607	 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
7608	 * so we drop it by one.
7609	 */
7610	netstack_rele(tcps->tcps_netstack);
7611	if (connp == NULL) {
7612		*errorp = ENOSR;
7613		return (NULL);
7614	}
7615	ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto);
7616
7617	connp->conn_sqp = sqp;
7618	connp->conn_initial_sqp = connp->conn_sqp;
7619	connp->conn_ixa->ixa_sqp = connp->conn_sqp;
7620	tcp = connp->conn_tcp;
7621
7622	/*
7623	 * Besides asking IP to set the checksum for us, have conn_ip_output
7624	 * to do the following checks when necessary:
7625	 *
7626	 * IXAF_VERIFY_SOURCE: drop packets when our outer source goes invalid
7627	 * IXAF_VERIFY_PMTU: verify PMTU changes
7628	 * IXAF_VERIFY_LSO: verify LSO capability changes
7629	 */
7630	connp->conn_ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
7631	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO;
7632
7633	if (!tcps->tcps_dev_flow_ctl)
7634		connp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;
7635
7636	if (isv6) {
7637		connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
7638		connp->conn_ipversion = IPV6_VERSION;
7639		connp->conn_family = AF_INET6;
7640		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
7641		connp->conn_default_ttl = tcps->tcps_ipv6_hoplimit;
7642	} else {
7643		connp->conn_ipversion = IPV4_VERSION;
7644		connp->conn_family = AF_INET;
7645		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
7646		connp->conn_default_ttl = tcps->tcps_ipv4_ttl;
7647	}
7648	connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl;
7649
7650	crhold(credp);
7651	connp->conn_cred = credp;
7652	connp->conn_cpid = curproc->p_pid;
7653	connp->conn_open_time = lbolt64;
7654
7655	connp->conn_zoneid = zoneid;
7656	/* conn_allzones can not be set this early, hence no IPCL_ZONEID */
7657	connp->conn_ixa->ixa_zoneid = zoneid;
7658	connp->conn_mlp_type = mlptSingle;
7659	ASSERT(connp->conn_netstack == tcps->tcps_netstack);
7660	ASSERT(tcp->tcp_tcps == tcps);
7661
7662	/*
7663	 * If the caller has the process-wide flag set, then default to MAC
7664	 * exempt mode.  This allows read-down to unlabeled hosts.
7665	 */
7666	if (getpflags(NET_MAC_AWARE, credp) != 0)
7667		connp->conn_mac_mode = CONN_MAC_AWARE;
7668
7669	connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);
7670
7671	if (issocket) {
7672		tcp->tcp_issocket = 1;
7673	}
7674
7675	connp->conn_rcvbuf = tcps->tcps_recv_hiwat;
7676	connp->conn_sndbuf = tcps->tcps_xmit_hiwat;
7677	connp->conn_sndlowat = tcps->tcps_xmit_lowat;
7678	connp->conn_so_type = SOCK_STREAM;
7679	connp->conn_wroff = connp->conn_ht_iphc_allocated +
7680	    tcps->tcps_wroff_xtra;
7681
7682	SOCK_CONNID_INIT(tcp->tcp_connid);
7683	tcp->tcp_state = TCPS_IDLE;
7684	tcp_init_values(tcp);
7685	return (connp);
7686}
7687
7688static int
7689tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
7690    boolean_t isv6)
7691{
7692	tcp_t		*tcp = NULL;
7693	conn_t		*connp = NULL;
7694	int		err;
7695	vmem_t		*minor_arena = NULL;
7696	dev_t		conn_dev;
7697	boolean_t	issocket;
7698
7699	if (q->q_ptr != NULL)
7700		return (0);
7701
7702	if (sflag == MODOPEN)
7703		return (EINVAL);
7704
7705	if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
7706	    ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
7707		minor_arena = ip_minor_arena_la;
7708	} else {
7709		/*
7710		 * Either minor numbers in the large arena were exhausted
7711		 * or a non socket application is doing the open.
7712		 * Try to allocate from the small arena.
7713		 */
7714		if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) {
7715			return (EBUSY);
7716		}
7717		minor_arena = ip_minor_arena_sa;
7718	}
7719
7720	ASSERT(minor_arena != NULL);
7721
7722	*devp = makedevice(getmajor(*devp), (minor_t)conn_dev);
7723
7724	if (flag & SO_FALLBACK) {
7725		/*
7726		 * Non streams socket needs a stream to fallback to
7727		 */
7728		RD(q)->q_ptr = (void *)conn_dev;
7729		WR(q)->q_qinfo = &tcp_fallback_sock_winit;
7730		WR(q)->q_ptr = (void *)minor_arena;
7731		qprocson(q);
7732		return (0);
7733	} else if (flag & SO_ACCEPTOR) {
7734		q->q_qinfo = &tcp_acceptor_rinit;
7735		/*
7736		 * the conn_dev and minor_arena will be subsequently used by
7737		 * tcp_tli_accept() and tcp_tpi_close_accept() to figure out
7738		 * the minor device number for this connection from the q_ptr.
7739		 */
7740		RD(q)->q_ptr = (void *)conn_dev;
7741		WR(q)->q_qinfo = &tcp_acceptor_winit;
7742		WR(q)->q_ptr = (void *)minor_arena;
7743		qprocson(q);
7744		return (0);
7745	}
7746
7747	issocket = flag & SO_SOCKSTR;
7748	connp = tcp_create_common(credp, isv6, issocket, &err);
7749
7750	if (connp == NULL) {
7751		inet_minor_free(minor_arena, conn_dev);
7752		q->q_ptr = WR(q)->q_ptr = NULL;
7753		return (err);
7754	}
7755
7756	connp->conn_rq = q;
7757	connp->conn_wq = WR(q);
7758	q->q_ptr = WR(q)->q_ptr = connp;
7759
7760	connp->conn_dev = conn_dev;
7761	connp->conn_minor_arena = minor_arena;
7762
7763	ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6);
7764	ASSERT(WR(q)->q_qinfo == &tcp_winit);
7765
7766	tcp = connp->conn_tcp;
7767
7768	if (issocket) {
7769		WR(q)->q_qinfo = &tcp_sock_winit;
7770	} else {
7771#ifdef  _ILP32
7772		tcp->tcp_acceptor_id = (t_uscalar_t)RD(q);
7773#else
7774		tcp->tcp_acceptor_id = conn_dev;
7775#endif  /* _ILP32 */
7776		tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
7777	}
7778
7779	/*
7780	 * Put the ref for TCP. Ref for IP was already put
7781	 * by ipcl_conn_create. Also Make the conn_t globally
7782	 * visible to walkers
7783	 */
7784	mutex_enter(&connp->conn_lock);
7785	CONN_INC_REF_LOCKED(connp);
7786	ASSERT(connp->conn_ref == 2);
7787	connp->conn_state_flags &= ~CONN_INCIPIENT;
7788	mutex_exit(&connp->conn_lock);
7789
7790	qprocson(q);
7791	return (0);
7792}
7793
7794/*
7795 * Some TCP options can be "set" by requesting them in the option
7796 * buffer. This is needed for XTI feature test though we do not
7797 * allow it in general. We interpret that this mechanism is more
7798 * applicable to OSI protocols and need not be allowed in general.
7799 * This routine filters out options for which it is not allowed (most)
7800 * and lets through those (few) for which it is. [ The XTI interface
7801 * test suite specifics will imply that any XTI_GENERIC level XTI_* if
7802 * ever implemented will have to be allowed here ].
7803 */
7804static boolean_t
7805tcp_allow_connopt_set(int level, int name)
7806{
7807
7808	switch (level) {
7809	case IPPROTO_TCP:
7810		switch (name) {
7811		case TCP_NODELAY:
7812			return (B_TRUE);
7813		default:
7814			return (B_FALSE);
7815		}
7816		/*NOTREACHED*/
7817	default:
7818		return (B_FALSE);
7819	}
7820	/*NOTREACHED*/
7821}
7822
7823/*
7824 * This routine gets default values of certain options whose default
7825 * values are maintained by protocol specific code
7826 */
7827/* ARGSUSED */
7828int
7829tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr)
7830{
7831	int32_t	*i1 = (int32_t *)ptr;
7832	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;
7833
7834	switch (level) {
7835	case IPPROTO_TCP:
7836		switch (name) {
7837		case TCP_NOTIFY_THRESHOLD:
7838			*i1 = tcps->tcps_ip_notify_interval;
7839			break;
7840		case TCP_ABORT_THRESHOLD:
7841			*i1 = tcps->tcps_ip_abort_interval;
7842			break;
7843		case TCP_CONN_NOTIFY_THRESHOLD:
7844			*i1 = tcps->tcps_ip_notify_cinterval;
7845			break;
7846		case TCP_CONN_ABORT_THRESHOLD:
7847			*i1 = tcps->tcps_ip_abort_cinterval;
7848			break;
7849		default:
7850			return (-1);
7851		}
7852		break;
7853	case IPPROTO_IP:
7854		switch (name) {
7855		case IP_TTL:
7856			*i1 = tcps->tcps_ipv4_ttl;
7857			break;
7858		default:
7859			return (-1);
7860		}
7861		break;
7862	case IPPROTO_IPV6:
7863		switch (name) {
7864		case IPV6_UNICAST_HOPS:
7865			*i1 = tcps->tcps_ipv6_hoplimit;
7866			break;
7867		default:
7868			return (-1);
7869		}
7870		break;
7871	default:
7872		return (-1);
7873	}
7874	return (sizeof (int));
7875}
7876
7877/*
7878 * TCP routine to get the values of options.
7879 */
7880static int
7881tcp_opt_get(conn_t *connp, int level, int name, uchar_t *ptr)
7882{
7883	int		*i1 = (int *)ptr;
7884	tcp_t		*tcp = connp->conn_tcp;
7885	conn_opt_arg_t	coas;
7886	int		retval;
7887
7888	coas.coa_connp = connp;
7889	coas.coa_ixa = connp->conn_ixa;
7890	coas.coa_ipp = &connp->conn_xmit_ipp;
7891	coas.coa_ancillary = B_FALSE;
7892	coas.coa_changed = 0;
7893
7894	switch (level) {
7895	case SOL_SOCKET:
7896		switch (name) {
7897		case SO_SND_COPYAVOID:
7898			*i1 = tcp->tcp_snd_zcopy_on ?
7899			    SO_SND_COPYAVOID : 0;
7900			return (sizeof (int));
7901		case SO_ACCEPTCONN:
7902			*i1 = (tcp->tcp_state == TCPS_LISTEN);
7903			return (sizeof (int));
7904		}
7905		break;
7906	case IPPROTO_TCP:
7907		switch (name) {
7908		case TCP_NODELAY:
7909			*i1 = (tcp->tcp_naglim == 1) ? TCP_NODELAY : 0;
7910			return (sizeof (int));
7911		case TCP_MAXSEG:
7912			*i1 = tcp->tcp_mss;
7913			return (sizeof (int));
7914		case TCP_NOTIFY_THRESHOLD:
7915			*i1 = (int)tcp->tcp_first_timer_threshold;
7916			return (sizeof (int));
7917		case TCP_ABORT_THRESHOLD:
7918			*i1 = tcp->tcp_second_timer_threshold;
7919			return (sizeof (int));
7920		case TCP_CONN_NOTIFY_THRESHOLD:
7921			*i1 = tcp->tcp_first_ctimer_threshold;
7922			return (sizeof (int));
7923		case TCP_CONN_ABORT_THRESHOLD:
7924			*i1 = tcp->tcp_second_ctimer_threshold;
7925			return (sizeof (int));
7926		case TCP_INIT_CWND:
7927			*i1 = tcp->tcp_init_cwnd;
7928			return (sizeof (int));
7929		case TCP_KEEPALIVE_THRESHOLD:
7930			*i1 = tcp->tcp_ka_interval;
7931			return (sizeof (int));
7932		case TCP_KEEPALIVE_ABORT_THRESHOLD:
7933			*i1 = tcp->tcp_ka_abort_thres;
7934			return (sizeof (int));
7935		case TCP_CORK:
7936			*i1 = tcp->tcp_cork;
7937			return (sizeof (int));
7938		}
7939		break;
7940	case IPPROTO_IP:
7941		if (connp->conn_family != AF_INET)
7942			return (-1);
7943		switch (name) {
7944		case IP_OPTIONS:
7945		case T_IP_OPTIONS:
7946			/* Caller ensures enough space */
7947			return (ip_opt_get_user(connp, ptr));
7948		default:
7949			break;
7950		}
7951		break;
7952
7953	case IPPROTO_IPV6:
7954		/*
7955		 * IPPROTO_IPV6 options are only supported for sockets
7956		 * that are using IPv6 on the wire.
7957		 */
7958		if (connp->conn_ipversion != IPV6_VERSION) {
7959			return (-1);
7960		}
7961		switch (name) {
7962		case IPV6_PATHMTU:
7963			if (tcp->tcp_state < TCPS_ESTABLISHED)
7964				return (-1);
7965			break;
7966		}
7967		break;
7968	}
7969	mutex_enter(&connp->conn_lock);
7970	retval = conn_opt_get(&coas, level, name, ptr);
7971	mutex_exit(&connp->conn_lock);
7972	return (retval);
7973}
7974
7975/*
7976 * TCP routine to get the values of options.
7977 */
7978int
7979tcp_tpi_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
7980{
7981	return (tcp_opt_get(Q_TO_CONN(q), level, name, ptr));
7982}
7983
7984/* returns UNIX error, the optlen is a value-result arg */
7985int
7986tcp_getsockopt(sock_lower_handle_t proto_handle, int level, int option_name,
7987    void *optvalp, socklen_t *optlen, cred_t *cr)
7988{
7989	conn_t		*connp = (conn_t *)proto_handle;
7990	squeue_t	*sqp = connp->conn_sqp;
7991	int		error;
7992	t_uscalar_t	max_optbuf_len;
7993	void		*optvalp_buf;
7994	int		len;
7995
7996	ASSERT(connp->conn_upper_handle != NULL);
7997
7998	error = proto_opt_check(level, option_name, *optlen, &max_optbuf_len,
7999	    tcp_opt_obj.odb_opt_des_arr,
8000	    tcp_opt_obj.odb_opt_arr_cnt,
8001	    B_FALSE, B_TRUE, cr);
8002	if (error != 0) {
8003		if (error < 0) {
8004			error = proto_tlitosyserr(-error);
8005		}
8006		return (error);
8007	}
8008
8009	optvalp_buf = kmem_alloc(max_optbuf_len, KM_SLEEP);
8010
8011	error = squeue_synch_enter(sqp, connp, NULL);
8012	if (error == ENOMEM) {
8013		kmem_free(optvalp_buf, max_optbuf_len);
8014		return (ENOMEM);
8015	}
8016
8017	len = tcp_opt_get(connp, level, option_name, optvalp_buf);
8018	squeue_synch_exit(sqp, connp);
8019
8020	if (len == -1) {
8021		kmem_free(optvalp_buf, max_optbuf_len);
8022		return (EINVAL);
8023	}
8024
8025	/*
8026	 * update optlen and copy option value
8027	 */
8028	t_uscalar_t size = MIN(len, *optlen);
8029
8030	bcopy(optvalp_buf, optvalp, size);
8031	bcopy(&size, optlen, sizeof (size));
8032
8033	kmem_free(optvalp_buf, max_optbuf_len);
8034	return (0);
8035}
8036
8037/*
8038 * We declare as 'int' rather than 'void' to satisfy pfi_t arg requirements.
8039 * Parameters are assumed to be verified by the caller.
8040 */
8041/* ARGSUSED */
8042int
8043tcp_opt_set(conn_t *connp, uint_t optset_context, int level, int name,
8044    uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
8045    void *thisdg_attrs, cred_t *cr)
8046{
8047	tcp_t	*tcp = connp->conn_tcp;
8048	int	*i1 = (int *)invalp;
8049	boolean_t onoff = (*i1 == 0) ? 0 : 1;
8050	boolean_t checkonly;
8051	int	reterr;
8052	tcp_stack_t	*tcps = tcp->tcp_tcps;
8053	conn_opt_arg_t	coas;
8054
8055	coas.coa_connp = connp;
8056	coas.coa_ixa = connp->conn_ixa;
8057	coas.coa_ipp = &connp->conn_xmit_ipp;
8058	coas.coa_ancillary = B_FALSE;
8059	coas.coa_changed = 0;
8060
8061	switch (optset_context) {
8062	case SETFN_OPTCOM_CHECKONLY:
8063		checkonly = B_TRUE;
8064		/*
8065		 * Note: Implies T_CHECK semantics for T_OPTCOM_REQ
8066		 * inlen != 0 implies value supplied and
8067		 * 	we have to "pretend" to set it.
8068		 * inlen == 0 implies that there is no
8069		 * 	value part in T_CHECK request and just validation
8070		 * done elsewhere should be enough, we just return here.
8071		 */
8072		if (inlen == 0) {
8073			*outlenp = 0;
8074			return (0);
8075		}
8076		break;
8077	case SETFN_OPTCOM_NEGOTIATE:
8078		checkonly = B_FALSE;
8079		break;
8080	case SETFN_UD_NEGOTIATE: /* error on conn-oriented transports ? */
8081	case SETFN_CONN_NEGOTIATE:
8082		checkonly = B_FALSE;
8083		/*
8084		 * Negotiating local and "association-related" options
8085		 * from other (T_CONN_REQ, T_CONN_RES,T_UNITDATA_REQ)
8086		 * primitives is allowed by XTI, but we choose
8087		 * to not implement this style negotiation for Internet
8088		 * protocols (We interpret it is a must for OSI world but
8089		 * optional for Internet protocols) for all options.
8090		 * [ Will do only for the few options that enable test
8091		 * suites that our XTI implementation of this feature
8092		 * works for transports that do allow it ]
8093		 */
8094		if (!tcp_allow_connopt_set(level, name)) {
8095			*outlenp = 0;
8096			return (EINVAL);
8097		}
8098		break;
8099	default:
8100		/*
8101		 * We should never get here
8102		 */
8103		*outlenp = 0;
8104		return (EINVAL);
8105	}
8106
8107	ASSERT((optset_context != SETFN_OPTCOM_CHECKONLY) ||
8108	    (optset_context == SETFN_OPTCOM_CHECKONLY && inlen != 0));
8109
8110	/*
8111	 * For TCP, we should have no ancillary data sent down
8112	 * (sendmsg isn't supported for SOCK_STREAM), so thisdg_attrs
8113	 * has to be zero.
8114	 */
8115	ASSERT(thisdg_attrs == NULL);
8116
8117	/*
8118	 * For fixed length options, no sanity check
8119	 * of passed in length is done. It is assumed *_optcom_req()
8120	 * routines do the right thing.
8121	 */
8122	switch (level) {
8123	case SOL_SOCKET:
8124		switch (name) {
8125		case SO_KEEPALIVE:
8126			if (checkonly) {
8127				/* check only case */
8128				break;
8129			}
8130
8131			if (!onoff) {
8132				if (connp->conn_keepalive) {
8133					if (tcp->tcp_ka_tid != 0) {
8134						(void) TCP_TIMER_CANCEL(tcp,
8135						    tcp->tcp_ka_tid);
8136						tcp->tcp_ka_tid = 0;
8137					}
8138					connp->conn_keepalive = 0;
8139				}
8140				break;
8141			}
8142			if (!connp->conn_keepalive) {
8143				/* Crank up the keepalive timer */
8144				tcp->tcp_ka_last_intrvl = 0;
8145				tcp->tcp_ka_tid = TCP_TIMER(tcp,
8146				    tcp_keepalive_killer,
8147				    MSEC_TO_TICK(tcp->tcp_ka_interval));
8148				connp->conn_keepalive = 1;
8149			}
8150			break;
8151		case SO_SNDBUF: {
8152			if (*i1 > tcps->tcps_max_buf) {
8153				*outlenp = 0;
8154				return (ENOBUFS);
8155			}
8156			if (checkonly)
8157				break;
8158
8159			connp->conn_sndbuf = *i1;
8160			if (tcps->tcps_snd_lowat_fraction != 0) {
8161				connp->conn_sndlowat = connp->conn_sndbuf /
8162				    tcps->tcps_snd_lowat_fraction;
8163			}
8164			(void) tcp_maxpsz_set(tcp, B_TRUE);
8165			/*
8166			 * If we are flow-controlled, recheck the condition.
8167			 * There are apps that increase SO_SNDBUF size when
8168			 * flow-controlled (EWOULDBLOCK), and expect the flow
8169			 * control condition to be lifted right away.
8170			 */
8171			mutex_enter(&tcp->tcp_non_sq_lock);
8172			if (tcp->tcp_flow_stopped &&
8173			    TCP_UNSENT_BYTES(tcp) < connp->conn_sndbuf) {
8174				tcp_clrqfull(tcp);
8175			}
8176			mutex_exit(&tcp->tcp_non_sq_lock);
8177			*outlenp = inlen;
8178			return (0);
8179		}
8180		case SO_RCVBUF:
8181			if (*i1 > tcps->tcps_max_buf) {
8182				*outlenp = 0;
8183				return (ENOBUFS);
8184			}
8185			/* Silently ignore zero */
8186			if (!checkonly && *i1 != 0) {
8187				*i1 = MSS_ROUNDUP(*i1, tcp->tcp_mss);
8188				(void) tcp_rwnd_set(tcp, *i1);
8189			}
8190			/*
8191			 * XXX should we return the rwnd here
8192			 * and tcp_opt_get ?
8193			 */
8194			*outlenp = inlen;
8195			return (0);
8196		case SO_SND_COPYAVOID:
8197			if (!checkonly) {
8198				if (tcp->tcp_loopback ||
8199				    (tcp->tcp_kssl_ctx != NULL) ||
8200				    (onoff != 1) || !tcp_zcopy_check(tcp)) {
8201					*outlenp = 0;
8202					return (EOPNOTSUPP);
8203				}
8204				tcp->tcp_snd_zcopy_aware = 1;
8205			}
8206			*outlenp = inlen;
8207			return (0);
8208		}
8209		break;
8210	case IPPROTO_TCP:
8211		switch (name) {
8212		case TCP_NODELAY:
8213			if (!checkonly)
8214				tcp->tcp_naglim = *i1 ? 1 : tcp->tcp_mss;
8215			break;
8216		case TCP_NOTIFY_THRESHOLD:
8217			if (!checkonly)
8218				tcp->tcp_first_timer_threshold = *i1;
8219			break;
8220		case TCP_ABORT_THRESHOLD:
8221			if (!checkonly)
8222				tcp->tcp_second_timer_threshold = *i1;
8223			break;
8224		case TCP_CONN_NOTIFY_THRESHOLD:
8225			if (!checkonly)
8226				tcp->tcp_first_ctimer_threshold = *i1;
8227			break;
8228		case TCP_CONN_ABORT_THRESHOLD:
8229			if (!checkonly)
8230				tcp->tcp_second_ctimer_threshold = *i1;
8231			break;
8232		case TCP_RECVDSTADDR:
8233			if (tcp->tcp_state > TCPS_LISTEN) {
8234				*outlenp = 0;
8235				return (EOPNOTSUPP);
8236			}
8237			/* Setting done in conn_opt_set */
8238			break;
8239		case TCP_INIT_CWND: {
8240			uint32_t init_cwnd = *((uint32_t *)invalp);
8241
8242			if (checkonly)
8243				break;
8244
8245			/*
8246			 * Only allow socket with network configuration
8247			 * privilege to set the initial cwnd to be larger
8248			 * than allowed by RFC 3390.
8249			 */
8250			if (init_cwnd <= MIN(4, MAX(2, 4380 / tcp->tcp_mss))) {
8251				tcp->tcp_init_cwnd = init_cwnd;
8252				break;
8253			}
8254			if ((reterr = secpolicy_ip_config(cr, B_TRUE)) != 0) {
8255				*outlenp = 0;
8256				return (reterr);
8257			}
8258			if (init_cwnd > TCP_MAX_INIT_CWND) {
8259				*outlenp = 0;
8260				return (EINVAL);
8261			}
8262			tcp->tcp_init_cwnd = init_cwnd;
8263			break;
8264		}
8265		case TCP_KEEPALIVE_THRESHOLD:
8266			if (checkonly)
8267				break;
8268
8269			if (*i1 < tcps->tcps_keepalive_interval_low ||
8270			    *i1 > tcps->tcps_keepalive_interval_high) {
8271				*outlenp = 0;
8272				return (EINVAL);
8273			}
8274			if (*i1 != tcp->tcp_ka_interval) {
8275				tcp->tcp_ka_interval = *i1;
8276				/*
8277				 * Check if we need to restart the
8278				 * keepalive timer.
8279				 */
8280				if (tcp->tcp_ka_tid != 0) {
8281					ASSERT(connp->conn_keepalive);
8282					(void) TCP_TIMER_CANCEL(tcp,
8283					    tcp->tcp_ka_tid);
8284					tcp->tcp_ka_last_intrvl = 0;
8285					tcp->tcp_ka_tid = TCP_TIMER(tcp,
8286					    tcp_keepalive_killer,
8287					    MSEC_TO_TICK(tcp->tcp_ka_interval));
8288				}
8289			}
8290			break;
8291		case TCP_KEEPALIVE_ABORT_THRESHOLD:
8292			if (!checkonly) {
8293				if (*i1 <
8294				    tcps->tcps_keepalive_abort_interval_low ||
8295				    *i1 >
8296				    tcps->tcps_keepalive_abort_interval_high) {
8297					*outlenp = 0;
8298					return (EINVAL);
8299				}
8300				tcp->tcp_ka_abort_thres = *i1;
8301			}
8302			break;
8303		case TCP_CORK:
8304			if (!checkonly) {
8305				/*
8306				 * if tcp->tcp_cork was set and is now
8307				 * being unset, we have to make sure that
8308				 * the remaining data gets sent out. Also
8309				 * unset tcp->tcp_cork so that tcp_wput_data()
8310				 * can send data even if it is less than mss
8311				 */
8312				if (tcp->tcp_cork && onoff == 0 &&
8313				    tcp->tcp_unsent > 0) {
8314					tcp->tcp_cork = B_FALSE;
8315					tcp_wput_data(tcp, NULL, B_FALSE);
8316				}
8317				tcp->tcp_cork = onoff;
8318			}
8319			break;
8320		default:
8321			break;
8322		}
8323		break;
8324	case IPPROTO_IP:
8325		if (connp->conn_family != AF_INET) {
8326			*outlenp = 0;
8327			return (EINVAL);
8328		}
8329		switch (name) {
8330		case IP_SEC_OPT:
8331			/*
8332			 * We should not allow policy setting after
8333			 * we start listening for connections.
8334			 */
8335			if (tcp->tcp_state == TCPS_LISTEN) {
8336				return (EINVAL);
8337			}
8338			break;
8339		}
8340		break;
8341	case IPPROTO_IPV6:
8342		/*
8343		 * IPPROTO_IPV6 options are only supported for sockets
8344		 * that are using IPv6 on the wire.
8345		 */
8346		if (connp->conn_ipversion != IPV6_VERSION) {
8347			*outlenp = 0;
8348			return (EINVAL);
8349		}
8350
8351		switch (name) {
8352		case IPV6_RECVPKTINFO:
8353			if (!checkonly) {
8354				/* Force it to be sent up with the next msg */
8355				tcp->tcp_recvifindex = 0;
8356			}
8357			break;
8358		case IPV6_RECVTCLASS:
8359			if (!checkonly) {
8360				/* Force it to be sent up with the next msg */
8361				tcp->tcp_recvtclass = 0xffffffffU;
8362			}
8363			break;
8364		case IPV6_RECVHOPLIMIT:
8365			if (!checkonly) {
8366				/* Force it to be sent up with the next msg */
8367				tcp->tcp_recvhops = 0xffffffffU;
8368			}
8369			break;
8370		case IPV6_PKTINFO:
8371			/* This is an extra check for TCP */
8372			if (inlen == sizeof (struct in6_pktinfo)) {
8373				struct in6_pktinfo *pkti;
8374
8375				pkti = (struct in6_pktinfo *)invalp;
8376				/*
8377				 * RFC 3542 states that ipi6_addr must be
8378				 * the unspecified address when setting the
8379				 * IPV6_PKTINFO sticky socket option on a
8380				 * TCP socket.
8381				 */
8382				if (!IN6_IS_ADDR_UNSPECIFIED(&pkti->ipi6_addr))
8383					return (EINVAL);
8384			}
8385			break;
8386		case IPV6_SEC_OPT:
8387			/*
8388			 * We should not allow policy setting after
8389			 * we start listening for connections.
8390			 */
8391			if (tcp->tcp_state == TCPS_LISTEN) {
8392				return (EINVAL);
8393			}
8394			break;
8395		}
8396		break;
8397	}
8398	reterr = conn_opt_set(&coas, level, name, inlen, invalp,
8399	    checkonly, cr);
8400	if (reterr != 0) {
8401		*outlenp = 0;
8402		return (reterr);
8403	}
8404
8405	/*
8406	 * Common case of OK return with outval same as inval
8407	 */
8408	if (invalp != outvalp) {
8409		/* don't trust bcopy for identical src/dst */
8410		(void) bcopy(invalp, outvalp, inlen);
8411	}
8412	*outlenp = inlen;
8413
8414	if (coas.coa_changed & COA_HEADER_CHANGED) {
8415		reterr = tcp_build_hdrs(tcp);
8416		if (reterr != 0)
8417			return (reterr);
8418	}
8419	if (coas.coa_changed & COA_ROUTE_CHANGED) {
8420		in6_addr_t nexthop;
8421
8422		/*
8423		 * If we are connected we re-cache the information.
8424		 * We ignore errors to preserve BSD behavior.
8425		 * Note that we don't redo IPsec policy lookup here
8426		 * since the final destination (or source) didn't change.
8427		 */
8428		ip_attr_nexthop(&connp->conn_xmit_ipp, connp->conn_ixa,
8429		    &connp->conn_faddr_v6, &nexthop);
8430
8431		if (!IN6_IS_ADDR_UNSPECIFIED(&connp->conn_faddr_v6) &&
8432		    !IN6_IS_ADDR_V4MAPPED_ANY(&connp->conn_faddr_v6)) {
8433			(void) ip_attr_connect(connp, connp->conn_ixa,
8434			    &connp->conn_laddr_v6, &connp->conn_faddr_v6,
8435			    &nexthop, connp->conn_fport, NULL, NULL,
8436			    IPDF_VERIFY_DST);
8437		}
8438	}
8439	if ((coas.coa_changed & COA_SNDBUF_CHANGED) && !IPCL_IS_NONSTR(connp)) {
8440		connp->conn_wq->q_hiwat = connp->conn_sndbuf;
8441	}
8442	if (coas.coa_changed & COA_WROFF_CHANGED) {
8443		connp->conn_wroff = connp->conn_ht_iphc_allocated +
8444		    tcps->tcps_wroff_xtra;
8445		(void) proto_set_tx_wroff(connp->conn_rq, connp,
8446		    connp->conn_wroff);
8447	}
8448	if (coas.coa_changed & COA_OOBINLINE_CHANGED) {
8449		if (IPCL_IS_NONSTR(connp))
8450			proto_set_rx_oob_opt(connp, onoff);
8451	}
8452	return (0);
8453}
8454
8455/* ARGSUSED */
8456int
8457tcp_tpi_opt_set(queue_t *q, uint_t optset_context, int level, int name,
8458    uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
8459    void *thisdg_attrs, cred_t *cr)
8460{
8461	conn_t	*connp =  Q_TO_CONN(q);
8462
8463	return (tcp_opt_set(connp, optset_context, level, name, inlen, invalp,
8464	    outlenp, outvalp, thisdg_attrs, cr));
8465}
8466
8467int
8468tcp_setsockopt(sock_lower_handle_t proto_handle, int level, int option_name,
8469    const void *optvalp, socklen_t optlen, cred_t *cr)
8470{
8471	conn_t		*connp = (conn_t *)proto_handle;
8472	squeue_t	*sqp = connp->conn_sqp;
8473	int		error;
8474
8475	ASSERT(connp->conn_upper_handle != NULL);
8476	/*
8477	 * Entering the squeue synchronously can result in a context switch,
8478	 * which can cause a rather sever performance degradation. So we try to
8479	 * handle whatever options we can without entering the squeue.
8480	 */
8481	if (level == IPPROTO_TCP) {
8482		switch (option_name) {
8483		case TCP_NODELAY:
8484			if (optlen != sizeof (int32_t))
8485				return (EINVAL);
8486			mutex_enter(&connp->conn_tcp->tcp_non_sq_lock);
8487			connp->conn_tcp->tcp_naglim = *(int *)optvalp ? 1 :
8488			    connp->conn_tcp->tcp_mss;
8489			mutex_exit(&connp->conn_tcp->tcp_non_sq_lock);
8490			return (0);
8491		default:
8492			break;
8493		}
8494	}
8495
8496	error = squeue_synch_enter(sqp, connp, NULL);
8497	if (error == ENOMEM) {
8498		return (ENOMEM);
8499	}
8500
8501	error = proto_opt_check(level, option_name, optlen, NULL,
8502	    tcp_opt_obj.odb_opt_des_arr,
8503	    tcp_opt_obj.odb_opt_arr_cnt,
8504	    B_TRUE, B_FALSE, cr);
8505
8506	if (error != 0) {
8507		if (error < 0) {
8508			error = proto_tlitosyserr(-error);
8509		}
8510		squeue_synch_exit(sqp, connp);
8511		return (error);
8512	}
8513
8514	error = tcp_opt_set(connp, SETFN_OPTCOM_NEGOTIATE, level, option_name,
8515	    optlen, (uchar_t *)optvalp, (uint_t *)&optlen, (uchar_t *)optvalp,
8516	    NULL, cr);
8517	squeue_synch_exit(sqp, connp);
8518
8519	ASSERT(error >= 0);
8520
8521	return (error);
8522}
8523
8524/*
8525 * Build/update the tcp header template (in conn_ht_iphc) based on
8526 * conn_xmit_ipp. The headers include ip6_t, any extension
8527 * headers, and the maximum size tcp header (to avoid reallocation
8528 * on the fly for additional tcp options).
8529 *
8530 * Assumes the caller has already set conn_{faddr,laddr,fport,lport,flowinfo}.
8531 * Returns failure if can't allocate memory.
8532 */
8533static int
8534tcp_build_hdrs(tcp_t *tcp)
8535{
8536	tcp_stack_t	*tcps = tcp->tcp_tcps;
8537	conn_t		*connp = tcp->tcp_connp;
8538	tcpha_t		*tcpha;
8539	uint32_t	cksum;
8540	int		error;
8541
8542	/* Grab lock to satisfy ASSERT; TCP is serialized using squeue */
8543	mutex_enter(&connp->conn_lock);
8544	error = conn_build_hdr_template(connp, TCP_MIN_HEADER_LENGTH,
8545	    TCP_MAX_TCP_OPTIONS_LENGTH, &connp->conn_laddr_v6,
8546	    &connp->conn_faddr_v6, connp->conn_flowinfo);
8547	mutex_exit(&connp->conn_lock);
8548	if (error != 0)
8549		return (error);
8550
8551	/*
8552	 * Any routing header/option has been massaged. The checksum difference
8553	 * is stored in conn_sum for later use.
8554	 */
8555	tcpha = (tcpha_t *)connp->conn_ht_ulp;
8556	tcp->tcp_tcpha = tcpha;
8557
8558	tcpha->tha_lport = connp->conn_lport;
8559	tcpha->tha_fport = connp->conn_fport;
8560	tcpha->tha_sum = 0;
8561	tcpha->tha_offset_and_reserved = (5 << 4);
8562
8563	/*
8564	 * IP wants our header length in the checksum field to
8565	 * allow it to perform a single pseudo-header+checksum
8566	 * calculation on behalf of TCP.
8567	 * Include the adjustment for a source route once IP_OPTIONS is set.
8568	 */
8569	cksum = sizeof (tcpha_t) + connp->conn_sum;
8570	cksum = (cksum >> 16) + (cksum & 0xFFFF);
8571	ASSERT(cksum < 0x10000);
8572	tcpha->tha_sum = htons(cksum);
8573
8574	if (connp->conn_ipversion == IPV4_VERSION)
8575		tcp->tcp_ipha = (ipha_t *)connp->conn_ht_iphc;
8576	else
8577		tcp->tcp_ip6h = (ip6_t *)connp->conn_ht_iphc;
8578
8579	if (connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra >
8580	    connp->conn_wroff) {
8581		connp->conn_wroff = connp->conn_ht_iphc_allocated +
8582		    tcps->tcps_wroff_xtra;
8583		(void) proto_set_tx_wroff(connp->conn_rq, connp,
8584		    connp->conn_wroff);
8585	}
8586	return (0);
8587}
8588
8589/* Get callback routine passed to nd_load by tcp_param_register */
8590/* ARGSUSED */
8591static int
8592tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
8593{
8594	tcpparam_t	*tcppa = (tcpparam_t *)cp;
8595
8596	(void) mi_mpprintf(mp, "%u", tcppa->tcp_param_val);
8597	return (0);
8598}
8599
8600/*
8601 * Walk through the param array specified registering each element with the
8602 * named dispatch handler.
8603 */
8604static boolean_t
8605tcp_param_register(IDP *ndp, tcpparam_t *tcppa, int cnt, tcp_stack_t *tcps)
8606{
8607	for (; cnt-- > 0; tcppa++) {
8608		if (tcppa->tcp_param_name && tcppa->tcp_param_name[0]) {
8609			if (!nd_load(ndp, tcppa->tcp_param_name,
8610			    tcp_param_get, tcp_param_set,
8611			    (caddr_t)tcppa)) {
8612				nd_free(ndp);
8613				return (B_FALSE);
8614			}
8615		}
8616	}
8617	tcps->tcps_wroff_xtra_param = kmem_zalloc(sizeof (tcpparam_t),
8618	    KM_SLEEP);
8619	bcopy(&lcl_tcp_wroff_xtra_param, tcps->tcps_wroff_xtra_param,
8620	    sizeof (tcpparam_t));
8621	if (!nd_load(ndp, tcps->tcps_wroff_xtra_param->tcp_param_name,
8622	    tcp_param_get, tcp_param_set_aligned,
8623	    (caddr_t)tcps->tcps_wroff_xtra_param)) {
8624		nd_free(ndp);
8625		return (B_FALSE);
8626	}
8627	if (!nd_load(ndp, "tcp_extra_priv_ports",
8628	    tcp_extra_priv_ports_get, NULL, NULL)) {
8629		nd_free(ndp);
8630		return (B_FALSE);
8631	}
8632	if (!nd_load(ndp, "tcp_extra_priv_ports_add",
8633	    NULL, tcp_extra_priv_ports_add, NULL)) {
8634		nd_free(ndp);
8635		return (B_FALSE);
8636	}
8637	if (!nd_load(ndp, "tcp_extra_priv_ports_del",
8638	    NULL, tcp_extra_priv_ports_del, NULL)) {
8639		nd_free(ndp);
8640		return (B_FALSE);
8641	}
8642	if (!nd_load(ndp, "tcp_1948_phrase", NULL,
8643	    tcp_1948_phrase_set, NULL)) {
8644		nd_free(ndp);
8645		return (B_FALSE);
8646	}
8647	/*
8648	 * Dummy ndd variables - only to convey obsolescence information
8649	 * through printing of their name (no get or set routines)
8650	 * XXX Remove in future releases ?
8651	 */
8652	if (!nd_load(ndp,
8653	    "tcp_close_wait_interval(obsoleted - "
8654	    "use tcp_time_wait_interval)", NULL, NULL, NULL)) {
8655		nd_free(ndp);
8656		return (B_FALSE);
8657	}
8658	return (B_TRUE);
8659}
8660
8661/* ndd set routine for tcp_wroff_xtra. */
8662/* ARGSUSED */
8663static int
8664tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
8665    cred_t *cr)
8666{
8667	long new_value;
8668	tcpparam_t *tcppa = (tcpparam_t *)cp;
8669
8670	if (ddi_strtol(value, NULL, 10, &new_value) != 0 ||
8671	    new_value < tcppa->tcp_param_min ||
8672	    new_value > tcppa->tcp_param_max) {
8673		return (EINVAL);
8674	}
8675	/*
8676	 * Need to make sure new_value is a multiple of 4.  If it is not,
8677	 * round it up.  For future 64 bit requirement, we actually make it
8678	 * a multiple of 8.
8679	 */
8680	if (new_value & 0x7) {
8681		new_value = (new_value & ~0x7) + 0x8;
8682	}
8683	tcppa->tcp_param_val = new_value;
8684	return (0);
8685}
8686
8687/* Set callback routine passed to nd_load by tcp_param_register */
8688/* ARGSUSED */
8689static int
8690tcp_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr)
8691{
8692	long	new_value;
8693	tcpparam_t	*tcppa = (tcpparam_t *)cp;
8694
8695	if (ddi_strtol(value, NULL, 10, &new_value) != 0 ||
8696	    new_value < tcppa->tcp_param_min ||
8697	    new_value > tcppa->tcp_param_max) {
8698		return (EINVAL);
8699	}
8700	tcppa->tcp_param_val = new_value;
8701	return (0);
8702}
8703
8704/*
8705 * Add a new piece to the tcp reassembly queue.  If the gap at the beginning
8706 * is filled, return as much as we can.  The message passed in may be
8707 * multi-part, chained using b_cont.  "start" is the starting sequence
8708 * number for this piece.
8709 */
8710static mblk_t *
8711tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start)
8712{
8713	uint32_t	end;
8714	mblk_t		*mp1;
8715	mblk_t		*mp2;
8716	mblk_t		*next_mp;
8717	uint32_t	u1;
8718	tcp_stack_t	*tcps = tcp->tcp_tcps;
8719
8720
8721	/* Walk through all the new pieces. */
8722	do {
8723		ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
8724		    (uintptr_t)INT_MAX);
8725		end = start + (int)(mp->b_wptr - mp->b_rptr);
8726		next_mp = mp->b_cont;
8727		if (start == end) {
8728			/* Empty.  Blast it. */
8729			freeb(mp);
8730			continue;
8731		}
8732		mp->b_cont = NULL;
8733		TCP_REASS_SET_SEQ(mp, start);
8734		TCP_REASS_SET_END(mp, end);
8735		mp1 = tcp->tcp_reass_tail;
8736		if (!mp1) {
8737			tcp->tcp_reass_tail = mp;
8738			tcp->tcp_reass_head = mp;
8739			BUMP_MIB(&tcps->tcps_mib, tcpInDataUnorderSegs);
8740			UPDATE_MIB(&tcps->tcps_mib,
8741			    tcpInDataUnorderBytes, end - start);
8742			continue;
8743		}
8744		/* New stuff completely beyond tail? */
8745		if (SEQ_GEQ(start, TCP_REASS_END(mp1))) {
8746			/* Link it on end. */
8747			mp1->b_cont = mp;
8748			tcp->tcp_reass_tail = mp;
8749			BUMP_MIB(&tcps->tcps_mib, tcpInDataUnorderSegs);
8750			UPDATE_MIB(&tcps->tcps_mib,
8751			    tcpInDataUnorderBytes, end - start);
8752			continue;
8753		}
8754		mp1 = tcp->tcp_reass_head;
8755		u1 = TCP_REASS_SEQ(mp1);
8756		/* New stuff at the front? */
8757		if (SEQ_LT(start, u1)) {
8758			/* Yes... Check for overlap. */
8759			mp->b_cont = mp1;
8760			tcp->tcp_reass_head = mp;
8761			tcp_reass_elim_overlap(tcp, mp);
8762			continue;
8763		}
8764		/*
8765		 * The new piece fits somewhere between the head and tail.
8766		 * We find our slot, where mp1 precedes us and mp2 trails.
8767		 */
8768		for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) {
8769			u1 = TCP_REASS_SEQ(mp2);
8770			if (SEQ_LEQ(start, u1))
8771				break;
8772		}
8773		/* Link ourselves in */
8774		mp->b_cont = mp2;
8775		mp1->b_cont = mp;
8776
8777		/* Trim overlap with following mblk(s) first */
8778		tcp_reass_elim_overlap(tcp, mp);
8779
8780		/* Trim overlap with preceding mblk */
8781		tcp_reass_elim_overlap(tcp, mp1);
8782
8783	} while (start = end, mp = next_mp);
8784	mp1 = tcp->tcp_reass_head;
8785	/* Anything ready to go? */
8786	if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt)
8787		return (NULL);
8788	/* Eat what we can off the queue */
8789	for (;;) {
8790		mp = mp1->b_cont;
8791		end = TCP_REASS_END(mp1);
8792		TCP_REASS_SET_SEQ(mp1, 0);
8793		TCP_REASS_SET_END(mp1, 0);
8794		if (!mp) {
8795			tcp->tcp_reass_tail = NULL;
8796			break;
8797		}
8798		if (end != TCP_REASS_SEQ(mp)) {
8799			mp1->b_cont = NULL;
8800			break;
8801		}
8802		mp1 = mp;
8803	}
8804	mp1 = tcp->tcp_reass_head;
8805	tcp->tcp_reass_head = mp;
8806	return (mp1);
8807}
8808
8809/* Eliminate any overlap that mp may have over later mblks */
8810static void
8811tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp)
8812{
8813	uint32_t	end;
8814	mblk_t		*mp1;
8815	uint32_t	u1;
8816	tcp_stack_t	*tcps = tcp->tcp_tcps;
8817
8818	end = TCP_REASS_END(mp);
8819	while ((mp1 = mp->b_cont) != NULL) {
8820		u1 = TCP_REASS_SEQ(mp1);
8821		if (!SEQ_GT(end, u1))
8822			break;
8823		if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) {
8824			mp->b_wptr -= end - u1;
8825			TCP_REASS_SET_END(mp, u1);
8826			BUMP_MIB(&tcps->tcps_mib, tcpInDataPartDupSegs);
8827			UPDATE_MIB(&tcps->tcps_mib,
8828			    tcpInDataPartDupBytes, end - u1);
8829			break;
8830		}
8831		mp->b_cont = mp1->b_cont;
8832		TCP_REASS_SET_SEQ(mp1, 0);
8833		TCP_REASS_SET_END(mp1, 0);
8834		freeb(mp1);
8835		BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs);
8836		UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes, end - u1);
8837	}
8838	if (!mp1)
8839		tcp->tcp_reass_tail = mp;
8840}
8841
8842static uint_t
8843tcp_rwnd_reopen(tcp_t *tcp)
8844{
8845	uint_t ret = 0;
8846	uint_t thwin;
8847	conn_t *connp = tcp->tcp_connp;
8848
8849	/* Learn the latest rwnd information that we sent to the other side. */
8850	thwin = ((uint_t)ntohs(tcp->tcp_tcpha->tha_win))
8851	    << tcp->tcp_rcv_ws;
8852	/* This is peer's calculated send window (our receive window). */
8853	thwin -= tcp->tcp_rnxt - tcp->tcp_rack;
8854	/*
8855	 * Increase the receive window to max.  But we need to do receiver
8856	 * SWS avoidance.  This means that we need to check the increase of
8857	 * of receive window is at least 1 MSS.
8858	 */
8859	if (connp->conn_rcvbuf - thwin >= tcp->tcp_mss) {
8860		/*
8861		 * If the window that the other side knows is less than max
8862		 * deferred acks segments, send an update immediately.
8863		 */
8864		if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) {
8865			BUMP_MIB(&tcp->tcp_tcps->tcps_mib, tcpOutWinUpdate);
8866			ret = TH_ACK_NEEDED;
8867		}
8868		tcp->tcp_rwnd = connp->conn_rcvbuf;
8869	}
8870	return (ret);
8871}
8872
8873/*
8874 * Send up all messages queued on tcp_rcv_list.
8875 */
8876static uint_t
8877tcp_rcv_drain(tcp_t *tcp)
8878{
8879	mblk_t *mp;
8880	uint_t ret = 0;
8881#ifdef DEBUG
8882	uint_t cnt = 0;
8883#endif
8884	queue_t	*q = tcp->tcp_connp->conn_rq;
8885
8886	/* Can't drain on an eager connection */
8887	if (tcp->tcp_listener != NULL)
8888		return (ret);
8889
8890	/* Can't be a non-STREAMS connection */
8891	ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
8892
8893	/* No need for the push timer now. */
8894	if (tcp->tcp_push_tid != 0) {
8895		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
8896		tcp->tcp_push_tid = 0;
8897	}
8898
8899	/*
8900	 * Handle two cases here: we are currently fused or we were
8901	 * previously fused and have some urgent data to be delivered
8902	 * upstream.  The latter happens because we either ran out of
8903	 * memory or were detached and therefore sending the SIGURG was
8904	 * deferred until this point.  In either case we pass control
8905	 * over to tcp_fuse_rcv_drain() since it may need to complete
8906	 * some work.
8907	 */
8908	if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) {
8909		ASSERT(IPCL_IS_NONSTR(tcp->tcp_connp) ||
8910		    tcp->tcp_fused_sigurg_mp != NULL);
8911		if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL :
8912		    &tcp->tcp_fused_sigurg_mp))
8913			return (ret);
8914	}
8915
8916	while ((mp = tcp->tcp_rcv_list) != NULL) {
8917		tcp->tcp_rcv_list = mp->b_next;
8918		mp->b_next = NULL;
8919#ifdef DEBUG
8920		cnt += msgdsize(mp);
8921#endif
8922		/* Does this need SSL processing first? */
8923		if ((tcp->tcp_kssl_ctx != NULL) && (DB_TYPE(mp) == M_DATA)) {
8924			DTRACE_PROBE1(kssl_mblk__ksslinput_rcvdrain,
8925			    mblk_t *, mp);
8926			tcp_kssl_input(tcp, mp, NULL);
8927			continue;
8928		}
8929		putnext(q, mp);
8930	}
8931#ifdef DEBUG
8932	ASSERT(cnt == tcp->tcp_rcv_cnt);
8933#endif
8934	tcp->tcp_rcv_last_head = NULL;
8935	tcp->tcp_rcv_last_tail = NULL;
8936	tcp->tcp_rcv_cnt = 0;
8937
8938	if (canputnext(q))
8939		return (tcp_rwnd_reopen(tcp));
8940
8941	return (ret);
8942}
8943
8944/*
8945 * Queue data on tcp_rcv_list which is a b_next chain.
8946 * tcp_rcv_last_head/tail is the last element of this chain.
8947 * Each element of the chain is a b_cont chain.
8948 *
8949 * M_DATA messages are added to the current element.
8950 * Other messages are added as new (b_next) elements.
8951 */
8952void
8953tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len, cred_t *cr)
8954{
8955	ASSERT(seg_len == msgdsize(mp));
8956	ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL);
8957
8958	if (is_system_labeled()) {
8959		ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL);
8960		/*
8961		 * Provide for protocols above TCP such as RPC. NOPID leaves
8962		 * db_cpid unchanged.
8963		 * The cred could have already been set.
8964		 */
8965		if (cr != NULL)
8966			mblk_setcred(mp, cr, NOPID);
8967	}
8968
8969	if (tcp->tcp_rcv_list == NULL) {
8970		ASSERT(tcp->tcp_rcv_last_head == NULL);
8971		tcp->tcp_rcv_list = mp;
8972		tcp->tcp_rcv_last_head = mp;
8973	} else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) {
8974		tcp->tcp_rcv_last_tail->b_cont = mp;
8975	} else {
8976		tcp->tcp_rcv_last_head->b_next = mp;
8977		tcp->tcp_rcv_last_head = mp;
8978	}
8979
8980	while (mp->b_cont)
8981		mp = mp->b_cont;
8982
8983	tcp->tcp_rcv_last_tail = mp;
8984	tcp->tcp_rcv_cnt += seg_len;
8985	tcp->tcp_rwnd -= seg_len;
8986}
8987
8988/* The minimum of smoothed mean deviation in RTO calculation. */
8989#define	TCP_SD_MIN	400
8990
8991/*
8992 * Set RTO for this connection.  The formula is from Jacobson and Karels'
8993 * "Congestion Avoidance and Control" in SIGCOMM '88.  The variable names
8994 * are the same as those in Appendix A.2 of that paper.
8995 *
8996 * m = new measurement
8997 * sa = smoothed RTT average (8 * average estimates).
8998 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates).
8999 */
9000static void
9001tcp_set_rto(tcp_t *tcp, clock_t rtt)
9002{
9003	long m = TICK_TO_MSEC(rtt);
9004	clock_t sa = tcp->tcp_rtt_sa;
9005	clock_t sv = tcp->tcp_rtt_sd;
9006	clock_t rto;
9007	tcp_stack_t	*tcps = tcp->tcp_tcps;
9008
9009	BUMP_MIB(&tcps->tcps_mib, tcpRttUpdate);
9010	tcp->tcp_rtt_update++;
9011
9012	/* tcp_rtt_sa is not 0 means this is a new sample. */
9013	if (sa != 0) {
9014		/*
9015		 * Update average estimator:
9016		 *	new rtt = 7/8 old rtt + 1/8 Error
9017		 */
9018
9019		/* m is now Error in estimate. */
9020		m -= sa >> 3;
9021		if ((sa += m) <= 0) {
9022			/*
9023			 * Don't allow the smoothed average to be negative.
9024			 * We use 0 to denote reinitialization of the
9025			 * variables.
9026			 */
9027			sa = 1;
9028		}
9029
9030		/*
9031		 * Update deviation estimator:
9032		 *	new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev)
9033		 */
9034		if (m < 0)
9035			m = -m;
9036		m -= sv >> 2;
9037		sv += m;
9038	} else {
9039		/*
9040		 * This follows BSD's implementation.  So the reinitialized
9041		 * RTO is 3 * m.  We cannot go less than 2 because if the
9042		 * link is bandwidth dominated, doubling the window size
9043		 * during slow start means doubling the RTT.  We want to be
9044		 * more conservative when we reinitialize our estimates.  3
9045		 * is just a convenient number.
9046		 */
9047		sa = m << 3;
9048		sv = m << 1;
9049	}
9050	if (sv < TCP_SD_MIN) {
9051		/*
9052		 * We do not know that if sa captures the delay ACK
9053		 * effect as in a long train of segments, a receiver
9054		 * does not delay its ACKs.  So set the minimum of sv
9055		 * to be TCP_SD_MIN, which is default to 400 ms, twice
9056		 * of BSD DATO.  That means the minimum of mean
9057		 * deviation is 100 ms.
9058		 *
9059		 */
9060		sv = TCP_SD_MIN;
9061	}
9062	tcp->tcp_rtt_sa = sa;
9063	tcp->tcp_rtt_sd = sv;
9064	/*
9065	 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv)
9066	 *
9067	 * Add tcp_rexmit_interval extra in case of extreme environment
9068	 * where the algorithm fails to work.  The default value of
9069	 * tcp_rexmit_interval_extra should be 0.
9070	 *
9071	 * As we use a finer grained clock than BSD and update
9072	 * RTO for every ACKs, add in another .25 of RTT to the
9073	 * deviation of RTO to accomodate burstiness of 1/4 of
9074	 * window size.
9075	 */
9076	rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5);
9077
9078	if (rto > tcps->tcps_rexmit_interval_max) {
9079		tcp->tcp_rto = tcps->tcps_rexmit_interval_max;
9080	} else if (rto < tcps->tcps_rexmit_interval_min) {
9081		tcp->tcp_rto = tcps->tcps_rexmit_interval_min;
9082	} else {
9083		tcp->tcp_rto = rto;
9084	}
9085
9086	/* Now, we can reset tcp_timer_backoff to use the new RTO... */
9087	tcp->tcp_timer_backoff = 0;
9088}
9089
9090/*
9091 * tcp_get_seg_mp() is called to get the pointer to a segment in the
9092 * send queue which starts at the given sequence number. If the given
9093 * sequence number is equal to last valid sequence number (tcp_snxt), the
9094 * returned mblk is the last valid mblk, and off is set to the length of
9095 * that mblk.
9096 *
9097 * send queue which starts at the given seq. no.
9098 *
9099 * Parameters:
9100 *	tcp_t *tcp: the tcp instance pointer.
9101 *	uint32_t seq: the starting seq. no of the requested segment.
9102 *	int32_t *off: after the execution, *off will be the offset to
9103 *		the returned mblk which points to the requested seq no.
9104 *		It is the caller's responsibility to send in a non-null off.
9105 *
9106 * Return:
9107 *	A mblk_t pointer pointing to the requested segment in send queue.
9108 */
9109static mblk_t *
9110tcp_get_seg_mp(tcp_t *tcp, uint32_t seq, int32_t *off)
9111{
9112	int32_t	cnt;
9113	mblk_t	*mp;
9114
9115	/* Defensive coding.  Make sure we don't send incorrect data. */
9116	if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GT(seq, tcp->tcp_snxt))
9117		return (NULL);
9118
9119	cnt = seq - tcp->tcp_suna;
9120	mp = tcp->tcp_xmit_head;
9121	while (cnt > 0 && mp != NULL) {
9122		cnt -= mp->b_wptr - mp->b_rptr;
9123		if (cnt <= 0) {
9124			cnt += mp->b_wptr - mp->b_rptr;
9125			break;
9126		}
9127		mp = mp->b_cont;
9128	}
9129	ASSERT(mp != NULL);
9130	*off = cnt;
9131	return (mp);
9132}
9133
9134/*
9135 * This function handles all retransmissions if SACK is enabled for this
9136 * connection.  First it calculates how many segments can be retransmitted
9137 * based on tcp_pipe.  Then it goes thru the notsack list to find eligible
9138 * segments.  A segment is eligible if sack_cnt for that segment is greater
9139 * than or equal tcp_dupack_fast_retransmit.  After it has retransmitted
9140 * all eligible segments, it checks to see if TCP can send some new segments
9141 * (fast recovery).  If it can, set the appropriate flag for tcp_input_data().
9142 *
9143 * Parameters:
9144 *	tcp_t *tcp: the tcp structure of the connection.
9145 *	uint_t *flags: in return, appropriate value will be set for
9146 *	tcp_input_data().
9147 */
9148static void
9149tcp_sack_rxmit(tcp_t *tcp, uint_t *flags)
9150{
9151	notsack_blk_t	*notsack_blk;
9152	int32_t		usable_swnd;
9153	int32_t		mss;
9154	uint32_t	seg_len;
9155	mblk_t		*xmit_mp;
9156	tcp_stack_t	*tcps = tcp->tcp_tcps;
9157
9158	ASSERT(tcp->tcp_sack_info != NULL);
9159	ASSERT(tcp->tcp_notsack_list != NULL);
9160	ASSERT(tcp->tcp_rexmit == B_FALSE);
9161
9162	/* Defensive coding in case there is a bug... */
9163	if (tcp->tcp_notsack_list == NULL) {
9164		return;
9165	}
9166	notsack_blk = tcp->tcp_notsack_list;
9167	mss = tcp->tcp_mss;
9168
9169	/*
9170	 * Limit the num of outstanding data in the network to be
9171	 * tcp_cwnd_ssthresh, which is half of the original congestion wnd.
9172	 */
9173	usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;
9174
9175	/* At least retransmit 1 MSS of data. */
9176	if (usable_swnd <= 0) {
9177		usable_swnd = mss;
9178	}
9179
9180	/* Make sure no new RTT samples will be taken. */
9181	tcp->tcp_csuna = tcp->tcp_snxt;
9182
9183	notsack_blk = tcp->tcp_notsack_list;
9184	while (usable_swnd > 0) {
9185		mblk_t		*snxt_mp, *tmp_mp;
9186		tcp_seq		begin = tcp->tcp_sack_snxt;
9187		tcp_seq		end;
9188		int32_t		off;
9189
9190		for (; notsack_blk != NULL; notsack_blk = notsack_blk->next) {
9191			if (SEQ_GT(notsack_blk->end, begin) &&
9192			    (notsack_blk->sack_cnt >=
9193			    tcps->tcps_dupack_fast_retransmit)) {
9194				end = notsack_blk->end;
9195				if (SEQ_LT(begin, notsack_blk->begin)) {
9196					begin = notsack_blk->begin;
9197				}
9198				break;
9199			}
9200		}
9201		/*
9202		 * All holes are filled.  Manipulate tcp_cwnd to send more
9203		 * if we can.  Note that after the SACK recovery, tcp_cwnd is
9204		 * set to tcp_cwnd_ssthresh.
9205		 */
9206		if (notsack_blk == NULL) {
9207			usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;
9208			if (usable_swnd <= 0 || tcp->tcp_unsent == 0) {
9209				tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna;
9210				ASSERT(tcp->tcp_cwnd > 0);
9211				return;
9212			} else {
9213				usable_swnd = usable_swnd / mss;
9214				tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna +
9215				    MAX(usable_swnd * mss, mss);
9216				*flags |= TH_XMIT_NEEDED;
9217				return;
9218			}
9219		}
9220
9221		/*
9222		 * Note that we may send more than usable_swnd allows here
9223		 * because of round off, but no more than 1 MSS of data.
9224		 */
9225		seg_len = end - begin;
9226		if (seg_len > mss)
9227			seg_len = mss;
9228		snxt_mp = tcp_get_seg_mp(tcp, begin, &off);
9229		ASSERT(snxt_mp != NULL);
9230		/* This should not happen.  Defensive coding again... */
9231		if (snxt_mp == NULL) {
9232			return;
9233		}
9234
9235		xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off,
9236		    &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE);
9237		if (xmit_mp == NULL)
9238			return;
9239
9240		usable_swnd -= seg_len;
9241		tcp->tcp_pipe += seg_len;
9242		tcp->tcp_sack_snxt = begin + seg_len;
9243
9244		tcp_send_data(tcp, xmit_mp);
9245
9246		/*
9247		 * Update the send timestamp to avoid false retransmission.
9248		 */
9249		snxt_mp->b_prev = (mblk_t *)lbolt;
9250
9251		BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
9252		UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, seg_len);
9253		BUMP_MIB(&tcps->tcps_mib, tcpOutSackRetransSegs);
9254		/*
9255		 * Update tcp_rexmit_max to extend this SACK recovery phase.
9256		 * This happens when new data sent during fast recovery is
9257		 * also lost.  If TCP retransmits those new data, it needs
9258		 * to extend SACK recover phase to avoid starting another
9259		 * fast retransmit/recovery unnecessarily.
9260		 */
9261		if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) {
9262			tcp->tcp_rexmit_max = tcp->tcp_sack_snxt;
9263		}
9264	}
9265}
9266
9267/*
9268 * tcp_ss_rexmit() is called to do slow start retransmission after a timeout
9269 * or ICMP errors.
9270 *
9271 * To limit the number of duplicate segments, we limit the number of segment
9272 * to be sent in one time to tcp_snd_burst, the burst variable.
9273 */
9274static void
9275tcp_ss_rexmit(tcp_t *tcp)
9276{
9277	uint32_t	snxt;
9278	uint32_t	smax;
9279	int32_t		win;
9280	int32_t		mss;
9281	int32_t		off;
9282	int32_t		burst = tcp->tcp_snd_burst;
9283	mblk_t		*snxt_mp;
9284	tcp_stack_t	*tcps = tcp->tcp_tcps;
9285
9286	/*
9287	 * Note that tcp_rexmit can be set even though TCP has retransmitted
9288	 * all unack'ed segments.
9289	 */
9290	if (SEQ_LT(tcp->tcp_rexmit_nxt, tcp->tcp_rexmit_max)) {
9291		smax = tcp->tcp_rexmit_max;
9292		snxt = tcp->tcp_rexmit_nxt;
9293		if (SEQ_LT(snxt, tcp->tcp_suna)) {
9294			snxt = tcp->tcp_suna;
9295		}
9296		win = MIN(tcp->tcp_cwnd, tcp->tcp_swnd);
9297		win -= snxt - tcp->tcp_suna;
9298		mss = tcp->tcp_mss;
9299		snxt_mp = tcp_get_seg_mp(tcp, snxt, &off);
9300
9301		while (SEQ_LT(snxt, smax) && (win > 0) &&
9302		    (burst > 0) && (snxt_mp != NULL)) {
9303			mblk_t	*xmit_mp;
9304			mblk_t	*old_snxt_mp = snxt_mp;
9305			uint32_t cnt = mss;
9306
9307			if (win < cnt) {
9308				cnt = win;
9309			}
9310			if (SEQ_GT(snxt + cnt, smax)) {
9311				cnt = smax - snxt;
9312			}
9313			xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off,
9314			    &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE);
9315			if (xmit_mp == NULL)
9316				return;
9317
9318			tcp_send_data(tcp, xmit_mp);
9319
9320			snxt += cnt;
9321			win -= cnt;
9322			/*
9323			 * Update the send timestamp to avoid false
9324			 * retransmission.
9325			 */
9326			old_snxt_mp->b_prev = (mblk_t *)lbolt;
9327			BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
9328			UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, cnt);
9329
9330			tcp->tcp_rexmit_nxt = snxt;
9331			burst--;
9332		}
9333		/*
9334		 * If we have transmitted all we have at the time
9335		 * we started the retranmission, we can leave
9336		 * the rest of the job to tcp_wput_data().  But we
9337		 * need to check the send window first.  If the
9338		 * win is not 0, go on with tcp_wput_data().
9339		 */
9340		if (SEQ_LT(snxt, smax) || win == 0) {
9341			return;
9342		}
9343	}
9344	/* Only call tcp_wput_data() if there is data to be sent. */
9345	if (tcp->tcp_unsent) {
9346		tcp_wput_data(tcp, NULL, B_FALSE);
9347	}
9348}
9349
9350/*
9351 * Process all TCP option in SYN segment.  Note that this function should
9352 * be called after tcp_set_destination() is called so that the necessary info
9353 * from IRE is already set in the tcp structure.
9354 *
9355 * This function sets up the correct tcp_mss value according to the
9356 * MSS option value and our header size.  It also sets up the window scale
9357 * and timestamp values, and initialize SACK info blocks.  But it does not
9358 * change receive window size after setting the tcp_mss value.  The caller
9359 * should do the appropriate change.
9360 */
9361void
9362tcp_process_options(tcp_t *tcp, tcpha_t *tcpha)
9363{
9364	int options;
9365	tcp_opt_t tcpopt;
9366	uint32_t mss_max;
9367	char *tmp_tcph;
9368	tcp_stack_t	*tcps = tcp->tcp_tcps;
9369	conn_t		*connp = tcp->tcp_connp;
9370
9371	tcpopt.tcp = NULL;
9372	options = tcp_parse_options(tcpha, &tcpopt);
9373
9374	/*
9375	 * Process MSS option.  Note that MSS option value does not account
9376	 * for IP or TCP options.  This means that it is equal to MTU - minimum
9377	 * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for
9378	 * IPv6.
9379	 */
9380	if (!(options & TCP_OPT_MSS_PRESENT)) {
9381		if (connp->conn_ipversion == IPV4_VERSION)
9382			tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4;
9383		else
9384			tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6;
9385	} else {
9386		if (connp->conn_ipversion == IPV4_VERSION)
9387			mss_max = tcps->tcps_mss_max_ipv4;
9388		else
9389			mss_max = tcps->tcps_mss_max_ipv6;
9390		if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min)
9391			tcpopt.tcp_opt_mss = tcps->tcps_mss_min;
9392		else if (tcpopt.tcp_opt_mss > mss_max)
9393			tcpopt.tcp_opt_mss = mss_max;
9394	}
9395
9396	/* Process Window Scale option. */
9397	if (options & TCP_OPT_WSCALE_PRESENT) {
9398		tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale;
9399		tcp->tcp_snd_ws_ok = B_TRUE;
9400	} else {
9401		tcp->tcp_snd_ws = B_FALSE;
9402		tcp->tcp_snd_ws_ok = B_FALSE;
9403		tcp->tcp_rcv_ws = B_FALSE;
9404	}
9405
9406	/* Process Timestamp option. */
9407	if ((options & TCP_OPT_TSTAMP_PRESENT) &&
9408	    (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) {
9409		tmp_tcph = (char *)tcp->tcp_tcpha;
9410
9411		tcp->tcp_snd_ts_ok = B_TRUE;
9412		tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
9413		tcp->tcp_last_rcv_lbolt = lbolt64;
9414		ASSERT(OK_32PTR(tmp_tcph));
9415		ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
9416
9417		/* Fill in our template header with basic timestamp option. */
9418		tmp_tcph += connp->conn_ht_ulp_len;
9419		tmp_tcph[0] = TCPOPT_NOP;
9420		tmp_tcph[1] = TCPOPT_NOP;
9421		tmp_tcph[2] = TCPOPT_TSTAMP;
9422		tmp_tcph[3] = TCPOPT_TSTAMP_LEN;
9423		connp->conn_ht_iphc_len += TCPOPT_REAL_TS_LEN;
9424		connp->conn_ht_ulp_len += TCPOPT_REAL_TS_LEN;
9425		tcp->tcp_tcpha->tha_offset_and_reserved += (3 << 4);
9426	} else {
9427		tcp->tcp_snd_ts_ok = B_FALSE;
9428	}
9429
9430	/*
9431	 * Process SACK options.  If SACK is enabled for this connection,
9432	 * then allocate the SACK info structure.  Note the following ways
9433	 * when tcp_snd_sack_ok is set to true.
9434	 *
9435	 * For active connection: in tcp_set_destination() called in
9436	 * tcp_connect().
9437	 *
9438	 * For passive connection: in tcp_set_destination() called in
9439	 * tcp_input_listener().
9440	 *
9441	 * That's the reason why the extra TCP_IS_DETACHED() check is there.
9442	 * That check makes sure that if we did not send a SACK OK option,
9443	 * we will not enable SACK for this connection even though the other
9444	 * side sends us SACK OK option.  For active connection, the SACK
9445	 * info structure has already been allocated.  So we need to free
9446	 * it if SACK is disabled.
9447	 */
9448	if ((options & TCP_OPT_SACK_OK_PRESENT) &&
9449	    (tcp->tcp_snd_sack_ok ||
9450	    (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) {
9451		/* This should be true only in the passive case. */
9452		if (tcp->tcp_sack_info == NULL) {
9453			ASSERT(TCP_IS_DETACHED(tcp));
9454			tcp->tcp_sack_info =
9455			    kmem_cache_alloc(tcp_sack_info_cache, KM_NOSLEEP);
9456		}
9457		if (tcp->tcp_sack_info == NULL) {
9458			tcp->tcp_snd_sack_ok = B_FALSE;
9459		} else {
9460			tcp->tcp_snd_sack_ok = B_TRUE;
9461			if (tcp->tcp_snd_ts_ok) {
9462				tcp->tcp_max_sack_blk = 3;
9463			} else {
9464				tcp->tcp_max_sack_blk = 4;
9465			}
9466		}
9467	} else {
9468		/*
9469		 * Resetting tcp_snd_sack_ok to B_FALSE so that
9470		 * no SACK info will be used for this
9471		 * connection.  This assumes that SACK usage
9472		 * permission is negotiated.  This may need
9473		 * to be changed once this is clarified.
9474		 */
9475		if (tcp->tcp_sack_info != NULL) {
9476			ASSERT(tcp->tcp_notsack_list == NULL);
9477			kmem_cache_free(tcp_sack_info_cache,
9478			    tcp->tcp_sack_info);
9479			tcp->tcp_sack_info = NULL;
9480		}
9481		tcp->tcp_snd_sack_ok = B_FALSE;
9482	}
9483
9484	/*
9485	 * Now we know the exact TCP/IP header length, subtract
9486	 * that from tcp_mss to get our side's MSS.
9487	 */
9488	tcp->tcp_mss -= connp->conn_ht_iphc_len;
9489
9490	/*
9491	 * Here we assume that the other side's header size will be equal to
9492	 * our header size.  We calculate the real MSS accordingly.  Need to
9493	 * take into additional stuffs IPsec puts in.
9494	 *
9495	 * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header)
9496	 */
9497	tcpopt.tcp_opt_mss -= connp->conn_ht_iphc_len +
9498	    tcp->tcp_ipsec_overhead -
9499	    ((connp->conn_ipversion == IPV4_VERSION ?
9500	    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH);
9501
9502	/*
9503	 * Set MSS to the smaller one of both ends of the connection.
9504	 * We should not have called tcp_mss_set() before, but our
9505	 * side of the MSS should have been set to a proper value
9506	 * by tcp_set_destination().  tcp_mss_set() will also set up the
9507	 * STREAM head parameters properly.
9508	 *
9509	 * If we have a larger-than-16-bit window but the other side
9510	 * didn't want to do window scale, tcp_rwnd_set() will take
9511	 * care of that.
9512	 */
9513	tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss));
9514
9515	/*
9516	 * Initialize tcp_cwnd value. After tcp_mss_set(), tcp_mss has been
9517	 * updated properly.
9518	 */
9519	SET_TCP_INIT_CWND(tcp, tcp->tcp_mss, tcps->tcps_slow_start_initial);
9520}
9521
9522/*
9523 * Sends the T_CONN_IND to the listener. The caller calls this
9524 * functions via squeue to get inside the listener's perimeter
9525 * once the 3 way hand shake is done a T_CONN_IND needs to be
9526 * sent. As an optimization, the caller can call this directly
9527 * if listener's perimeter is same as eager's.
9528 */
9529/* ARGSUSED */
9530void
9531tcp_send_conn_ind(void *arg, mblk_t *mp, void *arg2)
9532{
9533	conn_t			*lconnp = (conn_t *)arg;
9534	tcp_t			*listener = lconnp->conn_tcp;
9535	tcp_t			*tcp;
9536	struct T_conn_ind	*conn_ind;
9537	ipaddr_t 		*addr_cache;
9538	boolean_t		need_send_conn_ind = B_FALSE;
9539	tcp_stack_t		*tcps = listener->tcp_tcps;
9540
9541	/* retrieve the eager */
9542	conn_ind = (struct T_conn_ind *)mp->b_rptr;
9543	ASSERT(conn_ind->OPT_offset != 0 &&
9544	    conn_ind->OPT_length == sizeof (intptr_t));
9545	bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp,
9546	    conn_ind->OPT_length);
9547
9548	/*
9549	 * TLI/XTI applications will get confused by
9550	 * sending eager as an option since it violates
9551	 * the option semantics. So remove the eager as
9552	 * option since TLI/XTI app doesn't need it anyway.
9553	 */
9554	if (!TCP_IS_SOCKET(listener)) {
9555		conn_ind->OPT_length = 0;
9556		conn_ind->OPT_offset = 0;
9557	}
9558	if (listener->tcp_state != TCPS_LISTEN) {
9559		/*
9560		 * If listener has closed, it would have caused a
9561		 * a cleanup/blowoff to happen for the eager. We
9562		 * just need to return.
9563		 */
9564		freemsg(mp);
9565		return;
9566	}
9567
9568
9569	/*
9570	 * if the conn_req_q is full defer passing up the
9571	 * T_CONN_IND until space is availabe after t_accept()
9572	 * processing
9573	 */
9574	mutex_enter(&listener->tcp_eager_lock);
9575
9576	/*
9577	 * Take the eager out, if it is in the list of droppable eagers
9578	 * as we are here because the 3W handshake is over.
9579	 */
9580	MAKE_UNDROPPABLE(tcp);
9581
9582	if (listener->tcp_conn_req_cnt_q < listener->tcp_conn_req_max) {
9583		tcp_t *tail;
9584
9585		/*
9586		 * The eager already has an extra ref put in tcp_input_data
9587		 * so that it stays till accept comes back even though it
9588		 * might get into TCPS_CLOSED as a result of a TH_RST etc.
9589		 */
9590		ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
9591		listener->tcp_conn_req_cnt_q0--;
9592		listener->tcp_conn_req_cnt_q++;
9593
9594		/* Move from SYN_RCVD to ESTABLISHED list  */
9595		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
9596		    tcp->tcp_eager_prev_q0;
9597		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
9598		    tcp->tcp_eager_next_q0;
9599		tcp->tcp_eager_prev_q0 = NULL;
9600		tcp->tcp_eager_next_q0 = NULL;
9601
9602		/*
9603		 * Insert at end of the queue because sockfs
9604		 * sends down T_CONN_RES in chronological
9605		 * order. Leaving the older conn indications
9606		 * at front of the queue helps reducing search
9607		 * time.
9608		 */
9609		tail = listener->tcp_eager_last_q;
9610		if (tail != NULL)
9611			tail->tcp_eager_next_q = tcp;
9612		else
9613			listener->tcp_eager_next_q = tcp;
9614		listener->tcp_eager_last_q = tcp;
9615		tcp->tcp_eager_next_q = NULL;
9616		/*
9617		 * Delay sending up the T_conn_ind until we are
9618		 * done with the eager. Once we have have sent up
9619		 * the T_conn_ind, the accept can potentially complete
9620		 * any time and release the refhold we have on the eager.
9621		 */
9622		need_send_conn_ind = B_TRUE;
9623	} else {
9624		/*
9625		 * Defer connection on q0 and set deferred
9626		 * connection bit true
9627		 */
9628		tcp->tcp_conn_def_q0 = B_TRUE;
9629
9630		/* take tcp out of q0 ... */
9631		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
9632		    tcp->tcp_eager_next_q0;
9633		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
9634		    tcp->tcp_eager_prev_q0;
9635
9636		/* ... and place it at the end of q0 */
9637		tcp->tcp_eager_prev_q0 = listener->tcp_eager_prev_q0;
9638		tcp->tcp_eager_next_q0 = listener;
9639		listener->tcp_eager_prev_q0->tcp_eager_next_q0 = tcp;
9640		listener->tcp_eager_prev_q0 = tcp;
9641		tcp->tcp_conn.tcp_eager_conn_ind = mp;
9642	}
9643
9644	/* we have timed out before */
9645	if (tcp->tcp_syn_rcvd_timeout != 0) {
9646		tcp->tcp_syn_rcvd_timeout = 0;
9647		listener->tcp_syn_rcvd_timeout--;
9648		if (listener->tcp_syn_defense &&
9649		    listener->tcp_syn_rcvd_timeout <=
9650		    (tcps->tcps_conn_req_max_q0 >> 5) &&
9651		    10*MINUTES < TICK_TO_MSEC(lbolt64 -
9652		    listener->tcp_last_rcv_lbolt)) {
9653			/*
9654			 * Turn off the defense mode if we
9655			 * believe the SYN attack is over.
9656			 */
9657			listener->tcp_syn_defense = B_FALSE;
9658			if (listener->tcp_ip_addr_cache) {
9659				kmem_free((void *)listener->tcp_ip_addr_cache,
9660				    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
9661				listener->tcp_ip_addr_cache = NULL;
9662			}
9663		}
9664	}
9665	addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache);
9666	if (addr_cache != NULL) {
9667		/*
9668		 * We have finished a 3-way handshake with this
9669		 * remote host. This proves the IP addr is good.
9670		 * Cache it!
9671		 */
9672		addr_cache[IP_ADDR_CACHE_HASH(tcp->tcp_connp->conn_faddr_v4)] =
9673		    tcp->tcp_connp->conn_faddr_v4;
9674	}
9675	mutex_exit(&listener->tcp_eager_lock);
9676	if (need_send_conn_ind)
9677		tcp_ulp_newconn(lconnp, tcp->tcp_connp, mp);
9678}
9679
9680/*
9681 * Send the newconn notification to ulp. The eager is blown off if the
9682 * notification fails.
9683 */
9684static void
9685tcp_ulp_newconn(conn_t *lconnp, conn_t *econnp, mblk_t *mp)
9686{
9687	if (IPCL_IS_NONSTR(lconnp)) {
9688		cred_t	*cr;
9689		pid_t	cpid = NOPID;
9690
9691		ASSERT(econnp->conn_tcp->tcp_listener == lconnp->conn_tcp);
9692		ASSERT(econnp->conn_tcp->tcp_saved_listener ==
9693		    lconnp->conn_tcp);
9694
9695		cr = msg_getcred(mp, &cpid);
9696
9697		/* Keep the message around in case of a fallback to TPI */
9698		econnp->conn_tcp->tcp_conn.tcp_eager_conn_ind = mp;
9699		/*
9700		 * Notify the ULP about the newconn. It is guaranteed that no
9701		 * tcp_accept() call will be made for the eager if the
9702		 * notification fails, so it's safe to blow it off in that
9703		 * case.
9704		 *
9705		 * The upper handle will be assigned when tcp_accept() is
9706		 * called.
9707		 */
9708		if ((*lconnp->conn_upcalls->su_newconn)
9709		    (lconnp->conn_upper_handle,
9710		    (sock_lower_handle_t)econnp,
9711		    &sock_tcp_downcalls, cr, cpid,
9712		    &econnp->conn_upcalls) == NULL) {
9713			/* Failed to allocate a socket */
9714			BUMP_MIB(&lconnp->conn_tcp->tcp_tcps->tcps_mib,
9715			    tcpEstabResets);
9716			(void) tcp_eager_blowoff(lconnp->conn_tcp,
9717			    econnp->conn_tcp->tcp_conn_req_seqnum);
9718		}
9719	} else {
9720		putnext(lconnp->conn_rq, mp);
9721	}
9722}
9723
9724/*
9725 * Handle a packet that has been reclassified by TCP.
9726 * This function drops the ref on connp that the caller had.
9727 */
9728static void
9729tcp_reinput(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst)
9730{
9731	ipsec_stack_t	*ipss = ipst->ips_netstack->netstack_ipsec;
9732
9733	if (connp->conn_incoming_ifindex != 0 &&
9734	    connp->conn_incoming_ifindex != ira->ira_ruifindex) {
9735		freemsg(mp);
9736		CONN_DEC_REF(connp);
9737		return;
9738	}
9739
9740	if (CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss) ||
9741	    (ira->ira_flags & IRAF_IPSEC_SECURE)) {
9742		ip6_t *ip6h;
9743		ipha_t *ipha;
9744
9745		if (ira->ira_flags & IRAF_IS_IPV4) {
9746			ipha = (ipha_t *)mp->b_rptr;
9747			ip6h = NULL;
9748		} else {
9749			ipha = NULL;
9750			ip6h = (ip6_t *)mp->b_rptr;
9751		}
9752		mp = ipsec_check_inbound_policy(mp, connp, ipha, ip6h, ira);
9753		if (mp == NULL) {
9754			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
9755			/* Note that mp is NULL */
9756			ip_drop_input("ipIfStatsInDiscards", mp, NULL);
9757			CONN_DEC_REF(connp);
9758			return;
9759		}
9760	}
9761
9762	if (IPCL_IS_TCP(connp)) {
9763		/*
9764		 * do not drain, certain use cases can blow
9765		 * the stack
9766		 */
9767		SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
9768		    connp->conn_recv, connp, ira,
9769		    SQ_NODRAIN, SQTAG_IP_TCP_INPUT);
9770	} else {
9771		/* Not TCP; must be SOCK_RAW, IPPROTO_TCP */
9772		(connp->conn_recv)(connp, mp, NULL,
9773		    ira);
9774		CONN_DEC_REF(connp);
9775	}
9776
9777}
9778
9779boolean_t tcp_outbound_squeue_switch = B_FALSE;
9780
9781/*
9782 * Handle M_DATA messages from IP. Its called directly from IP via
9783 * squeue for received IP packets.
9784 *
9785 * The first argument is always the connp/tcp to which the mp belongs.
9786 * There are no exceptions to this rule. The caller has already put
9787 * a reference on this connp/tcp and once tcp_input_data() returns,
9788 * the squeue will do the refrele.
9789 *
9790 * The TH_SYN for the listener directly go to tcp_input_listener via
9791 * squeue. ICMP errors go directly to tcp_icmp_input().
9792 *
9793 * sqp: NULL = recursive, sqp != NULL means called from squeue
9794 */
9795void
9796tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
9797{
9798	int32_t		bytes_acked;
9799	int32_t		gap;
9800	mblk_t		*mp1;
9801	uint_t		flags;
9802	uint32_t	new_swnd = 0;
9803	uchar_t		*iphdr;
9804	uchar_t		*rptr;
9805	int32_t		rgap;
9806	uint32_t	seg_ack;
9807	int		seg_len;
9808	uint_t		ip_hdr_len;
9809	uint32_t	seg_seq;
9810	tcpha_t		*tcpha;
9811	int		urp;
9812	tcp_opt_t	tcpopt;
9813	ip_pkt_t	ipp;
9814	boolean_t	ofo_seg = B_FALSE; /* Out of order segment */
9815	uint32_t	cwnd;
9816	uint32_t	add;
9817	int		npkt;
9818	int		mss;
9819	conn_t		*connp = (conn_t *)arg;
9820	squeue_t	*sqp = (squeue_t *)arg2;
9821	tcp_t		*tcp = connp->conn_tcp;
9822	tcp_stack_t	*tcps = tcp->tcp_tcps;
9823
9824	/*
9825	 * RST from fused tcp loopback peer should trigger an unfuse.
9826	 */
9827	if (tcp->tcp_fused) {
9828		TCP_STAT(tcps, tcp_fusion_aborted);
9829		tcp_unfuse(tcp);
9830	}
9831
9832	iphdr = mp->b_rptr;
9833	rptr = mp->b_rptr;
9834	ASSERT(OK_32PTR(rptr));
9835
9836	ip_hdr_len = ira->ira_ip_hdr_length;
9837	if (connp->conn_recv_ancillary.crb_all != 0) {
9838		/*
9839		 * Record packet information in the ip_pkt_t
9840		 */
9841		ipp.ipp_fields = 0;
9842		if (ira->ira_flags & IRAF_IS_IPV4) {
9843			(void) ip_find_hdr_v4((ipha_t *)rptr, &ipp,
9844			    B_FALSE);
9845		} else {
9846			uint8_t nexthdrp;
9847
9848			/*
9849			 * IPv6 packets can only be received by applications
9850			 * that are prepared to receive IPv6 addresses.
9851			 * The IP fanout must ensure this.
9852			 */
9853			ASSERT(connp->conn_family == AF_INET6);
9854
9855			(void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp,
9856			    &nexthdrp);
9857			ASSERT(nexthdrp == IPPROTO_TCP);
9858
9859			/* Could have caused a pullup? */
9860			iphdr = mp->b_rptr;
9861			rptr = mp->b_rptr;
9862		}
9863	}
9864	ASSERT(DB_TYPE(mp) == M_DATA);
9865	ASSERT(mp->b_next == NULL);
9866
9867	tcpha = (tcpha_t *)&rptr[ip_hdr_len];
9868	seg_seq = ntohl(tcpha->tha_seq);
9869	seg_ack = ntohl(tcpha->tha_ack);
9870	ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX);
9871	seg_len = (int)(mp->b_wptr - rptr) -
9872	    (ip_hdr_len + TCP_HDR_LENGTH(tcpha));
9873	if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) {
9874		do {
9875			ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
9876			    (uintptr_t)INT_MAX);
9877			seg_len += (int)(mp1->b_wptr - mp1->b_rptr);
9878		} while ((mp1 = mp1->b_cont) != NULL &&
9879		    mp1->b_datap->db_type == M_DATA);
9880	}
9881
9882	if (tcp->tcp_state == TCPS_TIME_WAIT) {
9883		tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack,
9884		    seg_len, tcpha, ira);
9885		return;
9886	}
9887
9888	if (sqp != NULL) {
9889		/*
9890		 * This is the correct place to update tcp_last_recv_time. Note
9891		 * that it is also updated for tcp structure that belongs to
9892		 * global and listener queues which do not really need updating.
9893		 * But that should not cause any harm.  And it is updated for
9894		 * all kinds of incoming segments, not only for data segments.
9895		 */
9896		tcp->tcp_last_recv_time = lbolt;
9897	}
9898
9899	flags = (unsigned int)tcpha->tha_flags & 0xFF;
9900
9901	BUMP_LOCAL(tcp->tcp_ibsegs);
9902	DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp);
9903
9904	if ((flags & TH_URG) && sqp != NULL) {
9905		/*
9906		 * TCP can't handle urgent pointers that arrive before
9907		 * the connection has been accept()ed since it can't
9908		 * buffer OOB data.  Discard segment if this happens.
9909		 *
9910		 * We can't just rely on a non-null tcp_listener to indicate
9911		 * that the accept() has completed since unlinking of the
9912		 * eager and completion of the accept are not atomic.
9913		 * tcp_detached, when it is not set (B_FALSE) indicates
9914		 * that the accept() has completed.
9915		 *
9916		 * Nor can it reassemble urgent pointers, so discard
9917		 * if it's not the next segment expected.
9918		 *
9919		 * Otherwise, collapse chain into one mblk (discard if
9920		 * that fails).  This makes sure the headers, retransmitted
9921		 * data, and new data all are in the same mblk.
9922		 */
9923		ASSERT(mp != NULL);
9924		if (tcp->tcp_detached || !pullupmsg(mp, -1)) {
9925			freemsg(mp);
9926			return;
9927		}
9928		/* Update pointers into message */
9929		iphdr = rptr = mp->b_rptr;
9930		tcpha = (tcpha_t *)&rptr[ip_hdr_len];
9931		if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) {
9932			/*
9933			 * Since we can't handle any data with this urgent
9934			 * pointer that is out of sequence, we expunge
9935			 * the data.  This allows us to still register
9936			 * the urgent mark and generate the M_PCSIG,
9937			 * which we can do.
9938			 */
9939			mp->b_wptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha);
9940			seg_len = 0;
9941		}
9942	}
9943
9944	switch (tcp->tcp_state) {
9945	case TCPS_SYN_SENT:
9946		if (connp->conn_final_sqp == NULL &&
9947		    tcp_outbound_squeue_switch && sqp != NULL) {
9948			ASSERT(connp->conn_initial_sqp == connp->conn_sqp);
9949			connp->conn_final_sqp = sqp;
9950			if (connp->conn_final_sqp != connp->conn_sqp) {
9951				DTRACE_PROBE1(conn__final__sqp__switch,
9952				    conn_t *, connp);
9953				CONN_INC_REF(connp);
9954				SQUEUE_SWITCH(connp, connp->conn_final_sqp);
9955				SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
9956				    tcp_input_data, connp, ira, ip_squeue_flag,
9957				    SQTAG_CONNECT_FINISH);
9958				return;
9959			}
9960			DTRACE_PROBE1(conn__final__sqp__same, conn_t *, connp);
9961		}
9962		if (flags & TH_ACK) {
9963			/*
9964			 * Note that our stack cannot send data before a
9965			 * connection is established, therefore the
9966			 * following check is valid.  Otherwise, it has
9967			 * to be changed.
9968			 */
9969			if (SEQ_LEQ(seg_ack, tcp->tcp_iss) ||
9970			    SEQ_GT(seg_ack, tcp->tcp_snxt)) {
9971				freemsg(mp);
9972				if (flags & TH_RST)
9973					return;
9974				tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq",
9975				    tcp, seg_ack, 0, TH_RST);
9976				return;
9977			}
9978			ASSERT(tcp->tcp_suna + 1 == seg_ack);
9979		}
9980		if (flags & TH_RST) {
9981			freemsg(mp);
9982			if (flags & TH_ACK)
9983				(void) tcp_clean_death(tcp,
9984				    ECONNREFUSED, 13);
9985			return;
9986		}
9987		if (!(flags & TH_SYN)) {
9988			freemsg(mp);
9989			return;
9990		}
9991
9992		/* Process all TCP options. */
9993		tcp_process_options(tcp, tcpha);
9994		/*
9995		 * The following changes our rwnd to be a multiple of the
9996		 * MIN(peer MSS, our MSS) for performance reason.
9997		 */
9998		(void) tcp_rwnd_set(tcp, MSS_ROUNDUP(connp->conn_rcvbuf,
9999		    tcp->tcp_mss));
10000
10001		/* Is the other end ECN capable? */
10002		if (tcp->tcp_ecn_ok) {
10003			if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) {
10004				tcp->tcp_ecn_ok = B_FALSE;
10005			}
10006		}
10007		/*
10008		 * Clear ECN flags because it may interfere with later
10009		 * processing.
10010		 */
10011		flags &= ~(TH_ECE|TH_CWR);
10012
10013		tcp->tcp_irs = seg_seq;
10014		tcp->tcp_rack = seg_seq;
10015		tcp->tcp_rnxt = seg_seq + 1;
10016		tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt);
10017		if (!TCP_IS_DETACHED(tcp)) {
10018			/* Allocate room for SACK options if needed. */
10019			connp->conn_wroff = connp->conn_ht_iphc_len;
10020			if (tcp->tcp_snd_sack_ok)
10021				connp->conn_wroff += TCPOPT_MAX_SACK_LEN;
10022			if (!tcp->tcp_loopback)
10023				connp->conn_wroff += tcps->tcps_wroff_xtra;
10024
10025			(void) proto_set_tx_wroff(connp->conn_rq, connp,
10026			    connp->conn_wroff);
10027		}
10028		if (flags & TH_ACK) {
10029			/*
10030			 * If we can't get the confirmation upstream, pretend
10031			 * we didn't even see this one.
10032			 *
10033			 * XXX: how can we pretend we didn't see it if we
10034			 * have updated rnxt et. al.
10035			 *
10036			 * For loopback we defer sending up the T_CONN_CON
10037			 * until after some checks below.
10038			 */
10039			mp1 = NULL;
10040			/*
10041			 * tcp_sendmsg() checks tcp_state without entering
10042			 * the squeue so tcp_state should be updated before
10043			 * sending up connection confirmation
10044			 */
10045			tcp->tcp_state = TCPS_ESTABLISHED;
10046			if (!tcp_conn_con(tcp, iphdr, mp,
10047			    tcp->tcp_loopback ? &mp1 : NULL, ira)) {
10048				tcp->tcp_state = TCPS_SYN_SENT;
10049				freemsg(mp);
10050				return;
10051			}
10052			/* SYN was acked - making progress */
10053			tcp->tcp_ip_forward_progress = B_TRUE;
10054
10055			/* One for the SYN */
10056			tcp->tcp_suna = tcp->tcp_iss + 1;
10057			tcp->tcp_valid_bits &= ~TCP_ISS_VALID;
10058
10059			/*
10060			 * If SYN was retransmitted, need to reset all
10061			 * retransmission info.  This is because this
10062			 * segment will be treated as a dup ACK.
10063			 */
10064			if (tcp->tcp_rexmit) {
10065				tcp->tcp_rexmit = B_FALSE;
10066				tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
10067				tcp->tcp_rexmit_max = tcp->tcp_snxt;
10068				tcp->tcp_snd_burst = tcp->tcp_localnet ?
10069				    TCP_CWND_INFINITE : TCP_CWND_NORMAL;
10070				tcp->tcp_ms_we_have_waited = 0;
10071
10072				/*
10073				 * Set tcp_cwnd back to 1 MSS, per
10074				 * recommendation from
10075				 * draft-floyd-incr-init-win-01.txt,
10076				 * Increasing TCP's Initial Window.
10077				 */
10078				tcp->tcp_cwnd = tcp->tcp_mss;
10079			}
10080
10081			tcp->tcp_swl1 = seg_seq;
10082			tcp->tcp_swl2 = seg_ack;
10083
10084			new_swnd = ntohs(tcpha->tha_win);
10085			tcp->tcp_swnd = new_swnd;
10086			if (new_swnd > tcp->tcp_max_swnd)
10087				tcp->tcp_max_swnd = new_swnd;
10088
10089			/*
10090			 * Always send the three-way handshake ack immediately
10091			 * in order to make the connection complete as soon as
10092			 * possible on the accepting host.
10093			 */
10094			flags |= TH_ACK_NEEDED;
10095
10096			/*
10097			 * Special case for loopback.  At this point we have
10098			 * received SYN-ACK from the remote endpoint.  In
10099			 * order to ensure that both endpoints reach the
10100			 * fused state prior to any data exchange, the final
10101			 * ACK needs to be sent before we indicate T_CONN_CON
10102			 * to the module upstream.
10103			 */
10104			if (tcp->tcp_loopback) {
10105				mblk_t *ack_mp;
10106
10107				ASSERT(!tcp->tcp_unfusable);
10108				ASSERT(mp1 != NULL);
10109				/*
10110				 * For loopback, we always get a pure SYN-ACK
10111				 * and only need to send back the final ACK
10112				 * with no data (this is because the other
10113				 * tcp is ours and we don't do T/TCP).  This
10114				 * final ACK triggers the passive side to
10115				 * perform fusion in ESTABLISHED state.
10116				 */
10117				if ((ack_mp = tcp_ack_mp(tcp)) != NULL) {
10118					if (tcp->tcp_ack_tid != 0) {
10119						(void) TCP_TIMER_CANCEL(tcp,
10120						    tcp->tcp_ack_tid);
10121						tcp->tcp_ack_tid = 0;
10122					}
10123					tcp_send_data(tcp, ack_mp);
10124					BUMP_LOCAL(tcp->tcp_obsegs);
10125					BUMP_MIB(&tcps->tcps_mib, tcpOutAck);
10126
10127					if (!IPCL_IS_NONSTR(connp)) {
10128						/* Send up T_CONN_CON */
10129						if (ira->ira_cred != NULL) {
10130							mblk_setcred(mp1,
10131							    ira->ira_cred,
10132							    ira->ira_cpid);
10133						}
10134						putnext(connp->conn_rq, mp1);
10135					} else {
10136						(*connp->conn_upcalls->
10137						    su_connected)
10138						    (connp->conn_upper_handle,
10139						    tcp->tcp_connid,
10140						    ira->ira_cred,
10141						    ira->ira_cpid);
10142						freemsg(mp1);
10143					}
10144
10145					freemsg(mp);
10146					return;
10147				}
10148				/*
10149				 * Forget fusion; we need to handle more
10150				 * complex cases below.  Send the deferred
10151				 * T_CONN_CON message upstream and proceed
10152				 * as usual.  Mark this tcp as not capable
10153				 * of fusion.
10154				 */
10155				TCP_STAT(tcps, tcp_fusion_unfusable);
10156				tcp->tcp_unfusable = B_TRUE;
10157				if (!IPCL_IS_NONSTR(connp)) {
10158					if (ira->ira_cred != NULL) {
10159						mblk_setcred(mp1, ira->ira_cred,
10160						    ira->ira_cpid);
10161					}
10162					putnext(connp->conn_rq, mp1);
10163				} else {
10164					(*connp->conn_upcalls->su_connected)
10165					    (connp->conn_upper_handle,
10166					    tcp->tcp_connid, ira->ira_cred,
10167					    ira->ira_cpid);
10168					freemsg(mp1);
10169				}
10170			}
10171
10172			/*
10173			 * Check to see if there is data to be sent.  If
10174			 * yes, set the transmit flag.  Then check to see
10175			 * if received data processing needs to be done.
10176			 * If not, go straight to xmit_check.  This short
10177			 * cut is OK as we don't support T/TCP.
10178			 */
10179			if (tcp->tcp_unsent)
10180				flags |= TH_XMIT_NEEDED;
10181
10182			if (seg_len == 0 && !(flags & TH_URG)) {
10183				freemsg(mp);
10184				goto xmit_check;
10185			}
10186
10187			flags &= ~TH_SYN;
10188			seg_seq++;
10189			break;
10190		}
10191		tcp->tcp_state = TCPS_SYN_RCVD;
10192		mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss,
10193		    NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
10194		if (mp1 != NULL) {
10195			tcp_send_data(tcp, mp1);
10196			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
10197		}
10198		freemsg(mp);
10199		return;
10200	case TCPS_SYN_RCVD:
10201		if (flags & TH_ACK) {
10202			/*
10203			 * In this state, a SYN|ACK packet is either bogus
10204			 * because the other side must be ACKing our SYN which
10205			 * indicates it has seen the ACK for their SYN and
10206			 * shouldn't retransmit it or we're crossing SYNs
10207			 * on active open.
10208			 */
10209			if ((flags & TH_SYN) && !tcp->tcp_active_open) {
10210				freemsg(mp);
10211				tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn",
10212				    tcp, seg_ack, 0, TH_RST);
10213				return;
10214			}
10215			/*
10216			 * NOTE: RFC 793 pg. 72 says this should be
10217			 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt
10218			 * but that would mean we have an ack that ignored
10219			 * our SYN.
10220			 */
10221			if (SEQ_LEQ(seg_ack, tcp->tcp_suna) ||
10222			    SEQ_GT(seg_ack, tcp->tcp_snxt)) {
10223				freemsg(mp);
10224				tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack",
10225				    tcp, seg_ack, 0, TH_RST);
10226				return;
10227			}
10228		}
10229		break;
10230	case TCPS_LISTEN:
10231		/*
10232		 * Only a TLI listener can come through this path when a
10233		 * acceptor is going back to be a listener and a packet
10234		 * for the acceptor hits the classifier. For a socket
10235		 * listener, this can never happen because a listener
10236		 * can never accept connection on itself and hence a
10237		 * socket acceptor can not go back to being a listener.
10238		 */
10239		ASSERT(!TCP_IS_SOCKET(tcp));
10240		/*FALLTHRU*/
10241	case TCPS_CLOSED:
10242	case TCPS_BOUND: {
10243		conn_t	*new_connp;
10244		ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
10245
10246		/*
10247		 * Don't accept any input on a closed tcp as this TCP logically
10248		 * does not exist on the system. Don't proceed further with
10249		 * this TCP. For instance, this packet could trigger another
10250		 * close of this tcp which would be disastrous for tcp_refcnt.
10251		 * tcp_close_detached / tcp_clean_death / tcp_closei_local must
10252		 * be called at most once on a TCP. In this case we need to
10253		 * refeed the packet into the classifier and figure out where
10254		 * the packet should go.
10255		 */
10256		new_connp = ipcl_classify(mp, ira, ipst);
10257		if (new_connp != NULL) {
10258			/* Drops ref on new_connp */
10259			tcp_reinput(new_connp, mp, ira, ipst);
10260			return;
10261		}
10262		/* We failed to classify. For now just drop the packet */
10263		freemsg(mp);
10264		return;
10265	}
10266	case TCPS_IDLE:
10267		/*
10268		 * Handle the case where the tcp_clean_death() has happened
10269		 * on a connection (application hasn't closed yet) but a packet
10270		 * was already queued on squeue before tcp_clean_death()
10271		 * was processed. Calling tcp_clean_death() twice on same
10272		 * connection can result in weird behaviour.
10273		 */
10274		freemsg(mp);
10275		return;
10276	default:
10277		break;
10278	}
10279
10280	/*
10281	 * Already on the correct queue/perimeter.
10282	 * If this is a detached connection and not an eager
10283	 * connection hanging off a listener then new data
10284	 * (past the FIN) will cause a reset.
10285	 * We do a special check here where it
10286	 * is out of the main line, rather than check
10287	 * if we are detached every time we see new
10288	 * data down below.
10289	 */
10290	if (TCP_IS_DETACHED_NONEAGER(tcp) &&
10291	    (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) {
10292		BUMP_MIB(&tcps->tcps_mib, tcpInClosed);
10293		DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp);
10294
10295		freemsg(mp);
10296		/*
10297		 * This could be an SSL closure alert. We're detached so just
10298		 * acknowledge it this last time.
10299		 */
10300		if (tcp->tcp_kssl_ctx != NULL) {
10301			kssl_release_ctx(tcp->tcp_kssl_ctx);
10302			tcp->tcp_kssl_ctx = NULL;
10303
10304			tcp->tcp_rnxt += seg_len;
10305			tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt);
10306			flags |= TH_ACK_NEEDED;
10307			goto ack_check;
10308		}
10309
10310		tcp_xmit_ctl("new data when detached", tcp,
10311		    tcp->tcp_snxt, 0, TH_RST);
10312		(void) tcp_clean_death(tcp, EPROTO, 12);
10313		return;
10314	}
10315
10316	mp->b_rptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha);
10317	urp = ntohs(tcpha->tha_urp) - TCP_OLD_URP_INTERPRETATION;
10318	new_swnd = ntohs(tcpha->tha_win) <<
10319	    ((tcpha->tha_flags & TH_SYN) ? 0 : tcp->tcp_snd_ws);
10320
10321	if (tcp->tcp_snd_ts_ok) {
10322		if (!tcp_paws_check(tcp, tcpha, &tcpopt)) {
10323			/*
10324			 * This segment is not acceptable.
10325			 * Drop it and send back an ACK.
10326			 */
10327			freemsg(mp);
10328			flags |= TH_ACK_NEEDED;
10329			goto ack_check;
10330		}
10331	} else if (tcp->tcp_snd_sack_ok) {
10332		ASSERT(tcp->tcp_sack_info != NULL);
10333		tcpopt.tcp = tcp;
10334		/*
10335		 * SACK info in already updated in tcp_parse_options.  Ignore
10336		 * all other TCP options...
10337		 */
10338		(void) tcp_parse_options(tcpha, &tcpopt);
10339	}
10340try_again:;
10341	mss = tcp->tcp_mss;
10342	gap = seg_seq - tcp->tcp_rnxt;
10343	rgap = tcp->tcp_rwnd - (gap + seg_len);
10344	/*
10345	 * gap is the amount of sequence space between what we expect to see
10346	 * and what we got for seg_seq.  A positive value for gap means
10347	 * something got lost.  A negative value means we got some old stuff.
10348	 */
10349	if (gap < 0) {
10350		/* Old stuff present.  Is the SYN in there? */
10351		if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) &&
10352		    (seg_len != 0)) {
10353			flags &= ~TH_SYN;
10354			seg_seq++;
10355			urp--;
10356			/* Recompute the gaps after noting the SYN. */
10357			goto try_again;
10358		}
10359		BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs);
10360		UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes,
10361		    (seg_len > -gap ? -gap : seg_len));
10362		/* Remove the old stuff from seg_len. */
10363		seg_len += gap;
10364		/*
10365		 * Anything left?
10366		 * Make sure to check for unack'd FIN when rest of data
10367		 * has been previously ack'd.
10368		 */
10369		if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) {
10370			/*
10371			 * Resets are only valid if they lie within our offered
10372			 * window.  If the RST bit is set, we just ignore this
10373			 * segment.
10374			 */
10375			if (flags & TH_RST) {
10376				freemsg(mp);
10377				return;
10378			}
10379
10380			/*
10381			 * The arriving of dup data packets indicate that we
10382			 * may have postponed an ack for too long, or the other
10383			 * side's RTT estimate is out of shape. Start acking
10384			 * more often.
10385			 */
10386			if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) &&
10387			    tcp->tcp_rack_cnt >= 1 &&
10388			    tcp->tcp_rack_abs_max > 2) {
10389				tcp->tcp_rack_abs_max--;
10390			}
10391			tcp->tcp_rack_cur_max = 1;
10392
10393			/*
10394			 * This segment is "unacceptable".  None of its
10395			 * sequence space lies within our advertized window.
10396			 *
10397			 * Adjust seg_len to the original value for tracing.
10398			 */
10399			seg_len -= gap;
10400			if (connp->conn_debug) {
10401				(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
10402				    "tcp_rput: unacceptable, gap %d, rgap %d, "
10403				    "flags 0x%x, seg_seq %u, seg_ack %u, "
10404				    "seg_len %d, rnxt %u, snxt %u, %s",
10405				    gap, rgap, flags, seg_seq, seg_ack,
10406				    seg_len, tcp->tcp_rnxt, tcp->tcp_snxt,
10407				    tcp_display(tcp, NULL,
10408				    DISP_ADDR_AND_PORT));
10409			}
10410
10411			/*
10412			 * Arrange to send an ACK in response to the
10413			 * unacceptable segment per RFC 793 page 69. There
10414			 * is only one small difference between ours and the
10415			 * acceptability test in the RFC - we accept ACK-only
10416			 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK
10417			 * will be generated.
10418			 *
10419			 * Note that we have to ACK an ACK-only packet at least
10420			 * for stacks that send 0-length keep-alives with
10421			 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122,
10422			 * section 4.2.3.6. As long as we don't ever generate
10423			 * an unacceptable packet in response to an incoming
10424			 * packet that is unacceptable, it should not cause
10425			 * "ACK wars".
10426			 */
10427			flags |=  TH_ACK_NEEDED;
10428
10429			/*
10430			 * Continue processing this segment in order to use the
10431			 * ACK information it contains, but skip all other
10432			 * sequence-number processing.	Processing the ACK
10433			 * information is necessary in order to
10434			 * re-synchronize connections that may have lost
10435			 * synchronization.
10436			 *
10437			 * We clear seg_len and flag fields related to
10438			 * sequence number processing as they are not
10439			 * to be trusted for an unacceptable segment.
10440			 */
10441			seg_len = 0;
10442			flags &= ~(TH_SYN | TH_FIN | TH_URG);
10443			goto process_ack;
10444		}
10445
10446		/* Fix seg_seq, and chew the gap off the front. */
10447		seg_seq = tcp->tcp_rnxt;
10448		urp += gap;
10449		do {
10450			mblk_t	*mp2;
10451			ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
10452			    (uintptr_t)UINT_MAX);
10453			gap += (uint_t)(mp->b_wptr - mp->b_rptr);
10454			if (gap > 0) {
10455				mp->b_rptr = mp->b_wptr - gap;
10456				break;
10457			}
10458			mp2 = mp;
10459			mp = mp->b_cont;
10460			freeb(mp2);
10461		} while (gap < 0);
10462		/*
10463		 * If the urgent data has already been acknowledged, we
10464		 * should ignore TH_URG below
10465		 */
10466		if (urp < 0)
10467			flags &= ~TH_URG;
10468	}
10469	/*
10470	 * rgap is the amount of stuff received out of window.  A negative
10471	 * value is the amount out of window.
10472	 */
10473	if (rgap < 0) {
10474		mblk_t	*mp2;
10475
10476		if (tcp->tcp_rwnd == 0) {
10477			BUMP_MIB(&tcps->tcps_mib, tcpInWinProbe);
10478		} else {
10479			BUMP_MIB(&tcps->tcps_mib, tcpInDataPastWinSegs);
10480			UPDATE_MIB(&tcps->tcps_mib,
10481			    tcpInDataPastWinBytes, -rgap);
10482		}
10483
10484		/*
10485		 * seg_len does not include the FIN, so if more than
10486		 * just the FIN is out of window, we act like we don't
10487		 * see it.  (If just the FIN is out of window, rgap
10488		 * will be zero and we will go ahead and acknowledge
10489		 * the FIN.)
10490		 */
10491		flags &= ~TH_FIN;
10492
10493		/* Fix seg_len and make sure there is something left. */
10494		seg_len += rgap;
10495		if (seg_len <= 0) {
10496			/*
10497			 * Resets are only valid if they lie within our offered
10498			 * window.  If the RST bit is set, we just ignore this
10499			 * segment.
10500			 */
10501			if (flags & TH_RST) {
10502				freemsg(mp);
10503				return;
10504			}
10505
10506			/* Per RFC 793, we need to send back an ACK. */
10507			flags |= TH_ACK_NEEDED;
10508
10509			/*
10510			 * Send SIGURG as soon as possible i.e. even
10511			 * if the TH_URG was delivered in a window probe
10512			 * packet (which will be unacceptable).
10513			 *
10514			 * We generate a signal if none has been generated
10515			 * for this connection or if this is a new urgent
10516			 * byte. Also send a zero-length "unmarked" message
10517			 * to inform SIOCATMARK that this is not the mark.
10518			 *
10519			 * tcp_urp_last_valid is cleared when the T_exdata_ind
10520			 * is sent up. This plus the check for old data
10521			 * (gap >= 0) handles the wraparound of the sequence
10522			 * number space without having to always track the
10523			 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks
10524			 * this max in its rcv_up variable).
10525			 *
10526			 * This prevents duplicate SIGURGS due to a "late"
10527			 * zero-window probe when the T_EXDATA_IND has already
10528			 * been sent up.
10529			 */
10530			if ((flags & TH_URG) &&
10531			    (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq,
10532			    tcp->tcp_urp_last))) {
10533				if (IPCL_IS_NONSTR(connp)) {
10534					if (!TCP_IS_DETACHED(tcp)) {
10535						(*connp->conn_upcalls->
10536						    su_signal_oob)
10537						    (connp->conn_upper_handle,
10538						    urp);
10539					}
10540				} else {
10541					mp1 = allocb(0, BPRI_MED);
10542					if (mp1 == NULL) {
10543						freemsg(mp);
10544						return;
10545					}
10546					if (!TCP_IS_DETACHED(tcp) &&
10547					    !putnextctl1(connp->conn_rq,
10548					    M_PCSIG, SIGURG)) {
10549						/* Try again on the rexmit. */
10550						freemsg(mp1);
10551						freemsg(mp);
10552						return;
10553					}
10554					/*
10555					 * If the next byte would be the mark
10556					 * then mark with MARKNEXT else mark
10557					 * with NOTMARKNEXT.
10558					 */
10559					if (gap == 0 && urp == 0)
10560						mp1->b_flag |= MSGMARKNEXT;
10561					else
10562						mp1->b_flag |= MSGNOTMARKNEXT;
10563					freemsg(tcp->tcp_urp_mark_mp);
10564					tcp->tcp_urp_mark_mp = mp1;
10565					flags |= TH_SEND_URP_MARK;
10566				}
10567				tcp->tcp_urp_last_valid = B_TRUE;
10568				tcp->tcp_urp_last = urp + seg_seq;
10569			}
10570			/*
10571			 * If this is a zero window probe, continue to
10572			 * process the ACK part.  But we need to set seg_len
10573			 * to 0 to avoid data processing.  Otherwise just
10574			 * drop the segment and send back an ACK.
10575			 */
10576			if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) {
10577				flags &= ~(TH_SYN | TH_URG);
10578				seg_len = 0;
10579				goto process_ack;
10580			} else {
10581				freemsg(mp);
10582				goto ack_check;
10583			}
10584		}
10585		/* Pitch out of window stuff off the end. */
10586		rgap = seg_len;
10587		mp2 = mp;
10588		do {
10589			ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <=
10590			    (uintptr_t)INT_MAX);
10591			rgap -= (int)(mp2->b_wptr - mp2->b_rptr);
10592			if (rgap < 0) {
10593				mp2->b_wptr += rgap;
10594				if ((mp1 = mp2->b_cont) != NULL) {
10595					mp2->b_cont = NULL;
10596					freemsg(mp1);
10597				}
10598				break;
10599			}
10600		} while ((mp2 = mp2->b_cont) != NULL);
10601	}
10602ok:;
10603	/*
10604	 * TCP should check ECN info for segments inside the window only.
10605	 * Therefore the check should be done here.
10606	 */
10607	if (tcp->tcp_ecn_ok) {
10608		if (flags & TH_CWR) {
10609			tcp->tcp_ecn_echo_on = B_FALSE;
10610		}
10611		/*
10612		 * Note that both ECN_CE and CWR can be set in the
10613		 * same segment.  In this case, we once again turn
10614		 * on ECN_ECHO.
10615		 */
10616		if (connp->conn_ipversion == IPV4_VERSION) {
10617			uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service;
10618
10619			if ((tos & IPH_ECN_CE) == IPH_ECN_CE) {
10620				tcp->tcp_ecn_echo_on = B_TRUE;
10621			}
10622		} else {
10623			uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf;
10624
10625			if ((vcf & htonl(IPH_ECN_CE << 20)) ==
10626			    htonl(IPH_ECN_CE << 20)) {
10627				tcp->tcp_ecn_echo_on = B_TRUE;
10628			}
10629		}
10630	}
10631
10632	/*
10633	 * Check whether we can update tcp_ts_recent.  This test is
10634	 * NOT the one in RFC 1323 3.4.  It is from Braden, 1993, "TCP
10635	 * Extensions for High Performance: An Update", Internet Draft.
10636	 */
10637	if (tcp->tcp_snd_ts_ok &&
10638	    TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
10639	    SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
10640		tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
10641		tcp->tcp_last_rcv_lbolt = lbolt64;
10642	}
10643
10644	if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) {
10645		/*
10646		 * FIN in an out of order segment.  We record this in
10647		 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq.
10648		 * Clear the FIN so that any check on FIN flag will fail.
10649		 * Remember that FIN also counts in the sequence number
10650		 * space.  So we need to ack out of order FIN only segments.
10651		 */
10652		if (flags & TH_FIN) {
10653			tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID;
10654			tcp->tcp_ofo_fin_seq = seg_seq + seg_len;
10655			flags &= ~TH_FIN;
10656			flags |= TH_ACK_NEEDED;
10657		}
10658		if (seg_len > 0) {
10659			/* Fill in the SACK blk list. */
10660			if (tcp->tcp_snd_sack_ok) {
10661				ASSERT(tcp->tcp_sack_info != NULL);
10662				tcp_sack_insert(tcp->tcp_sack_list,
10663				    seg_seq, seg_seq + seg_len,
10664				    &(tcp->tcp_num_sack_blk));
10665			}
10666
10667			/*
10668			 * Attempt reassembly and see if we have something
10669			 * ready to go.
10670			 */
10671			mp = tcp_reass(tcp, mp, seg_seq);
10672			/* Always ack out of order packets */
10673			flags |= TH_ACK_NEEDED | TH_PUSH;
10674			if (mp) {
10675				ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
10676				    (uintptr_t)INT_MAX);
10677				seg_len = mp->b_cont ? msgdsize(mp) :
10678				    (int)(mp->b_wptr - mp->b_rptr);
10679				seg_seq = tcp->tcp_rnxt;
10680				/*
10681				 * A gap is filled and the seq num and len
10682				 * of the gap match that of a previously
10683				 * received FIN, put the FIN flag back in.
10684				 */
10685				if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) &&
10686				    seg_seq + seg_len == tcp->tcp_ofo_fin_seq) {
10687					flags |= TH_FIN;
10688					tcp->tcp_valid_bits &=
10689					    ~TCP_OFO_FIN_VALID;
10690				}
10691			} else {
10692				/*
10693				 * Keep going even with NULL mp.
10694				 * There may be a useful ACK or something else
10695				 * we don't want to miss.
10696				 *
10697				 * But TCP should not perform fast retransmit
10698				 * because of the ack number.  TCP uses
10699				 * seg_len == 0 to determine if it is a pure
10700				 * ACK.  And this is not a pure ACK.
10701				 */
10702				seg_len = 0;
10703				ofo_seg = B_TRUE;
10704			}
10705		}
10706	} else if (seg_len > 0) {
10707		BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
10708		UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, seg_len);
10709		/*
10710		 * If an out of order FIN was received before, and the seq
10711		 * num and len of the new segment match that of the FIN,
10712		 * put the FIN flag back in.
10713		 */
10714		if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) &&
10715		    seg_seq + seg_len == tcp->tcp_ofo_fin_seq) {
10716			flags |= TH_FIN;
10717			tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID;
10718		}
10719	}
10720	if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) {
10721	if (flags & TH_RST) {
10722		freemsg(mp);
10723		switch (tcp->tcp_state) {
10724		case TCPS_SYN_RCVD:
10725			(void) tcp_clean_death(tcp, ECONNREFUSED, 14);
10726			break;
10727		case TCPS_ESTABLISHED:
10728		case TCPS_FIN_WAIT_1:
10729		case TCPS_FIN_WAIT_2:
10730		case TCPS_CLOSE_WAIT:
10731			(void) tcp_clean_death(tcp, ECONNRESET, 15);
10732			break;
10733		case TCPS_CLOSING:
10734		case TCPS_LAST_ACK:
10735			(void) tcp_clean_death(tcp, 0, 16);
10736			break;
10737		default:
10738			ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
10739			(void) tcp_clean_death(tcp, ENXIO, 17);
10740			break;
10741		}
10742		return;
10743	}
10744	if (flags & TH_SYN) {
10745		/*
10746		 * See RFC 793, Page 71
10747		 *
10748		 * The seq number must be in the window as it should
10749		 * be "fixed" above.  If it is outside window, it should
10750		 * be already rejected.  Note that we allow seg_seq to be
10751		 * rnxt + rwnd because we want to accept 0 window probe.
10752		 */
10753		ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) &&
10754		    SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd));
10755		freemsg(mp);
10756		/*
10757		 * If the ACK flag is not set, just use our snxt as the
10758		 * seq number of the RST segment.
10759		 */
10760		if (!(flags & TH_ACK)) {
10761			seg_ack = tcp->tcp_snxt;
10762		}
10763		tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1,
10764		    TH_RST|TH_ACK);
10765		ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
10766		(void) tcp_clean_death(tcp, ECONNRESET, 18);
10767		return;
10768	}
10769	/*
10770	 * urp could be -1 when the urp field in the packet is 0
10771	 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent
10772	 * byte was at seg_seq - 1, in which case we ignore the urgent flag.
10773	 */
10774	if (flags & TH_URG && urp >= 0) {
10775		if (!tcp->tcp_urp_last_valid ||
10776		    SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) {
10777			/*
10778			 * Non-STREAMS sockets handle the urgent data a litte
10779			 * differently from STREAMS based sockets. There is no
10780			 * need to mark any mblks with the MSG{NOT,}MARKNEXT
10781			 * flags to keep SIOCATMARK happy. Instead a
10782			 * su_signal_oob upcall is made to update the mark.
10783			 * Neither is a T_EXDATA_IND mblk needed to be
10784			 * prepended to the urgent data. The urgent data is
10785			 * delivered using the su_recv upcall, where we set
10786			 * the MSG_OOB flag to indicate that it is urg data.
10787			 *
10788			 * Neither TH_SEND_URP_MARK nor TH_MARKNEXT_NEEDED
10789			 * are used by non-STREAMS sockets.
10790			 */
10791			if (IPCL_IS_NONSTR(connp)) {
10792				if (!TCP_IS_DETACHED(tcp)) {
10793					(*connp->conn_upcalls->su_signal_oob)
10794					    (connp->conn_upper_handle, urp);
10795				}
10796			} else {
10797				/*
10798				 * If we haven't generated the signal yet for
10799				 * this urgent pointer value, do it now.  Also,
10800				 * send up a zero-length M_DATA indicating
10801				 * whether or not this is the mark. The latter
10802				 * is not needed when a T_EXDATA_IND is sent up.
10803				 * However, if there are allocation failures
10804				 * this code relies on the sender retransmitting
10805				 * and the socket code for determining the mark
10806				 * should not block waiting for the peer to
10807				 * transmit. Thus, for simplicity we always
10808				 * send up the mark indication.
10809				 */
10810				mp1 = allocb(0, BPRI_MED);
10811				if (mp1 == NULL) {
10812					freemsg(mp);
10813					return;
10814				}
10815				if (!TCP_IS_DETACHED(tcp) &&
10816				    !putnextctl1(connp->conn_rq, M_PCSIG,
10817				    SIGURG)) {
10818					/* Try again on the rexmit. */
10819					freemsg(mp1);
10820					freemsg(mp);
10821					return;
10822				}
10823				/*
10824				 * Mark with NOTMARKNEXT for now.
10825				 * The code below will change this to MARKNEXT
10826				 * if we are at the mark.
10827				 *
10828				 * If there are allocation failures (e.g. in
10829				 * dupmsg below) the next time tcp_rput_data
10830				 * sees the urgent segment it will send up the
10831				 * MSGMARKNEXT message.
10832				 */
10833				mp1->b_flag |= MSGNOTMARKNEXT;
10834				freemsg(tcp->tcp_urp_mark_mp);
10835				tcp->tcp_urp_mark_mp = mp1;
10836				flags |= TH_SEND_URP_MARK;
10837#ifdef DEBUG
10838				(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
10839				    "tcp_rput: sent M_PCSIG 2 seq %x urp %x "
10840				    "last %x, %s",
10841				    seg_seq, urp, tcp->tcp_urp_last,
10842				    tcp_display(tcp, NULL, DISP_PORT_ONLY));
10843#endif /* DEBUG */
10844			}
10845			tcp->tcp_urp_last_valid = B_TRUE;
10846			tcp->tcp_urp_last = urp + seg_seq;
10847		} else if (tcp->tcp_urp_mark_mp != NULL) {
10848			/*
10849			 * An allocation failure prevented the previous
10850			 * tcp_input_data from sending up the allocated
10851			 * MSG*MARKNEXT message - send it up this time
10852			 * around.
10853			 */
10854			flags |= TH_SEND_URP_MARK;
10855		}
10856
10857		/*
10858		 * If the urgent byte is in this segment, make sure that it is
10859		 * all by itself.  This makes it much easier to deal with the
10860		 * possibility of an allocation failure on the T_exdata_ind.
10861		 * Note that seg_len is the number of bytes in the segment, and
10862		 * urp is the offset into the segment of the urgent byte.
10863		 * urp < seg_len means that the urgent byte is in this segment.
10864		 */
10865		if (urp < seg_len) {
10866			if (seg_len != 1) {
10867				uint32_t  tmp_rnxt;
10868				/*
10869				 * Break it up and feed it back in.
10870				 * Re-attach the IP header.
10871				 */
10872				mp->b_rptr = iphdr;
10873				if (urp > 0) {
10874					/*
10875					 * There is stuff before the urgent
10876					 * byte.
10877					 */
10878					mp1 = dupmsg(mp);
10879					if (!mp1) {
10880						/*
10881						 * Trim from urgent byte on.
10882						 * The rest will come back.
10883						 */
10884						(void) adjmsg(mp,
10885						    urp - seg_len);
10886						tcp_input_data(connp,
10887						    mp, NULL, ira);
10888						return;
10889					}
10890					(void) adjmsg(mp1, urp - seg_len);
10891					/* Feed this piece back in. */
10892					tmp_rnxt = tcp->tcp_rnxt;
10893					tcp_input_data(connp, mp1, NULL, ira);
10894					/*
10895					 * If the data passed back in was not
10896					 * processed (ie: bad ACK) sending
10897					 * the remainder back in will cause a
10898					 * loop. In this case, drop the
10899					 * packet and let the sender try
10900					 * sending a good packet.
10901					 */
10902					if (tmp_rnxt == tcp->tcp_rnxt) {
10903						freemsg(mp);
10904						return;
10905					}
10906				}
10907				if (urp != seg_len - 1) {
10908					uint32_t  tmp_rnxt;
10909					/*
10910					 * There is stuff after the urgent
10911					 * byte.
10912					 */
10913					mp1 = dupmsg(mp);
10914					if (!mp1) {
10915						/*
10916						 * Trim everything beyond the
10917						 * urgent byte.  The rest will
10918						 * come back.
10919						 */
10920						(void) adjmsg(mp,
10921						    urp + 1 - seg_len);
10922						tcp_input_data(connp,
10923						    mp, NULL, ira);
10924						return;
10925					}
10926					(void) adjmsg(mp1, urp + 1 - seg_len);
10927					tmp_rnxt = tcp->tcp_rnxt;
10928					tcp_input_data(connp, mp1, NULL, ira);
10929					/*
10930					 * If the data passed back in was not
10931					 * processed (ie: bad ACK) sending
10932					 * the remainder back in will cause a
10933					 * loop. In this case, drop the
10934					 * packet and let the sender try
10935					 * sending a good packet.
10936					 */
10937					if (tmp_rnxt == tcp->tcp_rnxt) {
10938						freemsg(mp);
10939						return;
10940					}
10941				}
10942				tcp_input_data(connp, mp, NULL, ira);
10943				return;
10944			}
10945			/*
10946			 * This segment contains only the urgent byte.  We
10947			 * have to allocate the T_exdata_ind, if we can.
10948			 */
10949			if (IPCL_IS_NONSTR(connp)) {
10950				int error;
10951
10952				(*connp->conn_upcalls->su_recv)
10953				    (connp->conn_upper_handle, mp, seg_len,
10954				    MSG_OOB, &error, NULL);
10955				/*
10956				 * We should never be in middle of a
10957				 * fallback, the squeue guarantees that.
10958				 */
10959				ASSERT(error != EOPNOTSUPP);
10960				mp = NULL;
10961				goto update_ack;
10962			} else if (!tcp->tcp_urp_mp) {
10963				struct T_exdata_ind *tei;
10964				mp1 = allocb(sizeof (struct T_exdata_ind),
10965				    BPRI_MED);
10966				if (!mp1) {
10967					/*
10968					 * Sigh... It'll be back.
10969					 * Generate any MSG*MARK message now.
10970					 */
10971					freemsg(mp);
10972					seg_len = 0;
10973					if (flags & TH_SEND_URP_MARK) {
10974
10975
10976						ASSERT(tcp->tcp_urp_mark_mp);
10977						tcp->tcp_urp_mark_mp->b_flag &=
10978						    ~MSGNOTMARKNEXT;
10979						tcp->tcp_urp_mark_mp->b_flag |=
10980						    MSGMARKNEXT;
10981					}
10982					goto ack_check;
10983				}
10984				mp1->b_datap->db_type = M_PROTO;
10985				tei = (struct T_exdata_ind *)mp1->b_rptr;
10986				tei->PRIM_type = T_EXDATA_IND;
10987				tei->MORE_flag = 0;
10988				mp1->b_wptr = (uchar_t *)&tei[1];
10989				tcp->tcp_urp_mp = mp1;
10990#ifdef DEBUG
10991				(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
10992				    "tcp_rput: allocated exdata_ind %s",
10993				    tcp_display(tcp, NULL,
10994				    DISP_PORT_ONLY));
10995#endif /* DEBUG */
10996				/*
10997				 * There is no need to send a separate MSG*MARK
10998				 * message since the T_EXDATA_IND will be sent
10999				 * now.
11000				 */
11001				flags &= ~TH_SEND_URP_MARK;
11002				freemsg(tcp->tcp_urp_mark_mp);
11003				tcp->tcp_urp_mark_mp = NULL;
11004			}
11005			/*
11006			 * Now we are all set.  On the next putnext upstream,
11007			 * tcp_urp_mp will be non-NULL and will get prepended
11008			 * to what has to be this piece containing the urgent
11009			 * byte.  If for any reason we abort this segment below,
11010			 * if it comes back, we will have this ready, or it
11011			 * will get blown off in close.
11012			 */
11013		} else if (urp == seg_len) {
11014			/*
11015			 * The urgent byte is the next byte after this sequence
11016			 * number. If this endpoint is non-STREAMS, then there
11017			 * is nothing to do here since the socket has already
11018			 * been notified about the urg pointer by the
11019			 * su_signal_oob call above.
11020			 *
11021			 * In case of STREAMS, some more work might be needed.
11022			 * If there is data it is marked with MSGMARKNEXT and
11023			 * and any tcp_urp_mark_mp is discarded since it is not
11024			 * needed. Otherwise, if the code above just allocated
11025			 * a zero-length tcp_urp_mark_mp message, that message
11026			 * is tagged with MSGMARKNEXT. Sending up these
11027			 * MSGMARKNEXT messages makes SIOCATMARK work correctly
11028			 * even though the T_EXDATA_IND will not be sent up
11029			 * until the urgent byte arrives.
11030			 */
11031			if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
11032				if (seg_len != 0) {
11033					flags |= TH_MARKNEXT_NEEDED;
11034					freemsg(tcp->tcp_urp_mark_mp);
11035					tcp->tcp_urp_mark_mp = NULL;
11036					flags &= ~TH_SEND_URP_MARK;
11037				} else if (tcp->tcp_urp_mark_mp != NULL) {
11038					flags |= TH_SEND_URP_MARK;
11039					tcp->tcp_urp_mark_mp->b_flag &=
11040					    ~MSGNOTMARKNEXT;
11041					tcp->tcp_urp_mark_mp->b_flag |=
11042					    MSGMARKNEXT;
11043				}
11044			}
11045#ifdef DEBUG
11046			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
11047			    "tcp_rput: AT MARK, len %d, flags 0x%x, %s",
11048			    seg_len, flags,
11049			    tcp_display(tcp, NULL, DISP_PORT_ONLY));
11050#endif /* DEBUG */
11051		}
11052#ifdef DEBUG
11053		else {
11054			/* Data left until we hit mark */
11055			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
11056			    "tcp_rput: URP %d bytes left, %s",
11057			    urp - seg_len, tcp_display(tcp, NULL,
11058			    DISP_PORT_ONLY));
11059		}
11060#endif /* DEBUG */
11061	}
11062
11063process_ack:
11064	if (!(flags & TH_ACK)) {
11065		freemsg(mp);
11066		goto xmit_check;
11067	}
11068	}
11069	bytes_acked = (int)(seg_ack - tcp->tcp_suna);
11070
11071	if (bytes_acked > 0)
11072		tcp->tcp_ip_forward_progress = B_TRUE;
11073	if (tcp->tcp_state == TCPS_SYN_RCVD) {
11074		if ((tcp->tcp_conn.tcp_eager_conn_ind != NULL) &&
11075		    ((tcp->tcp_kssl_ent == NULL) || !tcp->tcp_kssl_pending)) {
11076			/* 3-way handshake complete - pass up the T_CONN_IND */
11077			tcp_t	*listener = tcp->tcp_listener;
11078			mblk_t	*mp = tcp->tcp_conn.tcp_eager_conn_ind;
11079
11080			tcp->tcp_tconnind_started = B_TRUE;
11081			tcp->tcp_conn.tcp_eager_conn_ind = NULL;
11082			/*
11083			 * We are here means eager is fine but it can
11084			 * get a TH_RST at any point between now and till
11085			 * accept completes and disappear. We need to
11086			 * ensure that reference to eager is valid after
11087			 * we get out of eager's perimeter. So we do
11088			 * an extra refhold.
11089			 */
11090			CONN_INC_REF(connp);
11091
11092			/*
11093			 * The listener also exists because of the refhold
11094			 * done in tcp_input_listener. Its possible that it
11095			 * might have closed. We will check that once we
11096			 * get inside listeners context.
11097			 */
11098			CONN_INC_REF(listener->tcp_connp);
11099			if (listener->tcp_connp->conn_sqp ==
11100			    connp->conn_sqp) {
11101				/*
11102				 * We optimize by not calling an SQUEUE_ENTER
11103				 * on the listener since we know that the
11104				 * listener and eager squeues are the same.
11105				 * We are able to make this check safely only
11106				 * because neither the eager nor the listener
11107				 * can change its squeue. Only an active connect
11108				 * can change its squeue
11109				 */
11110				tcp_send_conn_ind(listener->tcp_connp, mp,
11111				    listener->tcp_connp->conn_sqp);
11112				CONN_DEC_REF(listener->tcp_connp);
11113			} else if (!tcp->tcp_loopback) {
11114				SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
11115				    mp, tcp_send_conn_ind,
11116				    listener->tcp_connp, NULL, SQ_FILL,
11117				    SQTAG_TCP_CONN_IND);
11118			} else {
11119				SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
11120				    mp, tcp_send_conn_ind,
11121				    listener->tcp_connp, NULL, SQ_PROCESS,
11122				    SQTAG_TCP_CONN_IND);
11123			}
11124		}
11125
11126		/*
11127		 * We are seeing the final ack in the three way
11128		 * hand shake of a active open'ed connection
11129		 * so we must send up a T_CONN_CON
11130		 *
11131		 * tcp_sendmsg() checks tcp_state without entering
11132		 * the squeue so tcp_state should be updated before
11133		 * sending up connection confirmation.
11134		 */
11135		tcp->tcp_state = TCPS_ESTABLISHED;
11136		if (tcp->tcp_active_open) {
11137			if (!tcp_conn_con(tcp, iphdr, mp, NULL, ira)) {
11138				freemsg(mp);
11139				tcp->tcp_state = TCPS_SYN_RCVD;
11140				return;
11141			}
11142			/*
11143			 * Don't fuse the loopback endpoints for
11144			 * simultaneous active opens.
11145			 */
11146			if (tcp->tcp_loopback) {
11147				TCP_STAT(tcps, tcp_fusion_unfusable);
11148				tcp->tcp_unfusable = B_TRUE;
11149			}
11150		}
11151
11152		tcp->tcp_suna = tcp->tcp_iss + 1;	/* One for the SYN */
11153		bytes_acked--;
11154		/* SYN was acked - making progress */
11155		tcp->tcp_ip_forward_progress = B_TRUE;
11156
11157		/*
11158		 * If SYN was retransmitted, need to reset all
11159		 * retransmission info as this segment will be
11160		 * treated as a dup ACK.
11161		 */
11162		if (tcp->tcp_rexmit) {
11163			tcp->tcp_rexmit = B_FALSE;
11164			tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
11165			tcp->tcp_rexmit_max = tcp->tcp_snxt;
11166			tcp->tcp_snd_burst = tcp->tcp_localnet ?
11167			    TCP_CWND_INFINITE : TCP_CWND_NORMAL;
11168			tcp->tcp_ms_we_have_waited = 0;
11169			tcp->tcp_cwnd = mss;
11170		}
11171
11172		/*
11173		 * We set the send window to zero here.
11174		 * This is needed if there is data to be
11175		 * processed already on the queue.
11176		 * Later (at swnd_update label), the
11177		 * "new_swnd > tcp_swnd" condition is satisfied
11178		 * the XMIT_NEEDED flag is set in the current
11179		 * (SYN_RCVD) state. This ensures tcp_wput_data() is
11180		 * called if there is already data on queue in
11181		 * this state.
11182		 */
11183		tcp->tcp_swnd = 0;
11184
11185		if (new_swnd > tcp->tcp_max_swnd)
11186			tcp->tcp_max_swnd = new_swnd;
11187		tcp->tcp_swl1 = seg_seq;
11188		tcp->tcp_swl2 = seg_ack;
11189		tcp->tcp_valid_bits &= ~TCP_ISS_VALID;
11190
11191		/* Fuse when both sides are in ESTABLISHED state */
11192		if (tcp->tcp_loopback && do_tcp_fusion)
11193			tcp_fuse(tcp, iphdr, tcpha);
11194
11195	}
11196	/* This code follows 4.4BSD-Lite2 mostly. */
11197	if (bytes_acked < 0)
11198		goto est;
11199
11200	/*
11201	 * If TCP is ECN capable and the congestion experience bit is
11202	 * set, reduce tcp_cwnd and tcp_ssthresh.  But this should only be
11203	 * done once per window (or more loosely, per RTT).
11204	 */
11205	if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max))
11206		tcp->tcp_cwr = B_FALSE;
11207	if (tcp->tcp_ecn_ok && (flags & TH_ECE)) {
11208		if (!tcp->tcp_cwr) {
11209			npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss;
11210			tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss;
11211			tcp->tcp_cwnd = npkt * mss;
11212			/*
11213			 * If the cwnd is 0, use the timer to clock out
11214			 * new segments.  This is required by the ECN spec.
11215			 */
11216			if (npkt == 0) {
11217				TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
11218				/*
11219				 * This makes sure that when the ACK comes
11220				 * back, we will increase tcp_cwnd by 1 MSS.
11221				 */
11222				tcp->tcp_cwnd_cnt = 0;
11223			}
11224			tcp->tcp_cwr = B_TRUE;
11225			/*
11226			 * This marks the end of the current window of in
11227			 * flight data.  That is why we don't use
11228			 * tcp_suna + tcp_swnd.  Only data in flight can
11229			 * provide ECN info.
11230			 */
11231			tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
11232			tcp->tcp_ecn_cwr_sent = B_FALSE;
11233		}
11234	}
11235
11236	mp1 = tcp->tcp_xmit_head;
11237	if (bytes_acked == 0) {
11238		if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) {
11239			int dupack_cnt;
11240
11241			BUMP_MIB(&tcps->tcps_mib, tcpInDupAck);
11242			/*
11243			 * Fast retransmit.  When we have seen exactly three
11244			 * identical ACKs while we have unacked data
11245			 * outstanding we take it as a hint that our peer
11246			 * dropped something.
11247			 *
11248			 * If TCP is retransmitting, don't do fast retransmit.
11249			 */
11250			if (mp1 && tcp->tcp_suna != tcp->tcp_snxt &&
11251			    ! tcp->tcp_rexmit) {
11252				/* Do Limited Transmit */
11253				if ((dupack_cnt = ++tcp->tcp_dupack_cnt) <
11254				    tcps->tcps_dupack_fast_retransmit) {
11255					/*
11256					 * RFC 3042
11257					 *
11258					 * What we need to do is temporarily
11259					 * increase tcp_cwnd so that new
11260					 * data can be sent if it is allowed
11261					 * by the receive window (tcp_rwnd).
11262					 * tcp_wput_data() will take care of
11263					 * the rest.
11264					 *
11265					 * If the connection is SACK capable,
11266					 * only do limited xmit when there
11267					 * is SACK info.
11268					 *
11269					 * Note how tcp_cwnd is incremented.
11270					 * The first dup ACK will increase
11271					 * it by 1 MSS.  The second dup ACK
11272					 * will increase it by 2 MSS.  This
11273					 * means that only 1 new segment will
11274					 * be sent for each dup ACK.
11275					 */
11276					if (tcp->tcp_unsent > 0 &&
11277					    (!tcp->tcp_snd_sack_ok ||
11278					    (tcp->tcp_snd_sack_ok &&
11279					    tcp->tcp_notsack_list != NULL))) {
11280						tcp->tcp_cwnd += mss <<
11281						    (tcp->tcp_dupack_cnt - 1);
11282						flags |= TH_LIMIT_XMIT;
11283					}
11284				} else if (dupack_cnt ==
11285				    tcps->tcps_dupack_fast_retransmit) {
11286
11287				/*
11288				 * If we have reduced tcp_ssthresh
11289				 * because of ECN, do not reduce it again
11290				 * unless it is already one window of data
11291				 * away.  After one window of data, tcp_cwr
11292				 * should then be cleared.  Note that
11293				 * for non ECN capable connection, tcp_cwr
11294				 * should always be false.
11295				 *
11296				 * Adjust cwnd since the duplicate
11297				 * ack indicates that a packet was
11298				 * dropped (due to congestion.)
11299				 */
11300				if (!tcp->tcp_cwr) {
11301					npkt = ((tcp->tcp_snxt -
11302					    tcp->tcp_suna) >> 1) / mss;
11303					tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) *
11304					    mss;
11305					tcp->tcp_cwnd = (npkt +
11306					    tcp->tcp_dupack_cnt) * mss;
11307				}
11308				if (tcp->tcp_ecn_ok) {
11309					tcp->tcp_cwr = B_TRUE;
11310					tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
11311					tcp->tcp_ecn_cwr_sent = B_FALSE;
11312				}
11313
11314				/*
11315				 * We do Hoe's algorithm.  Refer to her
11316				 * paper "Improving the Start-up Behavior
11317				 * of a Congestion Control Scheme for TCP,"
11318				 * appeared in SIGCOMM'96.
11319				 *
11320				 * Save highest seq no we have sent so far.
11321				 * Be careful about the invisible FIN byte.
11322				 */
11323				if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
11324				    (tcp->tcp_unsent == 0)) {
11325					tcp->tcp_rexmit_max = tcp->tcp_fss;
11326				} else {
11327					tcp->tcp_rexmit_max = tcp->tcp_snxt;
11328				}
11329
11330				/*
11331				 * Do not allow bursty traffic during.
11332				 * fast recovery.  Refer to Fall and Floyd's
11333				 * paper "Simulation-based Comparisons of
11334				 * Tahoe, Reno and SACK TCP" (in CCR?)
11335				 * This is a best current practise.
11336				 */
11337				tcp->tcp_snd_burst = TCP_CWND_SS;
11338
11339				/*
11340				 * For SACK:
11341				 * Calculate tcp_pipe, which is the
11342				 * estimated number of bytes in
11343				 * network.
11344				 *
11345				 * tcp_fack is the highest sack'ed seq num
11346				 * TCP has received.
11347				 *
11348				 * tcp_pipe is explained in the above quoted
11349				 * Fall and Floyd's paper.  tcp_fack is
11350				 * explained in Mathis and Mahdavi's
11351				 * "Forward Acknowledgment: Refining TCP
11352				 * Congestion Control" in SIGCOMM '96.
11353				 */
11354				if (tcp->tcp_snd_sack_ok) {
11355					ASSERT(tcp->tcp_sack_info != NULL);
11356					if (tcp->tcp_notsack_list != NULL) {
11357						tcp->tcp_pipe = tcp->tcp_snxt -
11358						    tcp->tcp_fack;
11359						tcp->tcp_sack_snxt = seg_ack;
11360						flags |= TH_NEED_SACK_REXMIT;
11361					} else {
11362						/*
11363						 * Always initialize tcp_pipe
11364						 * even though we don't have
11365						 * any SACK info.  If later
11366						 * we get SACK info and
11367						 * tcp_pipe is not initialized,
11368						 * funny things will happen.
11369						 */
11370						tcp->tcp_pipe =
11371						    tcp->tcp_cwnd_ssthresh;
11372					}
11373				} else {
11374					flags |= TH_REXMIT_NEEDED;
11375				} /* tcp_snd_sack_ok */
11376
11377				} else {
11378					/*
11379					 * Here we perform congestion
11380					 * avoidance, but NOT slow start.
11381					 * This is known as the Fast
11382					 * Recovery Algorithm.
11383					 */
11384					if (tcp->tcp_snd_sack_ok &&
11385					    tcp->tcp_notsack_list != NULL) {
11386						flags |= TH_NEED_SACK_REXMIT;
11387						tcp->tcp_pipe -= mss;
11388						if (tcp->tcp_pipe < 0)
11389							tcp->tcp_pipe = 0;
11390					} else {
11391					/*
11392					 * We know that one more packet has
11393					 * left the pipe thus we can update
11394					 * cwnd.
11395					 */
11396					cwnd = tcp->tcp_cwnd + mss;
11397					if (cwnd > tcp->tcp_cwnd_max)
11398						cwnd = tcp->tcp_cwnd_max;
11399					tcp->tcp_cwnd = cwnd;
11400					if (tcp->tcp_unsent > 0)
11401						flags |= TH_XMIT_NEEDED;
11402					}
11403				}
11404			}
11405		} else if (tcp->tcp_zero_win_probe) {
11406			/*
11407			 * If the window has opened, need to arrange
11408			 * to send additional data.
11409			 */
11410			if (new_swnd != 0) {
11411				/* tcp_suna != tcp_snxt */
11412				/* Packet contains a window update */
11413				BUMP_MIB(&tcps->tcps_mib, tcpInWinUpdate);
11414				tcp->tcp_zero_win_probe = 0;
11415				tcp->tcp_timer_backoff = 0;
11416				tcp->tcp_ms_we_have_waited = 0;
11417
11418				/*
11419				 * Transmit starting with tcp_suna since
11420				 * the one byte probe is not ack'ed.
11421				 * If TCP has sent more than one identical
11422				 * probe, tcp_rexmit will be set.  That means
11423				 * tcp_ss_rexmit() will send out the one
11424				 * byte along with new data.  Otherwise,
11425				 * fake the retransmission.
11426				 */
11427				flags |= TH_XMIT_NEEDED;
11428				if (!tcp->tcp_rexmit) {
11429					tcp->tcp_rexmit = B_TRUE;
11430					tcp->tcp_dupack_cnt = 0;
11431					tcp->tcp_rexmit_nxt = tcp->tcp_suna;
11432					tcp->tcp_rexmit_max = tcp->tcp_suna + 1;
11433				}
11434			}
11435		}
11436		goto swnd_update;
11437	}
11438
11439	/*
11440	 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73.
11441	 * If the ACK value acks something that we have not yet sent, it might
11442	 * be an old duplicate segment.  Send an ACK to re-synchronize the
11443	 * other side.
11444	 * Note: reset in response to unacceptable ACK in SYN_RECEIVE
11445	 * state is handled above, so we can always just drop the segment and
11446	 * send an ACK here.
11447	 *
11448	 * In the case where the peer shrinks the window, we see the new window
11449	 * update, but all the data sent previously is queued up by the peer.
11450	 * To account for this, in tcp_process_shrunk_swnd(), the sequence
11451	 * number, which was already sent, and within window, is recorded.
11452	 * tcp_snxt is then updated.
11453	 *
11454	 * If the window has previously shrunk, and an ACK for data not yet
11455	 * sent, according to tcp_snxt is recieved, it may still be valid. If
11456	 * the ACK is for data within the window at the time the window was
11457	 * shrunk, then the ACK is acceptable. In this case tcp_snxt is set to
11458	 * the sequence number ACK'ed.
11459	 *
11460	 * If the ACK covers all the data sent at the time the window was
11461	 * shrunk, we can now set tcp_is_wnd_shrnk to B_FALSE.
11462	 *
11463	 * Should we send ACKs in response to ACK only segments?
11464	 */
11465
11466	if (SEQ_GT(seg_ack, tcp->tcp_snxt)) {
11467		if ((tcp->tcp_is_wnd_shrnk) &&
11468		    (SEQ_LEQ(seg_ack, tcp->tcp_snxt_shrunk))) {
11469			uint32_t data_acked_ahead_snxt;
11470
11471			data_acked_ahead_snxt = seg_ack - tcp->tcp_snxt;
11472			tcp_update_xmit_tail(tcp, seg_ack);
11473			tcp->tcp_unsent -= data_acked_ahead_snxt;
11474		} else {
11475			BUMP_MIB(&tcps->tcps_mib, tcpInAckUnsent);
11476			/* drop the received segment */
11477			freemsg(mp);
11478
11479			/*
11480			 * Send back an ACK.  If tcp_drop_ack_unsent_cnt is
11481			 * greater than 0, check if the number of such
11482			 * bogus ACks is greater than that count.  If yes,
11483			 * don't send back any ACK.  This prevents TCP from
11484			 * getting into an ACK storm if somehow an attacker
11485			 * successfully spoofs an acceptable segment to our
11486			 * peer.
11487			 */
11488			if (tcp_drop_ack_unsent_cnt > 0 &&
11489			    ++tcp->tcp_in_ack_unsent >
11490			    tcp_drop_ack_unsent_cnt) {
11491				TCP_STAT(tcps, tcp_in_ack_unsent_drop);
11492				return;
11493			}
11494			mp = tcp_ack_mp(tcp);
11495			if (mp != NULL) {
11496				BUMP_LOCAL(tcp->tcp_obsegs);
11497				BUMP_MIB(&tcps->tcps_mib, tcpOutAck);
11498				tcp_send_data(tcp, mp);
11499			}
11500			return;
11501		}
11502	} else if (tcp->tcp_is_wnd_shrnk && SEQ_GEQ(seg_ack,
11503	    tcp->tcp_snxt_shrunk)) {
11504			tcp->tcp_is_wnd_shrnk = B_FALSE;
11505	}
11506
11507	/*
11508	 * TCP gets a new ACK, update the notsack'ed list to delete those
11509	 * blocks that are covered by this ACK.
11510	 */
11511	if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
11512		tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack,
11513		    &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list));
11514	}
11515
11516	/*
11517	 * If we got an ACK after fast retransmit, check to see
11518	 * if it is a partial ACK.  If it is not and the congestion
11519	 * window was inflated to account for the other side's
11520	 * cached packets, retract it.  If it is, do Hoe's algorithm.
11521	 */
11522	if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) {
11523		ASSERT(tcp->tcp_rexmit == B_FALSE);
11524		if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) {
11525			tcp->tcp_dupack_cnt = 0;
11526			/*
11527			 * Restore the orig tcp_cwnd_ssthresh after
11528			 * fast retransmit phase.
11529			 */
11530			if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) {
11531				tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh;
11532			}
11533			tcp->tcp_rexmit_max = seg_ack;
11534			tcp->tcp_cwnd_cnt = 0;
11535			tcp->tcp_snd_burst = tcp->tcp_localnet ?
11536			    TCP_CWND_INFINITE : TCP_CWND_NORMAL;
11537
11538			/*
11539			 * Remove all notsack info to avoid confusion with
11540			 * the next fast retrasnmit/recovery phase.
11541			 */
11542			if (tcp->tcp_snd_sack_ok &&
11543			    tcp->tcp_notsack_list != NULL) {
11544				TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list,
11545				    tcp);
11546			}
11547		} else {
11548			if (tcp->tcp_snd_sack_ok &&
11549			    tcp->tcp_notsack_list != NULL) {
11550				flags |= TH_NEED_SACK_REXMIT;
11551				tcp->tcp_pipe -= mss;
11552				if (tcp->tcp_pipe < 0)
11553					tcp->tcp_pipe = 0;
11554			} else {
11555				/*
11556				 * Hoe's algorithm:
11557				 *
11558				 * Retransmit the unack'ed segment and
11559				 * restart fast recovery.  Note that we
11560				 * need to scale back tcp_cwnd to the
11561				 * original value when we started fast
11562				 * recovery.  This is to prevent overly
11563				 * aggressive behaviour in sending new
11564				 * segments.
11565				 */
11566				tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh +
11567				    tcps->tcps_dupack_fast_retransmit * mss;
11568				tcp->tcp_cwnd_cnt = tcp->tcp_cwnd;
11569				flags |= TH_REXMIT_NEEDED;
11570			}
11571		}
11572	} else {
11573		tcp->tcp_dupack_cnt = 0;
11574		if (tcp->tcp_rexmit) {
11575			/*
11576			 * TCP is retranmitting.  If the ACK ack's all
11577			 * outstanding data, update tcp_rexmit_max and
11578			 * tcp_rexmit_nxt.  Otherwise, update tcp_rexmit_nxt
11579			 * to the correct value.
11580			 *
11581			 * Note that SEQ_LEQ() is used.  This is to avoid
11582			 * unnecessary fast retransmit caused by dup ACKs
11583			 * received when TCP does slow start retransmission
11584			 * after a time out.  During this phase, TCP may
11585			 * send out segments which are already received.
11586			 * This causes dup ACKs to be sent back.
11587			 */
11588			if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) {
11589				if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) {
11590					tcp->tcp_rexmit_nxt = seg_ack;
11591				}
11592				if (seg_ack != tcp->tcp_rexmit_max) {
11593					flags |= TH_XMIT_NEEDED;
11594				}
11595			} else {
11596				tcp->tcp_rexmit = B_FALSE;
11597				tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
11598				tcp->tcp_snd_burst = tcp->tcp_localnet ?
11599				    TCP_CWND_INFINITE : TCP_CWND_NORMAL;
11600			}
11601			tcp->tcp_ms_we_have_waited = 0;
11602		}
11603	}
11604
11605	BUMP_MIB(&tcps->tcps_mib, tcpInAckSegs);
11606	UPDATE_MIB(&tcps->tcps_mib, tcpInAckBytes, bytes_acked);
11607	tcp->tcp_suna = seg_ack;
11608	if (tcp->tcp_zero_win_probe != 0) {
11609		tcp->tcp_zero_win_probe = 0;
11610		tcp->tcp_timer_backoff = 0;
11611	}
11612
11613	/*
11614	 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed.
11615	 * Note that it cannot be the SYN being ack'ed.  The code flow
11616	 * will not reach here.
11617	 */
11618	if (mp1 == NULL) {
11619		goto fin_acked;
11620	}
11621
11622	/*
11623	 * Update the congestion window.
11624	 *
11625	 * If TCP is not ECN capable or TCP is ECN capable but the
11626	 * congestion experience bit is not set, increase the tcp_cwnd as
11627	 * usual.
11628	 */
11629	if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) {
11630		cwnd = tcp->tcp_cwnd;
11631		add = mss;
11632
11633		if (cwnd >= tcp->tcp_cwnd_ssthresh) {
11634			/*
11635			 * This is to prevent an increase of less than 1 MSS of
11636			 * tcp_cwnd.  With partial increase, tcp_wput_data()
11637			 * may send out tinygrams in order to preserve mblk
11638			 * boundaries.
11639			 *
11640			 * By initializing tcp_cwnd_cnt to new tcp_cwnd and
11641			 * decrementing it by 1 MSS for every ACKs, tcp_cwnd is
11642			 * increased by 1 MSS for every RTTs.
11643			 */
11644			if (tcp->tcp_cwnd_cnt <= 0) {
11645				tcp->tcp_cwnd_cnt = cwnd + add;
11646			} else {
11647				tcp->tcp_cwnd_cnt -= add;
11648				add = 0;
11649			}
11650		}
11651		tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max);
11652	}
11653
11654	/* See if the latest urgent data has been acknowledged */
11655	if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
11656	    SEQ_GT(seg_ack, tcp->tcp_urg))
11657		tcp->tcp_valid_bits &= ~TCP_URG_VALID;
11658
11659	/* Can we update the RTT estimates? */
11660	if (tcp->tcp_snd_ts_ok) {
11661		/* Ignore zero timestamp echo-reply. */
11662		if (tcpopt.tcp_opt_ts_ecr != 0) {
11663			tcp_set_rto(tcp, (int32_t)lbolt -
11664			    (int32_t)tcpopt.tcp_opt_ts_ecr);
11665		}
11666
11667		/* If needed, restart the timer. */
11668		if (tcp->tcp_set_timer == 1) {
11669			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
11670			tcp->tcp_set_timer = 0;
11671		}
11672		/*
11673		 * Update tcp_csuna in case the other side stops sending
11674		 * us timestamps.
11675		 */
11676		tcp->tcp_csuna = tcp->tcp_snxt;
11677	} else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) {
11678		/*
11679		 * An ACK sequence we haven't seen before, so get the RTT
11680		 * and update the RTO. But first check if the timestamp is
11681		 * valid to use.
11682		 */
11683		if ((mp1->b_next != NULL) &&
11684		    SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next)))
11685			tcp_set_rto(tcp, (int32_t)lbolt -
11686			    (int32_t)(intptr_t)mp1->b_prev);
11687		else
11688			BUMP_MIB(&tcps->tcps_mib, tcpRttNoUpdate);
11689
11690		/* Remeber the last sequence to be ACKed */
11691		tcp->tcp_csuna = seg_ack;
11692		if (tcp->tcp_set_timer == 1) {
11693			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
11694			tcp->tcp_set_timer = 0;
11695		}
11696	} else {
11697		BUMP_MIB(&tcps->tcps_mib, tcpRttNoUpdate);
11698	}
11699
11700	/* Eat acknowledged bytes off the xmit queue. */
11701	for (;;) {
11702		mblk_t	*mp2;
11703		uchar_t	*wptr;
11704
11705		wptr = mp1->b_wptr;
11706		ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX);
11707		bytes_acked -= (int)(wptr - mp1->b_rptr);
11708		if (bytes_acked < 0) {
11709			mp1->b_rptr = wptr + bytes_acked;
11710			/*
11711			 * Set a new timestamp if all the bytes timed by the
11712			 * old timestamp have been ack'ed.
11713			 */
11714			if (SEQ_GT(seg_ack,
11715			    (uint32_t)(uintptr_t)(mp1->b_next))) {
11716				mp1->b_prev = (mblk_t *)(uintptr_t)lbolt;
11717				mp1->b_next = NULL;
11718			}
11719			break;
11720		}
11721		mp1->b_next = NULL;
11722		mp1->b_prev = NULL;
11723		mp2 = mp1;
11724		mp1 = mp1->b_cont;
11725
11726		/*
11727		 * This notification is required for some zero-copy
11728		 * clients to maintain a copy semantic. After the data
11729		 * is ack'ed, client is safe to modify or reuse the buffer.
11730		 */
11731		if (tcp->tcp_snd_zcopy_aware &&
11732		    (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
11733			tcp_zcopy_notify(tcp);
11734		freeb(mp2);
11735		if (bytes_acked == 0) {
11736			if (mp1 == NULL) {
11737				/* Everything is ack'ed, clear the tail. */
11738				tcp->tcp_xmit_tail = NULL;
11739				/*
11740				 * Cancel the timer unless we are still
11741				 * waiting for an ACK for the FIN packet.
11742				 */
11743				if (tcp->tcp_timer_tid != 0 &&
11744				    tcp->tcp_snxt == tcp->tcp_suna) {
11745					(void) TCP_TIMER_CANCEL(tcp,
11746					    tcp->tcp_timer_tid);
11747					tcp->tcp_timer_tid = 0;
11748				}
11749				goto pre_swnd_update;
11750			}
11751			if (mp2 != tcp->tcp_xmit_tail)
11752				break;
11753			tcp->tcp_xmit_tail = mp1;
11754			ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
11755			    (uintptr_t)INT_MAX);
11756			tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr -
11757			    mp1->b_rptr);
11758			break;
11759		}
11760		if (mp1 == NULL) {
11761			/*
11762			 * More was acked but there is nothing more
11763			 * outstanding.  This means that the FIN was
11764			 * just acked or that we're talking to a clown.
11765			 */
11766fin_acked:
11767			ASSERT(tcp->tcp_fin_sent);
11768			tcp->tcp_xmit_tail = NULL;
11769			if (tcp->tcp_fin_sent) {
11770				/* FIN was acked - making progress */
11771				if (!tcp->tcp_fin_acked)
11772					tcp->tcp_ip_forward_progress = B_TRUE;
11773				tcp->tcp_fin_acked = B_TRUE;
11774				if (tcp->tcp_linger_tid != 0 &&
11775				    TCP_TIMER_CANCEL(tcp,
11776				    tcp->tcp_linger_tid) >= 0) {
11777					tcp_stop_lingering(tcp);
11778					freemsg(mp);
11779					mp = NULL;
11780				}
11781			} else {
11782				/*
11783				 * We should never get here because
11784				 * we have already checked that the
11785				 * number of bytes ack'ed should be
11786				 * smaller than or equal to what we
11787				 * have sent so far (it is the
11788				 * acceptability check of the ACK).
11789				 * We can only get here if the send
11790				 * queue is corrupted.
11791				 *
11792				 * Terminate the connection and
11793				 * panic the system.  It is better
11794				 * for us to panic instead of
11795				 * continuing to avoid other disaster.
11796				 */
11797				tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
11798				    tcp->tcp_rnxt, TH_RST|TH_ACK);
11799				panic("Memory corruption "
11800				    "detected for connection %s.",
11801				    tcp_display(tcp, NULL,
11802				    DISP_ADDR_AND_PORT));
11803				/*NOTREACHED*/
11804			}
11805			goto pre_swnd_update;
11806		}
11807		ASSERT(mp2 != tcp->tcp_xmit_tail);
11808	}
11809	if (tcp->tcp_unsent) {
11810		flags |= TH_XMIT_NEEDED;
11811	}
11812pre_swnd_update:
11813	tcp->tcp_xmit_head = mp1;
11814swnd_update:
11815	/*
11816	 * The following check is different from most other implementations.
11817	 * For bi-directional transfer, when segments are dropped, the
11818	 * "normal" check will not accept a window update in those
11819	 * retransmitted segemnts.  Failing to do that, TCP may send out
11820	 * segments which are outside receiver's window.  As TCP accepts
11821	 * the ack in those retransmitted segments, if the window update in
11822	 * the same segment is not accepted, TCP will incorrectly calculates
11823	 * that it can send more segments.  This can create a deadlock
11824	 * with the receiver if its window becomes zero.
11825	 */
11826	if (SEQ_LT(tcp->tcp_swl2, seg_ack) ||
11827	    SEQ_LT(tcp->tcp_swl1, seg_seq) ||
11828	    (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) {
11829		/*
11830		 * The criteria for update is:
11831		 *
11832		 * 1. the segment acknowledges some data.  Or
11833		 * 2. the segment is new, i.e. it has a higher seq num. Or
11834		 * 3. the segment is not old and the advertised window is
11835		 * larger than the previous advertised window.
11836		 */
11837		if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd)
11838			flags |= TH_XMIT_NEEDED;
11839		tcp->tcp_swnd = new_swnd;
11840		if (new_swnd > tcp->tcp_max_swnd)
11841			tcp->tcp_max_swnd = new_swnd;
11842		tcp->tcp_swl1 = seg_seq;
11843		tcp->tcp_swl2 = seg_ack;
11844	}
11845est:
11846	if (tcp->tcp_state > TCPS_ESTABLISHED) {
11847
11848		switch (tcp->tcp_state) {
11849		case TCPS_FIN_WAIT_1:
11850			if (tcp->tcp_fin_acked) {
11851				tcp->tcp_state = TCPS_FIN_WAIT_2;
11852				/*
11853				 * We implement the non-standard BSD/SunOS
11854				 * FIN_WAIT_2 flushing algorithm.
11855				 * If there is no user attached to this
11856				 * TCP endpoint, then this TCP struct
11857				 * could hang around forever in FIN_WAIT_2
11858				 * state if the peer forgets to send us
11859				 * a FIN.  To prevent this, we wait only
11860				 * 2*MSL (a convenient time value) for
11861				 * the FIN to arrive.  If it doesn't show up,
11862				 * we flush the TCP endpoint.  This algorithm,
11863				 * though a violation of RFC-793, has worked
11864				 * for over 10 years in BSD systems.
11865				 * Note: SunOS 4.x waits 675 seconds before
11866				 * flushing the FIN_WAIT_2 connection.
11867				 */
11868				TCP_TIMER_RESTART(tcp,
11869				    tcps->tcps_fin_wait_2_flush_interval);
11870			}
11871			break;
11872		case TCPS_FIN_WAIT_2:
11873			break;	/* Shutdown hook? */
11874		case TCPS_LAST_ACK:
11875			freemsg(mp);
11876			if (tcp->tcp_fin_acked) {
11877				(void) tcp_clean_death(tcp, 0, 19);
11878				return;
11879			}
11880			goto xmit_check;
11881		case TCPS_CLOSING:
11882			if (tcp->tcp_fin_acked) {
11883				tcp->tcp_state = TCPS_TIME_WAIT;
11884				/*
11885				 * Unconditionally clear the exclusive binding
11886				 * bit so this TIME-WAIT connection won't
11887				 * interfere with new ones.
11888				 */
11889				connp->conn_exclbind = 0;
11890				if (!TCP_IS_DETACHED(tcp)) {
11891					TCP_TIMER_RESTART(tcp,
11892					    tcps->tcps_time_wait_interval);
11893				} else {
11894					tcp_time_wait_append(tcp);
11895					TCP_DBGSTAT(tcps, tcp_rput_time_wait);
11896				}
11897			}
11898			/*FALLTHRU*/
11899		case TCPS_CLOSE_WAIT:
11900			freemsg(mp);
11901			goto xmit_check;
11902		default:
11903			ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
11904			break;
11905		}
11906	}
11907	if (flags & TH_FIN) {
11908		/* Make sure we ack the fin */
11909		flags |= TH_ACK_NEEDED;
11910		if (!tcp->tcp_fin_rcvd) {
11911			tcp->tcp_fin_rcvd = B_TRUE;
11912			tcp->tcp_rnxt++;
11913			tcpha = tcp->tcp_tcpha;
11914			tcpha->tha_ack = htonl(tcp->tcp_rnxt);
11915
11916			/*
11917			 * Generate the ordrel_ind at the end unless we
11918			 * are an eager guy.
11919			 * In the eager case tcp_rsrv will do this when run
11920			 * after tcp_accept is done.
11921			 */
11922			if (tcp->tcp_listener == NULL &&
11923			    !TCP_IS_DETACHED(tcp) && !tcp->tcp_hard_binding)
11924				flags |= TH_ORDREL_NEEDED;
11925			switch (tcp->tcp_state) {
11926			case TCPS_SYN_RCVD:
11927			case TCPS_ESTABLISHED:
11928				tcp->tcp_state = TCPS_CLOSE_WAIT;
11929				/* Keepalive? */
11930				break;
11931			case TCPS_FIN_WAIT_1:
11932				if (!tcp->tcp_fin_acked) {
11933					tcp->tcp_state = TCPS_CLOSING;
11934					break;
11935				}
11936				/* FALLTHRU */
11937			case TCPS_FIN_WAIT_2:
11938				tcp->tcp_state = TCPS_TIME_WAIT;
11939				/*
11940				 * Unconditionally clear the exclusive binding
11941				 * bit so this TIME-WAIT connection won't
11942				 * interfere with new ones.
11943				 */
11944				connp->conn_exclbind = 0;
11945				if (!TCP_IS_DETACHED(tcp)) {
11946					TCP_TIMER_RESTART(tcp,
11947					    tcps->tcps_time_wait_interval);
11948				} else {
11949					tcp_time_wait_append(tcp);
11950					TCP_DBGSTAT(tcps, tcp_rput_time_wait);
11951				}
11952				if (seg_len) {
11953					/*
11954					 * implies data piggybacked on FIN.
11955					 * break to handle data.
11956					 */
11957					break;
11958				}
11959				freemsg(mp);
11960				goto ack_check;
11961			}
11962		}
11963	}
11964	if (mp == NULL)
11965		goto xmit_check;
11966	if (seg_len == 0) {
11967		freemsg(mp);
11968		goto xmit_check;
11969	}
11970	if (mp->b_rptr == mp->b_wptr) {
11971		/*
11972		 * The header has been consumed, so we remove the
11973		 * zero-length mblk here.
11974		 */
11975		mp1 = mp;
11976		mp = mp->b_cont;
11977		freeb(mp1);
11978	}
11979update_ack:
11980	tcpha = tcp->tcp_tcpha;
11981	tcp->tcp_rack_cnt++;
11982	{
11983		uint32_t cur_max;
11984
11985		cur_max = tcp->tcp_rack_cur_max;
11986		if (tcp->tcp_rack_cnt >= cur_max) {
11987			/*
11988			 * We have more unacked data than we should - send
11989			 * an ACK now.
11990			 */
11991			flags |= TH_ACK_NEEDED;
11992			cur_max++;
11993			if (cur_max > tcp->tcp_rack_abs_max)
11994				tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
11995			else
11996				tcp->tcp_rack_cur_max = cur_max;
11997		} else if (TCP_IS_DETACHED(tcp)) {
11998			/* We don't have an ACK timer for detached TCP. */
11999			flags |= TH_ACK_NEEDED;
12000		} else if (seg_len < mss) {
12001			/*
12002			 * If we get a segment that is less than an mss, and we
12003			 * already have unacknowledged data, and the amount
12004			 * unacknowledged is not a multiple of mss, then we
12005			 * better generate an ACK now.  Otherwise, this may be
12006			 * the tail piece of a transaction, and we would rather
12007			 * wait for the response.
12008			 */
12009			uint32_t udif;
12010			ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <=
12011			    (uintptr_t)INT_MAX);
12012			udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack);
12013			if (udif && (udif % mss))
12014				flags |= TH_ACK_NEEDED;
12015			else
12016				flags |= TH_ACK_TIMER_NEEDED;
12017		} else {
12018			/* Start delayed ack timer */
12019			flags |= TH_ACK_TIMER_NEEDED;
12020		}
12021	}
12022	tcp->tcp_rnxt += seg_len;
12023	tcpha->tha_ack = htonl(tcp->tcp_rnxt);
12024
12025	if (mp == NULL)
12026		goto xmit_check;
12027
12028	/* Update SACK list */
12029	if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
12030		tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt,
12031		    &(tcp->tcp_num_sack_blk));
12032	}
12033
12034	if (tcp->tcp_urp_mp) {
12035		tcp->tcp_urp_mp->b_cont = mp;
12036		mp = tcp->tcp_urp_mp;
12037		tcp->tcp_urp_mp = NULL;
12038		/* Ready for a new signal. */
12039		tcp->tcp_urp_last_valid = B_FALSE;
12040#ifdef DEBUG
12041		(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
12042		    "tcp_rput: sending exdata_ind %s",
12043		    tcp_display(tcp, NULL, DISP_PORT_ONLY));
12044#endif /* DEBUG */
12045	}
12046
12047	/*
12048	 * Check for ancillary data changes compared to last segment.
12049	 */
12050	if (connp->conn_recv_ancillary.crb_all != 0) {
12051		mp = tcp_input_add_ancillary(tcp, mp, &ipp, ira);
12052		if (mp == NULL)
12053			return;
12054	}
12055
12056	if (tcp->tcp_listener != NULL || tcp->tcp_hard_binding) {
12057		/*
12058		 * Side queue inbound data until the accept happens.
12059		 * tcp_accept/tcp_rput drains this when the accept happens.
12060		 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or
12061		 * T_EXDATA_IND) it is queued on b_next.
12062		 * XXX Make urgent data use this. Requires:
12063		 *	Removing tcp_listener check for TH_URG
12064		 *	Making M_PCPROTO and MARK messages skip the eager case
12065		 */
12066
12067		if (tcp->tcp_kssl_pending) {
12068			DTRACE_PROBE1(kssl_mblk__ksslinput_pending,
12069			    mblk_t *, mp);
12070			tcp_kssl_input(tcp, mp, ira->ira_cred);
12071		} else {
12072			tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred);
12073		}
12074	} else if (IPCL_IS_NONSTR(connp)) {
12075		/*
12076		 * Non-STREAMS socket
12077		 *
12078		 * Note that no KSSL processing is done here, because
12079		 * KSSL is not supported for non-STREAMS sockets.
12080		 */
12081		boolean_t push = flags & (TH_PUSH|TH_FIN);
12082		int error;
12083
12084		if ((*connp->conn_upcalls->su_recv)(
12085		    connp->conn_upper_handle,
12086		    mp, seg_len, 0, &error, &push) <= 0) {
12087			/*
12088			 * We should never be in middle of a
12089			 * fallback, the squeue guarantees that.
12090			 */
12091			ASSERT(error != EOPNOTSUPP);
12092			if (error == ENOSPC)
12093				tcp->tcp_rwnd -= seg_len;
12094		} else if (push) {
12095			/* PUSH bit set and sockfs is not flow controlled */
12096			flags |= tcp_rwnd_reopen(tcp);
12097		}
12098	} else {
12099		/* STREAMS socket */
12100		if (mp->b_datap->db_type != M_DATA ||
12101		    (flags & TH_MARKNEXT_NEEDED)) {
12102			if (tcp->tcp_rcv_list != NULL) {
12103				flags |= tcp_rcv_drain(tcp);
12104			}
12105			ASSERT(tcp->tcp_rcv_list == NULL ||
12106			    tcp->tcp_fused_sigurg);
12107
12108			if (flags & TH_MARKNEXT_NEEDED) {
12109#ifdef DEBUG
12110				(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
12111				    "tcp_rput: sending MSGMARKNEXT %s",
12112				    tcp_display(tcp, NULL,
12113				    DISP_PORT_ONLY));
12114#endif /* DEBUG */
12115				mp->b_flag |= MSGMARKNEXT;
12116				flags &= ~TH_MARKNEXT_NEEDED;
12117			}
12118
12119			/* Does this need SSL processing first? */
12120			if ((tcp->tcp_kssl_ctx != NULL) &&
12121			    (DB_TYPE(mp) == M_DATA)) {
12122				DTRACE_PROBE1(kssl_mblk__ksslinput_data1,
12123				    mblk_t *, mp);
12124				tcp_kssl_input(tcp, mp, ira->ira_cred);
12125			} else {
12126				if (is_system_labeled())
12127					tcp_setcred_data(mp, ira);
12128
12129				putnext(connp->conn_rq, mp);
12130				if (!canputnext(connp->conn_rq))
12131					tcp->tcp_rwnd -= seg_len;
12132			}
12133		} else if ((tcp->tcp_kssl_ctx != NULL) &&
12134		    (DB_TYPE(mp) == M_DATA)) {
12135			/* Does this need SSL processing first? */
12136			DTRACE_PROBE1(kssl_mblk__ksslinput_data2, mblk_t *, mp);
12137			tcp_kssl_input(tcp, mp, ira->ira_cred);
12138		} else if ((flags & (TH_PUSH|TH_FIN)) ||
12139		    tcp->tcp_rcv_cnt + seg_len >= connp->conn_rcvbuf >> 3) {
12140			if (tcp->tcp_rcv_list != NULL) {
12141				/*
12142				 * Enqueue the new segment first and then
12143				 * call tcp_rcv_drain() to send all data
12144				 * up.  The other way to do this is to
12145				 * send all queued data up and then call
12146				 * putnext() to send the new segment up.
12147				 * This way can remove the else part later
12148				 * on.
12149				 *
12150				 * We don't do this to avoid one more call to
12151				 * canputnext() as tcp_rcv_drain() needs to
12152				 * call canputnext().
12153				 */
12154				tcp_rcv_enqueue(tcp, mp, seg_len,
12155				    ira->ira_cred);
12156				flags |= tcp_rcv_drain(tcp);
12157			} else {
12158				if (is_system_labeled())
12159					tcp_setcred_data(mp, ira);
12160
12161				putnext(connp->conn_rq, mp);
12162				if (!canputnext(connp->conn_rq))
12163					tcp->tcp_rwnd -= seg_len;
12164			}
12165		} else {
12166			/*
12167			 * Enqueue all packets when processing an mblk
12168			 * from the co queue and also enqueue normal packets.
12169			 */
12170			tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred);
12171		}
12172		/*
12173		 * Make sure the timer is running if we have data waiting
12174		 * for a push bit. This provides resiliency against
12175		 * implementations that do not correctly generate push bits.
12176		 */
12177		if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) {
12178			/*
12179			 * The connection may be closed at this point, so don't
12180			 * do anything for a detached tcp.
12181			 */
12182			if (!TCP_IS_DETACHED(tcp))
12183				tcp->tcp_push_tid = TCP_TIMER(tcp,
12184				    tcp_push_timer,
12185				    MSEC_TO_TICK(
12186				    tcps->tcps_push_timer_interval));
12187		}
12188	}
12189
12190xmit_check:
12191	/* Is there anything left to do? */
12192	ASSERT(!(flags & TH_MARKNEXT_NEEDED));
12193	if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED|
12194	    TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED|
12195	    TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0)
12196		goto done;
12197
12198	/* Any transmit work to do and a non-zero window? */
12199	if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT|
12200	    TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) {
12201		if (flags & TH_REXMIT_NEEDED) {
12202			uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna;
12203
12204			BUMP_MIB(&tcps->tcps_mib, tcpOutFastRetrans);
12205			if (snd_size > mss)
12206				snd_size = mss;
12207			if (snd_size > tcp->tcp_swnd)
12208				snd_size = tcp->tcp_swnd;
12209			mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size,
12210			    NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size,
12211			    B_TRUE);
12212
12213			if (mp1 != NULL) {
12214				tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt;
12215				tcp->tcp_csuna = tcp->tcp_snxt;
12216				BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
12217				UPDATE_MIB(&tcps->tcps_mib,
12218				    tcpRetransBytes, snd_size);
12219				tcp_send_data(tcp, mp1);
12220			}
12221		}
12222		if (flags & TH_NEED_SACK_REXMIT) {
12223			tcp_sack_rxmit(tcp, &flags);
12224		}
12225		/*
12226		 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send
12227		 * out new segment.  Note that tcp_rexmit should not be
12228		 * set, otherwise TH_LIMIT_XMIT should not be set.
12229		 */
12230		if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) {
12231			if (!tcp->tcp_rexmit) {
12232				tcp_wput_data(tcp, NULL, B_FALSE);
12233			} else {
12234				tcp_ss_rexmit(tcp);
12235			}
12236		}
12237		/*
12238		 * Adjust tcp_cwnd back to normal value after sending
12239		 * new data segments.
12240		 */
12241		if (flags & TH_LIMIT_XMIT) {
12242			tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1);
12243			/*
12244			 * This will restart the timer.  Restarting the
12245			 * timer is used to avoid a timeout before the
12246			 * limited transmitted segment's ACK gets back.
12247			 */
12248			if (tcp->tcp_xmit_head != NULL)
12249				tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt;
12250		}
12251
12252		/* Anything more to do? */
12253		if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED|
12254		    TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0)
12255			goto done;
12256	}
12257ack_check:
12258	if (flags & TH_SEND_URP_MARK) {
12259		ASSERT(tcp->tcp_urp_mark_mp);
12260		ASSERT(!IPCL_IS_NONSTR(connp));
12261		/*
12262		 * Send up any queued data and then send the mark message
12263		 */
12264		if (tcp->tcp_rcv_list != NULL) {
12265			flags |= tcp_rcv_drain(tcp);
12266
12267		}
12268		ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
12269		mp1 = tcp->tcp_urp_mark_mp;
12270		tcp->tcp_urp_mark_mp = NULL;
12271		if (is_system_labeled())
12272			tcp_setcred_data(mp1, ira);
12273
12274		putnext(connp->conn_rq, mp1);
12275#ifdef DEBUG
12276		(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
12277		    "tcp_rput: sending zero-length %s %s",
12278		    ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" :
12279		    "MSGNOTMARKNEXT"),
12280		    tcp_display(tcp, NULL, DISP_PORT_ONLY));
12281#endif /* DEBUG */
12282		flags &= ~TH_SEND_URP_MARK;
12283	}
12284	if (flags & TH_ACK_NEEDED) {
12285		/*
12286		 * Time to send an ack for some reason.
12287		 */
12288		mp1 = tcp_ack_mp(tcp);
12289
12290		if (mp1 != NULL) {
12291			tcp_send_data(tcp, mp1);
12292			BUMP_LOCAL(tcp->tcp_obsegs);
12293			BUMP_MIB(&tcps->tcps_mib, tcpOutAck);
12294		}
12295		if (tcp->tcp_ack_tid != 0) {
12296			(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid);
12297			tcp->tcp_ack_tid = 0;
12298		}
12299	}
12300	if (flags & TH_ACK_TIMER_NEEDED) {
12301		/*
12302		 * Arrange for deferred ACK or push wait timeout.
12303		 * Start timer if it is not already running.
12304		 */
12305		if (tcp->tcp_ack_tid == 0) {
12306			tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer,
12307			    MSEC_TO_TICK(tcp->tcp_localnet ?
12308			    (clock_t)tcps->tcps_local_dack_interval :
12309			    (clock_t)tcps->tcps_deferred_ack_interval));
12310		}
12311	}
12312	if (flags & TH_ORDREL_NEEDED) {
12313		/*
12314		 * Send up the ordrel_ind unless we are an eager guy.
12315		 * In the eager case tcp_rsrv will do this when run
12316		 * after tcp_accept is done.
12317		 */
12318		ASSERT(tcp->tcp_listener == NULL);
12319		ASSERT(!tcp->tcp_detached);
12320
12321		if (IPCL_IS_NONSTR(connp)) {
12322			ASSERT(tcp->tcp_ordrel_mp == NULL);
12323			tcp->tcp_ordrel_done = B_TRUE;
12324			(*connp->conn_upcalls->su_opctl)
12325			    (connp->conn_upper_handle, SOCK_OPCTL_SHUT_RECV, 0);
12326			goto done;
12327		}
12328
12329		if (tcp->tcp_rcv_list != NULL) {
12330			/*
12331			 * Push any mblk(s) enqueued from co processing.
12332			 */
12333			flags |= tcp_rcv_drain(tcp);
12334		}
12335		ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
12336
12337		mp1 = tcp->tcp_ordrel_mp;
12338		tcp->tcp_ordrel_mp = NULL;
12339		tcp->tcp_ordrel_done = B_TRUE;
12340		putnext(connp->conn_rq, mp1);
12341	}
12342done:
12343	ASSERT(!(flags & TH_MARKNEXT_NEEDED));
12344}
12345
12346/*
12347 * This routine adjusts next-to-send sequence number variables, in the
12348 * case where the reciever has shrunk it's window.
12349 */
12350static void
12351tcp_update_xmit_tail(tcp_t *tcp, uint32_t snxt)
12352{
12353	mblk_t *xmit_tail;
12354	int32_t offset;
12355
12356	tcp->tcp_snxt = snxt;
12357
12358	/* Get the mblk, and the offset in it, as per the shrunk window */
12359	xmit_tail = tcp_get_seg_mp(tcp, snxt, &offset);
12360	ASSERT(xmit_tail != NULL);
12361	tcp->tcp_xmit_tail = xmit_tail;
12362	tcp->tcp_xmit_tail_unsent = xmit_tail->b_wptr -
12363	    xmit_tail->b_rptr - offset;
12364}
12365
12366/*
12367 * This function does PAWS protection check. Returns B_TRUE if the
12368 * segment passes the PAWS test, else returns B_FALSE.
12369 */
12370boolean_t
12371tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp)
12372{
12373	uint8_t	flags;
12374	int	options;
12375	uint8_t *up;
12376	conn_t	*connp = tcp->tcp_connp;
12377
12378	flags = (unsigned int)tcpha->tha_flags & 0xFF;
12379	/*
12380	 * If timestamp option is aligned nicely, get values inline,
12381	 * otherwise call general routine to parse.  Only do that
12382	 * if timestamp is the only option.
12383	 */
12384	if (TCP_HDR_LENGTH(tcpha) == (uint32_t)TCP_MIN_HEADER_LENGTH +
12385	    TCPOPT_REAL_TS_LEN &&
12386	    OK_32PTR((up = ((uint8_t *)tcpha) +
12387	    TCP_MIN_HEADER_LENGTH)) &&
12388	    *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
12389		tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4));
12390		tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8));
12391
12392		options = TCP_OPT_TSTAMP_PRESENT;
12393	} else {
12394		if (tcp->tcp_snd_sack_ok) {
12395			tcpoptp->tcp = tcp;
12396		} else {
12397			tcpoptp->tcp = NULL;
12398		}
12399		options = tcp_parse_options(tcpha, tcpoptp);
12400	}
12401
12402	if (options & TCP_OPT_TSTAMP_PRESENT) {
12403		/*
12404		 * Do PAWS per RFC 1323 section 4.2.  Accept RST
12405		 * regardless of the timestamp, page 18 RFC 1323.bis.
12406		 */
12407		if ((flags & TH_RST) == 0 &&
12408		    TSTMP_LT(tcpoptp->tcp_opt_ts_val,
12409		    tcp->tcp_ts_recent)) {
12410			if (TSTMP_LT(lbolt64, tcp->tcp_last_rcv_lbolt +
12411			    PAWS_TIMEOUT)) {
12412				/* This segment is not acceptable. */
12413				return (B_FALSE);
12414			} else {
12415				/*
12416				 * Connection has been idle for
12417				 * too long.  Reset the timestamp
12418				 * and assume the segment is valid.
12419				 */
12420				tcp->tcp_ts_recent =
12421				    tcpoptp->tcp_opt_ts_val;
12422			}
12423		}
12424	} else {
12425		/*
12426		 * If we don't get a timestamp on every packet, we
12427		 * figure we can't really trust 'em, so we stop sending
12428		 * and parsing them.
12429		 */
12430		tcp->tcp_snd_ts_ok = B_FALSE;
12431
12432		connp->conn_ht_iphc_len -= TCPOPT_REAL_TS_LEN;
12433		connp->conn_ht_ulp_len -= TCPOPT_REAL_TS_LEN;
12434		tcp->tcp_tcpha->tha_offset_and_reserved -= (3 << 4);
12435		/*
12436		 * Adjust the tcp_mss and tcp_cwnd accordingly. We avoid
12437		 * doing a slow start here so as to not to lose on the
12438		 * transfer rate built up so far.
12439		 */
12440		tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN);
12441		if (tcp->tcp_snd_sack_ok) {
12442			ASSERT(tcp->tcp_sack_info != NULL);
12443			tcp->tcp_max_sack_blk = 4;
12444		}
12445	}
12446	return (B_TRUE);
12447}
12448
12449/*
12450 * Attach ancillary data to a received TCP segments for the
12451 * ancillary pieces requested by the application that are
12452 * different than they were in the previous data segment.
12453 *
12454 * Save the "current" values once memory allocation is ok so that
12455 * when memory allocation fails we can just wait for the next data segment.
12456 */
12457static mblk_t *
12458tcp_input_add_ancillary(tcp_t *tcp, mblk_t *mp, ip_pkt_t *ipp,
12459    ip_recv_attr_t *ira)
12460{
12461	struct T_optdata_ind *todi;
12462	int optlen;
12463	uchar_t *optptr;
12464	struct T_opthdr *toh;
12465	crb_t addflag;	/* Which pieces to add */
12466	mblk_t *mp1;
12467	conn_t	*connp = tcp->tcp_connp;
12468
12469	optlen = 0;
12470	addflag.crb_all = 0;
12471	/* If app asked for pktinfo and the index has changed ... */
12472	if (connp->conn_recv_ancillary.crb_ip_recvpktinfo &&
12473	    ira->ira_ruifindex != tcp->tcp_recvifindex) {
12474		optlen += sizeof (struct T_opthdr) +
12475		    sizeof (struct in6_pktinfo);
12476		addflag.crb_ip_recvpktinfo = 1;
12477	}
12478	/* If app asked for hoplimit and it has changed ... */
12479	if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit &&
12480	    ipp->ipp_hoplimit != tcp->tcp_recvhops) {
12481		optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
12482		addflag.crb_ipv6_recvhoplimit = 1;
12483	}
12484	/* If app asked for tclass and it has changed ... */
12485	if (connp->conn_recv_ancillary.crb_ipv6_recvtclass &&
12486	    ipp->ipp_tclass != tcp->tcp_recvtclass) {
12487		optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
12488		addflag.crb_ipv6_recvtclass = 1;
12489	}
12490	/*
12491	 * If app asked for hopbyhop headers and it has changed ...
12492	 * For security labels, note that (1) security labels can't change on
12493	 * a connected socket at all, (2) we're connected to at most one peer,
12494	 * (3) if anything changes, then it must be some other extra option.
12495	 */
12496	if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts &&
12497	    ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen,
12498	    (ipp->ipp_fields & IPPF_HOPOPTS),
12499	    ipp->ipp_hopopts, ipp->ipp_hopoptslen)) {
12500		optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen;
12501		addflag.crb_ipv6_recvhopopts = 1;
12502		if (!ip_allocbuf((void **)&tcp->tcp_hopopts,
12503		    &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS),
12504		    ipp->ipp_hopopts, ipp->ipp_hopoptslen))
12505			return (mp);
12506	}
12507	/* If app asked for dst headers before routing headers ... */
12508	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts &&
12509	    ip_cmpbuf(tcp->tcp_rthdrdstopts, tcp->tcp_rthdrdstoptslen,
12510	    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
12511	    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) {
12512		optlen += sizeof (struct T_opthdr) +
12513		    ipp->ipp_rthdrdstoptslen;
12514		addflag.crb_ipv6_recvrthdrdstopts = 1;
12515		if (!ip_allocbuf((void **)&tcp->tcp_rthdrdstopts,
12516		    &tcp->tcp_rthdrdstoptslen,
12517		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
12518		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen))
12519			return (mp);
12520	}
12521	/* If app asked for routing headers and it has changed ... */
12522	if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr &&
12523	    ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen,
12524	    (ipp->ipp_fields & IPPF_RTHDR),
12525	    ipp->ipp_rthdr, ipp->ipp_rthdrlen)) {
12526		optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen;
12527		addflag.crb_ipv6_recvrthdr = 1;
12528		if (!ip_allocbuf((void **)&tcp->tcp_rthdr,
12529		    &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR),
12530		    ipp->ipp_rthdr, ipp->ipp_rthdrlen))
12531			return (mp);
12532	}
12533	/* If app asked for dest headers and it has changed ... */
12534	if ((connp->conn_recv_ancillary.crb_ipv6_recvdstopts ||
12535	    connp->conn_recv_ancillary.crb_old_ipv6_recvdstopts) &&
12536	    ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen,
12537	    (ipp->ipp_fields & IPPF_DSTOPTS),
12538	    ipp->ipp_dstopts, ipp->ipp_dstoptslen)) {
12539		optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen;
12540		addflag.crb_ipv6_recvdstopts = 1;
12541		if (!ip_allocbuf((void **)&tcp->tcp_dstopts,
12542		    &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS),
12543		    ipp->ipp_dstopts, ipp->ipp_dstoptslen))
12544			return (mp);
12545	}
12546
12547	if (optlen == 0) {
12548		/* Nothing to add */
12549		return (mp);
12550	}
12551	mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED);
12552	if (mp1 == NULL) {
12553		/*
12554		 * Defer sending ancillary data until the next TCP segment
12555		 * arrives.
12556		 */
12557		return (mp);
12558	}
12559	mp1->b_cont = mp;
12560	mp = mp1;
12561	mp->b_wptr += sizeof (*todi) + optlen;
12562	mp->b_datap->db_type = M_PROTO;
12563	todi = (struct T_optdata_ind *)mp->b_rptr;
12564	todi->PRIM_type = T_OPTDATA_IND;
12565	todi->DATA_flag = 1;	/* MORE data */
12566	todi->OPT_length = optlen;
12567	todi->OPT_offset = sizeof (*todi);
12568	optptr = (uchar_t *)&todi[1];
12569	/*
12570	 * If app asked for pktinfo and the index has changed ...
12571	 * Note that the local address never changes for the connection.
12572	 */
12573	if (addflag.crb_ip_recvpktinfo) {
12574		struct in6_pktinfo *pkti;
12575		uint_t ifindex;
12576
12577		ifindex = ira->ira_ruifindex;
12578		toh = (struct T_opthdr *)optptr;
12579		toh->level = IPPROTO_IPV6;
12580		toh->name = IPV6_PKTINFO;
12581		toh->len = sizeof (*toh) + sizeof (*pkti);
12582		toh->status = 0;
12583		optptr += sizeof (*toh);
12584		pkti = (struct in6_pktinfo *)optptr;
12585		pkti->ipi6_addr = connp->conn_laddr_v6;
12586		pkti->ipi6_ifindex = ifindex;
12587		optptr += sizeof (*pkti);
12588		ASSERT(OK_32PTR(optptr));
12589		/* Save as "last" value */
12590		tcp->tcp_recvifindex = ifindex;
12591	}
12592	/* If app asked for hoplimit and it has changed ... */
12593	if (addflag.crb_ipv6_recvhoplimit) {
12594		toh = (struct T_opthdr *)optptr;
12595		toh->level = IPPROTO_IPV6;
12596		toh->name = IPV6_HOPLIMIT;
12597		toh->len = sizeof (*toh) + sizeof (uint_t);
12598		toh->status = 0;
12599		optptr += sizeof (*toh);
12600		*(uint_t *)optptr = ipp->ipp_hoplimit;
12601		optptr += sizeof (uint_t);
12602		ASSERT(OK_32PTR(optptr));
12603		/* Save as "last" value */
12604		tcp->tcp_recvhops = ipp->ipp_hoplimit;
12605	}
12606	/* If app asked for tclass and it has changed ... */
12607	if (addflag.crb_ipv6_recvtclass) {
12608		toh = (struct T_opthdr *)optptr;
12609		toh->level = IPPROTO_IPV6;
12610		toh->name = IPV6_TCLASS;
12611		toh->len = sizeof (*toh) + sizeof (uint_t);
12612		toh->status = 0;
12613		optptr += sizeof (*toh);
12614		*(uint_t *)optptr = ipp->ipp_tclass;
12615		optptr += sizeof (uint_t);
12616		ASSERT(OK_32PTR(optptr));
12617		/* Save as "last" value */
12618		tcp->tcp_recvtclass = ipp->ipp_tclass;
12619	}
12620	if (addflag.crb_ipv6_recvhopopts) {
12621		toh = (struct T_opthdr *)optptr;
12622		toh->level = IPPROTO_IPV6;
12623		toh->name = IPV6_HOPOPTS;
12624		toh->len = sizeof (*toh) + ipp->ipp_hopoptslen;
12625		toh->status = 0;
12626		optptr += sizeof (*toh);
12627		bcopy((uchar_t *)ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen);
12628		optptr += ipp->ipp_hopoptslen;
12629		ASSERT(OK_32PTR(optptr));
12630		/* Save as last value */
12631		ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen,
12632		    (ipp->ipp_fields & IPPF_HOPOPTS),
12633		    ipp->ipp_hopopts, ipp->ipp_hopoptslen);
12634	}
12635	if (addflag.crb_ipv6_recvrthdrdstopts) {
12636		toh = (struct T_opthdr *)optptr;
12637		toh->level = IPPROTO_IPV6;
12638		toh->name = IPV6_RTHDRDSTOPTS;
12639		toh->len = sizeof (*toh) + ipp->ipp_rthdrdstoptslen;
12640		toh->status = 0;
12641		optptr += sizeof (*toh);
12642		bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen);
12643		optptr += ipp->ipp_rthdrdstoptslen;
12644		ASSERT(OK_32PTR(optptr));
12645		/* Save as last value */
12646		ip_savebuf((void **)&tcp->tcp_rthdrdstopts,
12647		    &tcp->tcp_rthdrdstoptslen,
12648		    (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
12649		    ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
12650	}
12651	if (addflag.crb_ipv6_recvrthdr) {
12652		toh = (struct T_opthdr *)optptr;
12653		toh->level = IPPROTO_IPV6;
12654		toh->name = IPV6_RTHDR;
12655		toh->len = sizeof (*toh) + ipp->ipp_rthdrlen;
12656		toh->status = 0;
12657		optptr += sizeof (*toh);
12658		bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen);
12659		optptr += ipp->ipp_rthdrlen;
12660		ASSERT(OK_32PTR(optptr));
12661		/* Save as last value */
12662		ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen,
12663		    (ipp->ipp_fields & IPPF_RTHDR),
12664		    ipp->ipp_rthdr, ipp->ipp_rthdrlen);
12665	}
12666	if (addflag.crb_ipv6_recvdstopts) {
12667		toh = (struct T_opthdr *)optptr;
12668		toh->level = IPPROTO_IPV6;
12669		toh->name = IPV6_DSTOPTS;
12670		toh->len = sizeof (*toh) + ipp->ipp_dstoptslen;
12671		toh->status = 0;
12672		optptr += sizeof (*toh);
12673		bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen);
12674		optptr += ipp->ipp_dstoptslen;
12675		ASSERT(OK_32PTR(optptr));
12676		/* Save as last value */
12677		ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen,
12678		    (ipp->ipp_fields & IPPF_DSTOPTS),
12679		    ipp->ipp_dstopts, ipp->ipp_dstoptslen);
12680	}
12681	ASSERT(optptr == mp->b_wptr);
12682	return (mp);
12683}
12684
12685/* ARGSUSED */
12686static void
12687tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
12688{
12689	conn_t	*connp = (conn_t *)arg;
12690	tcp_t	*tcp = connp->conn_tcp;
12691	queue_t	*q = connp->conn_rq;
12692	tcp_stack_t	*tcps = tcp->tcp_tcps;
12693
12694	ASSERT(!IPCL_IS_NONSTR(connp));
12695	mutex_enter(&tcp->tcp_rsrv_mp_lock);
12696	tcp->tcp_rsrv_mp = mp;
12697	mutex_exit(&tcp->tcp_rsrv_mp_lock);
12698
12699	TCP_STAT(tcps, tcp_rsrv_calls);
12700
12701	if (TCP_IS_DETACHED(tcp) || q == NULL) {
12702		return;
12703	}
12704
12705	if (tcp->tcp_fused) {
12706		tcp_fuse_backenable(tcp);
12707		return;
12708	}
12709
12710	if (canputnext(q)) {
12711		/* Not flow-controlled, open rwnd */
12712		tcp->tcp_rwnd = connp->conn_rcvbuf;
12713
12714		/*
12715		 * Send back a window update immediately if TCP is above
12716		 * ESTABLISHED state and the increase of the rcv window
12717		 * that the other side knows is at least 1 MSS after flow
12718		 * control is lifted.
12719		 */
12720		if (tcp->tcp_state >= TCPS_ESTABLISHED &&
12721		    tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) {
12722			tcp_xmit_ctl(NULL, tcp,
12723			    (tcp->tcp_swnd == 0) ? tcp->tcp_suna :
12724			    tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
12725		}
12726	}
12727}
12728
12729/*
12730 * The read side service routine is called mostly when we get back-enabled as a
12731 * result of flow control relief.  Since we don't actually queue anything in
12732 * TCP, we have no data to send out of here.  What we do is clear the receive
12733 * window, and send out a window update.
12734 */
12735static void
12736tcp_rsrv(queue_t *q)
12737{
12738	conn_t		*connp = Q_TO_CONN(q);
12739	tcp_t		*tcp = connp->conn_tcp;
12740	mblk_t		*mp;
12741
12742	/* No code does a putq on the read side */
12743	ASSERT(q->q_first == NULL);
12744
12745	/*
12746	 * If tcp->tcp_rsrv_mp == NULL, it means that tcp_rsrv() has already
12747	 * been run.  So just return.
12748	 */
12749	mutex_enter(&tcp->tcp_rsrv_mp_lock);
12750	if ((mp = tcp->tcp_rsrv_mp) == NULL) {
12751		mutex_exit(&tcp->tcp_rsrv_mp_lock);
12752		return;
12753	}
12754	tcp->tcp_rsrv_mp = NULL;
12755	mutex_exit(&tcp->tcp_rsrv_mp_lock);
12756
12757	CONN_INC_REF(connp);
12758	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_rsrv_input, connp,
12759	    NULL, SQ_PROCESS, SQTAG_TCP_RSRV);
12760}
12761
12762/*
12763 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
12764 * We do not allow the receive window to shrink.  After setting rwnd,
12765 * set the flow control hiwat of the stream.
12766 *
12767 * This function is called in 2 cases:
12768 *
12769 * 1) Before data transfer begins, in tcp_input_listener() for accepting a
12770 *    connection (passive open) and in tcp_input_data() for active connect.
12771 *    This is called after tcp_mss_set() when the desired MSS value is known.
12772 *    This makes sure that our window size is a mutiple of the other side's
12773 *    MSS.
12774 * 2) Handling SO_RCVBUF option.
12775 *
12776 * It is ASSUMED that the requested size is a multiple of the current MSS.
12777 *
12778 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
12779 * user requests so.
12780 */
12781int
12782tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd)
12783{
12784	uint32_t	mss = tcp->tcp_mss;
12785	uint32_t	old_max_rwnd;
12786	uint32_t	max_transmittable_rwnd;
12787	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
12788	tcp_stack_t	*tcps = tcp->tcp_tcps;
12789	conn_t		*connp = tcp->tcp_connp;
12790
12791	/*
12792	 * Insist on a receive window that is at least
12793	 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
12794	 * funny TCP interactions of Nagle algorithm, SWS avoidance
12795	 * and delayed acknowledgement.
12796	 */
12797	rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss);
12798
12799	if (tcp->tcp_fused) {
12800		size_t sth_hiwat;
12801		tcp_t *peer_tcp = tcp->tcp_loopback_peer;
12802
12803		ASSERT(peer_tcp != NULL);
12804		sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd);
12805		if (!tcp_detached) {
12806			(void) proto_set_rx_hiwat(connp->conn_rq, connp,
12807			    sth_hiwat);
12808			tcp_set_recv_threshold(tcp, sth_hiwat >> 3);
12809		}
12810
12811		/*
12812		 * In the fusion case, the maxpsz stream head value of
12813		 * our peer is set according to its send buffer size
12814		 * and our receive buffer size; since the latter may
12815		 * have changed we need to update the peer's maxpsz.
12816		 */
12817		(void) tcp_maxpsz_set(peer_tcp, B_TRUE);
12818		return (sth_hiwat);
12819	}
12820
12821	if (tcp_detached)
12822		old_max_rwnd = tcp->tcp_rwnd;
12823	else
12824		old_max_rwnd = connp->conn_rcvbuf;
12825
12826
12827	/*
12828	 * If window size info has already been exchanged, TCP should not
12829	 * shrink the window.  Shrinking window is doable if done carefully.
12830	 * We may add that support later.  But so far there is not a real
12831	 * need to do that.
12832	 */
12833	if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) {
12834		/* MSS may have changed, do a round up again. */
12835		rwnd = MSS_ROUNDUP(old_max_rwnd, mss);
12836	}
12837
12838	/*
12839	 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
12840	 * can be applied even before the window scale option is decided.
12841	 */
12842	max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws;
12843	if (rwnd > max_transmittable_rwnd) {
12844		rwnd = max_transmittable_rwnd -
12845		    (max_transmittable_rwnd % mss);
12846		if (rwnd < mss)
12847			rwnd = max_transmittable_rwnd;
12848		/*
12849		 * If we're over the limit we may have to back down tcp_rwnd.
12850		 * The increment below won't work for us. So we set all three
12851		 * here and the increment below will have no effect.
12852		 */
12853		tcp->tcp_rwnd = old_max_rwnd = rwnd;
12854	}
12855	if (tcp->tcp_localnet) {
12856		tcp->tcp_rack_abs_max =
12857		    MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2);
12858	} else {
12859		/*
12860		 * For a remote host on a different subnet (through a router),
12861		 * we ack every other packet to be conforming to RFC1122.
12862		 * tcp_deferred_acks_max is default to 2.
12863		 */
12864		tcp->tcp_rack_abs_max =
12865		    MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2);
12866	}
12867	if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max)
12868		tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
12869	else
12870		tcp->tcp_rack_cur_max = 0;
12871	/*
12872	 * Increment the current rwnd by the amount the maximum grew (we
12873	 * can not overwrite it since we might be in the middle of a
12874	 * connection.)
12875	 */
12876	tcp->tcp_rwnd += rwnd - old_max_rwnd;
12877	connp->conn_rcvbuf = rwnd;
12878
12879	/* Are we already connected? */
12880	if (tcp->tcp_tcpha != NULL) {
12881		tcp->tcp_tcpha->tha_win =
12882		    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
12883	}
12884
12885	if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
12886		tcp->tcp_cwnd_max = rwnd;
12887
12888	if (tcp_detached)
12889		return (rwnd);
12890
12891	tcp_set_recv_threshold(tcp, rwnd >> 3);
12892
12893	(void) proto_set_rx_hiwat(connp->conn_rq, connp, rwnd);
12894	return (rwnd);
12895}
12896
12897/*
12898 * Return SNMP stuff in buffer in mpdata.
12899 */
12900mblk_t *
12901tcp_snmp_get(queue_t *q, mblk_t *mpctl)
12902{
12903	mblk_t			*mpdata;
12904	mblk_t			*mp_conn_ctl = NULL;
12905	mblk_t			*mp_conn_tail;
12906	mblk_t			*mp_attr_ctl = NULL;
12907	mblk_t			*mp_attr_tail;
12908	mblk_t			*mp6_conn_ctl = NULL;
12909	mblk_t			*mp6_conn_tail;
12910	mblk_t			*mp6_attr_ctl = NULL;
12911	mblk_t			*mp6_attr_tail;
12912	struct opthdr		*optp;
12913	mib2_tcpConnEntry_t	tce;
12914	mib2_tcp6ConnEntry_t	tce6;
12915	mib2_transportMLPEntry_t mlp;
12916	connf_t			*connfp;
12917	int			i;
12918	boolean_t 		ispriv;
12919	zoneid_t 		zoneid;
12920	int			v4_conn_idx;
12921	int			v6_conn_idx;
12922	conn_t			*connp = Q_TO_CONN(q);
12923	tcp_stack_t		*tcps;
12924	ip_stack_t		*ipst;
12925	mblk_t			*mp2ctl;
12926
12927	/*
12928	 * make a copy of the original message
12929	 */
12930	mp2ctl = copymsg(mpctl);
12931
12932	if (mpctl == NULL ||
12933	    (mpdata = mpctl->b_cont) == NULL ||
12934	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
12935	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
12936	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
12937	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
12938		freemsg(mp_conn_ctl);
12939		freemsg(mp_attr_ctl);
12940		freemsg(mp6_conn_ctl);
12941		freemsg(mp6_attr_ctl);
12942		freemsg(mpctl);
12943		freemsg(mp2ctl);
12944		return (NULL);
12945	}
12946
12947	ipst = connp->conn_netstack->netstack_ip;
12948	tcps = connp->conn_netstack->netstack_tcp;
12949
12950	/* build table of connections -- need count in fixed part */
12951	SET_MIB(tcps->tcps_mib.tcpRtoAlgorithm, 4);   /* vanj */
12952	SET_MIB(tcps->tcps_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
12953	SET_MIB(tcps->tcps_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
12954	SET_MIB(tcps->tcps_mib.tcpMaxConn, -1);
12955	SET_MIB(tcps->tcps_mib.tcpCurrEstab, 0);
12956
12957	ispriv =
12958	    secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
12959	zoneid = Q_TO_CONN(q)->conn_zoneid;
12960
12961	v4_conn_idx = v6_conn_idx = 0;
12962	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;
12963
12964	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
12965		ipst = tcps->tcps_netstack->netstack_ip;
12966
12967		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
12968
12969		connp = NULL;
12970
12971		while ((connp =
12972		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
12973			tcp_t *tcp;
12974			boolean_t needattr;
12975
12976			if (connp->conn_zoneid != zoneid)
12977				continue;	/* not in this zone */
12978
12979			tcp = connp->conn_tcp;
12980			UPDATE_MIB(&tcps->tcps_mib,
12981			    tcpHCInSegs, tcp->tcp_ibsegs);
12982			tcp->tcp_ibsegs = 0;
12983			UPDATE_MIB(&tcps->tcps_mib,
12984			    tcpHCOutSegs, tcp->tcp_obsegs);
12985			tcp->tcp_obsegs = 0;
12986
12987			tce6.tcp6ConnState = tce.tcpConnState =
12988			    tcp_snmp_state(tcp);
12989			if (tce.tcpConnState == MIB2_TCP_established ||
12990			    tce.tcpConnState == MIB2_TCP_closeWait)
12991				BUMP_MIB(&tcps->tcps_mib, tcpCurrEstab);
12992
12993			needattr = B_FALSE;
12994			bzero(&mlp, sizeof (mlp));
12995			if (connp->conn_mlp_type != mlptSingle) {
12996				if (connp->conn_mlp_type == mlptShared ||
12997				    connp->conn_mlp_type == mlptBoth)
12998					mlp.tme_flags |= MIB2_TMEF_SHARED;
12999				if (connp->conn_mlp_type == mlptPrivate ||
13000				    connp->conn_mlp_type == mlptBoth)
13001					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
13002				needattr = B_TRUE;
13003			}
13004			if (connp->conn_anon_mlp) {
13005				mlp.tme_flags |= MIB2_TMEF_ANONMLP;
13006				needattr = B_TRUE;
13007			}
13008			switch (connp->conn_mac_mode) {
13009			case CONN_MAC_DEFAULT:
13010				break;
13011			case CONN_MAC_AWARE:
13012				mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
13013				needattr = B_TRUE;
13014				break;
13015			case CONN_MAC_IMPLICIT:
13016				mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
13017				needattr = B_TRUE;
13018				break;
13019			}
13020			if (connp->conn_ixa->ixa_tsl != NULL) {
13021				ts_label_t *tsl;
13022
13023				tsl = connp->conn_ixa->ixa_tsl;
13024				mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
13025				mlp.tme_doi = label2doi(tsl);
13026				mlp.tme_label = *label2bslabel(tsl);
13027				needattr = B_TRUE;
13028			}
13029
13030			/* Create a message to report on IPv6 entries */
13031			if (connp->conn_ipversion == IPV6_VERSION) {
13032			tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
13033			tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
13034			tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
13035			tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
13036			if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
13037				tce6.tcp6ConnIfIndex =
13038				    connp->conn_ixa->ixa_scopeid;
13039			} else {
13040				tce6.tcp6ConnIfIndex = connp->conn_bound_if;
13041			}
13042			/* Don't want just anybody seeing these... */
13043			if (ispriv) {
13044				tce6.tcp6ConnEntryInfo.ce_snxt =
13045				    tcp->tcp_snxt;
13046				tce6.tcp6ConnEntryInfo.ce_suna =
13047				    tcp->tcp_suna;
13048				tce6.tcp6ConnEntryInfo.ce_rnxt =
13049				    tcp->tcp_rnxt;
13050				tce6.tcp6ConnEntryInfo.ce_rack =
13051				    tcp->tcp_rack;
13052			} else {
13053				/*
13054				 * Netstat, unfortunately, uses this to
13055				 * get send/receive queue sizes.  How to fix?
13056				 * Why not compute the difference only?
13057				 */
13058				tce6.tcp6ConnEntryInfo.ce_snxt =
13059				    tcp->tcp_snxt - tcp->tcp_suna;
13060				tce6.tcp6ConnEntryInfo.ce_suna = 0;
13061				tce6.tcp6ConnEntryInfo.ce_rnxt =
13062				    tcp->tcp_rnxt - tcp->tcp_rack;
13063				tce6.tcp6ConnEntryInfo.ce_rack = 0;
13064			}
13065
13066			tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd;
13067			tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
13068			tce6.tcp6ConnEntryInfo.ce_rto =  tcp->tcp_rto;
13069			tce6.tcp6ConnEntryInfo.ce_mss =  tcp->tcp_mss;
13070			tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state;
13071
13072			tce6.tcp6ConnCreationProcess =
13073			    (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
13074			    connp->conn_cpid;
13075			tce6.tcp6ConnCreationTime = connp->conn_open_time;
13076
13077			(void) snmp_append_data2(mp6_conn_ctl->b_cont,
13078			    &mp6_conn_tail, (char *)&tce6, sizeof (tce6));
13079
13080			mlp.tme_connidx = v6_conn_idx++;
13081			if (needattr)
13082				(void) snmp_append_data2(mp6_attr_ctl->b_cont,
13083				    &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
13084			}
13085			/*
13086			 * Create an IPv4 table entry for IPv4 entries and also
13087			 * for IPv6 entries which are bound to in6addr_any
13088			 * but don't have IPV6_V6ONLY set.
13089			 * (i.e. anything an IPv4 peer could connect to)
13090			 */
13091			if (connp->conn_ipversion == IPV4_VERSION ||
13092			    (tcp->tcp_state <= TCPS_LISTEN &&
13093			    !connp->conn_ipv6_v6only &&
13094			    IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
13095				if (connp->conn_ipversion == IPV6_VERSION) {
13096					tce.tcpConnRemAddress = INADDR_ANY;
13097					tce.tcpConnLocalAddress = INADDR_ANY;
13098				} else {
13099					tce.tcpConnRemAddress =
13100					    connp->conn_faddr_v4;
13101					tce.tcpConnLocalAddress =
13102					    connp->conn_laddr_v4;
13103				}
13104				tce.tcpConnLocalPort = ntohs(connp->conn_lport);
13105				tce.tcpConnRemPort = ntohs(connp->conn_fport);
13106				/* Don't want just anybody seeing these... */
13107				if (ispriv) {
13108					tce.tcpConnEntryInfo.ce_snxt =
13109					    tcp->tcp_snxt;
13110					tce.tcpConnEntryInfo.ce_suna =
13111					    tcp->tcp_suna;
13112					tce.tcpConnEntryInfo.ce_rnxt =
13113					    tcp->tcp_rnxt;
13114					tce.tcpConnEntryInfo.ce_rack =
13115					    tcp->tcp_rack;
13116				} else {
13117					/*
13118					 * Netstat, unfortunately, uses this to
13119					 * get send/receive queue sizes.  How
13120					 * to fix?
13121					 * Why not compute the difference only?
13122					 */
13123					tce.tcpConnEntryInfo.ce_snxt =
13124					    tcp->tcp_snxt - tcp->tcp_suna;
13125					tce.tcpConnEntryInfo.ce_suna = 0;
13126					tce.tcpConnEntryInfo.ce_rnxt =
13127					    tcp->tcp_rnxt - tcp->tcp_rack;
13128					tce.tcpConnEntryInfo.ce_rack = 0;
13129				}
13130
13131				tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd;
13132				tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
13133				tce.tcpConnEntryInfo.ce_rto =  tcp->tcp_rto;
13134				tce.tcpConnEntryInfo.ce_mss =  tcp->tcp_mss;
13135				tce.tcpConnEntryInfo.ce_state =
13136				    tcp->tcp_state;
13137
13138				tce.tcpConnCreationProcess =
13139				    (connp->conn_cpid < 0) ?
13140				    MIB2_UNKNOWN_PROCESS :
13141				    connp->conn_cpid;
13142				tce.tcpConnCreationTime = connp->conn_open_time;
13143
13144				(void) snmp_append_data2(mp_conn_ctl->b_cont,
13145				    &mp_conn_tail, (char *)&tce, sizeof (tce));
13146
13147				mlp.tme_connidx = v4_conn_idx++;
13148				if (needattr)
13149					(void) snmp_append_data2(
13150					    mp_attr_ctl->b_cont,
13151					    &mp_attr_tail, (char *)&mlp,
13152					    sizeof (mlp));
13153			}
13154		}
13155	}
13156
13157	/* fixed length structure for IPv4 and IPv6 counters */
13158	SET_MIB(tcps->tcps_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t));
13159	SET_MIB(tcps->tcps_mib.tcp6ConnTableSize,
13160	    sizeof (mib2_tcp6ConnEntry_t));
13161	/* synchronize 32- and 64-bit counters */
13162	SYNC32_MIB(&tcps->tcps_mib, tcpInSegs, tcpHCInSegs);
13163	SYNC32_MIB(&tcps->tcps_mib, tcpOutSegs, tcpHCOutSegs);
13164	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
13165	optp->level = MIB2_TCP;
13166	optp->name = 0;
13167	(void) snmp_append_data(mpdata, (char *)&tcps->tcps_mib,
13168	    sizeof (tcps->tcps_mib));
13169	optp->len = msgdsize(mpdata);
13170	qreply(q, mpctl);
13171
13172	/* table of connections... */
13173	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
13174	    sizeof (struct T_optmgmt_ack)];
13175	optp->level = MIB2_TCP;
13176	optp->name = MIB2_TCP_CONN;
13177	optp->len = msgdsize(mp_conn_ctl->b_cont);
13178	qreply(q, mp_conn_ctl);
13179
13180	/* table of MLP attributes... */
13181	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
13182	    sizeof (struct T_optmgmt_ack)];
13183	optp->level = MIB2_TCP;
13184	optp->name = EXPER_XPORT_MLP;
13185	optp->len = msgdsize(mp_attr_ctl->b_cont);
13186	if (optp->len == 0)
13187		freemsg(mp_attr_ctl);
13188	else
13189		qreply(q, mp_attr_ctl);
13190
13191	/* table of IPv6 connections... */
13192	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
13193	    sizeof (struct T_optmgmt_ack)];
13194	optp->level = MIB2_TCP6;
13195	optp->name = MIB2_TCP6_CONN;
13196	optp->len = msgdsize(mp6_conn_ctl->b_cont);
13197	qreply(q, mp6_conn_ctl);
13198
13199	/* table of IPv6 MLP attributes... */
13200	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
13201	    sizeof (struct T_optmgmt_ack)];
13202	optp->level = MIB2_TCP6;
13203	optp->name = EXPER_XPORT_MLP;
13204	optp->len = msgdsize(mp6_attr_ctl->b_cont);
13205	if (optp->len == 0)
13206		freemsg(mp6_attr_ctl);
13207	else
13208		qreply(q, mp6_attr_ctl);
13209	return (mp2ctl);
13210}
13211
13212/* Return 0 if invalid set request, 1 otherwise, including non-tcp requests  */
13213/* ARGSUSED */
13214int
13215tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
13216{
13217	mib2_tcpConnEntry_t	*tce = (mib2_tcpConnEntry_t *)ptr;
13218
13219	switch (level) {
13220	case MIB2_TCP:
13221		switch (name) {
13222		case 13:
13223			if (tce->tcpConnState != MIB2_TCP_deleteTCB)
13224				return (0);
13225			/* TODO: delete entry defined by tce */
13226			return (1);
13227		default:
13228			return (0);
13229		}
13230	default:
13231		return (1);
13232	}
13233}
13234
13235/* Translate TCP state to MIB2 TCP state. */
13236static int
13237tcp_snmp_state(tcp_t *tcp)
13238{
13239	if (tcp == NULL)
13240		return (0);
13241
13242	switch (tcp->tcp_state) {
13243	case TCPS_CLOSED:
13244	case TCPS_IDLE:	/* RFC1213 doesn't have analogue for IDLE & BOUND */
13245	case TCPS_BOUND:
13246		return (MIB2_TCP_closed);
13247	case TCPS_LISTEN:
13248		return (MIB2_TCP_listen);
13249	case TCPS_SYN_SENT:
13250		return (MIB2_TCP_synSent);
13251	case TCPS_SYN_RCVD:
13252		return (MIB2_TCP_synReceived);
13253	case TCPS_ESTABLISHED:
13254		return (MIB2_TCP_established);
13255	case TCPS_CLOSE_WAIT:
13256		return (MIB2_TCP_closeWait);
13257	case TCPS_FIN_WAIT_1:
13258		return (MIB2_TCP_finWait1);
13259	case TCPS_CLOSING:
13260		return (MIB2_TCP_closing);
13261	case TCPS_LAST_ACK:
13262		return (MIB2_TCP_lastAck);
13263	case TCPS_FIN_WAIT_2:
13264		return (MIB2_TCP_finWait2);
13265	case TCPS_TIME_WAIT:
13266		return (MIB2_TCP_timeWait);
13267	default:
13268		return (0);
13269	}
13270}
13271
13272/*
13273 * tcp_timer is the timer service routine.  It handles the retransmission,
13274 * FIN_WAIT_2 flush, and zero window probe timeout events.  It figures out
13275 * from the state of the tcp instance what kind of action needs to be done
13276 * at the time it is called.
13277 */
13278static void
13279tcp_timer(void *arg)
13280{
13281	mblk_t		*mp;
13282	clock_t		first_threshold;
13283	clock_t		second_threshold;
13284	clock_t		ms;
13285	uint32_t	mss;
13286	conn_t		*connp = (conn_t *)arg;
13287	tcp_t		*tcp = connp->conn_tcp;
13288	tcp_stack_t	*tcps = tcp->tcp_tcps;
13289
13290	tcp->tcp_timer_tid = 0;
13291
13292	if (tcp->tcp_fused)
13293		return;
13294
13295	first_threshold =  tcp->tcp_first_timer_threshold;
13296	second_threshold = tcp->tcp_second_timer_threshold;
13297	switch (tcp->tcp_state) {
13298	case TCPS_IDLE:
13299	case TCPS_BOUND:
13300	case TCPS_LISTEN:
13301		return;
13302	case TCPS_SYN_RCVD: {
13303		tcp_t	*listener = tcp->tcp_listener;
13304
13305		if (tcp->tcp_syn_rcvd_timeout == 0 && (listener != NULL)) {
13306			/* it's our first timeout */
13307			tcp->tcp_syn_rcvd_timeout = 1;
13308			mutex_enter(&listener->tcp_eager_lock);
13309			listener->tcp_syn_rcvd_timeout++;
13310			if (!tcp->tcp_dontdrop && !tcp->tcp_closemp_used) {
13311				/*
13312				 * Make this eager available for drop if we
13313				 * need to drop one to accomodate a new
13314				 * incoming SYN request.
13315				 */
13316				MAKE_DROPPABLE(listener, tcp);
13317			}
13318			if (!listener->tcp_syn_defense &&
13319			    (listener->tcp_syn_rcvd_timeout >
13320			    (tcps->tcps_conn_req_max_q0 >> 2)) &&
13321			    (tcps->tcps_conn_req_max_q0 > 200)) {
13322				/* We may be under attack. Put on a defense. */
13323				listener->tcp_syn_defense = B_TRUE;
13324				cmn_err(CE_WARN, "High TCP connect timeout "
13325				    "rate! System (port %d) may be under a "
13326				    "SYN flood attack!",
13327				    ntohs(listener->tcp_connp->conn_lport));
13328
13329				listener->tcp_ip_addr_cache = kmem_zalloc(
13330				    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t),
13331				    KM_NOSLEEP);
13332			}
13333			mutex_exit(&listener->tcp_eager_lock);
13334		} else if (listener != NULL) {
13335			mutex_enter(&listener->tcp_eager_lock);
13336			tcp->tcp_syn_rcvd_timeout++;
13337			if (tcp->tcp_syn_rcvd_timeout > 1 &&
13338			    !tcp->tcp_closemp_used) {
13339				/*
13340				 * This is our second timeout. Put the tcp in
13341				 * the list of droppable eagers to allow it to
13342				 * be dropped, if needed. We don't check
13343				 * whether tcp_dontdrop is set or not to
13344				 * protect ourselve from a SYN attack where a
13345				 * remote host can spoof itself as one of the
13346				 * good IP source and continue to hold
13347				 * resources too long.
13348				 */
13349				MAKE_DROPPABLE(listener, tcp);
13350			}
13351			mutex_exit(&listener->tcp_eager_lock);
13352		}
13353	}
13354		/* FALLTHRU */
13355	case TCPS_SYN_SENT:
13356		first_threshold =  tcp->tcp_first_ctimer_threshold;
13357		second_threshold = tcp->tcp_second_ctimer_threshold;
13358		break;
13359	case TCPS_ESTABLISHED:
13360	case TCPS_FIN_WAIT_1:
13361	case TCPS_CLOSING:
13362	case TCPS_CLOSE_WAIT:
13363	case TCPS_LAST_ACK:
13364		/* If we have data to rexmit */
13365		if (tcp->tcp_suna != tcp->tcp_snxt) {
13366			clock_t	time_to_wait;
13367
13368			BUMP_MIB(&tcps->tcps_mib, tcpTimRetrans);
13369			if (!tcp->tcp_xmit_head)
13370				break;
13371			time_to_wait = lbolt -
13372			    (clock_t)tcp->tcp_xmit_head->b_prev;
13373			time_to_wait = tcp->tcp_rto -
13374			    TICK_TO_MSEC(time_to_wait);
13375			/*
13376			 * If the timer fires too early, 1 clock tick earlier,
13377			 * restart the timer.
13378			 */
13379			if (time_to_wait > msec_per_tick) {
13380				TCP_STAT(tcps, tcp_timer_fire_early);
13381				TCP_TIMER_RESTART(tcp, time_to_wait);
13382				return;
13383			}
13384			/*
13385			 * When we probe zero windows, we force the swnd open.
13386			 * If our peer acks with a closed window swnd will be
13387			 * set to zero by tcp_rput(). As long as we are
13388			 * receiving acks tcp_rput will
13389			 * reset 'tcp_ms_we_have_waited' so as not to trip the
13390			 * first and second interval actions.  NOTE: the timer
13391			 * interval is allowed to continue its exponential
13392			 * backoff.
13393			 */
13394			if (tcp->tcp_swnd == 0 || tcp->tcp_zero_win_probe) {
13395				if (connp->conn_debug) {
13396					(void) strlog(TCP_MOD_ID, 0, 1,
13397					    SL_TRACE, "tcp_timer: zero win");
13398				}
13399			} else {
13400				/*
13401				 * After retransmission, we need to do
13402				 * slow start.  Set the ssthresh to one
13403				 * half of current effective window and
13404				 * cwnd to one MSS.  Also reset
13405				 * tcp_cwnd_cnt.
13406				 *
13407				 * Note that if tcp_ssthresh is reduced because
13408				 * of ECN, do not reduce it again unless it is
13409				 * already one window of data away (tcp_cwr
13410				 * should then be cleared) or this is a
13411				 * timeout for a retransmitted segment.
13412				 */
13413				uint32_t npkt;
13414
13415				if (!tcp->tcp_cwr || tcp->tcp_rexmit) {
13416					npkt = ((tcp->tcp_timer_backoff ?
13417					    tcp->tcp_cwnd_ssthresh :
13418					    tcp->tcp_snxt -
13419					    tcp->tcp_suna) >> 1) / tcp->tcp_mss;
13420					tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) *
13421					    tcp->tcp_mss;
13422				}
13423				tcp->tcp_cwnd = tcp->tcp_mss;
13424				tcp->tcp_cwnd_cnt = 0;
13425				if (tcp->tcp_ecn_ok) {
13426					tcp->tcp_cwr = B_TRUE;
13427					tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
13428					tcp->tcp_ecn_cwr_sent = B_FALSE;
13429				}
13430			}
13431			break;
13432		}
13433		/*
13434		 * We have something to send yet we cannot send.  The
13435		 * reason can be:
13436		 *
13437		 * 1. Zero send window: we need to do zero window probe.
13438		 * 2. Zero cwnd: because of ECN, we need to "clock out
13439		 * segments.
13440		 * 3. SWS avoidance: receiver may have shrunk window,
13441		 * reset our knowledge.
13442		 *
13443		 * Note that condition 2 can happen with either 1 or
13444		 * 3.  But 1 and 3 are exclusive.
13445		 */
13446		if (tcp->tcp_unsent != 0) {
13447			/*
13448			 * Should not hold the zero-copy messages for too long.
13449			 */
13450			if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_xmit_zc_clean)
13451				tcp->tcp_xmit_head = tcp_zcopy_backoff(tcp,
13452				    tcp->tcp_xmit_head, B_TRUE);
13453
13454			if (tcp->tcp_cwnd == 0) {
13455				/*
13456				 * Set tcp_cwnd to 1 MSS so that a
13457				 * new segment can be sent out.  We
13458				 * are "clocking out" new data when
13459				 * the network is really congested.
13460				 */
13461				ASSERT(tcp->tcp_ecn_ok);
13462				tcp->tcp_cwnd = tcp->tcp_mss;
13463			}
13464			if (tcp->tcp_swnd == 0) {
13465				/* Extend window for zero window probe */
13466				tcp->tcp_swnd++;
13467				tcp->tcp_zero_win_probe = B_TRUE;
13468				BUMP_MIB(&tcps->tcps_mib, tcpOutWinProbe);
13469			} else {
13470				/*
13471				 * Handle timeout from sender SWS avoidance.
13472				 * Reset our knowledge of the max send window
13473				 * since the receiver might have reduced its
13474				 * receive buffer.  Avoid setting tcp_max_swnd
13475				 * to one since that will essentially disable
13476				 * the SWS checks.
13477				 *
13478				 * Note that since we don't have a SWS
13479				 * state variable, if the timeout is set
13480				 * for ECN but not for SWS, this
13481				 * code will also be executed.  This is
13482				 * fine as tcp_max_swnd is updated
13483				 * constantly and it will not affect
13484				 * anything.
13485				 */
13486				tcp->tcp_max_swnd = MAX(tcp->tcp_swnd, 2);
13487			}
13488			tcp_wput_data(tcp, NULL, B_FALSE);
13489			return;
13490		}
13491		/* Is there a FIN that needs to be to re retransmitted? */
13492		if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
13493		    !tcp->tcp_fin_acked)
13494			break;
13495		/* Nothing to do, return without restarting timer. */
13496		TCP_STAT(tcps, tcp_timer_fire_miss);
13497		return;
13498	case TCPS_FIN_WAIT_2:
13499		/*
13500		 * User closed the TCP endpoint and peer ACK'ed our FIN.
13501		 * We waited some time for for peer's FIN, but it hasn't
13502		 * arrived.  We flush the connection now to avoid
13503		 * case where the peer has rebooted.
13504		 */
13505		if (TCP_IS_DETACHED(tcp)) {
13506			(void) tcp_clean_death(tcp, 0, 23);
13507		} else {
13508			TCP_TIMER_RESTART(tcp,
13509			    tcps->tcps_fin_wait_2_flush_interval);
13510		}
13511		return;
13512	case TCPS_TIME_WAIT:
13513		(void) tcp_clean_death(tcp, 0, 24);
13514		return;
13515	default:
13516		if (connp->conn_debug) {
13517			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
13518			    "tcp_timer: strange state (%d) %s",
13519			    tcp->tcp_state, tcp_display(tcp, NULL,
13520			    DISP_PORT_ONLY));
13521		}
13522		return;
13523	}
13524
13525	if ((ms = tcp->tcp_ms_we_have_waited) > second_threshold) {
13526		/*
13527		 * Should not hold the zero-copy messages for too long.
13528		 */
13529		if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_xmit_zc_clean)
13530			tcp->tcp_xmit_head = tcp_zcopy_backoff(tcp,
13531			    tcp->tcp_xmit_head, B_TRUE);
13532
13533		/*
13534		 * For zero window probe, we need to send indefinitely,
13535		 * unless we have not heard from the other side for some
13536		 * time...
13537		 */
13538		if ((tcp->tcp_zero_win_probe == 0) ||
13539		    (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >
13540		    second_threshold)) {
13541			BUMP_MIB(&tcps->tcps_mib, tcpTimRetransDrop);
13542			/*
13543			 * If TCP is in SYN_RCVD state, send back a
13544			 * RST|ACK as BSD does.  Note that tcp_zero_win_probe
13545			 * should be zero in TCPS_SYN_RCVD state.
13546			 */
13547			if (tcp->tcp_state == TCPS_SYN_RCVD) {
13548				tcp_xmit_ctl("tcp_timer: RST sent on timeout "
13549				    "in SYN_RCVD",
13550				    tcp, tcp->tcp_snxt,
13551				    tcp->tcp_rnxt, TH_RST | TH_ACK);
13552			}
13553			(void) tcp_clean_death(tcp,
13554			    tcp->tcp_client_errno ?
13555			    tcp->tcp_client_errno : ETIMEDOUT, 25);
13556			return;
13557		} else {
13558			/*
13559			 * Set tcp_ms_we_have_waited to second_threshold
13560			 * so that in next timeout, we will do the above
13561			 * check (lbolt - tcp_last_recv_time).  This is
13562			 * also to avoid overflow.
13563			 *
13564			 * We don't need to decrement tcp_timer_backoff
13565			 * to avoid overflow because it will be decremented
13566			 * later if new timeout value is greater than
13567			 * tcp_rexmit_interval_max.  In the case when
13568			 * tcp_rexmit_interval_max is greater than
13569			 * second_threshold, it means that we will wait
13570			 * longer than second_threshold to send the next
13571			 * window probe.
13572			 */
13573			tcp->tcp_ms_we_have_waited = second_threshold;
13574		}
13575	} else if (ms > first_threshold) {
13576		/*
13577		 * Should not hold the zero-copy messages for too long.
13578		 */
13579		if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_xmit_zc_clean)
13580			tcp->tcp_xmit_head = tcp_zcopy_backoff(tcp,
13581			    tcp->tcp_xmit_head, B_TRUE);
13582
13583		/*
13584		 * We have been retransmitting for too long...  The RTT
13585		 * we calculated is probably incorrect.  Reinitialize it.
13586		 * Need to compensate for 0 tcp_rtt_sa.  Reset
13587		 * tcp_rtt_update so that we won't accidentally cache a
13588		 * bad value.  But only do this if this is not a zero
13589		 * window probe.
13590		 */
13591		if (tcp->tcp_rtt_sa != 0 && tcp->tcp_zero_win_probe == 0) {
13592			tcp->tcp_rtt_sd += (tcp->tcp_rtt_sa >> 3) +
13593			    (tcp->tcp_rtt_sa >> 5);
13594			tcp->tcp_rtt_sa = 0;
13595			tcp_ip_notify(tcp);
13596			tcp->tcp_rtt_update = 0;
13597		}
13598	}
13599	tcp->tcp_timer_backoff++;
13600	if ((ms = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
13601	    tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5)) <
13602	    tcps->tcps_rexmit_interval_min) {
13603		/*
13604		 * This means the original RTO is tcp_rexmit_interval_min.
13605		 * So we will use tcp_rexmit_interval_min as the RTO value
13606		 * and do the backoff.
13607		 */
13608		ms = tcps->tcps_rexmit_interval_min << tcp->tcp_timer_backoff;
13609	} else {
13610		ms <<= tcp->tcp_timer_backoff;
13611	}
13612	if (ms > tcps->tcps_rexmit_interval_max) {
13613		ms = tcps->tcps_rexmit_interval_max;
13614		/*
13615		 * ms is at max, decrement tcp_timer_backoff to avoid
13616		 * overflow.
13617		 */
13618		tcp->tcp_timer_backoff--;
13619	}
13620	tcp->tcp_ms_we_have_waited += ms;
13621	if (tcp->tcp_zero_win_probe == 0) {
13622		tcp->tcp_rto = ms;
13623	}
13624	TCP_TIMER_RESTART(tcp, ms);
13625	/*
13626	 * This is after a timeout and tcp_rto is backed off.  Set
13627	 * tcp_set_timer to 1 so that next time RTO is updated, we will
13628	 * restart the timer with a correct value.
13629	 */
13630	tcp->tcp_set_timer = 1;
13631	mss = tcp->tcp_snxt - tcp->tcp_suna;
13632	if (mss > tcp->tcp_mss)
13633		mss = tcp->tcp_mss;
13634	if (mss > tcp->tcp_swnd && tcp->tcp_swnd != 0)
13635		mss = tcp->tcp_swnd;
13636
13637	if ((mp = tcp->tcp_xmit_head) != NULL)
13638		mp->b_prev = (mblk_t *)lbolt;
13639	mp = tcp_xmit_mp(tcp, mp, mss, NULL, NULL, tcp->tcp_suna, B_TRUE, &mss,
13640	    B_TRUE);
13641
13642	/*
13643	 * When slow start after retransmission begins, start with
13644	 * this seq no.  tcp_rexmit_max marks the end of special slow
13645	 * start phase.  tcp_snd_burst controls how many segments
13646	 * can be sent because of an ack.
13647	 */
13648	tcp->tcp_rexmit_nxt = tcp->tcp_suna;
13649	tcp->tcp_snd_burst = TCP_CWND_SS;
13650	if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
13651	    (tcp->tcp_unsent == 0)) {
13652		tcp->tcp_rexmit_max = tcp->tcp_fss;
13653	} else {
13654		tcp->tcp_rexmit_max = tcp->tcp_snxt;
13655	}
13656	tcp->tcp_rexmit = B_TRUE;
13657	tcp->tcp_dupack_cnt = 0;
13658
13659	/*
13660	 * Remove all rexmit SACK blk to start from fresh.
13661	 */
13662	if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL)
13663		TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
13664	if (mp == NULL) {
13665		return;
13666	}
13667
13668	tcp->tcp_csuna = tcp->tcp_snxt;
13669	BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
13670	UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, mss);
13671	tcp_send_data(tcp, mp);
13672
13673}
13674
13675static int
13676tcp_do_unbind(conn_t *connp)
13677{
13678	tcp_t *tcp = connp->conn_tcp;
13679
13680	switch (tcp->tcp_state) {
13681	case TCPS_BOUND:
13682	case TCPS_LISTEN:
13683		break;
13684	default:
13685		return (-TOUTSTATE);
13686	}
13687
13688	/*
13689	 * Need to clean up all the eagers since after the unbind, segments
13690	 * will no longer be delivered to this listener stream.
13691	 */
13692	mutex_enter(&tcp->tcp_eager_lock);
13693	if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
13694		tcp_eager_cleanup(tcp, 0);
13695	}
13696	mutex_exit(&tcp->tcp_eager_lock);
13697
13698	connp->conn_laddr_v6 = ipv6_all_zeros;
13699	connp->conn_saddr_v6 = ipv6_all_zeros;
13700	tcp_bind_hash_remove(tcp);
13701	tcp->tcp_state = TCPS_IDLE;
13702
13703	ip_unbind(connp);
13704	bzero(&connp->conn_ports, sizeof (connp->conn_ports));
13705
13706	return (0);
13707}
13708
13709/* tcp_unbind is called by tcp_wput_proto to handle T_UNBIND_REQ messages. */
13710static void
13711tcp_tpi_unbind(tcp_t *tcp, mblk_t *mp)
13712{
13713	conn_t *connp = tcp->tcp_connp;
13714	int error;
13715
13716	error = tcp_do_unbind(connp);
13717	if (error > 0) {
13718		tcp_err_ack(tcp, mp, TSYSERR, error);
13719	} else if (error < 0) {
13720		tcp_err_ack(tcp, mp, -error, 0);
13721	} else {
13722		/* Send M_FLUSH according to TPI */
13723		(void) putnextctl1(connp->conn_rq, M_FLUSH, FLUSHRW);
13724
13725		mp = mi_tpi_ok_ack_alloc(mp);
13726		if (mp != NULL)
13727			putnext(connp->conn_rq, mp);
13728	}
13729}
13730
13731/*
13732 * Don't let port fall into the privileged range.
13733 * Since the extra privileged ports can be arbitrary we also
13734 * ensure that we exclude those from consideration.
13735 * tcp_g_epriv_ports is not sorted thus we loop over it until
13736 * there are no changes.
13737 *
13738 * Note: No locks are held when inspecting tcp_g_*epriv_ports
13739 * but instead the code relies on:
13740 * - the fact that the address of the array and its size never changes
13741 * - the atomic assignment of the elements of the array
13742 *
13743 * Returns 0 if there are no more ports available.
13744 *
13745 * TS note: skip multilevel ports.
13746 */
13747static in_port_t
13748tcp_update_next_port(in_port_t port, const tcp_t *tcp, boolean_t random)
13749{
13750	int i;
13751	boolean_t restart = B_FALSE;
13752	tcp_stack_t *tcps = tcp->tcp_tcps;
13753
13754	if (random && tcp_random_anon_port != 0) {
13755		(void) random_get_pseudo_bytes((uint8_t *)&port,
13756		    sizeof (in_port_t));
13757		/*
13758		 * Unless changed by a sys admin, the smallest anon port
13759		 * is 32768 and the largest anon port is 65535.  It is
13760		 * very likely (50%) for the random port to be smaller
13761		 * than the smallest anon port.  When that happens,
13762		 * add port % (anon port range) to the smallest anon
13763		 * port to get the random port.  It should fall into the
13764		 * valid anon port range.
13765		 */
13766		if (port < tcps->tcps_smallest_anon_port) {
13767			port = tcps->tcps_smallest_anon_port +
13768			    port % (tcps->tcps_largest_anon_port -
13769			    tcps->tcps_smallest_anon_port);
13770		}
13771	}
13772
13773retry:
13774	if (port < tcps->tcps_smallest_anon_port)
13775		port = (in_port_t)tcps->tcps_smallest_anon_port;
13776
13777	if (port > tcps->tcps_largest_anon_port) {
13778		if (restart)
13779			return (0);
13780		restart = B_TRUE;
13781		port = (in_port_t)tcps->tcps_smallest_anon_port;
13782	}
13783
13784	if (port < tcps->tcps_smallest_nonpriv_port)
13785		port = (in_port_t)tcps->tcps_smallest_nonpriv_port;
13786
13787	for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
13788		if (port == tcps->tcps_g_epriv_ports[i]) {
13789			port++;
13790			/*
13791			 * Make sure whether the port is in the
13792			 * valid range.
13793			 */
13794			goto retry;
13795		}
13796	}
13797	if (is_system_labeled() &&
13798	    (i = tsol_next_port(crgetzone(tcp->tcp_connp->conn_cred), port,
13799	    IPPROTO_TCP, B_TRUE)) != 0) {
13800		port = i;
13801		goto retry;
13802	}
13803	return (port);
13804}
13805
13806/*
13807 * Return the next anonymous port in the privileged port range for
13808 * bind checking.  It starts at IPPORT_RESERVED - 1 and goes
13809 * downwards.  This is the same behavior as documented in the userland
13810 * library call rresvport(3N).
13811 *
13812 * TS note: skip multilevel ports.
13813 */
13814static in_port_t
13815tcp_get_next_priv_port(const tcp_t *tcp)
13816{
13817	static in_port_t next_priv_port = IPPORT_RESERVED - 1;
13818	in_port_t nextport;
13819	boolean_t restart = B_FALSE;
13820	tcp_stack_t *tcps = tcp->tcp_tcps;
13821retry:
13822	if (next_priv_port < tcps->tcps_min_anonpriv_port ||
13823	    next_priv_port >= IPPORT_RESERVED) {
13824		next_priv_port = IPPORT_RESERVED - 1;
13825		if (restart)
13826			return (0);
13827		restart = B_TRUE;
13828	}
13829	if (is_system_labeled() &&
13830	    (nextport = tsol_next_port(crgetzone(tcp->tcp_connp->conn_cred),
13831	    next_priv_port, IPPROTO_TCP, B_FALSE)) != 0) {
13832		next_priv_port = nextport;
13833		goto retry;
13834	}
13835	return (next_priv_port--);
13836}
13837
13838/* The write side r/w procedure. */
13839
13840#if CCS_STATS
13841struct {
13842	struct {
13843		int64_t count, bytes;
13844	} tot, hit;
13845} wrw_stats;
13846#endif
13847
13848/*
13849 * Call by tcp_wput() to handle all non data, except M_PROTO and M_PCPROTO,
13850 * messages.
13851 */
13852/* ARGSUSED */
13853static void
13854tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
13855{
13856	conn_t	*connp = (conn_t *)arg;
13857	tcp_t	*tcp = connp->conn_tcp;
13858
13859	ASSERT(DB_TYPE(mp) != M_IOCTL);
13860	/*
13861	 * TCP is D_MP and qprocsoff() is done towards the end of the tcp_close.
13862	 * Once the close starts, streamhead and sockfs will not let any data
13863	 * packets come down (close ensures that there are no threads using the
13864	 * queue and no new threads will come down) but since qprocsoff()
13865	 * hasn't happened yet, a M_FLUSH or some non data message might
13866	 * get reflected back (in response to our own FLUSHRW) and get
13867	 * processed after tcp_close() is done. The conn would still be valid
13868	 * because a ref would have added but we need to check the state
13869	 * before actually processing the packet.
13870	 */
13871	if (TCP_IS_DETACHED(tcp) || (tcp->tcp_state == TCPS_CLOSED)) {
13872		freemsg(mp);
13873		return;
13874	}
13875
13876	switch (DB_TYPE(mp)) {
13877	case M_IOCDATA:
13878		tcp_wput_iocdata(tcp, mp);
13879		break;
13880	case M_FLUSH:
13881		tcp_wput_flush(tcp, mp);
13882		break;
13883	default:
13884		ip_wput_nondata(connp->conn_wq, mp);
13885		break;
13886	}
13887}
13888
13889/*
13890 * The TCP fast path write put procedure.
13891 * NOTE: the logic of the fast path is duplicated from tcp_wput_data()
13892 */
13893/* ARGSUSED */
13894void
13895tcp_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
13896{
13897	int		len;
13898	int		hdrlen;
13899	int		plen;
13900	mblk_t		*mp1;
13901	uchar_t		*rptr;
13902	uint32_t	snxt;
13903	tcpha_t		*tcpha;
13904	struct datab	*db;
13905	uint32_t	suna;
13906	uint32_t	mss;
13907	ipaddr_t	*dst;
13908	ipaddr_t	*src;
13909	uint32_t	sum;
13910	int		usable;
13911	conn_t		*connp = (conn_t *)arg;
13912	tcp_t		*tcp = connp->conn_tcp;
13913	uint32_t	msize;
13914	tcp_stack_t	*tcps = tcp->tcp_tcps;
13915	ip_xmit_attr_t	*ixa;
13916
13917	/*
13918	 * Try and ASSERT the minimum possible references on the
13919	 * conn early enough. Since we are executing on write side,
13920	 * the connection is obviously not detached and that means
13921	 * there is a ref each for TCP and IP. Since we are behind
13922	 * the squeue, the minimum references needed are 3. If the
13923	 * conn is in classifier hash list, there should be an
13924	 * extra ref for that (we check both the possibilities).
13925	 */
13926	ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
13927	    (connp->conn_fanout == NULL && connp->conn_ref >= 3));
13928
13929	ASSERT(DB_TYPE(mp) == M_DATA);
13930	msize = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp);
13931
13932	mutex_enter(&tcp->tcp_non_sq_lock);
13933	tcp->tcp_squeue_bytes -= msize;
13934	mutex_exit(&tcp->tcp_non_sq_lock);
13935
13936	/* Bypass tcp protocol for fused tcp loopback */
13937	if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize))
13938		return;
13939
13940	mss = tcp->tcp_mss;
13941	/*
13942	 * If ZEROCOPY has turned off, try not to send any zero-copy message
13943	 * down. Do backoff, now.
13944	 */
13945	if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_snd_zcopy_on)
13946		mp = tcp_zcopy_backoff(tcp, mp, B_FALSE);
13947
13948
13949	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
13950	len = (int)(mp->b_wptr - mp->b_rptr);
13951
13952	/*
13953	 * Criteria for fast path:
13954	 *
13955	 *   1. no unsent data
13956	 *   2. single mblk in request
13957	 *   3. connection established
13958	 *   4. data in mblk
13959	 *   5. len <= mss
13960	 *   6. no tcp_valid bits
13961	 */
13962	if ((tcp->tcp_unsent != 0) ||
13963	    (tcp->tcp_cork) ||
13964	    (mp->b_cont != NULL) ||
13965	    (tcp->tcp_state != TCPS_ESTABLISHED) ||
13966	    (len == 0) ||
13967	    (len > mss) ||
13968	    (tcp->tcp_valid_bits != 0)) {
13969		tcp_wput_data(tcp, mp, B_FALSE);
13970		return;
13971	}
13972
13973	ASSERT(tcp->tcp_xmit_tail_unsent == 0);
13974	ASSERT(tcp->tcp_fin_sent == 0);
13975
13976	/* queue new packet onto retransmission queue */
13977	if (tcp->tcp_xmit_head == NULL) {
13978		tcp->tcp_xmit_head = mp;
13979	} else {
13980		tcp->tcp_xmit_last->b_cont = mp;
13981	}
13982	tcp->tcp_xmit_last = mp;
13983	tcp->tcp_xmit_tail = mp;
13984
13985	/* find out how much we can send */
13986	/* BEGIN CSTYLED */
13987	/*
13988	 *    un-acked	   usable
13989	 *  |--------------|-----------------|
13990	 *  tcp_suna       tcp_snxt	  tcp_suna+tcp_swnd
13991	 */
13992	/* END CSTYLED */
13993
13994	/* start sending from tcp_snxt */
13995	snxt = tcp->tcp_snxt;
13996
13997	/*
13998	 * Check to see if this connection has been idled for some
13999	 * time and no ACK is expected.  If it is, we need to slow
14000	 * start again to get back the connection's "self-clock" as
14001	 * described in VJ's paper.
14002	 *
14003	 * Reinitialize tcp_cwnd after idle.
14004	 */
14005	if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
14006	    (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
14007		SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
14008	}
14009
14010	usable = tcp->tcp_swnd;		/* tcp window size */
14011	if (usable > tcp->tcp_cwnd)
14012		usable = tcp->tcp_cwnd;	/* congestion window smaller */
14013	usable -= snxt;		/* subtract stuff already sent */
14014	suna = tcp->tcp_suna;
14015	usable += suna;
14016	/* usable can be < 0 if the congestion window is smaller */
14017	if (len > usable) {
14018		/* Can't send complete M_DATA in one shot */
14019		goto slow;
14020	}
14021
14022	mutex_enter(&tcp->tcp_non_sq_lock);
14023	if (tcp->tcp_flow_stopped &&
14024	    TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
14025		tcp_clrqfull(tcp);
14026	}
14027	mutex_exit(&tcp->tcp_non_sq_lock);
14028
14029	/*
14030	 * determine if anything to send (Nagle).
14031	 *
14032	 *   1. len < tcp_mss (i.e. small)
14033	 *   2. unacknowledged data present
14034	 *   3. len < nagle limit
14035	 *   4. last packet sent < nagle limit (previous packet sent)
14036	 */
14037	if ((len < mss) && (snxt != suna) &&
14038	    (len < (int)tcp->tcp_naglim) &&
14039	    (tcp->tcp_last_sent_len < tcp->tcp_naglim)) {
14040		/*
14041		 * This was the first unsent packet and normally
14042		 * mss < xmit_hiwater so there is no need to worry
14043		 * about flow control. The next packet will go
14044		 * through the flow control check in tcp_wput_data().
14045		 */
14046		/* leftover work from above */
14047		tcp->tcp_unsent = len;
14048		tcp->tcp_xmit_tail_unsent = len;
14049
14050		return;
14051	}
14052
14053	/* len <= tcp->tcp_mss && len == unsent so no silly window */
14054
14055	if (snxt == suna) {
14056		TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
14057	}
14058
14059	/* we have always sent something */
14060	tcp->tcp_rack_cnt = 0;
14061
14062	tcp->tcp_snxt = snxt + len;
14063	tcp->tcp_rack = tcp->tcp_rnxt;
14064
14065	if ((mp1 = dupb(mp)) == 0)
14066		goto no_memory;
14067	mp->b_prev = (mblk_t *)(uintptr_t)lbolt;
14068	mp->b_next = (mblk_t *)(uintptr_t)snxt;
14069
14070	/* adjust tcp header information */
14071	tcpha = tcp->tcp_tcpha;
14072	tcpha->tha_flags = (TH_ACK|TH_PUSH);
14073
14074	sum = len + connp->conn_ht_ulp_len + connp->conn_sum;
14075	sum = (sum >> 16) + (sum & 0xFFFF);
14076	tcpha->tha_sum = htons(sum);
14077
14078	tcpha->tha_seq = htonl(snxt);
14079
14080	BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs);
14081	UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len);
14082	BUMP_LOCAL(tcp->tcp_obsegs);
14083
14084	/* Update the latest receive window size in TCP header. */
14085	tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
14086
14087	tcp->tcp_last_sent_len = (ushort_t)len;
14088
14089	plen = len + connp->conn_ht_iphc_len;
14090
14091	ixa = connp->conn_ixa;
14092	ixa->ixa_pktlen = plen;
14093
14094	if (ixa->ixa_flags & IXAF_IS_IPV4) {
14095		tcp->tcp_ipha->ipha_length = htons(plen);
14096	} else {
14097		tcp->tcp_ip6h->ip6_plen = htons(plen - IPV6_HDR_LEN);
14098	}
14099
14100	/* see if we need to allocate a mblk for the headers */
14101	hdrlen = connp->conn_ht_iphc_len;
14102	rptr = mp1->b_rptr - hdrlen;
14103	db = mp1->b_datap;
14104	if ((db->db_ref != 2) || rptr < db->db_base ||
14105	    (!OK_32PTR(rptr))) {
14106		/* NOTE: we assume allocb returns an OK_32PTR */
14107		mp = allocb(hdrlen + tcps->tcps_wroff_xtra, BPRI_MED);
14108		if (!mp) {
14109			freemsg(mp1);
14110			goto no_memory;
14111		}
14112		mp->b_cont = mp1;
14113		mp1 = mp;
14114		/* Leave room for Link Level header */
14115		rptr = &mp1->b_rptr[tcps->tcps_wroff_xtra];
14116		mp1->b_wptr = &rptr[hdrlen];
14117	}
14118	mp1->b_rptr = rptr;
14119
14120	/* Fill in the timestamp option. */
14121	if (tcp->tcp_snd_ts_ok) {
14122		U32_TO_BE32((uint32_t)lbolt,
14123		    (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
14124		U32_TO_BE32(tcp->tcp_ts_recent,
14125		    (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
14126	} else {
14127		ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
14128	}
14129
14130	/* copy header into outgoing packet */
14131	dst = (ipaddr_t *)rptr;
14132	src = (ipaddr_t *)connp->conn_ht_iphc;
14133	dst[0] = src[0];
14134	dst[1] = src[1];
14135	dst[2] = src[2];
14136	dst[3] = src[3];
14137	dst[4] = src[4];
14138	dst[5] = src[5];
14139	dst[6] = src[6];
14140	dst[7] = src[7];
14141	dst[8] = src[8];
14142	dst[9] = src[9];
14143	if (hdrlen -= 40) {
14144		hdrlen >>= 2;
14145		dst += 10;
14146		src += 10;
14147		do {
14148			*dst++ = *src++;
14149		} while (--hdrlen);
14150	}
14151
14152	/*
14153	 * Set the ECN info in the TCP header.  Note that this
14154	 * is not the template header.
14155	 */
14156	if (tcp->tcp_ecn_ok) {
14157		SET_ECT(tcp, rptr);
14158
14159		tcpha = (tcpha_t *)(rptr + ixa->ixa_ip_hdr_length);
14160		if (tcp->tcp_ecn_echo_on)
14161			tcpha->tha_flags |= TH_ECE;
14162		if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
14163			tcpha->tha_flags |= TH_CWR;
14164			tcp->tcp_ecn_cwr_sent = B_TRUE;
14165		}
14166	}
14167
14168	if (tcp->tcp_ip_forward_progress) {
14169		tcp->tcp_ip_forward_progress = B_FALSE;
14170		connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
14171	} else {
14172		connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
14173	}
14174	tcp_send_data(tcp, mp1);
14175	return;
14176
14177	/*
14178	 * If we ran out of memory, we pretend to have sent the packet
14179	 * and that it was lost on the wire.
14180	 */
14181no_memory:
14182	return;
14183
14184slow:
14185	/* leftover work from above */
14186	tcp->tcp_unsent = len;
14187	tcp->tcp_xmit_tail_unsent = len;
14188	tcp_wput_data(tcp, NULL, B_FALSE);
14189}
14190
14191/*
14192 * This runs at the tail end of accept processing on the squeue of the
14193 * new connection.
14194 */
14195/* ARGSUSED */
14196void
14197tcp_accept_finish(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
14198{
14199	conn_t			*connp = (conn_t *)arg;
14200	tcp_t			*tcp = connp->conn_tcp;
14201	queue_t			*q = connp->conn_rq;
14202	tcp_stack_t		*tcps = tcp->tcp_tcps;
14203	/* socket options */
14204	struct sock_proto_props	sopp;
14205
14206	/* We should just receive a single mblk that fits a T_discon_ind */
14207	ASSERT(mp->b_cont == NULL);
14208
14209	/*
14210	 * Drop the eager's ref on the listener, that was placed when
14211	 * this eager began life in tcp_input_listener.
14212	 */
14213	CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp);
14214	if (IPCL_IS_NONSTR(connp)) {
14215		/* Safe to free conn_ind message */
14216		freemsg(tcp->tcp_conn.tcp_eager_conn_ind);
14217		tcp->tcp_conn.tcp_eager_conn_ind = NULL;
14218	}
14219
14220	tcp->tcp_detached = B_FALSE;
14221
14222	if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_accept_error) {
14223		/*
14224		 * Someone blewoff the eager before we could finish
14225		 * the accept.
14226		 *
14227		 * The only reason eager exists it because we put in
14228		 * a ref on it when conn ind went up. We need to send
14229		 * a disconnect indication up while the last reference
14230		 * on the eager will be dropped by the squeue when we
14231		 * return.
14232		 */
14233		ASSERT(tcp->tcp_listener == NULL);
14234		if (tcp->tcp_issocket || tcp->tcp_send_discon_ind) {
14235			if (IPCL_IS_NONSTR(connp)) {
14236				ASSERT(tcp->tcp_issocket);
14237				(*connp->conn_upcalls->su_disconnected)(
14238				    connp->conn_upper_handle, tcp->tcp_connid,
14239				    ECONNREFUSED);
14240				freemsg(mp);
14241			} else {
14242				struct	T_discon_ind	*tdi;
14243
14244				(void) putnextctl1(q, M_FLUSH, FLUSHRW);
14245				/*
14246				 * Let us reuse the incoming mblk to avoid
14247				 * memory allocation failure problems. We know
14248				 * that the size of the incoming mblk i.e.
14249				 * stroptions is greater than sizeof
14250				 * T_discon_ind.
14251				 */
14252				ASSERT(DB_REF(mp) == 1);
14253				ASSERT(MBLKSIZE(mp) >=
14254				    sizeof (struct T_discon_ind));
14255
14256				DB_TYPE(mp) = M_PROTO;
14257				((union T_primitives *)mp->b_rptr)->type =
14258				    T_DISCON_IND;
14259				tdi = (struct T_discon_ind *)mp->b_rptr;
14260				if (tcp->tcp_issocket) {
14261					tdi->DISCON_reason = ECONNREFUSED;
14262					tdi->SEQ_number = 0;
14263				} else {
14264					tdi->DISCON_reason = ENOPROTOOPT;
14265					tdi->SEQ_number =
14266					    tcp->tcp_conn_req_seqnum;
14267				}
14268				mp->b_wptr = mp->b_rptr +
14269				    sizeof (struct T_discon_ind);
14270				putnext(q, mp);
14271			}
14272		}
14273		tcp->tcp_hard_binding = B_FALSE;
14274		return;
14275	}
14276
14277	/*
14278	 * Set max window size (conn_rcvbuf) of the acceptor.
14279	 */
14280	if (tcp->tcp_rcv_list == NULL) {
14281		/*
14282		 * Recv queue is empty, tcp_rwnd should not have changed.
14283		 * That means it should be equal to the listener's tcp_rwnd.
14284		 */
14285		connp->conn_rcvbuf = tcp->tcp_rwnd;
14286	} else {
14287#ifdef DEBUG
14288		mblk_t *tmp;
14289		mblk_t	*mp1;
14290		uint_t	cnt = 0;
14291
14292		mp1 = tcp->tcp_rcv_list;
14293		while ((tmp = mp1) != NULL) {
14294			mp1 = tmp->b_next;
14295			cnt += msgdsize(tmp);
14296		}
14297		ASSERT(cnt != 0 && tcp->tcp_rcv_cnt == cnt);
14298#endif
14299		/* There is some data, add them back to get the max. */
14300		connp->conn_rcvbuf = tcp->tcp_rwnd + tcp->tcp_rcv_cnt;
14301	}
14302	/*
14303	 * This is the first time we run on the correct
14304	 * queue after tcp_accept. So fix all the q parameters
14305	 * here.
14306	 */
14307	sopp.sopp_flags = SOCKOPT_RCVHIWAT | SOCKOPT_MAXBLK | SOCKOPT_WROFF;
14308	sopp.sopp_maxblk = tcp_maxpsz_set(tcp, B_FALSE);
14309
14310	sopp.sopp_rxhiwat = tcp->tcp_fused ?
14311	    tcp_fuse_set_rcv_hiwat(tcp, connp->conn_rcvbuf) :
14312	    connp->conn_rcvbuf;
14313
14314	/*
14315	 * Determine what write offset value to use depending on SACK and
14316	 * whether the endpoint is fused or not.
14317	 */
14318	if (tcp->tcp_fused) {
14319		ASSERT(tcp->tcp_loopback);
14320		ASSERT(tcp->tcp_loopback_peer != NULL);
14321		/*
14322		 * For fused tcp loopback, set the stream head's write
14323		 * offset value to zero since we won't be needing any room
14324		 * for TCP/IP headers.  This would also improve performance
14325		 * since it would reduce the amount of work done by kmem.
14326		 * Non-fused tcp loopback case is handled separately below.
14327		 */
14328		sopp.sopp_wroff = 0;
14329		/*
14330		 * Update the peer's transmit parameters according to
14331		 * our recently calculated high water mark value.
14332		 */
14333		(void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
14334	} else if (tcp->tcp_snd_sack_ok) {
14335		sopp.sopp_wroff = connp->conn_ht_iphc_allocated +
14336		    (tcp->tcp_loopback ? 0 : tcps->tcps_wroff_xtra);
14337	} else {
14338		sopp.sopp_wroff = connp->conn_ht_iphc_len +
14339		    (tcp->tcp_loopback ? 0 : tcps->tcps_wroff_xtra);
14340	}
14341
14342	/*
14343	 * If this is endpoint is handling SSL, then reserve extra
14344	 * offset and space at the end.
14345	 * Also have the stream head allocate SSL3_MAX_RECORD_LEN packets,
14346	 * overriding the previous setting. The extra cost of signing and
14347	 * encrypting multiple MSS-size records (12 of them with Ethernet),
14348	 * instead of a single contiguous one by the stream head
14349	 * largely outweighs the statistical reduction of ACKs, when
14350	 * applicable. The peer will also save on decryption and verification
14351	 * costs.
14352	 */
14353	if (tcp->tcp_kssl_ctx != NULL) {
14354		sopp.sopp_wroff += SSL3_WROFFSET;
14355
14356		sopp.sopp_flags |= SOCKOPT_TAIL;
14357		sopp.sopp_tail = SSL3_MAX_TAIL_LEN;
14358
14359		sopp.sopp_flags |= SOCKOPT_ZCOPY;
14360		sopp.sopp_zcopyflag = ZCVMUNSAFE;
14361
14362		sopp.sopp_maxblk = SSL3_MAX_RECORD_LEN;
14363	}
14364
14365	/* Send the options up */
14366	if (IPCL_IS_NONSTR(connp)) {
14367		if (sopp.sopp_flags & SOCKOPT_TAIL) {
14368			ASSERT(tcp->tcp_kssl_ctx != NULL);
14369			ASSERT(sopp.sopp_flags & SOCKOPT_ZCOPY);
14370		}
14371		if (tcp->tcp_loopback) {
14372			sopp.sopp_flags |= SOCKOPT_LOOPBACK;
14373			sopp.sopp_loopback = B_TRUE;
14374		}
14375		(*connp->conn_upcalls->su_set_proto_props)
14376		    (connp->conn_upper_handle, &sopp);
14377		freemsg(mp);
14378	} else {
14379		/*
14380		 * Let us reuse the incoming mblk to avoid
14381		 * memory allocation failure problems. We know
14382		 * that the size of the incoming mblk is at least
14383		 * stroptions
14384		 */
14385		struct stroptions *stropt;
14386
14387		ASSERT(DB_REF(mp) == 1);
14388		ASSERT(MBLKSIZE(mp) >= sizeof (struct stroptions));
14389
14390		DB_TYPE(mp) = M_SETOPTS;
14391		stropt = (struct stroptions *)mp->b_rptr;
14392		mp->b_wptr = mp->b_rptr + sizeof (struct stroptions);
14393		stropt = (struct stroptions *)mp->b_rptr;
14394		stropt->so_flags = SO_HIWAT | SO_WROFF | SO_MAXBLK;
14395		stropt->so_hiwat = sopp.sopp_rxhiwat;
14396		stropt->so_wroff = sopp.sopp_wroff;
14397		stropt->so_maxblk = sopp.sopp_maxblk;
14398
14399		if (sopp.sopp_flags & SOCKOPT_TAIL) {
14400			ASSERT(tcp->tcp_kssl_ctx != NULL);
14401
14402			stropt->so_flags |= SO_TAIL | SO_COPYOPT;
14403			stropt->so_tail = sopp.sopp_tail;
14404			stropt->so_copyopt = sopp.sopp_zcopyflag;
14405		}
14406
14407		/* Send the options up */
14408		putnext(q, mp);
14409	}
14410
14411	/*
14412	 * Pass up any data and/or a fin that has been received.
14413	 *
14414	 * Adjust receive window in case it had decreased
14415	 * (because there is data <=> tcp_rcv_list != NULL)
14416	 * while the connection was detached. Note that
14417	 * in case the eager was flow-controlled, w/o this
14418	 * code, the rwnd may never open up again!
14419	 */
14420	if (tcp->tcp_rcv_list != NULL) {
14421		if (IPCL_IS_NONSTR(connp)) {
14422			mblk_t *mp;
14423			int space_left;
14424			int error;
14425			boolean_t push = B_TRUE;
14426
14427			if (!tcp->tcp_fused && (*connp->conn_upcalls->su_recv)
14428			    (connp->conn_upper_handle, NULL, 0, 0, &error,
14429			    &push) >= 0) {
14430				tcp->tcp_rwnd = connp->conn_rcvbuf;
14431				if (tcp->tcp_state >= TCPS_ESTABLISHED &&
14432				    tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) {
14433					tcp_xmit_ctl(NULL,
14434					    tcp, (tcp->tcp_swnd == 0) ?
14435					    tcp->tcp_suna : tcp->tcp_snxt,
14436					    tcp->tcp_rnxt, TH_ACK);
14437				}
14438			}
14439			while ((mp = tcp->tcp_rcv_list) != NULL) {
14440				push = B_TRUE;
14441				tcp->tcp_rcv_list = mp->b_next;
14442				mp->b_next = NULL;
14443				space_left = (*connp->conn_upcalls->su_recv)
14444				    (connp->conn_upper_handle, mp, msgdsize(mp),
14445				    0, &error, &push);
14446				if (space_left < 0) {
14447					/*
14448					 * We should never be in middle of a
14449					 * fallback, the squeue guarantees that.
14450					 */
14451					ASSERT(error != EOPNOTSUPP);
14452				}
14453			}
14454			tcp->tcp_rcv_last_head = NULL;
14455			tcp->tcp_rcv_last_tail = NULL;
14456			tcp->tcp_rcv_cnt = 0;
14457		} else {
14458			/* We drain directly in case of fused tcp loopback */
14459
14460			if (!tcp->tcp_fused && canputnext(q)) {
14461				tcp->tcp_rwnd = connp->conn_rcvbuf;
14462				if (tcp->tcp_state >= TCPS_ESTABLISHED &&
14463				    tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) {
14464					tcp_xmit_ctl(NULL,
14465					    tcp, (tcp->tcp_swnd == 0) ?
14466					    tcp->tcp_suna : tcp->tcp_snxt,
14467					    tcp->tcp_rnxt, TH_ACK);
14468				}
14469			}
14470
14471			(void) tcp_rcv_drain(tcp);
14472		}
14473
14474		/*
14475		 * For fused tcp loopback, back-enable peer endpoint
14476		 * if it's currently flow-controlled.
14477		 */
14478		if (tcp->tcp_fused) {
14479			tcp_t *peer_tcp = tcp->tcp_loopback_peer;
14480
14481			ASSERT(peer_tcp != NULL);
14482			ASSERT(peer_tcp->tcp_fused);
14483
14484			mutex_enter(&peer_tcp->tcp_non_sq_lock);
14485			if (peer_tcp->tcp_flow_stopped) {
14486				tcp_clrqfull(peer_tcp);
14487				TCP_STAT(tcps, tcp_fusion_backenabled);
14488			}
14489			mutex_exit(&peer_tcp->tcp_non_sq_lock);
14490		}
14491	}
14492	ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
14493	if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) {
14494		tcp->tcp_ordrel_done = B_TRUE;
14495		if (IPCL_IS_NONSTR(connp)) {
14496			ASSERT(tcp->tcp_ordrel_mp == NULL);
14497			(*connp->conn_upcalls->su_opctl)(
14498			    connp->conn_upper_handle,
14499			    SOCK_OPCTL_SHUT_RECV, 0);
14500		} else {
14501			mp = tcp->tcp_ordrel_mp;
14502			tcp->tcp_ordrel_mp = NULL;
14503			putnext(q, mp);
14504		}
14505	}
14506	tcp->tcp_hard_binding = B_FALSE;
14507
14508	if (connp->conn_keepalive) {
14509		tcp->tcp_ka_last_intrvl = 0;
14510		tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer,
14511		    MSEC_TO_TICK(tcp->tcp_ka_interval));
14512	}
14513
14514	/*
14515	 * At this point, eager is fully established and will
14516	 * have the following references -
14517	 *
14518	 * 2 references for connection to exist (1 for TCP and 1 for IP).
14519	 * 1 reference for the squeue which will be dropped by the squeue as
14520	 *	soon as this function returns.
14521	 * There will be 1 additonal reference for being in classifier
14522	 *	hash list provided something bad hasn't happened.
14523	 */
14524	ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
14525	    (connp->conn_fanout == NULL && connp->conn_ref >= 3));
14526}
14527
14528/*
14529 * The function called through squeue to get behind listener's perimeter to
14530 * send a deferred conn_ind.
14531 */
14532/* ARGSUSED */
14533void
14534tcp_send_pending(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
14535{
14536	conn_t	*lconnp = (conn_t *)arg;
14537	tcp_t *listener = lconnp->conn_tcp;
14538	struct T_conn_ind *conn_ind;
14539	tcp_t *tcp;
14540
14541	conn_ind = (struct T_conn_ind *)mp->b_rptr;
14542	bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp,
14543	    conn_ind->OPT_length);
14544
14545	if (listener->tcp_state != TCPS_LISTEN) {
14546		/*
14547		 * If listener has closed, it would have caused a
14548		 * a cleanup/blowoff to happen for the eager, so
14549		 * we don't need to do anything more.
14550		 */
14551		freemsg(mp);
14552		return;
14553	}
14554
14555	tcp_ulp_newconn(lconnp, tcp->tcp_connp, mp);
14556}
14557
14558/*
14559 * Common to TPI and sockfs accept code.
14560 */
14561/* ARGSUSED2 */
14562static int
14563tcp_accept_common(conn_t *lconnp, conn_t *econnp, cred_t *cr)
14564{
14565	tcp_t *listener, *eager;
14566	mblk_t *discon_mp;
14567
14568	listener = lconnp->conn_tcp;
14569	ASSERT(listener->tcp_state == TCPS_LISTEN);
14570	eager = econnp->conn_tcp;
14571	ASSERT(eager->tcp_listener != NULL);
14572
14573	/*
14574	 * Pre allocate the discon_ind mblk also. tcp_accept_finish will
14575	 * use it if something failed.
14576	 */
14577	discon_mp = allocb(MAX(sizeof (struct T_discon_ind),
14578	    sizeof (struct stroptions)), BPRI_HI);
14579
14580	if (discon_mp == NULL) {
14581		return (-TPROTO);
14582	}
14583	eager->tcp_issocket = B_TRUE;
14584
14585	econnp->conn_zoneid = listener->tcp_connp->conn_zoneid;
14586	econnp->conn_allzones = listener->tcp_connp->conn_allzones;
14587	ASSERT(econnp->conn_netstack ==
14588	    listener->tcp_connp->conn_netstack);
14589	ASSERT(eager->tcp_tcps == listener->tcp_tcps);
14590
14591	/* Put the ref for IP */
14592	CONN_INC_REF(econnp);
14593
14594	/*
14595	 * We should have minimum of 3 references on the conn
14596	 * at this point. One each for TCP and IP and one for
14597	 * the T_conn_ind that was sent up when the 3-way handshake
14598	 * completed. In the normal case we would also have another
14599	 * reference (making a total of 4) for the conn being in the
14600	 * classifier hash list. However the eager could have received
14601	 * an RST subsequently and tcp_closei_local could have removed
14602	 * the eager from the classifier hash list, hence we can't
14603	 * assert that reference.
14604	 */
14605	ASSERT(econnp->conn_ref >= 3);
14606
14607	mutex_enter(&listener->tcp_eager_lock);
14608	if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) {
14609
14610		tcp_t *tail;
14611		tcp_t *tcp;
14612		mblk_t *mp1;
14613
14614		tcp = listener->tcp_eager_prev_q0;
14615		/*
14616		 * listener->tcp_eager_prev_q0 points to the TAIL of the
14617		 * deferred T_conn_ind queue. We need to get to the head
14618		 * of the queue in order to send up T_conn_ind the same
14619		 * order as how the 3WHS is completed.
14620		 */
14621		while (tcp != listener) {
14622			if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0 &&
14623			    !tcp->tcp_kssl_pending)
14624				break;
14625			else
14626				tcp = tcp->tcp_eager_prev_q0;
14627		}
14628		/* None of the pending eagers can be sent up now */
14629		if (tcp == listener)
14630			goto no_more_eagers;
14631
14632		mp1 = tcp->tcp_conn.tcp_eager_conn_ind;
14633		tcp->tcp_conn.tcp_eager_conn_ind = NULL;
14634		/* Move from q0 to q */
14635		ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
14636		listener->tcp_conn_req_cnt_q0--;
14637		listener->tcp_conn_req_cnt_q++;
14638		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
14639		    tcp->tcp_eager_prev_q0;
14640		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
14641		    tcp->tcp_eager_next_q0;
14642		tcp->tcp_eager_prev_q0 = NULL;
14643		tcp->tcp_eager_next_q0 = NULL;
14644		tcp->tcp_conn_def_q0 = B_FALSE;
14645
14646		/* Make sure the tcp isn't in the list of droppables */
14647		ASSERT(tcp->tcp_eager_next_drop_q0 == NULL &&
14648		    tcp->tcp_eager_prev_drop_q0 == NULL);
14649
14650		/*
14651		 * Insert at end of the queue because sockfs sends
14652		 * down T_CONN_RES in chronological order. Leaving
14653		 * the older conn indications at front of the queue
14654		 * helps reducing search time.
14655		 */
14656		tail = listener->tcp_eager_last_q;
14657		if (tail != NULL) {
14658			tail->tcp_eager_next_q = tcp;
14659		} else {
14660			listener->tcp_eager_next_q = tcp;
14661		}
14662		listener->tcp_eager_last_q = tcp;
14663		tcp->tcp_eager_next_q = NULL;
14664
14665		/* Need to get inside the listener perimeter */
14666		CONN_INC_REF(listener->tcp_connp);
14667		SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp, mp1,
14668		    tcp_send_pending, listener->tcp_connp, NULL, SQ_FILL,
14669		    SQTAG_TCP_SEND_PENDING);
14670	}
14671no_more_eagers:
14672	tcp_eager_unlink(eager);
14673	mutex_exit(&listener->tcp_eager_lock);
14674
14675	/*
14676	 * At this point, the eager is detached from the listener
14677	 * but we still have an extra refs on eager (apart from the
14678	 * usual tcp references). The ref was placed in tcp_rput_data
14679	 * before sending the conn_ind in tcp_send_conn_ind.
14680	 * The ref will be dropped in tcp_accept_finish().
14681	 */
14682	SQUEUE_ENTER_ONE(econnp->conn_sqp, discon_mp, tcp_accept_finish,
14683	    econnp, NULL, SQ_NODRAIN, SQTAG_TCP_ACCEPT_FINISH_Q0);
14684	return (0);
14685}
14686
14687int
14688tcp_accept(sock_lower_handle_t lproto_handle,
14689    sock_lower_handle_t eproto_handle, sock_upper_handle_t sock_handle,
14690    cred_t *cr)
14691{
14692	conn_t *lconnp, *econnp;
14693	tcp_t *listener, *eager;
14694
14695	lconnp = (conn_t *)lproto_handle;
14696	listener = lconnp->conn_tcp;
14697	ASSERT(listener->tcp_state == TCPS_LISTEN);
14698	econnp = (conn_t *)eproto_handle;
14699	eager = econnp->conn_tcp;
14700	ASSERT(eager->tcp_listener != NULL);
14701
14702	/*
14703	 * It is OK to manipulate these fields outside the eager's squeue
14704	 * because they will not start being used until tcp_accept_finish
14705	 * has been called.
14706	 */
14707	ASSERT(lconnp->conn_upper_handle != NULL);
14708	ASSERT(econnp->conn_upper_handle == NULL);
14709	econnp->conn_upper_handle = sock_handle;
14710	econnp->conn_upcalls = lconnp->conn_upcalls;
14711	ASSERT(IPCL_IS_NONSTR(econnp));
14712	return (tcp_accept_common(lconnp, econnp, cr));
14713}
14714
14715
14716/*
14717 * This is the STREAMS entry point for T_CONN_RES coming down on
14718 * Acceptor STREAM when  sockfs listener does accept processing.
14719 * Read the block comment on top of tcp_input_listener().
14720 */
14721void
14722tcp_tpi_accept(queue_t *q, mblk_t *mp)
14723{
14724	queue_t *rq = RD(q);
14725	struct T_conn_res *conn_res;
14726	tcp_t *eager;
14727	tcp_t *listener;
14728	struct T_ok_ack *ok;
14729	t_scalar_t PRIM_type;
14730	conn_t *econnp;
14731	cred_t *cr;
14732
14733	ASSERT(DB_TYPE(mp) == M_PROTO);
14734
14735	/*
14736	 * All Solaris components should pass a db_credp
14737	 * for this TPI message, hence we ASSERT.
14738	 * But in case there is some other M_PROTO that looks
14739	 * like a TPI message sent by some other kernel
14740	 * component, we check and return an error.
14741	 */
14742	cr = msg_getcred(mp, NULL);
14743	ASSERT(cr != NULL);
14744	if (cr == NULL) {
14745		mp = mi_tpi_err_ack_alloc(mp, TSYSERR, EINVAL);
14746		if (mp != NULL)
14747			putnext(rq, mp);
14748		return;
14749	}
14750	conn_res = (struct T_conn_res *)mp->b_rptr;
14751	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
14752	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_res)) {
14753		mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0);
14754		if (mp != NULL)
14755			putnext(rq, mp);
14756		return;
14757	}
14758	switch (conn_res->PRIM_type) {
14759	case O_T_CONN_RES:
14760	case T_CONN_RES:
14761		/*
14762		 * We pass up an err ack if allocb fails. This will
14763		 * cause sockfs to issue a T_DISCON_REQ which will cause
14764		 * tcp_eager_blowoff to be called. sockfs will then call
14765		 * rq->q_qinfo->qi_qclose to cleanup the acceptor stream.
14766		 * we need to do the allocb up here because we have to
14767		 * make sure rq->q_qinfo->qi_qclose still points to the
14768		 * correct function (tcp_tpi_close_accept) in case allocb
14769		 * fails.
14770		 */
14771		bcopy(mp->b_rptr + conn_res->OPT_offset,
14772		    &eager, conn_res->OPT_length);
14773		PRIM_type = conn_res->PRIM_type;
14774		mp->b_datap->db_type = M_PCPROTO;
14775		mp->b_wptr = mp->b_rptr + sizeof (struct T_ok_ack);
14776		ok = (struct T_ok_ack *)mp->b_rptr;
14777		ok->PRIM_type = T_OK_ACK;
14778		ok->CORRECT_prim = PRIM_type;
14779		econnp = eager->tcp_connp;
14780		econnp->conn_dev = (dev_t)RD(q)->q_ptr;
14781		econnp->conn_minor_arena = (vmem_t *)(WR(q)->q_ptr);
14782		econnp->conn_rq = rq;
14783		econnp->conn_wq = q;
14784		rq->q_ptr = econnp;
14785		rq->q_qinfo = &tcp_rinitv4;	/* No open - same as rinitv6 */
14786		q->q_ptr = econnp;
14787		q->q_qinfo = &tcp_winit;
14788		listener = eager->tcp_listener;
14789
14790		if (tcp_accept_common(listener->tcp_connp,
14791		    econnp, cr) < 0) {
14792			mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0);
14793			if (mp != NULL)
14794				putnext(rq, mp);
14795			return;
14796		}
14797
14798		/*
14799		 * Send the new local address also up to sockfs. There
14800		 * should already be enough space in the mp that came
14801		 * down from soaccept().
14802		 */
14803		if (econnp->conn_family == AF_INET) {
14804			sin_t *sin;
14805
14806			ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >=
14807			    (sizeof (struct T_ok_ack) + sizeof (sin_t)));
14808			sin = (sin_t *)mp->b_wptr;
14809			mp->b_wptr += sizeof (sin_t);
14810			sin->sin_family = AF_INET;
14811			sin->sin_port = econnp->conn_lport;
14812			sin->sin_addr.s_addr = econnp->conn_laddr_v4;
14813		} else {
14814			sin6_t *sin6;
14815
14816			ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >=
14817			    sizeof (struct T_ok_ack) + sizeof (sin6_t));
14818			sin6 = (sin6_t *)mp->b_wptr;
14819			mp->b_wptr += sizeof (sin6_t);
14820			sin6->sin6_family = AF_INET6;
14821			sin6->sin6_port = econnp->conn_lport;
14822			sin6->sin6_addr = econnp->conn_laddr_v6;
14823			if (econnp->conn_ipversion == IPV4_VERSION) {
14824				sin6->sin6_flowinfo = 0;
14825			} else {
14826				ASSERT(eager->tcp_ip6h != NULL);
14827				sin6->sin6_flowinfo =
14828				    eager->tcp_ip6h->ip6_vcf &
14829				    ~IPV6_VERS_AND_FLOW_MASK;
14830			}
14831			if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6) &&
14832			    (econnp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)) {
14833				sin6->sin6_scope_id =
14834				    econnp->conn_ixa->ixa_scopeid;
14835			} else {
14836				sin6->sin6_scope_id = 0;
14837			}
14838			sin6->__sin6_src_id = 0;
14839		}
14840
14841		putnext(rq, mp);
14842		return;
14843	default:
14844		mp = mi_tpi_err_ack_alloc(mp, TNOTSUPPORT, 0);
14845		if (mp != NULL)
14846			putnext(rq, mp);
14847		return;
14848	}
14849}
14850
14851/*
14852 * Handle special out-of-band ioctl requests (see PSARC/2008/265).
14853 */
14854static void
14855tcp_wput_cmdblk(queue_t *q, mblk_t *mp)
14856{
14857	void	*data;
14858	mblk_t	*datamp = mp->b_cont;
14859	conn_t	*connp = Q_TO_CONN(q);
14860	tcp_t	*tcp = connp->conn_tcp;
14861	cmdblk_t *cmdp = (cmdblk_t *)mp->b_rptr;
14862
14863	if (datamp == NULL || MBLKL(datamp) < cmdp->cb_len) {
14864		cmdp->cb_error = EPROTO;
14865		qreply(q, mp);
14866		return;
14867	}
14868
14869	data = datamp->b_rptr;
14870
14871	switch (cmdp->cb_cmd) {
14872	case TI_GETPEERNAME:
14873		if (tcp->tcp_state < TCPS_SYN_RCVD)
14874			cmdp->cb_error = ENOTCONN;
14875		else
14876			cmdp->cb_error = conn_getpeername(connp, data,
14877			    &cmdp->cb_len);
14878		break;
14879	case TI_GETMYNAME:
14880		cmdp->cb_error = conn_getsockname(connp, data, &cmdp->cb_len);
14881		break;
14882	default:
14883		cmdp->cb_error = EINVAL;
14884		break;
14885	}
14886
14887	qreply(q, mp);
14888}
14889
14890void
14891tcp_wput(queue_t *q, mblk_t *mp)
14892{
14893	conn_t	*connp = Q_TO_CONN(q);
14894	tcp_t	*tcp;
14895	void (*output_proc)();
14896	t_scalar_t type;
14897	uchar_t *rptr;
14898	struct iocblk	*iocp;
14899	size_t size;
14900	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;
14901
14902	ASSERT(connp->conn_ref >= 2);
14903
14904	switch (DB_TYPE(mp)) {
14905	case M_DATA:
14906		tcp = connp->conn_tcp;
14907		ASSERT(tcp != NULL);
14908
14909		size = msgdsize(mp);
14910
14911		mutex_enter(&tcp->tcp_non_sq_lock);
14912		tcp->tcp_squeue_bytes += size;
14913		if (TCP_UNSENT_BYTES(tcp) > connp->conn_sndbuf) {
14914			tcp_setqfull(tcp);
14915		}
14916		mutex_exit(&tcp->tcp_non_sq_lock);
14917
14918		CONN_INC_REF(connp);
14919		SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_output, connp,
14920		    NULL, tcp_squeue_flag, SQTAG_TCP_OUTPUT);
14921		return;
14922
14923	case M_CMD:
14924		tcp_wput_cmdblk(q, mp);
14925		return;
14926
14927	case M_PROTO:
14928	case M_PCPROTO:
14929		/*
14930		 * if it is a snmp message, don't get behind the squeue
14931		 */
14932		tcp = connp->conn_tcp;
14933		rptr = mp->b_rptr;
14934		if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) {
14935			type = ((union T_primitives *)rptr)->type;
14936		} else {
14937			if (connp->conn_debug) {
14938				(void) strlog(TCP_MOD_ID, 0, 1,
14939				    SL_ERROR|SL_TRACE,
14940				    "tcp_wput_proto, dropping one...");
14941			}
14942			freemsg(mp);
14943			return;
14944		}
14945		if (type == T_SVR4_OPTMGMT_REQ) {
14946			/*
14947			 * All Solaris components should pass a db_credp
14948			 * for this TPI message, hence we ASSERT.
14949			 * But in case there is some other M_PROTO that looks
14950			 * like a TPI message sent by some other kernel
14951			 * component, we check and return an error.
14952			 */
14953			cred_t	*cr = msg_getcred(mp, NULL);
14954
14955			ASSERT(cr != NULL);
14956			if (cr == NULL) {
14957				tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
14958				return;
14959			}
14960			if (snmpcom_req(q, mp, tcp_snmp_set, ip_snmp_get,
14961			    cr)) {
14962				/*
14963				 * This was a SNMP request
14964				 */
14965				return;
14966			} else {
14967				output_proc = tcp_wput_proto;
14968			}
14969		} else {
14970			output_proc = tcp_wput_proto;
14971		}
14972		break;
14973	case M_IOCTL:
14974		/*
14975		 * Most ioctls can be processed right away without going via
14976		 * squeues - process them right here. Those that do require
14977		 * squeue (currently _SIOCSOCKFALLBACK)
14978		 * are processed by tcp_wput_ioctl().
14979		 */
14980		iocp = (struct iocblk *)mp->b_rptr;
14981		tcp = connp->conn_tcp;
14982
14983		switch (iocp->ioc_cmd) {
14984		case TCP_IOC_ABORT_CONN:
14985			tcp_ioctl_abort_conn(q, mp);
14986			return;
14987		case TI_GETPEERNAME:
14988		case TI_GETMYNAME:
14989			mi_copyin(q, mp, NULL,
14990			    SIZEOF_STRUCT(strbuf, iocp->ioc_flag));
14991			return;
14992		case ND_SET:
14993			/* nd_getset does the necessary checks */
14994		case ND_GET:
14995			if (nd_getset(q, tcps->tcps_g_nd, mp)) {
14996				qreply(q, mp);
14997				return;
14998			}
14999			ip_wput_nondata(q, mp);
15000			return;
15001
15002		default:
15003			output_proc = tcp_wput_ioctl;
15004			break;
15005		}
15006		break;
15007	default:
15008		output_proc = tcp_wput_nondata;
15009		break;
15010	}
15011
15012	CONN_INC_REF(connp);
15013	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, output_proc, connp,
15014	    NULL, tcp_squeue_flag, SQTAG_TCP_WPUT_OTHER);
15015}
15016
15017/*
15018 * Initial STREAMS write side put() procedure for sockets. It tries to
15019 * handle the T_CAPABILITY_REQ which sockfs sends down while setting
15020 * up the socket without using the squeue. Non T_CAPABILITY_REQ messages
15021 * are handled by tcp_wput() as usual.
15022 *
15023 * All further messages will also be handled by tcp_wput() because we cannot
15024 * be sure that the above short cut is safe later.
15025 */
15026static void
15027tcp_wput_sock(queue_t *wq, mblk_t *mp)
15028{
15029	conn_t			*connp = Q_TO_CONN(wq);
15030	tcp_t			*tcp = connp->conn_tcp;
15031	struct T_capability_req	*car = (struct T_capability_req *)mp->b_rptr;
15032
15033	ASSERT(wq->q_qinfo == &tcp_sock_winit);
15034	wq->q_qinfo = &tcp_winit;
15035
15036	ASSERT(IPCL_IS_TCP(connp));
15037	ASSERT(TCP_IS_SOCKET(tcp));
15038
15039	if (DB_TYPE(mp) == M_PCPROTO &&
15040	    MBLKL(mp) == sizeof (struct T_capability_req) &&
15041	    car->PRIM_type == T_CAPABILITY_REQ) {
15042		tcp_capability_req(tcp, mp);
15043		return;
15044	}
15045
15046	tcp_wput(wq, mp);
15047}
15048
15049/* ARGSUSED */
15050static void
15051tcp_wput_fallback(queue_t *wq, mblk_t *mp)
15052{
15053#ifdef DEBUG
15054	cmn_err(CE_CONT, "tcp_wput_fallback: Message during fallback \n");
15055#endif
15056	freemsg(mp);
15057}
15058
15059/*
15060 * Check the usability of ZEROCOPY. It's instead checking the flag set by IP.
15061 */
15062static boolean_t
15063tcp_zcopy_check(tcp_t *tcp)
15064{
15065	conn_t		*connp = tcp->tcp_connp;
15066	ip_xmit_attr_t	*ixa = connp->conn_ixa;
15067	boolean_t	zc_enabled = B_FALSE;
15068	tcp_stack_t	*tcps = tcp->tcp_tcps;
15069
15070	if (do_tcpzcopy == 2)
15071		zc_enabled = B_TRUE;
15072	else if ((do_tcpzcopy == 1) && (ixa->ixa_flags & IXAF_ZCOPY_CAPAB))
15073		zc_enabled = B_TRUE;
15074
15075	tcp->tcp_snd_zcopy_on = zc_enabled;
15076	if (!TCP_IS_DETACHED(tcp)) {
15077		if (zc_enabled) {
15078			ixa->ixa_flags |= IXAF_VERIFY_ZCOPY;
15079			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
15080			    ZCVMSAFE);
15081			TCP_STAT(tcps, tcp_zcopy_on);
15082		} else {
15083			ixa->ixa_flags &= ~IXAF_VERIFY_ZCOPY;
15084			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
15085			    ZCVMUNSAFE);
15086			TCP_STAT(tcps, tcp_zcopy_off);
15087		}
15088	}
15089	return (zc_enabled);
15090}
15091
15092/*
15093 * Backoff from a zero-copy message by copying data to a new allocated
15094 * message and freeing the original desballoca'ed segmapped message.
15095 *
15096 * This function is called by following two callers:
15097 * 1. tcp_timer: fix_xmitlist is set to B_TRUE, because it's safe to free
15098 *    the origial desballoca'ed message and notify sockfs. This is in re-
15099 *    transmit state.
15100 * 2. tcp_output: fix_xmitlist is set to B_FALSE. Flag STRUIO_ZCNOTIFY need
15101 *    to be copied to new message.
15102 */
15103static mblk_t *
15104tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, boolean_t fix_xmitlist)
15105{
15106	mblk_t		*nbp;
15107	mblk_t		*head = NULL;
15108	mblk_t		*tail = NULL;
15109	tcp_stack_t	*tcps = tcp->tcp_tcps;
15110
15111	ASSERT(bp != NULL);
15112	while (bp != NULL) {
15113		if (IS_VMLOANED_MBLK(bp)) {
15114			TCP_STAT(tcps, tcp_zcopy_backoff);
15115			if ((nbp = copyb(bp)) == NULL) {
15116				tcp->tcp_xmit_zc_clean = B_FALSE;
15117				if (tail != NULL)
15118					tail->b_cont = bp;
15119				return ((head == NULL) ? bp : head);
15120			}
15121
15122			if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) {
15123				if (fix_xmitlist)
15124					tcp_zcopy_notify(tcp);
15125				else
15126					nbp->b_datap->db_struioflag |=
15127					    STRUIO_ZCNOTIFY;
15128			}
15129			nbp->b_cont = bp->b_cont;
15130
15131			/*
15132			 * Copy saved information and adjust tcp_xmit_tail
15133			 * if needed.
15134			 */
15135			if (fix_xmitlist) {
15136				nbp->b_prev = bp->b_prev;
15137				nbp->b_next = bp->b_next;
15138
15139				if (tcp->tcp_xmit_tail == bp)
15140					tcp->tcp_xmit_tail = nbp;
15141			}
15142
15143			/* Free the original message. */
15144			bp->b_prev = NULL;
15145			bp->b_next = NULL;
15146			freeb(bp);
15147
15148			bp = nbp;
15149		}
15150
15151		if (head == NULL) {
15152			head = bp;
15153		}
15154		if (tail == NULL) {
15155			tail = bp;
15156		} else {
15157			tail->b_cont = bp;
15158			tail = bp;
15159		}
15160
15161		/* Move forward. */
15162		bp = bp->b_cont;
15163	}
15164
15165	if (fix_xmitlist) {
15166		tcp->tcp_xmit_last = tail;
15167		tcp->tcp_xmit_zc_clean = B_TRUE;
15168	}
15169
15170	return (head);
15171}
15172
15173static void
15174tcp_zcopy_notify(tcp_t *tcp)
15175{
15176	struct stdata	*stp;
15177	conn_t		*connp;
15178
15179	if (tcp->tcp_detached)
15180		return;
15181	connp = tcp->tcp_connp;
15182	if (IPCL_IS_NONSTR(connp)) {
15183		(*connp->conn_upcalls->su_zcopy_notify)
15184		    (connp->conn_upper_handle);
15185		return;
15186	}
15187	stp = STREAM(connp->conn_rq);
15188	mutex_enter(&stp->sd_lock);
15189	stp->sd_flag |= STZCNOTIFY;
15190	cv_broadcast(&stp->sd_zcopy_wait);
15191	mutex_exit(&stp->sd_lock);
15192}
15193
15194/*
15195 * Update the TCP connection according to change of LSO capability.
15196 */
15197static void
15198tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa)
15199{
15200	/*
15201	 * We check against IPv4 header length to preserve the old behavior
15202	 * of only enabling LSO when there are no IP options.
15203	 * But this restriction might not be necessary at all. Before removing
15204	 * it, need to verify how LSO is handled for source routing case, with
15205	 * which IP does software checksum.
15206	 *
15207	 * For IPv6, whenever any extension header is needed, LSO is supressed.
15208	 */
15209	if (ixa->ixa_ip_hdr_length != ((ixa->ixa_flags & IXAF_IS_IPV4) ?
15210	    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN))
15211		return;
15212
15213	/*
15214	 * Either the LSO capability newly became usable, or it has changed.
15215	 */
15216	if (ixa->ixa_flags & IXAF_LSO_CAPAB) {
15217		ill_lso_capab_t	*lsoc = &ixa->ixa_lso_capab;
15218
15219		ASSERT(lsoc->ill_lso_max > 0);
15220		tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH, lsoc->ill_lso_max);
15221
15222		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
15223		    boolean_t, B_TRUE, uint32_t, tcp->tcp_lso_max);
15224
15225		/*
15226		 * If LSO to be enabled, notify the STREAM header with larger
15227		 * data block.
15228		 */
15229		if (!tcp->tcp_lso)
15230			tcp->tcp_maxpsz_multiplier = 0;
15231
15232		tcp->tcp_lso = B_TRUE;
15233		TCP_STAT(tcp->tcp_tcps, tcp_lso_enabled);
15234	} else { /* LSO capability is not usable any more. */
15235		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
15236		    boolean_t, B_FALSE, uint32_t, tcp->tcp_lso_max);
15237
15238		/*
15239		 * If LSO to be disabled, notify the STREAM header with smaller
15240		 * data block. And need to restore fragsize to PMTU.
15241		 */
15242		if (tcp->tcp_lso) {
15243			tcp->tcp_maxpsz_multiplier =
15244			    tcp->tcp_tcps->tcps_maxpsz_multiplier;
15245			ixa->ixa_fragsize = ixa->ixa_pmtu;
15246			tcp->tcp_lso = B_FALSE;
15247			TCP_STAT(tcp->tcp_tcps, tcp_lso_disabled);
15248		}
15249	}
15250
15251	(void) tcp_maxpsz_set(tcp, B_TRUE);
15252}
15253
15254/*
15255 * Update the TCP connection according to change of ZEROCOPY capability.
15256 */
15257static void
15258tcp_update_zcopy(tcp_t *tcp)
15259{
15260	conn_t		*connp = tcp->tcp_connp;
15261	tcp_stack_t	*tcps = tcp->tcp_tcps;
15262
15263	if (tcp->tcp_snd_zcopy_on) {
15264		tcp->tcp_snd_zcopy_on = B_FALSE;
15265		if (!TCP_IS_DETACHED(tcp)) {
15266			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
15267			    ZCVMUNSAFE);
15268			TCP_STAT(tcps, tcp_zcopy_off);
15269		}
15270	} else {
15271		tcp->tcp_snd_zcopy_on = B_TRUE;
15272		if (!TCP_IS_DETACHED(tcp)) {
15273			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
15274			    ZCVMSAFE);
15275			TCP_STAT(tcps, tcp_zcopy_on);
15276		}
15277	}
15278}
15279
15280/*
15281 * Notify function registered with ip_xmit_attr_t. It's called in the squeue
15282 * so it's safe to update the TCP connection.
15283 */
15284/* ARGSUSED1 */
15285static void
15286tcp_notify(void *arg, ip_xmit_attr_t *ixa, ixa_notify_type_t ntype,
15287    ixa_notify_arg_t narg)
15288{
15289	tcp_t		*tcp = (tcp_t *)arg;
15290	conn_t		*connp = tcp->tcp_connp;
15291
15292	switch (ntype) {
15293	case IXAN_LSO:
15294		tcp_update_lso(tcp, connp->conn_ixa);
15295		break;
15296	case IXAN_PMTU:
15297		tcp_update_pmtu(tcp, B_FALSE);
15298		break;
15299	case IXAN_ZCOPY:
15300		tcp_update_zcopy(tcp);
15301		break;
15302	default:
15303		break;
15304	}
15305}
15306
15307static void
15308tcp_send_data(tcp_t *tcp, mblk_t *mp)
15309{
15310	conn_t		*connp = tcp->tcp_connp;
15311
15312	/*
15313	 * Check here to avoid sending zero-copy message down to IP when
15314	 * ZEROCOPY capability has turned off. We only need to deal with
15315	 * the race condition between sockfs and the notification here.
15316	 * Since we have tried to backoff the tcp_xmit_head when turning
15317	 * zero-copy off and new messages in tcp_output(), we simply drop
15318	 * the dup'ed packet here and let tcp retransmit, if tcp_xmit_zc_clean
15319	 * is not true.
15320	 */
15321	if (tcp->tcp_snd_zcopy_aware && !tcp->tcp_snd_zcopy_on &&
15322	    !tcp->tcp_xmit_zc_clean) {
15323		ip_drop_output("TCP ZC was disabled but not clean", mp, NULL);
15324		freemsg(mp);
15325		return;
15326	}
15327
15328	ASSERT(connp->conn_ixa->ixa_notify_cookie == connp->conn_tcp);
15329	(void) conn_ip_output(mp, connp->conn_ixa);
15330}
15331
15332/*
15333 * This handles the case when the receiver has shrunk its win. Per RFC 1122
15334 * if the receiver shrinks the window, i.e. moves the right window to the
15335 * left, the we should not send new data, but should retransmit normally the
15336 * old unacked data between suna and suna + swnd. We might has sent data
15337 * that is now outside the new window, pretend that we didn't send  it.
15338 */
15339static void
15340tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_count)
15341{
15342	uint32_t	snxt = tcp->tcp_snxt;
15343
15344	ASSERT(shrunk_count > 0);
15345
15346	if (!tcp->tcp_is_wnd_shrnk) {
15347		tcp->tcp_snxt_shrunk = snxt;
15348		tcp->tcp_is_wnd_shrnk = B_TRUE;
15349	} else if (SEQ_GT(snxt, tcp->tcp_snxt_shrunk)) {
15350		tcp->tcp_snxt_shrunk = snxt;
15351	}
15352
15353	/* Pretend we didn't send the data outside the window */
15354	snxt -= shrunk_count;
15355
15356	/* Reset all the values per the now shrunk window */
15357	tcp_update_xmit_tail(tcp, snxt);
15358	tcp->tcp_unsent += shrunk_count;
15359
15360	/*
15361	 * If the SACK option is set, delete the entire list of
15362	 * notsack'ed blocks.
15363	 */
15364	if (tcp->tcp_sack_info != NULL) {
15365		if (tcp->tcp_notsack_list != NULL)
15366			TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
15367	}
15368
15369	if (tcp->tcp_suna == tcp->tcp_snxt && tcp->tcp_swnd == 0)
15370		/*
15371		 * Make sure the timer is running so that we will probe a zero
15372		 * window.
15373		 */
15374		TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
15375}
15376
15377
15378/*
15379 * The TCP normal data output path.
15380 * NOTE: the logic of the fast path is duplicated from this function.
15381 */
15382static void
15383tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent)
15384{
15385	int		len;
15386	mblk_t		*local_time;
15387	mblk_t		*mp1;
15388	uint32_t	snxt;
15389	int		tail_unsent;
15390	int		tcpstate;
15391	int		usable = 0;
15392	mblk_t		*xmit_tail;
15393	int32_t		mss;
15394	int32_t		num_sack_blk = 0;
15395	int32_t		total_hdr_len;
15396	int32_t		tcp_hdr_len;
15397	int		rc;
15398	tcp_stack_t	*tcps = tcp->tcp_tcps;
15399	conn_t		*connp = tcp->tcp_connp;
15400
15401	tcpstate = tcp->tcp_state;
15402	if (mp == NULL) {
15403		/*
15404		 * tcp_wput_data() with NULL mp should only be called when
15405		 * there is unsent data.
15406		 */
15407		ASSERT(tcp->tcp_unsent > 0);
15408		/* Really tacky... but we need this for detached closes. */
15409		len = tcp->tcp_unsent;
15410		goto data_null;
15411	}
15412
15413#if CCS_STATS
15414	wrw_stats.tot.count++;
15415	wrw_stats.tot.bytes += msgdsize(mp);
15416#endif
15417	ASSERT(mp->b_datap->db_type == M_DATA);
15418	/*
15419	 * Don't allow data after T_ORDREL_REQ or T_DISCON_REQ,
15420	 * or before a connection attempt has begun.
15421	 */
15422	if (tcpstate < TCPS_SYN_SENT || tcpstate > TCPS_CLOSE_WAIT ||
15423	    (tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) {
15424		if ((tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) {
15425#ifdef DEBUG
15426			cmn_err(CE_WARN,
15427			    "tcp_wput_data: data after ordrel, %s",
15428			    tcp_display(tcp, NULL,
15429			    DISP_ADDR_AND_PORT));
15430#else
15431			if (connp->conn_debug) {
15432				(void) strlog(TCP_MOD_ID, 0, 1,
15433				    SL_TRACE|SL_ERROR,
15434				    "tcp_wput_data: data after ordrel, %s\n",
15435				    tcp_display(tcp, NULL,
15436				    DISP_ADDR_AND_PORT));
15437			}
15438#endif /* DEBUG */
15439		}
15440		if (tcp->tcp_snd_zcopy_aware &&
15441		    (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
15442			tcp_zcopy_notify(tcp);
15443		freemsg(mp);
15444		mutex_enter(&tcp->tcp_non_sq_lock);
15445		if (tcp->tcp_flow_stopped &&
15446		    TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
15447			tcp_clrqfull(tcp);
15448		}
15449		mutex_exit(&tcp->tcp_non_sq_lock);
15450		return;
15451	}
15452
15453	/* Strip empties */
15454	for (;;) {
15455		ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
15456		    (uintptr_t)INT_MAX);
15457		len = (int)(mp->b_wptr - mp->b_rptr);
15458		if (len > 0)
15459			break;
15460		mp1 = mp;
15461		mp = mp->b_cont;
15462		freeb(mp1);
15463		if (!mp) {
15464			return;
15465		}
15466	}
15467
15468	/* If we are the first on the list ... */
15469	if (tcp->tcp_xmit_head == NULL) {
15470		tcp->tcp_xmit_head = mp;
15471		tcp->tcp_xmit_tail = mp;
15472		tcp->tcp_xmit_tail_unsent = len;
15473	} else {
15474		/* If tiny tx and room in txq tail, pullup to save mblks. */
15475		struct datab *dp;
15476
15477		mp1 = tcp->tcp_xmit_last;
15478		if (len < tcp_tx_pull_len &&
15479		    (dp = mp1->b_datap)->db_ref == 1 &&
15480		    dp->db_lim - mp1->b_wptr >= len) {
15481			ASSERT(len > 0);
15482			ASSERT(!mp1->b_cont);
15483			if (len == 1) {
15484				*mp1->b_wptr++ = *mp->b_rptr;
15485			} else {
15486				bcopy(mp->b_rptr, mp1->b_wptr, len);
15487				mp1->b_wptr += len;
15488			}
15489			if (mp1 == tcp->tcp_xmit_tail)
15490				tcp->tcp_xmit_tail_unsent += len;
15491			mp1->b_cont = mp->b_cont;
15492			if (tcp->tcp_snd_zcopy_aware &&
15493			    (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
15494				mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
15495			freeb(mp);
15496			mp = mp1;
15497		} else {
15498			tcp->tcp_xmit_last->b_cont = mp;
15499		}
15500		len += tcp->tcp_unsent;
15501	}
15502
15503	/* Tack on however many more positive length mblks we have */
15504	if ((mp1 = mp->b_cont) != NULL) {
15505		do {
15506			int tlen;
15507			ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
15508			    (uintptr_t)INT_MAX);
15509			tlen = (int)(mp1->b_wptr - mp1->b_rptr);
15510			if (tlen <= 0) {
15511				mp->b_cont = mp1->b_cont;
15512				freeb(mp1);
15513			} else {
15514				len += tlen;
15515				mp = mp1;
15516			}
15517		} while ((mp1 = mp->b_cont) != NULL);
15518	}
15519	tcp->tcp_xmit_last = mp;
15520	tcp->tcp_unsent = len;
15521
15522	if (urgent)
15523		usable = 1;
15524
15525data_null:
15526	snxt = tcp->tcp_snxt;
15527	xmit_tail = tcp->tcp_xmit_tail;
15528	tail_unsent = tcp->tcp_xmit_tail_unsent;
15529
15530	/*
15531	 * Note that tcp_mss has been adjusted to take into account the
15532	 * timestamp option if applicable.  Because SACK options do not
15533	 * appear in every TCP segments and they are of variable lengths,
15534	 * they cannot be included in tcp_mss.  Thus we need to calculate
15535	 * the actual segment length when we need to send a segment which
15536	 * includes SACK options.
15537	 */
15538	if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
15539		int32_t	opt_len;
15540
15541		num_sack_blk = MIN(tcp->tcp_max_sack_blk,
15542		    tcp->tcp_num_sack_blk);
15543		opt_len = num_sack_blk * sizeof (sack_blk_t) + TCPOPT_NOP_LEN *
15544		    2 + TCPOPT_HEADER_LEN;
15545		mss = tcp->tcp_mss - opt_len;
15546		total_hdr_len = connp->conn_ht_iphc_len + opt_len;
15547		tcp_hdr_len = connp->conn_ht_ulp_len + opt_len;
15548	} else {
15549		mss = tcp->tcp_mss;
15550		total_hdr_len = connp->conn_ht_iphc_len;
15551		tcp_hdr_len = connp->conn_ht_ulp_len;
15552	}
15553
15554	if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
15555	    (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
15556		SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
15557	}
15558	if (tcpstate == TCPS_SYN_RCVD) {
15559		/*
15560		 * The three-way connection establishment handshake is not
15561		 * complete yet. We want to queue the data for transmission
15562		 * after entering ESTABLISHED state (RFC793). A jump to
15563		 * "done" label effectively leaves data on the queue.
15564		 */
15565		goto done;
15566	} else {
15567		int usable_r;
15568
15569		/*
15570		 * In the special case when cwnd is zero, which can only
15571		 * happen if the connection is ECN capable, return now.
15572		 * New segments is sent using tcp_timer().  The timer
15573		 * is set in tcp_input_data().
15574		 */
15575		if (tcp->tcp_cwnd == 0) {
15576			/*
15577			 * Note that tcp_cwnd is 0 before 3-way handshake is
15578			 * finished.
15579			 */
15580			ASSERT(tcp->tcp_ecn_ok ||
15581			    tcp->tcp_state < TCPS_ESTABLISHED);
15582			return;
15583		}
15584
15585		/* NOTE: trouble if xmitting while SYN not acked? */
15586		usable_r = snxt - tcp->tcp_suna;
15587		usable_r = tcp->tcp_swnd - usable_r;
15588
15589		/*
15590		 * Check if the receiver has shrunk the window.  If
15591		 * tcp_wput_data() with NULL mp is called, tcp_fin_sent
15592		 * cannot be set as there is unsent data, so FIN cannot
15593		 * be sent out.  Otherwise, we need to take into account
15594		 * of FIN as it consumes an "invisible" sequence number.
15595		 */
15596		ASSERT(tcp->tcp_fin_sent == 0);
15597		if (usable_r < 0) {
15598			/*
15599			 * The receiver has shrunk the window and we have sent
15600			 * -usable_r date beyond the window, re-adjust.
15601			 *
15602			 * If TCP window scaling is enabled, there can be
15603			 * round down error as the advertised receive window
15604			 * is actually right shifted n bits.  This means that
15605			 * the lower n bits info is wiped out.  It will look
15606			 * like the window is shrunk.  Do a check here to
15607			 * see if the shrunk amount is actually within the
15608			 * error in window calculation.  If it is, just
15609			 * return.  Note that this check is inside the
15610			 * shrunk window check.  This makes sure that even
15611			 * though tcp_process_shrunk_swnd() is not called,
15612			 * we will stop further processing.
15613			 */
15614			if ((-usable_r >> tcp->tcp_snd_ws) > 0) {
15615				tcp_process_shrunk_swnd(tcp, -usable_r);
15616			}
15617			return;
15618		}
15619
15620		/* usable = MIN(swnd, cwnd) - unacked_bytes */
15621		if (tcp->tcp_swnd > tcp->tcp_cwnd)
15622			usable_r -= tcp->tcp_swnd - tcp->tcp_cwnd;
15623
15624		/* usable = MIN(usable, unsent) */
15625		if (usable_r > len)
15626			usable_r = len;
15627
15628		/* usable = MAX(usable, {1 for urgent, 0 for data}) */
15629		if (usable_r > 0) {
15630			usable = usable_r;
15631		} else {
15632			/* Bypass all other unnecessary processing. */
15633			goto done;
15634		}
15635	}
15636
15637	local_time = (mblk_t *)lbolt;
15638
15639	/*
15640	 * "Our" Nagle Algorithm.  This is not the same as in the old
15641	 * BSD.  This is more in line with the true intent of Nagle.
15642	 *
15643	 * The conditions are:
15644	 * 1. The amount of unsent data (or amount of data which can be
15645	 *    sent, whichever is smaller) is less than Nagle limit.
15646	 * 2. The last sent size is also less than Nagle limit.
15647	 * 3. There is unack'ed data.
15648	 * 4. Urgent pointer is not set.  Send urgent data ignoring the
15649	 *    Nagle algorithm.  This reduces the probability that urgent
15650	 *    bytes get "merged" together.
15651	 * 5. The app has not closed the connection.  This eliminates the
15652	 *    wait time of the receiving side waiting for the last piece of
15653	 *    (small) data.
15654	 *
15655	 * If all are satisified, exit without sending anything.  Note
15656	 * that Nagle limit can be smaller than 1 MSS.  Nagle limit is
15657	 * the smaller of 1 MSS and global tcp_naglim_def (default to be
15658	 * 4095).
15659	 */
15660	if (usable < (int)tcp->tcp_naglim &&
15661	    tcp->tcp_naglim > tcp->tcp_last_sent_len &&
15662	    snxt != tcp->tcp_suna &&
15663	    !(tcp->tcp_valid_bits & TCP_URG_VALID) &&
15664	    !(tcp->tcp_valid_bits & TCP_FSS_VALID)) {
15665		goto done;
15666	}
15667
15668	/*
15669	 * If tcp_zero_win_probe is not set and the tcp->tcp_cork option
15670	 * is set, then we have to force TCP not to send partial segment
15671	 * (smaller than MSS bytes). We are calculating the usable now
15672	 * based on full mss and will save the rest of remaining data for
15673	 * later. When tcp_zero_win_probe is set, TCP needs to send out
15674	 * something to do zero window probe.
15675	 */
15676	if (tcp->tcp_cork && !tcp->tcp_zero_win_probe) {
15677		if (usable < mss)
15678			goto done;
15679		usable = (usable / mss) * mss;
15680	}
15681
15682	/* Update the latest receive window size in TCP header. */
15683	tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
15684
15685	/* Send the packet. */
15686	rc = tcp_send(tcp, mss, total_hdr_len, tcp_hdr_len,
15687	    num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail,
15688	    local_time);
15689
15690	/* Pretend that all we were trying to send really got sent */
15691	if (rc < 0 && tail_unsent < 0) {
15692		do {
15693			xmit_tail = xmit_tail->b_cont;
15694			xmit_tail->b_prev = local_time;
15695			ASSERT((uintptr_t)(xmit_tail->b_wptr -
15696			    xmit_tail->b_rptr) <= (uintptr_t)INT_MAX);
15697			tail_unsent += (int)(xmit_tail->b_wptr -
15698			    xmit_tail->b_rptr);
15699		} while (tail_unsent < 0);
15700	}
15701done:;
15702	tcp->tcp_xmit_tail = xmit_tail;
15703	tcp->tcp_xmit_tail_unsent = tail_unsent;
15704	len = tcp->tcp_snxt - snxt;
15705	if (len) {
15706		/*
15707		 * If new data was sent, need to update the notsack
15708		 * list, which is, afterall, data blocks that have
15709		 * not been sack'ed by the receiver.  New data is
15710		 * not sack'ed.
15711		 */
15712		if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
15713			/* len is a negative value. */
15714			tcp->tcp_pipe -= len;
15715			tcp_notsack_update(&(tcp->tcp_notsack_list),
15716			    tcp->tcp_snxt, snxt,
15717			    &(tcp->tcp_num_notsack_blk),
15718			    &(tcp->tcp_cnt_notsack_list));
15719		}
15720		tcp->tcp_snxt = snxt + tcp->tcp_fin_sent;
15721		tcp->tcp_rack = tcp->tcp_rnxt;
15722		tcp->tcp_rack_cnt = 0;
15723		if ((snxt + len) == tcp->tcp_suna) {
15724			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
15725		}
15726	} else if (snxt == tcp->tcp_suna && tcp->tcp_swnd == 0) {
15727		/*
15728		 * Didn't send anything. Make sure the timer is running
15729		 * so that we will probe a zero window.
15730		 */
15731		TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
15732	}
15733	/* Note that len is the amount we just sent but with a negative sign */
15734	tcp->tcp_unsent += len;
15735	mutex_enter(&tcp->tcp_non_sq_lock);
15736	if (tcp->tcp_flow_stopped) {
15737		if (TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
15738			tcp_clrqfull(tcp);
15739		}
15740	} else if (TCP_UNSENT_BYTES(tcp) >= connp->conn_sndbuf) {
15741		if (!(tcp->tcp_detached))
15742			tcp_setqfull(tcp);
15743	}
15744	mutex_exit(&tcp->tcp_non_sq_lock);
15745}
15746
15747/*
15748 * tcp_fill_header is called by tcp_send() to fill the outgoing TCP header
15749 * with the template header, as well as other options such as time-stamp,
15750 * ECN and/or SACK.
15751 */
15752static void
15753tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now, int num_sack_blk)
15754{
15755	tcpha_t *tcp_tmpl, *tcpha;
15756	uint32_t *dst, *src;
15757	int hdrlen;
15758	conn_t *connp = tcp->tcp_connp;
15759
15760	ASSERT(OK_32PTR(rptr));
15761
15762	/* Template header */
15763	tcp_tmpl = tcp->tcp_tcpha;
15764
15765	/* Header of outgoing packet */
15766	tcpha = (tcpha_t *)(rptr + connp->conn_ixa->ixa_ip_hdr_length);
15767
15768	/* dst and src are opaque 32-bit fields, used for copying */
15769	dst = (uint32_t *)rptr;
15770	src = (uint32_t *)connp->conn_ht_iphc;
15771	hdrlen = connp->conn_ht_iphc_len;
15772
15773	/* Fill time-stamp option if needed */
15774	if (tcp->tcp_snd_ts_ok) {
15775		U32_TO_BE32((uint32_t)now,
15776		    (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 4);
15777		U32_TO_BE32(tcp->tcp_ts_recent,
15778		    (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 8);
15779	} else {
15780		ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
15781	}
15782
15783	/*
15784	 * Copy the template header; is this really more efficient than
15785	 * calling bcopy()?  For simple IPv4/TCP, it may be the case,
15786	 * but perhaps not for other scenarios.
15787	 */
15788	dst[0] = src[0];
15789	dst[1] = src[1];
15790	dst[2] = src[2];
15791	dst[3] = src[3];
15792	dst[4] = src[4];
15793	dst[5] = src[5];
15794	dst[6] = src[6];
15795	dst[7] = src[7];
15796	dst[8] = src[8];
15797	dst[9] = src[9];
15798	if (hdrlen -= 40) {
15799		hdrlen >>= 2;
15800		dst += 10;
15801		src += 10;
15802		do {
15803			*dst++ = *src++;
15804		} while (--hdrlen);
15805	}
15806
15807	/*
15808	 * Set the ECN info in the TCP header if it is not a zero
15809	 * window probe.  Zero window probe is only sent in
15810	 * tcp_wput_data() and tcp_timer().
15811	 */
15812	if (tcp->tcp_ecn_ok && !tcp->tcp_zero_win_probe) {
15813		SET_ECT(tcp, rptr);
15814
15815		if (tcp->tcp_ecn_echo_on)
15816			tcpha->tha_flags |= TH_ECE;
15817		if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
15818			tcpha->tha_flags |= TH_CWR;
15819			tcp->tcp_ecn_cwr_sent = B_TRUE;
15820		}
15821	}
15822
15823	/* Fill in SACK options */
15824	if (num_sack_blk > 0) {
15825		uchar_t *wptr = rptr + connp->conn_ht_iphc_len;
15826		sack_blk_t *tmp;
15827		int32_t	i;
15828
15829		wptr[0] = TCPOPT_NOP;
15830		wptr[1] = TCPOPT_NOP;
15831		wptr[2] = TCPOPT_SACK;
15832		wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
15833		    sizeof (sack_blk_t);
15834		wptr += TCPOPT_REAL_SACK_LEN;
15835
15836		tmp = tcp->tcp_sack_list;
15837		for (i = 0; i < num_sack_blk; i++) {
15838			U32_TO_BE32(tmp[i].begin, wptr);
15839			wptr += sizeof (tcp_seq);
15840			U32_TO_BE32(tmp[i].end, wptr);
15841			wptr += sizeof (tcp_seq);
15842		}
15843		tcpha->tha_offset_and_reserved +=
15844		    ((num_sack_blk * 2 + 1) << 4);
15845	}
15846}
15847
15848/*
15849 * tcp_send() is called by tcp_wput_data() and returns one of the following:
15850 *
15851 * -1 = failed allocation.
15852 *  0 = success; burst count reached, or usable send window is too small,
15853 *      and that we'd rather wait until later before sending again.
15854 */
15855static int
15856tcp_send(tcp_t *tcp, const int mss, const int total_hdr_len,
15857    const int tcp_hdr_len, const int num_sack_blk, int *usable,
15858    uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time)
15859{
15860	int		num_burst_seg = tcp->tcp_snd_burst;
15861	int		num_lso_seg = 1;
15862	uint_t		lso_usable;
15863	boolean_t	do_lso_send = B_FALSE;
15864	tcp_stack_t	*tcps = tcp->tcp_tcps;
15865	conn_t		*connp = tcp->tcp_connp;
15866	ip_xmit_attr_t	*ixa = connp->conn_ixa;
15867
15868	/*
15869	 * Check LSO possibility. The value of tcp->tcp_lso indicates whether
15870	 * the underlying connection is LSO capable. Will check whether having
15871	 * enough available data to initiate LSO transmission in the for(){}
15872	 * loops.
15873	 */
15874	if (tcp->tcp_lso && (tcp->tcp_valid_bits & ~TCP_FSS_VALID) == 0)
15875			do_lso_send = B_TRUE;
15876
15877	for (;;) {
15878		struct datab	*db;
15879		tcpha_t		*tcpha;
15880		uint32_t	sum;
15881		mblk_t		*mp, *mp1;
15882		uchar_t		*rptr;
15883		int		len;
15884
15885		/*
15886		 * Burst count reached, return successfully.
15887		 */
15888		if (num_burst_seg == 0)
15889			break;
15890
15891		/*
15892		 * Calculate the maximum payload length we can send at one
15893		 * time.
15894		 */
15895		if (do_lso_send) {
15896			/*
15897			 * Check whether be able to to do LSO for the current
15898			 * available data.
15899			 */
15900			if (num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
15901				lso_usable = MIN(tcp->tcp_lso_max, *usable);
15902				lso_usable = MIN(lso_usable,
15903				    num_burst_seg * mss);
15904
15905				num_lso_seg = lso_usable / mss;
15906				if (lso_usable % mss) {
15907					num_lso_seg++;
15908					tcp->tcp_last_sent_len = (ushort_t)
15909					    (lso_usable % mss);
15910				} else {
15911					tcp->tcp_last_sent_len = (ushort_t)mss;
15912				}
15913			} else {
15914				do_lso_send = B_FALSE;
15915				num_lso_seg = 1;
15916				lso_usable = mss;
15917			}
15918		}
15919
15920		ASSERT(num_lso_seg <= IP_MAXPACKET / mss + 1);
15921#ifdef DEBUG
15922		DTRACE_PROBE2(tcp_send_lso, int, num_lso_seg, boolean_t,
15923		    do_lso_send);
15924#endif
15925		/*
15926		 * Adjust num_burst_seg here.
15927		 */
15928		num_burst_seg -= num_lso_seg;
15929
15930		len = mss;
15931		if (len > *usable) {
15932			ASSERT(do_lso_send == B_FALSE);
15933
15934			len = *usable;
15935			if (len <= 0) {
15936				/* Terminate the loop */
15937				break;	/* success; too small */
15938			}
15939			/*
15940			 * Sender silly-window avoidance.
15941			 * Ignore this if we are going to send a
15942			 * zero window probe out.
15943			 *
15944			 * TODO: force data into microscopic window?
15945			 *	==> (!pushed || (unsent > usable))
15946			 */
15947			if (len < (tcp->tcp_max_swnd >> 1) &&
15948			    (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) > len &&
15949			    !((tcp->tcp_valid_bits & TCP_URG_VALID) &&
15950			    len == 1) && (! tcp->tcp_zero_win_probe)) {
15951				/*
15952				 * If the retransmit timer is not running
15953				 * we start it so that we will retransmit
15954				 * in the case when the receiver has
15955				 * decremented the window.
15956				 */
15957				if (*snxt == tcp->tcp_snxt &&
15958				    *snxt == tcp->tcp_suna) {
15959					/*
15960					 * We are not supposed to send
15961					 * anything.  So let's wait a little
15962					 * bit longer before breaking SWS
15963					 * avoidance.
15964					 *
15965					 * What should the value be?
15966					 * Suggestion: MAX(init rexmit time,
15967					 * tcp->tcp_rto)
15968					 */
15969					TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
15970				}
15971				break;	/* success; too small */
15972			}
15973		}
15974
15975		tcpha = tcp->tcp_tcpha;
15976
15977		/*
15978		 * The reason to adjust len here is that we need to set flags
15979		 * and calculate checksum.
15980		 */
15981		if (do_lso_send)
15982			len = lso_usable;
15983
15984		*usable -= len; /* Approximate - can be adjusted later */
15985		if (*usable > 0)
15986			tcpha->tha_flags = TH_ACK;
15987		else
15988			tcpha->tha_flags = (TH_ACK | TH_PUSH);
15989
15990		/*
15991		 * Prime pump for IP's checksumming on our behalf.
15992		 * Include the adjustment for a source route if any.
15993		 * In case of LSO, the partial pseudo-header checksum should
15994		 * exclusive TCP length, so zero tha_sum before IP calculate
15995		 * pseudo-header checksum for partial checksum offload.
15996		 */
15997		if (do_lso_send) {
15998			sum = 0;
15999		} else {
16000			sum = len + tcp_hdr_len + connp->conn_sum;
16001			sum = (sum >> 16) + (sum & 0xFFFF);
16002		}
16003		tcpha->tha_sum = htons(sum);
16004		tcpha->tha_seq = htonl(*snxt);
16005
16006		/*
16007		 * Branch off to tcp_xmit_mp() if any of the VALID bits is
16008		 * set.  For the case when TCP_FSS_VALID is the only valid
16009		 * bit (normal active close), branch off only when we think
16010		 * that the FIN flag needs to be set.  Note for this case,
16011		 * that (snxt + len) may not reflect the actual seg_len,
16012		 * as len may be further reduced in tcp_xmit_mp().  If len
16013		 * gets modified, we will end up here again.
16014		 */
16015		if (tcp->tcp_valid_bits != 0 &&
16016		    (tcp->tcp_valid_bits != TCP_FSS_VALID ||
16017		    ((*snxt + len) == tcp->tcp_fss))) {
16018			uchar_t		*prev_rptr;
16019			uint32_t	prev_snxt = tcp->tcp_snxt;
16020
16021			if (*tail_unsent == 0) {
16022				ASSERT((*xmit_tail)->b_cont != NULL);
16023				*xmit_tail = (*xmit_tail)->b_cont;
16024				prev_rptr = (*xmit_tail)->b_rptr;
16025				*tail_unsent = (int)((*xmit_tail)->b_wptr -
16026				    (*xmit_tail)->b_rptr);
16027			} else {
16028				prev_rptr = (*xmit_tail)->b_rptr;
16029				(*xmit_tail)->b_rptr = (*xmit_tail)->b_wptr -
16030				    *tail_unsent;
16031			}
16032			mp = tcp_xmit_mp(tcp, *xmit_tail, len, NULL, NULL,
16033			    *snxt, B_FALSE, (uint32_t *)&len, B_FALSE);
16034			/* Restore tcp_snxt so we get amount sent right. */
16035			tcp->tcp_snxt = prev_snxt;
16036			if (prev_rptr == (*xmit_tail)->b_rptr) {
16037				/*
16038				 * If the previous timestamp is still in use,
16039				 * don't stomp on it.
16040				 */
16041				if ((*xmit_tail)->b_next == NULL) {
16042					(*xmit_tail)->b_prev = local_time;
16043					(*xmit_tail)->b_next =
16044					    (mblk_t *)(uintptr_t)(*snxt);
16045				}
16046			} else
16047				(*xmit_tail)->b_rptr = prev_rptr;
16048
16049			if (mp == NULL) {
16050				return (-1);
16051			}
16052			mp1 = mp->b_cont;
16053
16054			if (len <= mss) /* LSO is unusable (!do_lso_send) */
16055				tcp->tcp_last_sent_len = (ushort_t)len;
16056			while (mp1->b_cont) {
16057				*xmit_tail = (*xmit_tail)->b_cont;
16058				(*xmit_tail)->b_prev = local_time;
16059				(*xmit_tail)->b_next =
16060				    (mblk_t *)(uintptr_t)(*snxt);
16061				mp1 = mp1->b_cont;
16062			}
16063			*snxt += len;
16064			*tail_unsent = (*xmit_tail)->b_wptr - mp1->b_wptr;
16065			BUMP_LOCAL(tcp->tcp_obsegs);
16066			BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs);
16067			UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len);
16068			tcp_send_data(tcp, mp);
16069			continue;
16070		}
16071
16072		*snxt += len;	/* Adjust later if we don't send all of len */
16073		BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs);
16074		UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len);
16075
16076		if (*tail_unsent) {
16077			/* Are the bytes above us in flight? */
16078			rptr = (*xmit_tail)->b_wptr - *tail_unsent;
16079			if (rptr != (*xmit_tail)->b_rptr) {
16080				*tail_unsent -= len;
16081				if (len <= mss) /* LSO is unusable */
16082					tcp->tcp_last_sent_len = (ushort_t)len;
16083				len += total_hdr_len;
16084				ixa->ixa_pktlen = len;
16085
16086				if (ixa->ixa_flags & IXAF_IS_IPV4) {
16087					tcp->tcp_ipha->ipha_length = htons(len);
16088				} else {
16089					tcp->tcp_ip6h->ip6_plen =
16090					    htons(len - IPV6_HDR_LEN);
16091				}
16092
16093				mp = dupb(*xmit_tail);
16094				if (mp == NULL) {
16095					return (-1);	/* out_of_mem */
16096				}
16097				mp->b_rptr = rptr;
16098				/*
16099				 * If the old timestamp is no longer in use,
16100				 * sample a new timestamp now.
16101				 */
16102				if ((*xmit_tail)->b_next == NULL) {
16103					(*xmit_tail)->b_prev = local_time;
16104					(*xmit_tail)->b_next =
16105					    (mblk_t *)(uintptr_t)(*snxt-len);
16106				}
16107				goto must_alloc;
16108			}
16109		} else {
16110			*xmit_tail = (*xmit_tail)->b_cont;
16111			ASSERT((uintptr_t)((*xmit_tail)->b_wptr -
16112			    (*xmit_tail)->b_rptr) <= (uintptr_t)INT_MAX);
16113			*tail_unsent = (int)((*xmit_tail)->b_wptr -
16114			    (*xmit_tail)->b_rptr);
16115		}
16116
16117		(*xmit_tail)->b_prev = local_time;
16118		(*xmit_tail)->b_next = (mblk_t *)(uintptr_t)(*snxt - len);
16119
16120		*tail_unsent -= len;
16121		if (len <= mss) /* LSO is unusable (!do_lso_send) */
16122			tcp->tcp_last_sent_len = (ushort_t)len;
16123
16124		len += total_hdr_len;
16125		ixa->ixa_pktlen = len;
16126
16127		if (ixa->ixa_flags & IXAF_IS_IPV4) {
16128			tcp->tcp_ipha->ipha_length = htons(len);
16129		} else {
16130			tcp->tcp_ip6h->ip6_plen = htons(len - IPV6_HDR_LEN);
16131		}
16132
16133		mp = dupb(*xmit_tail);
16134		if (mp == NULL) {
16135			return (-1);	/* out_of_mem */
16136		}
16137
16138		len = total_hdr_len;
16139		/*
16140		 * There are four reasons to allocate a new hdr mblk:
16141		 *  1) The bytes above us are in use by another packet
16142		 *  2) We don't have good alignment
16143		 *  3) The mblk is being shared
16144		 *  4) We don't have enough room for a header
16145		 */
16146		rptr = mp->b_rptr - len;
16147		if (!OK_32PTR(rptr) ||
16148		    ((db = mp->b_datap), db->db_ref != 2) ||
16149		    rptr < db->db_base) {
16150			/* NOTE: we assume allocb returns an OK_32PTR */
16151
16152		must_alloc:;
16153			mp1 = allocb(connp->conn_ht_iphc_allocated +
16154			    tcps->tcps_wroff_xtra, BPRI_MED);
16155			if (mp1 == NULL) {
16156				freemsg(mp);
16157				return (-1);	/* out_of_mem */
16158			}
16159			mp1->b_cont = mp;
16160			mp = mp1;
16161			/* Leave room for Link Level header */
16162			len = total_hdr_len;
16163			rptr = &mp->b_rptr[tcps->tcps_wroff_xtra];
16164			mp->b_wptr = &rptr[len];
16165		}
16166
16167		/*
16168		 * Fill in the header using the template header, and add
16169		 * options such as time-stamp, ECN and/or SACK, as needed.
16170		 */
16171		tcp_fill_header(tcp, rptr, (clock_t)local_time, num_sack_blk);
16172
16173		mp->b_rptr = rptr;
16174
16175		if (*tail_unsent) {
16176			int spill = *tail_unsent;
16177
16178			mp1 = mp->b_cont;
16179			if (mp1 == NULL)
16180				mp1 = mp;
16181
16182			/*
16183			 * If we're a little short, tack on more mblks until
16184			 * there is no more spillover.
16185			 */
16186			while (spill < 0) {
16187				mblk_t *nmp;
16188				int nmpsz;
16189
16190				nmp = (*xmit_tail)->b_cont;
16191				nmpsz = MBLKL(nmp);
16192
16193				/*
16194				 * Excess data in mblk; can we split it?
16195				 * If LSO is enabled for the connection,
16196				 * keep on splitting as this is a transient
16197				 * send path.
16198				 */
16199				if (!do_lso_send && (spill + nmpsz > 0)) {
16200					/*
16201					 * Don't split if stream head was
16202					 * told to break up larger writes
16203					 * into smaller ones.
16204					 */
16205					if (tcp->tcp_maxpsz_multiplier > 0)
16206						break;
16207
16208					/*
16209					 * Next mblk is less than SMSS/2
16210					 * rounded up to nearest 64-byte;
16211					 * let it get sent as part of the
16212					 * next segment.
16213					 */
16214					if (tcp->tcp_localnet &&
16215					    !tcp->tcp_cork &&
16216					    (nmpsz < roundup((mss >> 1), 64)))
16217						break;
16218				}
16219
16220				*xmit_tail = nmp;
16221				ASSERT((uintptr_t)nmpsz <= (uintptr_t)INT_MAX);
16222				/* Stash for rtt use later */
16223				(*xmit_tail)->b_prev = local_time;
16224				(*xmit_tail)->b_next =
16225				    (mblk_t *)(uintptr_t)(*snxt - len);
16226				mp1->b_cont = dupb(*xmit_tail);
16227				mp1 = mp1->b_cont;
16228
16229				spill += nmpsz;
16230				if (mp1 == NULL) {
16231					*tail_unsent = spill;
16232					freemsg(mp);
16233					return (-1);	/* out_of_mem */
16234				}
16235			}
16236
16237			/* Trim back any surplus on the last mblk */
16238			if (spill >= 0) {
16239				mp1->b_wptr -= spill;
16240				*tail_unsent = spill;
16241			} else {
16242				/*
16243				 * We did not send everything we could in
16244				 * order to remain within the b_cont limit.
16245				 */
16246				*usable -= spill;
16247				*snxt += spill;
16248				tcp->tcp_last_sent_len += spill;
16249				UPDATE_MIB(&tcps->tcps_mib,
16250				    tcpOutDataBytes, spill);
16251				/*
16252				 * Adjust the checksum
16253				 */
16254				tcpha = (tcpha_t *)(rptr +
16255				    ixa->ixa_ip_hdr_length);
16256				sum += spill;
16257				sum = (sum >> 16) + (sum & 0xFFFF);
16258				tcpha->tha_sum = htons(sum);
16259				if (connp->conn_ipversion == IPV4_VERSION) {
16260					sum = ntohs(
16261					    ((ipha_t *)rptr)->ipha_length) +
16262					    spill;
16263					((ipha_t *)rptr)->ipha_length =
16264					    htons(sum);
16265				} else {
16266					sum = ntohs(
16267					    ((ip6_t *)rptr)->ip6_plen) +
16268					    spill;
16269					((ip6_t *)rptr)->ip6_plen =
16270					    htons(sum);
16271				}
16272				ixa->ixa_pktlen += spill;
16273				*tail_unsent = 0;
16274			}
16275		}
16276		if (tcp->tcp_ip_forward_progress) {
16277			tcp->tcp_ip_forward_progress = B_FALSE;
16278			ixa->ixa_flags |= IXAF_REACH_CONF;
16279		} else {
16280			ixa->ixa_flags &= ~IXAF_REACH_CONF;
16281		}
16282
16283		/*
16284		 * Append LSO information, both flags and mss, to the mp.
16285		 */
16286		if (do_lso_send) {
16287			lso_info_set(mp, mss, HW_LSO);
16288			ixa->ixa_fragsize = IP_MAXPACKET;
16289			ixa->ixa_extra_ident = num_lso_seg - 1;
16290
16291			DTRACE_PROBE2(tcp_send_lso, int, num_lso_seg,
16292			    boolean_t, B_TRUE);
16293
16294			tcp_send_data(tcp, mp);
16295
16296			/*
16297			 * Restore values of ixa_fragsize and ixa_extra_ident.
16298			 */
16299			ixa->ixa_fragsize = ixa->ixa_pmtu;
16300			ixa->ixa_extra_ident = 0;
16301			tcp->tcp_obsegs += num_lso_seg;
16302			TCP_STAT(tcps, tcp_lso_times);
16303			TCP_STAT_UPDATE(tcps, tcp_lso_pkt_out, num_lso_seg);
16304		} else {
16305			tcp_send_data(tcp, mp);
16306			BUMP_LOCAL(tcp->tcp_obsegs);
16307		}
16308	}
16309
16310	return (0);
16311}
16312
16313/* tcp_wput_flush is called by tcp_wput_nondata to handle M_FLUSH messages. */
16314static void
16315tcp_wput_flush(tcp_t *tcp, mblk_t *mp)
16316{
16317	uchar_t	fval = *mp->b_rptr;
16318	mblk_t	*tail;
16319	conn_t	*connp = tcp->tcp_connp;
16320	queue_t	*q = connp->conn_wq;
16321
16322	/* TODO: How should flush interact with urgent data? */
16323	if ((fval & FLUSHW) && tcp->tcp_xmit_head &&
16324	    !(tcp->tcp_valid_bits & TCP_URG_VALID)) {
16325		/*
16326		 * Flush only data that has not yet been put on the wire.  If
16327		 * we flush data that we have already transmitted, life, as we
16328		 * know it, may come to an end.
16329		 */
16330		tail = tcp->tcp_xmit_tail;
16331		tail->b_wptr -= tcp->tcp_xmit_tail_unsent;
16332		tcp->tcp_xmit_tail_unsent = 0;
16333		tcp->tcp_unsent = 0;
16334		if (tail->b_wptr != tail->b_rptr)
16335			tail = tail->b_cont;
16336		if (tail) {
16337			mblk_t **excess = &tcp->tcp_xmit_head;
16338			for (;;) {
16339				mblk_t *mp1 = *excess;
16340				if (mp1 == tail)
16341					break;
16342				tcp->tcp_xmit_tail = mp1;
16343				tcp->tcp_xmit_last = mp1;
16344				excess = &mp1->b_cont;
16345			}
16346			*excess = NULL;
16347			tcp_close_mpp(&tail);
16348			if (tcp->tcp_snd_zcopy_aware)
16349				tcp_zcopy_notify(tcp);
16350		}
16351		/*
16352		 * We have no unsent data, so unsent must be less than
16353		 * conn_sndlowat, so re-enable flow.
16354		 */
16355		mutex_enter(&tcp->tcp_non_sq_lock);
16356		if (tcp->tcp_flow_stopped) {
16357			tcp_clrqfull(tcp);
16358		}
16359		mutex_exit(&tcp->tcp_non_sq_lock);
16360	}
16361	/*
16362	 * TODO: you can't just flush these, you have to increase rwnd for one
16363	 * thing.  For another, how should urgent data interact?
16364	 */
16365	if (fval & FLUSHR) {
16366		*mp->b_rptr = fval & ~FLUSHW;
16367		/* XXX */
16368		qreply(q, mp);
16369		return;
16370	}
16371	freemsg(mp);
16372}
16373
16374/*
16375 * tcp_wput_iocdata is called by tcp_wput_nondata to handle all M_IOCDATA
16376 * messages.
16377 */
16378static void
16379tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp)
16380{
16381	mblk_t		*mp1;
16382	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
16383	STRUCT_HANDLE(strbuf, sb);
16384	uint_t		addrlen;
16385	conn_t		*connp = tcp->tcp_connp;
16386	queue_t 	*q = connp->conn_wq;
16387
16388	/* Make sure it is one of ours. */
16389	switch (iocp->ioc_cmd) {
16390	case TI_GETMYNAME:
16391	case TI_GETPEERNAME:
16392		break;
16393	default:
16394		ip_wput_nondata(q, mp);
16395		return;
16396	}
16397	switch (mi_copy_state(q, mp, &mp1)) {
16398	case -1:
16399		return;
16400	case MI_COPY_CASE(MI_COPY_IN, 1):
16401		break;
16402	case MI_COPY_CASE(MI_COPY_OUT, 1):
16403		/* Copy out the strbuf. */
16404		mi_copyout(q, mp);
16405		return;
16406	case MI_COPY_CASE(MI_COPY_OUT, 2):
16407		/* All done. */
16408		mi_copy_done(q, mp, 0);
16409		return;
16410	default:
16411		mi_copy_done(q, mp, EPROTO);
16412		return;
16413	}
16414	/* Check alignment of the strbuf */
16415	if (!OK_32PTR(mp1->b_rptr)) {
16416		mi_copy_done(q, mp, EINVAL);
16417		return;
16418	}
16419
16420	STRUCT_SET_HANDLE(sb, iocp->ioc_flag, (void *)mp1->b_rptr);
16421
16422	if (connp->conn_family == AF_INET)
16423		addrlen = sizeof (sin_t);
16424	else
16425		addrlen = sizeof (sin6_t);
16426
16427	if (STRUCT_FGET(sb, maxlen) < addrlen) {
16428		mi_copy_done(q, mp, EINVAL);
16429		return;
16430	}
16431
16432	switch (iocp->ioc_cmd) {
16433	case TI_GETMYNAME:
16434		break;
16435	case TI_GETPEERNAME:
16436		if (tcp->tcp_state < TCPS_SYN_RCVD) {
16437			mi_copy_done(q, mp, ENOTCONN);
16438			return;
16439		}
16440		break;
16441	}
16442	mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE);
16443	if (!mp1)
16444		return;
16445
16446	STRUCT_FSET(sb, len, addrlen);
16447	switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
16448	case TI_GETMYNAME:
16449		(void) conn_getsockname(connp, (struct sockaddr *)mp1->b_wptr,
16450		    &addrlen);
16451		break;
16452	case TI_GETPEERNAME:
16453		(void) conn_getpeername(connp, (struct sockaddr *)mp1->b_wptr,
16454		    &addrlen);
16455		break;
16456	}
16457	mp1->b_wptr += addrlen;
16458	/* Copy out the address */
16459	mi_copyout(q, mp);
16460}
16461
16462static void
16463tcp_use_pure_tpi(tcp_t *tcp)
16464{
16465	conn_t		*connp = tcp->tcp_connp;
16466
16467#ifdef	_ILP32
16468	tcp->tcp_acceptor_id = (t_uscalar_t)connp->conn_rq;
16469#else
16470	tcp->tcp_acceptor_id = connp->conn_dev;
16471#endif
16472	/*
16473	 * Insert this socket into the acceptor hash.
16474	 * We might need it for T_CONN_RES message
16475	 */
16476	tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
16477
16478	tcp->tcp_issocket = B_FALSE;
16479	TCP_STAT(tcp->tcp_tcps, tcp_sock_fallback);
16480}
16481
16482/*
16483 * tcp_wput_ioctl is called by tcp_wput_nondata() to handle all M_IOCTL
16484 * messages.
16485 */
16486/* ARGSUSED */
16487static void
16488tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
16489{
16490	conn_t 		*connp = (conn_t *)arg;
16491	tcp_t		*tcp = connp->conn_tcp;
16492	queue_t		*q = connp->conn_wq;
16493	struct iocblk	*iocp;
16494
16495	ASSERT(DB_TYPE(mp) == M_IOCTL);
16496	/*
16497	 * Try and ASSERT the minimum possible references on the
16498	 * conn early enough. Since we are executing on write side,
16499	 * the connection is obviously not detached and that means
16500	 * there is a ref each for TCP and IP. Since we are behind
16501	 * the squeue, the minimum references needed are 3. If the
16502	 * conn is in classifier hash list, there should be an
16503	 * extra ref for that (we check both the possibilities).
16504	 */
16505	ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
16506	    (connp->conn_fanout == NULL && connp->conn_ref >= 3));
16507
16508	iocp = (struct iocblk *)mp->b_rptr;
16509	switch (iocp->ioc_cmd) {
16510	case _SIOCSOCKFALLBACK:
16511		/*
16512		 * Either sockmod is about to be popped and the socket
16513		 * would now be treated as a plain stream, or a module
16514		 * is about to be pushed so we could no longer use read-
16515		 * side synchronous streams for fused loopback tcp.
16516		 * Drain any queued data and disable direct sockfs
16517		 * interface from now on.
16518		 */
16519		if (!tcp->tcp_issocket) {
16520			DB_TYPE(mp) = M_IOCNAK;
16521			iocp->ioc_error = EINVAL;
16522		} else {
16523			tcp_use_pure_tpi(tcp);
16524			DB_TYPE(mp) = M_IOCACK;
16525			iocp->ioc_error = 0;
16526		}
16527		iocp->ioc_count = 0;
16528		iocp->ioc_rval = 0;
16529		qreply(q, mp);
16530		return;
16531	}
16532	ip_wput_nondata(q, mp);
16533}
16534
16535/*
16536 * This routine is called by tcp_wput() to handle all TPI requests.
16537 */
16538/* ARGSUSED */
16539static void
16540tcp_wput_proto(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
16541{
16542	conn_t		*connp = (conn_t *)arg;
16543	tcp_t		*tcp = connp->conn_tcp;
16544	union T_primitives *tprim = (union T_primitives *)mp->b_rptr;
16545	uchar_t		*rptr;
16546	t_scalar_t	type;
16547	cred_t		*cr;
16548
16549	/*
16550	 * Try and ASSERT the minimum possible references on the
16551	 * conn early enough. Since we are executing on write side,
16552	 * the connection is obviously not detached and that means
16553	 * there is a ref each for TCP and IP. Since we are behind
16554	 * the squeue, the minimum references needed are 3. If the
16555	 * conn is in classifier hash list, there should be an
16556	 * extra ref for that (we check both the possibilities).
16557	 */
16558	ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
16559	    (connp->conn_fanout == NULL && connp->conn_ref >= 3));
16560
16561	rptr = mp->b_rptr;
16562	ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX);
16563	if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) {
16564		type = ((union T_primitives *)rptr)->type;
16565		if (type == T_EXDATA_REQ) {
16566			tcp_output_urgent(connp, mp, arg2, NULL);
16567		} else if (type != T_DATA_REQ) {
16568			goto non_urgent_data;
16569		} else {
16570			/* TODO: options, flags, ... from user */
16571			/* Set length to zero for reclamation below */
16572			tcp_wput_data(tcp, mp->b_cont, B_TRUE);
16573			freeb(mp);
16574		}
16575		return;
16576	} else {
16577		if (connp->conn_debug) {
16578			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
16579			    "tcp_wput_proto, dropping one...");
16580		}
16581		freemsg(mp);
16582		return;
16583	}
16584
16585non_urgent_data:
16586
16587	switch ((int)tprim->type) {
16588	case T_SSL_PROXY_BIND_REQ:	/* an SSL proxy endpoint bind request */
16589		/*
16590		 * save the kssl_ent_t from the next block, and convert this
16591		 * back to a normal bind_req.
16592		 */
16593		if (mp->b_cont != NULL) {
16594			ASSERT(MBLKL(mp->b_cont) >= sizeof (kssl_ent_t));
16595
16596			if (tcp->tcp_kssl_ent != NULL) {
16597				kssl_release_ent(tcp->tcp_kssl_ent, NULL,
16598				    KSSL_NO_PROXY);
16599				tcp->tcp_kssl_ent = NULL;
16600			}
16601			bcopy(mp->b_cont->b_rptr, &tcp->tcp_kssl_ent,
16602			    sizeof (kssl_ent_t));
16603			kssl_hold_ent(tcp->tcp_kssl_ent);
16604			freemsg(mp->b_cont);
16605			mp->b_cont = NULL;
16606		}
16607		tprim->type = T_BIND_REQ;
16608
16609	/* FALLTHROUGH */
16610	case O_T_BIND_REQ:	/* bind request */
16611	case T_BIND_REQ:	/* new semantics bind request */
16612		tcp_tpi_bind(tcp, mp);
16613		break;
16614	case T_UNBIND_REQ:	/* unbind request */
16615		tcp_tpi_unbind(tcp, mp);
16616		break;
16617	case O_T_CONN_RES:	/* old connection response XXX */
16618	case T_CONN_RES:	/* connection response */
16619		tcp_tli_accept(tcp, mp);
16620		break;
16621	case T_CONN_REQ:	/* connection request */
16622		tcp_tpi_connect(tcp, mp);
16623		break;
16624	case T_DISCON_REQ:	/* disconnect request */
16625		tcp_disconnect(tcp, mp);
16626		break;
16627	case T_CAPABILITY_REQ:
16628		tcp_capability_req(tcp, mp);	/* capability request */
16629		break;
16630	case T_INFO_REQ:	/* information request */
16631		tcp_info_req(tcp, mp);
16632		break;
16633	case T_SVR4_OPTMGMT_REQ:	/* manage options req */
16634	case T_OPTMGMT_REQ:
16635		/*
16636		 * Note:  no support for snmpcom_req() through new
16637		 * T_OPTMGMT_REQ. See comments in ip.c
16638		 */
16639
16640		/*
16641		 * All Solaris components should pass a db_credp
16642		 * for this TPI message, hence we ASSERT.
16643		 * But in case there is some other M_PROTO that looks
16644		 * like a TPI message sent by some other kernel
16645		 * component, we check and return an error.
16646		 */
16647		cr = msg_getcred(mp, NULL);
16648		ASSERT(cr != NULL);
16649		if (cr == NULL) {
16650			tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
16651			return;
16652		}
16653		/*
16654		 * If EINPROGRESS is returned, the request has been queued
16655		 * for subsequent processing by ip_restart_optmgmt(), which
16656		 * will do the CONN_DEC_REF().
16657		 */
16658		if ((int)tprim->type == T_SVR4_OPTMGMT_REQ) {
16659			svr4_optcom_req(connp->conn_wq, mp, cr, &tcp_opt_obj);
16660		} else {
16661			tpi_optcom_req(connp->conn_wq, mp, cr, &tcp_opt_obj);
16662		}
16663		break;
16664
16665	case T_UNITDATA_REQ:	/* unitdata request */
16666		tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
16667		break;
16668	case T_ORDREL_REQ:	/* orderly release req */
16669		freemsg(mp);
16670
16671		if (tcp->tcp_fused)
16672			tcp_unfuse(tcp);
16673
16674		if (tcp_xmit_end(tcp) != 0) {
16675			/*
16676			 * We were crossing FINs and got a reset from
16677			 * the other side. Just ignore it.
16678			 */
16679			if (connp->conn_debug) {
16680				(void) strlog(TCP_MOD_ID, 0, 1,
16681				    SL_ERROR|SL_TRACE,
16682				    "tcp_wput_proto, T_ORDREL_REQ out of "
16683				    "state %s",
16684				    tcp_display(tcp, NULL,
16685				    DISP_ADDR_AND_PORT));
16686			}
16687		}
16688		break;
16689	case T_ADDR_REQ:
16690		tcp_addr_req(tcp, mp);
16691		break;
16692	default:
16693		if (connp->conn_debug) {
16694			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
16695			    "tcp_wput_proto, bogus TPI msg, type %d",
16696			    tprim->type);
16697		}
16698		/*
16699		 * We used to M_ERROR.  Sending TNOTSUPPORT gives the user
16700		 * to recover.
16701		 */
16702		tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
16703		break;
16704	}
16705}
16706
16707/*
16708 * The TCP write service routine should never be called...
16709 */
16710/* ARGSUSED */
16711static void
16712tcp_wsrv(queue_t *q)
16713{
16714	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;
16715
16716	TCP_STAT(tcps, tcp_wsrv_called);
16717}
16718
16719/*
16720 * Send out a control packet on the tcp connection specified.  This routine
16721 * is typically called where we need a simple ACK or RST generated.
16722 */
16723static void
16724tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq, uint32_t ack, int ctl)
16725{
16726	uchar_t		*rptr;
16727	tcpha_t		*tcpha;
16728	ipha_t		*ipha = NULL;
16729	ip6_t		*ip6h = NULL;
16730	uint32_t	sum;
16731	int		total_hdr_len;
16732	int		ip_hdr_len;
16733	mblk_t		*mp;
16734	tcp_stack_t	*tcps = tcp->tcp_tcps;
16735	conn_t		*connp = tcp->tcp_connp;
16736	ip_xmit_attr_t	*ixa = connp->conn_ixa;
16737
16738	/*
16739	 * Save sum for use in source route later.
16740	 */
16741	sum = connp->conn_ht_ulp_len + connp->conn_sum;
16742	total_hdr_len = connp->conn_ht_iphc_len;
16743	ip_hdr_len = ixa->ixa_ip_hdr_length;
16744
16745	/* If a text string is passed in with the request, pass it to strlog. */
16746	if (str != NULL && connp->conn_debug) {
16747		(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
16748		    "tcp_xmit_ctl: '%s', seq 0x%x, ack 0x%x, ctl 0x%x",
16749		    str, seq, ack, ctl);
16750	}
16751	mp = allocb(connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra,
16752	    BPRI_MED);
16753	if (mp == NULL) {
16754		return;
16755	}
16756	rptr = &mp->b_rptr[tcps->tcps_wroff_xtra];
16757	mp->b_rptr = rptr;
16758	mp->b_wptr = &rptr[total_hdr_len];
16759	bcopy(connp->conn_ht_iphc, rptr, total_hdr_len);
16760
16761	ixa->ixa_pktlen = total_hdr_len;
16762
16763	if (ixa->ixa_flags & IXAF_IS_IPV4) {
16764		ipha = (ipha_t *)rptr;
16765		ipha->ipha_length = htons(total_hdr_len);
16766	} else {
16767		ip6h = (ip6_t *)rptr;
16768		ip6h->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN);
16769	}
16770	tcpha = (tcpha_t *)&rptr[ip_hdr_len];
16771	tcpha->tha_flags = (uint8_t)ctl;
16772	if (ctl & TH_RST) {
16773		BUMP_MIB(&tcps->tcps_mib, tcpOutRsts);
16774		BUMP_MIB(&tcps->tcps_mib, tcpOutControl);
16775		/*
16776		 * Don't send TSopt w/ TH_RST packets per RFC 1323.
16777		 */
16778		if (tcp->tcp_snd_ts_ok &&
16779		    tcp->tcp_state > TCPS_SYN_SENT) {
16780			mp->b_wptr = &rptr[total_hdr_len - TCPOPT_REAL_TS_LEN];
16781			*(mp->b_wptr) = TCPOPT_EOL;
16782
16783			ixa->ixa_pktlen = total_hdr_len - TCPOPT_REAL_TS_LEN;
16784
16785			if (connp->conn_ipversion == IPV4_VERSION) {
16786				ipha->ipha_length = htons(total_hdr_len -
16787				    TCPOPT_REAL_TS_LEN);
16788			} else {
16789				ip6h->ip6_plen = htons(total_hdr_len -
16790				    IPV6_HDR_LEN - TCPOPT_REAL_TS_LEN);
16791			}
16792			tcpha->tha_offset_and_reserved -= (3 << 4);
16793			sum -= TCPOPT_REAL_TS_LEN;
16794		}
16795	}
16796	if (ctl & TH_ACK) {
16797		if (tcp->tcp_snd_ts_ok) {
16798			U32_TO_BE32(lbolt,
16799			    (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
16800			U32_TO_BE32(tcp->tcp_ts_recent,
16801			    (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
16802		}
16803
16804		/* Update the latest receive window size in TCP header. */
16805		tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
16806		tcp->tcp_rack = ack;
16807		tcp->tcp_rack_cnt = 0;
16808		BUMP_MIB(&tcps->tcps_mib, tcpOutAck);
16809	}
16810	BUMP_LOCAL(tcp->tcp_obsegs);
16811	tcpha->tha_seq = htonl(seq);
16812	tcpha->tha_ack = htonl(ack);
16813	/*
16814	 * Include the adjustment for a source route if any.
16815	 */
16816	sum = (sum >> 16) + (sum & 0xFFFF);
16817	tcpha->tha_sum = htons(sum);
16818	tcp_send_data(tcp, mp);
16819}
16820
16821/*
16822 * If this routine returns B_TRUE, TCP can generate a RST in response
16823 * to a segment.  If it returns B_FALSE, TCP should not respond.
16824 */
16825static boolean_t
16826tcp_send_rst_chk(tcp_stack_t *tcps)
16827{
16828	clock_t	now;
16829
16830	/*
16831	 * TCP needs to protect itself from generating too many RSTs.
16832	 * This can be a DoS attack by sending us random segments
16833	 * soliciting RSTs.
16834	 *
16835	 * What we do here is to have a limit of tcp_rst_sent_rate RSTs
16836	 * in each 1 second interval.  In this way, TCP still generate
16837	 * RSTs in normal cases but when under attack, the impact is
16838	 * limited.
16839	 */
16840	if (tcps->tcps_rst_sent_rate_enabled != 0) {
16841		now = lbolt;
16842		/* lbolt can wrap around. */
16843		if ((tcps->tcps_last_rst_intrvl > now) ||
16844		    (TICK_TO_MSEC(now - tcps->tcps_last_rst_intrvl) >
16845		    1*SECONDS)) {
16846			tcps->tcps_last_rst_intrvl = now;
16847			tcps->tcps_rst_cnt = 1;
16848		} else if (++tcps->tcps_rst_cnt > tcps->tcps_rst_sent_rate) {
16849			return (B_FALSE);
16850		}
16851	}
16852	return (B_TRUE);
16853}
16854
16855/*
16856 * Generate a reset based on an inbound packet, connp is set by caller
16857 * when RST is in response to an unexpected inbound packet for which
16858 * there is active tcp state in the system.
16859 *
16860 * IPSEC NOTE : Try to send the reply with the same protection as it came
16861 * in.  We have the ip_recv_attr_t which is reversed to form the ip_xmit_attr_t.
16862 * That way the packet will go out at the same level of protection as it
16863 * came in with.
16864 */
16865static void
16866tcp_xmit_early_reset(char *str, mblk_t *mp, uint32_t seq, uint32_t ack, int ctl,
16867    ip_recv_attr_t *ira, ip_stack_t *ipst, conn_t *connp)
16868{
16869	ipha_t		*ipha = NULL;
16870	ip6_t		*ip6h = NULL;
16871	ushort_t	len;
16872	tcpha_t		*tcpha;
16873	int		i;
16874	ipaddr_t	v4addr;
16875	in6_addr_t	v6addr;
16876	netstack_t	*ns = ipst->ips_netstack;
16877	tcp_stack_t	*tcps = ns->netstack_tcp;
16878	ip_xmit_attr_t	ixas, *ixa;
16879	uint_t		ip_hdr_len = ira->ira_ip_hdr_length;
16880	boolean_t	need_refrele = B_FALSE;		/* ixa_refrele(ixa) */
16881	ushort_t	port;
16882
16883	if (!tcp_send_rst_chk(tcps)) {
16884		tcps->tcps_rst_unsent++;
16885		freemsg(mp);
16886		return;
16887	}
16888
16889	/*
16890	 * If connp != NULL we use conn_ixa to keep IP_NEXTHOP and other
16891	 * options from the listener. In that case the caller must ensure that
16892	 * we are running on the listener = connp squeue.
16893	 *
16894	 * We get a safe copy of conn_ixa so we don't need to restore anything
16895	 * we or ip_output_simple might change in the ixa.
16896	 */
16897	if (connp != NULL) {
16898		ASSERT(connp->conn_on_sqp);
16899
16900		ixa = conn_get_ixa_exclusive(connp);
16901		if (ixa == NULL) {
16902			tcps->tcps_rst_unsent++;
16903			freemsg(mp);
16904			return;
16905		}
16906		need_refrele = B_TRUE;
16907	} else {
16908		bzero(&ixas, sizeof (ixas));
16909		ixa = &ixas;
16910		/*
16911		 * IXAF_VERIFY_SOURCE is overkill since we know the
16912		 * packet was for us.
16913		 */
16914		ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE;
16915		ixa->ixa_protocol = IPPROTO_TCP;
16916		ixa->ixa_zoneid = ira->ira_zoneid;
16917		ixa->ixa_ifindex = 0;
16918		ixa->ixa_ipst = ipst;
16919		ixa->ixa_cred = kcred;
16920		ixa->ixa_cpid = NOPID;
16921	}
16922
16923	if (str && tcps->tcps_dbg) {
16924		(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
16925		    "tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, "
16926		    "flags 0x%x",
16927		    str, seq, ack, ctl);
16928	}
16929	if (mp->b_datap->db_ref != 1) {
16930		mblk_t *mp1 = copyb(mp);
16931		freemsg(mp);
16932		mp = mp1;
16933		if (mp == NULL)
16934			goto done;
16935	} else if (mp->b_cont) {
16936		freemsg(mp->b_cont);
16937		mp->b_cont = NULL;
16938		DB_CKSUMFLAGS(mp) = 0;
16939	}
16940	/*
16941	 * We skip reversing source route here.
16942	 * (for now we replace all IP options with EOL)
16943	 */
16944	if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
16945		ipha = (ipha_t *)mp->b_rptr;
16946		for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++)
16947			mp->b_rptr[i] = IPOPT_EOL;
16948		/*
16949		 * Make sure that src address isn't flagrantly invalid.
16950		 * Not all broadcast address checking for the src address
16951		 * is possible, since we don't know the netmask of the src
16952		 * addr.  No check for destination address is done, since
16953		 * IP will not pass up a packet with a broadcast dest
16954		 * address to TCP.  Similar checks are done below for IPv6.
16955		 */
16956		if (ipha->ipha_src == 0 || ipha->ipha_src == INADDR_BROADCAST ||
16957		    CLASSD(ipha->ipha_src)) {
16958			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
16959			ip_drop_input("ipIfStatsInDiscards", mp, NULL);
16960			freemsg(mp);
16961			goto done;
16962		}
16963	} else {
16964		ip6h = (ip6_t *)mp->b_rptr;
16965
16966		if (IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src) ||
16967		    IN6_IS_ADDR_MULTICAST(&ip6h->ip6_src)) {
16968			BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsInDiscards);
16969			ip_drop_input("ipIfStatsInDiscards", mp, NULL);
16970			freemsg(mp);
16971			goto done;
16972		}
16973
16974		/* Remove any extension headers assuming partial overlay */
16975		if (ip_hdr_len > IPV6_HDR_LEN) {
16976			uint8_t *to;
16977
16978			to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN;
16979			ovbcopy(ip6h, to, IPV6_HDR_LEN);
16980			mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN;
16981			ip_hdr_len = IPV6_HDR_LEN;
16982			ip6h = (ip6_t *)mp->b_rptr;
16983			ip6h->ip6_nxt = IPPROTO_TCP;
16984		}
16985	}
16986	tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
16987	if (tcpha->tha_flags & TH_RST) {
16988		freemsg(mp);
16989		goto done;
16990	}
16991	tcpha->tha_offset_and_reserved = (5 << 4);
16992	len = ip_hdr_len + sizeof (tcpha_t);
16993	mp->b_wptr = &mp->b_rptr[len];
16994	if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
16995		ipha->ipha_length = htons(len);
16996		/* Swap addresses */
16997		v4addr = ipha->ipha_src;
16998		ipha->ipha_src = ipha->ipha_dst;
16999		ipha->ipha_dst = v4addr;
17000		ipha->ipha_ident = 0;
17001		ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl;
17002		ixa->ixa_flags |= IXAF_IS_IPV4;
17003		ixa->ixa_ip_hdr_length = ip_hdr_len;
17004	} else {
17005		ip6h->ip6_plen = htons(len - IPV6_HDR_LEN);
17006		/* Swap addresses */
17007		v6addr = ip6h->ip6_src;
17008		ip6h->ip6_src = ip6h->ip6_dst;
17009		ip6h->ip6_dst = v6addr;
17010		ip6h->ip6_hops = (uchar_t)tcps->tcps_ipv6_hoplimit;
17011		ixa->ixa_flags &= ~IXAF_IS_IPV4;
17012
17013		if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_dst)) {
17014			ixa->ixa_flags |= IXAF_SCOPEID_SET;
17015			ixa->ixa_scopeid = ira->ira_ruifindex;
17016		}
17017		ixa->ixa_ip_hdr_length = IPV6_HDR_LEN;
17018	}
17019	ixa->ixa_pktlen = len;
17020
17021	/* Swap the ports */
17022	port = tcpha->tha_fport;
17023	tcpha->tha_fport = tcpha->tha_lport;
17024	tcpha->tha_lport = port;
17025
17026	tcpha->tha_ack = htonl(ack);
17027	tcpha->tha_seq = htonl(seq);
17028	tcpha->tha_win = 0;
17029	tcpha->tha_sum = htons(sizeof (tcpha_t));
17030	tcpha->tha_flags = (uint8_t)ctl;
17031	if (ctl & TH_RST) {
17032		BUMP_MIB(&tcps->tcps_mib, tcpOutRsts);
17033		BUMP_MIB(&tcps->tcps_mib, tcpOutControl);
17034	}
17035
17036	/* Discard any old label */
17037	if (ixa->ixa_free_flags & IXA_FREE_TSL) {
17038		ASSERT(ixa->ixa_tsl != NULL);
17039		label_rele(ixa->ixa_tsl);
17040		ixa->ixa_free_flags &= ~IXA_FREE_TSL;
17041	}
17042	ixa->ixa_tsl = ira->ira_tsl;	/* Behave as a multi-level responder */
17043
17044	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
17045		/*
17046		 * Apply IPsec based on how IPsec was applied to
17047		 * the packet that caused the RST.
17048		 */
17049		if (!ipsec_in_to_out(ira, ixa, mp, ipha, ip6h)) {
17050			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
17051			/* Note: mp already consumed and ip_drop_packet done */
17052			goto done;
17053		}
17054	} else {
17055		/*
17056		 * This is in clear. The RST message we are building
17057		 * here should go out in clear, independent of our policy.
17058		 */
17059		ixa->ixa_flags |= IXAF_NO_IPSEC;
17060	}
17061
17062	/*
17063	 * NOTE:  one might consider tracing a TCP packet here, but
17064	 * this function has no active TCP state and no tcp structure
17065	 * that has a trace buffer.  If we traced here, we would have
17066	 * to keep a local trace buffer in tcp_record_trace().
17067	 */
17068
17069	(void) ip_output_simple(mp, ixa);
17070done:
17071	ixa_cleanup(ixa);
17072	if (need_refrele) {
17073		ASSERT(ixa != &ixas);
17074		ixa_refrele(ixa);
17075	}
17076}
17077
17078/*
17079 * Initiate closedown sequence on an active connection.  (May be called as
17080 * writer.)  Return value zero for OK return, non-zero for error return.
17081 */
17082static int
17083tcp_xmit_end(tcp_t *tcp)
17084{
17085	mblk_t		*mp;
17086	tcp_stack_t	*tcps = tcp->tcp_tcps;
17087	iulp_t		uinfo;
17088	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;
17089	conn_t		*connp = tcp->tcp_connp;
17090
17091	if (tcp->tcp_state < TCPS_SYN_RCVD ||
17092	    tcp->tcp_state > TCPS_CLOSE_WAIT) {
17093		/*
17094		 * Invalid state, only states TCPS_SYN_RCVD,
17095		 * TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid
17096		 */
17097		return (-1);
17098	}
17099
17100	tcp->tcp_fss = tcp->tcp_snxt + tcp->tcp_unsent;
17101	tcp->tcp_valid_bits |= TCP_FSS_VALID;
17102	/*
17103	 * If there is nothing more unsent, send the FIN now.
17104	 * Otherwise, it will go out with the last segment.
17105	 */
17106	if (tcp->tcp_unsent == 0) {
17107		mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
17108		    tcp->tcp_fss, B_FALSE, NULL, B_FALSE);
17109
17110		if (mp) {
17111			tcp_send_data(tcp, mp);
17112		} else {
17113			/*
17114			 * Couldn't allocate msg.  Pretend we got it out.
17115			 * Wait for rexmit timeout.
17116			 */
17117			tcp->tcp_snxt = tcp->tcp_fss + 1;
17118			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
17119		}
17120
17121		/*
17122		 * If needed, update tcp_rexmit_snxt as tcp_snxt is
17123		 * changed.
17124		 */
17125		if (tcp->tcp_rexmit && tcp->tcp_rexmit_nxt == tcp->tcp_fss) {
17126			tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
17127		}
17128	} else {
17129		/*
17130		 * If tcp->tcp_cork is set, then the data will not get sent,
17131		 * so we have to check that and unset it first.
17132		 */
17133		if (tcp->tcp_cork)
17134			tcp->tcp_cork = B_FALSE;
17135		tcp_wput_data(tcp, NULL, B_FALSE);
17136	}
17137
17138	/*
17139	 * If TCP does not get enough samples of RTT or tcp_rtt_updates
17140	 * is 0, don't update the cache.
17141	 */
17142	if (tcps->tcps_rtt_updates == 0 ||
17143	    tcp->tcp_rtt_update < tcps->tcps_rtt_updates)
17144		return (0);
17145
17146	/*
17147	 * We do not have a good algorithm to update ssthresh at this time.
17148	 * So don't do any update.
17149	 */
17150	bzero(&uinfo, sizeof (uinfo));
17151	uinfo.iulp_rtt = tcp->tcp_rtt_sa;
17152	uinfo.iulp_rtt_sd = tcp->tcp_rtt_sd;
17153
17154	/*
17155	 * Note that uinfo is kept for conn_faddr in the DCE. Could update even
17156	 * if source routed but we don't.
17157	 */
17158	if (connp->conn_ipversion == IPV4_VERSION) {
17159		if (connp->conn_faddr_v4 !=  tcp->tcp_ipha->ipha_dst) {
17160			return (0);
17161		}
17162		(void) dce_update_uinfo_v4(connp->conn_faddr_v4, &uinfo, ipst);
17163	} else {
17164		uint_t ifindex;
17165
17166		if (!(IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6,
17167		    &tcp->tcp_ip6h->ip6_dst))) {
17168			return (0);
17169		}
17170		ifindex = 0;
17171		if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_faddr_v6)) {
17172			ip_xmit_attr_t *ixa = connp->conn_ixa;
17173
17174			/*
17175			 * If we are going to create a DCE we'd better have
17176			 * an ifindex
17177			 */
17178			if (ixa->ixa_nce != NULL) {
17179				ifindex = ixa->ixa_nce->nce_common->ncec_ill->
17180				    ill_phyint->phyint_ifindex;
17181			} else {
17182				return (0);
17183			}
17184		}
17185
17186		(void) dce_update_uinfo(&connp->conn_faddr_v6, ifindex, &uinfo,
17187		    ipst);
17188	}
17189	return (0);
17190}
17191
17192/*
17193 * Generate a "no listener here" RST in response to an "unknown" segment.
17194 * connp is set by caller when RST is in response to an unexpected
17195 * inbound packet for which there is active tcp state in the system.
17196 * Note that we are reusing the incoming mp to construct the outgoing RST.
17197 */
17198void
17199tcp_xmit_listeners_reset(mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst,
17200    conn_t *connp)
17201{
17202	uchar_t		*rptr;
17203	uint32_t	seg_len;
17204	tcpha_t		*tcpha;
17205	uint32_t	seg_seq;
17206	uint32_t	seg_ack;
17207	uint_t		flags;
17208	ipha_t 		*ipha;
17209	ip6_t 		*ip6h;
17210	boolean_t	policy_present;
17211	netstack_t	*ns = ipst->ips_netstack;
17212	tcp_stack_t	*tcps = ns->netstack_tcp;
17213	ipsec_stack_t	*ipss = tcps->tcps_netstack->netstack_ipsec;
17214	uint_t		ip_hdr_len = ira->ira_ip_hdr_length;
17215
17216	TCP_STAT(tcps, tcp_no_listener);
17217
17218	if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
17219		policy_present = ipss->ipsec_inbound_v4_policy_present;
17220		ipha = (ipha_t *)mp->b_rptr;
17221		ip6h = NULL;
17222	} else {
17223		policy_present = ipss->ipsec_inbound_v6_policy_present;
17224		ipha = NULL;
17225		ip6h = (ip6_t *)mp->b_rptr;
17226	}
17227
17228	if (policy_present) {
17229		/*
17230		 * The conn_t parameter is NULL because we already know
17231		 * nobody's home.
17232		 */
17233		mp = ipsec_check_global_policy(mp, (conn_t *)NULL, ipha, ip6h,
17234		    ira, ns);
17235		if (mp == NULL)
17236			return;
17237	}
17238	if (is_system_labeled() && !tsol_can_reply_error(mp, ira)) {
17239		DTRACE_PROBE2(
17240		    tx__ip__log__error__nolistener__tcp,
17241		    char *, "Could not reply with RST to mp(1)",
17242		    mblk_t *, mp);
17243		ip2dbg(("tcp_xmit_listeners_reset: not permitted to reply\n"));
17244		freemsg(mp);
17245		return;
17246	}
17247
17248	rptr = mp->b_rptr;
17249
17250	tcpha = (tcpha_t *)&rptr[ip_hdr_len];
17251	seg_seq = ntohl(tcpha->tha_seq);
17252	seg_ack = ntohl(tcpha->tha_ack);
17253	flags = tcpha->tha_flags;
17254
17255	seg_len = msgdsize(mp) - (TCP_HDR_LENGTH(tcpha) + ip_hdr_len);
17256	if (flags & TH_RST) {
17257		freemsg(mp);
17258	} else if (flags & TH_ACK) {
17259		tcp_xmit_early_reset("no tcp, reset", mp, seg_ack, 0, TH_RST,
17260		    ira, ipst, connp);
17261	} else {
17262		if (flags & TH_SYN) {
17263			seg_len++;
17264		} else {
17265			/*
17266			 * Here we violate the RFC.  Note that a normal
17267			 * TCP will never send a segment without the ACK
17268			 * flag, except for RST or SYN segment.  This
17269			 * segment is neither.  Just drop it on the
17270			 * floor.
17271			 */
17272			freemsg(mp);
17273			tcps->tcps_rst_unsent++;
17274			return;
17275		}
17276
17277		tcp_xmit_early_reset("no tcp, reset/ack", mp, 0,
17278		    seg_seq + seg_len, TH_RST | TH_ACK, ira, ipst, connp);
17279	}
17280}
17281
17282/*
17283 * tcp_xmit_mp is called to return a pointer to an mblk chain complete with
17284 * ip and tcp header ready to pass down to IP.  If the mp passed in is
17285 * non-NULL, then up to max_to_send bytes of data will be dup'ed off that
17286 * mblk. (If sendall is not set the dup'ing will stop at an mblk boundary
17287 * otherwise it will dup partial mblks.)
17288 * Otherwise, an appropriate ACK packet will be generated.  This
17289 * routine is not usually called to send new data for the first time.  It
17290 * is mostly called out of the timer for retransmits, and to generate ACKs.
17291 *
17292 * If offset is not NULL, the returned mblk chain's first mblk's b_rptr will
17293 * be adjusted by *offset.  And after dupb(), the offset and the ending mblk
17294 * of the original mblk chain will be returned in *offset and *end_mp.
17295 */
17296mblk_t *
17297tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset,
17298    mblk_t **end_mp, uint32_t seq, boolean_t sendall, uint32_t *seg_len,
17299    boolean_t rexmit)
17300{
17301	int	data_length;
17302	int32_t	off = 0;
17303	uint_t	flags;
17304	mblk_t	*mp1;
17305	mblk_t	*mp2;
17306	uchar_t	*rptr;
17307	tcpha_t	*tcpha;
17308	int32_t	num_sack_blk = 0;
17309	int32_t	sack_opt_len = 0;
17310	tcp_stack_t	*tcps = tcp->tcp_tcps;
17311	conn_t		*connp = tcp->tcp_connp;
17312	ip_xmit_attr_t	*ixa = connp->conn_ixa;
17313
17314	/* Allocate for our maximum TCP header + link-level */
17315	mp1 = allocb(connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra,
17316	    BPRI_MED);
17317	if (!mp1)
17318		return (NULL);
17319	data_length = 0;
17320
17321	/*
17322	 * Note that tcp_mss has been adjusted to take into account the
17323	 * timestamp option if applicable.  Because SACK options do not
17324	 * appear in every TCP segments and they are of variable lengths,
17325	 * they cannot be included in tcp_mss.  Thus we need to calculate
17326	 * the actual segment length when we need to send a segment which
17327	 * includes SACK options.
17328	 */
17329	if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
17330		num_sack_blk = MIN(tcp->tcp_max_sack_blk,
17331		    tcp->tcp_num_sack_blk);
17332		sack_opt_len = num_sack_blk * sizeof (sack_blk_t) +
17333		    TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN;
17334		if (max_to_send + sack_opt_len > tcp->tcp_mss)
17335			max_to_send -= sack_opt_len;
17336	}
17337
17338	if (offset != NULL) {
17339		off = *offset;
17340		/* We use offset as an indicator that end_mp is not NULL. */
17341		*end_mp = NULL;
17342	}
17343	for (mp2 = mp1; mp && data_length != max_to_send; mp = mp->b_cont) {
17344		/* This could be faster with cooperation from downstream */
17345		if (mp2 != mp1 && !sendall &&
17346		    data_length + (int)(mp->b_wptr - mp->b_rptr) >
17347		    max_to_send)
17348			/*
17349			 * Don't send the next mblk since the whole mblk
17350			 * does not fit.
17351			 */
17352			break;
17353		mp2->b_cont = dupb(mp);
17354		mp2 = mp2->b_cont;
17355		if (!mp2) {
17356			freemsg(mp1);
17357			return (NULL);
17358		}
17359		mp2->b_rptr += off;
17360		ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <=
17361		    (uintptr_t)INT_MAX);
17362
17363		data_length += (int)(mp2->b_wptr - mp2->b_rptr);
17364		if (data_length > max_to_send) {
17365			mp2->b_wptr -= data_length - max_to_send;
17366			data_length = max_to_send;
17367			off = mp2->b_wptr - mp->b_rptr;
17368			break;
17369		} else {
17370			off = 0;
17371		}
17372	}
17373	if (offset != NULL) {
17374		*offset = off;
17375		*end_mp = mp;
17376	}
17377	if (seg_len != NULL) {
17378		*seg_len = data_length;
17379	}
17380
17381	/* Update the latest receive window size in TCP header. */
17382	tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
17383
17384	rptr = mp1->b_rptr + tcps->tcps_wroff_xtra;
17385	mp1->b_rptr = rptr;
17386	mp1->b_wptr = rptr + connp->conn_ht_iphc_len + sack_opt_len;
17387	bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len);
17388	tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length];
17389	tcpha->tha_seq = htonl(seq);
17390
17391	/*
17392	 * Use tcp_unsent to determine if the PUSH bit should be used assumes
17393	 * that this function was called from tcp_wput_data. Thus, when called
17394	 * to retransmit data the setting of the PUSH bit may appear some
17395	 * what random in that it might get set when it should not. This
17396	 * should not pose any performance issues.
17397	 */
17398	if (data_length != 0 && (tcp->tcp_unsent == 0 ||
17399	    tcp->tcp_unsent == data_length)) {
17400		flags = TH_ACK | TH_PUSH;
17401	} else {
17402		flags = TH_ACK;
17403	}
17404
17405	if (tcp->tcp_ecn_ok) {
17406		if (tcp->tcp_ecn_echo_on)
17407			flags |= TH_ECE;
17408
17409		/*
17410		 * Only set ECT bit and ECN_CWR if a segment contains new data.
17411		 * There is no TCP flow control for non-data segments, and
17412		 * only data segment is transmitted reliably.
17413		 */
17414		if (data_length > 0 && !rexmit) {
17415			SET_ECT(tcp, rptr);
17416			if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
17417				flags |= TH_CWR;
17418				tcp->tcp_ecn_cwr_sent = B_TRUE;
17419			}
17420		}
17421	}
17422
17423	if (tcp->tcp_valid_bits) {
17424		uint32_t u1;
17425
17426		if ((tcp->tcp_valid_bits & TCP_ISS_VALID) &&
17427		    seq == tcp->tcp_iss) {
17428			uchar_t	*wptr;
17429
17430			/*
17431			 * If TCP_ISS_VALID and the seq number is tcp_iss,
17432			 * TCP can only be in SYN-SENT, SYN-RCVD or
17433			 * FIN-WAIT-1 state.  It can be FIN-WAIT-1 if
17434			 * our SYN is not ack'ed but the app closes this
17435			 * TCP connection.
17436			 */
17437			ASSERT(tcp->tcp_state == TCPS_SYN_SENT ||
17438			    tcp->tcp_state == TCPS_SYN_RCVD ||
17439			    tcp->tcp_state == TCPS_FIN_WAIT_1);
17440
17441			/*
17442			 * Tack on the MSS option.  It is always needed
17443			 * for both active and passive open.
17444			 *
17445			 * MSS option value should be interface MTU - MIN
17446			 * TCP/IP header according to RFC 793 as it means
17447			 * the maximum segment size TCP can receive.  But
17448			 * to get around some broken middle boxes/end hosts
17449			 * out there, we allow the option value to be the
17450			 * same as the MSS option size on the peer side.
17451			 * In this way, the other side will not send
17452			 * anything larger than they can receive.
17453			 *
17454			 * Note that for SYN_SENT state, the ndd param
17455			 * tcp_use_smss_as_mss_opt has no effect as we
17456			 * don't know the peer's MSS option value. So
17457			 * the only case we need to take care of is in
17458			 * SYN_RCVD state, which is done later.
17459			 */
17460			wptr = mp1->b_wptr;
17461			wptr[0] = TCPOPT_MAXSEG;
17462			wptr[1] = TCPOPT_MAXSEG_LEN;
17463			wptr += 2;
17464			u1 = tcp->tcp_initial_pmtu -
17465			    (connp->conn_ipversion == IPV4_VERSION ?
17466			    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) -
17467			    TCP_MIN_HEADER_LENGTH;
17468			U16_TO_BE16(u1, wptr);
17469			mp1->b_wptr = wptr + 2;
17470			/* Update the offset to cover the additional word */
17471			tcpha->tha_offset_and_reserved += (1 << 4);
17472
17473			/*
17474			 * Note that the following way of filling in
17475			 * TCP options are not optimal.  Some NOPs can
17476			 * be saved.  But there is no need at this time
17477			 * to optimize it.  When it is needed, we will
17478			 * do it.
17479			 */
17480			switch (tcp->tcp_state) {
17481			case TCPS_SYN_SENT:
17482				flags = TH_SYN;
17483
17484				if (tcp->tcp_snd_ts_ok) {
17485					uint32_t llbolt = (uint32_t)lbolt;
17486
17487					wptr = mp1->b_wptr;
17488					wptr[0] = TCPOPT_NOP;
17489					wptr[1] = TCPOPT_NOP;
17490					wptr[2] = TCPOPT_TSTAMP;
17491					wptr[3] = TCPOPT_TSTAMP_LEN;
17492					wptr += 4;
17493					U32_TO_BE32(llbolt, wptr);
17494					wptr += 4;
17495					ASSERT(tcp->tcp_ts_recent == 0);
17496					U32_TO_BE32(0L, wptr);
17497					mp1->b_wptr += TCPOPT_REAL_TS_LEN;
17498					tcpha->tha_offset_and_reserved +=
17499					    (3 << 4);
17500				}
17501
17502				/*
17503				 * Set up all the bits to tell other side
17504				 * we are ECN capable.
17505				 */
17506				if (tcp->tcp_ecn_ok) {
17507					flags |= (TH_ECE | TH_CWR);
17508				}
17509				break;
17510			case TCPS_SYN_RCVD:
17511				flags |= TH_SYN;
17512
17513				/*
17514				 * Reset the MSS option value to be SMSS
17515				 * We should probably add back the bytes
17516				 * for timestamp option and IPsec.  We
17517				 * don't do that as this is a workaround
17518				 * for broken middle boxes/end hosts, it
17519				 * is better for us to be more cautious.
17520				 * They may not take these things into
17521				 * account in their SMSS calculation.  Thus
17522				 * the peer's calculated SMSS may be smaller
17523				 * than what it can be.  This should be OK.
17524				 */
17525				if (tcps->tcps_use_smss_as_mss_opt) {
17526					u1 = tcp->tcp_mss;
17527					U16_TO_BE16(u1, wptr);
17528				}
17529
17530				/*
17531				 * If the other side is ECN capable, reply
17532				 * that we are also ECN capable.
17533				 */
17534				if (tcp->tcp_ecn_ok)
17535					flags |= TH_ECE;
17536				break;
17537			default:
17538				/*
17539				 * The above ASSERT() makes sure that this
17540				 * must be FIN-WAIT-1 state.  Our SYN has
17541				 * not been ack'ed so retransmit it.
17542				 */
17543				flags |= TH_SYN;
17544				break;
17545			}
17546
17547			if (tcp->tcp_snd_ws_ok) {
17548				wptr = mp1->b_wptr;
17549				wptr[0] =  TCPOPT_NOP;
17550				wptr[1] =  TCPOPT_WSCALE;
17551				wptr[2] =  TCPOPT_WS_LEN;
17552				wptr[3] = (uchar_t)tcp->tcp_rcv_ws;
17553				mp1->b_wptr += TCPOPT_REAL_WS_LEN;
17554				tcpha->tha_offset_and_reserved += (1 << 4);
17555			}
17556
17557			if (tcp->tcp_snd_sack_ok) {
17558				wptr = mp1->b_wptr;
17559				wptr[0] = TCPOPT_NOP;
17560				wptr[1] = TCPOPT_NOP;
17561				wptr[2] = TCPOPT_SACK_PERMITTED;
17562				wptr[3] = TCPOPT_SACK_OK_LEN;
17563				mp1->b_wptr += TCPOPT_REAL_SACK_OK_LEN;
17564				tcpha->tha_offset_and_reserved += (1 << 4);
17565			}
17566
17567			/* allocb() of adequate mblk assures space */
17568			ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
17569			    (uintptr_t)INT_MAX);
17570			u1 = (int)(mp1->b_wptr - mp1->b_rptr);
17571			/*
17572			 * Get IP set to checksum on our behalf
17573			 * Include the adjustment for a source route if any.
17574			 */
17575			u1 += connp->conn_sum;
17576			u1 = (u1 >> 16) + (u1 & 0xFFFF);
17577			tcpha->tha_sum = htons(u1);
17578			BUMP_MIB(&tcps->tcps_mib, tcpOutControl);
17579		}
17580		if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
17581		    (seq + data_length) == tcp->tcp_fss) {
17582			if (!tcp->tcp_fin_acked) {
17583				flags |= TH_FIN;
17584				BUMP_MIB(&tcps->tcps_mib, tcpOutControl);
17585			}
17586			if (!tcp->tcp_fin_sent) {
17587				tcp->tcp_fin_sent = B_TRUE;
17588				switch (tcp->tcp_state) {
17589				case TCPS_SYN_RCVD:
17590				case TCPS_ESTABLISHED:
17591					tcp->tcp_state = TCPS_FIN_WAIT_1;
17592					break;
17593				case TCPS_CLOSE_WAIT:
17594					tcp->tcp_state = TCPS_LAST_ACK;
17595					break;
17596				}
17597				if (tcp->tcp_suna == tcp->tcp_snxt)
17598					TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
17599				tcp->tcp_snxt = tcp->tcp_fss + 1;
17600			}
17601		}
17602		/*
17603		 * Note the trick here.  u1 is unsigned.  When tcp_urg
17604		 * is smaller than seq, u1 will become a very huge value.
17605		 * So the comparison will fail.  Also note that tcp_urp
17606		 * should be positive, see RFC 793 page 17.
17607		 */
17608		u1 = tcp->tcp_urg - seq + TCP_OLD_URP_INTERPRETATION;
17609		if ((tcp->tcp_valid_bits & TCP_URG_VALID) && u1 != 0 &&
17610		    u1 < (uint32_t)(64 * 1024)) {
17611			flags |= TH_URG;
17612			BUMP_MIB(&tcps->tcps_mib, tcpOutUrg);
17613			tcpha->tha_urp = htons(u1);
17614		}
17615	}
17616	tcpha->tha_flags = (uchar_t)flags;
17617	tcp->tcp_rack = tcp->tcp_rnxt;
17618	tcp->tcp_rack_cnt = 0;
17619
17620	if (tcp->tcp_snd_ts_ok) {
17621		if (tcp->tcp_state != TCPS_SYN_SENT) {
17622			uint32_t llbolt = (uint32_t)lbolt;
17623
17624			U32_TO_BE32(llbolt,
17625			    (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
17626			U32_TO_BE32(tcp->tcp_ts_recent,
17627			    (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
17628		}
17629	}
17630
17631	if (num_sack_blk > 0) {
17632		uchar_t *wptr = (uchar_t *)tcpha + connp->conn_ht_ulp_len;
17633		sack_blk_t *tmp;
17634		int32_t	i;
17635
17636		wptr[0] = TCPOPT_NOP;
17637		wptr[1] = TCPOPT_NOP;
17638		wptr[2] = TCPOPT_SACK;
17639		wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
17640		    sizeof (sack_blk_t);
17641		wptr += TCPOPT_REAL_SACK_LEN;
17642
17643		tmp = tcp->tcp_sack_list;
17644		for (i = 0; i < num_sack_blk; i++) {
17645			U32_TO_BE32(tmp[i].begin, wptr);
17646			wptr += sizeof (tcp_seq);
17647			U32_TO_BE32(tmp[i].end, wptr);
17648			wptr += sizeof (tcp_seq);
17649		}
17650		tcpha->tha_offset_and_reserved += ((num_sack_blk * 2 + 1) << 4);
17651	}
17652	ASSERT((uintptr_t)(mp1->b_wptr - rptr) <= (uintptr_t)INT_MAX);
17653	data_length += (int)(mp1->b_wptr - rptr);
17654
17655	ixa->ixa_pktlen = data_length;
17656
17657	if (ixa->ixa_flags & IXAF_IS_IPV4) {
17658		((ipha_t *)rptr)->ipha_length = htons(data_length);
17659	} else {
17660		ip6_t *ip6 = (ip6_t *)rptr;
17661
17662		ip6->ip6_plen = htons(data_length - IPV6_HDR_LEN);
17663	}
17664
17665	/*
17666	 * Prime pump for IP
17667	 * Include the adjustment for a source route if any.
17668	 */
17669	data_length -= ixa->ixa_ip_hdr_length;
17670	data_length += connp->conn_sum;
17671	data_length = (data_length >> 16) + (data_length & 0xFFFF);
17672	tcpha->tha_sum = htons(data_length);
17673	if (tcp->tcp_ip_forward_progress) {
17674		tcp->tcp_ip_forward_progress = B_FALSE;
17675		connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
17676	} else {
17677		connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
17678	}
17679	return (mp1);
17680}
17681
17682/* This function handles the push timeout. */
17683void
17684tcp_push_timer(void *arg)
17685{
17686	conn_t	*connp = (conn_t *)arg;
17687	tcp_t *tcp = connp->conn_tcp;
17688
17689	TCP_DBGSTAT(tcp->tcp_tcps, tcp_push_timer_cnt);
17690
17691	ASSERT(tcp->tcp_listener == NULL);
17692
17693	ASSERT(!IPCL_IS_NONSTR(connp));
17694
17695	tcp->tcp_push_tid = 0;
17696
17697	if (tcp->tcp_rcv_list != NULL &&
17698	    tcp_rcv_drain(tcp) == TH_ACK_NEEDED)
17699		tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
17700}
17701
17702/*
17703 * This function handles delayed ACK timeout.
17704 */
17705static void
17706tcp_ack_timer(void *arg)
17707{
17708	conn_t	*connp = (conn_t *)arg;
17709	tcp_t *tcp = connp->conn_tcp;
17710	mblk_t *mp;
17711	tcp_stack_t	*tcps = tcp->tcp_tcps;
17712
17713	TCP_DBGSTAT(tcps, tcp_ack_timer_cnt);
17714
17715	tcp->tcp_ack_tid = 0;
17716
17717	if (tcp->tcp_fused)
17718		return;
17719
17720	/*
17721	 * Do not send ACK if there is no outstanding unack'ed data.
17722	 */
17723	if (tcp->tcp_rnxt == tcp->tcp_rack) {
17724		return;
17725	}
17726
17727	if ((tcp->tcp_rnxt - tcp->tcp_rack) > tcp->tcp_mss) {
17728		/*
17729		 * Make sure we don't allow deferred ACKs to result in
17730		 * timer-based ACKing.  If we have held off an ACK
17731		 * when there was more than an mss here, and the timer
17732		 * goes off, we have to worry about the possibility
17733		 * that the sender isn't doing slow-start, or is out
17734		 * of step with us for some other reason.  We fall
17735		 * permanently back in the direction of
17736		 * ACK-every-other-packet as suggested in RFC 1122.
17737		 */
17738		if (tcp->tcp_rack_abs_max > 2)
17739			tcp->tcp_rack_abs_max--;
17740		tcp->tcp_rack_cur_max = 2;
17741	}
17742	mp = tcp_ack_mp(tcp);
17743
17744	if (mp != NULL) {
17745		BUMP_LOCAL(tcp->tcp_obsegs);
17746		BUMP_MIB(&tcps->tcps_mib, tcpOutAck);
17747		BUMP_MIB(&tcps->tcps_mib, tcpOutAckDelayed);
17748		tcp_send_data(tcp, mp);
17749	}
17750}
17751
17752
17753/* Generate an ACK-only (no data) segment for a TCP endpoint */
17754static mblk_t *
17755tcp_ack_mp(tcp_t *tcp)
17756{
17757	uint32_t	seq_no;
17758	tcp_stack_t	*tcps = tcp->tcp_tcps;
17759	conn_t		*connp = tcp->tcp_connp;
17760
17761	/*
17762	 * There are a few cases to be considered while setting the sequence no.
17763	 * Essentially, we can come here while processing an unacceptable pkt
17764	 * in the TCPS_SYN_RCVD state, in which case we set the sequence number
17765	 * to snxt (per RFC 793), note the swnd wouldn't have been set yet.
17766	 * If we are here for a zero window probe, stick with suna. In all
17767	 * other cases, we check if suna + swnd encompasses snxt and set
17768	 * the sequence number to snxt, if so. If snxt falls outside the
17769	 * window (the receiver probably shrunk its window), we will go with
17770	 * suna + swnd, otherwise the sequence no will be unacceptable to the
17771	 * receiver.
17772	 */
17773	if (tcp->tcp_zero_win_probe) {
17774		seq_no = tcp->tcp_suna;
17775	} else if (tcp->tcp_state == TCPS_SYN_RCVD) {
17776		ASSERT(tcp->tcp_swnd == 0);
17777		seq_no = tcp->tcp_snxt;
17778	} else {
17779		seq_no = SEQ_GT(tcp->tcp_snxt,
17780		    (tcp->tcp_suna + tcp->tcp_swnd)) ?
17781		    (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt;
17782	}
17783
17784	if (tcp->tcp_valid_bits) {
17785		/*
17786		 * For the complex case where we have to send some
17787		 * controls (FIN or SYN), let tcp_xmit_mp do it.
17788		 */
17789		return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE,
17790		    NULL, B_FALSE));
17791	} else {
17792		/* Generate a simple ACK */
17793		int	data_length;
17794		uchar_t	*rptr;
17795		tcpha_t	*tcpha;
17796		mblk_t	*mp1;
17797		int32_t	total_hdr_len;
17798		int32_t	tcp_hdr_len;
17799		int32_t	num_sack_blk = 0;
17800		int32_t sack_opt_len;
17801		ip_xmit_attr_t *ixa = connp->conn_ixa;
17802
17803		/*
17804		 * Allocate space for TCP + IP headers
17805		 * and link-level header
17806		 */
17807		if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
17808			num_sack_blk = MIN(tcp->tcp_max_sack_blk,
17809			    tcp->tcp_num_sack_blk);
17810			sack_opt_len = num_sack_blk * sizeof (sack_blk_t) +
17811			    TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN;
17812			total_hdr_len = connp->conn_ht_iphc_len + sack_opt_len;
17813			tcp_hdr_len = connp->conn_ht_ulp_len + sack_opt_len;
17814		} else {
17815			total_hdr_len = connp->conn_ht_iphc_len;
17816			tcp_hdr_len = connp->conn_ht_ulp_len;
17817		}
17818		mp1 = allocb(total_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED);
17819		if (!mp1)
17820			return (NULL);
17821
17822		/* Update the latest receive window size in TCP header. */
17823		tcp->tcp_tcpha->tha_win =
17824		    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
17825		/* copy in prototype TCP + IP header */
17826		rptr = mp1->b_rptr + tcps->tcps_wroff_xtra;
17827		mp1->b_rptr = rptr;
17828		mp1->b_wptr = rptr + total_hdr_len;
17829		bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len);
17830
17831		tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length];
17832
17833		/* Set the TCP sequence number. */
17834		tcpha->tha_seq = htonl(seq_no);
17835
17836		/* Set up the TCP flag field. */
17837		tcpha->tha_flags = (uchar_t)TH_ACK;
17838		if (tcp->tcp_ecn_echo_on)
17839			tcpha->tha_flags |= TH_ECE;
17840
17841		tcp->tcp_rack = tcp->tcp_rnxt;
17842		tcp->tcp_rack_cnt = 0;
17843
17844		/* fill in timestamp option if in use */
17845		if (tcp->tcp_snd_ts_ok) {
17846			uint32_t llbolt = (uint32_t)lbolt;
17847
17848			U32_TO_BE32(llbolt,
17849			    (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
17850			U32_TO_BE32(tcp->tcp_ts_recent,
17851			    (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
17852		}
17853
17854		/* Fill in SACK options */
17855		if (num_sack_blk > 0) {
17856			uchar_t *wptr = (uchar_t *)tcpha +
17857			    connp->conn_ht_ulp_len;
17858			sack_blk_t *tmp;
17859			int32_t	i;
17860
17861			wptr[0] = TCPOPT_NOP;
17862			wptr[1] = TCPOPT_NOP;
17863			wptr[2] = TCPOPT_SACK;
17864			wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
17865			    sizeof (sack_blk_t);
17866			wptr += TCPOPT_REAL_SACK_LEN;
17867
17868			tmp = tcp->tcp_sack_list;
17869			for (i = 0; i < num_sack_blk; i++) {
17870				U32_TO_BE32(tmp[i].begin, wptr);
17871				wptr += sizeof (tcp_seq);
17872				U32_TO_BE32(tmp[i].end, wptr);
17873				wptr += sizeof (tcp_seq);
17874			}
17875			tcpha->tha_offset_and_reserved +=
17876			    ((num_sack_blk * 2 + 1) << 4);
17877		}
17878
17879		ixa->ixa_pktlen = total_hdr_len;
17880
17881		if (ixa->ixa_flags & IXAF_IS_IPV4) {
17882			((ipha_t *)rptr)->ipha_length = htons(total_hdr_len);
17883		} else {
17884			ip6_t *ip6 = (ip6_t *)rptr;
17885
17886			ip6->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN);
17887		}
17888
17889		/*
17890		 * Prime pump for checksum calculation in IP.  Include the
17891		 * adjustment for a source route if any.
17892		 */
17893		data_length = tcp_hdr_len + connp->conn_sum;
17894		data_length = (data_length >> 16) + (data_length & 0xFFFF);
17895		tcpha->tha_sum = htons(data_length);
17896
17897		if (tcp->tcp_ip_forward_progress) {
17898			tcp->tcp_ip_forward_progress = B_FALSE;
17899			connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
17900		} else {
17901			connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
17902		}
17903		return (mp1);
17904	}
17905}
17906
17907/*
17908 * Hash list insertion routine for tcp_t structures. Each hash bucket
17909 * contains a list of tcp_t entries, and each entry is bound to a unique
17910 * port. If there are multiple tcp_t's that are bound to the same port, then
17911 * one of them will be linked into the hash bucket list, and the rest will
17912 * hang off of that one entry. For each port, entries bound to a specific IP
17913 * address will be inserted before those those bound to INADDR_ANY.
17914 */
17915static void
17916tcp_bind_hash_insert(tf_t *tbf, tcp_t *tcp, int caller_holds_lock)
17917{
17918	tcp_t	**tcpp;
17919	tcp_t	*tcpnext;
17920	tcp_t	*tcphash;
17921	conn_t	*connp = tcp->tcp_connp;
17922	conn_t	*connext;
17923
17924	if (tcp->tcp_ptpbhn != NULL) {
17925		ASSERT(!caller_holds_lock);
17926		tcp_bind_hash_remove(tcp);
17927	}
17928	tcpp = &tbf->tf_tcp;
17929	if (!caller_holds_lock) {
17930		mutex_enter(&tbf->tf_lock);
17931	} else {
17932		ASSERT(MUTEX_HELD(&tbf->tf_lock));
17933	}
17934	tcphash = tcpp[0];
17935	tcpnext = NULL;
17936	if (tcphash != NULL) {
17937		/* Look for an entry using the same port */
17938		while ((tcphash = tcpp[0]) != NULL &&
17939		    connp->conn_lport != tcphash->tcp_connp->conn_lport)
17940			tcpp = &(tcphash->tcp_bind_hash);
17941
17942		/* The port was not found, just add to the end */
17943		if (tcphash == NULL)
17944			goto insert;
17945
17946		/*
17947		 * OK, there already exists an entry bound to the
17948		 * same port.
17949		 *
17950		 * If the new tcp bound to the INADDR_ANY address
17951		 * and the first one in the list is not bound to
17952		 * INADDR_ANY we skip all entries until we find the
17953		 * first one bound to INADDR_ANY.
17954		 * This makes sure that applications binding to a
17955		 * specific address get preference over those binding to
17956		 * INADDR_ANY.
17957		 */
17958		tcpnext = tcphash;
17959		connext = tcpnext->tcp_connp;
17960		tcphash = NULL;
17961		if (V6_OR_V4_INADDR_ANY(connp->conn_bound_addr_v6) &&
17962		    !V6_OR_V4_INADDR_ANY(connext->conn_bound_addr_v6)) {
17963			while ((tcpnext = tcpp[0]) != NULL) {
17964				connext = tcpnext->tcp_connp;
17965				if (!V6_OR_V4_INADDR_ANY(
17966				    connext->conn_bound_addr_v6))
17967					tcpp = &(tcpnext->tcp_bind_hash_port);
17968				else
17969					break;
17970			}
17971			if (tcpnext != NULL) {
17972				tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash_port;
17973				tcphash = tcpnext->tcp_bind_hash;
17974				if (tcphash != NULL) {
17975					tcphash->tcp_ptpbhn =
17976					    &(tcp->tcp_bind_hash);
17977					tcpnext->tcp_bind_hash = NULL;
17978				}
17979			}
17980		} else {
17981			tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash_port;
17982			tcphash = tcpnext->tcp_bind_hash;
17983			if (tcphash != NULL) {
17984				tcphash->tcp_ptpbhn =
17985				    &(tcp->tcp_bind_hash);
17986				tcpnext->tcp_bind_hash = NULL;
17987			}
17988		}
17989	}
17990insert:
17991	tcp->tcp_bind_hash_port = tcpnext;
17992	tcp->tcp_bind_hash = tcphash;
17993	tcp->tcp_ptpbhn = tcpp;
17994	tcpp[0] = tcp;
17995	if (!caller_holds_lock)
17996		mutex_exit(&tbf->tf_lock);
17997}
17998
17999/*
18000 * Hash list removal routine for tcp_t structures.
18001 */
18002static void
18003tcp_bind_hash_remove(tcp_t *tcp)
18004{
18005	tcp_t	*tcpnext;
18006	kmutex_t *lockp;
18007	tcp_stack_t	*tcps = tcp->tcp_tcps;
18008	conn_t		*connp = tcp->tcp_connp;
18009
18010	if (tcp->tcp_ptpbhn == NULL)
18011		return;
18012
18013	/*
18014	 * Extract the lock pointer in case there are concurrent
18015	 * hash_remove's for this instance.
18016	 */
18017	ASSERT(connp->conn_lport != 0);
18018	lockp = &tcps->tcps_bind_fanout[TCP_BIND_HASH(
18019	    connp->conn_lport)].tf_lock;
18020
18021	ASSERT(lockp != NULL);
18022	mutex_enter(lockp);
18023	if (tcp->tcp_ptpbhn) {
18024		tcpnext = tcp->tcp_bind_hash_port;
18025		if (tcpnext != NULL) {
18026			tcp->tcp_bind_hash_port = NULL;
18027			tcpnext->tcp_ptpbhn = tcp->tcp_ptpbhn;
18028			tcpnext->tcp_bind_hash = tcp->tcp_bind_hash;
18029			if (tcpnext->tcp_bind_hash != NULL) {
18030				tcpnext->tcp_bind_hash->tcp_ptpbhn =
18031				    &(tcpnext->tcp_bind_hash);
18032				tcp->tcp_bind_hash = NULL;
18033			}
18034		} else if ((tcpnext = tcp->tcp_bind_hash) != NULL) {
18035			tcpnext->tcp_ptpbhn = tcp->tcp_ptpbhn;
18036			tcp->tcp_bind_hash = NULL;
18037		}
18038		*tcp->tcp_ptpbhn = tcpnext;
18039		tcp->tcp_ptpbhn = NULL;
18040	}
18041	mutex_exit(lockp);
18042}
18043
18044
18045/*
18046 * Hash list lookup routine for tcp_t structures.
18047 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
18048 */
18049static tcp_t *
18050tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps)
18051{
18052	tf_t	*tf;
18053	tcp_t	*tcp;
18054
18055	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
18056	mutex_enter(&tf->tf_lock);
18057	for (tcp = tf->tf_tcp; tcp != NULL;
18058	    tcp = tcp->tcp_acceptor_hash) {
18059		if (tcp->tcp_acceptor_id == id) {
18060			CONN_INC_REF(tcp->tcp_connp);
18061			mutex_exit(&tf->tf_lock);
18062			return (tcp);
18063		}
18064	}
18065	mutex_exit(&tf->tf_lock);
18066	return (NULL);
18067}
18068
18069
18070/*
18071 * Hash list insertion routine for tcp_t structures.
18072 */
18073void
18074tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp)
18075{
18076	tf_t	*tf;
18077	tcp_t	**tcpp;
18078	tcp_t	*tcpnext;
18079	tcp_stack_t	*tcps = tcp->tcp_tcps;
18080
18081	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
18082
18083	if (tcp->tcp_ptpahn != NULL)
18084		tcp_acceptor_hash_remove(tcp);
18085	tcpp = &tf->tf_tcp;
18086	mutex_enter(&tf->tf_lock);
18087	tcpnext = tcpp[0];
18088	if (tcpnext)
18089		tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash;
18090	tcp->tcp_acceptor_hash = tcpnext;
18091	tcp->tcp_ptpahn = tcpp;
18092	tcpp[0] = tcp;
18093	tcp->tcp_acceptor_lockp = &tf->tf_lock;	/* For tcp_*_hash_remove */
18094	mutex_exit(&tf->tf_lock);
18095}
18096
18097/*
18098 * Hash list removal routine for tcp_t structures.
18099 */
18100static void
18101tcp_acceptor_hash_remove(tcp_t *tcp)
18102{
18103	tcp_t	*tcpnext;
18104	kmutex_t *lockp;
18105
18106	/*
18107	 * Extract the lock pointer in case there are concurrent
18108	 * hash_remove's for this instance.
18109	 */
18110	lockp = tcp->tcp_acceptor_lockp;
18111
18112	if (tcp->tcp_ptpahn == NULL)
18113		return;
18114
18115	ASSERT(lockp != NULL);
18116	mutex_enter(lockp);
18117	if (tcp->tcp_ptpahn) {
18118		tcpnext = tcp->tcp_acceptor_hash;
18119		if (tcpnext) {
18120			tcpnext->tcp_ptpahn = tcp->tcp_ptpahn;
18121			tcp->tcp_acceptor_hash = NULL;
18122		}
18123		*tcp->tcp_ptpahn = tcpnext;
18124		tcp->tcp_ptpahn = NULL;
18125	}
18126	mutex_exit(lockp);
18127	tcp->tcp_acceptor_lockp = NULL;
18128}
18129
18130/*
18131 * Type three generator adapted from the random() function in 4.4 BSD:
18132 */
18133
18134/*
18135 * Copyright (c) 1983, 1993
18136 *	The Regents of the University of California.  All rights reserved.
18137 *
18138 * Redistribution and use in source and binary forms, with or without
18139 * modification, are permitted provided that the following conditions
18140 * are met:
18141 * 1. Redistributions of source code must retain the above copyright
18142 *    notice, this list of conditions and the following disclaimer.
18143 * 2. Redistributions in binary form must reproduce the above copyright
18144 *    notice, this list of conditions and the following disclaimer in the
18145 *    documentation and/or other materials provided with the distribution.
18146 * 3. All advertising materials mentioning features or use of this software
18147 *    must display the following acknowledgement:
18148 *	This product includes software developed by the University of
18149 *	California, Berkeley and its contributors.
18150 * 4. Neither the name of the University nor the names of its contributors
18151 *    may be used to endorse or promote products derived from this software
18152 *    without specific prior written permission.
18153 *
18154 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18155 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18156 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18157 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
18158 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18159 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
18160 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
18161 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
18162 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
18163 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
18164 * SUCH DAMAGE.
18165 */
18166
18167/* Type 3 -- x**31 + x**3 + 1 */
18168#define	DEG_3		31
18169#define	SEP_3		3
18170
18171
18172/* Protected by tcp_random_lock */
18173static int tcp_randtbl[DEG_3 + 1];
18174
18175static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1];
18176static int *tcp_random_rptr = &tcp_randtbl[1];
18177
18178static int *tcp_random_state = &tcp_randtbl[1];
18179static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1];
18180
18181kmutex_t tcp_random_lock;
18182
18183void
18184tcp_random_init(void)
18185{
18186	int i;
18187	hrtime_t hrt;
18188	time_t wallclock;
18189	uint64_t result;
18190
18191	/*
18192	 * Use high-res timer and current time for seed.  Gethrtime() returns
18193	 * a longlong, which may contain resolution down to nanoseconds.
18194	 * The current time will either be a 32-bit or a 64-bit quantity.
18195	 * XOR the two together in a 64-bit result variable.
18196	 * Convert the result to a 32-bit value by multiplying the high-order
18197	 * 32-bits by the low-order 32-bits.
18198	 */
18199
18200	hrt = gethrtime();
18201	(void) drv_getparm(TIME, &wallclock);
18202	result = (uint64_t)wallclock ^ (uint64_t)hrt;
18203	mutex_enter(&tcp_random_lock);
18204	tcp_random_state[0] = ((result >> 32) & 0xffffffff) *
18205	    (result & 0xffffffff);
18206
18207	for (i = 1; i < DEG_3; i++)
18208		tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1]
18209		    + 12345;
18210	tcp_random_fptr = &tcp_random_state[SEP_3];
18211	tcp_random_rptr = &tcp_random_state[0];
18212	mutex_exit(&tcp_random_lock);
18213	for (i = 0; i < 10 * DEG_3; i++)
18214		(void) tcp_random();
18215}
18216
18217/*
18218 * tcp_random: Return a random number in the range [1 - (128K + 1)].
18219 * This range is selected to be approximately centered on TCP_ISS / 2,
18220 * and easy to compute. We get this value by generating a 32-bit random
18221 * number, selecting out the high-order 17 bits, and then adding one so
18222 * that we never return zero.
18223 */
18224int
18225tcp_random(void)
18226{
18227	int i;
18228
18229	mutex_enter(&tcp_random_lock);
18230	*tcp_random_fptr += *tcp_random_rptr;
18231
18232	/*
18233	 * The high-order bits are more random than the low-order bits,
18234	 * so we select out the high-order 17 bits and add one so that
18235	 * we never return zero.
18236	 */
18237	i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1;
18238	if (++tcp_random_fptr >= tcp_random_end_ptr) {
18239		tcp_random_fptr = tcp_random_state;
18240		++tcp_random_rptr;
18241	} else if (++tcp_random_rptr >= tcp_random_end_ptr)
18242		tcp_random_rptr = tcp_random_state;
18243
18244	mutex_exit(&tcp_random_lock);
18245	return (i);
18246}
18247
18248static int
18249tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp, int *do_disconnectp,
18250    int *t_errorp, int *sys_errorp)
18251{
18252	int error;
18253	int is_absreq_failure;
18254	t_scalar_t *opt_lenp;
18255	t_scalar_t opt_offset;
18256	int prim_type;
18257	struct T_conn_req *tcreqp;
18258	struct T_conn_res *tcresp;
18259	cred_t *cr;
18260
18261	/*
18262	 * All Solaris components should pass a db_credp
18263	 * for this TPI message, hence we ASSERT.
18264	 * But in case there is some other M_PROTO that looks
18265	 * like a TPI message sent by some other kernel
18266	 * component, we check and return an error.
18267	 */
18268	cr = msg_getcred(mp, NULL);
18269	ASSERT(cr != NULL);
18270	if (cr == NULL)
18271		return (-1);
18272
18273	prim_type = ((union T_primitives *)mp->b_rptr)->type;
18274	ASSERT(prim_type == T_CONN_REQ || prim_type == O_T_CONN_RES ||
18275	    prim_type == T_CONN_RES);
18276
18277	switch (prim_type) {
18278	case T_CONN_REQ:
18279		tcreqp = (struct T_conn_req *)mp->b_rptr;
18280		opt_offset = tcreqp->OPT_offset;
18281		opt_lenp = (t_scalar_t *)&tcreqp->OPT_length;
18282		break;
18283	case O_T_CONN_RES:
18284	case T_CONN_RES:
18285		tcresp = (struct T_conn_res *)mp->b_rptr;
18286		opt_offset = tcresp->OPT_offset;
18287		opt_lenp = (t_scalar_t *)&tcresp->OPT_length;
18288		break;
18289	}
18290
18291	*t_errorp = 0;
18292	*sys_errorp = 0;
18293	*do_disconnectp = 0;
18294
18295	error = tpi_optcom_buf(tcp->tcp_connp->conn_wq, mp, opt_lenp,
18296	    opt_offset, cr, &tcp_opt_obj,
18297	    NULL, &is_absreq_failure);
18298
18299	switch (error) {
18300	case  0:		/* no error */
18301		ASSERT(is_absreq_failure == 0);
18302		return (0);
18303	case ENOPROTOOPT:
18304		*t_errorp = TBADOPT;
18305		break;
18306	case EACCES:
18307		*t_errorp = TACCES;
18308		break;
18309	default:
18310		*t_errorp = TSYSERR; *sys_errorp = error;
18311		break;
18312	}
18313	if (is_absreq_failure != 0) {
18314		/*
18315		 * The connection request should get the local ack
18316		 * T_OK_ACK and then a T_DISCON_IND.
18317		 */
18318		*do_disconnectp = 1;
18319	}
18320	return (-1);
18321}
18322
18323/*
18324 * Split this function out so that if the secret changes, I'm okay.
18325 *
18326 * Initialize the tcp_iss_cookie and tcp_iss_key.
18327 */
18328
18329#define	PASSWD_SIZE 16  /* MUST be multiple of 4 */
18330
18331static void
18332tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
18333{
18334	struct {
18335		int32_t current_time;
18336		uint32_t randnum;
18337		uint16_t pad;
18338		uint8_t ether[6];
18339		uint8_t passwd[PASSWD_SIZE];
18340	} tcp_iss_cookie;
18341	time_t t;
18342
18343	/*
18344	 * Start with the current absolute time.
18345	 */
18346	(void) drv_getparm(TIME, &t);
18347	tcp_iss_cookie.current_time = t;
18348
18349	/*
18350	 * XXX - Need a more random number per RFC 1750, not this crap.
18351	 * OTOH, if what follows is pretty random, then I'm in better shape.
18352	 */
18353	tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
18354	tcp_iss_cookie.pad = 0x365c;  /* Picked from HMAC pad values. */
18355
18356	/*
18357	 * The cpu_type_info is pretty non-random.  Ugggh.  It does serve
18358	 * as a good template.
18359	 */
18360	bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
18361	    min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));
18362
18363	/*
18364	 * The pass-phrase.  Normally this is supplied by user-called NDD.
18365	 */
18366	bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));
18367
18368	/*
18369	 * See 4010593 if this section becomes a problem again,
18370	 * but the local ethernet address is useful here.
18371	 */
18372	(void) localetheraddr(NULL,
18373	    (struct ether_addr *)&tcp_iss_cookie.ether);
18374
18375	/*
18376	 * Hash 'em all together.  The MD5Final is called per-connection.
18377	 */
18378	mutex_enter(&tcps->tcps_iss_key_lock);
18379	MD5Init(&tcps->tcps_iss_key);
18380	MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
18381	    sizeof (tcp_iss_cookie));
18382	mutex_exit(&tcps->tcps_iss_key_lock);
18383}
18384
18385/*
18386 * Set the RFC 1948 pass phrase
18387 */
18388/* ARGSUSED */
18389static int
18390tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
18391    cred_t *cr)
18392{
18393	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;
18394
18395	/*
18396	 * Basically, value contains a new pass phrase.  Pass it along!
18397	 */
18398	tcp_iss_key_init((uint8_t *)value, strlen(value), tcps);
18399	return (0);
18400}
18401
18402/* ARGSUSED */
18403static int
18404tcp_sack_info_constructor(void *buf, void *cdrarg, int kmflags)
18405{
18406	bzero(buf, sizeof (tcp_sack_info_t));
18407	return (0);
18408}
18409
18410/*
18411 * Called by IP when IP is loaded into the kernel
18412 */
18413void
18414tcp_ddi_g_init(void)
18415{
18416	tcp_timercache = kmem_cache_create("tcp_timercache",
18417	    sizeof (tcp_timer_t) + sizeof (mblk_t), 0,
18418	    NULL, NULL, NULL, NULL, NULL, 0);
18419
18420	tcp_sack_info_cache = kmem_cache_create("tcp_sack_info_cache",
18421	    sizeof (tcp_sack_info_t), 0,
18422	    tcp_sack_info_constructor, NULL, NULL, NULL, NULL, 0);
18423
18424	mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL);
18425
18426	/* Initialize the random number generator */
18427	tcp_random_init();
18428
18429	/* A single callback independently of how many netstacks we have */
18430	ip_squeue_init(tcp_squeue_add);
18431
18432	tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics);
18433
18434	tcp_squeue_flag = tcp_squeue_switch(tcp_squeue_wput);
18435
18436	/*
18437	 * We want to be informed each time a stack is created or
18438	 * destroyed in the kernel, so we can maintain the
18439	 * set of tcp_stack_t's.
18440	 */
18441	netstack_register(NS_TCP, tcp_stack_init, NULL, tcp_stack_fini);
18442}
18443
18444
18445#define	INET_NAME	"ip"
18446
18447/*
18448 * Initialize the TCP stack instance.
18449 */
18450static void *
18451tcp_stack_init(netstackid_t stackid, netstack_t *ns)
18452{
18453	tcp_stack_t	*tcps;
18454	tcpparam_t	*pa;
18455	int		i;
18456	int		error = 0;
18457	major_t		major;
18458
18459	tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP);
18460	tcps->tcps_netstack = ns;
18461
18462	/* Initialize locks */
18463	mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL);
18464	mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL);
18465
18466	tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS;
18467	tcps->tcps_g_epriv_ports[0] = 2049;
18468	tcps->tcps_g_epriv_ports[1] = 4045;
18469	tcps->tcps_min_anonpriv_port = 512;
18470
18471	tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) *
18472	    TCP_BIND_FANOUT_SIZE, KM_SLEEP);
18473	tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) *
18474	    TCP_FANOUT_SIZE, KM_SLEEP);
18475
18476	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
18477		mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL,
18478		    MUTEX_DEFAULT, NULL);
18479	}
18480
18481	for (i = 0; i < TCP_FANOUT_SIZE; i++) {
18482		mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL,
18483		    MUTEX_DEFAULT, NULL);
18484	}
18485
18486	/* TCP's IPsec code calls the packet dropper. */
18487	ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement");
18488
18489	pa = (tcpparam_t *)kmem_alloc(sizeof (lcl_tcp_param_arr), KM_SLEEP);
18490	tcps->tcps_params = pa;
18491	bcopy(lcl_tcp_param_arr, tcps->tcps_params, sizeof (lcl_tcp_param_arr));
18492
18493	(void) tcp_param_register(&tcps->tcps_g_nd, tcps->tcps_params,
18494	    A_CNT(lcl_tcp_param_arr), tcps);
18495
18496	/*
18497	 * Note: To really walk the device tree you need the devinfo
18498	 * pointer to your device which is only available after probe/attach.
18499	 * The following is safe only because it uses ddi_root_node()
18500	 */
18501	tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr,
18502	    tcp_opt_obj.odb_opt_arr_cnt);
18503
18504	/*
18505	 * Initialize RFC 1948 secret values.  This will probably be reset once
18506	 * by the boot scripts.
18507	 *
18508	 * Use NULL name, as the name is caught by the new lockstats.
18509	 *
18510	 * Initialize with some random, non-guessable string, like the global
18511	 * T_INFO_ACK.
18512	 */
18513
18514	tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack,
18515	    sizeof (tcp_g_t_info_ack), tcps);
18516
18517	tcps->tcps_kstat = tcp_kstat2_init(stackid, &tcps->tcps_statistics);
18518	tcps->tcps_mibkp = tcp_kstat_init(stackid, tcps);
18519
18520	major = mod_name_to_major(INET_NAME);
18521	error = ldi_ident_from_major(major, &tcps->tcps_ldi_ident);
18522	ASSERT(error == 0);
18523	tcps->tcps_ixa_cleanup_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
18524	ASSERT(tcps->tcps_ixa_cleanup_mp != NULL);
18525	cv_init(&tcps->tcps_ixa_cleanup_cv, NULL, CV_DEFAULT, NULL);
18526	mutex_init(&tcps->tcps_ixa_cleanup_lock, NULL, MUTEX_DEFAULT, NULL);
18527
18528	return (tcps);
18529}
18530
18531/*
18532 * Called when the IP module is about to be unloaded.
18533 */
18534void
18535tcp_ddi_g_destroy(void)
18536{
18537	tcp_g_kstat_fini(tcp_g_kstat);
18538	tcp_g_kstat = NULL;
18539	bzero(&tcp_g_statistics, sizeof (tcp_g_statistics));
18540
18541	mutex_destroy(&tcp_random_lock);
18542
18543	kmem_cache_destroy(tcp_timercache);
18544	kmem_cache_destroy(tcp_sack_info_cache);
18545
18546	netstack_unregister(NS_TCP);
18547}
18548
18549/*
18550 * Free the TCP stack instance.
18551 */
18552static void
18553tcp_stack_fini(netstackid_t stackid, void *arg)
18554{
18555	tcp_stack_t *tcps = (tcp_stack_t *)arg;
18556	int i;
18557
18558	freeb(tcps->tcps_ixa_cleanup_mp);
18559	tcps->tcps_ixa_cleanup_mp = NULL;
18560	cv_destroy(&tcps->tcps_ixa_cleanup_cv);
18561	mutex_destroy(&tcps->tcps_ixa_cleanup_lock);
18562
18563	nd_free(&tcps->tcps_g_nd);
18564	kmem_free(tcps->tcps_params, sizeof (lcl_tcp_param_arr));
18565	tcps->tcps_params = NULL;
18566	kmem_free(tcps->tcps_wroff_xtra_param, sizeof (tcpparam_t));
18567	tcps->tcps_wroff_xtra_param = NULL;
18568
18569	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
18570		ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL);
18571		mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock);
18572	}
18573
18574	for (i = 0; i < TCP_FANOUT_SIZE; i++) {
18575		ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL);
18576		mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock);
18577	}
18578
18579	kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE);
18580	tcps->tcps_bind_fanout = NULL;
18581
18582	kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) * TCP_FANOUT_SIZE);
18583	tcps->tcps_acceptor_fanout = NULL;
18584
18585	mutex_destroy(&tcps->tcps_iss_key_lock);
18586	mutex_destroy(&tcps->tcps_epriv_port_lock);
18587
18588	ip_drop_unregister(&tcps->tcps_dropper);
18589
18590	tcp_kstat2_fini(stackid, tcps->tcps_kstat);
18591	tcps->tcps_kstat = NULL;
18592	bzero(&tcps->tcps_statistics, sizeof (tcps->tcps_statistics));
18593
18594	tcp_kstat_fini(stackid, tcps->tcps_mibkp);
18595	tcps->tcps_mibkp = NULL;
18596
18597	ldi_ident_release(tcps->tcps_ldi_ident);
18598	kmem_free(tcps, sizeof (*tcps));
18599}
18600
18601/*
18602 * Generate ISS, taking into account NDD changes may happen halfway through.
18603 * (If the iss is not zero, set it.)
18604 */
18605
18606static void
18607tcp_iss_init(tcp_t *tcp)
18608{
18609	MD5_CTX context;
18610	struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg;
18611	uint32_t answer[4];
18612	tcp_stack_t	*tcps = tcp->tcp_tcps;
18613	conn_t		*connp = tcp->tcp_connp;
18614
18615	tcps->tcps_iss_incr_extra += (ISS_INCR >> 1);
18616	tcp->tcp_iss = tcps->tcps_iss_incr_extra;
18617	switch (tcps->tcps_strong_iss) {
18618	case 2:
18619		mutex_enter(&tcps->tcps_iss_key_lock);
18620		context = tcps->tcps_iss_key;
18621		mutex_exit(&tcps->tcps_iss_key_lock);
18622		arg.ports = connp->conn_ports;
18623		arg.src = connp->conn_laddr_v6;
18624		arg.dst = connp->conn_faddr_v6;
18625		MD5Update(&context, (uchar_t *)&arg, sizeof (arg));
18626		MD5Final((uchar_t *)answer, &context);
18627		tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3];
18628		/*
18629		 * Now that we've hashed into a unique per-connection sequence
18630		 * space, add a random increment per strong_iss == 1.  So I
18631		 * guess we'll have to...
18632		 */
18633		/* FALLTHRU */
18634	case 1:
18635		tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random();
18636		break;
18637	default:
18638		tcp->tcp_iss += (uint32_t)gethrestime_sec() * ISS_INCR;
18639		break;
18640	}
18641	tcp->tcp_valid_bits = TCP_ISS_VALID;
18642	tcp->tcp_fss = tcp->tcp_iss - 1;
18643	tcp->tcp_suna = tcp->tcp_iss;
18644	tcp->tcp_snxt = tcp->tcp_iss + 1;
18645	tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
18646	tcp->tcp_csuna = tcp->tcp_snxt;
18647}
18648
18649/*
18650 * Exported routine for extracting active tcp connection status.
18651 *
18652 * This is used by the Solaris Cluster Networking software to
18653 * gather a list of connections that need to be forwarded to
18654 * specific nodes in the cluster when configuration changes occur.
18655 *
18656 * The callback is invoked for each tcp_t structure from all netstacks,
18657 * if 'stack_id' is less than 0. Otherwise, only for tcp_t structures
18658 * from the netstack with the specified stack_id. Returning
18659 * non-zero from the callback routine terminates the search.
18660 */
18661int
18662cl_tcp_walk_list(netstackid_t stack_id,
18663    int (*cl_callback)(cl_tcp_info_t *, void *), void *arg)
18664{
18665	netstack_handle_t nh;
18666	netstack_t *ns;
18667	int ret = 0;
18668
18669	if (stack_id >= 0) {
18670		if ((ns = netstack_find_by_stackid(stack_id)) == NULL)
18671			return (EINVAL);
18672
18673		ret = cl_tcp_walk_list_stack(cl_callback, arg,
18674		    ns->netstack_tcp);
18675		netstack_rele(ns);
18676		return (ret);
18677	}
18678
18679	netstack_next_init(&nh);
18680	while ((ns = netstack_next(&nh)) != NULL) {
18681		ret = cl_tcp_walk_list_stack(cl_callback, arg,
18682		    ns->netstack_tcp);
18683		netstack_rele(ns);
18684	}
18685	netstack_next_fini(&nh);
18686	return (ret);
18687}
18688
18689static int
18690cl_tcp_walk_list_stack(int (*callback)(cl_tcp_info_t *, void *), void *arg,
18691    tcp_stack_t *tcps)
18692{
18693	tcp_t *tcp;
18694	cl_tcp_info_t	cl_tcpi;
18695	connf_t	*connfp;
18696	conn_t	*connp;
18697	int	i;
18698	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;
18699
18700	ASSERT(callback != NULL);
18701
18702	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
18703		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
18704		connp = NULL;
18705
18706		while ((connp =
18707		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
18708
18709			tcp = connp->conn_tcp;
18710			cl_tcpi.cl_tcpi_version = CL_TCPI_V1;
18711			cl_tcpi.cl_tcpi_ipversion = connp->conn_ipversion;
18712			cl_tcpi.cl_tcpi_state = tcp->tcp_state;
18713			cl_tcpi.cl_tcpi_lport = connp->conn_lport;
18714			cl_tcpi.cl_tcpi_fport = connp->conn_fport;
18715			cl_tcpi.cl_tcpi_laddr_v6 = connp->conn_laddr_v6;
18716			cl_tcpi.cl_tcpi_faddr_v6 = connp->conn_faddr_v6;
18717
18718			/*
18719			 * If the callback returns non-zero
18720			 * we terminate the traversal.
18721			 */
18722			if ((*callback)(&cl_tcpi, arg) != 0) {
18723				CONN_DEC_REF(tcp->tcp_connp);
18724				return (1);
18725			}
18726		}
18727	}
18728
18729	return (0);
18730}
18731
18732/*
18733 * Macros used for accessing the different types of sockaddr
18734 * structures inside a tcp_ioc_abort_conn_t.
18735 */
18736#define	TCP_AC_V4LADDR(acp) ((sin_t *)&(acp)->ac_local)
18737#define	TCP_AC_V4RADDR(acp) ((sin_t *)&(acp)->ac_remote)
18738#define	TCP_AC_V4LOCAL(acp) (TCP_AC_V4LADDR(acp)->sin_addr.s_addr)
18739#define	TCP_AC_V4REMOTE(acp) (TCP_AC_V4RADDR(acp)->sin_addr.s_addr)
18740#define	TCP_AC_V4LPORT(acp) (TCP_AC_V4LADDR(acp)->sin_port)
18741#define	TCP_AC_V4RPORT(acp) (TCP_AC_V4RADDR(acp)->sin_port)
18742#define	TCP_AC_V6LADDR(acp) ((sin6_t *)&(acp)->ac_local)
18743#define	TCP_AC_V6RADDR(acp) ((sin6_t *)&(acp)->ac_remote)
18744#define	TCP_AC_V6LOCAL(acp) (TCP_AC_V6LADDR(acp)->sin6_addr)
18745#define	TCP_AC_V6REMOTE(acp) (TCP_AC_V6RADDR(acp)->sin6_addr)
18746#define	TCP_AC_V6LPORT(acp) (TCP_AC_V6LADDR(acp)->sin6_port)
18747#define	TCP_AC_V6RPORT(acp) (TCP_AC_V6RADDR(acp)->sin6_port)
18748
18749/*
18750 * Return the correct error code to mimic the behavior
18751 * of a connection reset.
18752 */
18753#define	TCP_AC_GET_ERRCODE(state, err) {	\
18754		switch ((state)) {		\
18755		case TCPS_SYN_SENT:		\
18756		case TCPS_SYN_RCVD:		\
18757			(err) = ECONNREFUSED;	\
18758			break;			\
18759		case TCPS_ESTABLISHED:		\
18760		case TCPS_FIN_WAIT_1:		\
18761		case TCPS_FIN_WAIT_2:		\
18762		case TCPS_CLOSE_WAIT:		\
18763			(err) = ECONNRESET;	\
18764			break;			\
18765		case TCPS_CLOSING:		\
18766		case TCPS_LAST_ACK:		\
18767		case TCPS_TIME_WAIT:		\
18768			(err) = 0;		\
18769			break;			\
18770		default:			\
18771			(err) = ENXIO;		\
18772		}				\
18773	}
18774
18775/*
18776 * Check if a tcp structure matches the info in acp.
18777 */
18778#define	TCP_AC_ADDR_MATCH(acp, connp, tcp)			\
18779	(((acp)->ac_local.ss_family == AF_INET) ?		\
18780	((TCP_AC_V4LOCAL((acp)) == INADDR_ANY ||		\
18781	TCP_AC_V4LOCAL((acp)) == (connp)->conn_laddr_v4) &&	\
18782	(TCP_AC_V4REMOTE((acp)) == INADDR_ANY ||		\
18783	TCP_AC_V4REMOTE((acp)) == (connp)->conn_faddr_v4) &&	\
18784	(TCP_AC_V4LPORT((acp)) == 0 ||				\
18785	TCP_AC_V4LPORT((acp)) == (connp)->conn_lport) &&	\
18786	(TCP_AC_V4RPORT((acp)) == 0 ||				\
18787	TCP_AC_V4RPORT((acp)) == (connp)->conn_fport) &&	\
18788	(acp)->ac_start <= (tcp)->tcp_state &&			\
18789	(acp)->ac_end >= (tcp)->tcp_state) :			\
18790	((IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL((acp))) ||	\
18791	IN6_ARE_ADDR_EQUAL(&TCP_AC_V6LOCAL((acp)),		\
18792	&(connp)->conn_laddr_v6)) &&				\
18793	(IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE((acp))) ||	\
18794	IN6_ARE_ADDR_EQUAL(&TCP_AC_V6REMOTE((acp)),		\
18795	&(connp)->conn_faddr_v6)) &&				\
18796	(TCP_AC_V6LPORT((acp)) == 0 ||				\
18797	TCP_AC_V6LPORT((acp)) == (connp)->conn_lport) &&	\
18798	(TCP_AC_V6RPORT((acp)) == 0 ||				\
18799	TCP_AC_V6RPORT((acp)) == (connp)->conn_fport) &&	\
18800	(acp)->ac_start <= (tcp)->tcp_state &&			\
18801	(acp)->ac_end >= (tcp)->tcp_state))
18802
18803#define	TCP_AC_MATCH(acp, connp, tcp)				\
18804	(((acp)->ac_zoneid == ALL_ZONES ||			\
18805	(acp)->ac_zoneid == (connp)->conn_zoneid) ?		\
18806	TCP_AC_ADDR_MATCH(acp, connp, tcp) : 0)
18807
18808/*
18809 * Build a message containing a tcp_ioc_abort_conn_t structure
18810 * which is filled in with information from acp and tp.
18811 */
18812static mblk_t *
18813tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *acp, tcp_t *tp)
18814{
18815	mblk_t *mp;
18816	tcp_ioc_abort_conn_t *tacp;
18817
18818	mp = allocb(sizeof (uint32_t) + sizeof (*acp), BPRI_LO);
18819	if (mp == NULL)
18820		return (NULL);
18821
18822	*((uint32_t *)mp->b_rptr) = TCP_IOC_ABORT_CONN;
18823	tacp = (tcp_ioc_abort_conn_t *)((uchar_t *)mp->b_rptr +
18824	    sizeof (uint32_t));
18825
18826	tacp->ac_start = acp->ac_start;
18827	tacp->ac_end = acp->ac_end;
18828	tacp->ac_zoneid = acp->ac_zoneid;
18829
18830	if (acp->ac_local.ss_family == AF_INET) {
18831		tacp->ac_local.ss_family = AF_INET;
18832		tacp->ac_remote.ss_family = AF_INET;
18833		TCP_AC_V4LOCAL(tacp) = tp->tcp_connp->conn_laddr_v4;
18834		TCP_AC_V4REMOTE(tacp) = tp->tcp_connp->conn_faddr_v4;
18835		TCP_AC_V4LPORT(tacp) = tp->tcp_connp->conn_lport;
18836		TCP_AC_V4RPORT(tacp) = tp->tcp_connp->conn_fport;
18837	} else {
18838		tacp->ac_local.ss_family = AF_INET6;
18839		tacp->ac_remote.ss_family = AF_INET6;
18840		TCP_AC_V6LOCAL(tacp) = tp->tcp_connp->conn_laddr_v6;
18841		TCP_AC_V6REMOTE(tacp) = tp->tcp_connp->conn_faddr_v6;
18842		TCP_AC_V6LPORT(tacp) = tp->tcp_connp->conn_lport;
18843		TCP_AC_V6RPORT(tacp) = tp->tcp_connp->conn_fport;
18844	}
18845	mp->b_wptr = (uchar_t *)mp->b_rptr + sizeof (uint32_t) + sizeof (*acp);
18846	return (mp);
18847}
18848
18849/*
18850 * Print a tcp_ioc_abort_conn_t structure.
18851 */
18852static void
18853tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *acp)
18854{
18855	char lbuf[128];
18856	char rbuf[128];
18857	sa_family_t af;
18858	in_port_t lport, rport;
18859	ushort_t logflags;
18860
18861	af = acp->ac_local.ss_family;
18862
18863	if (af == AF_INET) {
18864		(void) inet_ntop(af, (const void *)&TCP_AC_V4LOCAL(acp),
18865		    lbuf, 128);
18866		(void) inet_ntop(af, (const void *)&TCP_AC_V4REMOTE(acp),
18867		    rbuf, 128);
18868		lport = ntohs(TCP_AC_V4LPORT(acp));
18869		rport = ntohs(TCP_AC_V4RPORT(acp));
18870	} else {
18871		(void) inet_ntop(af, (const void *)&TCP_AC_V6LOCAL(acp),
18872		    lbuf, 128);
18873		(void) inet_ntop(af, (const void *)&TCP_AC_V6REMOTE(acp),
18874		    rbuf, 128);
18875		lport = ntohs(TCP_AC_V6LPORT(acp));
18876		rport = ntohs(TCP_AC_V6RPORT(acp));
18877	}
18878
18879	logflags = SL_TRACE | SL_NOTE;
18880	/*
18881	 * Don't print this message to the console if the operation was done
18882	 * to a non-global zone.
18883	 */
18884	if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES)
18885		logflags |= SL_CONSOLE;
18886	(void) strlog(TCP_MOD_ID, 0, 1, logflags,
18887	    "TCP_IOC_ABORT_CONN: local = %s:%d, remote = %s:%d, "
18888	    "start = %d, end = %d\n", lbuf, lport, rbuf, rport,
18889	    acp->ac_start, acp->ac_end);
18890}
18891
18892/*
18893 * Called using SQ_FILL when a message built using
18894 * tcp_ioctl_abort_build_msg is put into a queue.
18895 * Note that when we get here there is no wildcard in acp any more.
18896 */
18897/* ARGSUSED2 */
18898static void
18899tcp_ioctl_abort_handler(void *arg, mblk_t *mp, void *arg2,
18900    ip_recv_attr_t *dummy)
18901{
18902	conn_t			*connp = (conn_t *)arg;
18903	tcp_t			*tcp = connp->conn_tcp;
18904	tcp_ioc_abort_conn_t	*acp;
18905
18906	/*
18907	 * Don't accept any input on a closed tcp as this TCP logically does
18908	 * not exist on the system. Don't proceed further with this TCP.
18909	 * For eg. this packet could trigger another close of this tcp
18910	 * which would be disastrous for tcp_refcnt. tcp_close_detached /
18911	 * tcp_clean_death / tcp_closei_local must be called at most once
18912	 * on a TCP.
18913	 */
18914	if (tcp->tcp_state == TCPS_CLOSED ||
18915	    tcp->tcp_state == TCPS_BOUND) {
18916		freemsg(mp);
18917		return;
18918	}
18919
18920	acp = (tcp_ioc_abort_conn_t *)(mp->b_rptr + sizeof (uint32_t));
18921	if (tcp->tcp_state <= acp->ac_end) {
18922		/*
18923		 * If we get here, we are already on the correct
18924		 * squeue. This ioctl follows the following path
18925		 * tcp_wput -> tcp_wput_ioctl -> tcp_ioctl_abort_conn
18926		 * ->tcp_ioctl_abort->squeue_enter (if on a
18927		 * different squeue)
18928		 */
18929		int errcode;
18930
18931		TCP_AC_GET_ERRCODE(tcp->tcp_state, errcode);
18932		(void) tcp_clean_death(tcp, errcode, 26);
18933	}
18934	freemsg(mp);
18935}
18936
18937/*
18938 * Abort all matching connections on a hash chain.
18939 */
18940static int
18941tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *acp, int index, int *count,
18942    boolean_t exact, tcp_stack_t *tcps)
18943{
18944	int nmatch, err = 0;
18945	tcp_t *tcp;
18946	MBLKP mp, last, listhead = NULL;
18947	conn_t	*tconnp;
18948	connf_t	*connfp;
18949	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
18950
18951	connfp = &ipst->ips_ipcl_conn_fanout[index];
18952
18953startover:
18954	nmatch = 0;
18955
18956	mutex_enter(&connfp->connf_lock);
18957	for (tconnp = connfp->connf_head; tconnp != NULL;
18958	    tconnp = tconnp->conn_next) {
18959		tcp = tconnp->conn_tcp;
18960		/*
18961		 * We are missing a check on sin6_scope_id for linklocals here,
18962		 * but current usage is just for aborting based on zoneid
18963		 * for shared-IP zones.
18964		 */
18965		if (TCP_AC_MATCH(acp, tconnp, tcp)) {
18966			CONN_INC_REF(tconnp);
18967			mp = tcp_ioctl_abort_build_msg(acp, tcp);
18968			if (mp == NULL) {
18969				err = ENOMEM;
18970				CONN_DEC_REF(tconnp);
18971				break;
18972			}
18973			mp->b_prev = (mblk_t *)tcp;
18974
18975			if (listhead == NULL) {
18976				listhead = mp;
18977				last = mp;
18978			} else {
18979				last->b_next = mp;
18980				last = mp;
18981			}
18982			nmatch++;
18983			if (exact)
18984				break;
18985		}
18986
18987		/* Avoid holding lock for too long. */
18988		if (nmatch >= 500)
18989			break;
18990	}
18991	mutex_exit(&connfp->connf_lock);
18992
18993	/* Pass mp into the correct tcp */
18994	while ((mp = listhead) != NULL) {
18995		listhead = listhead->b_next;
18996		tcp = (tcp_t *)mp->b_prev;
18997		mp->b_next = mp->b_prev = NULL;
18998		SQUEUE_ENTER_ONE(tcp->tcp_connp->conn_sqp, mp,
18999		    tcp_ioctl_abort_handler, tcp->tcp_connp, NULL,
19000		    SQ_FILL, SQTAG_TCP_ABORT_BUCKET);
19001	}
19002
19003	*count += nmatch;
19004	if (nmatch >= 500 && err == 0)
19005		goto startover;
19006	return (err);
19007}
19008
19009/*
19010 * Abort all connections that matches the attributes specified in acp.
19011 */
19012static int
19013tcp_ioctl_abort(tcp_ioc_abort_conn_t *acp, tcp_stack_t *tcps)
19014{
19015	sa_family_t af;
19016	uint32_t  ports;
19017	uint16_t *pports;
19018	int err = 0, count = 0;
19019	boolean_t exact = B_FALSE; /* set when there is no wildcard */
19020	int index = -1;
19021	ushort_t logflags;
19022	ip_stack_t	*ipst = tcps->tcps_netstack->netstack_ip;
19023
19024	af = acp->ac_local.ss_family;
19025
19026	if (af == AF_INET) {
19027		if (TCP_AC_V4REMOTE(acp) != INADDR_ANY &&
19028		    TCP_AC_V4LPORT(acp) != 0 && TCP_AC_V4RPORT(acp) != 0) {
19029			pports = (uint16_t *)&ports;
19030			pports[1] = TCP_AC_V4LPORT(acp);
19031			pports[0] = TCP_AC_V4RPORT(acp);
19032			exact = (TCP_AC_V4LOCAL(acp) != INADDR_ANY);
19033		}
19034	} else {
19035		if (!IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE(acp)) &&
19036		    TCP_AC_V6LPORT(acp) != 0 && TCP_AC_V6RPORT(acp) != 0) {
19037			pports = (uint16_t *)&ports;
19038			pports[1] = TCP_AC_V6LPORT(acp);
19039			pports[0] = TCP_AC_V6RPORT(acp);
19040			exact = !IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL(acp));
19041		}
19042	}
19043
19044	/*
19045	 * For cases where remote addr, local port, and remote port are non-
19046	 * wildcards, tcp_ioctl_abort_bucket will only be called once.
19047	 */
19048	if (index != -1) {
19049		err = tcp_ioctl_abort_bucket(acp, index,
19050		    &count, exact, tcps);
19051	} else {
19052		/*
19053		 * loop through all entries for wildcard case
19054		 */
19055		for (index = 0;
19056		    index < ipst->ips_ipcl_conn_fanout_size;
19057		    index++) {
19058			err = tcp_ioctl_abort_bucket(acp, index,
19059			    &count, exact, tcps);
19060			if (err != 0)
19061				break;
19062		}
19063	}
19064
19065	logflags = SL_TRACE | SL_NOTE;
19066	/*
19067	 * Don't print this message to the console if the operation was done
19068	 * to a non-global zone.
19069	 */
19070	if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES)
19071		logflags |= SL_CONSOLE;
19072	(void) strlog(TCP_MOD_ID, 0, 1, logflags, "TCP_IOC_ABORT_CONN: "
19073	    "aborted %d connection%c\n", count, ((count > 1) ? 's' : ' '));
19074	if (err == 0 && count == 0)
19075		err = ENOENT;
19076	return (err);
19077}
19078
19079/*
19080 * Process the TCP_IOC_ABORT_CONN ioctl request.
19081 */
19082static void
19083tcp_ioctl_abort_conn(queue_t *q, mblk_t *mp)
19084{
19085	int	err;
19086	IOCP    iocp;
19087	MBLKP   mp1;
19088	sa_family_t laf, raf;
19089	tcp_ioc_abort_conn_t *acp;
19090	zone_t		*zptr;
19091	conn_t		*connp = Q_TO_CONN(q);
19092	zoneid_t	zoneid = connp->conn_zoneid;
19093	tcp_t		*tcp = connp->conn_tcp;
19094	tcp_stack_t	*tcps = tcp->tcp_tcps;
19095
19096	iocp = (IOCP)mp->b_rptr;
19097
19098	if ((mp1 = mp->b_cont) == NULL ||
19099	    iocp->ioc_count != sizeof (tcp_ioc_abort_conn_t)) {
19100		err = EINVAL;
19101		goto out;
19102	}
19103
19104	/* check permissions */
19105	if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) {
19106		err = EPERM;
19107		goto out;
19108	}
19109
19110	if (mp1->b_cont != NULL) {
19111		freemsg(mp1->b_cont);
19112		mp1->b_cont = NULL;
19113	}
19114
19115	acp = (tcp_ioc_abort_conn_t *)mp1->b_rptr;
19116	laf = acp->ac_local.ss_family;
19117	raf = acp->ac_remote.ss_family;
19118
19119	/* check that a zone with the supplied zoneid exists */
19120	if (acp->ac_zoneid != GLOBAL_ZONEID && acp->ac_zoneid != ALL_ZONES) {
19121		zptr = zone_find_by_id(zoneid);
19122		if (zptr != NULL) {
19123			zone_rele(zptr);
19124		} else {
19125			err = EINVAL;
19126			goto out;
19127		}
19128	}
19129
19130	/*
19131	 * For exclusive stacks we set the zoneid to zero
19132	 * to make TCP operate as if in the global zone.
19133	 */
19134	if (tcps->tcps_netstack->netstack_stackid != GLOBAL_NETSTACKID)
19135		acp->ac_zoneid = GLOBAL_ZONEID;
19136
19137	if (acp->ac_start < TCPS_SYN_SENT || acp->ac_end > TCPS_TIME_WAIT ||
19138	    acp->ac_start > acp->ac_end || laf != raf ||
19139	    (laf != AF_INET && laf != AF_INET6)) {
19140		err = EINVAL;
19141		goto out;
19142	}
19143
19144	tcp_ioctl_abort_dump(acp);
19145	err = tcp_ioctl_abort(acp, tcps);
19146
19147out:
19148	if (mp1 != NULL) {
19149		freemsg(mp1);
19150		mp->b_cont = NULL;
19151	}
19152
19153	if (err != 0)
19154		miocnak(q, mp, 0, err);
19155	else
19156		miocack(q, mp, 0, 0);
19157}
19158
19159/*
19160 * tcp_time_wait_processing() handles processing of incoming packets when
19161 * the tcp is in the TIME_WAIT state.
19162 * A TIME_WAIT tcp that has an associated open TCP stream is never put
19163 * on the time wait list.
19164 */
19165void
19166tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp, uint32_t seg_seq,
19167    uint32_t seg_ack, int seg_len, tcpha_t *tcpha, ip_recv_attr_t *ira)
19168{
19169	int32_t		bytes_acked;
19170	int32_t		gap;
19171	int32_t		rgap;
19172	tcp_opt_t	tcpopt;
19173	uint_t		flags;
19174	uint32_t	new_swnd = 0;
19175	conn_t		*nconnp;
19176	conn_t		*connp = tcp->tcp_connp;
19177	tcp_stack_t	*tcps = tcp->tcp_tcps;
19178
19179	BUMP_LOCAL(tcp->tcp_ibsegs);
19180	DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp);
19181
19182	flags = (unsigned int)tcpha->tha_flags & 0xFF;
19183	new_swnd = ntohs(tcpha->tha_win) <<
19184	    ((tcpha->tha_flags & TH_SYN) ? 0 : tcp->tcp_snd_ws);
19185	if (tcp->tcp_snd_ts_ok) {
19186		if (!tcp_paws_check(tcp, tcpha, &tcpopt)) {
19187			tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
19188			    tcp->tcp_rnxt, TH_ACK);
19189			goto done;
19190		}
19191	}
19192	gap = seg_seq - tcp->tcp_rnxt;
19193	rgap = tcp->tcp_rwnd - (gap + seg_len);
19194	if (gap < 0) {
19195		BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs);
19196		UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes,
19197		    (seg_len > -gap ? -gap : seg_len));
19198		seg_len += gap;
19199		if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) {
19200			if (flags & TH_RST) {
19201				goto done;
19202			}
19203			if ((flags & TH_FIN) && seg_len == -1) {
19204				/*
19205				 * When TCP receives a duplicate FIN in
19206				 * TIME_WAIT state, restart the 2 MSL timer.
19207				 * See page 73 in RFC 793. Make sure this TCP
19208				 * is already on the TIME_WAIT list. If not,
19209				 * just restart the timer.
19210				 */
19211				if (TCP_IS_DETACHED(tcp)) {
19212					if (tcp_time_wait_remove(tcp, NULL) ==
19213					    B_TRUE) {
19214						tcp_time_wait_append(tcp);
19215						TCP_DBGSTAT(tcps,
19216						    tcp_rput_time_wait);
19217					}
19218				} else {
19219					ASSERT(tcp != NULL);
19220					TCP_TIMER_RESTART(tcp,
19221					    tcps->tcps_time_wait_interval);
19222				}
19223				tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
19224				    tcp->tcp_rnxt, TH_ACK);
19225				goto done;
19226			}
19227			flags |=  TH_ACK_NEEDED;
19228			seg_len = 0;
19229			goto process_ack;
19230		}
19231
19232		/* Fix seg_seq, and chew the gap off the front. */
19233		seg_seq = tcp->tcp_rnxt;
19234	}
19235
19236	if ((flags & TH_SYN) && gap > 0 && rgap < 0) {
19237		/*
19238		 * Make sure that when we accept the connection, pick
19239		 * an ISS greater than (tcp_snxt + ISS_INCR/2) for the
19240		 * old connection.
19241		 *
19242		 * The next ISS generated is equal to tcp_iss_incr_extra
19243		 * + ISS_INCR/2 + other components depending on the
19244		 * value of tcp_strong_iss.  We pre-calculate the new
19245		 * ISS here and compare with tcp_snxt to determine if
19246		 * we need to make adjustment to tcp_iss_incr_extra.
19247		 *
19248		 * The above calculation is ugly and is a
19249		 * waste of CPU cycles...
19250		 */
19251		uint32_t new_iss = tcps->tcps_iss_incr_extra;
19252		int32_t adj;
19253		ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
19254
19255		switch (tcps->tcps_strong_iss) {
19256		case 2: {
19257			/* Add time and MD5 components. */
19258			uint32_t answer[4];
19259			struct {
19260				uint32_t ports;
19261				in6_addr_t src;
19262				in6_addr_t dst;
19263			} arg;
19264			MD5_CTX context;
19265
19266			mutex_enter(&tcps->tcps_iss_key_lock);
19267			context = tcps->tcps_iss_key;
19268			mutex_exit(&tcps->tcps_iss_key_lock);
19269			arg.ports = connp->conn_ports;
19270			/* We use MAPPED addresses in tcp_iss_init */
19271			arg.src = connp->conn_laddr_v6;
19272			arg.dst = connp->conn_faddr_v6;
19273			MD5Update(&context, (uchar_t *)&arg,
19274			    sizeof (arg));
19275			MD5Final((uchar_t *)answer, &context);
19276			answer[0] ^= answer[1] ^ answer[2] ^ answer[3];
19277			new_iss += (gethrtime() >> ISS_NSEC_SHT) + answer[0];
19278			break;
19279		}
19280		case 1:
19281			/* Add time component and min random (i.e. 1). */
19282			new_iss += (gethrtime() >> ISS_NSEC_SHT) + 1;
19283			break;
19284		default:
19285			/* Add only time component. */
19286			new_iss += (uint32_t)gethrestime_sec() * ISS_INCR;
19287			break;
19288		}
19289		if ((adj = (int32_t)(tcp->tcp_snxt - new_iss)) > 0) {
19290			/*
19291			 * New ISS not guaranteed to be ISS_INCR/2
19292			 * ahead of the current tcp_snxt, so add the
19293			 * difference to tcp_iss_incr_extra.
19294			 */
19295			tcps->tcps_iss_incr_extra += adj;
19296		}
19297		/*
19298		 * If tcp_clean_death() can not perform the task now,
19299		 * drop the SYN packet and let the other side re-xmit.
19300		 * Otherwise pass the SYN packet back in, since the
19301		 * old tcp state has been cleaned up or freed.
19302		 */
19303		if (tcp_clean_death(tcp, 0, 27) == -1)
19304			goto done;
19305		nconnp = ipcl_classify(mp, ira, ipst);
19306		if (nconnp != NULL) {
19307			TCP_STAT(tcps, tcp_time_wait_syn_success);
19308			/* Drops ref on nconnp */
19309			tcp_reinput(nconnp, mp, ira, ipst);
19310			return;
19311		}
19312		goto done;
19313	}
19314
19315	/*
19316	 * rgap is the amount of stuff received out of window.  A negative
19317	 * value is the amount out of window.
19318	 */
19319	if (rgap < 0) {
19320		BUMP_MIB(&tcps->tcps_mib, tcpInDataPastWinSegs);
19321		UPDATE_MIB(&tcps->tcps_mib, tcpInDataPastWinBytes, -rgap);
19322		/* Fix seg_len and make sure there is something left. */
19323		seg_len += rgap;
19324		if (seg_len <= 0) {
19325			if (flags & TH_RST) {
19326				goto done;
19327			}
19328			flags |=  TH_ACK_NEEDED;
19329			seg_len = 0;
19330			goto process_ack;
19331		}
19332	}
19333	/*
19334	 * Check whether we can update tcp_ts_recent.  This test is
19335	 * NOT the one in RFC 1323 3.4.  It is from Braden, 1993, "TCP
19336	 * Extensions for High Performance: An Update", Internet Draft.
19337	 */
19338	if (tcp->tcp_snd_ts_ok &&
19339	    TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
19340	    SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
19341		tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
19342		tcp->tcp_last_rcv_lbolt = lbolt64;
19343	}
19344
19345	if (seg_seq != tcp->tcp_rnxt && seg_len > 0) {
19346		/* Always ack out of order packets */
19347		flags |= TH_ACK_NEEDED;
19348		seg_len = 0;
19349	} else if (seg_len > 0) {
19350		BUMP_MIB(&tcps->tcps_mib, tcpInClosed);
19351		BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
19352		UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, seg_len);
19353	}
19354	if (flags & TH_RST) {
19355		(void) tcp_clean_death(tcp, 0, 28);
19356		goto done;
19357	}
19358	if (flags & TH_SYN) {
19359		tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1,
19360		    TH_RST|TH_ACK);
19361		/*
19362		 * Do not delete the TCP structure if it is in
19363		 * TIME_WAIT state.  Refer to RFC 1122, 4.2.2.13.
19364		 */
19365		goto done;
19366	}
19367process_ack:
19368	if (flags & TH_ACK) {
19369		bytes_acked = (int)(seg_ack - tcp->tcp_suna);
19370		if (bytes_acked <= 0) {
19371			if (bytes_acked == 0 && seg_len == 0 &&
19372			    new_swnd == tcp->tcp_swnd)
19373				BUMP_MIB(&tcps->tcps_mib, tcpInDupAck);
19374		} else {
19375			/* Acks something not sent */
19376			flags |= TH_ACK_NEEDED;
19377		}
19378	}
19379	if (flags & TH_ACK_NEEDED) {
19380		/*
19381		 * Time to send an ack for some reason.
19382		 */
19383		tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
19384		    tcp->tcp_rnxt, TH_ACK);
19385	}
19386done:
19387	freemsg(mp);
19388}
19389
19390/*
19391 * TCP Timers Implementation.
19392 */
19393timeout_id_t
19394tcp_timeout(conn_t *connp, void (*f)(void *), clock_t tim)
19395{
19396	mblk_t *mp;
19397	tcp_timer_t *tcpt;
19398	tcp_t *tcp = connp->conn_tcp;
19399
19400	ASSERT(connp->conn_sqp != NULL);
19401
19402	TCP_DBGSTAT(tcp->tcp_tcps, tcp_timeout_calls);
19403
19404	if (tcp->tcp_timercache == NULL) {
19405		mp = tcp_timermp_alloc(KM_NOSLEEP | KM_PANIC);
19406	} else {
19407		TCP_DBGSTAT(tcp->tcp_tcps, tcp_timeout_cached_alloc);
19408		mp = tcp->tcp_timercache;
19409		tcp->tcp_timercache = mp->b_next;
19410		mp->b_next = NULL;
19411		ASSERT(mp->b_wptr == NULL);
19412	}
19413
19414	CONN_INC_REF(connp);
19415	tcpt = (tcp_timer_t *)mp->b_rptr;
19416	tcpt->connp = connp;
19417	tcpt->tcpt_proc = f;
19418	/*
19419	 * TCP timers are normal timeouts. Plus, they do not require more than
19420	 * a 10 millisecond resolution. By choosing a coarser resolution and by
19421	 * rounding up the expiration to the next resolution boundary, we can
19422	 * batch timers in the callout subsystem to make TCP timers more
19423	 * efficient. The roundup also protects short timers from expiring too
19424	 * early before they have a chance to be cancelled.
19425	 */
19426	tcpt->tcpt_tid = timeout_generic(CALLOUT_NORMAL, tcp_timer_callback, mp,
19427	    TICK_TO_NSEC(tim), CALLOUT_TCP_RESOLUTION, CALLOUT_FLAG_ROUNDUP);
19428
19429	return ((timeout_id_t)mp);
19430}
19431
19432static void
19433tcp_timer_callback(void *arg)
19434{
19435	mblk_t *mp = (mblk_t *)arg;
19436	tcp_timer_t *tcpt;
19437	conn_t	*connp;
19438
19439	tcpt = (tcp_timer_t *)mp->b_rptr;
19440	connp = tcpt->connp;
19441	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_timer_handler, connp,
19442	    NULL, SQ_FILL, SQTAG_TCP_TIMER);
19443}
19444
19445/* ARGSUSED */
19446static void
19447tcp_timer_handler(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
19448{
19449	tcp_timer_t *tcpt;
19450	conn_t *connp = (conn_t *)arg;
19451	tcp_t *tcp = connp->conn_tcp;
19452
19453	tcpt = (tcp_timer_t *)mp->b_rptr;
19454	ASSERT(connp == tcpt->connp);
19455	ASSERT((squeue_t *)arg2 == connp->conn_sqp);
19456
19457	/*
19458	 * If the TCP has reached the closed state, don't proceed any
19459	 * further. This TCP logically does not exist on the system.
19460	 * tcpt_proc could for example access queues, that have already
19461	 * been qprocoff'ed off.
19462	 */
19463	if (tcp->tcp_state != TCPS_CLOSED) {
19464		(*tcpt->tcpt_proc)(connp);
19465	} else {
19466		tcp->tcp_timer_tid = 0;
19467	}
19468	tcp_timer_free(connp->conn_tcp, mp);
19469}
19470
19471/*
19472 * There is potential race with untimeout and the handler firing at the same
19473 * time. The mblock may be freed by the handler while we are trying to use
19474 * it. But since both should execute on the same squeue, this race should not
19475 * occur.
19476 */
19477clock_t
19478tcp_timeout_cancel(conn_t *connp, timeout_id_t id)
19479{
19480	mblk_t	*mp = (mblk_t *)id;
19481	tcp_timer_t *tcpt;
19482	clock_t delta;
19483
19484	TCP_DBGSTAT(connp->conn_tcp->tcp_tcps, tcp_timeout_cancel_reqs);
19485
19486	if (mp == NULL)
19487		return (-1);
19488
19489	tcpt = (tcp_timer_t *)mp->b_rptr;
19490	ASSERT(tcpt->connp == connp);
19491
19492	delta = untimeout_default(tcpt->tcpt_tid, 0);
19493
19494	if (delta >= 0) {
19495		TCP_DBGSTAT(connp->conn_tcp->tcp_tcps, tcp_timeout_canceled);
19496		tcp_timer_free(connp->conn_tcp, mp);
19497		CONN_DEC_REF(connp);
19498	}
19499
19500	return (delta);
19501}
19502
19503/*
19504 * Allocate space for the timer event. The allocation looks like mblk, but it is
19505 * not a proper mblk. To avoid confusion we set b_wptr to NULL.
19506 *
19507 * Dealing with failures: If we can't allocate from the timer cache we try
19508 * allocating from dblock caches using allocb_tryhard(). In this case b_wptr
19509 * points to b_rptr.
19510 * If we can't allocate anything using allocb_tryhard(), we perform a last
19511 * attempt and use kmem_alloc_tryhard(). In this case we set b_wptr to -1 and
19512 * save the actual allocation size in b_datap.
19513 */
19514mblk_t *
19515tcp_timermp_alloc(int kmflags)
19516{
19517	mblk_t *mp = (mblk_t *)kmem_cache_alloc(tcp_timercache,
19518	    kmflags & ~KM_PANIC);
19519
19520	if (mp != NULL) {
19521		mp->b_next = mp->b_prev = NULL;
19522		mp->b_rptr = (uchar_t *)(&mp[1]);
19523		mp->b_wptr = NULL;
19524		mp->b_datap = NULL;
19525		mp->b_queue = NULL;
19526		mp->b_cont = NULL;
19527	} else if (kmflags & KM_PANIC) {
19528		/*
19529		 * Failed to allocate memory for the timer. Try allocating from
19530		 * dblock caches.
19531		 */
19532		/* ipclassifier calls this from a constructor - hence no tcps */
19533		TCP_G_STAT(tcp_timermp_allocfail);
19534		mp = allocb_tryhard(sizeof (tcp_timer_t));
19535		if (mp == NULL) {
19536			size_t size = 0;
19537			/*
19538			 * Memory is really low. Try tryhard allocation.
19539			 *
19540			 * ipclassifier calls this from a constructor -
19541			 * hence no tcps
19542			 */
19543			TCP_G_STAT(tcp_timermp_allocdblfail);
19544			mp = kmem_alloc_tryhard(sizeof (mblk_t) +
19545			    sizeof (tcp_timer_t), &size, kmflags);
19546			mp->b_rptr = (uchar_t *)(&mp[1]);
19547			mp->b_next = mp->b_prev = NULL;
19548			mp->b_wptr = (uchar_t *)-1;
19549			mp->b_datap = (dblk_t *)size;
19550			mp->b_queue = NULL;
19551			mp->b_cont = NULL;
19552		}
19553		ASSERT(mp->b_wptr != NULL);
19554	}
19555	/* ipclassifier calls this from a constructor - hence no tcps */
19556	TCP_G_DBGSTAT(tcp_timermp_alloced);
19557
19558	return (mp);
19559}
19560
19561/*
19562 * Free per-tcp timer cache.
19563 * It can only contain entries from tcp_timercache.
19564 */
19565void
19566tcp_timermp_free(tcp_t *tcp)
19567{
19568	mblk_t *mp;
19569
19570	while ((mp = tcp->tcp_timercache) != NULL) {
19571		ASSERT(mp->b_wptr == NULL);
19572		tcp->tcp_timercache = tcp->tcp_timercache->b_next;
19573		kmem_cache_free(tcp_timercache, mp);
19574	}
19575}
19576
19577/*
19578 * Free timer event. Put it on the per-tcp timer cache if there is not too many
19579 * events there already (currently at most two events are cached).
19580 * If the event is not allocated from the timer cache, free it right away.
19581 */
19582static void
19583tcp_timer_free(tcp_t *tcp, mblk_t *mp)
19584{
19585	mblk_t *mp1 = tcp->tcp_timercache;
19586
19587	if (mp->b_wptr != NULL) {
19588		/*
19589		 * This allocation is not from a timer cache, free it right
19590		 * away.
19591		 */
19592		if (mp->b_wptr != (uchar_t *)-1)
19593			freeb(mp);
19594		else
19595			kmem_free(mp, (size_t)mp->b_datap);
19596	} else if (mp1 == NULL || mp1->b_next == NULL) {
19597		/* Cache this timer block for future allocations */
19598		mp->b_rptr = (uchar_t *)(&mp[1]);
19599		mp->b_next = mp1;
19600		tcp->tcp_timercache = mp;
19601	} else {
19602		kmem_cache_free(tcp_timercache, mp);
19603		TCP_DBGSTAT(tcp->tcp_tcps, tcp_timermp_freed);
19604	}
19605}
19606
19607/*
19608 * End of TCP Timers implementation.
19609 */
19610
19611/*
19612 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
19613 * on the specified backing STREAMS q. Note, the caller may make the
19614 * decision to call based on the tcp_t.tcp_flow_stopped value which
19615 * when check outside the q's lock is only an advisory check ...
19616 */
19617void
19618tcp_setqfull(tcp_t *tcp)
19619{
19620	tcp_stack_t	*tcps = tcp->tcp_tcps;
19621	conn_t	*connp = tcp->tcp_connp;
19622
19623	if (tcp->tcp_closed)
19624		return;
19625
19626	conn_setqfull(connp, &tcp->tcp_flow_stopped);
19627	if (tcp->tcp_flow_stopped)
19628		TCP_STAT(tcps, tcp_flwctl_on);
19629}
19630
19631void
19632tcp_clrqfull(tcp_t *tcp)
19633{
19634	conn_t  *connp = tcp->tcp_connp;
19635
19636	if (tcp->tcp_closed)
19637		return;
19638	conn_clrqfull(connp, &tcp->tcp_flow_stopped);
19639}
19640
19641/*
19642 * kstats related to squeues i.e. not per IP instance
19643 */
19644static void *
19645tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp)
19646{
19647	kstat_t *ksp;
19648
19649	tcp_g_stat_t template = {
19650		{ "tcp_timermp_alloced",	KSTAT_DATA_UINT64 },
19651		{ "tcp_timermp_allocfail",	KSTAT_DATA_UINT64 },
19652		{ "tcp_timermp_allocdblfail",	KSTAT_DATA_UINT64 },
19653		{ "tcp_freelist_cleanup",	KSTAT_DATA_UINT64 },
19654	};
19655
19656	ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net",
19657	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
19658	    KSTAT_FLAG_VIRTUAL);
19659
19660	if (ksp == NULL)
19661		return (NULL);
19662
19663	bcopy(&template, tcp_g_statp, sizeof (template));
19664	ksp->ks_data = (void *)tcp_g_statp;
19665
19666	kstat_install(ksp);
19667	return (ksp);
19668}
19669
19670static void
19671tcp_g_kstat_fini(kstat_t *ksp)
19672{
19673	if (ksp != NULL) {
19674		kstat_delete(ksp);
19675	}
19676}
19677
19678
19679static void *
19680tcp_kstat2_init(netstackid_t stackid, tcp_stat_t *tcps_statisticsp)
19681{
19682	kstat_t *ksp;
19683
19684	tcp_stat_t template = {
19685		{ "tcp_time_wait",		KSTAT_DATA_UINT64 },
19686		{ "tcp_time_wait_syn",		KSTAT_DATA_UINT64 },
19687		{ "tcp_time_wait_syn_success",	KSTAT_DATA_UINT64 },
19688		{ "tcp_detach_non_time_wait",	KSTAT_DATA_UINT64 },
19689		{ "tcp_detach_time_wait",	KSTAT_DATA_UINT64 },
19690		{ "tcp_time_wait_reap",		KSTAT_DATA_UINT64 },
19691		{ "tcp_clean_death_nondetached",	KSTAT_DATA_UINT64 },
19692		{ "tcp_reinit_calls",		KSTAT_DATA_UINT64 },
19693		{ "tcp_eager_err1",		KSTAT_DATA_UINT64 },
19694		{ "tcp_eager_err2",		KSTAT_DATA_UINT64 },
19695		{ "tcp_eager_blowoff_calls",	KSTAT_DATA_UINT64 },
19696		{ "tcp_eager_blowoff_q",	KSTAT_DATA_UINT64 },
19697		{ "tcp_eager_blowoff_q0",	KSTAT_DATA_UINT64 },
19698		{ "tcp_not_hard_bound",		KSTAT_DATA_UINT64 },
19699		{ "tcp_no_listener",		KSTAT_DATA_UINT64 },
19700		{ "tcp_found_eager",		KSTAT_DATA_UINT64 },
19701		{ "tcp_wrong_queue",		KSTAT_DATA_UINT64 },
19702		{ "tcp_found_eager_binding1",	KSTAT_DATA_UINT64 },
19703		{ "tcp_found_eager_bound1",	KSTAT_DATA_UINT64 },
19704		{ "tcp_eager_has_listener1",	KSTAT_DATA_UINT64 },
19705		{ "tcp_open_alloc",		KSTAT_DATA_UINT64 },
19706		{ "tcp_open_detached_alloc",	KSTAT_DATA_UINT64 },
19707		{ "tcp_rput_time_wait",		KSTAT_DATA_UINT64 },
19708		{ "tcp_listendrop",		KSTAT_DATA_UINT64 },
19709		{ "tcp_listendropq0",		KSTAT_DATA_UINT64 },
19710		{ "tcp_wrong_rq",		KSTAT_DATA_UINT64 },
19711		{ "tcp_rsrv_calls",		KSTAT_DATA_UINT64 },
19712		{ "tcp_eagerfree2",		KSTAT_DATA_UINT64 },
19713		{ "tcp_eagerfree3",		KSTAT_DATA_UINT64 },
19714		{ "tcp_eagerfree4",		KSTAT_DATA_UINT64 },
19715		{ "tcp_eagerfree5",		KSTAT_DATA_UINT64 },
19716		{ "tcp_timewait_syn_fail",	KSTAT_DATA_UINT64 },
19717		{ "tcp_listen_badflags",	KSTAT_DATA_UINT64 },
19718		{ "tcp_timeout_calls",		KSTAT_DATA_UINT64 },
19719		{ "tcp_timeout_cached_alloc",	KSTAT_DATA_UINT64 },
19720		{ "tcp_timeout_cancel_reqs",	KSTAT_DATA_UINT64 },
19721		{ "tcp_timeout_canceled",	KSTAT_DATA_UINT64 },
19722		{ "tcp_timermp_freed",		KSTAT_DATA_UINT64 },
19723		{ "tcp_push_timer_cnt",		KSTAT_DATA_UINT64 },
19724		{ "tcp_ack_timer_cnt",		KSTAT_DATA_UINT64 },
19725		{ "tcp_wsrv_called",		KSTAT_DATA_UINT64 },
19726		{ "tcp_flwctl_on",		KSTAT_DATA_UINT64 },
19727		{ "tcp_timer_fire_early",	KSTAT_DATA_UINT64 },
19728		{ "tcp_timer_fire_miss",	KSTAT_DATA_UINT64 },
19729		{ "tcp_rput_v6_error",		KSTAT_DATA_UINT64 },
19730		{ "tcp_zcopy_on",		KSTAT_DATA_UINT64 },
19731		{ "tcp_zcopy_off",		KSTAT_DATA_UINT64 },
19732		{ "tcp_zcopy_backoff",		KSTAT_DATA_UINT64 },
19733		{ "tcp_fusion_flowctl",		KSTAT_DATA_UINT64 },
19734		{ "tcp_fusion_backenabled",	KSTAT_DATA_UINT64 },
19735		{ "tcp_fusion_urg",		KSTAT_DATA_UINT64 },
19736		{ "tcp_fusion_putnext",		KSTAT_DATA_UINT64 },
19737		{ "tcp_fusion_unfusable",	KSTAT_DATA_UINT64 },
19738		{ "tcp_fusion_aborted",		KSTAT_DATA_UINT64 },
19739		{ "tcp_fusion_unqualified",	KSTAT_DATA_UINT64 },
19740		{ "tcp_fusion_rrw_busy",	KSTAT_DATA_UINT64 },
19741		{ "tcp_fusion_rrw_msgcnt",	KSTAT_DATA_UINT64 },
19742		{ "tcp_fusion_rrw_plugged",	KSTAT_DATA_UINT64 },
19743		{ "tcp_in_ack_unsent_drop",	KSTAT_DATA_UINT64 },
19744		{ "tcp_sock_fallback",		KSTAT_DATA_UINT64 },
19745		{ "tcp_lso_enabled",		KSTAT_DATA_UINT64 },
19746		{ "tcp_lso_disabled",		KSTAT_DATA_UINT64 },
19747		{ "tcp_lso_times",		KSTAT_DATA_UINT64 },
19748		{ "tcp_lso_pkt_out",		KSTAT_DATA_UINT64 },
19749	};
19750
19751	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, "tcpstat", "net",
19752	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
19753	    KSTAT_FLAG_VIRTUAL, stackid);
19754
19755	if (ksp == NULL)
19756		return (NULL);
19757
19758	bcopy(&template, tcps_statisticsp, sizeof (template));
19759	ksp->ks_data = (void *)tcps_statisticsp;
19760	ksp->ks_private = (void *)(uintptr_t)stackid;
19761
19762	kstat_install(ksp);
19763	return (ksp);
19764}
19765
19766static void
19767tcp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
19768{
19769	if (ksp != NULL) {
19770		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
19771		kstat_delete_netstack(ksp, stackid);
19772	}
19773}
19774
19775/*
19776 * TCP Kstats implementation
19777 */
19778static void *
19779tcp_kstat_init(netstackid_t stackid, tcp_stack_t *tcps)
19780{
19781	kstat_t	*ksp;
19782
19783	tcp_named_kstat_t template = {
19784		{ "rtoAlgorithm",	KSTAT_DATA_INT32, 0 },
19785		{ "rtoMin",		KSTAT_DATA_INT32, 0 },
19786		{ "rtoMax",		KSTAT_DATA_INT32, 0 },
19787		{ "maxConn",		KSTAT_DATA_INT32, 0 },
19788		{ "activeOpens",	KSTAT_DATA_UINT32, 0 },
19789		{ "passiveOpens",	KSTAT_DATA_UINT32, 0 },
19790		{ "attemptFails",	KSTAT_DATA_UINT32, 0 },
19791		{ "estabResets",	KSTAT_DATA_UINT32, 0 },
19792		{ "currEstab",		KSTAT_DATA_UINT32, 0 },
19793		{ "inSegs",		KSTAT_DATA_UINT64, 0 },
19794		{ "outSegs",		KSTAT_DATA_UINT64, 0 },
19795		{ "retransSegs",	KSTAT_DATA_UINT32, 0 },
19796		{ "connTableSize",	KSTAT_DATA_INT32, 0 },
19797		{ "outRsts",		KSTAT_DATA_UINT32, 0 },
19798		{ "outDataSegs",	KSTAT_DATA_UINT32, 0 },
19799		{ "outDataBytes",	KSTAT_DATA_UINT32, 0 },
19800		{ "retransBytes",	KSTAT_DATA_UINT32, 0 },
19801		{ "outAck",		KSTAT_DATA_UINT32, 0 },
19802		{ "outAckDelayed",	KSTAT_DATA_UINT32, 0 },
19803		{ "outUrg",		KSTAT_DATA_UINT32, 0 },
19804		{ "outWinUpdate",	KSTAT_DATA_UINT32, 0 },
19805		{ "outWinProbe",	KSTAT_DATA_UINT32, 0 },
19806		{ "outControl",		KSTAT_DATA_UINT32, 0 },
19807		{ "outFastRetrans",	KSTAT_DATA_UINT32, 0 },
19808		{ "inAckSegs",		KSTAT_DATA_UINT32, 0 },
19809		{ "inAckBytes",		KSTAT_DATA_UINT32, 0 },
19810		{ "inDupAck",		KSTAT_DATA_UINT32, 0 },
19811		{ "inAckUnsent",	KSTAT_DATA_UINT32, 0 },
19812		{ "inDataInorderSegs",	KSTAT_DATA_UINT32, 0 },
19813		{ "inDataInorderBytes",	KSTAT_DATA_UINT32, 0 },
19814		{ "inDataUnorderSegs",	KSTAT_DATA_UINT32, 0 },
19815		{ "inDataUnorderBytes",	KSTAT_DATA_UINT32, 0 },
19816		{ "inDataDupSegs",	KSTAT_DATA_UINT32, 0 },
19817		{ "inDataDupBytes",	KSTAT_DATA_UINT32, 0 },
19818		{ "inDataPartDupSegs",	KSTAT_DATA_UINT32, 0 },
19819		{ "inDataPartDupBytes",	KSTAT_DATA_UINT32, 0 },
19820		{ "inDataPastWinSegs",	KSTAT_DATA_UINT32, 0 },
19821		{ "inDataPastWinBytes",	KSTAT_DATA_UINT32, 0 },
19822		{ "inWinProbe",		KSTAT_DATA_UINT32, 0 },
19823		{ "inWinUpdate",	KSTAT_DATA_UINT32, 0 },
19824		{ "inClosed",		KSTAT_DATA_UINT32, 0 },
19825		{ "rttUpdate",		KSTAT_DATA_UINT32, 0 },
19826		{ "rttNoUpdate",	KSTAT_DATA_UINT32, 0 },
19827		{ "timRetrans",		KSTAT_DATA_UINT32, 0 },
19828		{ "timRetransDrop",	KSTAT_DATA_UINT32, 0 },
19829		{ "timKeepalive",	KSTAT_DATA_UINT32, 0 },
19830		{ "timKeepaliveProbe",	KSTAT_DATA_UINT32, 0 },
19831		{ "timKeepaliveDrop",	KSTAT_DATA_UINT32, 0 },
19832		{ "listenDrop",		KSTAT_DATA_UINT32, 0 },
19833		{ "listenDropQ0",	KSTAT_DATA_UINT32, 0 },
19834		{ "halfOpenDrop",	KSTAT_DATA_UINT32, 0 },
19835		{ "outSackRetransSegs",	KSTAT_DATA_UINT32, 0 },
19836		{ "connTableSize6",	KSTAT_DATA_INT32, 0 }
19837	};
19838
19839	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, TCP_MOD_NAME, "mib2",
19840	    KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid);
19841
19842	if (ksp == NULL)
19843		return (NULL);
19844
19845	template.rtoAlgorithm.value.ui32 = 4;
19846	template.rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min;
19847	template.rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max;
19848	template.maxConn.value.i32 = -1;
19849
19850	bcopy(&template, ksp->ks_data, sizeof (template));
19851	ksp->ks_update = tcp_kstat_update;
19852	ksp->ks_private = (void *)(uintptr_t)stackid;
19853
19854	kstat_install(ksp);
19855	return (ksp);
19856}
19857
19858static void
19859tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
19860{
19861	if (ksp != NULL) {
19862		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
19863		kstat_delete_netstack(ksp, stackid);
19864	}
19865}
19866
19867static int
19868tcp_kstat_update(kstat_t *kp, int rw)
19869{
19870	tcp_named_kstat_t *tcpkp;
19871	tcp_t		*tcp;
19872	connf_t		*connfp;
19873	conn_t		*connp;
19874	int 		i;
19875	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
19876	netstack_t	*ns;
19877	tcp_stack_t	*tcps;
19878	ip_stack_t	*ipst;
19879
19880	if ((kp == NULL) || (kp->ks_data == NULL))
19881		return (EIO);
19882
19883	if (rw == KSTAT_WRITE)
19884		return (EACCES);
19885
19886	ns = netstack_find_by_stackid(stackid);
19887	if (ns == NULL)
19888		return (-1);
19889	tcps = ns->netstack_tcp;
19890	if (tcps == NULL) {
19891		netstack_rele(ns);
19892		return (-1);
19893	}
19894
19895	tcpkp = (tcp_named_kstat_t *)kp->ks_data;
19896
19897	tcpkp->currEstab.value.ui32 = 0;
19898
19899	ipst = ns->netstack_ip;
19900
19901	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
19902		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
19903		connp = NULL;
19904		while ((connp =
19905		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
19906			tcp = connp->conn_tcp;
19907			switch (tcp_snmp_state(tcp)) {
19908			case MIB2_TCP_established:
19909			case MIB2_TCP_closeWait:
19910				tcpkp->currEstab.value.ui32++;
19911				break;
19912			}
19913		}
19914	}
19915
19916	tcpkp->activeOpens.value.ui32 = tcps->tcps_mib.tcpActiveOpens;
19917	tcpkp->passiveOpens.value.ui32 = tcps->tcps_mib.tcpPassiveOpens;
19918	tcpkp->attemptFails.value.ui32 = tcps->tcps_mib.tcpAttemptFails;
19919	tcpkp->estabResets.value.ui32 = tcps->tcps_mib.tcpEstabResets;
19920	tcpkp->inSegs.value.ui64 = tcps->tcps_mib.tcpHCInSegs;
19921	tcpkp->outSegs.value.ui64 = tcps->tcps_mib.tcpHCOutSegs;
19922	tcpkp->retransSegs.value.ui32 =	tcps->tcps_mib.tcpRetransSegs;
19923	tcpkp->connTableSize.value.i32 = tcps->tcps_mib.tcpConnTableSize;
19924	tcpkp->outRsts.value.ui32 = tcps->tcps_mib.tcpOutRsts;
19925	tcpkp->outDataSegs.value.ui32 = tcps->tcps_mib.tcpOutDataSegs;
19926	tcpkp->outDataBytes.value.ui32 = tcps->tcps_mib.tcpOutDataBytes;
19927	tcpkp->retransBytes.value.ui32 = tcps->tcps_mib.tcpRetransBytes;
19928	tcpkp->outAck.value.ui32 = tcps->tcps_mib.tcpOutAck;
19929	tcpkp->outAckDelayed.value.ui32 = tcps->tcps_mib.tcpOutAckDelayed;
19930	tcpkp->outUrg.value.ui32 = tcps->tcps_mib.tcpOutUrg;
19931	tcpkp->outWinUpdate.value.ui32 = tcps->tcps_mib.tcpOutWinUpdate;
19932	tcpkp->outWinProbe.value.ui32 = tcps->tcps_mib.tcpOutWinProbe;
19933	tcpkp->outControl.value.ui32 = tcps->tcps_mib.tcpOutControl;
19934	tcpkp->outFastRetrans.value.ui32 = tcps->tcps_mib.tcpOutFastRetrans;
19935	tcpkp->inAckSegs.value.ui32 = tcps->tcps_mib.tcpInAckSegs;
19936	tcpkp->inAckBytes.value.ui32 = tcps->tcps_mib.tcpInAckBytes;
19937	tcpkp->inDupAck.value.ui32 = tcps->tcps_mib.tcpInDupAck;
19938	tcpkp->inAckUnsent.value.ui32 = tcps->tcps_mib.tcpInAckUnsent;
19939	tcpkp->inDataInorderSegs.value.ui32 =
19940	    tcps->tcps_mib.tcpInDataInorderSegs;
19941	tcpkp->inDataInorderBytes.value.ui32 =
19942	    tcps->tcps_mib.tcpInDataInorderBytes;
19943	tcpkp->inDataUnorderSegs.value.ui32 =
19944	    tcps->tcps_mib.tcpInDataUnorderSegs;
19945	tcpkp->inDataUnorderBytes.value.ui32 =
19946	    tcps->tcps_mib.tcpInDataUnorderBytes;
19947	tcpkp->inDataDupSegs.value.ui32 = tcps->tcps_mib.tcpInDataDupSegs;
19948	tcpkp->inDataDupBytes.value.ui32 = tcps->tcps_mib.tcpInDataDupBytes;
19949	tcpkp->inDataPartDupSegs.value.ui32 =
19950	    tcps->tcps_mib.tcpInDataPartDupSegs;
19951	tcpkp->inDataPartDupBytes.value.ui32 =
19952	    tcps->tcps_mib.tcpInDataPartDupBytes;
19953	tcpkp->inDataPastWinSegs.value.ui32 =
19954	    tcps->tcps_mib.tcpInDataPastWinSegs;
19955	tcpkp->inDataPastWinBytes.value.ui32 =
19956	    tcps->tcps_mib.tcpInDataPastWinBytes;
19957	tcpkp->inWinProbe.value.ui32 = tcps->tcps_mib.tcpInWinProbe;
19958	tcpkp->inWinUpdate.value.ui32 = tcps->tcps_mib.tcpInWinUpdate;
19959	tcpkp->inClosed.value.ui32 = tcps->tcps_mib.tcpInClosed;
19960	tcpkp->rttNoUpdate.value.ui32 = tcps->tcps_mib.tcpRttNoUpdate;
19961	tcpkp->rttUpdate.value.ui32 = tcps->tcps_mib.tcpRttUpdate;
19962	tcpkp->timRetrans.value.ui32 = tcps->tcps_mib.tcpTimRetrans;
19963	tcpkp->timRetransDrop.value.ui32 = tcps->tcps_mib.tcpTimRetransDrop;
19964	tcpkp->timKeepalive.value.ui32 = tcps->tcps_mib.tcpTimKeepalive;
19965	tcpkp->timKeepaliveProbe.value.ui32 =
19966	    tcps->tcps_mib.tcpTimKeepaliveProbe;
19967	tcpkp->timKeepaliveDrop.value.ui32 =
19968	    tcps->tcps_mib.tcpTimKeepaliveDrop;
19969	tcpkp->listenDrop.value.ui32 = tcps->tcps_mib.tcpListenDrop;
19970	tcpkp->listenDropQ0.value.ui32 = tcps->tcps_mib.tcpListenDropQ0;
19971	tcpkp->halfOpenDrop.value.ui32 = tcps->tcps_mib.tcpHalfOpenDrop;
19972	tcpkp->outSackRetransSegs.value.ui32 =
19973	    tcps->tcps_mib.tcpOutSackRetransSegs;
19974	tcpkp->connTableSize6.value.i32 = tcps->tcps_mib.tcp6ConnTableSize;
19975
19976	netstack_rele(ns);
19977	return (0);
19978}
19979
19980static int
19981tcp_squeue_switch(int val)
19982{
19983	int rval = SQ_FILL;
19984
19985	switch (val) {
19986	case 1:
19987		rval = SQ_NODRAIN;
19988		break;
19989	case 2:
19990		rval = SQ_PROCESS;
19991		break;
19992	default:
19993		break;
19994	}
19995	return (rval);
19996}
19997
19998/*
19999 * This is called once for each squeue - globally for all stack
20000 * instances.
20001 */
20002static void
20003tcp_squeue_add(squeue_t *sqp)
20004{
20005	tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc(
20006	    sizeof (tcp_squeue_priv_t), KM_SLEEP);
20007
20008	*squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait;
20009	tcp_time_wait->tcp_time_wait_tid =
20010	    timeout_generic(CALLOUT_NORMAL, tcp_time_wait_collector, sqp,
20011	    TICK_TO_NSEC(TCP_TIME_WAIT_DELAY), CALLOUT_TCP_RESOLUTION,
20012	    CALLOUT_FLAG_ROUNDUP);
20013	if (tcp_free_list_max_cnt == 0) {
20014		int tcp_ncpus = ((boot_max_ncpus == -1) ?
20015		    max_ncpus : boot_max_ncpus);
20016
20017		/*
20018		 * Limit number of entries to 1% of availble memory / tcp_ncpus
20019		 */
20020		tcp_free_list_max_cnt = (freemem * PAGESIZE) /
20021		    (tcp_ncpus * sizeof (tcp_t) * 100);
20022	}
20023	tcp_time_wait->tcp_free_list_cnt = 0;
20024}
20025
20026/*
20027 * On a labeled system we have some protocols above TCP, such as RPC, which
20028 * appear to assume that every mblk in a chain has a db_credp.
20029 */
20030static void
20031tcp_setcred_data(mblk_t *mp, ip_recv_attr_t *ira)
20032{
20033	ASSERT(is_system_labeled());
20034	ASSERT(ira->ira_cred != NULL);
20035
20036	while (mp != NULL) {
20037		mblk_setcred(mp, ira->ira_cred, NOPID);
20038		mp = mp->b_cont;
20039	}
20040}
20041
20042static int
20043tcp_bind_select_lport(tcp_t *tcp, in_port_t *requested_port_ptr,
20044    boolean_t bind_to_req_port_only, cred_t *cr)
20045{
20046	in_port_t	mlp_port;
20047	mlp_type_t 	addrtype, mlptype;
20048	boolean_t	user_specified;
20049	in_port_t	allocated_port;
20050	in_port_t	requested_port = *requested_port_ptr;
20051	conn_t		*connp = tcp->tcp_connp;
20052	zone_t		*zone;
20053	tcp_stack_t	*tcps = tcp->tcp_tcps;
20054	in6_addr_t	v6addr = connp->conn_laddr_v6;
20055
20056	/*
20057	 * XXX It's up to the caller to specify bind_to_req_port_only or not.
20058	 */
20059	ASSERT(cr != NULL);
20060
20061	/*
20062	 * Get a valid port (within the anonymous range and should not
20063	 * be a privileged one) to use if the user has not given a port.
20064	 * If multiple threads are here, they may all start with
20065	 * with the same initial port. But, it should be fine as long as
20066	 * tcp_bindi will ensure that no two threads will be assigned
20067	 * the same port.
20068	 *
20069	 * NOTE: XXX If a privileged process asks for an anonymous port, we
20070	 * still check for ports only in the range > tcp_smallest_non_priv_port,
20071	 * unless TCP_ANONPRIVBIND option is set.
20072	 */
20073	mlptype = mlptSingle;
20074	mlp_port = requested_port;
20075	if (requested_port == 0) {
20076		requested_port = connp->conn_anon_priv_bind ?
20077		    tcp_get_next_priv_port(tcp) :
20078		    tcp_update_next_port(tcps->tcps_next_port_to_try,
20079		    tcp, B_TRUE);
20080		if (requested_port == 0) {
20081			return (-TNOADDR);
20082		}
20083		user_specified = B_FALSE;
20084
20085		/*
20086		 * If the user went through one of the RPC interfaces to create
20087		 * this socket and RPC is MLP in this zone, then give him an
20088		 * anonymous MLP.
20089		 */
20090		if (connp->conn_anon_mlp && is_system_labeled()) {
20091			zone = crgetzone(cr);
20092			addrtype = tsol_mlp_addr_type(
20093			    connp->conn_allzones ? ALL_ZONES : zone->zone_id,
20094			    IPV6_VERSION, &v6addr,
20095			    tcps->tcps_netstack->netstack_ip);
20096			if (addrtype == mlptSingle) {
20097				return (-TNOADDR);
20098			}
20099			mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP,
20100			    PMAPPORT, addrtype);
20101			mlp_port = PMAPPORT;
20102		}
20103	} else {
20104		int i;
20105		boolean_t priv = B_FALSE;
20106
20107		/*
20108		 * If the requested_port is in the well-known privileged range,
20109		 * verify that the stream was opened by a privileged user.
20110		 * Note: No locks are held when inspecting tcp_g_*epriv_ports
20111		 * but instead the code relies on:
20112		 * - the fact that the address of the array and its size never
20113		 *   changes
20114		 * - the atomic assignment of the elements of the array
20115		 */
20116		if (requested_port < tcps->tcps_smallest_nonpriv_port) {
20117			priv = B_TRUE;
20118		} else {
20119			for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
20120				if (requested_port ==
20121				    tcps->tcps_g_epriv_ports[i]) {
20122					priv = B_TRUE;
20123					break;
20124				}
20125			}
20126		}
20127		if (priv) {
20128			if (secpolicy_net_privaddr(cr, requested_port,
20129			    IPPROTO_TCP) != 0) {
20130				if (connp->conn_debug) {
20131					(void) strlog(TCP_MOD_ID, 0, 1,
20132					    SL_ERROR|SL_TRACE,
20133					    "tcp_bind: no priv for port %d",
20134					    requested_port);
20135				}
20136				return (-TACCES);
20137			}
20138		}
20139		user_specified = B_TRUE;
20140
20141		connp = tcp->tcp_connp;
20142		if (is_system_labeled()) {
20143			zone = crgetzone(cr);
20144			addrtype = tsol_mlp_addr_type(
20145			    connp->conn_allzones ? ALL_ZONES : zone->zone_id,
20146			    IPV6_VERSION, &v6addr,
20147			    tcps->tcps_netstack->netstack_ip);
20148			if (addrtype == mlptSingle) {
20149				return (-TNOADDR);
20150			}
20151			mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP,
20152			    requested_port, addrtype);
20153		}
20154	}
20155
20156	if (mlptype != mlptSingle) {
20157		if (secpolicy_net_bindmlp(cr) != 0) {
20158			if (connp->conn_debug) {
20159				(void) strlog(TCP_MOD_ID, 0, 1,
20160				    SL_ERROR|SL_TRACE,
20161				    "tcp_bind: no priv for multilevel port %d",
20162				    requested_port);
20163			}
20164			return (-TACCES);
20165		}
20166
20167		/*
20168		 * If we're specifically binding a shared IP address and the
20169		 * port is MLP on shared addresses, then check to see if this
20170		 * zone actually owns the MLP.  Reject if not.
20171		 */
20172		if (mlptype == mlptShared && addrtype == mlptShared) {
20173			/*
20174			 * No need to handle exclusive-stack zones since
20175			 * ALL_ZONES only applies to the shared stack.
20176			 */
20177			zoneid_t mlpzone;
20178
20179			mlpzone = tsol_mlp_findzone(IPPROTO_TCP,
20180			    htons(mlp_port));
20181			if (connp->conn_zoneid != mlpzone) {
20182				if (connp->conn_debug) {
20183					(void) strlog(TCP_MOD_ID, 0, 1,
20184					    SL_ERROR|SL_TRACE,
20185					    "tcp_bind: attempt to bind port "
20186					    "%d on shared addr in zone %d "
20187					    "(should be %d)",
20188					    mlp_port, connp->conn_zoneid,
20189					    mlpzone);
20190				}
20191				return (-TACCES);
20192			}
20193		}
20194
20195		if (!user_specified) {
20196			int err;
20197			err = tsol_mlp_anon(zone, mlptype, connp->conn_proto,
20198			    requested_port, B_TRUE);
20199			if (err != 0) {
20200				if (connp->conn_debug) {
20201					(void) strlog(TCP_MOD_ID, 0, 1,
20202					    SL_ERROR|SL_TRACE,
20203					    "tcp_bind: cannot establish anon "
20204					    "MLP for port %d",
20205					    requested_port);
20206				}
20207				return (err);
20208			}
20209			connp->conn_anon_port = B_TRUE;
20210		}
20211		connp->conn_mlp_type = mlptype;
20212	}
20213
20214	allocated_port = tcp_bindi(tcp, requested_port, &v6addr,
20215	    connp->conn_reuseaddr, B_FALSE, bind_to_req_port_only,
20216	    user_specified);
20217
20218	if (allocated_port == 0) {
20219		connp->conn_mlp_type = mlptSingle;
20220		if (connp->conn_anon_port) {
20221			connp->conn_anon_port = B_FALSE;
20222			(void) tsol_mlp_anon(zone, mlptype, connp->conn_proto,
20223			    requested_port, B_FALSE);
20224		}
20225		if (bind_to_req_port_only) {
20226			if (connp->conn_debug) {
20227				(void) strlog(TCP_MOD_ID, 0, 1,
20228				    SL_ERROR|SL_TRACE,
20229				    "tcp_bind: requested addr busy");
20230			}
20231			return (-TADDRBUSY);
20232		} else {
20233			/* If we are out of ports, fail the bind. */
20234			if (connp->conn_debug) {
20235				(void) strlog(TCP_MOD_ID, 0, 1,
20236				    SL_ERROR|SL_TRACE,
20237				    "tcp_bind: out of ports?");
20238			}
20239			return (-TNOADDR);
20240		}
20241	}
20242
20243	/* Pass the allocated port back */
20244	*requested_port_ptr = allocated_port;
20245	return (0);
20246}
20247
20248/*
20249 * Check the address and check/pick a local port number.
20250 */
20251static int
20252tcp_bind_check(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
20253    boolean_t bind_to_req_port_only)
20254{
20255	tcp_t	*tcp = connp->conn_tcp;
20256	sin_t	*sin;
20257	sin6_t  *sin6;
20258	in_port_t	requested_port;
20259	ipaddr_t	v4addr;
20260	in6_addr_t	v6addr;
20261	ip_laddr_t	laddr_type = IPVL_UNICAST_UP;	/* INADDR_ANY */
20262	zoneid_t	zoneid = IPCL_ZONEID(connp);
20263	ip_stack_t	*ipst = connp->conn_netstack->netstack_ip;
20264	uint_t		scopeid = 0;
20265	int		error = 0;
20266	ip_xmit_attr_t	*ixa = connp->conn_ixa;
20267
20268	ASSERT((uintptr_t)len <= (uintptr_t)INT_MAX);
20269
20270	if (tcp->tcp_state == TCPS_BOUND) {
20271		return (0);
20272	} else if (tcp->tcp_state > TCPS_BOUND) {
20273		if (connp->conn_debug) {
20274			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
20275			    "tcp_bind: bad state, %d", tcp->tcp_state);
20276		}
20277		return (-TOUTSTATE);
20278	}
20279
20280	ASSERT(sa != NULL && len != 0);
20281
20282	if (!OK_32PTR((char *)sa)) {
20283		if (connp->conn_debug) {
20284			(void) strlog(TCP_MOD_ID, 0, 1,
20285			    SL_ERROR|SL_TRACE,
20286			    "tcp_bind: bad address parameter, "
20287			    "address %p, len %d",
20288			    (void *)sa, len);
20289		}
20290		return (-TPROTO);
20291	}
20292
20293	error = proto_verify_ip_addr(connp->conn_family, sa, len);
20294	if (error != 0) {
20295		return (error);
20296	}
20297
20298	switch (len) {
20299	case sizeof (sin_t):	/* Complete IPv4 address */
20300		sin = (sin_t *)sa;
20301		requested_port = ntohs(sin->sin_port);
20302		v4addr = sin->sin_addr.s_addr;
20303		IN6_IPADDR_TO_V4MAPPED(v4addr, &v6addr);
20304		if (v4addr != INADDR_ANY) {
20305			laddr_type = ip_laddr_verify_v4(v4addr, zoneid, ipst,
20306			    B_FALSE);
20307		}
20308		break;
20309
20310	case sizeof (sin6_t): /* Complete IPv6 address */
20311		sin6 = (sin6_t *)sa;
20312		v6addr = sin6->sin6_addr;
20313		requested_port = ntohs(sin6->sin6_port);
20314		if (IN6_IS_ADDR_V4MAPPED(&v6addr)) {
20315			if (connp->conn_ipv6_v6only)
20316				return (EADDRNOTAVAIL);
20317
20318			IN6_V4MAPPED_TO_IPADDR(&v6addr, v4addr);
20319			if (v4addr != INADDR_ANY) {
20320				laddr_type = ip_laddr_verify_v4(v4addr,
20321				    zoneid, ipst, B_FALSE);
20322			}
20323		} else {
20324			if (!IN6_IS_ADDR_UNSPECIFIED(&v6addr)) {
20325				if (IN6_IS_ADDR_LINKSCOPE(&v6addr))
20326					scopeid = sin6->sin6_scope_id;
20327				laddr_type = ip_laddr_verify_v6(&v6addr,
20328				    zoneid, ipst, B_FALSE, scopeid);
20329			}
20330		}
20331		break;
20332
20333	default:
20334		if (connp->conn_debug) {
20335			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
20336			    "tcp_bind: bad address length, %d", len);
20337		}
20338		return (EAFNOSUPPORT);
20339		/* return (-TBADADDR); */
20340	}
20341
20342	/* Is the local address a valid unicast address? */
20343	if (laddr_type == IPVL_BAD)
20344		return (EADDRNOTAVAIL);
20345
20346	connp->conn_bound_addr_v6 = v6addr;
20347	if (scopeid != 0) {
20348		ixa->ixa_flags |= IXAF_SCOPEID_SET;
20349		ixa->ixa_scopeid = scopeid;
20350		connp->conn_incoming_ifindex = scopeid;
20351	} else {
20352		ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
20353		connp->conn_incoming_ifindex = connp->conn_bound_if;
20354	}
20355
20356	connp->conn_laddr_v6 = v6addr;
20357	connp->conn_saddr_v6 = v6addr;
20358
20359	bind_to_req_port_only = requested_port != 0 && bind_to_req_port_only;
20360
20361	error = tcp_bind_select_lport(tcp, &requested_port,
20362	    bind_to_req_port_only, cr);
20363	if (error != 0) {
20364		connp->conn_laddr_v6 = ipv6_all_zeros;
20365		connp->conn_saddr_v6 = ipv6_all_zeros;
20366		connp->conn_bound_addr_v6 = ipv6_all_zeros;
20367	}
20368	return (error);
20369}
20370
20371/*
20372 * Return unix error is tli error is TSYSERR, otherwise return a negative
20373 * tli error.
20374 */
20375int
20376tcp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
20377    boolean_t bind_to_req_port_only)
20378{
20379	int error;
20380	tcp_t *tcp = connp->conn_tcp;
20381
20382	if (tcp->tcp_state >= TCPS_BOUND) {
20383		if (connp->conn_debug) {
20384			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
20385			    "tcp_bind: bad state, %d", tcp->tcp_state);
20386		}
20387		return (-TOUTSTATE);
20388	}
20389
20390	error = tcp_bind_check(connp, sa, len, cr, bind_to_req_port_only);
20391	if (error != 0)
20392		return (error);
20393
20394	ASSERT(tcp->tcp_state == TCPS_BOUND);
20395	tcp->tcp_conn_req_max = 0;
20396	return (0);
20397}
20398
20399int
20400tcp_bind(sock_lower_handle_t proto_handle, struct sockaddr *sa,
20401    socklen_t len, cred_t *cr)
20402{
20403	int 		error;
20404	conn_t		*connp = (conn_t *)proto_handle;
20405	squeue_t	*sqp = connp->conn_sqp;
20406
20407	/* All Solaris components should pass a cred for this operation. */
20408	ASSERT(cr != NULL);
20409
20410	ASSERT(sqp != NULL);
20411	ASSERT(connp->conn_upper_handle != NULL);
20412
20413	error = squeue_synch_enter(sqp, connp, NULL);
20414	if (error != 0) {
20415		/* failed to enter */
20416		return (ENOSR);
20417	}
20418
20419	/* binding to a NULL address really means unbind */
20420	if (sa == NULL) {
20421		if (connp->conn_tcp->tcp_state < TCPS_LISTEN)
20422			error = tcp_do_unbind(connp);
20423		else
20424			error = EINVAL;
20425	} else {
20426		error = tcp_do_bind(connp, sa, len, cr, B_TRUE);
20427	}
20428
20429	squeue_synch_exit(sqp, connp);
20430
20431	if (error < 0) {
20432		if (error == -TOUTSTATE)
20433			error = EINVAL;
20434		else
20435			error = proto_tlitosyserr(-error);
20436	}
20437
20438	return (error);
20439}
20440
20441/*
20442 * If the return value from this function is positive, it's a UNIX error.
20443 * Otherwise, if it's negative, then the absolute value is a TLI error.
20444 * the TPI routine tcp_tpi_connect() is a wrapper function for this.
20445 */
20446int
20447tcp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len,
20448    cred_t *cr, pid_t pid)
20449{
20450	tcp_t		*tcp = connp->conn_tcp;
20451	sin_t		*sin = (sin_t *)sa;
20452	sin6_t		*sin6 = (sin6_t *)sa;
20453	ipaddr_t	*dstaddrp;
20454	in_port_t	dstport;
20455	uint_t		srcid;
20456	int		error;
20457	uint32_t	mss;
20458	mblk_t		*syn_mp;
20459	tcp_stack_t	*tcps = tcp->tcp_tcps;
20460	int32_t		oldstate;
20461	ip_xmit_attr_t	*ixa = connp->conn_ixa;
20462
20463	oldstate = tcp->tcp_state;
20464
20465	switch (len) {
20466	default:
20467		/*
20468		 * Should never happen
20469		 */
20470		return (EINVAL);
20471
20472	case sizeof (sin_t):
20473		sin = (sin_t *)sa;
20474		if (sin->sin_port == 0) {
20475			return (-TBADADDR);
20476		}
20477		if (connp->conn_ipv6_v6only) {
20478			return (EAFNOSUPPORT);
20479		}
20480		break;
20481
20482	case sizeof (sin6_t):
20483		sin6 = (sin6_t *)sa;
20484		if (sin6->sin6_port == 0) {
20485			return (-TBADADDR);
20486		}
20487		break;
20488	}
20489	/*
20490	 * If we're connecting to an IPv4-mapped IPv6 address, we need to
20491	 * make sure that the conn_ipversion is IPV4_VERSION.  We
20492	 * need to this before we call tcp_bindi() so that the port lookup
20493	 * code will look for ports in the correct port space (IPv4 and
20494	 * IPv6 have separate port spaces).
20495	 */
20496	if (connp->conn_family == AF_INET6 &&
20497	    connp->conn_ipversion == IPV6_VERSION &&
20498	    IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
20499		if (connp->conn_ipv6_v6only)
20500			return (EADDRNOTAVAIL);
20501
20502		connp->conn_ipversion = IPV4_VERSION;
20503	}
20504
20505	switch (tcp->tcp_state) {
20506	case TCPS_LISTEN:
20507		/*
20508		 * Listening sockets are not allowed to issue connect().
20509		 */
20510		if (IPCL_IS_NONSTR(connp))
20511			return (EOPNOTSUPP);
20512		/* FALLTHRU */
20513	case TCPS_IDLE:
20514		/*
20515		 * We support quick connect, refer to comments in
20516		 * tcp_connect_*()
20517		 */
20518		/* FALLTHRU */
20519	case TCPS_BOUND:
20520		break;
20521	default:
20522		return (-TOUTSTATE);
20523	}
20524
20525	/*
20526	 * We update our cred/cpid based on the caller of connect
20527	 */
20528	if (connp->conn_cred != cr) {
20529		crhold(cr);
20530		crfree(connp->conn_cred);
20531		connp->conn_cred = cr;
20532	}
20533	connp->conn_cpid = pid;
20534
20535	/* Cache things in the ixa without any refhold */
20536	ixa->ixa_cred = cr;
20537	ixa->ixa_cpid = pid;
20538	if (is_system_labeled()) {
20539		/* We need to restart with a label based on the cred */
20540		ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
20541	}
20542
20543	if (connp->conn_family == AF_INET6) {
20544		if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
20545			error = tcp_connect_ipv6(tcp, &sin6->sin6_addr,
20546			    sin6->sin6_port, sin6->sin6_flowinfo,
20547			    sin6->__sin6_src_id, sin6->sin6_scope_id);
20548		} else {
20549			/*
20550			 * Destination adress is mapped IPv6 address.
20551			 * Source bound address should be unspecified or
20552			 * IPv6 mapped address as well.
20553			 */
20554			if (!IN6_IS_ADDR_UNSPECIFIED(
20555			    &connp->conn_bound_addr_v6) &&
20556			    !IN6_IS_ADDR_V4MAPPED(&connp->conn_bound_addr_v6)) {
20557				return (EADDRNOTAVAIL);
20558			}
20559			dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr));
20560			dstport = sin6->sin6_port;
20561			srcid = sin6->__sin6_src_id;
20562			error = tcp_connect_ipv4(tcp, dstaddrp, dstport,
20563			    srcid);
20564		}
20565	} else {
20566		dstaddrp = &sin->sin_addr.s_addr;
20567		dstport = sin->sin_port;
20568		srcid = 0;
20569		error = tcp_connect_ipv4(tcp, dstaddrp, dstport, srcid);
20570	}
20571
20572	if (error != 0)
20573		goto connect_failed;
20574
20575	CL_INET_CONNECT(connp, B_TRUE, error);
20576	if (error != 0)
20577		goto connect_failed;
20578
20579	/* connect succeeded */
20580	BUMP_MIB(&tcps->tcps_mib, tcpActiveOpens);
20581	tcp->tcp_active_open = 1;
20582
20583	/*
20584	 * tcp_set_destination() does not adjust for TCP/IP header length.
20585	 */
20586	mss = tcp->tcp_mss - connp->conn_ht_iphc_len;
20587
20588	/*
20589	 * Just make sure our rwnd is at least rcvbuf * MSS large, and round up
20590	 * to the nearest MSS.
20591	 *
20592	 * We do the round up here because we need to get the interface MTU
20593	 * first before we can do the round up.
20594	 */
20595	tcp->tcp_rwnd = connp->conn_rcvbuf;
20596	tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss),
20597	    tcps->tcps_recv_hiwat_minmss * mss);
20598	connp->conn_rcvbuf = tcp->tcp_rwnd;
20599	tcp_set_ws_value(tcp);
20600	tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
20601	if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always)
20602		tcp->tcp_snd_ws_ok = B_TRUE;
20603
20604	/*
20605	 * Set tcp_snd_ts_ok to true
20606	 * so that tcp_xmit_mp will
20607	 * include the timestamp
20608	 * option in the SYN segment.
20609	 */
20610	if (tcps->tcps_tstamp_always ||
20611	    (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) {
20612		tcp->tcp_snd_ts_ok = B_TRUE;
20613	}
20614
20615	/*
20616	 * tcp_snd_sack_ok can be set in
20617	 * tcp_set_destination() if the sack metric
20618	 * is set.  So check it here also.
20619	 */
20620	if (tcps->tcps_sack_permitted == 2 ||
20621	    tcp->tcp_snd_sack_ok) {
20622		if (tcp->tcp_sack_info == NULL) {
20623			tcp->tcp_sack_info = kmem_cache_alloc(
20624			    tcp_sack_info_cache, KM_SLEEP);
20625		}
20626		tcp->tcp_snd_sack_ok = B_TRUE;
20627	}
20628
20629	/*
20630	 * Should we use ECN?  Note that the current
20631	 * default value (SunOS 5.9) of tcp_ecn_permitted
20632	 * is 1.  The reason for doing this is that there
20633	 * are equipments out there that will drop ECN
20634	 * enabled IP packets.  Setting it to 1 avoids
20635	 * compatibility problems.
20636	 */
20637	if (tcps->tcps_ecn_permitted == 2)
20638		tcp->tcp_ecn_ok = B_TRUE;
20639
20640	TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
20641	syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
20642	    tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
20643	if (syn_mp != NULL) {
20644		/*
20645		 * We must bump the generation before sending the syn
20646		 * to ensure that we use the right generation in case
20647		 * this thread issues a "connected" up call.
20648		 */
20649		SOCK_CONNID_BUMP(tcp->tcp_connid);
20650		tcp_send_data(tcp, syn_mp);
20651	}
20652
20653	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
20654		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
20655	return (0);
20656
20657connect_failed:
20658	connp->conn_faddr_v6 = ipv6_all_zeros;
20659	connp->conn_fport = 0;
20660	tcp->tcp_state = oldstate;
20661	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
20662		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
20663	return (error);
20664}
20665
20666int
20667tcp_connect(sock_lower_handle_t proto_handle, const struct sockaddr *sa,
20668    socklen_t len, sock_connid_t *id, cred_t *cr)
20669{
20670	conn_t		*connp = (conn_t *)proto_handle;
20671	squeue_t	*sqp = connp->conn_sqp;
20672	int		error;
20673
20674	ASSERT(connp->conn_upper_handle != NULL);
20675
20676	/* All Solaris components should pass a cred for this operation. */
20677	ASSERT(cr != NULL);
20678
20679	error = proto_verify_ip_addr(connp->conn_family, sa, len);
20680	if (error != 0) {
20681		return (error);
20682	}
20683
20684	error = squeue_synch_enter(sqp, connp, NULL);
20685	if (error != 0) {
20686		/* failed to enter */
20687		return (ENOSR);
20688	}
20689
20690	/*
20691	 * TCP supports quick connect, so no need to do an implicit bind
20692	 */
20693	error = tcp_do_connect(connp, sa, len, cr, curproc->p_pid);
20694	if (error == 0) {
20695		*id = connp->conn_tcp->tcp_connid;
20696	} else if (error < 0) {
20697		if (error == -TOUTSTATE) {
20698			switch (connp->conn_tcp->tcp_state) {
20699			case TCPS_SYN_SENT:
20700				error = EALREADY;
20701				break;
20702			case TCPS_ESTABLISHED:
20703				error = EISCONN;
20704				break;
20705			case TCPS_LISTEN:
20706				error = EOPNOTSUPP;
20707				break;
20708			default:
20709				error = EINVAL;
20710				break;
20711			}
20712		} else {
20713			error = proto_tlitosyserr(-error);
20714		}
20715	}
20716
20717	if (connp->conn_tcp->tcp_loopback) {
20718		struct sock_proto_props sopp;
20719
20720		sopp.sopp_flags = SOCKOPT_LOOPBACK;
20721		sopp.sopp_loopback = B_TRUE;
20722
20723		(*connp->conn_upcalls->su_set_proto_props)(
20724		    connp->conn_upper_handle, &sopp);
20725	}
20726done:
20727	squeue_synch_exit(sqp, connp);
20728
20729	return ((error == 0) ? EINPROGRESS : error);
20730}
20731
20732/* ARGSUSED */
20733sock_lower_handle_t
20734tcp_create(int family, int type, int proto, sock_downcalls_t **sock_downcalls,
20735    uint_t *smodep, int *errorp, int flags, cred_t *credp)
20736{
20737	conn_t		*connp;
20738	boolean_t	isv6 = family == AF_INET6;
20739	if (type != SOCK_STREAM || (family != AF_INET && family != AF_INET6) ||
20740	    (proto != 0 && proto != IPPROTO_TCP)) {
20741		*errorp = EPROTONOSUPPORT;
20742		return (NULL);
20743	}
20744
20745	connp = tcp_create_common(credp, isv6, B_TRUE, errorp);
20746	if (connp == NULL) {
20747		return (NULL);
20748	}
20749
20750	/*
20751	 * Put the ref for TCP. Ref for IP was already put
20752	 * by ipcl_conn_create. Also Make the conn_t globally
20753	 * visible to walkers
20754	 */
20755	mutex_enter(&connp->conn_lock);
20756	CONN_INC_REF_LOCKED(connp);
20757	ASSERT(connp->conn_ref == 2);
20758	connp->conn_state_flags &= ~CONN_INCIPIENT;
20759
20760	connp->conn_flags |= IPCL_NONSTR;
20761	mutex_exit(&connp->conn_lock);
20762
20763	ASSERT(errorp != NULL);
20764	*errorp = 0;
20765	*sock_downcalls = &sock_tcp_downcalls;
20766	*smodep = SM_CONNREQUIRED | SM_EXDATA | SM_ACCEPTSUPP |
20767	    SM_SENDFILESUPP;
20768
20769	return ((sock_lower_handle_t)connp);
20770}
20771
20772/* ARGSUSED */
20773void
20774tcp_activate(sock_lower_handle_t proto_handle, sock_upper_handle_t sock_handle,
20775    sock_upcalls_t *sock_upcalls, int flags, cred_t *cr)
20776{
20777	conn_t *connp = (conn_t *)proto_handle;
20778	struct sock_proto_props sopp;
20779
20780	ASSERT(connp->conn_upper_handle == NULL);
20781
20782	/* All Solaris components should pass a cred for this operation. */
20783	ASSERT(cr != NULL);
20784
20785	sopp.sopp_flags = SOCKOPT_RCVHIWAT | SOCKOPT_RCVLOWAT |
20786	    SOCKOPT_MAXPSZ | SOCKOPT_MAXBLK | SOCKOPT_RCVTIMER |
20787	    SOCKOPT_RCVTHRESH | SOCKOPT_MAXADDRLEN | SOCKOPT_MINPSZ;
20788
20789	sopp.sopp_rxhiwat = SOCKET_RECVHIWATER;
20790	sopp.sopp_rxlowat = SOCKET_RECVLOWATER;
20791	sopp.sopp_maxpsz = INFPSZ;
20792	sopp.sopp_maxblk = INFPSZ;
20793	sopp.sopp_rcvtimer = SOCKET_TIMER_INTERVAL;
20794	sopp.sopp_rcvthresh = SOCKET_RECVHIWATER >> 3;
20795	sopp.sopp_maxaddrlen = sizeof (sin6_t);
20796	sopp.sopp_minpsz = (tcp_rinfo.mi_minpsz == 1) ? 0 :
20797	    tcp_rinfo.mi_minpsz;
20798
20799	connp->conn_upcalls = sock_upcalls;
20800	connp->conn_upper_handle = sock_handle;
20801
20802	ASSERT(connp->conn_rcvbuf != 0 &&
20803	    connp->conn_rcvbuf == connp->conn_tcp->tcp_rwnd);
20804	(*sock_upcalls->su_set_proto_props)(sock_handle, &sopp);
20805}
20806
20807/* ARGSUSED */
20808int
20809tcp_close(sock_lower_handle_t proto_handle, int flags, cred_t *cr)
20810{
20811	conn_t *connp = (conn_t *)proto_handle;
20812
20813	ASSERT(connp->conn_upper_handle != NULL);
20814
20815	/* All Solaris components should pass a cred for this operation. */
20816	ASSERT(cr != NULL);
20817
20818	tcp_close_common(connp, flags);
20819
20820	ip_free_helper_stream(connp);
20821
20822	/*
20823	 * Drop IP's reference on the conn. This is the last reference
20824	 * on the connp if the state was less than established. If the
20825	 * connection has gone into timewait state, then we will have
20826	 * one ref for the TCP and one more ref (total of two) for the
20827	 * classifier connected hash list (a timewait connections stays
20828	 * in connected hash till closed).
20829	 *
20830	 * We can't assert the references because there might be other
20831	 * transient reference places because of some walkers or queued
20832	 * packets in squeue for the timewait state.
20833	 */
20834	CONN_DEC_REF(connp);
20835	return (0);
20836}
20837
20838/* ARGSUSED */
20839int
20840tcp_sendmsg(sock_lower_handle_t proto_handle, mblk_t *mp, struct nmsghdr *msg,
20841    cred_t *cr)
20842{
20843	tcp_t		*tcp;
20844	uint32_t	msize;
20845	conn_t *connp = (conn_t *)proto_handle;
20846	int32_t		tcpstate;
20847
20848	/* All Solaris components should pass a cred for this operation. */
20849	ASSERT(cr != NULL);
20850
20851	ASSERT(connp->conn_ref >= 2);
20852	ASSERT(connp->conn_upper_handle != NULL);
20853
20854	if (msg->msg_controllen != 0) {
20855		freemsg(mp);
20856		return (EOPNOTSUPP);
20857	}
20858
20859	switch (DB_TYPE(mp)) {
20860	case M_DATA:
20861		tcp = connp->conn_tcp;
20862		ASSERT(tcp != NULL);
20863
20864		tcpstate = tcp->tcp_state;
20865		if (tcpstate < TCPS_ESTABLISHED) {
20866			freemsg(mp);
20867			/*
20868			 * We return ENOTCONN if the endpoint is trying to
20869			 * connect or has never been connected, and EPIPE if it
20870			 * has been disconnected. The connection id helps us
20871			 * distinguish between the last two cases.
20872			 */
20873			return ((tcpstate == TCPS_SYN_SENT) ? ENOTCONN :
20874			    ((tcp->tcp_connid > 0) ? EPIPE : ENOTCONN));
20875		} else if (tcpstate > TCPS_CLOSE_WAIT) {
20876			freemsg(mp);
20877			return (EPIPE);
20878		}
20879
20880		msize = msgdsize(mp);
20881
20882		mutex_enter(&tcp->tcp_non_sq_lock);
20883		tcp->tcp_squeue_bytes += msize;
20884		/*
20885		 * Squeue Flow Control
20886		 */
20887		if (TCP_UNSENT_BYTES(tcp) > connp->conn_sndbuf) {
20888			tcp_setqfull(tcp);
20889		}
20890		mutex_exit(&tcp->tcp_non_sq_lock);
20891
20892		/*
20893		 * The application may pass in an address in the msghdr, but
20894		 * we ignore the address on connection-oriented sockets.
20895		 * Just like BSD this code does not generate an error for
20896		 * TCP (a CONNREQUIRED socket) when sending to an address
20897		 * passed in with sendto/sendmsg. Instead the data is
20898		 * delivered on the connection as if no address had been
20899		 * supplied.
20900		 */
20901		CONN_INC_REF(connp);
20902
20903		if (msg->msg_flags & MSG_OOB) {
20904			SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_output_urgent,
20905			    connp, NULL, tcp_squeue_flag, SQTAG_TCP_OUTPUT);
20906		} else {
20907			SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_output,
20908			    connp, NULL, tcp_squeue_flag, SQTAG_TCP_OUTPUT);
20909		}
20910
20911		return (0);
20912
20913	default:
20914		ASSERT(0);
20915	}
20916
20917	freemsg(mp);
20918	return (0);
20919}
20920
20921/* ARGSUSED2 */
20922void
20923tcp_output_urgent(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
20924{
20925	int len;
20926	uint32_t msize;
20927	conn_t *connp = (conn_t *)arg;
20928	tcp_t *tcp = connp->conn_tcp;
20929
20930	msize = msgdsize(mp);
20931
20932	len = msize - 1;
20933	if (len < 0) {
20934		freemsg(mp);
20935		return;
20936	}
20937
20938	/*
20939	 * Try to force urgent data out on the wire. Even if we have unsent
20940	 * data this will at least send the urgent flag.
20941	 * XXX does not handle more flag correctly.
20942	 */
20943	len += tcp->tcp_unsent;
20944	len += tcp->tcp_snxt;
20945	tcp->tcp_urg = len;
20946	tcp->tcp_valid_bits |= TCP_URG_VALID;
20947
20948	/* Bypass tcp protocol for fused tcp loopback */
20949	if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize))
20950		return;
20951
20952	/* Strip off the T_EXDATA_REQ if the data is from TPI */
20953	if (DB_TYPE(mp) != M_DATA) {
20954		mblk_t *mp1 = mp;
20955		ASSERT(!IPCL_IS_NONSTR(connp));
20956		mp = mp->b_cont;
20957		freeb(mp1);
20958	}
20959	tcp_wput_data(tcp, mp, B_TRUE);
20960}
20961
20962/* ARGSUSED3 */
20963int
20964tcp_getpeername(sock_lower_handle_t proto_handle, struct sockaddr *addr,
20965    socklen_t *addrlenp, cred_t *cr)
20966{
20967	conn_t	*connp = (conn_t *)proto_handle;
20968	tcp_t	*tcp = connp->conn_tcp;
20969
20970	ASSERT(connp->conn_upper_handle != NULL);
20971	/* All Solaris components should pass a cred for this operation. */
20972	ASSERT(cr != NULL);
20973
20974	ASSERT(tcp != NULL);
20975	if (tcp->tcp_state < TCPS_SYN_RCVD)
20976		return (ENOTCONN);
20977
20978	return (conn_getpeername(connp, addr, addrlenp));
20979}
20980
20981/* ARGSUSED3 */
20982int
20983tcp_getsockname(sock_lower_handle_t proto_handle, struct sockaddr *addr,
20984    socklen_t *addrlenp, cred_t *cr)
20985{
20986	conn_t	*connp = (conn_t *)proto_handle;
20987
20988	/* All Solaris components should pass a cred for this operation. */
20989	ASSERT(cr != NULL);
20990
20991	ASSERT(connp->conn_upper_handle != NULL);
20992	return (conn_getsockname(connp, addr, addrlenp));
20993}
20994
20995/*
20996 * tcp_fallback
20997 *
20998 * A direct socket is falling back to using STREAMS. The queue
20999 * that is being passed down was created using tcp_open() with
21000 * the SO_FALLBACK flag set. As a result, the queue is not
21001 * associated with a conn, and the q_ptrs instead contain the
21002 * dev and minor area that should be used.
21003 *
21004 * The 'issocket' flag indicates whether the FireEngine
21005 * optimizations should be used. The common case would be that
21006 * optimizations are enabled, and they might be subsequently
21007 * disabled using the _SIOCSOCKFALLBACK ioctl.
21008 */
21009
21010/*
21011 * An active connection is falling back to TPI. Gather all the information
21012 * required by the STREAM head and TPI sonode and send it up.
21013 */
21014void
21015tcp_fallback_noneager(tcp_t *tcp, mblk_t *stropt_mp, queue_t *q,
21016    boolean_t issocket, so_proto_quiesced_cb_t quiesced_cb)
21017{
21018	conn_t			*connp = tcp->tcp_connp;
21019	struct stroptions	*stropt;
21020	struct T_capability_ack tca;
21021	struct sockaddr_in6	laddr, faddr;
21022	socklen_t 		laddrlen, faddrlen;
21023	short			opts;
21024	int			error;
21025	mblk_t			*mp;
21026
21027	connp->conn_dev = (dev_t)RD(q)->q_ptr;
21028	connp->conn_minor_arena = WR(q)->q_ptr;
21029
21030	RD(q)->q_ptr = WR(q)->q_ptr = connp;
21031
21032	connp->conn_rq = RD(q);
21033	connp->conn_wq = WR(q);
21034
21035	WR(q)->q_qinfo = &tcp_sock_winit;
21036
21037	if (!issocket)
21038		tcp_use_pure_tpi(tcp);
21039
21040	/*
21041	 * free the helper stream
21042	 */
21043	ip_free_helper_stream(connp);
21044
21045	/*
21046	 * Notify the STREAM head about options
21047	 */
21048	DB_TYPE(stropt_mp) = M_SETOPTS;
21049	stropt = (struct stroptions *)stropt_mp->b_rptr;
21050	stropt_mp->b_wptr += sizeof (struct stroptions);
21051	stropt->so_flags = SO_HIWAT | SO_WROFF | SO_MAXBLK;
21052
21053	stropt->so_wroff = connp->conn_ht_iphc_len + (tcp->tcp_loopback ? 0 :
21054	    tcp->tcp_tcps->tcps_wroff_xtra);
21055	if (tcp->tcp_snd_sack_ok)
21056		stropt->so_wroff += TCPOPT_MAX_SACK_LEN;
21057	stropt->so_hiwat = connp->conn_rcvbuf;
21058	stropt->so_maxblk = tcp_maxpsz_set(tcp, B_FALSE);
21059
21060	putnext(RD(q), stropt_mp);
21061
21062	/*
21063	 * Collect the information needed to sync with the sonode
21064	 */
21065	tcp_do_capability_ack(tcp, &tca, TC1_INFO|TC1_ACCEPTOR_ID);
21066
21067	laddrlen = faddrlen = sizeof (sin6_t);
21068	(void) tcp_getsockname((sock_lower_handle_t)connp,
21069	    (struct sockaddr *)&laddr, &laddrlen, CRED());
21070	error = tcp_getpeername((sock_lower_handle_t)connp,
21071	    (struct sockaddr *)&faddr, &faddrlen, CRED());
21072	if (error != 0)
21073		faddrlen = 0;
21074
21075	opts = 0;
21076	if (connp->conn_oobinline)
21077		opts |= SO_OOBINLINE;
21078	if (connp->conn_ixa->ixa_flags & IXAF_DONTROUTE)
21079		opts |= SO_DONTROUTE;
21080
21081	/*
21082	 * Notify the socket that the protocol is now quiescent,
21083	 * and it's therefore safe move data from the socket
21084	 * to the stream head.
21085	 */
21086	(*quiesced_cb)(connp->conn_upper_handle, q, &tca,
21087	    (struct sockaddr *)&laddr, laddrlen,
21088	    (struct sockaddr *)&faddr, faddrlen, opts);
21089
21090	while ((mp = tcp->tcp_rcv_list) != NULL) {
21091		tcp->tcp_rcv_list = mp->b_next;
21092		mp->b_next = NULL;
21093		/* We never do fallback for kernel RPC */
21094		putnext(q, mp);
21095	}
21096	tcp->tcp_rcv_last_head = NULL;
21097	tcp->tcp_rcv_last_tail = NULL;
21098	tcp->tcp_rcv_cnt = 0;
21099}
21100
21101/*
21102 * An eager is falling back to TPI. All we have to do is send
21103 * up a T_CONN_IND.
21104 */
21105void
21106tcp_fallback_eager(tcp_t *eager, boolean_t direct_sockfs)
21107{
21108	tcp_t *listener = eager->tcp_listener;
21109	mblk_t *mp = eager->tcp_conn.tcp_eager_conn_ind;
21110
21111	ASSERT(listener != NULL);
21112	ASSERT(mp != NULL);
21113
21114	eager->tcp_conn.tcp_eager_conn_ind = NULL;
21115
21116	/*
21117	 * TLI/XTI applications will get confused by
21118	 * sending eager as an option since it violates
21119	 * the option semantics. So remove the eager as
21120	 * option since TLI/XTI app doesn't need it anyway.
21121	 */
21122	if (!direct_sockfs) {
21123		struct T_conn_ind *conn_ind;
21124
21125		conn_ind = (struct T_conn_ind *)mp->b_rptr;
21126		conn_ind->OPT_length = 0;
21127		conn_ind->OPT_offset = 0;
21128	}
21129
21130	/*
21131	 * Sockfs guarantees that the listener will not be closed
21132	 * during fallback. So we can safely use the listener's queue.
21133	 */
21134	putnext(listener->tcp_connp->conn_rq, mp);
21135}
21136
21137int
21138tcp_fallback(sock_lower_handle_t proto_handle, queue_t *q,
21139    boolean_t direct_sockfs, so_proto_quiesced_cb_t quiesced_cb)
21140{
21141	tcp_t			*tcp;
21142	conn_t 			*connp = (conn_t *)proto_handle;
21143	int			error;
21144	mblk_t			*stropt_mp;
21145	mblk_t			*ordrel_mp;
21146
21147	tcp = connp->conn_tcp;
21148
21149	stropt_mp = allocb_wait(sizeof (struct stroptions), BPRI_HI, STR_NOSIG,
21150	    NULL);
21151
21152	/* Pre-allocate the T_ordrel_ind mblk. */
21153	ASSERT(tcp->tcp_ordrel_mp == NULL);
21154	ordrel_mp = allocb_wait(sizeof (struct T_ordrel_ind), BPRI_HI,
21155	    STR_NOSIG, NULL);
21156	ordrel_mp->b_datap->db_type = M_PROTO;
21157	((struct T_ordrel_ind *)ordrel_mp->b_rptr)->PRIM_type = T_ORDREL_IND;
21158	ordrel_mp->b_wptr += sizeof (struct T_ordrel_ind);
21159
21160	/*
21161	 * Enter the squeue so that no new packets can come in
21162	 */
21163	error = squeue_synch_enter(connp->conn_sqp, connp, NULL);
21164	if (error != 0) {
21165		/* failed to enter, free all the pre-allocated messages. */
21166		freeb(stropt_mp);
21167		freeb(ordrel_mp);
21168		/*
21169		 * We cannot process the eager, so at least send out a
21170		 * RST so the peer can reconnect.
21171		 */
21172		if (tcp->tcp_listener != NULL) {
21173			(void) tcp_eager_blowoff(tcp->tcp_listener,
21174			    tcp->tcp_conn_req_seqnum);
21175		}
21176		return (ENOMEM);
21177	}
21178
21179	/*
21180	 * Both endpoints must be of the same type (either STREAMS or
21181	 * non-STREAMS) for fusion to be enabled. So if we are fused,
21182	 * we have to unfuse.
21183	 */
21184	if (tcp->tcp_fused)
21185		tcp_unfuse(tcp);
21186
21187	/*
21188	 * No longer a direct socket
21189	 */
21190	connp->conn_flags &= ~IPCL_NONSTR;
21191	tcp->tcp_ordrel_mp = ordrel_mp;
21192
21193	if (tcp->tcp_listener != NULL) {
21194		/* The eager will deal with opts when accept() is called */
21195		freeb(stropt_mp);
21196		tcp_fallback_eager(tcp, direct_sockfs);
21197	} else {
21198		tcp_fallback_noneager(tcp, stropt_mp, q, direct_sockfs,
21199		    quiesced_cb);
21200	}
21201
21202	/*
21203	 * There should be atleast two ref's (IP + TCP)
21204	 */
21205	ASSERT(connp->conn_ref >= 2);
21206	squeue_synch_exit(connp->conn_sqp, connp);
21207
21208	return (0);
21209}
21210
21211/* ARGSUSED */
21212static void
21213tcp_shutdown_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
21214{
21215	conn_t 	*connp = (conn_t *)arg;
21216	tcp_t	*tcp = connp->conn_tcp;
21217
21218	freemsg(mp);
21219
21220	if (tcp->tcp_fused)
21221		tcp_unfuse(tcp);
21222
21223	if (tcp_xmit_end(tcp) != 0) {
21224		/*
21225		 * We were crossing FINs and got a reset from
21226		 * the other side. Just ignore it.
21227		 */
21228		if (connp->conn_debug) {
21229			(void) strlog(TCP_MOD_ID, 0, 1,
21230			    SL_ERROR|SL_TRACE,
21231			    "tcp_shutdown_output() out of state %s",
21232			    tcp_display(tcp, NULL, DISP_ADDR_AND_PORT));
21233		}
21234	}
21235}
21236
21237/* ARGSUSED */
21238int
21239tcp_shutdown(sock_lower_handle_t proto_handle, int how, cred_t *cr)
21240{
21241	conn_t  *connp = (conn_t *)proto_handle;
21242	tcp_t   *tcp = connp->conn_tcp;
21243
21244	ASSERT(connp->conn_upper_handle != NULL);
21245
21246	/* All Solaris components should pass a cred for this operation. */
21247	ASSERT(cr != NULL);
21248
21249	/*
21250	 * X/Open requires that we check the connected state.
21251	 */
21252	if (tcp->tcp_state < TCPS_SYN_SENT)
21253		return (ENOTCONN);
21254
21255	/* shutdown the send side */
21256	if (how != SHUT_RD) {
21257		mblk_t *bp;
21258
21259		bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL);
21260		CONN_INC_REF(connp);
21261		SQUEUE_ENTER_ONE(connp->conn_sqp, bp, tcp_shutdown_output,
21262		    connp, NULL, SQ_NODRAIN, SQTAG_TCP_SHUTDOWN_OUTPUT);
21263
21264		(*connp->conn_upcalls->su_opctl)(connp->conn_upper_handle,
21265		    SOCK_OPCTL_SHUT_SEND, 0);
21266	}
21267
21268	/* shutdown the recv side */
21269	if (how != SHUT_WR)
21270		(*connp->conn_upcalls->su_opctl)(connp->conn_upper_handle,
21271		    SOCK_OPCTL_SHUT_RECV, 0);
21272
21273	return (0);
21274}
21275
21276/*
21277 * SOP_LISTEN() calls into tcp_listen().
21278 */
21279/* ARGSUSED */
21280int
21281tcp_listen(sock_lower_handle_t proto_handle, int backlog, cred_t *cr)
21282{
21283	conn_t	*connp = (conn_t *)proto_handle;
21284	int 	error;
21285	squeue_t *sqp = connp->conn_sqp;
21286
21287	ASSERT(connp->conn_upper_handle != NULL);
21288
21289	/* All Solaris components should pass a cred for this operation. */
21290	ASSERT(cr != NULL);
21291
21292	error = squeue_synch_enter(sqp, connp, NULL);
21293	if (error != 0) {
21294		/* failed to enter */
21295		return (ENOBUFS);
21296	}
21297
21298	error = tcp_do_listen(connp, NULL, 0, backlog, cr, FALSE);
21299	if (error == 0) {
21300		(*connp->conn_upcalls->su_opctl)(connp->conn_upper_handle,
21301		    SOCK_OPCTL_ENAB_ACCEPT, (uintptr_t)backlog);
21302	} else if (error < 0) {
21303		if (error == -TOUTSTATE)
21304			error = EINVAL;
21305		else
21306			error = proto_tlitosyserr(-error);
21307	}
21308	squeue_synch_exit(sqp, connp);
21309	return (error);
21310}
21311
21312static int
21313tcp_do_listen(conn_t *connp, struct sockaddr *sa, socklen_t len,
21314    int backlog, cred_t *cr, boolean_t bind_to_req_port_only)
21315{
21316	tcp_t		*tcp = connp->conn_tcp;
21317	int		error = 0;
21318	tcp_stack_t	*tcps = tcp->tcp_tcps;
21319
21320	/* All Solaris components should pass a cred for this operation. */
21321	ASSERT(cr != NULL);
21322
21323	if (tcp->tcp_state >= TCPS_BOUND) {
21324		if ((tcp->tcp_state == TCPS_BOUND ||
21325		    tcp->tcp_state == TCPS_LISTEN) && backlog > 0) {
21326			/*
21327			 * Handle listen() increasing backlog.
21328			 * This is more "liberal" then what the TPI spec
21329			 * requires but is needed to avoid a t_unbind
21330			 * when handling listen() since the port number
21331			 * might be "stolen" between the unbind and bind.
21332			 */
21333			goto do_listen;
21334		}
21335		if (connp->conn_debug) {
21336			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
21337			    "tcp_listen: bad state, %d", tcp->tcp_state);
21338		}
21339		return (-TOUTSTATE);
21340	} else {
21341		if (sa == NULL) {
21342			sin6_t	addr;
21343			sin_t *sin;
21344			sin6_t *sin6;
21345
21346			ASSERT(IPCL_IS_NONSTR(connp));
21347			/* Do an implicit bind: Request for a generic port. */
21348			if (connp->conn_family == AF_INET) {
21349				len = sizeof (sin_t);
21350				sin = (sin_t *)&addr;
21351				*sin = sin_null;
21352				sin->sin_family = AF_INET;
21353			} else {
21354				ASSERT(connp->conn_family == AF_INET6);
21355				len = sizeof (sin6_t);
21356				sin6 = (sin6_t *)&addr;
21357				*sin6 = sin6_null;
21358				sin6->sin6_family = AF_INET6;
21359			}
21360			sa = (struct sockaddr *)&addr;
21361		}
21362
21363		error = tcp_bind_check(connp, sa, len, cr,
21364		    bind_to_req_port_only);
21365		if (error)
21366			return (error);
21367		/* Fall through and do the fanout insertion */
21368	}
21369
21370do_listen:
21371	ASSERT(tcp->tcp_state == TCPS_BOUND || tcp->tcp_state == TCPS_LISTEN);
21372	tcp->tcp_conn_req_max = backlog;
21373	if (tcp->tcp_conn_req_max) {
21374		if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min)
21375			tcp->tcp_conn_req_max = tcps->tcps_conn_req_min;
21376		if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q)
21377			tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q;
21378		/*
21379		 * If this is a listener, do not reset the eager list
21380		 * and other stuffs.  Note that we don't check if the
21381		 * existing eager list meets the new tcp_conn_req_max
21382		 * requirement.
21383		 */
21384		if (tcp->tcp_state != TCPS_LISTEN) {
21385			tcp->tcp_state = TCPS_LISTEN;
21386			/* Initialize the chain. Don't need the eager_lock */
21387			tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
21388			tcp->tcp_eager_next_drop_q0 = tcp;
21389			tcp->tcp_eager_prev_drop_q0 = tcp;
21390			tcp->tcp_second_ctimer_threshold =
21391			    tcps->tcps_ip_abort_linterval;
21392		}
21393	}
21394
21395	/*
21396	 * We need to make sure that the conn_recv is set to a non-null
21397	 * value before we insert the conn into the classifier table.
21398	 * This is to avoid a race with an incoming packet which does an
21399	 * ipcl_classify().
21400	 * We initially set it to tcp_input_listener_unbound to try to
21401	 * pick a good squeue for the listener when the first SYN arrives.
21402	 * tcp_input_listener_unbound sets it to tcp_input_listener on that
21403	 * first SYN.
21404	 */
21405	connp->conn_recv = tcp_input_listener_unbound;
21406
21407	/* Insert the listener in the classifier table */
21408	error = ip_laddr_fanout_insert(connp);
21409	if (error != 0) {
21410		/* Undo the bind - release the port number */
21411		tcp->tcp_state = TCPS_IDLE;
21412		connp->conn_bound_addr_v6 = ipv6_all_zeros;
21413
21414		connp->conn_laddr_v6 = ipv6_all_zeros;
21415		connp->conn_saddr_v6 = ipv6_all_zeros;
21416		connp->conn_ports = 0;
21417
21418		if (connp->conn_anon_port) {
21419			zone_t		*zone;
21420
21421			zone = crgetzone(cr);
21422			connp->conn_anon_port = B_FALSE;
21423			(void) tsol_mlp_anon(zone, connp->conn_mlp_type,
21424			    connp->conn_proto, connp->conn_lport, B_FALSE);
21425		}
21426		connp->conn_mlp_type = mlptSingle;
21427
21428		tcp_bind_hash_remove(tcp);
21429		return (error);
21430	}
21431	return (error);
21432}
21433
21434void
21435tcp_clr_flowctrl(sock_lower_handle_t proto_handle)
21436{
21437	conn_t  *connp = (conn_t *)proto_handle;
21438	tcp_t	*tcp = connp->conn_tcp;
21439	mblk_t *mp;
21440	int error;
21441
21442	ASSERT(connp->conn_upper_handle != NULL);
21443
21444	/*
21445	 * If tcp->tcp_rsrv_mp == NULL, it means that tcp_clr_flowctrl()
21446	 * is currently running.
21447	 */
21448	mutex_enter(&tcp->tcp_rsrv_mp_lock);
21449	if ((mp = tcp->tcp_rsrv_mp) == NULL) {
21450		mutex_exit(&tcp->tcp_rsrv_mp_lock);
21451		return;
21452	}
21453	tcp->tcp_rsrv_mp = NULL;
21454	mutex_exit(&tcp->tcp_rsrv_mp_lock);
21455
21456	error = squeue_synch_enter(connp->conn_sqp, connp, mp);
21457	ASSERT(error == 0);
21458
21459	mutex_enter(&tcp->tcp_rsrv_mp_lock);
21460	tcp->tcp_rsrv_mp = mp;
21461	mutex_exit(&tcp->tcp_rsrv_mp_lock);
21462
21463	if (tcp->tcp_fused) {
21464		tcp_fuse_backenable(tcp);
21465	} else {
21466		tcp->tcp_rwnd = connp->conn_rcvbuf;
21467		/*
21468		 * Send back a window update immediately if TCP is above
21469		 * ESTABLISHED state and the increase of the rcv window
21470		 * that the other side knows is at least 1 MSS after flow
21471		 * control is lifted.
21472		 */
21473		if (tcp->tcp_state >= TCPS_ESTABLISHED &&
21474		    tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) {
21475			tcp_xmit_ctl(NULL, tcp,
21476			    (tcp->tcp_swnd == 0) ? tcp->tcp_suna :
21477			    tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
21478		}
21479	}
21480
21481	squeue_synch_exit(connp->conn_sqp, connp);
21482}
21483
21484/* ARGSUSED */
21485int
21486tcp_ioctl(sock_lower_handle_t proto_handle, int cmd, intptr_t arg,
21487    int mode, int32_t *rvalp, cred_t *cr)
21488{
21489	conn_t  	*connp = (conn_t *)proto_handle;
21490	int		error;
21491
21492	ASSERT(connp->conn_upper_handle != NULL);
21493
21494	/* All Solaris components should pass a cred for this operation. */
21495	ASSERT(cr != NULL);
21496
21497	/*
21498	 * If we don't have a helper stream then create one.
21499	 * ip_create_helper_stream takes care of locking the conn_t,
21500	 * so this check for NULL is just a performance optimization.
21501	 */
21502	if (connp->conn_helper_info == NULL) {
21503		tcp_stack_t *tcps = connp->conn_tcp->tcp_tcps;
21504
21505		/*
21506		 * Create a helper stream for non-STREAMS socket.
21507		 */
21508		error = ip_create_helper_stream(connp, tcps->tcps_ldi_ident);
21509		if (error != 0) {
21510			ip0dbg(("tcp_ioctl: create of IP helper stream "
21511			    "failed %d\n", error));
21512			return (error);
21513		}
21514	}
21515
21516	switch (cmd) {
21517		case ND_SET:
21518		case ND_GET:
21519		case _SIOCSOCKFALLBACK:
21520		case TCP_IOC_ABORT_CONN:
21521		case TI_GETPEERNAME:
21522		case TI_GETMYNAME:
21523			ip1dbg(("tcp_ioctl: cmd 0x%x on non sreams socket",
21524			    cmd));
21525			error = EINVAL;
21526			break;
21527		default:
21528			/*
21529			 * Pass on to IP using helper stream
21530			 */
21531			error = ldi_ioctl(connp->conn_helper_info->iphs_handle,
21532			    cmd, arg, mode, cr, rvalp);
21533			break;
21534	}
21535	return (error);
21536}
21537
21538sock_downcalls_t sock_tcp_downcalls = {
21539	tcp_activate,
21540	tcp_accept,
21541	tcp_bind,
21542	tcp_listen,
21543	tcp_connect,
21544	tcp_getpeername,
21545	tcp_getsockname,
21546	tcp_getsockopt,
21547	tcp_setsockopt,
21548	tcp_sendmsg,
21549	NULL,
21550	NULL,
21551	NULL,
21552	tcp_shutdown,
21553	tcp_clr_flowctrl,
21554	tcp_ioctl,
21555	tcp_close,
21556};
21557