1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * Copyright (c) 2004 The FreeBSD Foundation
5 * Copyright (c) 2004-2006 Robert N. M. Watson
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 4. Neither the name of the University nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
32 */
33
34/*
35 * Comments on the socket life cycle:
36 *
37 * soalloc() sets up socket layer state for a socket, called only by
38 * socreate() and sonewconn().  Socket layer private.
39 *
40 * sodealloc() tears down socket layer state for a socket, called only by
41 * sofree(), socreate(), and sonewconn().  Socket layer private.
42 *
43 * pru_attach() associates protocol layer state with an allocated socket;
44 * called only once, may fail, aborting socket allocation.  This is called
45 * from socreate() and sonewconn().  Socket layer private.
46 *
47 * pru_detach() disassociates protocol layer state from an attached socket,
48 * and will be called exactly once for sockets in which pru_attach() has
49 * been successfully called.  If pru_attach() returned an error,
50 * pru_detach() will not be called.  Socket layer private.
51 *
52 * socreate() creates a socket and attaches protocol state.  This is a public
53 * interface that may be used by socket layer consumers to create new
54 * sockets.
55 *
56 * sonewconn() creates a socket and attaches protocol state.  This is a
57 * public interface that may be used by protocols to create new sockets when
58 * a new connection is received and will be available for accept() on a
59 * listen socket.
60 *
61 * soclose() destroys a socket after possibly waiting for it to disconnect.
62 * This is a public interface that socket consumers should use to close and
63 * release a socket when done with it.
64 *
65 * soabort() destroys a socket without waiting for it to disconnect (used
66 * only for incoming connections that are already partially or fully
67 * connected).  This is used internally by the socket layer when clearing
68 * listen socket queues (due to overflow or close on the listen socket), but
69 * is also a public interface protocols may use to abort connections in
70 * their incomplete listen queues should they no longer be required.  Sockets
71 * placed in completed connection listen queues should not be aborted.
72 *
73 * sofree() will free a socket and its protocol state if all references on
74 * the socket have been released, and is the public interface to attempt to
75 * free a socket when a reference is removed.  This is a socket layer private
76 * interface.
77 *
78 * NOTE: In addition to socreate() and soclose(), which provide a single
79 * socket reference to the consumer to be managed as required, there are two
80 * calls to explicitly manage socket references, soref() and sorele().
81 * Currently, these are generally required only when transitioning a socket
82 * from a listen queue to a file descriptor, in order to prevent garbage
83 * collection of the socket at an untimely moment.  For a number of reasons,
84 * these interfaces are not preferred, and should be avoided.
85 *
86 * XXXRW: The behavior of sockets after soclose() but before the last
87 * sorele() is poorly defined.  We can probably entirely eliminate them with
88 * a little work, since consumers are managing references anyway.
89 */
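
/*
 * Example (a sketch, not compiled code) of a kernel consumer driving the
 * public life cycle above.  The PF_INET/SOCK_STREAM parameters and the
 * use of the current thread's credential are illustrative assumptions:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(PF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error)
 *		return (error);
 *	... use so via sosend()/soreceive() ...
 *	error = soclose(so);	releases the socreate() reference
 */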
90
91#include <sys/cdefs.h>
92__FBSDID("$FreeBSD: head/sys/kern/uipc_socket.c 160549 2006-07-21 17:11:15Z rwatson $");
93
94#include "opt_inet.h"
95#include "opt_mac.h"
96#include "opt_zero.h"
97#include "opt_compat.h"
98
99#include <sys/param.h>
100#include <sys/systm.h>
101#include <sys/fcntl.h>
102#include <sys/limits.h>
103#include <sys/lock.h>
104#include <sys/mac.h>
105#include <sys/malloc.h>
106#include <sys/mbuf.h>
107#include <sys/mutex.h>
108#include <sys/domain.h>
109#include <sys/file.h>			/* for struct knote */
110#include <sys/kernel.h>
111#include <sys/event.h>
112#include <sys/eventhandler.h>
113#include <sys/poll.h>
114#include <sys/proc.h>
115#include <sys/protosw.h>
116#include <sys/socket.h>
117#include <sys/socketvar.h>
118#include <sys/resourcevar.h>
119#include <sys/signalvar.h>
120#include <sys/sysctl.h>
121#include <sys/uio.h>
122#include <sys/jail.h>
123
124#include <vm/uma.h>
125
126#ifdef COMPAT_IA32
127#include <sys/mount.h>
128#include <compat/freebsd32/freebsd32.h>
129
130extern struct sysentvec ia32_freebsd_sysvec;
131#endif
132
133static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
134		    int flags);
135
136static void	filt_sordetach(struct knote *kn);
137static int	filt_soread(struct knote *kn, long hint);
138static void	filt_sowdetach(struct knote *kn);
139static int	filt_sowrite(struct knote *kn, long hint);
140static int	filt_solisten(struct knote *kn, long hint);
141
142static struct filterops solisten_filtops =
143	{ 1, NULL, filt_sordetach, filt_solisten };
144static struct filterops soread_filtops =
145	{ 1, NULL, filt_sordetach, filt_soread };
146static struct filterops sowrite_filtops =
147	{ 1, NULL, filt_sowdetach, filt_sowrite };
148
149uma_zone_t socket_zone;
150so_gen_t	so_gencnt;	/* generation count for sockets */
151
152int	maxsockets;
153
154MALLOC_DEFINE(M_SONAME, "soname", "socket name");
155MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
156
157static int somaxconn = SOMAXCONN;
158static int somaxconn_sysctl(SYSCTL_HANDLER_ARGS);
159/* XXX: we don't have SYSCTL_USHORT */
160SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
161    0, sizeof(int), somaxconn_sysctl, "I", "Maximum pending socket connection "
162    "queue size");
163static int numopensockets;
164SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
165    &numopensockets, 0, "Number of open sockets");
166#ifdef ZERO_COPY_SOCKETS
167/* These aren't static because they're used in other files. */
168int so_zero_copy_send = 1;
169int so_zero_copy_receive = 1;
170SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
171    "Zero copy controls");
172SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
173    &so_zero_copy_receive, 0, "Enable zero copy receive");
174SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
175    &so_zero_copy_send, 0, "Enable zero copy send");
176#endif /* ZERO_COPY_SOCKETS */
177
178/*
179 * accept_mtx locks down per-socket fields relating to accept queues.  See
180 * socketvar.h for an annotation of the protected fields of struct socket.
181 */
182struct mtx accept_mtx;
183MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
184
185/*
186 * so_global_mtx protects the global so_gencnt and numopensockets, as well
187 * as updates of the per-socket so_gencnt field.
188 */
189static struct mtx so_global_mtx;
190MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_global", MTX_DEF);
191
192SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
193
194static int
195sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
196{
197	int error, newmaxsockets;
198
199	newmaxsockets = maxsockets;
200	error = sysctl_handle_int(oidp, &newmaxsockets, sizeof(int), req);
201	if (error == 0 && req->newptr) {
202		if (newmaxsockets > maxsockets) {
203			maxsockets = newmaxsockets;
204			if (maxsockets > ((maxfiles / 4) * 3)) {
205				maxfiles = (maxsockets * 5) / 4;
206				maxfilesperproc = (maxfiles * 9) / 10;
207			}
208			EVENTHANDLER_INVOKE(maxsockets_change);
209		} else
210			error = EINVAL;
211	}
212	return (error);
213}
214
215SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
216    &maxsockets, 0, sysctl_maxsockets, "IU",
217    "Maximum number of sockets avaliable");
218
219/*
220 * Initialize maxsockets.
221 */
222static void init_maxsockets(void *ignored)
223{
224	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
225	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
226}
227SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
228
229/*
230 * Socket operation routines.
231 * These routines are called by the routines in
232 * sys_socket.c or from a system process, and
233 * implement the semantics of socket operations by
234 * switching out to the protocol specific routines.
235 */
236
237/*
238 * Get a socket structure from our zone, and initialize it.
239 * Note that it would probably be better to allocate socket
240 * and PCB at the same time, but I'm not convinced that all
241 * the protocols can be easily modified to do this.
242 *
243 * soalloc() returns a socket with a ref count of 0.
244 */
245static struct socket *
246soalloc(int mflags)
247{
248	struct socket *so;
249
250	so = uma_zalloc(socket_zone, mflags | M_ZERO);
251	if (so == NULL)
252		return (NULL);
253#ifdef MAC
254	if (mac_init_socket(so, mflags) != 0) {
255		uma_zfree(socket_zone, so);
256		return (NULL);
257	}
258#endif
259	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
260	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
261	TAILQ_INIT(&so->so_aiojobq);
262	mtx_lock(&so_global_mtx);
263	so->so_gencnt = ++so_gencnt;
264	++numopensockets;
265	mtx_unlock(&so_global_mtx);
266	return (so);
267}
268
269static void
270sodealloc(struct socket *so)
271{
272
273	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
274	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
275
276	mtx_lock(&so_global_mtx);
277	so->so_gencnt = ++so_gencnt;
278	mtx_unlock(&so_global_mtx);
279	if (so->so_rcv.sb_hiwat)
280		(void)chgsbsize(so->so_cred->cr_uidinfo,
281		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
282	if (so->so_snd.sb_hiwat)
283		(void)chgsbsize(so->so_cred->cr_uidinfo,
284		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
285#ifdef INET
286	/* Remove accept filter if one is present. */
287	if (so->so_accf != NULL)
288		do_setopt_accept_filter(so, NULL);
289#endif
290#ifdef MAC
291	mac_destroy_socket(so);
292#endif
293	crfree(so->so_cred);
294	SOCKBUF_LOCK_DESTROY(&so->so_snd);
295	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
296	uma_zfree(socket_zone, so);
297	mtx_lock(&so_global_mtx);
298	--numopensockets;
299	mtx_unlock(&so_global_mtx);
300}
301
302/*
303 * socreate returns a socket with a ref count of 1.  The socket should be
304 * closed with soclose().
305 */
306int
307socreate(dom, aso, type, proto, cred, td)
308	int dom;
309	struct socket **aso;
310	int type;
311	int proto;
312	struct ucred *cred;
313	struct thread *td;
314{
315	struct protosw *prp;
316	struct socket *so;
317	int error;
318
319	if (proto)
320		prp = pffindproto(dom, proto, type);
321	else
322		prp = pffindtype(dom, type);
323
324	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
325	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
326		return (EPROTONOSUPPORT);
327
328	if (jailed(cred) && jail_socket_unixiproute_only &&
329	    prp->pr_domain->dom_family != PF_LOCAL &&
330	    prp->pr_domain->dom_family != PF_INET &&
331	    prp->pr_domain->dom_family != PF_ROUTE) {
332		return (EPROTONOSUPPORT);
333	}
334
335	if (prp->pr_type != type)
336		return (EPROTOTYPE);
337	so = soalloc(M_WAITOK);
338	if (so == NULL)
339		return (ENOBUFS);
340
341	TAILQ_INIT(&so->so_incomp);
342	TAILQ_INIT(&so->so_comp);
343	so->so_type = type;
344	so->so_cred = crhold(cred);
345	so->so_proto = prp;
346#ifdef MAC
347	mac_create_socket(cred, so);
348#endif
349	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
350	    NULL, NULL, NULL);
351	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
352	    NULL, NULL, NULL);
353	so->so_count = 1;
354	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
355	if (error) {
356		sodealloc(so);
357		return (error);
358	}
359	*aso = so;
360	return (0);
361}
362
363#ifdef REGRESSION
364static int regression_sonewconn_earlytest = 1;
365SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
366    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
367#endif
368
369/*
370 * When an attempt at a new connection is noted on a socket
371 * which accepts connections, sonewconn is called.  If the
372 * connection is possible (subject to space constraints, etc.)
373 * then we allocate a new structure, properly linked into the
374 * data structure of the original socket, and return this.
375 * connstatus may be 0, SS_ISCONFIRMING, or SS_ISCONNECTED.
376 *
377 * Note: the ref count on the socket is 0 on return.
378 */
379struct socket *
380sonewconn(head, connstatus)
381	register struct socket *head;
382	int connstatus;
383{
384	register struct socket *so;
385	int over;
386
387	ACCEPT_LOCK();
388	over = (head->so_qlen > 3 * head->so_qlimit / 2);
389	ACCEPT_UNLOCK();
390#ifdef REGRESSION
391	if (regression_sonewconn_earlytest && over)
392#else
393	if (over)
394#endif
395		return (NULL);
396	so = soalloc(M_NOWAIT);
397	if (so == NULL)
398		return (NULL);
399	if ((head->so_options & SO_ACCEPTFILTER) != 0)
400		connstatus = 0;
401	so->so_head = head;
402	so->so_type = head->so_type;
403	so->so_options = head->so_options &~ SO_ACCEPTCONN;
404	so->so_linger = head->so_linger;
405	so->so_state = head->so_state | SS_NOFDREF;
406	so->so_proto = head->so_proto;
407	so->so_timeo = head->so_timeo;
408	so->so_cred = crhold(head->so_cred);
409#ifdef MAC
410	SOCK_LOCK(head);
411	mac_create_socket_from_socket(head, so);
412	SOCK_UNLOCK(head);
413#endif
414	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
415	    NULL, NULL, NULL);
416	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
417	    NULL, NULL, NULL);
418	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
419	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
420		sodealloc(so);
421		return (NULL);
422	}
423	so->so_state |= connstatus;
424	ACCEPT_LOCK();
425	if (connstatus) {
426		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
427		so->so_qstate |= SQ_COMP;
428		head->so_qlen++;
429	} else {
430		/*
431		 * Keep removing sockets from the head until there's room for
432		 * us to insert on the tail.  In pre-locking revisions, this
433		 * was a simple if(), but as we could be racing with other
434		 * threads and soabort() requires dropping locks, we must
435		 * loop waiting for the condition to be true.
436		 */
437		while (head->so_incqlen > head->so_qlimit) {
438			struct socket *sp;
439			sp = TAILQ_FIRST(&head->so_incomp);
440			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
441			head->so_incqlen--;
442			sp->so_qstate &= ~SQ_INCOMP;
443			sp->so_head = NULL;
444			ACCEPT_UNLOCK();
445			soabort(sp);
446			ACCEPT_LOCK();
447		}
448		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
449		so->so_qstate |= SQ_INCOMP;
450		head->so_incqlen++;
451	}
452	ACCEPT_UNLOCK();
453	if (connstatus) {
454		sorwakeup(head);
455		wakeup_one(&head->so_timeo);
456	}
457	return (so);
458}
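
/*
 * Sketch (not compiled) of the usage described above: a connection-
 * oriented protocol might create and queue a socket for an inbound
 * connection request on listen socket 'head' as follows (protocol
 * connection setup omitted):
 *
 *	so = sonewconn(head, 0);
 *	if (so == NULL)
 *		return;		(queue limits or allocation failure)
 *	... complete protocol connection setup for so ...
 *	soisconnected(so);	(moves so to the completed queue)
 */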
459
460int
461sobind(so, nam, td)
462	struct socket *so;
463	struct sockaddr *nam;
464	struct thread *td;
465{
466
467	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
468}
469
470/*
471 * solisten() transitions a socket from a non-listening state to a listening
472 * state, but can also be used to update the listen queue depth on an
473 * existing listen socket.  The protocol will call back into the sockets
474 * layer using solisten_proto_check() and solisten_proto() to check and set
475 * socket-layer listen state.  Call backs are used so that the protocol can
476 * acquire both protocol and socket layer locks in whatever order is required
477 * by the protocol.
478 *
479 * Protocol implementors are advised to hold the socket lock across the
480 * socket-layer test and set to avoid races at the socket layer.
481 */
482int
483solisten(so, backlog, td)
484	struct socket *so;
485	int backlog;
486	struct thread *td;
487{
488
489	return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
490}
491
492int
493solisten_proto_check(so)
494	struct socket *so;
495{
496
497	SOCK_LOCK_ASSERT(so);
498
499	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
500	    SS_ISDISCONNECTING))
501		return (EINVAL);
502	return (0);
503}
504
505void
506solisten_proto(so, backlog)
507	struct socket *so;
508	int backlog;
509{
510
511	SOCK_LOCK_ASSERT(so);
512
513	if (backlog < 0 || backlog > somaxconn)
514		backlog = somaxconn;
515	so->so_qlimit = backlog;
516	so->so_options |= SO_ACCEPTCONN;
517}
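
/*
 * Sketch (not compiled) of the callback pattern described in the comment
 * above solisten(), loosely modeled on a TCP-style pru_listen; the
 * protocol lock names are illustrative:
 *
 *	static int
 *	foo_listen(struct socket *so, int backlog, struct thread *td)
 *	{
 *		int error;
 *
 *		FOO_PROTO_LOCK();
 *		SOCK_LOCK(so);
 *		error = solisten_proto_check(so);
 *		if (error == 0)
 *			solisten_proto(so, backlog);
 *		SOCK_UNLOCK(so);
 *		FOO_PROTO_UNLOCK();
 *		return (error);
 *	}
 */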
518
519/*
520 * Attempt to free a socket.  This should really be sotryfree().
521 *
522 * sofree() will succeed if:
523 *
524 * - There are no outstanding file descriptor references or related consumers
525 *   (so_count == 0).
526 *
527 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
528 *
529 * - The protocol does not have an outstanding strong reference on the socket
530 *   (SS_PROTOREF).
531 *
532 * - The socket is not in a completed connection queue, where a process has
533 *   been notified that it is present.  If it were removed, the user process
534 *   could block in accept() despite select() saying the socket was ready.
535 *
536 * Otherwise, it will quietly abort so that a future call to sofree(), when
537 * conditions are right, can succeed.
538 */
539void
540sofree(so)
541	struct socket *so;
542{
543	struct socket *head;
544
545	ACCEPT_LOCK_ASSERT();
546	SOCK_LOCK_ASSERT(so);
547
548	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
549	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
550		SOCK_UNLOCK(so);
551		ACCEPT_UNLOCK();
552		return;
553	}
554
555	head = so->so_head;
556	if (head != NULL) {
557		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
558		    (so->so_qstate & SQ_INCOMP) != 0,
559		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
560		    "SQ_INCOMP"));
561		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
562		    (so->so_qstate & SQ_INCOMP) == 0,
563		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
564		TAILQ_REMOVE(&head->so_incomp, so, so_list);
565		head->so_incqlen--;
566		so->so_qstate &= ~SQ_INCOMP;
567		so->so_head = NULL;
568	}
569	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
570	    (so->so_qstate & SQ_INCOMP) == 0,
571	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
572	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
573	SOCK_UNLOCK(so);
574	ACCEPT_UNLOCK();
575
576	SOCKBUF_LOCK(&so->so_snd);
577	so->so_snd.sb_flags |= SB_NOINTR;
578	(void)sblock(&so->so_snd, M_WAITOK);
579	/*
580	 * socantsendmore_locked() drops the socket buffer mutex so that it
581	 * can safely perform wakeups.  Re-acquire the mutex before
582	 * continuing.
583	 */
584	socantsendmore_locked(so);
585	SOCKBUF_LOCK(&so->so_snd);
586	sbunlock(&so->so_snd);
587	sbrelease_locked(&so->so_snd, so);
588	SOCKBUF_UNLOCK(&so->so_snd);
589	sorflush(so);
590	knlist_destroy(&so->so_rcv.sb_sel.si_note);
591	knlist_destroy(&so->so_snd.sb_sel.si_note);
592	if (so->so_proto->pr_usrreqs->pru_detach != NULL)
593		(*so->so_proto->pr_usrreqs->pru_detach)(so);
594	sodealloc(so);
595}
596
597/*
598 * Close a socket on last file table reference removal.
599 * Initiate disconnect if connected.
600 * Free socket when disconnect complete.
601 *
602 * This function will sorele() the socket.  Note that soclose() may be
603 * called prior to the ref count reaching zero.  The actual socket
604 * structure will not be freed until the ref count reaches zero.
605 */
606int
607soclose(so)
608	struct socket *so;
609{
610	int error = 0;
611
612	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
613
614	funsetown(&so->so_sigio);
615	if (so->so_options & SO_ACCEPTCONN) {
616		struct socket *sp;
617		ACCEPT_LOCK();
618		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
619			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
620			so->so_incqlen--;
621			sp->so_qstate &= ~SQ_INCOMP;
622			sp->so_head = NULL;
623			ACCEPT_UNLOCK();
624			soabort(sp);
625			ACCEPT_LOCK();
626		}
627		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
628			TAILQ_REMOVE(&so->so_comp, sp, so_list);
629			so->so_qlen--;
630			sp->so_qstate &= ~SQ_COMP;
631			sp->so_head = NULL;
632			ACCEPT_UNLOCK();
633			soabort(sp);
634			ACCEPT_LOCK();
635		}
636		ACCEPT_UNLOCK();
637	}
638	if (so->so_state & SS_ISCONNECTED) {
639		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
640			error = sodisconnect(so);
641			if (error)
642				goto drop;
643		}
644		if (so->so_options & SO_LINGER) {
645			if ((so->so_state & SS_ISDISCONNECTING) &&
646			    (so->so_state & SS_NBIO))
647				goto drop;
648			while (so->so_state & SS_ISCONNECTED) {
649				error = tsleep(&so->so_timeo,
650				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
651				if (error)
652					break;
653			}
654		}
655	}
656
657drop:
658	if (so->so_proto->pr_usrreqs->pru_close != NULL)
659		(*so->so_proto->pr_usrreqs->pru_close)(so);
660	ACCEPT_LOCK();
661	SOCK_LOCK(so);
662	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
663	so->so_state |= SS_NOFDREF;
664	sorele(so);
665	return (error);
666}
667
668/*
669 * soabort() is used to abruptly tear down a connection, such as when a
670 * resource limit is reached (listen queue depth exceeded), or if a listen
671 * socket is closed while there are sockets waiting to be accepted.
672 *
673 * This interface is tricky, because it is called on an unreferenced socket,
674 * and must be called only by a thread that has actually removed the socket
675 * from the listen queue it was on, or races with other threads are risked.
676 *
677 * This interface will call into the protocol code, so must not be called
678 * with any socket locks held.  Protocols do call it while holding their own
679 * recursible protocol mutexes, but this is something that should be subject
680 * to review in the future.
681 */
682void
683soabort(so)
684	struct socket *so;
685{
686
687	/*
688	 * In as much as is possible, assert that no references to this
689	 * socket are held.  This is not quite the same as asserting that the
690	 * current thread is responsible for arranging for no references, but
691	 * is as close as we can get for now.
692	 */
693	KASSERT(so->so_count == 0, ("soabort: so_count"));
694	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
695	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
696	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
697	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));
698
699	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
700		(*so->so_proto->pr_usrreqs->pru_abort)(so);
701	ACCEPT_LOCK();
702	SOCK_LOCK(so);
703	sofree(so);
704}
705
706int
707soaccept(so, nam)
708	struct socket *so;
709	struct sockaddr **nam;
710{
711	int error;
712
713	SOCK_LOCK(so);
714	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
715	so->so_state &= ~SS_NOFDREF;
716	SOCK_UNLOCK(so);
717	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
718	return (error);
719}
720
721int
722soconnect(so, nam, td)
723	struct socket *so;
724	struct sockaddr *nam;
725	struct thread *td;
726{
727	int error;
728
729	if (so->so_options & SO_ACCEPTCONN)
730		return (EOPNOTSUPP);
731	/*
732	 * If protocol is connection-based, can only connect once.
733	 * Otherwise, if connected, try to disconnect first.
734	 * This allows a user to disconnect by connecting to, e.g.,
735	 * a null address.
736	 */
737	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
738	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
739	    (error = sodisconnect(so)))) {
740		error = EISCONN;
741	} else {
742		/*
743		 * Prevent accumulated error from previous connection
744		 * from biting us.
745		 */
746		so->so_error = 0;
747		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
748	}
749
750	return (error);
751}
752
753int
754soconnect2(so1, so2)
755	struct socket *so1;
756	struct socket *so2;
757{
758
759	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
760}
761
762int
763sodisconnect(so)
764	struct socket *so;
765{
766	int error;
767
768	if ((so->so_state & SS_ISCONNECTED) == 0)
769		return (ENOTCONN);
770	if (so->so_state & SS_ISDISCONNECTING)
771		return (EALREADY);
772	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
773	return (error);
774}
775
776#ifdef ZERO_COPY_SOCKETS
777struct so_zerocopy_stats {
778	int size_ok;
779	int align_ok;
780	int found_ifp;
781};
782struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
783#include <netinet/in.h>
784#include <net/route.h>
785#include <netinet/in_pcb.h>
786#include <vm/vm.h>
787#include <vm/vm_page.h>
788#include <vm/vm_object.h>
789#endif /*ZERO_COPY_SOCKETS*/
790
791/*
792 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
793 * all of the data referenced by the uio.  If desired, it uses zero-copy.
794 * *space will be updated to reflect data copied in.
795 *
796 * NB: If atomic I/O is requested, the caller must already have checked that
797 * space can hold resid bytes.
798 *
799 * NB: In the event of an error, the caller may need to free the partial
800 * chain pointed to by *retmp.  The contents of both *uio and *space may be
801 * modified even in the case of an error.
802 */
803static int
804sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
805    int flags)
806{
807	struct mbuf *m, **mp, *top;
808	long len, resid;
809	int error;
810#ifdef ZERO_COPY_SOCKETS
811	int cow_send;
812#endif
813
814	*retmp = top = NULL;
815	mp = &top;
816	len = 0;
817	resid = uio->uio_resid;
818	error = 0;
819	do {
820#ifdef ZERO_COPY_SOCKETS
821		cow_send = 0;
822#endif /* ZERO_COPY_SOCKETS */
823		if (resid >= MINCLSIZE) {
824#ifdef ZERO_COPY_SOCKETS
825			if (top == NULL) {
826				MGETHDR(m, M_TRYWAIT, MT_DATA);
827				if (m == NULL) {
828					error = ENOBUFS;
829					goto out;
830				}
831				m->m_pkthdr.len = 0;
832				m->m_pkthdr.rcvif = NULL;
833			} else {
834				MGET(m, M_TRYWAIT, MT_DATA);
835				if (m == NULL) {
836					error = ENOBUFS;
837					goto out;
838				}
839			}
840			if (so_zero_copy_send &&
841			    resid >= PAGE_SIZE &&
842			    *space >= PAGE_SIZE &&
843			    uio->uio_iov->iov_len >= PAGE_SIZE) {
844				so_zerocp_stats.size_ok++;
845				so_zerocp_stats.align_ok++;
846				cow_send = socow_setup(m, uio);
847				len = cow_send;
848			}
849			if (!cow_send) {
850				MCLGET(m, M_TRYWAIT);
851				if ((m->m_flags & M_EXT) == 0) {
852					m_free(m);
853					m = NULL;
854				} else {
855					len = min(min(MCLBYTES, resid),
856					    *space);
857				}
858			}
859#else /* ZERO_COPY_SOCKETS */
860			if (top == NULL) {
861				m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
862				m->m_pkthdr.len = 0;
863				m->m_pkthdr.rcvif = NULL;
864			} else
865				m = m_getcl(M_TRYWAIT, MT_DATA, 0);
866			len = min(min(MCLBYTES, resid), *space);
867#endif /* ZERO_COPY_SOCKETS */
868		} else {
869			if (top == NULL) {
870				m = m_gethdr(M_TRYWAIT, MT_DATA);
871				m->m_pkthdr.len = 0;
872				m->m_pkthdr.rcvif = NULL;
873
874				len = min(min(MHLEN, resid), *space);
875				/*
876				 * For datagram protocols, leave room
877				 * for protocol headers in first mbuf.
878				 */
879				if (atomic && m && len < MHLEN)
880					MH_ALIGN(m, len);
881			} else {
882				m = m_get(M_TRYWAIT, MT_DATA);
883				len = min(min(MLEN, resid), *space);
884			}
885		}
886		if (m == NULL) {
887			error = ENOBUFS;
888			goto out;
889		}
890
891		*space -= len;
892#ifdef ZERO_COPY_SOCKETS
893		if (cow_send)
894			error = 0;
895		else
896#endif /* ZERO_COPY_SOCKETS */
897		error = uiomove(mtod(m, void *), (int)len, uio);
898		resid = uio->uio_resid;
899		m->m_len = len;
900		*mp = m;
901		top->m_pkthdr.len += len;
902		if (error)
903			goto out;
904		mp = &m->m_next;
905		if (resid <= 0) {
906			if (flags & MSG_EOR)
907				top->m_flags |= M_EOR;
908			break;
909		}
910	} while (*space > 0 && atomic);
911out:
912	*retmp = top;
913	return (error);
914}
915
916#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
917
918int
919sosend_dgram(so, addr, uio, top, control, flags, td)
920	struct socket *so;
921	struct sockaddr *addr;
922	struct uio *uio;
923	struct mbuf *top;
924	struct mbuf *control;
925	int flags;
926	struct thread *td;
927{
928	long space, resid;
929	int clen = 0, error, dontroute;
930	int atomic = sosendallatonce(so) || top;
931
932	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
933	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
934	    ("sosend_dgram: !PR_ATOMIC"));
935
936	if (uio != NULL)
937		resid = uio->uio_resid;
938	else
939		resid = top->m_pkthdr.len;
940	/*
941	 * In theory resid should be unsigned.
942	 * However, space must be signed, as it might be less than 0
943	 * if we over-committed, and we must use a signed comparison
944	 * of space and resid.  On the other hand, a negative resid
945	 * causes us to loop sending 0-length segments to the protocol.
946	 *
947	 * Unlike in sosend(), no SOCK_STREAM/MSG_EOR check is needed here,
948	 * since this routine handles only SOCK_DGRAM sockets.
949	 */
950	if (resid < 0) {
951		error = EINVAL;
952		goto out;
953	}
954
955	dontroute =
956	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
957	if (td != NULL)
958		td->td_proc->p_stats->p_ru.ru_msgsnd++;
959	if (control != NULL)
960		clen = control->m_len;
961
962	SOCKBUF_LOCK(&so->so_snd);
963	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
964		SOCKBUF_UNLOCK(&so->so_snd);
965		error = EPIPE;
966		goto out;
967	}
968	if (so->so_error) {
969		error = so->so_error;
970		so->so_error = 0;
971		SOCKBUF_UNLOCK(&so->so_snd);
972		goto out;
973	}
974	if ((so->so_state & SS_ISCONNECTED) == 0) {
975		/*
976		 * `sendto' and `sendmsg' are allowed on a connection-
977		 * based socket if it supports implied connect.
978		 * Return ENOTCONN if not connected and no address is
979		 * supplied.
980		 */
981		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
982		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
983			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
984			    !(resid == 0 && clen != 0)) {
985				SOCKBUF_UNLOCK(&so->so_snd);
986				error = ENOTCONN;
987				goto out;
988			}
989		} else if (addr == NULL) {
990			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
991				error = ENOTCONN;
992			else
993				error = EDESTADDRREQ;
994			SOCKBUF_UNLOCK(&so->so_snd);
995			goto out;
996		}
997	}
998
999	/*
1000	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
1001	 * problem and need fixing.
1002	 */
1003	space = sbspace(&so->so_snd);
1004	if (flags & MSG_OOB)
1005		space += 1024;
1006	space -= clen;
1007	SOCKBUF_UNLOCK(&so->so_snd);
1008	if (resid > space) {
1009		error = EMSGSIZE;
1010		goto out;
1011	}
1012	if (uio == NULL) {
1013		resid = 0;
1014		if (flags & MSG_EOR)
1015			top->m_flags |= M_EOR;
1016	} else {
1017		error = sosend_copyin(uio, &top, atomic, &space, flags);
1018		if (error)
1019			goto out;
1020		resid = uio->uio_resid;
1021	}
1022	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
1023	/*
1024	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
1025	 * than with.
1026	 */
1027	if (dontroute) {
1028		SOCK_LOCK(so);
1029		so->so_options |= SO_DONTROUTE;
1030		SOCK_UNLOCK(so);
1031	}
1032	/*
1033	 * XXX all the SBS_CANTSENDMORE checks previously
1034	 * done could be out of date.  We could have received
1035	 * a reset packet in an interrupt or maybe we slept
1036	 * while doing page faults in uiomove() etc. We could
1037	 * probably recheck again inside the locking protection
1038	 * here, but there are probably other places that this
1039	 * also happens.  We must rethink this.
1040	 */
1041	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1042	    (flags & MSG_OOB) ? PRUS_OOB :
1043	/*
1044	 * If the user set MSG_EOF, the protocol
1045	 * understands this flag, and there is nothing left to
1046	 * send, then use PRUS_EOF instead of PRUS_SEND.
1047	 */
1048	    ((flags & MSG_EOF) &&
1049	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1050	     (resid <= 0)) ?
1051		PRUS_EOF :
1052		/* If there is more to send set PRUS_MORETOCOME */
1053		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1054		top, addr, control, td);
1055	if (dontroute) {
1056		SOCK_LOCK(so);
1057		so->so_options &= ~SO_DONTROUTE;
1058		SOCK_UNLOCK(so);
1059	}
1060	clen = 0;
1061	control = NULL;
1062	top = NULL;
1063out:
1064	if (top != NULL)
1065		m_freem(top);
1066	if (control != NULL)
1067		m_freem(control);
1068	return (error);
1069}
1070
1071/*
1072 * Send on a socket.
1073 * If send must go all at once and message is larger than
1074 * send buffering, then hard error.
1075 * Lock against other senders.
1076 * If must go all at once and not enough room now, then
1077 * inform user that this would block and do nothing.
1078 * Otherwise, if nonblocking, send as much as possible.
1079 * The data to be sent is described by "uio" if nonzero,
1080 * otherwise by the mbuf chain "top" (which must be null
1081 * if uio is not).  Data provided in mbuf chain must be small
1082 * enough to send all at once.
1083 *
1084 * Returns nonzero on error, timeout or signal; callers
1085 * must check for short counts if EINTR/ERESTART are returned.
1086 * Data and control buffers are freed on return.
1087 */
1088#define	snderr(errno)	{ error = (errno); goto release; }
1089int
1090sosend(so, addr, uio, top, control, flags, td)
1091	struct socket *so;
1092	struct sockaddr *addr;
1093	struct uio *uio;
1094	struct mbuf *top;
1095	struct mbuf *control;
1096	int flags;
1097	struct thread *td;
1098{
1099	long space, resid;
1100	int clen = 0, error, dontroute;
1101	int atomic = sosendallatonce(so) || top;
1102
1103	if (uio != NULL)
1104		resid = uio->uio_resid;
1105	else
1106		resid = top->m_pkthdr.len;
1107	/*
1108	 * In theory resid should be unsigned.
1109	 * However, space must be signed, as it might be less than 0
1110	 * if we over-committed, and we must use a signed comparison
1111	 * of space and resid.  On the other hand, a negative resid
1112	 * causes us to loop sending 0-length segments to the protocol.
1113	 *
1114	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
1115	 * type sockets since that's an error.
1116	 */
1117	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
1118		error = EINVAL;
1119		goto out;
1120	}
1121
1122	dontroute =
1123	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
1124	    (so->so_proto->pr_flags & PR_ATOMIC);
1125	if (td != NULL)
1126		td->td_proc->p_stats->p_ru.ru_msgsnd++;
1127	if (control != NULL)
1128		clen = control->m_len;
1129
1130	SOCKBUF_LOCK(&so->so_snd);
1131restart:
1132	SOCKBUF_LOCK_ASSERT(&so->so_snd);
1133	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
1134	if (error)
1135		goto out_locked;
1136	do {
1137		SOCKBUF_LOCK_ASSERT(&so->so_snd);
1138		if (so->so_snd.sb_state & SBS_CANTSENDMORE)
1139			snderr(EPIPE);
1140		if (so->so_error) {
1141			error = so->so_error;
1142			so->so_error = 0;
1143			goto release;
1144		}
1145		if ((so->so_state & SS_ISCONNECTED) == 0) {
1146			/*
1147			 * `sendto' and `sendmsg' are allowed on a connection-
1148			 * based socket if it supports implied connect.
1149			 * Return ENOTCONN if not connected and no address is
1150			 * supplied.
1151			 */
1152			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1153			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1154				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1155				    !(resid == 0 && clen != 0))
1156					snderr(ENOTCONN);
1157			} else if (addr == NULL)
1158			    snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
1159				   ENOTCONN : EDESTADDRREQ);
1160		}
1161		space = sbspace(&so->so_snd);
1162		if (flags & MSG_OOB)
1163			space += 1024;
1164		if ((atomic && resid > so->so_snd.sb_hiwat) ||
1165		    clen > so->so_snd.sb_hiwat)
1166			snderr(EMSGSIZE);
1167		if (space < resid + clen &&
1168		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
1169			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO))
1170				snderr(EWOULDBLOCK);
1171			sbunlock(&so->so_snd);
1172			error = sbwait(&so->so_snd);
1173			if (error)
1174				goto out_locked;
1175			goto restart;
1176		}
1177		SOCKBUF_UNLOCK(&so->so_snd);
1178		space -= clen;
1179		do {
1180			if (uio == NULL) {
1181				resid = 0;
1182				if (flags & MSG_EOR)
1183					top->m_flags |= M_EOR;
1184			} else {
1185				error = sosend_copyin(uio, &top, atomic,
1186				    &space, flags);
1187				if (error != 0) {
1188					SOCKBUF_LOCK(&so->so_snd);
1189					goto release;
1190				}
1191				resid = uio->uio_resid;
1192			}
1193			if (dontroute) {
1194				SOCK_LOCK(so);
1195				so->so_options |= SO_DONTROUTE;
1196				SOCK_UNLOCK(so);
1197			}
1198			/*
1199			 * XXX all the SBS_CANTSENDMORE checks previously
1200			 * done could be out of date.  We could have received
1201			 * a reset packet in an interrupt or maybe we slept
1202			 * while doing page faults in uiomove() etc. We could
1203			 * probably recheck again inside the locking protection
1204			 * here, but there are probably other places that this
1205			 * also happens.  We must rethink this.
1206			 */
1207			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1208			    (flags & MSG_OOB) ? PRUS_OOB :
1209			/*
1210			 * If the user set MSG_EOF, the protocol
1211			 * understands this flag, and there is nothing left to
1212			 * send, then use PRUS_EOF instead of PRUS_SEND.
1213			 */
1214			    ((flags & MSG_EOF) &&
1215			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1216			     (resid <= 0)) ?
1217				PRUS_EOF :
1218			/* If there is more to send set PRUS_MORETOCOME */
1219			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1220			    top, addr, control, td);
1221			if (dontroute) {
1222				SOCK_LOCK(so);
1223				so->so_options &= ~SO_DONTROUTE;
1224				SOCK_UNLOCK(so);
1225			}
1226			clen = 0;
1227			control = NULL;
1228			top = NULL;
1229			if (error) {
1230				SOCKBUF_LOCK(&so->so_snd);
1231				goto release;
1232			}
1233		} while (resid && space > 0);
1234		SOCKBUF_LOCK(&so->so_snd);
1235	} while (resid);
1236
1237release:
1238	SOCKBUF_LOCK_ASSERT(&so->so_snd);
1239	sbunlock(&so->so_snd);
1240out_locked:
1241	SOCKBUF_LOCK_ASSERT(&so->so_snd);
1242	SOCKBUF_UNLOCK(&so->so_snd);
1243out:
1244	if (top != NULL)
1245		m_freem(top);
1246	if (control != NULL)
1247		m_freem(control);
1248	return (error);
1249}
1250#undef snderr
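
/*
 * Example (a sketch): a kernel consumer, e.g. an NFS-style client, sending
 * a wired kernel buffer 'buf' of 'len' bytes through sosend() via a uio;
 * 'so', 'buf', 'len', and 'td' are assumptions for illustration:
 *
 *	struct uio auio;
 *	struct iovec aiov;
 *	int error;
 *
 *	aiov.iov_base = buf;
 *	aiov.iov_len = len;
 *	auio.uio_iov = &aiov;
 *	auio.uio_iovcnt = 1;
 *	auio.uio_offset = 0;
 *	auio.uio_resid = len;
 *	auio.uio_segflg = UIO_SYSSPACE;
 *	auio.uio_rw = UIO_WRITE;
 *	auio.uio_td = td;
 *	error = sosend(so, NULL, &auio, NULL, NULL, 0, td);
 */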
1251
1252/*
1253 * The part of soreceive() that implements reading non-inline out-of-band
1254 * data from a socket.  For more complete comments, see soreceive(), from
1255 * which this code originated.
1256 *
1257 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
1258 * unable to return an mbuf chain to the caller.
1259 */
1260static int
1261soreceive_rcvoob(so, uio, flags)
1262	struct socket *so;
1263	struct uio *uio;
1264	int flags;
1265{
1266	struct protosw *pr = so->so_proto;
1267	struct mbuf *m;
1268	int error;
1269
1270	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
1271
1272	m = m_get(M_TRYWAIT, MT_DATA);
1273	if (m == NULL)
1274		return (ENOBUFS);
1275	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
1276	if (error)
1277		goto bad;
1278	do {
1279#ifdef ZERO_COPY_SOCKETS
1280		if (so_zero_copy_receive) {
1281			int disposable;
1282
1283			if ((m->m_flags & M_EXT)
1284			 && (m->m_ext.ext_type == EXT_DISPOSABLE))
1285				disposable = 1;
1286			else
1287				disposable = 0;
1288
1289			error = uiomoveco(mtod(m, void *),
1290					  min(uio->uio_resid, m->m_len),
1291					  uio, disposable);
1292		} else
1293#endif /* ZERO_COPY_SOCKETS */
1294		error = uiomove(mtod(m, void *),
1295		    (int) min(uio->uio_resid, m->m_len), uio);
1296		m = m_free(m);
1297	} while (uio->uio_resid && error == 0 && m);
1298bad:
1299	if (m != NULL)
1300		m_freem(m);
1301	return (error);
1302}
1303
1304/*
1305 * Following replacement or removal of the first mbuf on the first mbuf chain
1306 * of a socket buffer, push necessary state changes back into the socket
1307 * buffer so that other consumers see the values consistently.  'nextrecord'
1308 * is the caller's locally stored value of the original value of
1309 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
1310 * NOTE: 'nextrecord' may be NULL.
1311 */
1312static __inline void
1313sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
1314{
1315
1316	SOCKBUF_LOCK_ASSERT(sb);
1317	/*
1318	 * First, update for the new value of nextrecord.  If necessary, make
1319	 * it the first record.
1320	 */
1321	if (sb->sb_mb != NULL)
1322		sb->sb_mb->m_nextpkt = nextrecord;
1323	else
1324		sb->sb_mb = nextrecord;
1325
1326	/*
1327	 * Now update any dependent socket buffer fields to reflect the new
1328	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
1329	 * addition of a second clause that takes care of the case where
1330	 * sb_mb has been updated, but remains the last record.
1331	 */
1332	if (sb->sb_mb == NULL) {
1333		sb->sb_mbtail = NULL;
1334		sb->sb_lastrecord = NULL;
1335	} else if (sb->sb_mb->m_nextpkt == NULL)
1336		sb->sb_lastrecord = sb->sb_mb;
1337}
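
/*
 * For example, when soreceive() frees the leading MT_SONAME mbuf of the
 * first record, sb_mb advances to the next mbuf of the same record and
 * the cached record linkage must be pushed back:
 *
 *	before:	sb_mb -> (SONAME) -> (DATA), nextrecord saved from m_nextpkt
 *	after:	sb_mb -> (DATA), sb_mb->m_nextpkt = nextrecord
 */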
1338
1339
1340/*
1341 * Implement receive operations on a socket.
1342 * We depend on the way that records are added to the sockbuf
1343 * by sbappend*.  In particular, each record (mbufs linked through m_next)
1344 * must begin with an address if the protocol so specifies,
1345 * followed by an optional mbuf or mbufs containing ancillary data,
1346 * and then zero or more mbufs of data.
1347 * To avoid blocking the network stack for the entire time here, we drop
1348 * the socket buffer mutex while doing the actual copy to user space.
1349 * While the mutex is dropped, new data may be appended to the sockbuf,
1350 * and thus we must maintain its consistency across the copy.
1351 *
1352 * The caller may receive the data as a single mbuf chain by supplying
1353 * an mbuf **mp0 for use in returning the chain.  The uio is then used
1354 * only for the count in uio_resid.
1355 */
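
/*
 * Receive buffer layout consumed below for a PR_ADDR protocol, with
 * records chained via m_nextpkt and mbufs within a record via m_next:
 *
 *	sb_mb -> (MT_SONAME) -> (MT_CONTROL ...) -> (MT_DATA ...)
 *	   |
 *	   m_nextpkt
 *	   |
 *	next record -> ...
 */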
1356int
1357soreceive(so, psa, uio, mp0, controlp, flagsp)
1358	struct socket *so;
1359	struct sockaddr **psa;
1360	struct uio *uio;
1361	struct mbuf **mp0;
1362	struct mbuf **controlp;
1363	int *flagsp;
1364{
1365	struct mbuf *m, **mp;
1366	int flags, len, error, offset;
1367	struct protosw *pr = so->so_proto;
1368	struct mbuf *nextrecord;
1369	int moff, type = 0;
1370	int orig_resid = uio->uio_resid;
1371
1372	mp = mp0;
1373	if (psa != NULL)
1374		*psa = NULL;
1375	if (controlp != NULL)
1376		*controlp = NULL;
1377	if (flagsp != NULL)
1378		flags = *flagsp &~ MSG_EOR;
1379	else
1380		flags = 0;
1381	if (flags & MSG_OOB)
1382		return (soreceive_rcvoob(so, uio, flags));
1383	if (mp != NULL)
1384		*mp = NULL;
1385	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
1386	    && uio->uio_resid)
1387		(*pr->pr_usrreqs->pru_rcvd)(so, 0);
1388
1389	SOCKBUF_LOCK(&so->so_rcv);
1390restart:
1391	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1392	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1393	if (error)
1394		goto out;
1395
1396	m = so->so_rcv.sb_mb;
1397	/*
1398	 * If we have less data than requested, block awaiting more
1399	 * (subject to any timeout) if:
1400	 *   1. the current count is less than the low water mark, or
1401	 *   2. MSG_WAITALL is set, and it is possible to do the entire
1402	 *	receive operation at once if we block (resid <= hiwat), and
1403	 *   3. MSG_DONTWAIT is not set.
1404	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1405	 * we have to do the receive in sections, and thus risk returning
1406	 * a short count if a timeout or signal occurs after we start.
1407	 */
1408	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1409	    so->so_rcv.sb_cc < uio->uio_resid) &&
1410	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
1411	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
1412	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1413		KASSERT(m != NULL || !so->so_rcv.sb_cc,
1414		    ("receive: m == %p so->so_rcv.sb_cc == %u",
1415		    m, so->so_rcv.sb_cc));
1416		if (so->so_error) {
1417			if (m != NULL)
1418				goto dontblock;
1419			error = so->so_error;
1420			if ((flags & MSG_PEEK) == 0)
1421				so->so_error = 0;
1422			goto release;
1423		}
1424		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1425		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1426			if (m)
1427				goto dontblock;
1428			else
1429				goto release;
1430		}
1431		for (; m != NULL; m = m->m_next)
1432		if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1433				m = so->so_rcv.sb_mb;
1434				goto dontblock;
1435			}
1436		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1437		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1438			error = ENOTCONN;
1439			goto release;
1440		}
1441		if (uio->uio_resid == 0)
1442			goto release;
1443		if ((so->so_state & SS_NBIO) ||
1444		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1445			error = EWOULDBLOCK;
1446			goto release;
1447		}
1448		SBLASTRECORDCHK(&so->so_rcv);
1449		SBLASTMBUFCHK(&so->so_rcv);
1450		sbunlock(&so->so_rcv);
1451		error = sbwait(&so->so_rcv);
1452		if (error)
1453			goto out;
1454		goto restart;
1455	}
1456dontblock:
1457	/*
1458	 * From this point onward, we maintain 'nextrecord' as a cache of the
1459	 * pointer to the next record in the socket buffer.  We must keep the
1460	 * various socket buffer pointers and local stack versions of the
1461	 * pointers in sync, pushing out modifications before dropping the
1462	 * socket buffer mutex, and re-reading them when picking it up.
1463	 *
1464	 * Otherwise, we will race with the network stack appending new data
1465	 * or records onto the socket buffer by using inconsistent/stale
1466	 * versions of the field, possibly resulting in socket buffer
1467	 * corruption.
1468	 *
1469	 * By holding the high-level sblock(), we prevent simultaneous
1470	 * readers from pulling off the front of the socket buffer.
1471	 */
1472	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1473	if (uio->uio_td)
1474		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
1475	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1476	SBLASTRECORDCHK(&so->so_rcv);
1477	SBLASTMBUFCHK(&so->so_rcv);
1478	nextrecord = m->m_nextpkt;
1479	if (pr->pr_flags & PR_ADDR) {
1480		KASSERT(m->m_type == MT_SONAME,
1481		    ("m->m_type == %d", m->m_type));
1482		orig_resid = 0;
1483		if (psa != NULL)
1484			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
1485			    M_NOWAIT);
1486		if (flags & MSG_PEEK) {
1487			m = m->m_next;
1488		} else {
1489			sbfree(&so->so_rcv, m);
1490			so->so_rcv.sb_mb = m_free(m);
1491			m = so->so_rcv.sb_mb;
1492			sockbuf_pushsync(&so->so_rcv, nextrecord);
1493		}
1494	}
1495
1496	/*
1497	 * Process one or more MT_CONTROL mbufs present before any data mbufs
1498	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
1499	 * just copy the data; if !MSG_PEEK, we call into the protocol to
1500	 * perform externalization (or freeing if controlp == NULL).
1501	 */
1502	if (m != NULL && m->m_type == MT_CONTROL) {
1503		struct mbuf *cm = NULL, *cmn;
1504		struct mbuf **cme = &cm;
1505
1506		do {
1507			if (flags & MSG_PEEK) {
1508				if (controlp != NULL) {
1509					*controlp = m_copy(m, 0, m->m_len);
1510					controlp = &(*controlp)->m_next;
1511				}
1512				m = m->m_next;
1513			} else {
1514				sbfree(&so->so_rcv, m);
1515				so->so_rcv.sb_mb = m->m_next;
1516				m->m_next = NULL;
1517				*cme = m;
1518				cme = &(*cme)->m_next;
1519				m = so->so_rcv.sb_mb;
1520			}
1521		} while (m != NULL && m->m_type == MT_CONTROL);
1522		if ((flags & MSG_PEEK) == 0)
1523			sockbuf_pushsync(&so->so_rcv, nextrecord);
1524		while (cm != NULL) {
1525			cmn = cm->m_next;
1526			cm->m_next = NULL;
1527			if (pr->pr_domain->dom_externalize != NULL) {
1528				SOCKBUF_UNLOCK(&so->so_rcv);
1529				error = (*pr->pr_domain->dom_externalize)
1530				    (cm, controlp);
1531				SOCKBUF_LOCK(&so->so_rcv);
1532			} else if (controlp != NULL)
1533				*controlp = cm;
1534			else
1535				m_freem(cm);
1536			if (controlp != NULL) {
1537				orig_resid = 0;
1538				while (*controlp != NULL)
1539					controlp = &(*controlp)->m_next;
1540			}
1541			cm = cmn;
1542		}
1543		if (so->so_rcv.sb_mb)
1544			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1545		else
1546			nextrecord = NULL;
1547		orig_resid = 0;
1548	}
1549	if (m != NULL) {
1550		if ((flags & MSG_PEEK) == 0) {
1551			KASSERT(m->m_nextpkt == nextrecord,
1552			    ("soreceive: post-control, nextrecord !sync"));
1553			if (nextrecord == NULL) {
1554				KASSERT(so->so_rcv.sb_mb == m,
1555				    ("soreceive: post-control, sb_mb!=m"));
1556				KASSERT(so->so_rcv.sb_lastrecord == m,
1557				    ("soreceive: post-control, lastrecord!=m"));
1558			}
1559		}
1560		type = m->m_type;
1561		if (type == MT_OOBDATA)
1562			flags |= MSG_OOB;
1563	} else {
1564		if ((flags & MSG_PEEK) == 0) {
1565			KASSERT(so->so_rcv.sb_mb == nextrecord,
1566			    ("soreceive: sb_mb != nextrecord"));
1567			if (so->so_rcv.sb_mb == NULL) {
1568				KASSERT(so->so_rcv.sb_lastrecord == NULL,
1569				    ("soreceive: sb_lastrecord != NULL"));
1570			}
1571		}
1572	}
1573	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1574	SBLASTRECORDCHK(&so->so_rcv);
1575	SBLASTMBUFCHK(&so->so_rcv);
1576
1577	/*
1578	 * Now continue to read any data mbufs off of the head of the socket
1579	 * buffer until the read request is satisfied.  Note that 'type' is
1580	 * used to store the type of any mbuf reads that have happened so far
1581	 * such that soreceive() can stop reading if the type changes, which
1582	 * causes soreceive() to return only one of regular data and inline
1583	 * out-of-band data in a single socket receive operation.
1584	 */
1585	moff = 0;
1586	offset = 0;
1587	while (m != NULL && uio->uio_resid > 0 && error == 0) {
1588		/*
1589		 * If the type of mbuf has changed since the last mbuf
1590		 * examined ('type'), end the receive operation.
1591		 */
1592		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1593		if (m->m_type == MT_OOBDATA) {
1594			if (type != MT_OOBDATA)
1595				break;
1596		} else if (type == MT_OOBDATA)
1597			break;
1598		else
1599		    KASSERT(m->m_type == MT_DATA,
1600			("m->m_type == %d", m->m_type));
1601		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1602		len = uio->uio_resid;
1603		if (so->so_oobmark && len > so->so_oobmark - offset)
1604			len = so->so_oobmark - offset;
1605		if (len > m->m_len - moff)
1606			len = m->m_len - moff;
1607		/*
1608		 * If mp is set, just pass back the mbufs.
1609		 * Otherwise copy them out via the uio, then free.
1610		 * The sockbuf must be consistent here (it points to the
1611		 * current mbuf and to the next record) when we drop the
1612		 * mutex; we must note any additions to the sockbuf when we
1613		 * re-acquire it.
1614		 */
1615		if (mp == NULL) {
1616			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1617			SBLASTRECORDCHK(&so->so_rcv);
1618			SBLASTMBUFCHK(&so->so_rcv);
1619			SOCKBUF_UNLOCK(&so->so_rcv);
1620#ifdef ZERO_COPY_SOCKETS
1621			if (so_zero_copy_receive) {
1622				int disposable;
1623
1624				if ((m->m_flags & M_EXT)
1625				 && (m->m_ext.ext_type == EXT_DISPOSABLE))
1626					disposable = 1;
1627				else
1628					disposable = 0;
1629
1630				error = uiomoveco(mtod(m, char *) + moff,
1631						  (int)len, uio,
1632						  disposable);
1633			} else
1634#endif /* ZERO_COPY_SOCKETS */
1635			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1636			SOCKBUF_LOCK(&so->so_rcv);
1637			if (error)
1638				goto release;
1639		} else
1640			uio->uio_resid -= len;
1641		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1642		if (len == m->m_len - moff) {
1643			if (m->m_flags & M_EOR)
1644				flags |= MSG_EOR;
1645			if (flags & MSG_PEEK) {
1646				m = m->m_next;
1647				moff = 0;
1648			} else {
1649				nextrecord = m->m_nextpkt;
1650				sbfree(&so->so_rcv, m);
1651				if (mp != NULL) {
1652					*mp = m;
1653					mp = &m->m_next;
1654					so->so_rcv.sb_mb = m = m->m_next;
1655					*mp = NULL;
1656				} else {
1657					so->so_rcv.sb_mb = m_free(m);
1658					m = so->so_rcv.sb_mb;
1659				}
1660				sockbuf_pushsync(&so->so_rcv, nextrecord);
1661				SBLASTRECORDCHK(&so->so_rcv);
1662				SBLASTMBUFCHK(&so->so_rcv);
1663			}
1664		} else {
1665			if (flags & MSG_PEEK)
1666				moff += len;
1667			else {
1668				if (mp != NULL) {
1669					int copy_flag;
1670
1671					if (flags & MSG_DONTWAIT)
1672						copy_flag = M_DONTWAIT;
1673					else
1674						copy_flag = M_TRYWAIT;
1675					if (copy_flag == M_TRYWAIT)
1676						SOCKBUF_UNLOCK(&so->so_rcv);
1677					*mp = m_copym(m, 0, len, copy_flag);
1678					if (copy_flag == M_TRYWAIT)
1679						SOCKBUF_LOCK(&so->so_rcv);
1680 					if (*mp == NULL) {
1681 						/*
1682 						 * m_copym() couldn't allocate an mbuf.
1683						 * Adjust uio_resid back (it was adjusted
1684						 * down by len bytes, which we didn't end
1685						 * up "copying" over).
1686 						 */
1687 						uio->uio_resid += len;
1688 						break;
1689 					}
1690				}
1691				m->m_data += len;
1692				m->m_len -= len;
1693				so->so_rcv.sb_cc -= len;
1694			}
1695		}
1696		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1697		if (so->so_oobmark) {
1698			if ((flags & MSG_PEEK) == 0) {
1699				so->so_oobmark -= len;
1700				if (so->so_oobmark == 0) {
1701					so->so_rcv.sb_state |= SBS_RCVATMARK;
1702					break;
1703				}
1704			} else {
1705				offset += len;
1706				if (offset == so->so_oobmark)
1707					break;
1708			}
1709		}
1710		if (flags & MSG_EOR)
1711			break;
1712		/*
1713		 * If the MSG_WAITALL flag is set (for non-atomic sockets),
1714		 * we must not quit until "uio->uio_resid == 0" or an error
1715		 * termination.  If a signal/timeout occurs, return
1716		 * with a short count but without error.
1717		 * Keep sockbuf locked against other readers.
1718		 */
1719		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1720		    !sosendallatonce(so) && nextrecord == NULL) {
1721			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1722			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
1723				break;
1724			/*
1725			 * Notify the protocol that some data has been
1726			 * drained before blocking.
1727			 */
1728			if (pr->pr_flags & PR_WANTRCVD) {
1729				SOCKBUF_UNLOCK(&so->so_rcv);
1730				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1731				SOCKBUF_LOCK(&so->so_rcv);
1732			}
1733			SBLASTRECORDCHK(&so->so_rcv);
1734			SBLASTMBUFCHK(&so->so_rcv);
1735			error = sbwait(&so->so_rcv);
1736			if (error)
1737				goto release;
1738			m = so->so_rcv.sb_mb;
1739			if (m != NULL)
1740				nextrecord = m->m_nextpkt;
1741		}
1742	}
1743
1744	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1745	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1746		flags |= MSG_TRUNC;
1747		if ((flags & MSG_PEEK) == 0)
1748			(void) sbdroprecord_locked(&so->so_rcv);
1749	}
1750	if ((flags & MSG_PEEK) == 0) {
1751		if (m == NULL) {
1752			/*
1753			 * First part is an inline SB_EMPTY_FIXUP().  Second
1754			 * part makes sure sb_lastrecord is up-to-date if
1755			 * there is still data in the socket buffer.
1756			 */
1757			so->so_rcv.sb_mb = nextrecord;
1758			if (so->so_rcv.sb_mb == NULL) {
1759				so->so_rcv.sb_mbtail = NULL;
1760				so->so_rcv.sb_lastrecord = NULL;
1761			} else if (nextrecord->m_nextpkt == NULL)
1762				so->so_rcv.sb_lastrecord = nextrecord;
1763		}
1764		SBLASTRECORDCHK(&so->so_rcv);
1765		SBLASTMBUFCHK(&so->so_rcv);
1766		/*
1767		 * If soreceive() is being done from the socket callback, then
1768		 * we don't need to generate an ACK to the peer to update the
1769		 * window, since the ACK will be generated on return to TCP.
1770		 */
1771		if (!(flags & MSG_SOCALLBCK) &&
1772		    (pr->pr_flags & PR_WANTRCVD)) {
1773			SOCKBUF_UNLOCK(&so->so_rcv);
1774			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1775			SOCKBUF_LOCK(&so->so_rcv);
1776		}
1777	}
1778	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1779	if (orig_resid == uio->uio_resid && orig_resid &&
1780	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1781		sbunlock(&so->so_rcv);
1782		goto restart;
1783	}
1784
1785	if (flagsp != NULL)
1786		*flagsp |= flags;
1787release:
1788	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1789	sbunlock(&so->so_rcv);
1790out:
1791	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1792	SOCKBUF_UNLOCK(&so->so_rcv);
1793	return (error);
1794}
1795
1796int
1797soshutdown(so, how)
1798	struct socket *so;
1799	int how;
1800{
1801	struct protosw *pr = so->so_proto;
1802
1803	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
1804		return (EINVAL);
1805
1806	if (how != SHUT_WR)
1807		sorflush(so);
1808	if (how != SHUT_RD)
1809		return ((*pr->pr_usrreqs->pru_shutdown)(so));
1810	return (0);
1811}
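
/*
 * Example (illustrative, not part of the original file): a kernel
 * consumer that is done transmitting but still expects to read would
 * shut down only the write side, much like shutdown(s, SHUT_WR) from
 * user-land:
 *
 *	error = soshutdown(so, SHUT_WR);
 */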
1812
1813void
1814sorflush(so)
1815	struct socket *so;
1816{
1817	struct sockbuf *sb = &so->so_rcv;
1818	struct protosw *pr = so->so_proto;
1819	struct sockbuf asb;
1820
1821	/*
1822	 * XXXRW: This is quite ugly.  Previously, this code made a copy of
1823	 * the socket buffer, then zero'd the original to clear the buffer
1824	 * fields.  However, with mutexes in the socket buffer, this causes
1825	 * problems.  We only clear the zeroable bits of the original;
1826	 * however, we have to initialize and destroy the mutex in the copy
1827	 * so that dom_dispose() and sbrelease() can lock it as needed.
1828	 */
1829	SOCKBUF_LOCK(sb);
1830	sb->sb_flags |= SB_NOINTR;
1831	(void) sblock(sb, M_WAITOK);
1832	/*
1833	 * socantrcvmore_locked() drops the socket buffer mutex so that it
1834	 * can safely perform wakeups.  Re-acquire the mutex before
1835	 * continuing.
1836	 */
1837	socantrcvmore_locked(so);
1838	SOCKBUF_LOCK(sb);
1839	sbunlock(sb);
1840	/*
1841	 * Invalidate/clear most of the sockbuf structure, but leave
1842	 * selinfo and mutex data unchanged.
1843	 */
1844	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
1845	bcopy(&sb->sb_startzero, &asb.sb_startzero,
1846	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
1847	bzero(&sb->sb_startzero,
1848	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
1849	SOCKBUF_UNLOCK(sb);
1850
1851	SOCKBUF_LOCK_INIT(&asb, "so_rcv");
1852	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
1853		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
1854	sbrelease(&asb, so);
1855	SOCKBUF_LOCK_DESTROY(&asb);
1856}
1857
1858/*
1859 * Perhaps this routine, and sooptcopyout(), below, ought to come in
1860 * an additional variant to handle the case where the option value needs
1861 * to be some kind of integer, but not a specific size.
1862 * In addition to their use here, these functions are also called by the
1863 * protocol-level pr_ctloutput() routines.
1864 */
1865int
1866sooptcopyin(sopt, buf, len, minlen)
1867	struct	sockopt *sopt;
1868	void	*buf;
1869	size_t	len;
1870	size_t	minlen;
1871{
1872	size_t	valsize;
1873
1874	/*
1875	 * If the user gives us more than we wanted, we ignore it,
1876	 * but if we don't get the minimum length the caller
1877	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
1878	 * is set to however much we actually retrieved.
1879	 */
1880	if ((valsize = sopt->sopt_valsize) < minlen)
1881		return (EINVAL);
1882	if (valsize > len)
1883		sopt->sopt_valsize = valsize = len;
1884
1885	if (sopt->sopt_td != NULL)
1886		return (copyin(sopt->sopt_val, buf, valsize));
1887
1888	bcopy(sopt->sopt_val, buf, valsize);
1889	return (0);
1890}
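
/*
 * Example (illustrative): a protocol's pr_ctloutput() SOPT_SET path
 * would typically use sooptcopyin() to fetch a fixed-size integer
 * argument, regardless of whether the request originated in user-land
 * (sopt_td != NULL) or in the kernel:
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval),
 *	    sizeof(optval));
 *	if (error)
 *		return (error);
 */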
1891
1892/*
1893 * Kernel version of setsockopt(2).
1894 * XXX: optlen is size_t, not socklen_t
1895 */
1896int
1897so_setsockopt(struct socket *so, int level, int optname, void *optval,
1898    size_t optlen)
1899{
1900	struct sockopt sopt;
1901
1902	sopt.sopt_level = level;
1903	sopt.sopt_name = optname;
1904	sopt.sopt_dir = SOPT_SET;
1905	sopt.sopt_val = optval;
1906	sopt.sopt_valsize = optlen;
1907	sopt.sopt_td = NULL;
1908	return (sosetopt(so, &sopt));
1909}
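
/*
 * Example (illustrative): a kernel subsystem enabling keep-alives on
 * a socket it owns, without building a struct sockopt by hand:
 *
 *	int one = 1;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &one,
 *	    sizeof(one));
 */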
1910
1911int
1912sosetopt(so, sopt)
1913	struct socket *so;
1914	struct sockopt *sopt;
1915{
1916	int	error, optval;
1917	struct	linger l;
1918	struct	timeval tv;
1919	u_long  val;
1920#ifdef MAC
1921	struct mac extmac;
1922#endif
1923
1924	error = 0;
1925	if (sopt->sopt_level != SOL_SOCKET) {
1926		if (so->so_proto && so->so_proto->pr_ctloutput)
1927			return ((*so->so_proto->pr_ctloutput)
1928				  (so, sopt));
1929		error = ENOPROTOOPT;
1930	} else {
1931		switch (sopt->sopt_name) {
1932#ifdef INET
1933		case SO_ACCEPTFILTER:
1934			error = do_setopt_accept_filter(so, sopt);
1935			if (error)
1936				goto bad;
1937			break;
1938#endif
1939		case SO_LINGER:
1940			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
1941			if (error)
1942				goto bad;
1943
1944			SOCK_LOCK(so);
1945			so->so_linger = l.l_linger;
1946			if (l.l_onoff)
1947				so->so_options |= SO_LINGER;
1948			else
1949				so->so_options &= ~SO_LINGER;
1950			SOCK_UNLOCK(so);
1951			break;
1952
1953		case SO_DEBUG:
1954		case SO_KEEPALIVE:
1955		case SO_DONTROUTE:
1956		case SO_USELOOPBACK:
1957		case SO_BROADCAST:
1958		case SO_REUSEADDR:
1959		case SO_REUSEPORT:
1960		case SO_OOBINLINE:
1961		case SO_TIMESTAMP:
1962		case SO_BINTIME:
1963		case SO_NOSIGPIPE:
1964			error = sooptcopyin(sopt, &optval, sizeof optval,
1965					    sizeof optval);
1966			if (error)
1967				goto bad;
1968			SOCK_LOCK(so);
1969			if (optval)
1970				so->so_options |= sopt->sopt_name;
1971			else
1972				so->so_options &= ~sopt->sopt_name;
1973			SOCK_UNLOCK(so);
1974			break;
1975
1976		case SO_SNDBUF:
1977		case SO_RCVBUF:
1978		case SO_SNDLOWAT:
1979		case SO_RCVLOWAT:
1980			error = sooptcopyin(sopt, &optval, sizeof optval,
1981					    sizeof optval);
1982			if (error)
1983				goto bad;
1984
1985			/*
1986			 * Values < 1 make no sense for any of these
1987			 * options, so disallow them.
1988			 */
1989			if (optval < 1) {
1990				error = EINVAL;
1991				goto bad;
1992			}
1993
1994			switch (sopt->sopt_name) {
1995			case SO_SNDBUF:
1996			case SO_RCVBUF:
1997				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
1998				    &so->so_snd : &so->so_rcv, (u_long)optval,
1999				    so, curthread) == 0) {
2000					error = ENOBUFS;
2001					goto bad;
2002				}
2003				break;
2004
2005			/*
2006			 * Make sure the low-water is never greater than
2007			 * the high-water.
2008			 */
2009			case SO_SNDLOWAT:
2010				SOCKBUF_LOCK(&so->so_snd);
2011				so->so_snd.sb_lowat =
2012				    (optval > so->so_snd.sb_hiwat) ?
2013				    so->so_snd.sb_hiwat : optval;
2014				SOCKBUF_UNLOCK(&so->so_snd);
2015				break;
2016			case SO_RCVLOWAT:
2017				SOCKBUF_LOCK(&so->so_rcv);
2018				so->so_rcv.sb_lowat =
2019				    (optval > so->so_rcv.sb_hiwat) ?
2020				    so->so_rcv.sb_hiwat : optval;
2021				SOCKBUF_UNLOCK(&so->so_rcv);
2022				break;
2023			}
2024			break;
2025
2026		case SO_SNDTIMEO:
2027		case SO_RCVTIMEO:
2028#ifdef COMPAT_IA32
2029			if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
2030				struct timeval32 tv32;
2031
2032				error = sooptcopyin(sopt, &tv32, sizeof tv32,
2033				    sizeof tv32);
2034				CP(tv32, tv, tv_sec);
2035				CP(tv32, tv, tv_usec);
2036			} else
2037#endif
2038				error = sooptcopyin(sopt, &tv, sizeof tv,
2039				    sizeof tv);
2040			if (error)
2041				goto bad;
2042
2043			/* assert(hz > 0); */
2044			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
2045			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
2046				error = EDOM;
2047				goto bad;
2048			}
2049			/* assert(tick > 0); */
2050			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
2051			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
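			/*
			 * Worked example (illustrative): with hz = 1000,
			 * tick = 1000000 / hz = 1000 microseconds per tick,
			 * so a timeout of { tv_sec = 2, tv_usec = 500000 }
			 * converts below to 2 * 1000 + 500000 / 1000 = 2500
			 * ticks.
			 */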
2052			if (val > INT_MAX) {
2053				error = EDOM;
2054				goto bad;
2055			}
2056			if (val == 0 && tv.tv_usec != 0)
2057				val = 1;
2058
2059			switch (sopt->sopt_name) {
2060			case SO_SNDTIMEO:
2061				so->so_snd.sb_timeo = val;
2062				break;
2063			case SO_RCVTIMEO:
2064				so->so_rcv.sb_timeo = val;
2065				break;
2066			}
2067			break;
2068
2069		case SO_LABEL:
2070#ifdef MAC
2071			error = sooptcopyin(sopt, &extmac, sizeof extmac,
2072			    sizeof extmac);
2073			if (error)
2074				goto bad;
2075			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2076			    so, &extmac);
2077#else
2078			error = EOPNOTSUPP;
2079#endif
2080			break;
2081
2082		default:
2083			error = ENOPROTOOPT;
2084			break;
2085		}
2086		if (error == 0 && so->so_proto != NULL &&
2087		    so->so_proto->pr_ctloutput != NULL) {
2088			(void) ((*so->so_proto->pr_ctloutput)
2089				  (so, sopt));
2090		}
2091	}
2092bad:
2093	return (error);
2094}
2095
2096/* Helper routine for getsockopt */
2097int
2098sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2099{
2100	int	error;
2101	size_t	valsize;
2102
2103	error = 0;
2104
2105	/*
2106	 * Documented get behavior is that we always return a value,
2107	 * possibly truncated to fit in the user's buffer.
2108	 * Traditional behavior is that we always tell the user
2109	 * precisely how much we copied, rather than something useful
2110	 * like the total amount we had available for her.
2111	 * Note that this interface is not idempotent; the entire answer must
2112	 * be generated ahead of time.
2113	 */
2114	valsize = min(len, sopt->sopt_valsize);
2115	sopt->sopt_valsize = valsize;
2116	if (sopt->sopt_val != NULL) {
2117		if (sopt->sopt_td != NULL)
2118			error = copyout(buf, sopt->sopt_val, valsize);
2119		else
2120			bcopy(buf, sopt->sopt_val, valsize);
2121	}
2122	return (error);
2123}
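
/*
 * Example (illustrative): the SOPT_GET side of a pr_ctloutput()
 * handler answering an integer-valued option; sopt_valsize is
 * truncated to what the caller's buffer can hold:
 *
 *	int optval;
 *
 *	optval = ...;		(the option's current value)
 *	error = sooptcopyout(sopt, &optval, sizeof(optval));
 */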
2124
2125int
2126sogetopt(so, sopt)
2127	struct socket *so;
2128	struct sockopt *sopt;
2129{
2130	int	error, optval;
2131	struct	linger l;
2132	struct	timeval tv;
2133#ifdef MAC
2134	struct mac extmac;
2135#endif
2136
2137	error = 0;
2138	if (sopt->sopt_level != SOL_SOCKET) {
2139		if (so->so_proto && so->so_proto->pr_ctloutput) {
2140			return ((*so->so_proto->pr_ctloutput)
2141				  (so, sopt));
2142		} else
2143			return (ENOPROTOOPT);
2144	} else {
2145		switch (sopt->sopt_name) {
2146#ifdef INET
2147		case SO_ACCEPTFILTER:
2148			error = do_getopt_accept_filter(so, sopt);
2149			break;
2150#endif
2151		case SO_LINGER:
2152			SOCK_LOCK(so);
2153			l.l_onoff = so->so_options & SO_LINGER;
2154			l.l_linger = so->so_linger;
2155			SOCK_UNLOCK(so);
2156			error = sooptcopyout(sopt, &l, sizeof l);
2157			break;
2158
2159		case SO_USELOOPBACK:
2160		case SO_DONTROUTE:
2161		case SO_DEBUG:
2162		case SO_KEEPALIVE:
2163		case SO_REUSEADDR:
2164		case SO_REUSEPORT:
2165		case SO_BROADCAST:
2166		case SO_OOBINLINE:
2167		case SO_ACCEPTCONN:
2168		case SO_TIMESTAMP:
2169		case SO_BINTIME:
2170		case SO_NOSIGPIPE:
2171			optval = so->so_options & sopt->sopt_name;
2172integer:
2173			error = sooptcopyout(sopt, &optval, sizeof optval);
2174			break;
2175
2176		case SO_TYPE:
2177			optval = so->so_type;
2178			goto integer;
2179
2180		case SO_ERROR:
2181			SOCK_LOCK(so);
2182			optval = so->so_error;
2183			so->so_error = 0;
2184			SOCK_UNLOCK(so);
2185			goto integer;
2186
2187		case SO_SNDBUF:
2188			optval = so->so_snd.sb_hiwat;
2189			goto integer;
2190
2191		case SO_RCVBUF:
2192			optval = so->so_rcv.sb_hiwat;
2193			goto integer;
2194
2195		case SO_SNDLOWAT:
2196			optval = so->so_snd.sb_lowat;
2197			goto integer;
2198
2199		case SO_RCVLOWAT:
2200			optval = so->so_rcv.sb_lowat;
2201			goto integer;
2202
2203		case SO_SNDTIMEO:
2204		case SO_RCVTIMEO:
2205			optval = (sopt->sopt_name == SO_SNDTIMEO ?
2206				  so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2207
2208			tv.tv_sec = optval / hz;
2209			tv.tv_usec = (optval % hz) * tick;
2210#ifdef COMPAT_IA32
2211			if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
2212				struct timeval32 tv32;
2213
2214				CP(tv, tv32, tv_sec);
2215				CP(tv, tv32, tv_usec);
2216				error = sooptcopyout(sopt, &tv32, sizeof tv32);
2217			} else
2218#endif
2219				error = sooptcopyout(sopt, &tv, sizeof tv);
2220			break;
2221
2222		case SO_LABEL:
2223#ifdef MAC
2224			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2225			    sizeof(extmac));
2226			if (error)
2227				return (error);
2228			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
2229			    so, &extmac);
2230			if (error)
2231				return (error);
2232			error = sooptcopyout(sopt, &extmac, sizeof extmac);
2233#else
2234			error = EOPNOTSUPP;
2235#endif
2236			break;
2237
2238		case SO_PEERLABEL:
2239#ifdef MAC
2240			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2241			    sizeof(extmac));
2242			if (error)
2243				return (error);
2244			error = mac_getsockopt_peerlabel(
2245			    sopt->sopt_td->td_ucred, so, &extmac);
2246			if (error)
2247				return (error);
2248			error = sooptcopyout(sopt, &extmac, sizeof extmac);
2249#else
2250			error = EOPNOTSUPP;
2251#endif
2252			break;
2253
2254		case SO_LISTENQLIMIT:
2255			optval = so->so_qlimit;
2256			goto integer;
2257
2258		case SO_LISTENQLEN:
2259			optval = so->so_qlen;
2260			goto integer;
2261
2262		case SO_LISTENINCQLEN:
2263			optval = so->so_incqlen;
2264			goto integer;
2265
2266		default:
2267			error = ENOPROTOOPT;
2268			break;
2269		}
2270		return (error);
2271	}
2272}
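
/*
 * Example (illustrative): a kernel caller can query an option by
 * building the struct sockopt directly, mirroring the so_setsockopt()
 * helper above:
 *
 *	struct sockopt sopt;
 *	int type;
 *
 *	sopt.sopt_dir = SOPT_GET;
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_TYPE;
 *	sopt.sopt_val = &type;
 *	sopt.sopt_valsize = sizeof(type);
 *	sopt.sopt_td = NULL;
 *	error = sogetopt(so, &sopt);
 */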
2273
2274/* XXX: prepare mbuf for (__FreeBSD__ < 3) routines. */
2275int
2276soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2277{
2278	struct mbuf *m, *m_prev;
2279	int sopt_size = sopt->sopt_valsize;
2280
2281	MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
2282	if (m == NULL)
2283		return (ENOBUFS);
2284	if (sopt_size > MLEN) {
2285		MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
2286		if ((m->m_flags & M_EXT) == 0) {
2287			m_free(m);
2288			return ENOBUFS;
2289			return (ENOBUFS);
2290		m->m_len = min(MCLBYTES, sopt_size);
2291	} else {
2292		m->m_len = min(MLEN, sopt_size);
2293	}
2294	sopt_size -= m->m_len;
2295	*mp = m;
2296	m_prev = m;
2297
2298	while (sopt_size) {
2299		MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
2300		if (m == NULL) {
2301			m_freem(*mp);
2302			return (ENOBUFS);
2303		}
2304		if (sopt_size > MLEN) {
2305			MCLGET(m, sopt->sopt_td != NULL ? M_TRYWAIT :
2306			    M_DONTWAIT);
2307			if ((m->m_flags & M_EXT) == 0) {
2308				m_freem(m);
2309				m_freem(*mp);
2310				return (ENOBUFS);
2311			}
2312			m->m_len = min(MCLBYTES, sopt_size);
2313		} else {
2314			m->m_len = min(MLEN, sopt_size);
2315		}
2316		sopt_size -= m->m_len;
2317		m_prev->m_next = m;
2318		m_prev = m;
2319	}
2320	return (0);
2321}
2322
2323/* XXX: copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
2324int
2325soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2326{
2327	struct mbuf *m0 = m;
2328
2329	if (sopt->sopt_val == NULL)
2330		return (0);
2331	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2332		if (sopt->sopt_td != NULL) {
2333			int error;
2334
2335			error = copyin(sopt->sopt_val, mtod(m, char *),
2336				       m->m_len);
2337			if (error != 0) {
2338				m_freem(m0);
2339				return (error);
2340			}
2341		} else
2342			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2343		sopt->sopt_valsize -= m->m_len;
2344		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2345		m = m->m_next;
2346	}
2347	if (m != NULL) /* should be allocated sufficiently at ip6_sooptmcopyin() */
2348		panic("ip6_sooptmcopyin");
2349	return (0);
2350}
2351
2352/* XXX: copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
2353int
2354soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2355{
2356	struct mbuf *m0 = m;
2357	size_t valsize = 0;
2358
2359	if (sopt->sopt_val == NULL)
2360		return (0);
2361	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2362		if (sopt->sopt_td != NULL) {
2363			int error;
2364
2365			error = copyout(mtod(m, char *), sopt->sopt_val,
2366				       m->m_len);
2367			if (error != 0) {
2368				m_freem(m0);
2369				return (error);
2370			}
2371		} else
2372			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2373		sopt->sopt_valsize -= m->m_len;
2374		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2375		valsize += m->m_len;
2376		m = m->m_next;
2377	}
2378	if (m != NULL) {
2379		/* Enough soopt buffer should have been given from user-land. */
2380		m_freem(m0);
2381		return (EINVAL);
2382	}
2383	sopt->sopt_valsize = valsize;
2384	return (0);
2385}
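
/*
 * Example (illustrative): the three soopt_*() helpers above are meant
 * to be used together when option data must be staged in an mbuf chain
 * rather than a flat buffer:
 *
 *	struct mbuf *m;
 *
 *	error = soopt_getm(sopt, &m);		(size the chain)
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);	(fill it from sopt)
 *	...
 *	error = soopt_mcopyout(sopt, m);	(copy results back)
 *
 * Note that soopt_mcopyin() and soopt_mcopyout() advance sopt_val and
 * consume sopt_valsize as they go, and free the chain on failure.
 */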
2386
2387void
2388sohasoutofband(so)
2389	struct socket *so;
2390{
2391	if (so->so_sigio != NULL)
2392		pgsigio(&so->so_sigio, SIGURG, 0);
2393	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2394}
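
/*
 * Example (illustrative): a process that has claimed ownership of the
 * socket with fcntl(s, F_SETOWN, getpid()) will receive SIGURG from
 * pgsigio() here when the protocol reports out-of-band data.
 */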
2395
2396int
2397sopoll(struct socket *so, int events, struct ucred *active_cred,
2398    struct thread *td)
2399{
2400	int revents = 0;
2401
2402	SOCKBUF_LOCK(&so->so_snd);
2403	SOCKBUF_LOCK(&so->so_rcv);
2404	if (events & (POLLIN | POLLRDNORM))
2405		if (soreadable(so))
2406			revents |= events & (POLLIN | POLLRDNORM);
2407
2408	if (events & POLLINIGNEOF)
2409		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
2410		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
2411			revents |= POLLINIGNEOF;
2412
2413	if (events & (POLLOUT | POLLWRNORM))
2414		if (sowriteable(so))
2415			revents |= events & (POLLOUT | POLLWRNORM);
2416
2417	if (events & (POLLPRI | POLLRDBAND))
2418		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
2419			revents |= events & (POLLPRI | POLLRDBAND);
2420
2421	if (revents == 0) {
2422		if (events &
2423		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
2424		     POLLRDBAND)) {
2425			selrecord(td, &so->so_rcv.sb_sel);
2426			so->so_rcv.sb_flags |= SB_SEL;
2427		}
2428
2429		if (events & (POLLOUT | POLLWRNORM)) {
2430			selrecord(td, &so->so_snd.sb_sel);
2431			so->so_snd.sb_flags |= SB_SEL;
2432		}
2433	}
2434
2435	SOCKBUF_UNLOCK(&so->so_rcv);
2436	SOCKBUF_UNLOCK(&so->so_snd);
2437	return (revents);
2438}
2439
2440int
2441soo_kqfilter(struct file *fp, struct knote *kn)
2442{
2443	struct socket *so = kn->kn_fp->f_data;
2444	struct sockbuf *sb;
2445
2446	switch (kn->kn_filter) {
2447	case EVFILT_READ:
2448		if (so->so_options & SO_ACCEPTCONN)
2449			kn->kn_fop = &solisten_filtops;
2450		else
2451			kn->kn_fop = &soread_filtops;
2452		sb = &so->so_rcv;
2453		break;
2454	case EVFILT_WRITE:
2455		kn->kn_fop = &sowrite_filtops;
2456		sb = &so->so_snd;
2457		break;
2458	default:
2459		return (EINVAL);
2460	}
2461
2462	SOCKBUF_LOCK(sb);
2463	knlist_add(&sb->sb_sel.si_note, kn, 1);
2464	sb->sb_flags |= SB_KNOTE;
2465	SOCKBUF_UNLOCK(sb);
2466	return (0);
2467}
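
/*
 * Example (illustrative): this is reached from user-land via
 * kqueue(2)/kevent(2):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * On a listening socket, EVFILT_READ selects solisten_filtops, so the
 * knote fires when a completed connection is ready for accept(2).
 */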
2468
2469static void
2470filt_sordetach(struct knote *kn)
2471{
2472	struct socket *so = kn->kn_fp->f_data;
2473
2474	SOCKBUF_LOCK(&so->so_rcv);
2475	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
2476	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
2477		so->so_rcv.sb_flags &= ~SB_KNOTE;
2478	SOCKBUF_UNLOCK(&so->so_rcv);
2479}
2480
2481/*ARGSUSED*/
2482static int
2483filt_soread(struct knote *kn, long hint)
2484{
2485	struct socket *so;
2486
2487	so = kn->kn_fp->f_data;
2488	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2489
2490	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
2491	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2492		kn->kn_flags |= EV_EOF;
2493		kn->kn_fflags = so->so_error;
2494		return (1);
2495	} else if (so->so_error)	/* temporary udp error */
2496		return (1);
2497	else if (kn->kn_sfflags & NOTE_LOWAT)
2498		return (kn->kn_data >= kn->kn_sdata);
2499	else
2500		return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
2501}
2502
2503static void
2504filt_sowdetach(struct knote *kn)
2505{
2506	struct socket *so = kn->kn_fp->f_data;
2507
2508	SOCKBUF_LOCK(&so->so_snd);
2509	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
2510	if (knlist_empty(&so->so_snd.sb_sel.si_note))
2511		so->so_snd.sb_flags &= ~SB_KNOTE;
2512	SOCKBUF_UNLOCK(&so->so_snd);
2513}
2514
2515/*ARGSUSED*/
2516static int
2517filt_sowrite(struct knote *kn, long hint)
2518{
2519	struct socket *so;
2520
2521	so = kn->kn_fp->f_data;
2522	SOCKBUF_LOCK_ASSERT(&so->so_snd);
2523	kn->kn_data = sbspace(&so->so_snd);
2524	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2525		kn->kn_flags |= EV_EOF;
2526		kn->kn_fflags = so->so_error;
2527		return (1);
2528	} else if (so->so_error)	/* temporary udp error */
2529		return (1);
2530	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
2531	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
2532		return (0);
2533	else if (kn->kn_sfflags & NOTE_LOWAT)
2534		return (kn->kn_data >= kn->kn_sdata);
2535	else
2536		return (kn->kn_data >= so->so_snd.sb_lowat);
2537}
2538
2539/*ARGSUSED*/
2540static int
2541filt_solisten(struct knote *kn, long hint)
2542{
2543	struct socket *so = kn->kn_fp->f_data;
2544
2545	kn->kn_data = so->so_qlen;
2546	return (!TAILQ_EMPTY(&so->so_comp));
2547}
2548
2549int
2550socheckuid(struct socket *so, uid_t uid)
2551{
2552
2553	if (so == NULL)
2554		return (EPERM);
2555	if (so->so_cred->cr_uid != uid)
2556		return (EPERM);
2557	return (0);
2558}
2559
2560static int
2561somaxconn_sysctl(SYSCTL_HANDLER_ARGS)
2562{
2563	int error;
2564	int val;
2565
2566	val = somaxconn;
2567	error = sysctl_handle_int(oidp, &val, sizeof(int), req);
2568	if (error || !req->newptr)
2569		return (error);
2570
2571	if (val < 1 || val > USHRT_MAX)
2572		return (EINVAL);
2573
2574	somaxconn = val;
2575	return (0);
2576}
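
/*
 * Example (illustrative): this handler backs the kern.ipc.somaxconn
 * sysctl, so e.g. "sysctl kern.ipc.somaxconn=1024" from user-land is
 * validated against the [1, USHRT_MAX] range above before somaxconn
 * is updated.
 */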
2577