uipc_socket.c revision 178888
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 *	The Regents of the University of California.
4 * Copyright (c) 2004 The FreeBSD Foundation
5 * Copyright (c) 2004-2007 Robert N. M. Watson
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
33 */
34
35/*
36 * Comments on the socket life cycle:
37 *
38 * soalloc() sets up socket layer state for a socket, called only by
39 * socreate() and sonewconn().  Socket layer private.
40 *
41 * sodealloc() tears down socket layer state for a socket, called only by
42 * sofree(), socreate(), and sonewconn().  Socket layer private.
43 *
44 * pru_attach() associates protocol layer state with an allocated socket;
45 * called only once, may fail, aborting socket allocation.  This is called
46 * from socreate() and sonewconn().  Socket layer private.
47 *
48 * pru_detach() disassociates protocol layer state from an attached socket,
49 * and will be called exactly once for sockets in which pru_attach() has
50 * been successfully called.  If pru_attach() returned an error,
51 * pru_detach() will not be called.  Socket layer private.
52 *
53 * pru_abort() and pru_close() notify the protocol layer that the last
54 * consumer of a socket is starting to tear down the socket, and that the
55 * protocol should terminate the connection.  Historically, pru_abort() also
56 * detached protocol state from the socket state, but this is no longer the
57 * case.
58 *
59 * socreate() creates a socket and attaches protocol state.  This is a public
60 * interface that may be used by socket layer consumers to create new
61 * sockets.
62 *
63 * sonewconn() creates a socket and attaches protocol state.  This is a
64 * public interface that may be used by protocols to create new sockets when
65 * a new connection is received and will be available for accept() on a
66 * listen socket.
67 *
68 * soclose() destroys a socket after possibly waiting for it to disconnect.
69 * This is a public interface that socket consumers should use to close and
70 * release a socket when done with it.
71 *
72 * soabort() destroys a socket without waiting for it to disconnect (used
73 * only for incoming connections that are already partially or fully
74 * connected).  This is used internally by the socket layer when clearing
75 * listen socket queues (due to overflow or close on the listen socket), but
76 * is also a public interface protocols may use to abort connections in
77 * their incomplete listen queues should they no longer be required.  Sockets
78 * placed in completed connection listen queues should not be aborted for
79 * reasons described in the comment above the soclose() implementation.  This
80 * is not a general purpose close routine, and except in the specific
81 * circumstances described here, should not be used.
82 *
83 * sofree() will free a socket and its protocol state if all references on
84 * the socket have been released; it is invoked, typically via sorele(), to
85 * attempt to free a socket when a reference is removed.  This is a socket
86 * layer private interface.
87 *
88 * NOTE: In addition to socreate() and soclose(), which provide a single
89 * socket reference to the consumer to be managed as required, there are two
90 * calls to explicitly manage socket references, soref() and sorele().
91 * Currently, these are generally required only when transitioning a socket
92 * from a listen queue to a file descriptor, in order to prevent garbage
93 * collection of the socket at an untimely moment.  For a number of reasons,
94 * these interfaces are not preferred, and should be avoided.
95 */
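/*
 * Illustrative only (not part of the kernel build): a minimal sketch of how
 * an in-kernel consumer might drive the public interfaces described above.
 * Error handling is abbreviated; "td" is the calling thread and "sin" is a
 * sockaddr_in assumed to have been prepared by the caller.
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error)
 *		return (error);
 *	error = soconnect(so, (struct sockaddr *)&sin, td);
 *	...
 *	soclose(so);
 *
 * soclose() releases the single reference handed out by socreate().
 */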
96
97#include <sys/cdefs.h>
98__FBSDID("$FreeBSD: head/sys/kern/uipc_socket.c 178888 2008-05-09 23:03:00Z julian $");
99
100#include "opt_inet.h"
101#include "opt_mac.h"
102#include "opt_zero.h"
103#include "opt_compat.h"
104
105#include <sys/param.h>
106#include <sys/systm.h>
107#include <sys/fcntl.h>
108#include <sys/limits.h>
109#include <sys/lock.h>
110#include <sys/mac.h>
111#include <sys/malloc.h>
112#include <sys/mbuf.h>
113#include <sys/mutex.h>
114#include <sys/domain.h>
115#include <sys/file.h>			/* for struct knote */
116#include <sys/kernel.h>
117#include <sys/event.h>
118#include <sys/eventhandler.h>
119#include <sys/poll.h>
120#include <sys/proc.h>
121#include <sys/protosw.h>
122#include <sys/socket.h>
123#include <sys/socketvar.h>
124#include <sys/resourcevar.h>
125#include <net/route.h>
126#include <sys/signalvar.h>
127#include <sys/stat.h>
128#include <sys/sx.h>
129#include <sys/sysctl.h>
130#include <sys/uio.h>
131#include <sys/jail.h>
132
133#include <security/mac/mac_framework.h>
134
135#include <vm/uma.h>
136
137#ifdef COMPAT_IA32
138#include <sys/mount.h>
139#include <compat/freebsd32/freebsd32.h>
140
141extern struct sysentvec ia32_freebsd_sysvec;
142#endif
143
144static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
145		    int flags);
146
147static void	filt_sordetach(struct knote *kn);
148static int	filt_soread(struct knote *kn, long hint);
149static void	filt_sowdetach(struct knote *kn);
150static int	filt_sowrite(struct knote *kn, long hint);
151static int	filt_solisten(struct knote *kn, long hint);
152
153static struct filterops solisten_filtops =
154	{ 1, NULL, filt_sordetach, filt_solisten };
155static struct filterops soread_filtops =
156	{ 1, NULL, filt_sordetach, filt_soread };
157static struct filterops sowrite_filtops =
158	{ 1, NULL, filt_sowdetach, filt_sowrite };
159
160uma_zone_t socket_zone;
161so_gen_t	so_gencnt;	/* generation count for sockets */
162
163int	maxsockets;
164
165MALLOC_DEFINE(M_SONAME, "soname", "socket name");
166MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
167
168static int somaxconn = SOMAXCONN;
169static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS);
170/* XXX: we don't have SYSCTL_USHORT */
171SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
172    0, sizeof(int), sysctl_somaxconn, "I", "Maximum pending socket connection "
173    "queue size");
174static int numopensockets;
175SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
176    &numopensockets, 0, "Number of open sockets");
177#ifdef ZERO_COPY_SOCKETS
178/* These aren't static because they're used in other files. */
179int so_zero_copy_send = 1;
180int so_zero_copy_receive = 1;
181SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
182    "Zero copy controls");
183SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
184    &so_zero_copy_receive, 0, "Enable zero copy receive");
185SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
186    &so_zero_copy_send, 0, "Enable zero copy send");
187#endif /* ZERO_COPY_SOCKETS */
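/*
 * For reference: the two knobs above exist only when the kernel is built
 * with "options ZERO_COPY_SOCKETS" (see opt_zero.h), and can then be
 * toggled at run time, e.g.:
 *
 *	sysctl kern.ipc.zero_copy.send=0
 *	sysctl kern.ipc.zero_copy.receive=1
 */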
188
189/*
190 * accept_mtx locks down per-socket fields relating to accept queues.  See
191 * socketvar.h for an annotation of the protected fields of struct socket.
192 */
193struct mtx accept_mtx;
194MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
195
196/*
197 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
198 * so_gencnt field.
199 */
200static struct mtx so_global_mtx;
201MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
202
203/*
204 * General IPC sysctl name space, used by sockets and a variety of other IPC
205 * types.
206 */
207SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
208
209/*
210 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
211 * of the change so that they can update their dependent limits as required.
212 */
213static int
214sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
215{
216	int error, newmaxsockets;
217
218	newmaxsockets = maxsockets;
219	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
220	if (error == 0 && req->newptr) {
221		if (newmaxsockets > maxsockets) {
222			maxsockets = newmaxsockets;
223			if (maxsockets > ((maxfiles / 4) * 3)) {
224				maxfiles = (maxsockets * 5) / 4;
225				maxfilesperproc = (maxfiles * 9) / 10;
226			}
227			EVENTHANDLER_INVOKE(maxsockets_change);
228		} else
229			error = EINVAL;
230	}
231	return (error);
232}
233
234SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
235    &maxsockets, 0, sysctl_maxsockets, "IU",
236    "Maximum number of sockets available");
237
238/*
239 * Initialise maxsockets.
240 */
241static void init_maxsockets(void *ignored)
242{
243	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
244	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
245}
246SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
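/*
 * For reference: maxsockets can be seeded from the loader and later raised
 * (never lowered) through the sysctl handler above, e.g.:
 *
 *	# in /boot/loader.conf
 *	kern.ipc.maxsockets="131072"
 *
 *	# at run time
 *	sysctl kern.ipc.maxsockets=131072
 *
 * Raising it beyond three quarters of maxfiles also grows maxfiles and
 * maxfilesperproc, as implemented in sysctl_maxsockets() above.
 */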
247
248/*
249 * Socket operation routines.  These routines are called by the routines in
250 * sys_socket.c or from a system process, and implement the semantics of
251 * socket operations by switching out to the protocol specific routines.
252 */
253
254/*
255 * Get a socket structure from our zone, and initialize it.  Note that it
256 * would probably be better to allocate socket and PCB at the same time, but
257 * I'm not convinced that all the protocols can be easily modified to do
258 * this.
259 *
260 * soalloc() returns a socket with a ref count of 0.
261 */
262static struct socket *
263soalloc(void)
264{
265	struct socket *so;
266
267	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
268	if (so == NULL)
269		return (NULL);
270#ifdef MAC
271	if (mac_socket_init(so, M_NOWAIT) != 0) {
272		uma_zfree(socket_zone, so);
273		return (NULL);
274	}
275#endif
276	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
277	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
278	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
279	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
280	TAILQ_INIT(&so->so_aiojobq);
281	mtx_lock(&so_global_mtx);
282	so->so_gencnt = ++so_gencnt;
283	++numopensockets;
284	mtx_unlock(&so_global_mtx);
285	return (so);
286}
287
288/*
289 * Free the storage associated with a socket at the socket layer, tear down
290 * locks, labels, etc.  All protocol state is assumed already to have been
291 * torn down (and possibly never set up) by the caller.
292 */
293static void
294sodealloc(struct socket *so)
295{
296
297	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
298	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
299
300	mtx_lock(&so_global_mtx);
301	so->so_gencnt = ++so_gencnt;
302	--numopensockets;	/* Could be below, but faster here. */
303	mtx_unlock(&so_global_mtx);
304	if (so->so_rcv.sb_hiwat)
305		(void)chgsbsize(so->so_cred->cr_uidinfo,
306		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
307	if (so->so_snd.sb_hiwat)
308		(void)chgsbsize(so->so_cred->cr_uidinfo,
309		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
310#ifdef INET
311	/* remove accept filter if one is present. */
312	if (so->so_accf != NULL)
313		do_setopt_accept_filter(so, NULL);
314#endif
315#ifdef MAC
316	mac_socket_destroy(so);
317#endif
318	crfree(so->so_cred);
319	sx_destroy(&so->so_snd.sb_sx);
320	sx_destroy(&so->so_rcv.sb_sx);
321	SOCKBUF_LOCK_DESTROY(&so->so_snd);
322	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
323	uma_zfree(socket_zone, so);
324}
325
326/*
327 * socreate returns a socket with a ref count of 1.  The socket should be
328 * closed with soclose().
329 */
330int
331socreate(int dom, struct socket **aso, int type, int proto,
332    struct ucred *cred, struct thread *td)
333{
334	struct protosw *prp;
335	struct socket *so;
336	int error;
337
338	if (proto)
339		prp = pffindproto(dom, proto, type);
340	else
341		prp = pffindtype(dom, type);
342
343	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
344	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
345		return (EPROTONOSUPPORT);
346
347	if (jailed(cred) && jail_socket_unixiproute_only &&
348	    prp->pr_domain->dom_family != PF_LOCAL &&
349	    prp->pr_domain->dom_family != PF_INET &&
350	    prp->pr_domain->dom_family != PF_ROUTE) {
351		return (EPROTONOSUPPORT);
352	}
353
354	if (prp->pr_type != type)
355		return (EPROTOTYPE);
356	so = soalloc();
357	if (so == NULL)
358		return (ENOBUFS);
359
360	TAILQ_INIT(&so->so_incomp);
361	TAILQ_INIT(&so->so_comp);
362	so->so_type = type;
363	so->so_cred = crhold(cred);
364	if ((prp->pr_domain->dom_family == PF_INET) ||
365	    (prp->pr_domain->dom_family == PF_ROUTE))
366		so->so_fibnum = td->td_proc->p_fibnum;
367	else
368		so->so_fibnum = 0;
369	so->so_proto = prp;
370#ifdef MAC
371	mac_socket_create(cred, so);
372#endif
373	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
374	    NULL, NULL, NULL);
375	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
376	    NULL, NULL, NULL);
377	so->so_count = 1;
378	/*
379	 * Auto-sizing of socket buffers is managed by the protocols and
380	 * the appropriate flags must be set in the pru_attach function.
381	 */
382	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
383	if (error) {
384		KASSERT(so->so_count == 1, ("socreate: so_count %d",
385		    so->so_count));
386		so->so_count = 0;
387		sodealloc(so);
388		return (error);
389	}
390	*aso = so;
391	return (0);
392}
393
394#ifdef REGRESSION
395static int regression_sonewconn_earlytest = 1;
396SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
397    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
398#endif
399
400/*
401 * When an attempt at a new connection is noted on a socket which accepts
402 * connections, sonewconn is called.  If the connection is possible (subject
403 * to space constraints, etc.) then we allocate a new structure, properly
404 * linked into the data structure of the original socket, and return this.
405 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
406 *
407 * Note: the ref count on the socket is 0 on return.
408 */
409struct socket *
410sonewconn(struct socket *head, int connstatus)
411{
412	struct socket *so;
413	int over;
414
415	ACCEPT_LOCK();
416	over = (head->so_qlen > 3 * head->so_qlimit / 2);
417	ACCEPT_UNLOCK();
418#ifdef REGRESSION
419	if (regression_sonewconn_earlytest && over)
420#else
421	if (over)
422#endif
423		return (NULL);
424	so = soalloc();
425	if (so == NULL)
426		return (NULL);
427	if ((head->so_options & SO_ACCEPTFILTER) != 0)
428		connstatus = 0;
429	so->so_head = head;
430	so->so_type = head->so_type;
431	so->so_options = head->so_options &~ SO_ACCEPTCONN;
432	so->so_linger = head->so_linger;
433	so->so_state = head->so_state | SS_NOFDREF;
434	so->so_proto = head->so_proto;
435	so->so_cred = crhold(head->so_cred);
436#ifdef MAC
437	SOCK_LOCK(head);
438	mac_socket_newconn(head, so);
439	SOCK_UNLOCK(head);
440#endif
441	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
442	    NULL, NULL, NULL);
443	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
444	    NULL, NULL, NULL);
445	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
446	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
447		sodealloc(so);
448		return (NULL);
449	}
450	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
451	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
452	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
453	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
454	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
455	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
456	so->so_state |= connstatus;
457	ACCEPT_LOCK();
458	if (connstatus) {
459		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
460		so->so_qstate |= SQ_COMP;
461		head->so_qlen++;
462	} else {
463		/*
464		 * Keep removing sockets from the head until there's room for
465		 * us to insert on the tail.  In pre-locking revisions, this
466		 * was a simple if(), but as we could be racing with other
467		 * threads and soabort() requires dropping locks, we must
468		 * loop waiting for the condition to be true.
469		 */
470		while (head->so_incqlen > head->so_qlimit) {
471			struct socket *sp;
472			sp = TAILQ_FIRST(&head->so_incomp);
473			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
474			head->so_incqlen--;
475			sp->so_qstate &= ~SQ_INCOMP;
476			sp->so_head = NULL;
477			ACCEPT_UNLOCK();
478			soabort(sp);
479			ACCEPT_LOCK();
480		}
481		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
482		so->so_qstate |= SQ_INCOMP;
483		head->so_incqlen++;
484	}
485	ACCEPT_UNLOCK();
486	if (connstatus) {
487		sorwakeup(head);
488		wakeup_one(&head->so_timeo);
489	}
490	return (so);
491}
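/*
 * Illustrative only: a protocol that has noted an incoming connection on a
 * listening socket "head" typically allocates the new socket with
 * sonewconn() and later marks it connected, roughly:
 *
 *	so = sonewconn(head, 0);
 *	if (so == NULL)
 *		goto drop;		(queue full or allocation failure)
 *	... protocol-specific initialization of so ...
 *	soisconnected(so);
 *
 * soisconnected(), defined elsewhere in the socket layer, moves the
 * embryonic socket from the incomplete to the completed listen queue.
 */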
492
493int
494sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
495{
496
497	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
498}
499
500/*
501 * solisten() transitions a socket from a non-listening state to a listening
502 * state, but can also be used to update the listen queue depth on an
503 * existing listen socket.  The protocol will call back into the sockets
504 * layer using solisten_proto_check() and solisten_proto() to check and set
505 * socket-layer listen state.  Call backs are used so that the protocol can
506 * acquire both protocol and socket layer locks in whatever order is required
507 * by the protocol.
508 *
509 * Protocol implementors are advised to hold the socket lock across the
510 * socket-layer test and set to avoid races at the socket layer.
511 */
512int
513solisten(struct socket *so, int backlog, struct thread *td)
514{
515
516	return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
517}
518
519int
520solisten_proto_check(struct socket *so)
521{
522
523	SOCK_LOCK_ASSERT(so);
524
525	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
526	    SS_ISDISCONNECTING))
527		return (EINVAL);
528	return (0);
529}
530
531void
532solisten_proto(struct socket *so, int backlog)
533{
534
535	SOCK_LOCK_ASSERT(so);
536
537	if (backlog < 0 || backlog > somaxconn)
538		backlog = somaxconn;
539	so->so_qlimit = backlog;
540	so->so_options |= SO_ACCEPTCONN;
541}
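/*
 * Illustrative only: a hypothetical protocol's pru_listen method would use
 * the two callbacks above, holding the socket lock across the check and the
 * set as recommended in the comment before solisten():
 *
 *	static int
 *	foo_listen(struct socket *so, int backlog, struct thread *td)
 *	{
 *		int error;
 *
 *		FOO_LOCK();			(protocol lock, hypothetical)
 *		SOCK_LOCK(so);
 *		error = solisten_proto_check(so);
 *		if (error == 0)
 *			solisten_proto(so, backlog);
 *		SOCK_UNLOCK(so);
 *		FOO_UNLOCK();
 *		return (error);
 *	}
 */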
542
543/*
544 * Attempt to free a socket.  This should really be sotryfree().
545 *
546 * sofree() will succeed if:
547 *
548 * - There are no outstanding file descriptor references or related consumers
549 *   (so_count == 0).
550 *
551 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
552 *
553 * - The protocol does not have an outstanding strong reference on the socket
554 *   (SS_PROTOREF).
555 *
556 * - The socket is not in a completed connection queue, where a process may
557 *   already have been notified of it; removing it could leave that process
558 *   blocked in accept() despite select() saying the socket was ready.
559 *
560 * Otherwise, it will quietly abort so that a future call to sofree(), when
561 * conditions are right, can succeed.
562 */
563void
564sofree(struct socket *so)
565{
566	struct protosw *pr = so->so_proto;
567	struct socket *head;
568
569	ACCEPT_LOCK_ASSERT();
570	SOCK_LOCK_ASSERT(so);
571
572	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
573	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
574		SOCK_UNLOCK(so);
575		ACCEPT_UNLOCK();
576		return;
577	}
578
579	head = so->so_head;
580	if (head != NULL) {
581		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
582		    (so->so_qstate & SQ_INCOMP) != 0,
583		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
584		    "SQ_INCOMP"));
585		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
586		    (so->so_qstate & SQ_INCOMP) == 0,
587		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
588		TAILQ_REMOVE(&head->so_incomp, so, so_list);
589		head->so_incqlen--;
590		so->so_qstate &= ~SQ_INCOMP;
591		so->so_head = NULL;
592	}
593	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
594	    (so->so_qstate & SQ_INCOMP) == 0,
595	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
596	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
597	if (so->so_options & SO_ACCEPTCONN) {
598		KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated"));
599		KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated"));
600	}
601	SOCK_UNLOCK(so);
602	ACCEPT_UNLOCK();
603
604	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
605		(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
606	if (pr->pr_usrreqs->pru_detach != NULL)
607		(*pr->pr_usrreqs->pru_detach)(so);
608
609	/*
610	 * From this point on, we assume that no other references to this
611	 * socket exist anywhere else in the stack.  Therefore, no locks need
612	 * to be acquired or held.
613	 *
614	 * We used to do a lot of socket buffer and socket locking here, as
615	 * well as invoke sorflush() and perform wakeups.  The direct calls to
616	 * dom_dispose() and sbrelease_internal() are an inlining of what was
617	 * necessary from sorflush().
618	 *
619	 * Notice that the socket buffer and kqueue state are torn down as
620	 * soon as pru_detach returns.  This means that protocols should not
621	 * assume they can perform socket wakeups, etc, in their detach code.
622	 */
623	sbdestroy(&so->so_snd, so);
624	sbdestroy(&so->so_rcv, so);
625	knlist_destroy(&so->so_rcv.sb_sel.si_note);
626	knlist_destroy(&so->so_snd.sb_sel.si_note);
627	sodealloc(so);
628}
629
630/*
631 * Close a socket on last file table reference removal.  Initiate disconnect
632 * if connected.  Free socket when disconnect complete.
633 *
634 * This function will sorele() the socket.  Note that soclose() may be called
635 * prior to the ref count reaching zero.  The actual socket structure will
636 * not be freed until the ref count reaches zero.
637 */
638int
639soclose(struct socket *so)
640{
641	int error = 0;
642
643	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
644
645	funsetown(&so->so_sigio);
646	if (so->so_state & SS_ISCONNECTED) {
647		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
648			error = sodisconnect(so);
649			if (error)
650				goto drop;
651		}
652		if (so->so_options & SO_LINGER) {
653			if ((so->so_state & SS_ISDISCONNECTING) &&
654			    (so->so_state & SS_NBIO))
655				goto drop;
656			while (so->so_state & SS_ISCONNECTED) {
657				error = tsleep(&so->so_timeo,
658				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
659				if (error)
660					break;
661			}
662		}
663	}
664
665drop:
666	if (so->so_proto->pr_usrreqs->pru_close != NULL)
667		(*so->so_proto->pr_usrreqs->pru_close)(so);
668	if (so->so_options & SO_ACCEPTCONN) {
669		struct socket *sp;
670		ACCEPT_LOCK();
671		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
672			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
673			so->so_incqlen--;
674			sp->so_qstate &= ~SQ_INCOMP;
675			sp->so_head = NULL;
676			ACCEPT_UNLOCK();
677			soabort(sp);
678			ACCEPT_LOCK();
679		}
680		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
681			TAILQ_REMOVE(&so->so_comp, sp, so_list);
682			so->so_qlen--;
683			sp->so_qstate &= ~SQ_COMP;
684			sp->so_head = NULL;
685			ACCEPT_UNLOCK();
686			soabort(sp);
687			ACCEPT_LOCK();
688		}
689		ACCEPT_UNLOCK();
690	}
691	ACCEPT_LOCK();
692	SOCK_LOCK(so);
693	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
694	so->so_state |= SS_NOFDREF;
695	sorele(so);
696	return (error);
697}
698
699/*
700 * soabort() is used to abruptly tear down a connection, such as when a
701 * resource limit is reached (listen queue depth exceeded), or if a listen
702 * socket is closed while there are sockets waiting to be accepted.
703 *
704 * This interface is tricky, because it is called on an unreferenced socket,
705 * and must be called only by a thread that has actually removed the socket
706 * from the listen queue it was on, or races with other threads are risked.
707 *
708 * This interface will call into the protocol code, so must not be called
709 * with any socket locks held.  Protocols do call it while holding their own
710 * recursible protocol mutexes, but this is something that should be subject
711 * to review in the future.
712 */
713void
714soabort(struct socket *so)
715{
716
717	/*
718	 * In as much as is possible, assert that no references to this
719	 * socket are held.  This is not quite the same as asserting that the
720	 * current thread is responsible for arranging for no references, but
721	 * is as close as we can get for now.
722	 */
723	KASSERT(so->so_count == 0, ("soabort: so_count"));
724	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
725	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
726	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
727	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));
728
729	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
730		(*so->so_proto->pr_usrreqs->pru_abort)(so);
731	ACCEPT_LOCK();
732	SOCK_LOCK(so);
733	sofree(so);
734}
735
736int
737soaccept(struct socket *so, struct sockaddr **nam)
738{
739	int error;
740
741	SOCK_LOCK(so);
742	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
743	so->so_state &= ~SS_NOFDREF;
744	SOCK_UNLOCK(so);
745	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
746	return (error);
747}
748
749int
750soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
751{
752	int error;
753
754	if (so->so_options & SO_ACCEPTCONN)
755		return (EOPNOTSUPP);
756	/*
757	 * If protocol is connection-based, can only connect once.
758	 * Otherwise, if connected, try to disconnect first.  This allows
759	 * user to disconnect by connecting to, e.g., a null address.
760	 */
761	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
762	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
763	    (error = sodisconnect(so)))) {
764		error = EISCONN;
765	} else {
766		/*
767		 * Prevent accumulated error from previous connection from
768		 * biting us.
769		 */
770		so->so_error = 0;
771		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
772	}
773
774	return (error);
775}
776
777int
778soconnect2(struct socket *so1, struct socket *so2)
779{
780
781	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
782}
783
784int
785sodisconnect(struct socket *so)
786{
787	int error;
788
789	if ((so->so_state & SS_ISCONNECTED) == 0)
790		return (ENOTCONN);
791	if (so->so_state & SS_ISDISCONNECTING)
792		return (EALREADY);
793	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
794	return (error);
795}
796
797#ifdef ZERO_COPY_SOCKETS
798struct so_zerocopy_stats{
799	int size_ok;
800	int align_ok;
801	int found_ifp;
802};
803struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
804#include <netinet/in.h>
805#include <net/route.h>
806#include <netinet/in_pcb.h>
807#include <vm/vm.h>
808#include <vm/vm_page.h>
809#include <vm/vm_object.h>
810
811/*
812 * sosend_copyin() is only used if zero copy sockets are enabled.  Otherwise
813 * sosend_dgram() and sosend_generic() use m_uiotombuf().
814 *
815 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
816 * all of the data referenced by the uio.  If desired, it uses zero-copy.
817 * *space will be updated to reflect data copied in.
818 *
819 * NB: If atomic I/O is requested, the caller must already have checked that
820 * space can hold resid bytes.
821 *
822 * NB: In the event of an error, the caller may need to free the partial
823 * chain pointed to by *mpp.  The contents of both *uio and *space may be
824 * modified even in the case of an error.
825 */
826static int
827sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
828    int flags)
829{
830	struct mbuf *m, **mp, *top;
831	long len, resid;
832	int error;
833#ifdef ZERO_COPY_SOCKETS
834	int cow_send;
835#endif
836
837	*retmp = top = NULL;
838	mp = &top;
839	len = 0;
840	resid = uio->uio_resid;
841	error = 0;
842	do {
843#ifdef ZERO_COPY_SOCKETS
844		cow_send = 0;
845#endif /* ZERO_COPY_SOCKETS */
846		if (resid >= MINCLSIZE) {
847#ifdef ZERO_COPY_SOCKETS
848			if (top == NULL) {
849				m = m_gethdr(M_WAITOK, MT_DATA);
850				m->m_pkthdr.len = 0;
851				m->m_pkthdr.rcvif = NULL;
852			} else
853				m = m_get(M_WAITOK, MT_DATA);
854			if (so_zero_copy_send &&
855			    resid>=PAGE_SIZE &&
856			    *space>=PAGE_SIZE &&
857			    uio->uio_iov->iov_len>=PAGE_SIZE) {
858				so_zerocp_stats.size_ok++;
859				so_zerocp_stats.align_ok++;
860				cow_send = socow_setup(m, uio);
861				len = cow_send;
862			}
863			if (!cow_send) {
864				m_clget(m, M_WAITOK);
865				len = min(min(MCLBYTES, resid), *space);
866			}
867#else /* ZERO_COPY_SOCKETS */
868			if (top == NULL) {
869				m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
870				m->m_pkthdr.len = 0;
871				m->m_pkthdr.rcvif = NULL;
872			} else
873				m = m_getcl(M_WAIT, MT_DATA, 0);
874			len = min(min(MCLBYTES, resid), *space);
875#endif /* ZERO_COPY_SOCKETS */
876		} else {
877			if (top == NULL) {
878				m = m_gethdr(M_WAIT, MT_DATA);
879				m->m_pkthdr.len = 0;
880				m->m_pkthdr.rcvif = NULL;
881
882				len = min(min(MHLEN, resid), *space);
883				/*
884				 * For datagram protocols, leave room
885				 * for protocol headers in first mbuf.
886				 */
887				if (atomic && m && len < MHLEN)
888					MH_ALIGN(m, len);
889			} else {
890				m = m_get(M_WAIT, MT_DATA);
891				len = min(min(MLEN, resid), *space);
892			}
893		}
894		if (m == NULL) {
895			error = ENOBUFS;
896			goto out;
897		}
898
899		*space -= len;
900#ifdef ZERO_COPY_SOCKETS
901		if (cow_send)
902			error = 0;
903		else
904#endif /* ZERO_COPY_SOCKETS */
905		error = uiomove(mtod(m, void *), (int)len, uio);
906		resid = uio->uio_resid;
907		m->m_len = len;
908		*mp = m;
909		top->m_pkthdr.len += len;
910		if (error)
911			goto out;
912		mp = &m->m_next;
913		if (resid <= 0) {
914			if (flags & MSG_EOR)
915				top->m_flags |= M_EOR;
916			break;
917		}
918	} while (*space > 0 && atomic);
919out:
920	*retmp = top;
921	return (error);
922}
923#endif /*ZERO_COPY_SOCKETS*/
924
925#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
926
927int
928sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
929    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
930{
931	long space, resid;
932	int clen = 0, error, dontroute;
933#ifdef ZERO_COPY_SOCKETS
934	int atomic = sosendallatonce(so) || top;
935#endif
936
937	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
938	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
939	    ("sosend_dgram: !PR_ATOMIC"));
940
941	if (uio != NULL)
942		resid = uio->uio_resid;
943	else
944		resid = top->m_pkthdr.len;
945	/*
946	 * In theory resid should be unsigned.  However, space must be
947	 * signed, as it might be less than 0 if we over-committed, and we
948	 * must use a signed comparison of space and resid.  On the other
949	 * hand, a negative resid causes us to loop sending 0-length
950	 * segments to the protocol.
951	 *
952	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
953	 * type sockets since that's an error.
954	 */
955	if (resid < 0) {
956		error = EINVAL;
957		goto out;
958	}
959
960	dontroute =
961	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
962	if (td != NULL)
963		td->td_ru.ru_msgsnd++;
964	if (control != NULL)
965		clen = control->m_len;
966
967	SOCKBUF_LOCK(&so->so_snd);
968	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
969		SOCKBUF_UNLOCK(&so->so_snd);
970		error = EPIPE;
971		goto out;
972	}
973	if (so->so_error) {
974		error = so->so_error;
975		so->so_error = 0;
976		SOCKBUF_UNLOCK(&so->so_snd);
977		goto out;
978	}
979	if ((so->so_state & SS_ISCONNECTED) == 0) {
980		/*
981		 * `sendto' and `sendmsg' are allowed on a connection-based
982		 * socket if it supports implied connect.  Return ENOTCONN if
983		 * not connected and no address is supplied.
984		 */
985		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
986		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
987			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
988			    !(resid == 0 && clen != 0)) {
989				SOCKBUF_UNLOCK(&so->so_snd);
990				error = ENOTCONN;
991				goto out;
992			}
993		} else if (addr == NULL) {
994			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
995				error = ENOTCONN;
996			else
997				error = EDESTADDRREQ;
998			SOCKBUF_UNLOCK(&so->so_snd);
999			goto out;
1000		}
1001	}
1002
1003	/*
1004	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
1005	 * problem and need fixing.
1006	 */
1007	space = sbspace(&so->so_snd);
1008	if (flags & MSG_OOB)
1009		space += 1024;
1010	space -= clen;
1011	SOCKBUF_UNLOCK(&so->so_snd);
1012	if (resid > space) {
1013		error = EMSGSIZE;
1014		goto out;
1015	}
1016	if (uio == NULL) {
1017		resid = 0;
1018		if (flags & MSG_EOR)
1019			top->m_flags |= M_EOR;
1020	} else {
1021#ifdef ZERO_COPY_SOCKETS
1022		error = sosend_copyin(uio, &top, atomic, &space, flags);
1023		if (error)
1024			goto out;
1025#else
1026		/*
1027		 * Copy the data from userland into an mbuf chain.
1028		 * If no data is to be copied in, a single empty mbuf
1029		 * is returned.
1030		 */
1031		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
1032		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
1033		if (top == NULL) {
1034			error = EFAULT;	/* only possible error */
1035			goto out;
1036		}
1037		space -= resid - uio->uio_resid;
1038#endif
1039		resid = uio->uio_resid;
1040	}
1041	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
1042	/*
1043	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
1044	 * than with.
1045	 */
1046	if (dontroute) {
1047		SOCK_LOCK(so);
1048		so->so_options |= SO_DONTROUTE;
1049		SOCK_UNLOCK(so);
1050	}
1051	/*
1052	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
1053	 * of date.  We could have recieved a reset packet in an interrupt or
1054	 * of date.  We could have received a reset packet in an interrupt or
1055	 * probably recheck again inside the locking protection here, but
1056	 * there are probably other places that this also happens.  We must
1057	 * rethink this.
1058	 */
1059	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1060	    (flags & MSG_OOB) ? PRUS_OOB :
1061	/*
1062	 * If the user set MSG_EOF, the protocol understands this flag, and there
1063	 * is nothing left to send, then use PRU_SEND_EOF instead of PRU_SEND.
1064	 */
1065	    ((flags & MSG_EOF) &&
1066	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1067	     (resid <= 0)) ?
1068		PRUS_EOF :
1069		/* If there is more to send set PRUS_MORETOCOME */
1070		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1071		top, addr, control, td);
1072	if (dontroute) {
1073		SOCK_LOCK(so);
1074		so->so_options &= ~SO_DONTROUTE;
1075		SOCK_UNLOCK(so);
1076	}
1077	clen = 0;
1078	control = NULL;
1079	top = NULL;
1080out:
1081	if (top != NULL)
1082		m_freem(top);
1083	if (control != NULL)
1084		m_freem(control);
1085	return (error);
1086}
1087
1088/*
1089 * Send on a socket.  If send must go all at once and message is larger than
1090 * send buffering, then hard error.  Lock against other senders.  If must go
1091 * all at once and not enough room now, then inform user that this would
1092 * block and do nothing.  Otherwise, if nonblocking, send as much as
1093 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
1094 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
1095 * in mbuf chain must be small enough to send all at once.
1096 *
1097 * Returns nonzero on error, timeout or signal; callers must check for short
1098 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
1099 * on return.
1100 */
1101int
1102sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
1103    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1104{
1105	long space, resid;
1106	int clen = 0, error, dontroute;
1107	int atomic = sosendallatonce(so) || top;
1108
1109	if (uio != NULL)
1110		resid = uio->uio_resid;
1111	else
1112		resid = top->m_pkthdr.len;
1113	/*
1114	 * In theory resid should be unsigned.  However, space must be
1115	 * signed, as it might be less than 0 if we over-committed, and we
1116	 * must use a signed comparison of space and resid.  On the other
1117	 * hand, a negative resid causes us to loop sending 0-length
1118	 * segments to the protocol.
1119	 *
1120	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
1121	 * type sockets since that's an error.
1122	 */
1123	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
1124		error = EINVAL;
1125		goto out;
1126	}
1127
1128	dontroute =
1129	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
1130	    (so->so_proto->pr_flags & PR_ATOMIC);
1131	if (td != NULL)
1132		td->td_ru.ru_msgsnd++;
1133	if (control != NULL)
1134		clen = control->m_len;
1135
1136	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
1137	if (error)
1138		goto out;
1139
1140restart:
1141	do {
1142		SOCKBUF_LOCK(&so->so_snd);
1143		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1144			SOCKBUF_UNLOCK(&so->so_snd);
1145			error = EPIPE;
1146			goto release;
1147		}
1148		if (so->so_error) {
1149			error = so->so_error;
1150			so->so_error = 0;
1151			SOCKBUF_UNLOCK(&so->so_snd);
1152			goto release;
1153		}
1154		if ((so->so_state & SS_ISCONNECTED) == 0) {
1155			/*
1156			 * `sendto' and `sendmsg' are allowed on a connection-
1157			 * based socket if it supports implied connect.
1158			 * Return ENOTCONN if not connected and no address is
1159			 * supplied.
1160			 */
1161			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1162			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1163				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1164				    !(resid == 0 && clen != 0)) {
1165					SOCKBUF_UNLOCK(&so->so_snd);
1166					error = ENOTCONN;
1167					goto release;
1168				}
1169			} else if (addr == NULL) {
1170				SOCKBUF_UNLOCK(&so->so_snd);
1171				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1172					error = ENOTCONN;
1173				else
1174					error = EDESTADDRREQ;
1175				goto release;
1176			}
1177		}
1178		space = sbspace(&so->so_snd);
1179		if (flags & MSG_OOB)
1180			space += 1024;
1181		if ((atomic && resid > so->so_snd.sb_hiwat) ||
1182		    clen > so->so_snd.sb_hiwat) {
1183			SOCKBUF_UNLOCK(&so->so_snd);
1184			error = EMSGSIZE;
1185			goto release;
1186		}
1187		if (space < resid + clen &&
1188		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
1189			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
1190				SOCKBUF_UNLOCK(&so->so_snd);
1191				error = EWOULDBLOCK;
1192				goto release;
1193			}
1194			error = sbwait(&so->so_snd);
1195			SOCKBUF_UNLOCK(&so->so_snd);
1196			if (error)
1197				goto release;
1198			goto restart;
1199		}
1200		SOCKBUF_UNLOCK(&so->so_snd);
1201		space -= clen;
1202		do {
1203			if (uio == NULL) {
1204				resid = 0;
1205				if (flags & MSG_EOR)
1206					top->m_flags |= M_EOR;
1207			} else {
1208#ifdef ZERO_COPY_SOCKETS
1209				error = sosend_copyin(uio, &top, atomic,
1210				    &space, flags);
1211				if (error != 0)
1212					goto release;
1213#else
1214				/*
1215				 * Copy the data from userland into an mbuf
1216				 * chain.  If no data is to be copied in,
1217				 * a single empty mbuf is returned.
1218				 */
1219				top = m_uiotombuf(uio, M_WAITOK, space,
1220				    (atomic ? max_hdr : 0),
1221				    (atomic ? M_PKTHDR : 0) |
1222				    ((flags & MSG_EOR) ? M_EOR : 0));
1223				if (top == NULL) {
1224					error = EFAULT; /* only possible error */
1225					goto release;
1226				}
1227				space -= resid - uio->uio_resid;
1228#endif
1229				resid = uio->uio_resid;
1230			}
1231			if (dontroute) {
1232				SOCK_LOCK(so);
1233				so->so_options |= SO_DONTROUTE;
1234				SOCK_UNLOCK(so);
1235			}
1236			/*
1237			 * XXX all the SBS_CANTSENDMORE checks previously
1238			 * done could be out of date.  We could have received
1239			 * a reset packet in an interrupt or maybe we slept
1240			 * while doing page faults in uiomove() etc.  We
1241			 * could probably recheck again inside the locking
1242			 * protection here, but there are probably other
1243			 * places that this also happens.  We must rethink
1244			 * this.
1245			 */
1246			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1247			    (flags & MSG_OOB) ? PRUS_OOB :
1248			/*
1249			 * If the user set MSG_EOF, the protocol understands
1250			 * this flag, and there is nothing left to send, then use
1251			 * PRU_SEND_EOF instead of PRU_SEND.
1252			 */
1253			    ((flags & MSG_EOF) &&
1254			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1255			     (resid <= 0)) ?
1256				PRUS_EOF :
1257			/* If there is more to send set PRUS_MORETOCOME. */
1258			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1259			    top, addr, control, td);
1260			if (dontroute) {
1261				SOCK_LOCK(so);
1262				so->so_options &= ~SO_DONTROUTE;
1263				SOCK_UNLOCK(so);
1264			}
1265			clen = 0;
1266			control = NULL;
1267			top = NULL;
1268			if (error)
1269				goto release;
1270		} while (resid && space > 0);
1271	} while (resid);
1272
1273release:
1274	sbunlock(&so->so_snd);
1275out:
1276	if (top != NULL)
1277		m_freem(top);
1278	if (control != NULL)
1279		m_freem(control);
1280	return (error);
1281}
1282
1283int
1284sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
1285    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1286{
1287
1288	/* XXXRW: Temporary debugging. */
1289	KASSERT(so->so_proto->pr_usrreqs->pru_sosend != sosend,
1290	    ("sosend: protocol calls sosend"));
1291
1292	return (so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
1293	    control, flags, td));
1294}
1295
1296/*
1297 * The part of soreceive() that implements reading non-inline out-of-band
1298 * data from a socket.  For more complete comments, see soreceive(), from
1299 * which this code originated.
1300 *
1301 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
1302 * unable to return an mbuf chain to the caller.
1303 */
1304static int
1305soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
1306{
1307	struct protosw *pr = so->so_proto;
1308	struct mbuf *m;
1309	int error;
1310
1311	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
1312
1313	m = m_get(M_WAIT, MT_DATA);
1314	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
1315	if (error)
1316		goto bad;
1317	do {
1318#ifdef ZERO_COPY_SOCKETS
1319		if (so_zero_copy_receive) {
1320			int disposable;
1321
1322			if ((m->m_flags & M_EXT)
1323			 && (m->m_ext.ext_type == EXT_DISPOSABLE))
1324				disposable = 1;
1325			else
1326				disposable = 0;
1327
1328			error = uiomoveco(mtod(m, void *),
1329					  min(uio->uio_resid, m->m_len),
1330					  uio, disposable);
1331		} else
1332#endif /* ZERO_COPY_SOCKETS */
1333		error = uiomove(mtod(m, void *),
1334		    (int) min(uio->uio_resid, m->m_len), uio);
1335		m = m_free(m);
1336	} while (uio->uio_resid && error == 0 && m);
1337bad:
1338	if (m != NULL)
1339		m_freem(m);
1340	return (error);
1341}
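/*
 * For reference: a userland consumer reaches this path by passing MSG_OOB
 * to recv(2) on a socket that carries urgent data, e.g.:
 *
 *	char oobc;
 *
 *	if (recv(s, &oobc, 1, MSG_OOB) == 1)
 *		... handle the urgent byte ...
 */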
1342
1343/*
1344 * Following replacement or removal of the first mbuf on the first mbuf chain
1345 * of a socket buffer, push necessary state changes back into the socket
1346 * buffer so that other consumers see the values consistently.  'nextrecord'
1347 * is the callers locally stored value of the original value of
1348 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
1349 * NOTE: 'nextrecord' may be NULL.
1350 */
1351static __inline void
1352sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
1353{
1354
1355	SOCKBUF_LOCK_ASSERT(sb);
1356	/*
1357	 * First, update for the new value of nextrecord.  If necessary, make
1358	 * it the first record.
1359	 */
1360	if (sb->sb_mb != NULL)
1361		sb->sb_mb->m_nextpkt = nextrecord;
1362	else
1363		sb->sb_mb = nextrecord;
1364
1365	/*
1366	 * Now update any dependent socket buffer fields to reflect the new
1367	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
1368	 * addition of a second clause that takes care of the case where
1369	 * sb_mb has been updated, but remains the last record.
1370	 */
1371	if (sb->sb_mb == NULL) {
1372		sb->sb_mbtail = NULL;
1373		sb->sb_lastrecord = NULL;
1374	} else if (sb->sb_mb->m_nextpkt == NULL)
1375		sb->sb_lastrecord = sb->sb_mb;
1376}
1377
1378
1379/*
1380 * Implement receive operations on a socket.  We depend on the way that
1381 * records are added to the sockbuf by sbappend.  In particular, each record
1382 * (mbufs linked through m_next) must begin with an address if the protocol
1383 * so specifies, followed by an optional mbuf or mbufs containing ancillary
1384 * data, and then zero or more mbufs of data.  In order to allow parallelism
1385 * between network receive and copying to user space, as well as avoid
1386 * sleeping with a mutex held, we release the socket buffer mutex during the
1387 * user space copy.  Although the sockbuf is locked, new data may still be
1388 * appended, and thus we must maintain consistency of the sockbuf during that
1389 * time.
1390 *
1391 * The caller may receive the data as a single mbuf chain by supplying an
1392 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
1393 * the count in uio_resid.
1394 */
1395int
1396soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
1397    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1398{
1399	struct mbuf *m, **mp;
1400	int flags, len, error, offset;
1401	struct protosw *pr = so->so_proto;
1402	struct mbuf *nextrecord;
1403	int moff, type = 0;
1404	int orig_resid = uio->uio_resid;
1405
1406	mp = mp0;
1407	if (psa != NULL)
1408		*psa = NULL;
1409	if (controlp != NULL)
1410		*controlp = NULL;
1411	if (flagsp != NULL)
1412		flags = *flagsp &~ MSG_EOR;
1413	else
1414		flags = 0;
1415	if (flags & MSG_OOB)
1416		return (soreceive_rcvoob(so, uio, flags));
1417	if (mp != NULL)
1418		*mp = NULL;
1419	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
1420	    && uio->uio_resid)
1421		(*pr->pr_usrreqs->pru_rcvd)(so, 0);
1422
1423	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1424	if (error)
1425		return (error);
1426
1427restart:
1428	SOCKBUF_LOCK(&so->so_rcv);
1429	m = so->so_rcv.sb_mb;
1430	/*
1431	 * If we have less data than requested, block awaiting more (subject
1432	 * to any timeout) if:
1433	 *   1. the current count is less than the low water mark, or
1434	 *   2. MSG_WAITALL is set, and it is possible to do the entire
1435	 *	receive operation at once if we block (resid <= hiwat).
1436	 *   3. MSG_DONTWAIT is not set
1437	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1438	 * we have to do the receive in sections, and thus risk returning a
1439	 * short count if a timeout or signal occurs after we start.
1440	 */
1441	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1442	    so->so_rcv.sb_cc < uio->uio_resid) &&
1443	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
1444	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
1445	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1446		KASSERT(m != NULL || !so->so_rcv.sb_cc,
1447		    ("receive: m == %p so->so_rcv.sb_cc == %u",
1448		    m, so->so_rcv.sb_cc));
1449		if (so->so_error) {
1450			if (m != NULL)
1451				goto dontblock;
1452			error = so->so_error;
1453			if ((flags & MSG_PEEK) == 0)
1454				so->so_error = 0;
1455			SOCKBUF_UNLOCK(&so->so_rcv);
1456			goto release;
1457		}
1458		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1459		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1460			if (m == NULL) {
1461				SOCKBUF_UNLOCK(&so->so_rcv);
1462				goto release;
1463			} else
1464				goto dontblock;
1465		}
1466		for (; m != NULL; m = m->m_next)
1467			if (m->m_type == MT_OOBDATA  || (m->m_flags & M_EOR)) {
1468				m = so->so_rcv.sb_mb;
1469				goto dontblock;
1470			}
1471		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1472		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1473			SOCKBUF_UNLOCK(&so->so_rcv);
1474			error = ENOTCONN;
1475			goto release;
1476		}
1477		if (uio->uio_resid == 0) {
1478			SOCKBUF_UNLOCK(&so->so_rcv);
1479			goto release;
1480		}
1481		if ((so->so_state & SS_NBIO) ||
1482		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1483			SOCKBUF_UNLOCK(&so->so_rcv);
1484			error = EWOULDBLOCK;
1485			goto release;
1486		}
1487		SBLASTRECORDCHK(&so->so_rcv);
1488		SBLASTMBUFCHK(&so->so_rcv);
1489		error = sbwait(&so->so_rcv);
1490		SOCKBUF_UNLOCK(&so->so_rcv);
1491		if (error)
1492			goto release;
1493		goto restart;
1494	}
1495dontblock:
1496	/*
1497	 * From this point onward, we maintain 'nextrecord' as a cache of the
1498	 * pointer to the next record in the socket buffer.  We must keep the
1499	 * various socket buffer pointers and local stack versions of the
1500	 * pointers in sync, pushing out modifications before dropping the
1501	 * socket buffer mutex, and re-reading them when picking it up.
1502	 *
1503	 * Otherwise, we will race with the network stack appending new data
1504	 * or records onto the socket buffer by using inconsistent/stale
1505	 * versions of the field, possibly resulting in socket buffer
1506	 * corruption.
1507	 *
1508	 * By holding the high-level sblock(), we prevent simultaneous
1509	 * readers from pulling off the front of the socket buffer.
1510	 */
1511	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1512	if (uio->uio_td)
1513		uio->uio_td->td_ru.ru_msgrcv++;
1514	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1515	SBLASTRECORDCHK(&so->so_rcv);
1516	SBLASTMBUFCHK(&so->so_rcv);
1517	nextrecord = m->m_nextpkt;
1518	if (pr->pr_flags & PR_ADDR) {
1519		KASSERT(m->m_type == MT_SONAME,
1520		    ("m->m_type == %d", m->m_type));
1521		orig_resid = 0;
1522		if (psa != NULL)
1523			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
1524			    M_NOWAIT);
1525		if (flags & MSG_PEEK) {
1526			m = m->m_next;
1527		} else {
1528			sbfree(&so->so_rcv, m);
1529			so->so_rcv.sb_mb = m_free(m);
1530			m = so->so_rcv.sb_mb;
1531			sockbuf_pushsync(&so->so_rcv, nextrecord);
1532		}
1533	}
1534
1535	/*
1536	 * Process one or more MT_CONTROL mbufs present before any data mbufs
1537	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
1538	 * just copy the data; if !MSG_PEEK, we call into the protocol to
1539	 * perform externalization (or freeing if controlp == NULL).
1540	 */
1541	if (m != NULL && m->m_type == MT_CONTROL) {
1542		struct mbuf *cm = NULL, *cmn;
1543		struct mbuf **cme = &cm;
1544
1545		do {
1546			if (flags & MSG_PEEK) {
1547				if (controlp != NULL) {
1548					*controlp = m_copy(m, 0, m->m_len);
1549					controlp = &(*controlp)->m_next;
1550				}
1551				m = m->m_next;
1552			} else {
1553				sbfree(&so->so_rcv, m);
1554				so->so_rcv.sb_mb = m->m_next;
1555				m->m_next = NULL;
1556				*cme = m;
1557				cme = &(*cme)->m_next;
1558				m = so->so_rcv.sb_mb;
1559			}
1560		} while (m != NULL && m->m_type == MT_CONTROL);
1561		if ((flags & MSG_PEEK) == 0)
1562			sockbuf_pushsync(&so->so_rcv, nextrecord);
1563		while (cm != NULL) {
1564			cmn = cm->m_next;
1565			cm->m_next = NULL;
1566			if (pr->pr_domain->dom_externalize != NULL) {
1567				SOCKBUF_UNLOCK(&so->so_rcv);
1568				error = (*pr->pr_domain->dom_externalize)
1569				    (cm, controlp);
1570				SOCKBUF_LOCK(&so->so_rcv);
1571			} else if (controlp != NULL)
1572				*controlp = cm;
1573			else
1574				m_freem(cm);
1575			if (controlp != NULL) {
1576				orig_resid = 0;
1577				while (*controlp != NULL)
1578					controlp = &(*controlp)->m_next;
1579			}
1580			cm = cmn;
1581		}
1582		if (m != NULL)
1583			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1584		else
1585			nextrecord = so->so_rcv.sb_mb;
1586		orig_resid = 0;
1587	}
1588	if (m != NULL) {
1589		if ((flags & MSG_PEEK) == 0) {
1590			KASSERT(m->m_nextpkt == nextrecord,
1591			    ("soreceive: post-control, nextrecord !sync"));
1592			if (nextrecord == NULL) {
1593				KASSERT(so->so_rcv.sb_mb == m,
1594				    ("soreceive: post-control, sb_mb!=m"));
1595				KASSERT(so->so_rcv.sb_lastrecord == m,
1596				    ("soreceive: post-control, lastrecord!=m"));
1597			}
1598		}
1599		type = m->m_type;
1600		if (type == MT_OOBDATA)
1601			flags |= MSG_OOB;
1602	} else {
1603		if ((flags & MSG_PEEK) == 0) {
1604			KASSERT(so->so_rcv.sb_mb == nextrecord,
1605			    ("soreceive: sb_mb != nextrecord"));
1606			if (so->so_rcv.sb_mb == NULL) {
1607				KASSERT(so->so_rcv.sb_lastrecord == NULL,
1608				    ("soreceive: sb_lastrecord != NULL"));
1609			}
1610		}
1611	}
1612	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1613	SBLASTRECORDCHK(&so->so_rcv);
1614	SBLASTMBUFCHK(&so->so_rcv);
1615
1616	/*
1617	 * Now continue to read any data mbufs off of the head of the socket
1618	 * buffer until the read request is satisfied.  Note that 'type' is
1619	 * used to store the type of any mbuf reads that have happened so far
1620	 * such that soreceive() can stop reading if the type changes, which
1621	 * causes soreceive() to return only one of regular data and inline
1622	 * out-of-band data in a single socket receive operation.
1623	 */
1624	moff = 0;
1625	offset = 0;
1626	while (m != NULL && uio->uio_resid > 0 && error == 0) {
1627		/*
1628		 * If the type of mbuf has changed since the last mbuf
1629		 * examined ('type'), end the receive operation.
1630		 */
1631		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1632		if (m->m_type == MT_OOBDATA) {
1633			if (type != MT_OOBDATA)
1634				break;
1635		} else if (type == MT_OOBDATA)
1636			break;
1637		else
1638		    KASSERT(m->m_type == MT_DATA,
1639			("m->m_type == %d", m->m_type));
1640		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1641		len = uio->uio_resid;
1642		if (so->so_oobmark && len > so->so_oobmark - offset)
1643			len = so->so_oobmark - offset;
1644		if (len > m->m_len - moff)
1645			len = m->m_len - moff;
1646		/*
1647		 * If mp is set, just pass back the mbufs.  Otherwise copy
1648		 * them out via the uio, then free.  Sockbuf must be
1649		 * consistent here (points to current mbuf, it points to next
1650		 * record) when we drop priority; we must note any additions
1651		 * to the sockbuf when we block interrupts again.
1652		 */
1653		if (mp == NULL) {
1654			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1655			SBLASTRECORDCHK(&so->so_rcv);
1656			SBLASTMBUFCHK(&so->so_rcv);
1657			SOCKBUF_UNLOCK(&so->so_rcv);
1658#ifdef ZERO_COPY_SOCKETS
1659			if (so_zero_copy_receive) {
1660				int disposable;
1661
1662				if ((m->m_flags & M_EXT)
1663				 && (m->m_ext.ext_type == EXT_DISPOSABLE))
1664					disposable = 1;
1665				else
1666					disposable = 0;
1667
1668				error = uiomoveco(mtod(m, char *) + moff,
1669						  (int)len, uio,
1670						  disposable);
1671			} else
1672#endif /* ZERO_COPY_SOCKETS */
1673			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1674			SOCKBUF_LOCK(&so->so_rcv);
1675			if (error) {
1676				/*
1677				 * The MT_SONAME mbuf has already been removed
1678				 * from the record, so it is necessary to
1679				 * remove the data mbufs, if any, to preserve
1680				 * the invariant in the case of PR_ADDR that
1681				 * requires MT_SONAME mbufs at the head of
1682				 * each record.
1683				 */
1684				if (m && pr->pr_flags & PR_ATOMIC &&
1685				    ((flags & MSG_PEEK) == 0))
1686					(void)sbdroprecord_locked(&so->so_rcv);
1687				SOCKBUF_UNLOCK(&so->so_rcv);
1688				goto release;
1689			}
1690		} else
1691			uio->uio_resid -= len;
1692		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1693		if (len == m->m_len - moff) {
1694			if (m->m_flags & M_EOR)
1695				flags |= MSG_EOR;
1696			if (flags & MSG_PEEK) {
1697				m = m->m_next;
1698				moff = 0;
1699			} else {
1700				nextrecord = m->m_nextpkt;
1701				sbfree(&so->so_rcv, m);
1702				if (mp != NULL) {
1703					*mp = m;
1704					mp = &m->m_next;
1705					so->so_rcv.sb_mb = m = m->m_next;
1706					*mp = NULL;
1707				} else {
1708					so->so_rcv.sb_mb = m_free(m);
1709					m = so->so_rcv.sb_mb;
1710				}
1711				sockbuf_pushsync(&so->so_rcv, nextrecord);
1712				SBLASTRECORDCHK(&so->so_rcv);
1713				SBLASTMBUFCHK(&so->so_rcv);
1714			}
1715		} else {
1716			if (flags & MSG_PEEK)
1717				moff += len;
1718			else {
1719				if (mp != NULL) {
1720					int copy_flag;
1721
1722					if (flags & MSG_DONTWAIT)
1723						copy_flag = M_DONTWAIT;
1724					else
1725						copy_flag = M_WAIT;
1726					if (copy_flag == M_WAIT)
1727						SOCKBUF_UNLOCK(&so->so_rcv);
1728					*mp = m_copym(m, 0, len, copy_flag);
1729					if (copy_flag == M_WAIT)
1730						SOCKBUF_LOCK(&so->so_rcv);
1731 					if (*mp == NULL) {
1732 						/*
1733 						 * m_copym() couldn't
1734						 * allocate an mbuf.  Adjust
1735						 * uio_resid back (it was
1736						 * adjusted down by len
1737						 * bytes, which we didn't end
1738						 * up "copying" over).
1739 						 */
1740 						uio->uio_resid += len;
1741 						break;
1742 					}
1743				}
1744				m->m_data += len;
1745				m->m_len -= len;
1746				so->so_rcv.sb_cc -= len;
1747			}
1748		}
1749		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1750		if (so->so_oobmark) {
1751			if ((flags & MSG_PEEK) == 0) {
1752				so->so_oobmark -= len;
1753				if (so->so_oobmark == 0) {
1754					so->so_rcv.sb_state |= SBS_RCVATMARK;
1755					break;
1756				}
1757			} else {
1758				offset += len;
1759				if (offset == so->so_oobmark)
1760					break;
1761			}
1762		}
1763		if (flags & MSG_EOR)
1764			break;
1765		/*
1766		 * If the MSG_WAITALL flag is set (for a non-atomic socket), we
1767		 * must not quit until "uio->uio_resid == 0" or the receive is
1768		 * terminated by an error.  If a signal/timeout occurs, return
1769		 * with a short count but without error.  Keep the sockbuf locked
1770		 * against other readers.
1771		 */
1772		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1773		    !sosendallatonce(so) && nextrecord == NULL) {
1774			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1775			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
1776				break;
1777			/*
1778			 * Notify the protocol that some data has been
1779			 * drained before blocking.
1780			 */
1781			if (pr->pr_flags & PR_WANTRCVD) {
1782				SOCKBUF_UNLOCK(&so->so_rcv);
1783				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1784				SOCKBUF_LOCK(&so->so_rcv);
1785			}
1786			SBLASTRECORDCHK(&so->so_rcv);
1787			SBLASTMBUFCHK(&so->so_rcv);
1788			error = sbwait(&so->so_rcv);
1789			if (error) {
1790				SOCKBUF_UNLOCK(&so->so_rcv);
1791				goto release;
1792			}
1793			m = so->so_rcv.sb_mb;
1794			if (m != NULL)
1795				nextrecord = m->m_nextpkt;
1796		}
1797	}
1798
1799	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1800	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1801		flags |= MSG_TRUNC;
1802		if ((flags & MSG_PEEK) == 0)
1803			(void) sbdroprecord_locked(&so->so_rcv);
1804	}
1805	if ((flags & MSG_PEEK) == 0) {
1806		if (m == NULL) {
1807			/*
1808			 * First part is an inline SB_EMPTY_FIXUP().  Second
1809			 * part makes sure sb_lastrecord is up-to-date if
1810			 * there is still data in the socket buffer.
1811			 */
1812			so->so_rcv.sb_mb = nextrecord;
1813			if (so->so_rcv.sb_mb == NULL) {
1814				so->so_rcv.sb_mbtail = NULL;
1815				so->so_rcv.sb_lastrecord = NULL;
1816			} else if (nextrecord->m_nextpkt == NULL)
1817				so->so_rcv.sb_lastrecord = nextrecord;
1818		}
1819		SBLASTRECORDCHK(&so->so_rcv);
1820		SBLASTMBUFCHK(&so->so_rcv);
1821		/*
1822		 * If soreceive() is being done from the socket callback, then
1823		 * we don't need to generate an ACK to the peer to update the
1824		 * window, since the ACK will be generated on return to TCP.
1825		 */
1826		if (!(flags & MSG_SOCALLBCK) &&
1827		    (pr->pr_flags & PR_WANTRCVD)) {
1828			SOCKBUF_UNLOCK(&so->so_rcv);
1829			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1830			SOCKBUF_LOCK(&so->so_rcv);
1831		}
1832	}
1833	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1834	if (orig_resid == uio->uio_resid && orig_resid &&
1835	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1836		SOCKBUF_UNLOCK(&so->so_rcv);
1837		goto restart;
1838	}
1839	SOCKBUF_UNLOCK(&so->so_rcv);
1840
1841	if (flagsp != NULL)
1842		*flagsp |= flags;
1843release:
1844	sbunlock(&so->so_rcv);
1845	return (error);
1846}
1847
1848int
1849soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
1850    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1851{
1852
1853	/* XXXRW: Temporary debugging. */
1854	KASSERT(so->so_proto->pr_usrreqs->pru_soreceive != soreceive,
1855	    ("soreceive: protocol calls soreceive"));
1856
1857	return (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
1858	    controlp, flagsp));
1859}
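
/*
 * soreceive() only dispatches to the protocol's pru_soreceive method.  As an
 * illustrative sketch (the "myproto" name below is hypothetical and not part
 * of this file), a protocol that is content with the generic implementation
 * simply points its pr_usrreqs entry at soreceive_generic():
 *
 *	struct pr_usrreqs myproto_usrreqs = {
 *		...
 *		.pru_soreceive =	soreceive_generic,
 *		...
 *	};
 */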
1860
1861int
1862soshutdown(struct socket *so, int how)
1863{
1864	struct protosw *pr = so->so_proto;
1865
1866	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
1867		return (EINVAL);
1868	if (pr->pr_usrreqs->pru_flush != NULL) {
1869		(*pr->pr_usrreqs->pru_flush)(so, how);
1870	}
1871	if (how != SHUT_WR)
1872		sorflush(so);
1873	if (how != SHUT_RD)
1874		return ((*pr->pr_usrreqs->pru_shutdown)(so));
1875	return (0);
1876}
1877
1878void
1879sorflush(struct socket *so)
1880{
1881	struct sockbuf *sb = &so->so_rcv;
1882	struct protosw *pr = so->so_proto;
1883	struct sockbuf asb;
1884
1885	/*
1886	 * In order to avoid calling dom_dispose with the socket buffer mutex
1887	 * held, and in order to generally avoid holding the lock for a long
1888	 * time, we make a copy of the socket buffer and clear the original
1889	 * (except locks, state).  The new socket buffer copy won't have
1890	 * initialized locks so we can only call routines that won't use or
1891	 * assert those locks.
1892	 *
1893	 * Dislodge threads currently blocked in receive and wait to acquire
1894	 * a lock against other simultaneous readers before clearing the
1895	 * socket buffer.  Don't let the lock acquisition be interrupted by a
1896	 * signal, despite any socket disposition toward interruptible waiting.
1897	 */
1898	socantrcvmore(so);
1899	(void) sblock(sb, SBL_WAIT | SBL_NOINTR);
1900
1901	/*
1902	 * Invalidate/clear most of the sockbuf structure, but leave selinfo
1903	 * and mutex data unchanged.
1904	 */
1905	SOCKBUF_LOCK(sb);
1906	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
1907	bcopy(&sb->sb_startzero, &asb.sb_startzero,
1908	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
1909	bzero(&sb->sb_startzero,
1910	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
1911	SOCKBUF_UNLOCK(sb);
1912	sbunlock(sb);
1913
1914	/*
1915	 * Dispose of special rights and flush the socket buffer.  Don't call
1916	 * any unsafe routines (that rely on locks being initialized) on asb.
1917	 */
1918	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
1919		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
1920	sbrelease_internal(&asb, so);
1921}
1922
1923/*
1924 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
1925 * additional variant to handle the case where the option value needs to be
1926 * some kind of integer, but not a specific size.  In addition to their use
1927 * here, these functions are also called by the protocol-level pr_ctloutput()
1928 * routines.
1929 */
1930int
1931sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
1932{
1933	size_t	valsize;
1934
1935	/*
1936	 * If the user gives us more than we wanted, we ignore it, but if we
1937	 * don't get the minimum length the caller wants, we return EINVAL.
1938	 * On success, sopt->sopt_valsize is set to however much we actually
1939	 * retrieved.
1940	 */
1941	if ((valsize = sopt->sopt_valsize) < minlen)
1942		return EINVAL;
1943	if (valsize > len)
1944		sopt->sopt_valsize = valsize = len;
1945
1946	if (sopt->sopt_td != NULL)
1947		return (copyin(sopt->sopt_val, buf, valsize));
1948
1949	bcopy(sopt->sopt_val, buf, valsize);
1950	return (0);
1951}
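
/*
 * As a hedged example of the pr_ctloutput() usage mentioned above, a
 * protocol's SOPT_SET handler might fetch an integer option value roughly
 * like this ("MYPROTO_KEEPIDLE" and "myproto_keepidle" are hypothetical
 * names, not defined in this file):
 *
 *	int error, optval;
 *
 *	case MYPROTO_KEEPIDLE:
 *		error = sooptcopyin(sopt, &optval, sizeof optval,
 *		    sizeof optval);
 *		if (error)
 *			return (error);
 *		if (optval < 0)
 *			return (EINVAL);
 *		myproto_keepidle = optval;
 *		break;
 */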
1952
1953/*
1954 * Kernel version of setsockopt(2).
1955 *
1956 * XXX: optlen is size_t, not socklen_t
1957 */
1958int
1959so_setsockopt(struct socket *so, int level, int optname, void *optval,
1960    size_t optlen)
1961{
1962	struct sockopt sopt;
1963
1964	sopt.sopt_level = level;
1965	sopt.sopt_name = optname;
1966	sopt.sopt_dir = SOPT_SET;
1967	sopt.sopt_val = optval;
1968	sopt.sopt_valsize = optlen;
1969	sopt.sopt_td = NULL;
1970	return (sosetopt(so, &sopt));
1971}
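
/*
 * For example (an illustrative sketch, not taken from an existing caller),
 * an in-kernel consumer that owns a socket could enable keep-alives with:
 *
 *	int one = 1;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
 */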
1972
1973int
1974sosetopt(struct socket *so, struct sockopt *sopt)
1975{
1976	int	error, optval;
1977	struct	linger l;
1978	struct	timeval tv;
1979	u_long  val;
1980#ifdef MAC
1981	struct mac extmac;
1982#endif
1983
1984	error = 0;
1985	if (sopt->sopt_level != SOL_SOCKET) {
1986		if (so->so_proto && so->so_proto->pr_ctloutput)
1987			return ((*so->so_proto->pr_ctloutput)
1988				  (so, sopt));
1989		error = ENOPROTOOPT;
1990	} else {
1991		switch (sopt->sopt_name) {
1992#ifdef INET
1993		case SO_ACCEPTFILTER:
1994			error = do_setopt_accept_filter(so, sopt);
1995			if (error)
1996				goto bad;
1997			break;
1998#endif
1999		case SO_LINGER:
2000			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
2001			if (error)
2002				goto bad;
2003
2004			SOCK_LOCK(so);
2005			so->so_linger = l.l_linger;
2006			if (l.l_onoff)
2007				so->so_options |= SO_LINGER;
2008			else
2009				so->so_options &= ~SO_LINGER;
2010			SOCK_UNLOCK(so);
2011			break;
2012
2013		case SO_DEBUG:
2014		case SO_KEEPALIVE:
2015		case SO_DONTROUTE:
2016		case SO_USELOOPBACK:
2017		case SO_BROADCAST:
2018		case SO_REUSEADDR:
2019		case SO_REUSEPORT:
2020		case SO_OOBINLINE:
2021		case SO_TIMESTAMP:
2022		case SO_BINTIME:
2023		case SO_NOSIGPIPE:
2024			error = sooptcopyin(sopt, &optval, sizeof optval,
2025					    sizeof optval);
2026			if (error)
2027				goto bad;
2028			SOCK_LOCK(so);
2029			if (optval)
2030				so->so_options |= sopt->sopt_name;
2031			else
2032				so->so_options &= ~sopt->sopt_name;
2033			SOCK_UNLOCK(so);
2034			break;
2035
2036		case SO_SETFIB:
2037			error = sooptcopyin(sopt, &optval, sizeof optval,
2038					    sizeof optval);
2039					    sizeof optval);
			if (error)
				goto bad;
2040				error = EINVAL;
2041				goto bad;
2042			}
2043			if ((so->so_proto->pr_domain->dom_family == PF_INET) ||
2044			    (so->so_proto->pr_domain->dom_family == PF_ROUTE)) {
2045				so->so_fibnum = optval;
2046			} else {
2047				so->so_fibnum = 0;
2048			}
2049			break;
2050		case SO_SNDBUF:
2051		case SO_RCVBUF:
2052		case SO_SNDLOWAT:
2053		case SO_RCVLOWAT:
2054			error = sooptcopyin(sopt, &optval, sizeof optval,
2055					    sizeof optval);
2056			if (error)
2057				goto bad;
2058
2059			/*
2060			 * Values < 1 make no sense for any of these options,
2061			 * so disallow them.
2062			 */
2063			if (optval < 1) {
2064				error = EINVAL;
2065				goto bad;
2066			}
2067
2068			switch (sopt->sopt_name) {
2069			case SO_SNDBUF:
2070			case SO_RCVBUF:
2071				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
2072				    &so->so_snd : &so->so_rcv, (u_long)optval,
2073				    so, curthread) == 0) {
2074					error = ENOBUFS;
2075					goto bad;
2076				}
2077				(sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
2078				    &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
2079				break;
2080
2081			/*
2082			 * Make sure the low-water is never greater than the
2083			 * high-water.
2084			 */
2085			case SO_SNDLOWAT:
2086				SOCKBUF_LOCK(&so->so_snd);
2087				so->so_snd.sb_lowat =
2088				    (optval > so->so_snd.sb_hiwat) ?
2089				    so->so_snd.sb_hiwat : optval;
2090				SOCKBUF_UNLOCK(&so->so_snd);
2091				break;
2092			case SO_RCVLOWAT:
2093				SOCKBUF_LOCK(&so->so_rcv);
2094				so->so_rcv.sb_lowat =
2095				    (optval > so->so_rcv.sb_hiwat) ?
2096				    so->so_rcv.sb_hiwat : optval;
2097				SOCKBUF_UNLOCK(&so->so_rcv);
2098				break;
2099			}
2100			break;
2101
2102		case SO_SNDTIMEO:
2103		case SO_RCVTIMEO:
2104#ifdef COMPAT_IA32
2105			if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
2106				struct timeval32 tv32;
2107
2108				error = sooptcopyin(sopt, &tv32, sizeof tv32,
2109				    sizeof tv32);
2110				CP(tv32, tv, tv_sec);
2111				CP(tv32, tv, tv_usec);
2112			} else
2113#endif
2114				error = sooptcopyin(sopt, &tv, sizeof tv,
2115				    sizeof tv);
2116			if (error)
2117				goto bad;
2118
2119			/* assert(hz > 0); */
2120			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
2121			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
2122				error = EDOM;
2123				goto bad;
2124			}
2125			/* assert(tick > 0); */
2126			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
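			/*
			 * Worked example (assuming hz = 1000, so tick =
			 * 1000 microseconds): a request of 2.5 seconds
			 * (tv_sec = 2, tv_usec = 500000) becomes
			 * val = 2 * 1000 + 500000 / 1000 = 2500 ticks.
			 */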
2127			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
2128			if (val > INT_MAX) {
2129				error = EDOM;
2130				goto bad;
2131			}
2132			if (val == 0 && tv.tv_usec != 0)
2133				val = 1;
2134
2135			switch (sopt->sopt_name) {
2136			case SO_SNDTIMEO:
2137				so->so_snd.sb_timeo = val;
2138				break;
2139			case SO_RCVTIMEO:
2140				so->so_rcv.sb_timeo = val;
2141				break;
2142			}
2143			break;
2144
2145		case SO_LABEL:
2146#ifdef MAC
2147			error = sooptcopyin(sopt, &extmac, sizeof extmac,
2148			    sizeof extmac);
2149			if (error)
2150				goto bad;
2151			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2152			    so, &extmac);
2153#else
2154			error = EOPNOTSUPP;
2155#endif
2156			break;
2157
2158		default:
2159			error = ENOPROTOOPT;
2160			break;
2161		}
2162		if (error == 0 && so->so_proto != NULL &&
2163		    so->so_proto->pr_ctloutput != NULL) {
2164			(void) ((*so->so_proto->pr_ctloutput)
2165				  (so, sopt));
2166		}
2167	}
2168bad:
2169	return (error);
2170}
2171
2172/*
2173 * Helper routine for getsockopt.
2174 */
2175int
2176sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2177{
2178	int	error;
2179	size_t	valsize;
2180
2181	error = 0;
2182
2183	/*
2184	 * Documented get behavior is that we always return a value, possibly
2185	 * truncated to fit in the user's buffer.  Traditional behavior is
2186	 * that we always tell the user precisely how much we copied, rather
2187	 * than something useful like the total amount we had available for
2188	 * her.  Note that this interface is not idempotent; the entire
2189	 * answer must be generated ahead of time.
2190	 */
2191	valsize = min(len, sopt->sopt_valsize);
2192	sopt->sopt_valsize = valsize;
2193	if (sopt->sopt_val != NULL) {
2194		if (sopt->sopt_td != NULL)
2195			error = copyout(buf, sopt->sopt_val, valsize);
2196		else
2197			bcopy(buf, sopt->sopt_val, valsize);
2198	}
2199	return (error);
2200}
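
/*
 * As a hedged example of the corresponding SOPT_GET side, a protocol's
 * pr_ctloutput() handler might return an integer option roughly like this
 * ("MYPROTO_KEEPIDLE" and "myproto_keepidle" are hypothetical names):
 *
 *	case MYPROTO_KEEPIDLE:
 *		optval = myproto_keepidle;
 *		error = sooptcopyout(sopt, &optval, sizeof optval);
 *		break;
 */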
2201
2202int
2203sogetopt(struct socket *so, struct sockopt *sopt)
2204{
2205	int	error, optval;
2206	struct	linger l;
2207	struct	timeval tv;
2208#ifdef MAC
2209	struct mac extmac;
2210#endif
2211
2212	error = 0;
2213	if (sopt->sopt_level != SOL_SOCKET) {
2214		if (so->so_proto && so->so_proto->pr_ctloutput) {
2215			return ((*so->so_proto->pr_ctloutput)
2216				  (so, sopt));
2217		} else
2218			return (ENOPROTOOPT);
2219	} else {
2220		switch (sopt->sopt_name) {
2221#ifdef INET
2222		case SO_ACCEPTFILTER:
2223			error = do_getopt_accept_filter(so, sopt);
2224			break;
2225#endif
2226		case SO_LINGER:
2227			SOCK_LOCK(so);
2228			l.l_onoff = so->so_options & SO_LINGER;
2229			l.l_linger = so->so_linger;
2230			SOCK_UNLOCK(so);
2231			error = sooptcopyout(sopt, &l, sizeof l);
2232			break;
2233
2234		case SO_USELOOPBACK:
2235		case SO_DONTROUTE:
2236		case SO_DEBUG:
2237		case SO_KEEPALIVE:
2238		case SO_REUSEADDR:
2239		case SO_REUSEPORT:
2240		case SO_BROADCAST:
2241		case SO_OOBINLINE:
2242		case SO_ACCEPTCONN:
2243		case SO_TIMESTAMP:
2244		case SO_BINTIME:
2245		case SO_NOSIGPIPE:
2246			optval = so->so_options & sopt->sopt_name;
2247integer:
2248			error = sooptcopyout(sopt, &optval, sizeof optval);
2249			break;
2250
2251		case SO_TYPE:
2252			optval = so->so_type;
2253			goto integer;
2254
2255		case SO_ERROR:
2256			SOCK_LOCK(so);
2257			optval = so->so_error;
2258			so->so_error = 0;
2259			SOCK_UNLOCK(so);
2260			goto integer;
2261
2262		case SO_SNDBUF:
2263			optval = so->so_snd.sb_hiwat;
2264			goto integer;
2265
2266		case SO_RCVBUF:
2267			optval = so->so_rcv.sb_hiwat;
2268			goto integer;
2269
2270		case SO_SNDLOWAT:
2271			optval = so->so_snd.sb_lowat;
2272			goto integer;
2273
2274		case SO_RCVLOWAT:
2275			optval = so->so_rcv.sb_lowat;
2276			goto integer;
2277
2278		case SO_SNDTIMEO:
2279		case SO_RCVTIMEO:
2280			optval = (sopt->sopt_name == SO_SNDTIMEO ?
2281				  so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2282
2283			tv.tv_sec = optval / hz;
2284			tv.tv_usec = (optval % hz) * tick;
2285#ifdef COMPAT_IA32
2286			if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
2287				struct timeval32 tv32;
2288
2289				CP(tv, tv32, tv_sec);
2290				CP(tv, tv32, tv_usec);
2291				error = sooptcopyout(sopt, &tv32, sizeof tv32);
2292			} else
2293#endif
2294				error = sooptcopyout(sopt, &tv, sizeof tv);
2295			break;
2296
2297		case SO_LABEL:
2298#ifdef MAC
2299			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2300			    sizeof(extmac));
2301			if (error)
2302				return (error);
2303			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
2304			    so, &extmac);
2305			if (error)
2306				return (error);
2307			error = sooptcopyout(sopt, &extmac, sizeof extmac);
2308#else
2309			error = EOPNOTSUPP;
2310#endif
2311			break;
2312
2313		case SO_PEERLABEL:
2314#ifdef MAC
2315			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2316			    sizeof(extmac));
2317			if (error)
2318				return (error);
2319			error = mac_getsockopt_peerlabel(
2320			    sopt->sopt_td->td_ucred, so, &extmac);
2321			if (error)
2322				return (error);
2323			error = sooptcopyout(sopt, &extmac, sizeof extmac);
2324#else
2325			error = EOPNOTSUPP;
2326#endif
2327			break;
2328
2329		case SO_LISTENQLIMIT:
2330			optval = so->so_qlimit;
2331			goto integer;
2332
2333		case SO_LISTENQLEN:
2334			optval = so->so_qlen;
2335			goto integer;
2336
2337		case SO_LISTENINCQLEN:
2338			optval = so->so_incqlen;
2339			goto integer;
2340
2341		default:
2342			error = ENOPROTOOPT;
2343			break;
2344		}
2345		return (error);
2346	}
2347}
2348
2349/* XXX: prepare mbuf for (__FreeBSD__ < 3) routines. */
2350int
2351soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2352{
2353	struct mbuf *m, *m_prev;
2354	int sopt_size = sopt->sopt_valsize;
2355
2356	MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
2357	if (m == NULL)
2358		return ENOBUFS;
2359	if (sopt_size > MLEN) {
2360		MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT);
2361		if ((m->m_flags & M_EXT) == 0) {
2362			m_free(m);
2363			return ENOBUFS;
2364		}
2365		m->m_len = min(MCLBYTES, sopt_size);
2366	} else {
2367		m->m_len = min(MLEN, sopt_size);
2368	}
2369	sopt_size -= m->m_len;
2370	*mp = m;
2371	m_prev = m;
2372
2373	while (sopt_size) {
2374		MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
2375		if (m == NULL) {
2376			m_freem(*mp);
2377			return ENOBUFS;
2378		}
2379		if (sopt_size > MLEN) {
2380			MCLGET(m, sopt->sopt_td != NULL ? M_WAIT :
2381			    M_DONTWAIT);
2382			if ((m->m_flags & M_EXT) == 0) {
2383				m_freem(m);
2384				m_freem(*mp);
2385				return ENOBUFS;
2386			}
2387			m->m_len = min(MCLBYTES, sopt_size);
2388		} else {
2389			m->m_len = min(MLEN, sopt_size);
2390		}
2391		sopt_size -= m->m_len;
2392		m_prev->m_next = m;
2393		m_prev = m;
2394	}
2395	return (0);
2396}
2397
2398/* XXX: copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
2399int
2400soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2401{
2402	struct mbuf *m0 = m;
2403
2404	if (sopt->sopt_val == NULL)
2405		return (0);
2406	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2407		if (sopt->sopt_td != NULL) {
2408			int error;
2409
2410			error = copyin(sopt->sopt_val, mtod(m, char *),
2411				       m->m_len);
2412			if (error != 0) {
2413				m_freem(m0);
2414				return(error);
2415			}
2416		} else
2417			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2418		sopt->sopt_valsize -= m->m_len;
2419		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2420		m = m->m_next;
2421	}
2422	if (m != NULL) /* soopt_getm() should have allocated enough space */
2423		panic("ip6_sooptmcopyin");
2424	return (0);
2425}
2426
2427/* XXX: copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
2428int
2429soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2430{
2431	struct mbuf *m0 = m;
2432	size_t valsize = 0;
2433
2434	if (sopt->sopt_val == NULL)
2435		return (0);
2436	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2437		if (sopt->sopt_td != NULL) {
2438			int error;
2439
2440			error = copyout(mtod(m, char *), sopt->sopt_val,
2441				       m->m_len);
2442			if (error != 0) {
2443				m_freem(m0);
2444				return(error);
2445			}
2446		} else
2447			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2448		sopt->sopt_valsize -= m->m_len;
2449		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2450		valsize += m->m_len;
2451		m = m->m_next;
2452	}
2453	if (m != NULL) {
2454		/* The user-supplied buffer was not large enough for the full value. */
2455		m_freem(m0);
2456		return(EINVAL);
2457	}
2458	sopt->sopt_valsize = valsize;
2459	return (0);
2460}
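
/*
 * The three soopt_*() mbuf helpers above are normally used together by
 * option code that still operates on mbuf chains: soopt_getm() allocates a
 * chain large enough for the option value, soopt_mcopyin() copies the value
 * into it, and soopt_mcopyout() copies a result back out.  A hedged sketch
 * of the set-side pattern ("myproto_do_pktoptions" is hypothetical):
 *
 *	struct mbuf *m;
 *	int error;
 *
 *	error = soopt_getm(sopt, &m);
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);
 *	if (error == 0)
 *		error = myproto_do_pktoptions(so, &m);
 */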
2461
2462/*
2463 * sohasoutofband(): protocol notifies socket layer of the arrival of new
2464 * out-of-band data, which will then notify socket consumers.
2465 */
2466void
2467sohasoutofband(struct socket *so)
2468{
2469
2470	if (so->so_sigio != NULL)
2471		pgsigio(&so->so_sigio, SIGURG, 0);
2472	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2473}
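
/*
 * A hedged sketch of the caller side: a stream protocol that has just
 * learned of pending urgent data would typically record the OOB mark and
 * then call sohasoutofband() ("urgent_offset" is hypothetical):
 *
 *	so->so_oobmark = so->so_rcv.sb_cc + urgent_offset;
 *	sohasoutofband(so);
 */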
2474
2475int
2476sopoll(struct socket *so, int events, struct ucred *active_cred,
2477    struct thread *td)
2478{
2479
2480	/* XXXRW: Temporary debugging. */
2481	KASSERT(so->so_proto->pr_usrreqs->pru_sopoll != sopoll,
2482	    ("sopoll: protocol calls sopoll"));
2483
2484	return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
2485	    td));
2486}
2487
2488int
2489sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
2490    struct thread *td)
2491{
2492	int revents = 0;
2493
2494	SOCKBUF_LOCK(&so->so_snd);
2495	SOCKBUF_LOCK(&so->so_rcv);
2496	if (events & (POLLIN | POLLRDNORM))
2497		if (soreadable(so))
2498			revents |= events & (POLLIN | POLLRDNORM);
2499
2500	if (events & POLLINIGNEOF)
2501		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
2502		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
2503			revents |= POLLINIGNEOF;
2504
2505	if (events & (POLLOUT | POLLWRNORM))
2506		if (sowriteable(so))
2507			revents |= events & (POLLOUT | POLLWRNORM);
2508
2509	if (events & (POLLPRI | POLLRDBAND))
2510		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
2511			revents |= events & (POLLPRI | POLLRDBAND);
2512
2513	if (revents == 0) {
2514		if (events &
2515		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
2516		     POLLRDBAND)) {
2517			selrecord(td, &so->so_rcv.sb_sel);
2518			so->so_rcv.sb_flags |= SB_SEL;
2519		}
2520
2521		if (events & (POLLOUT | POLLWRNORM)) {
2522			selrecord(td, &so->so_snd.sb_sel);
2523			so->so_snd.sb_flags |= SB_SEL;
2524		}
2525	}
2526
2527	SOCKBUF_UNLOCK(&so->so_rcv);
2528	SOCKBUF_UNLOCK(&so->so_snd);
2529	return (revents);
2530}
2531
2532int
2533soo_kqfilter(struct file *fp, struct knote *kn)
2534{
2535	struct socket *so = kn->kn_fp->f_data;
2536	struct sockbuf *sb;
2537
2538	switch (kn->kn_filter) {
2539	case EVFILT_READ:
2540		if (so->so_options & SO_ACCEPTCONN)
2541			kn->kn_fop = &solisten_filtops;
2542		else
2543			kn->kn_fop = &soread_filtops;
2544		sb = &so->so_rcv;
2545		break;
2546	case EVFILT_WRITE:
2547		kn->kn_fop = &sowrite_filtops;
2548		sb = &so->so_snd;
2549		break;
2550	default:
2551		return (EINVAL);
2552	}
2553
2554	SOCKBUF_LOCK(sb);
2555	knlist_add(&sb->sb_sel.si_note, kn, 1);
2556	sb->sb_flags |= SB_KNOTE;
2557	SOCKBUF_UNLOCK(sb);
2558	return (0);
2559}
2560
2561/*
2562 * Some routines that return EOPNOTSUPP for entry points that are not
2563 * supported by a protocol.  Fill in as needed.
2564 */
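
/*
 * For instance (an illustrative sketch; the "myproto" names are
 * hypothetical), a datagram protocol that cannot accept or listen might
 * wire the unsupported entries to these stubs:
 *
 *	struct pr_usrreqs myproto_usrreqs = {
 *		.pru_attach =		myproto_attach,
 *		.pru_send =		myproto_send,
 *		.pru_accept =		pru_accept_notsupp,
 *		.pru_listen =		pru_listen_notsupp,
 *		.pru_rcvoob =		pru_rcvoob_notsupp,
 *		.pru_sense =		pru_sense_null,
 *	};
 */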
2565int
2566pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
2567{
2568
2569	return EOPNOTSUPP;
2570}
2571
2572int
2573pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
2574{
2575
2576	return EOPNOTSUPP;
2577}
2578
2579int
2580pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
2581{
2582
2583	return EOPNOTSUPP;
2584}
2585
2586int
2587pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
2588{
2589
2590	return EOPNOTSUPP;
2591}
2592
2593int
2594pru_connect2_notsupp(struct socket *so1, struct socket *so2)
2595{
2596
2597	return EOPNOTSUPP;
2598}
2599
2600int
2601pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
2602    struct ifnet *ifp, struct thread *td)
2603{
2604
2605	return EOPNOTSUPP;
2606}
2607
2608int
2609pru_disconnect_notsupp(struct socket *so)
2610{
2611
2612	return EOPNOTSUPP;
2613}
2614
2615int
2616pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
2617{
2618
2619	return EOPNOTSUPP;
2620}
2621
2622int
2623pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
2624{
2625
2626	return EOPNOTSUPP;
2627}
2628
2629int
2630pru_rcvd_notsupp(struct socket *so, int flags)
2631{
2632
2633	return EOPNOTSUPP;
2634}
2635
2636int
2637pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
2638{
2639
2640	return EOPNOTSUPP;
2641}
2642
2643int
2644pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
2645    struct sockaddr *addr, struct mbuf *control, struct thread *td)
2646{
2647
2648	return EOPNOTSUPP;
2649}
2650
2651/*
2652 * This isn't really a ``null'' operation, but it's the default one and
2653 * doesn't do anything destructive.
2654 */
2655int
2656pru_sense_null(struct socket *so, struct stat *sb)
2657{
2658
2659	sb->st_blksize = so->so_snd.sb_hiwat;
2660	return 0;
2661}
2662
2663int
2664pru_shutdown_notsupp(struct socket *so)
2665{
2666
2667	return EOPNOTSUPP;
2668}
2669
2670int
2671pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
2672{
2673
2674	return EOPNOTSUPP;
2675}
2676
2677int
2678pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
2679    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
2680{
2681
2682	return EOPNOTSUPP;
2683}
2684
2685int
2686pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
2687    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2688{
2689
2690	return EOPNOTSUPP;
2691}
2692
2693int
2694pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
2695    struct thread *td)
2696{
2697
2698	return EOPNOTSUPP;
2699}
2700
2701static void
2702filt_sordetach(struct knote *kn)
2703{
2704	struct socket *so = kn->kn_fp->f_data;
2705
2706	SOCKBUF_LOCK(&so->so_rcv);
2707	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
2708	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
2709		so->so_rcv.sb_flags &= ~SB_KNOTE;
2710	SOCKBUF_UNLOCK(&so->so_rcv);
2711}
2712
2713/*ARGSUSED*/
2714static int
2715filt_soread(struct knote *kn, long hint)
2716{
2717	struct socket *so;
2718
2719	so = kn->kn_fp->f_data;
2720	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2721
2722	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
2723	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2724		kn->kn_flags |= EV_EOF;
2725		kn->kn_fflags = so->so_error;
2726		return (1);
2727	} else if (so->so_error)	/* temporary udp error */
2728		return (1);
2729	else if (kn->kn_sfflags & NOTE_LOWAT)
2730		return (kn->kn_data >= kn->kn_sdata);
2731	else
2732		return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
2733}
2734
2735static void
2736filt_sowdetach(struct knote *kn)
2737{
2738	struct socket *so = kn->kn_fp->f_data;
2739
2740	SOCKBUF_LOCK(&so->so_snd);
2741	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
2742	if (knlist_empty(&so->so_snd.sb_sel.si_note))
2743		so->so_snd.sb_flags &= ~SB_KNOTE;
2744	SOCKBUF_UNLOCK(&so->so_snd);
2745}
2746
2747/*ARGSUSED*/
2748static int
2749filt_sowrite(struct knote *kn, long hint)
2750{
2751	struct socket *so;
2752
2753	so = kn->kn_fp->f_data;
2754	SOCKBUF_LOCK_ASSERT(&so->so_snd);
2755	kn->kn_data = sbspace(&so->so_snd);
2756	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2757		kn->kn_flags |= EV_EOF;
2758		kn->kn_fflags = so->so_error;
2759		return (1);
2760	} else if (so->so_error)	/* temporary udp error */
2761		return (1);
2762	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
2763	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
2764		return (0);
2765	else if (kn->kn_sfflags & NOTE_LOWAT)
2766		return (kn->kn_data >= kn->kn_sdata);
2767	else
2768		return (kn->kn_data >= so->so_snd.sb_lowat);
2769}
2770
2771/*ARGSUSED*/
2772static int
2773filt_solisten(struct knote *kn, long hint)
2774{
2775	struct socket *so = kn->kn_fp->f_data;
2776
2777	kn->kn_data = so->so_qlen;
2778	return (! TAILQ_EMPTY(&so->so_comp));
2779}
2780
2781int
2782socheckuid(struct socket *so, uid_t uid)
2783{
2784
2785	if (so == NULL)
2786		return (EPERM);
2787	if (so->so_cred->cr_uid != uid)
2788		return (EPERM);
2789	return (0);
2790}
2791
2792static int
2793sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
2794{
2795	int error;
2796	int val;
2797
2798	val = somaxconn;
2799	error = sysctl_handle_int(oidp, &val, 0, req);
2800	if (error || !req->newptr)
2801		return (error);
2802
2803	if (val < 1 || val > USHRT_MAX)
2804		return (EINVAL);
2805
2806	somaxconn = val;
2807	return (0);
2808}
2809
2810/*
2811 * These functions are used by protocols to notify the socket layer (and its
2812 * consumers) of state changes in the sockets driven by protocol-side events.
2813 */
2814
2815/*
2816 * Procedures to manipulate state flags of socket and do appropriate wakeups.
2817 *
2818 * Normal sequence from the active (originating) side is that
2819 * soisconnecting() is called during processing of connect() call, resulting
2820 * in an eventual call to soisconnected() if/when the connection is
2821 * established.  When the connection is torn down soisdisconnecting() is
2822 * called during processing of disconnect() call, and soisdisconnected() is
2823 * called when the connection to the peer is totally severed.  The semantics
2824 * of these routines are such that connectionless protocols can call
2825 * soisconnected() and soisdisconnected() only, bypassing the in-progress
2826 * calls when setting up a ``connection'' takes no time.
2827 *
2828 * From the passive side, a socket is created with two queues of sockets:
2829 * so_incomp for connections in progress and so_comp for connections already
2830 * made and awaiting user acceptance.  As a protocol is preparing incoming
2831 * connections, it creates a socket structure queued on so_incomp by calling
2832 * sonewconn().  When the connection is established, soisconnected() is
2833 * called, which transfers the socket structure to so_comp, making it
2834 * available to accept().
2835 *
2836 * If a socket is closed with sockets on either so_incomp or so_comp, these
2837 * sockets are dropped.
2838 *
2839 * If higher-level protocols are implemented in the kernel, the wakeups done
2840 * here will sometimes cause software-interrupt process scheduling.
2841 */
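
/*
 * A hedged sketch of the active-side call sequence described above, as a
 * connection-oriented protocol might issue it:
 *
 *	soisconnecting(so);		(during connect() processing)
 *	...protocol handshake completes...
 *	soisconnected(so);		(connection established)
 *	...later, during teardown...
 *	soisdisconnecting(so);		(disconnect initiated)
 *	soisdisconnected(so);		(connection fully severed)
 */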
2842void
2843soisconnecting(struct socket *so)
2844{
2845
2846	SOCK_LOCK(so);
2847	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
2848	so->so_state |= SS_ISCONNECTING;
2849	SOCK_UNLOCK(so);
2850}
2851
2852void
2853soisconnected(struct socket *so)
2854{
2855	struct socket *head;
2856
2857	ACCEPT_LOCK();
2858	SOCK_LOCK(so);
2859	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
2860	so->so_state |= SS_ISCONNECTED;
2861	head = so->so_head;
2862	if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
2863		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
2864			SOCK_UNLOCK(so);
2865			TAILQ_REMOVE(&head->so_incomp, so, so_list);
2866			head->so_incqlen--;
2867			so->so_qstate &= ~SQ_INCOMP;
2868			TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
2869			head->so_qlen++;
2870			so->so_qstate |= SQ_COMP;
2871			ACCEPT_UNLOCK();
2872			sorwakeup(head);
2873			wakeup_one(&head->so_timeo);
2874		} else {
2875			ACCEPT_UNLOCK();
2876			so->so_upcall =
2877			    head->so_accf->so_accept_filter->accf_callback;
2878			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
2879			so->so_rcv.sb_flags |= SB_UPCALL;
2880			so->so_options &= ~SO_ACCEPTFILTER;
2881			SOCK_UNLOCK(so);
2882			so->so_upcall(so, so->so_upcallarg, M_DONTWAIT);
2883		}
2884		return;
2885	}
2886	SOCK_UNLOCK(so);
2887	ACCEPT_UNLOCK();
2888	wakeup(&so->so_timeo);
2889	sorwakeup(so);
2890	sowwakeup(so);
2891}
2892
2893void
2894soisdisconnecting(struct socket *so)
2895{
2896
2897	/*
2898	 * Note: This code assumes that SOCK_LOCK(so) and
2899	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
2900	 */
2901	SOCKBUF_LOCK(&so->so_rcv);
2902	so->so_state &= ~SS_ISCONNECTING;
2903	so->so_state |= SS_ISDISCONNECTING;
2904	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
2905	sorwakeup_locked(so);
2906	SOCKBUF_LOCK(&so->so_snd);
2907	so->so_snd.sb_state |= SBS_CANTSENDMORE;
2908	sowwakeup_locked(so);
2909	wakeup(&so->so_timeo);
2910}
2911
2912void
2913soisdisconnected(struct socket *so)
2914{
2915
2916	/*
2917	 * Note: This code assumes that SOCK_LOCK(so) and
2918	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
2919	 */
2920	SOCKBUF_LOCK(&so->so_rcv);
2921	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
2922	so->so_state |= SS_ISDISCONNECTED;
2923	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
2924	sorwakeup_locked(so);
2925	SOCKBUF_LOCK(&so->so_snd);
2926	so->so_snd.sb_state |= SBS_CANTSENDMORE;
2927	sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
2928	sowwakeup_locked(so);
2929	wakeup(&so->so_timeo);
2930}
2931
2932/*
2933 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
2934 */
2935struct sockaddr *
2936sodupsockaddr(const struct sockaddr *sa, int mflags)
2937{
2938	struct sockaddr *sa2;
2939
2940	sa2 = malloc(sa->sa_len, M_SONAME, mflags);
2941	if (sa2)
2942		bcopy(sa, sa2, sa->sa_len);
2943	return sa2;
2944}
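
/*
 * Typical (hedged) usage: a pru_sockaddr() or pru_peeraddr() implementation
 * might hand back a copy of a locally built address, checking for allocation
 * failure when M_NOWAIT is used ("sin" here is illustrative):
 *
 *	struct sockaddr_in sin;
 *
 *	...fill in sin...
 *	*nam = sodupsockaddr((struct sockaddr *)&sin, M_NOWAIT);
 *	if (*nam == NULL)
 *		return (ENOMEM);
 */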
2945
2946/*
2947 * Create an external-format (``xsocket'') structure using the information in
2948 * the kernel-format socket structure pointed to by so.  This is done to
2949 * reduce the spew of irrelevant information over this interface, to isolate
2950 * user code from changes in the kernel structure, and potentially to provide
2951 * information-hiding if we decide that some of this information should be
2952 * hidden from users.
2953 */
2954void
2955sotoxsocket(struct socket *so, struct xsocket *xso)
2956{
2957
2958	xso->xso_len = sizeof *xso;
2959	xso->xso_so = so;
2960	xso->so_type = so->so_type;
2961	xso->so_options = so->so_options;
2962	xso->so_linger = so->so_linger;
2963	xso->so_state = so->so_state;
2964	xso->so_pcb = so->so_pcb;
2965	xso->xso_protocol = so->so_proto->pr_protocol;
2966	xso->xso_family = so->so_proto->pr_domain->dom_family;
2967	xso->so_qlen = so->so_qlen;
2968	xso->so_incqlen = so->so_incqlen;
2969	xso->so_qlimit = so->so_qlimit;
2970	xso->so_timeo = so->so_timeo;
2971	xso->so_error = so->so_error;
2972	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
2973	xso->so_oobmark = so->so_oobmark;
2974	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
2975	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
2976	xso->so_uid = so->so_cred->cr_uid;
2977}
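
/*
 * A hedged example of the consumer side: sysctl handlers that export
 * per-connection state (e.g. protocol pcblist handlers) typically fill in
 * an xsocket and copy it out to userland:
 *
 *	struct xsocket xso;
 *
 *	sotoxsocket(so, &xso);
 *	error = SYSCTL_OUT(req, &xso, sizeof xso);
 */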
2978