1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 *	The Regents of the University of California.
4 * Copyright (c) 2004 The FreeBSD Foundation
5 * Copyright (c) 2004-2008 Robert N. M. Watson
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
33 */
34
35/*
36 * Comments on the socket life cycle:
37 *
 * soalloc() sets up socket layer state for a socket, called only by
39 * socreate() and sonewconn().  Socket layer private.
40 *
41 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree(), socreate(), and sonewconn().  Socket layer private.
43 *
44 * pru_attach() associates protocol layer state with an allocated socket;
45 * called only once, may fail, aborting socket allocation.  This is called
46 * from socreate() and sonewconn().  Socket layer private.
47 *
48 * pru_detach() disassociates protocol layer state from an attached socket,
49 * and will be called exactly once for sockets in which pru_attach() has
50 * been successfully called.  If pru_attach() returned an error,
51 * pru_detach() will not be called.  Socket layer private.
52 *
53 * pru_abort() and pru_close() notify the protocol layer that the last
54 * consumer of a socket is starting to tear down the socket, and that the
55 * protocol should terminate the connection.  Historically, pru_abort() also
56 * detached protocol state from the socket state, but this is no longer the
57 * case.
58 *
59 * socreate() creates a socket and attaches protocol state.  This is a public
60 * interface that may be used by socket layer consumers to create new
61 * sockets.
62 *
63 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
65 * a new connection is received and will be available for accept() on a
66 * listen socket.
67 *
68 * soclose() destroys a socket after possibly waiting for it to disconnect.
69 * This is a public interface that socket consumers should use to close and
70 * release a socket when done with it.
71 *
72 * soabort() destroys a socket without waiting for it to disconnect (used
73 * only for incoming connections that are already partially or fully
74 * connected).  This is used internally by the socket layer when clearing
75 * listen socket queues (due to overflow or close on the listen socket), but
76 * is also a public interface protocols may use to abort connections in
77 * their incomplete listen queues should they no longer be required.  Sockets
78 * placed in completed connection listen queues should not be aborted for
79 * reasons described in the comment above the soclose() implementation.  This
80 * is not a general purpose close routine, and except in the specific
81 * circumstances described here, should not be used.
82 *
83 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the interface used to attempt to
85 * free a socket when a reference is removed.  This is a socket layer private
86 * interface.
87 *
88 * NOTE: In addition to socreate() and soclose(), which provide a single
89 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references: soref() and sorele().
91 * Currently, these are generally required only when transitioning a socket
92 * from a listen queue to a file descriptor, in order to prevent garbage
93 * collection of the socket at an untimely moment.  For a number of reasons,
94 * these interfaces are not preferred, and should be avoided.
95 *
96 * NOTE: With regard to VNETs the general rule is that callers do not set
97 * curvnet. Exceptions to this rule include soabort(), sodisconnect(),
98 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
99 * and sorflush(), which are usually called from a pre-set VNET context.
100 * sopoll() currently does not need a VNET context to be set.
101 */
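/*
 * As an illustration of the life cycle above, the following is a minimal
 * sketch (not compiled, hence the #if 0) of an in-kernel consumer that
 * creates, binds, listens on, and closes a TCP socket.  The function name
 * and port number are arbitrary examples, and error handling is reduced to
 * the essentials.
 */
#if 0
static int
example_socket_lifecycle(struct thread *td)
{
	struct socket *so;
	struct sockaddr_in sin;
	int error;

	/* socreate() returns a socket holding a single reference. */
	error = socreate(PF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
	    td->td_ucred, td);
	if (error != 0)
		return (error);

	bzero(&sin, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = htons(8080);	/* arbitrary example port */

	/* sobind() and solisten() dispatch to the protocol's pru_* methods. */
	error = sobind(so, (struct sockaddr *)&sin, td);
	if (error == 0)
		error = solisten(so, SOMAXCONN, td);

	/* soclose() releases the consumer's reference and tears down. */
	soclose(so);
	return (error);
}
#endif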
102
103#include <sys/cdefs.h>
104__FBSDID("$FreeBSD$");
105
106#include "opt_inet.h"
107#include "opt_inet6.h"
108#include "opt_compat.h"
109
110#include <sys/param.h>
111#include <sys/systm.h>
112#include <sys/fcntl.h>
113#include <sys/limits.h>
114#include <sys/lock.h>
115#include <sys/mac.h>
116#include <sys/malloc.h>
117#include <sys/mbuf.h>
118#include <sys/mutex.h>
119#include <sys/domain.h>
120#include <sys/file.h>			/* for struct knote */
121#include <sys/kernel.h>
122#include <sys/event.h>
123#include <sys/eventhandler.h>
124#include <sys/poll.h>
125#include <sys/proc.h>
126#include <sys/protosw.h>
127#include <sys/socket.h>
128#include <sys/socketvar.h>
129#include <sys/resourcevar.h>
130#include <net/route.h>
131#include <sys/signalvar.h>
132#include <sys/stat.h>
133#include <sys/sx.h>
134#include <sys/sysctl.h>
135#include <sys/uio.h>
136#include <sys/jail.h>
137#include <sys/syslog.h>
138#include <netinet/in.h>
139
140#include <net/vnet.h>
141
142#include <security/mac/mac_framework.h>
143
144#include <vm/uma.h>
145
146#ifdef COMPAT_FREEBSD32
147#include <sys/mount.h>
148#include <sys/sysent.h>
149#include <compat/freebsd32/freebsd32.h>
150#endif
151
152static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
153		    int flags);
154
155static void	filt_sordetach(struct knote *kn);
156static int	filt_soread(struct knote *kn, long hint);
157static void	filt_sowdetach(struct knote *kn);
158static int	filt_sowrite(struct knote *kn, long hint);
159static int	filt_solisten(struct knote *kn, long hint);
160
161static struct filterops solisten_filtops = {
162	.f_isfd = 1,
163	.f_detach = filt_sordetach,
164	.f_event = filt_solisten,
165};
166static struct filterops soread_filtops = {
167	.f_isfd = 1,
168	.f_detach = filt_sordetach,
169	.f_event = filt_soread,
170};
171static struct filterops sowrite_filtops = {
172	.f_isfd = 1,
173	.f_detach = filt_sowdetach,
174	.f_event = filt_sowrite,
175};
176
177so_gen_t	so_gencnt;	/* generation count for sockets */
178
179MALLOC_DEFINE(M_SONAME, "soname", "socket name");
180MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
181
182#define	VNET_SO_ASSERT(so)						\
183	VNET_ASSERT(curvnet != NULL,					\
184	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));
185
186/*
187 * Limit on the number of connections in the listen queue waiting
188 * for accept(2).
189 * NB: The original sysctl somaxconn is still available but hidden
190 * to prevent confusion about the actual purpose of this number.
191 */
192static int somaxconn = SOMAXCONN;
193
194static int
195sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
196{
197	int error;
198	int val;
199
200	val = somaxconn;
201	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
203		return (error);
204
205	if (val < 1 || val > USHRT_MAX)
206		return (EINVAL);
207
208	somaxconn = val;
209	return (0);
210}
211SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW,
212    0, sizeof(int), sysctl_somaxconn, "I",
213    "Maximum listen socket pending connection accept queue size");
214SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
215    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP,
216    0, sizeof(int), sysctl_somaxconn, "I",
217    "Maximum listen socket pending connection accept queue size (compat)");
218
219static int numopensockets;
220SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
221    &numopensockets, 0, "Number of open sockets");
222
223/*
224 * accept_mtx locks down per-socket fields relating to accept queues.  See
225 * socketvar.h for an annotation of the protected fields of struct socket.
226 */
227struct mtx accept_mtx;
228MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
229
230/*
231 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
232 * so_gencnt field.
233 */
234static struct mtx so_global_mtx;
235MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
236
237/*
238 * General IPC sysctl name space, used by sockets and a variety of other IPC
239 * types.
240 */
241SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
242
243/*
244 * Initialize the socket subsystem and set up the socket
245 * memory allocator.
246 */
247static uma_zone_t socket_zone;
248int	maxsockets;
249
250static void
251socket_zone_change(void *tag)
252{
253
254	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
255}
256
257static void
258socket_init(void *tag)
259{
260
261	socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
262	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
263	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
264	uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
265	EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
266	    EVENTHANDLER_PRI_FIRST);
267}
268SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);
269
270/*
271 * Initialise maxsockets.  This SYSINIT must be run after
272 * tunable_mbinit().
273 */
274static void
275init_maxsockets(void *ignored)
276{
277
278	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
279	maxsockets = imax(maxsockets, maxfiles);
280}
281SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
282
283/*
284 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
285 * of the change so that they can update their dependent limits as required.
286 */
287static int
288sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
289{
290	int error, newmaxsockets;
291
292	newmaxsockets = maxsockets;
293	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
294	if (error == 0 && req->newptr) {
295		if (newmaxsockets > maxsockets &&
296		    newmaxsockets <= maxfiles) {
297			maxsockets = newmaxsockets;
298			EVENTHANDLER_INVOKE(maxsockets_change);
299		} else
300			error = EINVAL;
301	}
302	return (error);
303}
304SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
305    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");
307
308/*
309 * Socket operation routines.  These routines are called by the routines in
310 * sys_socket.c or from a system process, and implement the semantics of
311 * socket operations by switching out to the protocol specific routines.
312 */
313
314/*
315 * Get a socket structure from our zone, and initialize it.  Note that it
316 * would probably be better to allocate socket and PCB at the same time, but
317 * I'm not convinced that all the protocols can be easily modified to do
318 * this.
319 *
320 * soalloc() returns a socket with a ref count of 0.
321 */
322static struct socket *
323soalloc(struct vnet *vnet)
324{
325	struct socket *so;
326
327	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
328	if (so == NULL)
329		return (NULL);
330#ifdef MAC
331	if (mac_socket_init(so, M_NOWAIT) != 0) {
332		uma_zfree(socket_zone, so);
333		return (NULL);
334	}
335#endif
336	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
337	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
338	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
339	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
340	TAILQ_INIT(&so->so_aiojobq);
341	mtx_lock(&so_global_mtx);
342	so->so_gencnt = ++so_gencnt;
343	++numopensockets;
344#ifdef VIMAGE
345	VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
346	    __func__, __LINE__, so));
347	vnet->vnet_sockcnt++;
348	so->so_vnet = vnet;
349#endif
350	mtx_unlock(&so_global_mtx);
351	return (so);
352}
353
354/*
355 * Free the storage associated with a socket at the socket layer, tear down
356 * locks, labels, etc.  All protocol state is assumed already to have been
357 * torn down (and possibly never set up) by the caller.
358 */
359static void
360sodealloc(struct socket *so)
361{
362
363	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
364	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
365
366	mtx_lock(&so_global_mtx);
367	so->so_gencnt = ++so_gencnt;
368	--numopensockets;	/* Could be below, but faster here. */
369#ifdef VIMAGE
370	VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
371	    __func__, __LINE__, so));
372	so->so_vnet->vnet_sockcnt--;
373#endif
374	mtx_unlock(&so_global_mtx);
375	if (so->so_rcv.sb_hiwat)
376		(void)chgsbsize(so->so_cred->cr_uidinfo,
377		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
378	if (so->so_snd.sb_hiwat)
379		(void)chgsbsize(so->so_cred->cr_uidinfo,
380		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
381#ifdef INET
	/* remove accept filter if one is present. */
383	if (so->so_accf != NULL)
384		do_setopt_accept_filter(so, NULL);
385#endif
386#ifdef MAC
387	mac_socket_destroy(so);
388#endif
389	crfree(so->so_cred);
390	sx_destroy(&so->so_snd.sb_sx);
391	sx_destroy(&so->so_rcv.sb_sx);
392	SOCKBUF_LOCK_DESTROY(&so->so_snd);
393	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
394	uma_zfree(socket_zone, so);
395}
396
397/*
398 * socreate returns a socket with a ref count of 1.  The socket should be
399 * closed with soclose().
400 */
401int
402socreate(int dom, struct socket **aso, int type, int proto,
403    struct ucred *cred, struct thread *td)
404{
405	struct protosw *prp;
406	struct socket *so;
407	int error;
408
409	if (proto)
410		prp = pffindproto(dom, proto, type);
411	else
412		prp = pffindtype(dom, type);
413
414	if (prp == NULL) {
415		/* No support for domain. */
416		if (pffinddomain(dom) == NULL)
417			return (EAFNOSUPPORT);
418		/* No support for socket type. */
419		if (proto == 0 && type != 0)
420			return (EPROTOTYPE);
421		return (EPROTONOSUPPORT);
422	}
423	if (prp->pr_usrreqs->pru_attach == NULL ||
424	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
425		return (EPROTONOSUPPORT);
426
427	if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
428		return (EPROTONOSUPPORT);
429
430	if (prp->pr_type != type)
431		return (EPROTOTYPE);
432	so = soalloc(CRED_TO_VNET(cred));
433	if (so == NULL)
434		return (ENOBUFS);
435
436	TAILQ_INIT(&so->so_incomp);
437	TAILQ_INIT(&so->so_comp);
438	so->so_type = type;
439	so->so_cred = crhold(cred);
440	if ((prp->pr_domain->dom_family == PF_INET) ||
441	    (prp->pr_domain->dom_family == PF_INET6) ||
442	    (prp->pr_domain->dom_family == PF_ROUTE))
443		so->so_fibnum = td->td_proc->p_fibnum;
444	else
445		so->so_fibnum = 0;
446	so->so_proto = prp;
447#ifdef MAC
448	mac_socket_create(cred, so);
449#endif
450	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
451	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
452	so->so_count = 1;
453	/*
454	 * Auto-sizing of socket buffers is managed by the protocols and
455	 * the appropriate flags must be set in the pru_attach function.
456	 */
457	CURVNET_SET(so->so_vnet);
458	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
459	CURVNET_RESTORE();
460	if (error) {
461		KASSERT(so->so_count == 1, ("socreate: so_count %d",
462		    so->so_count));
463		so->so_count = 0;
464		sodealloc(so);
465		return (error);
466	}
467	*aso = so;
468	return (0);
469}
470
471#ifdef REGRESSION
472static int regression_sonewconn_earlytest = 1;
473SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
474    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
475#endif
476
477/*
478 * When an attempt at a new connection is noted on a socket which accepts
479 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * The connstatus argument may be 0, SS_ISCONFIRMING, or SS_ISCONNECTED.
483 *
484 * Note: the ref count on the socket is 0 on return.
485 */
486struct socket *
487sonewconn(struct socket *head, int connstatus)
488{
489	static struct timeval lastover;
490	static struct timeval overinterval = { 60, 0 };
491	static int overcount;
492
493	struct socket *so;
494	int over;
495
496	ACCEPT_LOCK();
497	over = (head->so_qlen > 3 * head->so_qlimit / 2);
498	ACCEPT_UNLOCK();
499#ifdef REGRESSION
500	if (regression_sonewconn_earlytest && over) {
501#else
502	if (over) {
503#endif
504		overcount++;
505
506		if (ratecheck(&lastover, &overinterval)) {
507			log(LOG_DEBUG, "%s: pcb %p: Listen queue overflow: "
508			    "%i already in queue awaiting acceptance "
509			    "(%d occurrences)\n",
510			    __func__, head->so_pcb, head->so_qlen, overcount);
511
512			overcount = 0;
513		}
514
515		return (NULL);
516	}
517	VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
518	    __func__, __LINE__, head));
519	so = soalloc(head->so_vnet);
520	if (so == NULL) {
521		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
522		    "limit reached or out of memory\n",
523		    __func__, head->so_pcb);
524		return (NULL);
525	}
526	if ((head->so_options & SO_ACCEPTFILTER) != 0)
527		connstatus = 0;
528	so->so_head = head;
529	so->so_type = head->so_type;
530	so->so_options = head->so_options &~ SO_ACCEPTCONN;
531	so->so_linger = head->so_linger;
532	so->so_state = head->so_state | SS_NOFDREF;
533	so->so_fibnum = head->so_fibnum;
534	so->so_proto = head->so_proto;
535	so->so_cred = crhold(head->so_cred);
536#ifdef MAC
537	mac_socket_newconn(head, so);
538#endif
539	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
540	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
541	VNET_SO_ASSERT(head);
542	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
543		sodealloc(so);
544		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
545		    __func__, head->so_pcb);
546		return (NULL);
547	}
548	if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
549		sodealloc(so);
550		log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
551		    __func__, head->so_pcb);
552		return (NULL);
553	}
554	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
555	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
556	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
557	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
558	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
559	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
560	so->so_state |= connstatus;
561	ACCEPT_LOCK();
562	/*
	 * The accept socket may be tearing down, but we just
	 * won a race on the ACCEPT_LOCK.
	 * However, if sctp_peeloff() is called on a 1-to-many
	 * style socket, SO_ACCEPTCONN need not be set.
567	 */
568	if (!(head->so_options & SO_ACCEPTCONN) &&
569	    ((head->so_proto->pr_protocol != IPPROTO_SCTP) ||
570	     (head->so_type != SOCK_SEQPACKET))) {
571		SOCK_LOCK(so);
572		so->so_head = NULL;
573		sofree(so);		/* NB: returns ACCEPT_UNLOCK'ed. */
574		return (NULL);
575	}
576	if (connstatus) {
577		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
578		so->so_qstate |= SQ_COMP;
579		head->so_qlen++;
580	} else {
581		/*
582		 * Keep removing sockets from the head until there's room for
583		 * us to insert on the tail.  In pre-locking revisions, this
584		 * was a simple if(), but as we could be racing with other
585		 * threads and soabort() requires dropping locks, we must
586		 * loop waiting for the condition to be true.
587		 */
588		while (head->so_incqlen > head->so_qlimit) {
589			struct socket *sp;
590			sp = TAILQ_FIRST(&head->so_incomp);
591			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
592			head->so_incqlen--;
593			sp->so_qstate &= ~SQ_INCOMP;
594			sp->so_head = NULL;
595			ACCEPT_UNLOCK();
596			soabort(sp);
597			ACCEPT_LOCK();
598		}
599		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
600		so->so_qstate |= SQ_INCOMP;
601		head->so_incqlen++;
602	}
603	ACCEPT_UNLOCK();
604	if (connstatus) {
605		sorwakeup(head);
606		wakeup_one(&head->so_timeo);
607	}
608	return (so);
609}
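
/*
 * An illustrative sketch (not compiled) of how a protocol might call
 * sonewconn() when a connection request arrives on a listening socket; the
 * surrounding input processing is a stand-in for real protocol logic
 * (compare the TCP syncache).
 */
#if 0
static void
example_proto_new_connection(struct socket *head)
{
	struct socket *so;

	/*
	 * A connstatus of 0 places the new socket on the incomplete queue;
	 * SS_ISCONNECTED places it on the complete queue, ready for accept().
	 */
	so = sonewconn(head, 0);
	if (so == NULL)
		return;		/* Listen queue overflow or out of memory. */

	/* Protocol handshake processing continues on 'so' from here. */
}
#endif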
610
611int
612sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
613{
614	int error;
615
616	CURVNET_SET(so->so_vnet);
617	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
618	CURVNET_RESTORE();
619	return (error);
620}
621
622int
623sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
624{
625	int error;
626
627	CURVNET_SET(so->so_vnet);
628	error = (*so->so_proto->pr_usrreqs->pru_bindat)(fd, so, nam, td);
629	CURVNET_RESTORE();
630	return (error);
631}
632
633/*
634 * solisten() transitions a socket from a non-listening state to a listening
635 * state, but can also be used to update the listen queue depth on an
636 * existing listen socket.  The protocol will call back into the sockets
637 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Callbacks are used so that the protocol can
639 * acquire both protocol and socket layer locks in whatever order is required
640 * by the protocol.
641 *
642 * Protocol implementors are advised to hold the socket lock across the
643 * socket-layer test and set to avoid races at the socket layer.
644 */
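/*
 * A minimal sketch (not compiled) of the callback pattern described above,
 * roughly as a protocol's pru_listen method might use it.  The
 * EXAMPLE_PROTO_LOCK/UNLOCK macros are hypothetical stand-ins for the
 * protocol's own locking; real protocols substitute their own locks and any
 * additional validation they require.
 */
#if 0
static int
example_pru_listen(struct socket *so, int backlog, struct thread *td)
{
	int error;

	EXAMPLE_PROTO_LOCK(so);		/* Protocol state lock first. */
	SOCK_LOCK(so);
	error = solisten_proto_check(so);	/* Fails if (dis)connecting. */
	if (error == 0)
		solisten_proto(so, backlog);	/* Sets SO_ACCEPTCONN, qlimit. */
	SOCK_UNLOCK(so);
	EXAMPLE_PROTO_UNLOCK(so);
	return (error);
}
#endif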
645int
646solisten(struct socket *so, int backlog, struct thread *td)
647{
648	int error;
649
650	CURVNET_SET(so->so_vnet);
651	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
652	CURVNET_RESTORE();
653	return (error);
654}
655
656int
657solisten_proto_check(struct socket *so)
658{
659
660	SOCK_LOCK_ASSERT(so);
661
662	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
663	    SS_ISDISCONNECTING))
664		return (EINVAL);
665	return (0);
666}
667
668void
669solisten_proto(struct socket *so, int backlog)
670{
671
672	SOCK_LOCK_ASSERT(so);
673
674	if (backlog < 0 || backlog > somaxconn)
675		backlog = somaxconn;
676	so->so_qlimit = backlog;
677	so->so_options |= SO_ACCEPTCONN;
678}
679
680/*
681 * Evaluate the reference count and named references on a socket; if no
682 * references remain, free it.  This should be called whenever a reference is
683 * released, such as in sorele(), but also when named reference flags are
684 * cleared in socket or protocol code.
685 *
686 * sofree() will free the socket if:
687 *
688 * - There are no outstanding file descriptor references or related consumers
689 *   (so_count == 0).
690 *
691 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
692 *
693 * - The protocol does not have an outstanding strong reference on the socket
694 *   (SS_PROTOREF).
695 *
 * - The socket is not in a completed connection queue; if it were, a process
 *   may already have been notified that it is present, and removing it could
 *   make that process block in accept() despite select() reporting it ready.
699 */
700void
701sofree(struct socket *so)
702{
703	struct protosw *pr = so->so_proto;
704	struct socket *head;
705
706	ACCEPT_LOCK_ASSERT();
707	SOCK_LOCK_ASSERT(so);
708
709	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
710	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
711		SOCK_UNLOCK(so);
712		ACCEPT_UNLOCK();
713		return;
714	}
715
716	head = so->so_head;
717	if (head != NULL) {
718		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
719		    (so->so_qstate & SQ_INCOMP) != 0,
720		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
721		    "SQ_INCOMP"));
722		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
723		    (so->so_qstate & SQ_INCOMP) == 0,
724		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
725		TAILQ_REMOVE(&head->so_incomp, so, so_list);
726		head->so_incqlen--;
727		so->so_qstate &= ~SQ_INCOMP;
728		so->so_head = NULL;
729	}
730	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
731	    (so->so_qstate & SQ_INCOMP) == 0,
732	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
733	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
734	if (so->so_options & SO_ACCEPTCONN) {
735		KASSERT((TAILQ_EMPTY(&so->so_comp)),
736		    ("sofree: so_comp populated"));
737		KASSERT((TAILQ_EMPTY(&so->so_incomp)),
738		    ("sofree: so_incomp populated"));
739	}
740	SOCK_UNLOCK(so);
741	ACCEPT_UNLOCK();
742
743	VNET_SO_ASSERT(so);
744	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) {
745		if (pr->pr_domain->dom_family == AF_LOCAL)
746			unp_dispose_so(so);
747		else
748			(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
749	}
750	if (pr->pr_usrreqs->pru_detach != NULL)
751		(*pr->pr_usrreqs->pru_detach)(so);
752
753	/*
754	 * From this point on, we assume that no other references to this
755	 * socket exist anywhere else in the stack.  Therefore, no locks need
756	 * to be acquired or held.
757	 *
758	 * We used to do a lot of socket buffer and socket locking here, as
759	 * well as invoke sorflush() and perform wakeups.  The direct call to
760	 * dom_dispose() and sbrelease_internal() are an inlining of what was
761	 * necessary from sorflush().
762	 *
763	 * Notice that the socket buffer and kqueue state are torn down
 * before calling pru_detach().  This means that protocols should not
 * assume they can perform socket wakeups, etc., in their detach code.
766	 */
767	sbdestroy(&so->so_snd, so);
768	sbdestroy(&so->so_rcv, so);
769	seldrain(&so->so_snd.sb_sel);
770	seldrain(&so->so_rcv.sb_sel);
771	knlist_destroy(&so->so_rcv.sb_sel.si_note);
772	knlist_destroy(&so->so_snd.sb_sel.si_note);
773	sodealloc(so);
774}
775
776/*
777 * Close a socket on last file table reference removal.  Initiate disconnect
778 * if connected.  Free socket when disconnect complete.
779 *
780 * This function will sorele() the socket.  Note that soclose() may be called
781 * prior to the ref count reaching zero.  The actual socket structure will
782 * not be freed until the ref count reaches zero.
783 */
784int
785soclose(struct socket *so)
786{
787	int error = 0;
788
789	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
790
791	CURVNET_SET(so->so_vnet);
792	funsetown(&so->so_sigio);
793	if (so->so_state & SS_ISCONNECTED) {
794		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
795			error = sodisconnect(so);
796			if (error) {
797				if (error == ENOTCONN)
798					error = 0;
799				goto drop;
800			}
801		}
802		if (so->so_options & SO_LINGER) {
803			if ((so->so_state & SS_ISDISCONNECTING) &&
804			    (so->so_state & SS_NBIO))
805				goto drop;
806			while (so->so_state & SS_ISCONNECTED) {
807				error = tsleep(&so->so_timeo,
808				    PSOCK | PCATCH, "soclos",
809				    so->so_linger * hz);
810				if (error)
811					break;
812			}
813		}
814	}
815
816drop:
817	if (so->so_proto->pr_usrreqs->pru_close != NULL)
818		(*so->so_proto->pr_usrreqs->pru_close)(so);
819	ACCEPT_LOCK();
820	if (so->so_options & SO_ACCEPTCONN) {
821		struct socket *sp;
822		/*
823		 * Prevent new additions to the accept queues due
824		 * to ACCEPT_LOCK races while we are draining them.
825		 */
826		so->so_options &= ~SO_ACCEPTCONN;
827		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
828			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
829			so->so_incqlen--;
830			sp->so_qstate &= ~SQ_INCOMP;
831			sp->so_head = NULL;
832			ACCEPT_UNLOCK();
833			soabort(sp);
834			ACCEPT_LOCK();
835		}
836		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
837			TAILQ_REMOVE(&so->so_comp, sp, so_list);
838			so->so_qlen--;
839			sp->so_qstate &= ~SQ_COMP;
840			sp->so_head = NULL;
841			ACCEPT_UNLOCK();
842			soabort(sp);
843			ACCEPT_LOCK();
844		}
845		KASSERT((TAILQ_EMPTY(&so->so_comp)),
846		    ("%s: so_comp populated", __func__));
847		KASSERT((TAILQ_EMPTY(&so->so_incomp)),
848		    ("%s: so_incomp populated", __func__));
849	}
850	SOCK_LOCK(so);
851	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
852	so->so_state |= SS_NOFDREF;
853	sorele(so);			/* NB: Returns with ACCEPT_UNLOCK(). */
854	CURVNET_RESTORE();
855	return (error);
856}
857
858/*
859 * soabort() is used to abruptly tear down a connection, such as when a
860 * resource limit is reached (listen queue depth exceeded), or if a listen
861 * socket is closed while there are sockets waiting to be accepted.
862 *
863 * This interface is tricky, because it is called on an unreferenced socket,
864 * and must be called only by a thread that has actually removed the socket
865 * from the listen queue it was on, or races with other threads are risked.
866 *
867 * This interface will call into the protocol code, so must not be called
868 * with any socket locks held.  Protocols do call it while holding their own
869 * recursible protocol mutexes, but this is something that should be subject
870 * to review in the future.
871 */
872void
873soabort(struct socket *so)
874{
875
876	/*
877	 * In as much as is possible, assert that no references to this
878	 * socket are held.  This is not quite the same as asserting that the
879	 * current thread is responsible for arranging for no references, but
880	 * is as close as we can get for now.
881	 */
882	KASSERT(so->so_count == 0, ("soabort: so_count"));
883	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
884	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));
887	VNET_SO_ASSERT(so);
888
889	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
890		(*so->so_proto->pr_usrreqs->pru_abort)(so);
891	ACCEPT_LOCK();
892	SOCK_LOCK(so);
893	sofree(so);
894}
895
896int
897soaccept(struct socket *so, struct sockaddr **nam)
898{
899	int error;
900
901	SOCK_LOCK(so);
902	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
903	so->so_state &= ~SS_NOFDREF;
904	SOCK_UNLOCK(so);
905
906	CURVNET_SET(so->so_vnet);
907	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
908	CURVNET_RESTORE();
909	return (error);
910}
911
912int
913soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
914{
915
916	return (soconnectat(AT_FDCWD, so, nam, td));
917}
918
919int
920soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
921{
922	int error;
923
924	if (so->so_options & SO_ACCEPTCONN)
925		return (EOPNOTSUPP);
926
927	CURVNET_SET(so->so_vnet);
928	/*
929	 * If protocol is connection-based, can only connect once.
930	 * Otherwise, if connected, try to disconnect first.  This allows
931	 * user to disconnect by connecting to, e.g., a null address.
932	 */
933	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
934	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
935	    (error = sodisconnect(so)))) {
936		error = EISCONN;
937	} else {
938		/*
939		 * Prevent accumulated error from previous connection from
940		 * biting us.
941		 */
942		so->so_error = 0;
943		if (fd == AT_FDCWD) {
944			error = (*so->so_proto->pr_usrreqs->pru_connect)(so,
945			    nam, td);
946		} else {
947			error = (*so->so_proto->pr_usrreqs->pru_connectat)(fd,
948			    so, nam, td);
949		}
950	}
951	CURVNET_RESTORE();
952
953	return (error);
954}
955
956int
957soconnect2(struct socket *so1, struct socket *so2)
958{
959	int error;
960
961	CURVNET_SET(so1->so_vnet);
962	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
963	CURVNET_RESTORE();
964	return (error);
965}
966
967int
968sodisconnect(struct socket *so)
969{
970	int error;
971
972	if ((so->so_state & SS_ISCONNECTED) == 0)
973		return (ENOTCONN);
974	if (so->so_state & SS_ISDISCONNECTING)
975		return (EALREADY);
976	VNET_SO_ASSERT(so);
977	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
978	return (error);
979}
980
981#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
982
983int
984sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
985    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
986{
987	long space;
988	ssize_t resid;
989	int clen = 0, error, dontroute;
990
991	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
992	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
993	    ("sosend_dgram: !PR_ATOMIC"));
994
995	if (uio != NULL)
996		resid = uio->uio_resid;
997	else
998		resid = top->m_pkthdr.len;
999	/*
1000	 * In theory resid should be unsigned.  However, space must be
1001	 * signed, as it might be less than 0 if we over-committed, and we
1002	 * must use a signed comparison of space and resid.  On the other
1003	 * hand, a negative resid causes us to loop sending 0-length
1004	 * segments to the protocol.
1005	 */
1006	if (resid < 0) {
1007		error = EINVAL;
1008		goto out;
1009	}
1010
1011	dontroute =
1012	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
1013	if (td != NULL)
1014		td->td_ru.ru_msgsnd++;
1015	if (control != NULL)
1016		clen = control->m_len;
1017
1018	SOCKBUF_LOCK(&so->so_snd);
1019	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1020		SOCKBUF_UNLOCK(&so->so_snd);
1021		error = EPIPE;
1022		goto out;
1023	}
1024	if (so->so_error) {
1025		error = so->so_error;
1026		so->so_error = 0;
1027		SOCKBUF_UNLOCK(&so->so_snd);
1028		goto out;
1029	}
1030	if ((so->so_state & SS_ISCONNECTED) == 0) {
1031		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
1033		 * socket if it supports implied connect.  Return ENOTCONN if
1034		 * not connected and no address is supplied.
1035		 */
1036		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1037		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1038			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1039			    !(resid == 0 && clen != 0)) {
1040				SOCKBUF_UNLOCK(&so->so_snd);
1041				error = ENOTCONN;
1042				goto out;
1043			}
1044		} else if (addr == NULL) {
1045			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1046				error = ENOTCONN;
1047			else
1048				error = EDESTADDRREQ;
1049			SOCKBUF_UNLOCK(&so->so_snd);
1050			goto out;
1051		}
1052	}
1053
1054	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  The sign handling here
	 * may be a problem and needs fixing.
1057	 */
1058	space = sbspace(&so->so_snd);
1059	if (flags & MSG_OOB)
1060		space += 1024;
1061	space -= clen;
1062	SOCKBUF_UNLOCK(&so->so_snd);
1063	if (resid > space) {
1064		error = EMSGSIZE;
1065		goto out;
1066	}
1067	if (uio == NULL) {
1068		resid = 0;
1069		if (flags & MSG_EOR)
1070			top->m_flags |= M_EOR;
1071	} else {
1072		/*
1073		 * Copy the data from userland into a mbuf chain.
1074		 * If no data is to be copied in, a single empty mbuf
1075		 * is returned.
1076		 */
1077		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
1078		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
1079		if (top == NULL) {
1080			error = EFAULT;	/* only possible error */
1081			goto out;
1082		}
1083		space -= resid - uio->uio_resid;
1084		resid = uio->uio_resid;
1085	}
1086	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
1087	/*
1088	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
1089	 * than with.
1090	 */
1091	if (dontroute) {
1092		SOCK_LOCK(so);
1093		so->so_options |= SO_DONTROUTE;
1094		SOCK_UNLOCK(so);
1095	}
1096	/*
1097	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
1098	 * of date.  We could have received a reset packet in an interrupt or
1099	 * maybe we slept while doing page faults in uiomove() etc.  We could
1100	 * probably recheck again inside the locking protection here, but
1101	 * there are probably other places that this also happens.  We must
1102	 * rethink this.
1103	 */
1104	VNET_SO_ASSERT(so);
1105	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1106	    (flags & MSG_OOB) ? PRUS_OOB :
1107	/*
	 * If the user set MSG_EOF, the protocol understands this flag, and
	 * there is nothing left to send, then use PRU_SEND_EOF instead of
	 * PRU_SEND.
1110	 */
1111	    ((flags & MSG_EOF) &&
1112	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1113	     (resid <= 0)) ?
1114		PRUS_EOF :
1115		/* If there is more to send set PRUS_MORETOCOME */
1116		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1117		top, addr, control, td);
1118	if (dontroute) {
1119		SOCK_LOCK(so);
1120		so->so_options &= ~SO_DONTROUTE;
1121		SOCK_UNLOCK(so);
1122	}
1123	clen = 0;
1124	control = NULL;
1125	top = NULL;
1126out:
1127	if (top != NULL)
1128		m_freem(top);
1129	if (control != NULL)
1130		m_freem(control);
1131	return (error);
1132}
1133
1134/*
1135 * Send on a socket.  If send must go all at once and message is larger than
1136 * send buffering, then hard error.  Lock against other senders.  If must go
1137 * all at once and not enough room now, then inform user that this would
1138 * block and do nothing.  Otherwise, if nonblocking, send as much as
1139 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
1140 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
1141 * in mbuf chain must be small enough to send all at once.
1142 *
1143 * Returns nonzero on error, timeout or signal; callers must check for short
1144 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
1145 * on return.
1146 */
1147int
1148sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
1149    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1150{
1151	long space;
1152	ssize_t resid;
1153	int clen = 0, error, dontroute;
1154	int atomic = sosendallatonce(so) || top;
1155
1156	if (uio != NULL)
1157		resid = uio->uio_resid;
1158	else
1159		resid = top->m_pkthdr.len;
1160	/*
1161	 * In theory resid should be unsigned.  However, space must be
1162	 * signed, as it might be less than 0 if we over-committed, and we
1163	 * must use a signed comparison of space and resid.  On the other
1164	 * hand, a negative resid causes us to loop sending 0-length
1165	 * segments to the protocol.
1166	 *
1167	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
1168	 * type sockets since that's an error.
1169	 */
1170	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
1171		error = EINVAL;
1172		goto out;
1173	}
1174
1175	dontroute =
1176	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
1177	    (so->so_proto->pr_flags & PR_ATOMIC);
1178	if (td != NULL)
1179		td->td_ru.ru_msgsnd++;
1180	if (control != NULL)
1181		clen = control->m_len;
1182
1183	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
1184	if (error)
1185		goto out;
1186
1187restart:
1188	do {
1189		SOCKBUF_LOCK(&so->so_snd);
1190		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1191			SOCKBUF_UNLOCK(&so->so_snd);
1192			error = EPIPE;
1193			goto release;
1194		}
1195		if (so->so_error) {
1196			error = so->so_error;
1197			so->so_error = 0;
1198			SOCKBUF_UNLOCK(&so->so_snd);
1199			goto release;
1200		}
1201		if ((so->so_state & SS_ISCONNECTED) == 0) {
1202			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
1204			 * based socket if it supports implied connect.
1205			 * Return ENOTCONN if not connected and no address is
1206			 * supplied.
1207			 */
1208			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1209			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1210				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1211				    !(resid == 0 && clen != 0)) {
1212					SOCKBUF_UNLOCK(&so->so_snd);
1213					error = ENOTCONN;
1214					goto release;
1215				}
1216			} else if (addr == NULL) {
1217				SOCKBUF_UNLOCK(&so->so_snd);
1218				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1219					error = ENOTCONN;
1220				else
1221					error = EDESTADDRREQ;
1222				goto release;
1223			}
1224		}
1225		space = sbspace(&so->so_snd);
1226		if (flags & MSG_OOB)
1227			space += 1024;
1228		if ((atomic && resid > so->so_snd.sb_hiwat) ||
1229		    clen > so->so_snd.sb_hiwat) {
1230			SOCKBUF_UNLOCK(&so->so_snd);
1231			error = EMSGSIZE;
1232			goto release;
1233		}
1234		if (space < resid + clen &&
1235		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
1236			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
1237				SOCKBUF_UNLOCK(&so->so_snd);
1238				error = EWOULDBLOCK;
1239				goto release;
1240			}
1241			error = sbwait(&so->so_snd);
1242			SOCKBUF_UNLOCK(&so->so_snd);
1243			if (error)
1244				goto release;
1245			goto restart;
1246		}
1247		SOCKBUF_UNLOCK(&so->so_snd);
1248		space -= clen;
1249		do {
1250			if (uio == NULL) {
1251				resid = 0;
1252				if (flags & MSG_EOR)
1253					top->m_flags |= M_EOR;
1254			} else {
1255				/*
1256				 * Copy the data from userland into a mbuf
1257				 * chain.  If no data is to be copied in,
1258				 * a single empty mbuf is returned.
1259				 */
1260				top = m_uiotombuf(uio, M_WAITOK, space,
1261				    (atomic ? max_hdr : 0),
1262				    (atomic ? M_PKTHDR : 0) |
1263				    ((flags & MSG_EOR) ? M_EOR : 0));
1264				if (top == NULL) {
1265					error = EFAULT; /* only possible error */
1266					goto release;
1267				}
1268				space -= resid - uio->uio_resid;
1269				resid = uio->uio_resid;
1270			}
1271			if (dontroute) {
1272				SOCK_LOCK(so);
1273				so->so_options |= SO_DONTROUTE;
1274				SOCK_UNLOCK(so);
1275			}
1276			/*
1277			 * XXX all the SBS_CANTSENDMORE checks previously
1278			 * done could be out of date.  We could have received
1279			 * a reset packet in an interrupt or maybe we slept
1280			 * while doing page faults in uiomove() etc.  We
1281			 * could probably recheck again inside the locking
1282			 * protection here, but there are probably other
1283			 * places that this also happens.  We must rethink
1284			 * this.
1285			 */
1286			VNET_SO_ASSERT(so);
1287			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1288			    (flags & MSG_OOB) ? PRUS_OOB :
1289			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag, and there is nothing left to send, then
			 * use PRU_SEND_EOF instead of PRU_SEND.
1293			 */
1294			    ((flags & MSG_EOF) &&
1295			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1296			     (resid <= 0)) ?
1297				PRUS_EOF :
1298			/* If there is more to send set PRUS_MORETOCOME. */
1299			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1300			    top, addr, control, td);
1301			if (dontroute) {
1302				SOCK_LOCK(so);
1303				so->so_options &= ~SO_DONTROUTE;
1304				SOCK_UNLOCK(so);
1305			}
1306			clen = 0;
1307			control = NULL;
1308			top = NULL;
1309			if (error)
1310				goto release;
1311		} while (resid && space > 0);
1312	} while (resid);
1313
1314release:
1315	sbunlock(&so->so_snd);
1316out:
1317	if (top != NULL)
1318		m_freem(top);
1319	if (control != NULL)
1320		m_freem(control);
1321	return (error);
1322}
1323
1324int
1325sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
1326    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1327{
1328	int error;
1329
1330	CURVNET_SET(so->so_vnet);
1331	error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
1332	    control, flags, td);
1333	CURVNET_RESTORE();
1334	return (error);
1335}
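
/*
 * A minimal sketch (not compiled) of a kernel consumer handing a pre-built
 * mbuf chain to sosend() on an already-connected socket.  Callers supply
 * either a uio or an mbuf chain, never both; per the comment above
 * sosend_generic(), the data and control buffers are freed on return
 * whether or not the send succeeds.
 */
#if 0
static int
example_send_mbuf_chain(struct socket *so, struct mbuf *m, struct thread *td)
{

	/* sosend() consumes 'm'; the caller must not free it again. */
	return (sosend(so, NULL, NULL, m, NULL, 0, td));
}
#endif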
1336
1337/*
1338 * The part of soreceive() that implements reading non-inline out-of-band
1339 * data from a socket.  For more complete comments, see soreceive(), from
1340 * which this code originated.
1341 *
1342 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
1343 * unable to return an mbuf chain to the caller.
1344 */
1345static int
1346soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
1347{
1348	struct protosw *pr = so->so_proto;
1349	struct mbuf *m;
1350	int error;
1351
1352	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
1353	VNET_SO_ASSERT(so);
1354
1355	m = m_get(M_WAITOK, MT_DATA);
1356	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
1357	if (error)
1358		goto bad;
1359	do {
1360		error = uiomove(mtod(m, void *),
1361		    (int) min(uio->uio_resid, m->m_len), uio);
1362		m = m_free(m);
1363	} while (uio->uio_resid && error == 0 && m);
1364bad:
1365	if (m != NULL)
1366		m_freem(m);
1367	return (error);
1368}
1369
1370/*
1371 * Following replacement or removal of the first mbuf on the first mbuf chain
1372 * of a socket buffer, push necessary state changes back into the socket
1373 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
1375 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
1376 * NOTE: 'nextrecord' may be NULL.
1377 */
1378static __inline void
1379sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
1380{
1381
1382	SOCKBUF_LOCK_ASSERT(sb);
1383	/*
1384	 * First, update for the new value of nextrecord.  If necessary, make
1385	 * it the first record.
1386	 */
1387	if (sb->sb_mb != NULL)
1388		sb->sb_mb->m_nextpkt = nextrecord;
1389	else
1390		sb->sb_mb = nextrecord;
1391
1392	/*
1393	 * Now update any dependent socket buffer fields to reflect the new
1394	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
1395	 * addition of a second clause that takes care of the case where
1396	 * sb_mb has been updated, but remains the last record.
1397	 */
1398	if (sb->sb_mb == NULL) {
1399		sb->sb_mbtail = NULL;
1400		sb->sb_lastrecord = NULL;
1401	} else if (sb->sb_mb->m_nextpkt == NULL)
1402		sb->sb_lastrecord = sb->sb_mb;
1403}
1404
1405/*
1406 * Implement receive operations on a socket.  We depend on the way that
1407 * records are added to the sockbuf by sbappend.  In particular, each record
1408 * (mbufs linked through m_next) must begin with an address if the protocol
1409 * so specifies, followed by an optional mbuf or mbufs containing ancillary
1410 * data, and then zero or more mbufs of data.  In order to allow parallelism
1411 * between network receive and copying to user space, as well as avoid
1412 * sleeping with a mutex held, we release the socket buffer mutex during the
1413 * user space copy.  Although the sockbuf is locked, new data may still be
1414 * appended, and thus we must maintain consistency of the sockbuf during that
1415 * time.
1416 *
1417 * The caller may receive the data as a single mbuf chain by supplying an
1418 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
1419 * the count in uio_resid.
1420 */
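/*
 * Illustrative layout of a single record in the receive buffer for a
 * PR_ADDR protocol (e.g. a UDP datagram with ancillary data), sketching the
 * conventions described above rather than enumerating every legal layout:
 *
 *   sb_mb -> [MT_SONAME] --m_next--> [MT_CONTROL] --m_next--> [MT_DATA] ...
 *                 |
 *             m_nextpkt
 *                 |
 *                 v
 *             first mbuf of the next record, or NULL
 */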
1421int
1422soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
1423    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1424{
1425	struct mbuf *m, **mp;
1426	int flags, error, offset;
1427	ssize_t len;
1428	struct protosw *pr = so->so_proto;
1429	struct mbuf *nextrecord;
1430	int moff, type = 0;
1431	ssize_t orig_resid = uio->uio_resid;
1432
1433	mp = mp0;
1434	if (psa != NULL)
1435		*psa = NULL;
1436	if (controlp != NULL)
1437		*controlp = NULL;
1438	if (flagsp != NULL)
1439		flags = *flagsp &~ MSG_EOR;
1440	else
1441		flags = 0;
1442	if (flags & MSG_OOB)
1443		return (soreceive_rcvoob(so, uio, flags));
1444	if (mp != NULL)
1445		*mp = NULL;
1446	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
1447	    && uio->uio_resid) {
1448		VNET_SO_ASSERT(so);
1449		(*pr->pr_usrreqs->pru_rcvd)(so, 0);
1450	}
1451
1452	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1453	if (error)
1454		return (error);
1455
1456restart:
1457	SOCKBUF_LOCK(&so->so_rcv);
1458	m = so->so_rcv.sb_mb;
1459	/*
1460	 * If we have less data than requested, block awaiting more (subject
1461	 * to any timeout) if:
1462	 *   1. the current count is less than the low water mark, or
1463	 *   2. MSG_DONTWAIT is not set
1464	 */
1465	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1466	    so->so_rcv.sb_cc < uio->uio_resid) &&
1467	    so->so_rcv.sb_cc < so->so_rcv.sb_lowat &&
1468	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1469		KASSERT(m != NULL || !so->so_rcv.sb_cc,
1470		    ("receive: m == %p so->so_rcv.sb_cc == %u",
1471		    m, so->so_rcv.sb_cc));
1472		if (so->so_error) {
1473			if (m != NULL)
1474				goto dontblock;
1475			error = so->so_error;
1476			if ((flags & MSG_PEEK) == 0)
1477				so->so_error = 0;
1478			SOCKBUF_UNLOCK(&so->so_rcv);
1479			goto release;
1480		}
1481		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1482		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1483			if (m == NULL) {
1484				SOCKBUF_UNLOCK(&so->so_rcv);
1485				goto release;
1486			} else
1487				goto dontblock;
1488		}
1489		for (; m != NULL; m = m->m_next)
1490			if (m->m_type == MT_OOBDATA  || (m->m_flags & M_EOR)) {
1491				m = so->so_rcv.sb_mb;
1492				goto dontblock;
1493			}
1494		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1495		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1496			SOCKBUF_UNLOCK(&so->so_rcv);
1497			error = ENOTCONN;
1498			goto release;
1499		}
1500		if (uio->uio_resid == 0) {
1501			SOCKBUF_UNLOCK(&so->so_rcv);
1502			goto release;
1503		}
1504		if ((so->so_state & SS_NBIO) ||
1505		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1506			SOCKBUF_UNLOCK(&so->so_rcv);
1507			error = EWOULDBLOCK;
1508			goto release;
1509		}
1510		SBLASTRECORDCHK(&so->so_rcv);
1511		SBLASTMBUFCHK(&so->so_rcv);
1512		error = sbwait(&so->so_rcv);
1513		SOCKBUF_UNLOCK(&so->so_rcv);
1514		if (error)
1515			goto release;
1516		goto restart;
1517	}
1518dontblock:
1519	/*
1520	 * From this point onward, we maintain 'nextrecord' as a cache of the
1521	 * pointer to the next record in the socket buffer.  We must keep the
1522	 * various socket buffer pointers and local stack versions of the
1523	 * pointers in sync, pushing out modifications before dropping the
1524	 * socket buffer mutex, and re-reading them when picking it up.
1525	 *
1526	 * Otherwise, we will race with the network stack appending new data
1527	 * or records onto the socket buffer by using inconsistent/stale
1528	 * versions of the field, possibly resulting in socket buffer
1529	 * corruption.
1530	 *
1531	 * By holding the high-level sblock(), we prevent simultaneous
1532	 * readers from pulling off the front of the socket buffer.
1533	 */
1534	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1535	if (uio->uio_td)
1536		uio->uio_td->td_ru.ru_msgrcv++;
1537	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1538	SBLASTRECORDCHK(&so->so_rcv);
1539	SBLASTMBUFCHK(&so->so_rcv);
1540	nextrecord = m->m_nextpkt;
1541	if (pr->pr_flags & PR_ADDR) {
1542		KASSERT(m->m_type == MT_SONAME,
1543		    ("m->m_type == %d", m->m_type));
1544		orig_resid = 0;
1545		if (psa != NULL)
1546			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
1547			    M_NOWAIT);
1548		if (flags & MSG_PEEK) {
1549			m = m->m_next;
1550		} else {
1551			sbfree(&so->so_rcv, m);
1552			so->so_rcv.sb_mb = m_free(m);
1553			m = so->so_rcv.sb_mb;
1554			sockbuf_pushsync(&so->so_rcv, nextrecord);
1555		}
1556	}
1557
1558	/*
1559	 * Process one or more MT_CONTROL mbufs present before any data mbufs
1560	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
1561	 * just copy the data; if !MSG_PEEK, we call into the protocol to
1562	 * perform externalization (or freeing if controlp == NULL).
1563	 */
1564	if (m != NULL && m->m_type == MT_CONTROL) {
1565		struct mbuf *cm = NULL, *cmn;
1566		struct mbuf **cme = &cm;
1567
1568		do {
1569			if (flags & MSG_PEEK) {
1570				if (controlp != NULL) {
1571					*controlp = m_copy(m, 0, m->m_len);
1572					controlp = &(*controlp)->m_next;
1573				}
1574				m = m->m_next;
1575			} else {
1576				sbfree(&so->so_rcv, m);
1577				so->so_rcv.sb_mb = m->m_next;
1578				m->m_next = NULL;
1579				*cme = m;
1580				cme = &(*cme)->m_next;
1581				m = so->so_rcv.sb_mb;
1582			}
1583		} while (m != NULL && m->m_type == MT_CONTROL);
1584		if ((flags & MSG_PEEK) == 0)
1585			sockbuf_pushsync(&so->so_rcv, nextrecord);
1586		while (cm != NULL) {
1587			cmn = cm->m_next;
1588			cm->m_next = NULL;
1589			if (pr->pr_domain->dom_externalize != NULL) {
1590				SOCKBUF_UNLOCK(&so->so_rcv);
1591				VNET_SO_ASSERT(so);
1592				error = (*pr->pr_domain->dom_externalize)
1593				    (cm, controlp, flags);
1594				SOCKBUF_LOCK(&so->so_rcv);
1595			} else if (controlp != NULL)
1596				*controlp = cm;
1597			else
1598				m_freem(cm);
1599			if (controlp != NULL) {
1600				orig_resid = 0;
1601				while (*controlp != NULL)
1602					controlp = &(*controlp)->m_next;
1603			}
1604			cm = cmn;
1605		}
1606		if (m != NULL)
1607			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1608		else
1609			nextrecord = so->so_rcv.sb_mb;
1610		orig_resid = 0;
1611	}
1612	if (m != NULL) {
1613		if ((flags & MSG_PEEK) == 0) {
1614			KASSERT(m->m_nextpkt == nextrecord,
1615			    ("soreceive: post-control, nextrecord !sync"));
1616			if (nextrecord == NULL) {
1617				KASSERT(so->so_rcv.sb_mb == m,
1618				    ("soreceive: post-control, sb_mb!=m"));
1619				KASSERT(so->so_rcv.sb_lastrecord == m,
1620				    ("soreceive: post-control, lastrecord!=m"));
1621			}
1622		}
1623		type = m->m_type;
1624		if (type == MT_OOBDATA)
1625			flags |= MSG_OOB;
1626	} else {
1627		if ((flags & MSG_PEEK) == 0) {
1628			KASSERT(so->so_rcv.sb_mb == nextrecord,
1629			    ("soreceive: sb_mb != nextrecord"));
1630			if (so->so_rcv.sb_mb == NULL) {
1631				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
1633			}
1634		}
1635	}
1636	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1637	SBLASTRECORDCHK(&so->so_rcv);
1638	SBLASTMBUFCHK(&so->so_rcv);
1639
1640	/*
1641	 * Now continue to read any data mbufs off of the head of the socket
1642	 * buffer until the read request is satisfied.  Note that 'type' is
1643	 * used to store the type of any mbuf reads that have happened so far
1644	 * such that soreceive() can stop reading if the type changes, which
1645	 * causes soreceive() to return only one of regular data and inline
1646	 * out-of-band data in a single socket receive operation.
1647	 */
1648	moff = 0;
1649	offset = 0;
1650	while (m != NULL && uio->uio_resid > 0 && error == 0) {
1651		/*
1652		 * If the type of mbuf has changed since the last mbuf
1653		 * examined ('type'), end the receive operation.
1654		 */
1655		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1656		if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
1657			if (type != m->m_type)
1658				break;
1659		} else if (type == MT_OOBDATA)
1660			break;
1661		else
1662		    KASSERT(m->m_type == MT_DATA,
1663			("m->m_type == %d", m->m_type));
1664		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1665		len = uio->uio_resid;
1666		if (so->so_oobmark && len > so->so_oobmark - offset)
1667			len = so->so_oobmark - offset;
1668		if (len > m->m_len - moff)
1669			len = m->m_len - moff;
1670		/*
1671		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  The sockbuf must be
		 * consistent here (sb_mb points to the current mbuf and
		 * nextrecord to the next record) when we drop the lock; we
		 * must note any additions to the sockbuf when we reacquire it.
1676		 */
1677		if (mp == NULL) {
1678			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1679			SBLASTRECORDCHK(&so->so_rcv);
1680			SBLASTMBUFCHK(&so->so_rcv);
1681			SOCKBUF_UNLOCK(&so->so_rcv);
1682			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1683			SOCKBUF_LOCK(&so->so_rcv);
1684			if (error) {
1685				/*
1686				 * The MT_SONAME mbuf has already been removed
1687				 * from the record, so it is necessary to
1688				 * remove the data mbufs, if any, to preserve
1689				 * the invariant in the case of PR_ADDR that
1690				 * requires MT_SONAME mbufs at the head of
1691				 * each record.
1692				 */
1693				if (m && pr->pr_flags & PR_ATOMIC &&
1694				    ((flags & MSG_PEEK) == 0))
1695					(void)sbdroprecord_locked(&so->so_rcv);
1696				SOCKBUF_UNLOCK(&so->so_rcv);
1697				goto release;
1698			}
1699		} else
1700			uio->uio_resid -= len;
1701		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1702		if (len == m->m_len - moff) {
1703			if (m->m_flags & M_EOR)
1704				flags |= MSG_EOR;
1705			if (flags & MSG_PEEK) {
1706				m = m->m_next;
1707				moff = 0;
1708			} else {
1709				nextrecord = m->m_nextpkt;
1710				sbfree(&so->so_rcv, m);
1711				if (mp != NULL) {
1712					m->m_nextpkt = NULL;
1713					*mp = m;
1714					mp = &m->m_next;
1715					so->so_rcv.sb_mb = m = m->m_next;
1716					*mp = NULL;
1717				} else {
1718					so->so_rcv.sb_mb = m_free(m);
1719					m = so->so_rcv.sb_mb;
1720				}
1721				sockbuf_pushsync(&so->so_rcv, nextrecord);
1722				SBLASTRECORDCHK(&so->so_rcv);
1723				SBLASTMBUFCHK(&so->so_rcv);
1724			}
1725		} else {
1726			if (flags & MSG_PEEK)
1727				moff += len;
1728			else {
1729				if (mp != NULL) {
1730					int copy_flag;
1731
1732					if (flags & MSG_DONTWAIT)
1733						copy_flag = M_NOWAIT;
1734					else
						copy_flag = M_WAITOK;
1736					if (copy_flag == M_WAITOK)
1737						SOCKBUF_UNLOCK(&so->so_rcv);
1738					*mp = m_copym(m, 0, len, copy_flag);
1739					if (copy_flag == M_WAITOK)
1740						SOCKBUF_LOCK(&so->so_rcv);
1741					if (*mp == NULL) {
1742						/*
1743						 * m_copym() couldn't
1744						 * allocate an mbuf.  Adjust
1745						 * uio_resid back (it was
1746						 * adjusted down by len
1747						 * bytes, which we didn't end
1748						 * up "copying" over).
1749						 */
1750						uio->uio_resid += len;
1751						break;
1752					}
1753				}
1754				m->m_data += len;
1755				m->m_len -= len;
1756				so->so_rcv.sb_cc -= len;
1757			}
1758		}
1759		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1760		if (so->so_oobmark) {
1761			if ((flags & MSG_PEEK) == 0) {
1762				so->so_oobmark -= len;
1763				if (so->so_oobmark == 0) {
1764					so->so_rcv.sb_state |= SBS_RCVATMARK;
1765					break;
1766				}
1767			} else {
1768				offset += len;
1769				if (offset == so->so_oobmark)
1770					break;
1771			}
1772		}
1773		if (flags & MSG_EOR)
1774			break;
1775		/*
	 * If the MSG_WAITALL flag is set (for a non-atomic socket), we
1777		 * must not quit until "uio->uio_resid == 0" or an error
1778		 * termination.  If a signal/timeout occurs, return with a
1779		 * short count but without error.  Keep sockbuf locked
1780		 * against other readers.
1781		 */
1782		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1783		    !sosendallatonce(so) && nextrecord == NULL) {
1784			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1785			if (so->so_error ||
1786			    so->so_rcv.sb_state & SBS_CANTRCVMORE)
1787				break;
1788			/*
1789			 * Notify the protocol that some data has been
1790			 * drained before blocking.
1791			 */
1792			if (pr->pr_flags & PR_WANTRCVD) {
1793				SOCKBUF_UNLOCK(&so->so_rcv);
1794				VNET_SO_ASSERT(so);
1795				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1796				SOCKBUF_LOCK(&so->so_rcv);
1797			}
1798			SBLASTRECORDCHK(&so->so_rcv);
1799			SBLASTMBUFCHK(&so->so_rcv);
1800			/*
1801			 * We could receive some data while was notifying
1802			 * the protocol. Skip blocking in this case.
1803			 */
1804			if (so->so_rcv.sb_mb == NULL) {
1805				error = sbwait(&so->so_rcv);
1806				if (error) {
1807					SOCKBUF_UNLOCK(&so->so_rcv);
1808					goto release;
1809				}
1810			}
1811			m = so->so_rcv.sb_mb;
1812			if (m != NULL)
1813				nextrecord = m->m_nextpkt;
1814		}
1815	}
1816
1817	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
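	/*
	 * At this point the request has been satisfied, or the loop above
	 * exited early on MSG_EOR, the out-of-band mark, a change of mbuf
	 * type, or exhaustion of buffered data.  For atomic protocols, any
	 * unread remainder of the current record is discarded below and
	 * reported to the caller via MSG_TRUNC.
	 */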
1818	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1819		flags |= MSG_TRUNC;
1820		if ((flags & MSG_PEEK) == 0)
1821			(void) sbdroprecord_locked(&so->so_rcv);
1822	}
1823	if ((flags & MSG_PEEK) == 0) {
1824		if (m == NULL) {
1825			/*
1826			 * First part is an inline SB_EMPTY_FIXUP().  Second
1827			 * part makes sure sb_lastrecord is up-to-date if
1828			 * there is still data in the socket buffer.
1829			 */
1830			so->so_rcv.sb_mb = nextrecord;
1831			if (so->so_rcv.sb_mb == NULL) {
1832				so->so_rcv.sb_mbtail = NULL;
1833				so->so_rcv.sb_lastrecord = NULL;
1834			} else if (nextrecord->m_nextpkt == NULL)
1835				so->so_rcv.sb_lastrecord = nextrecord;
1836		}
1837		SBLASTRECORDCHK(&so->so_rcv);
1838		SBLASTMBUFCHK(&so->so_rcv);
1839		/*
1840		 * If soreceive() is being done from the socket callback,
1841		 * then don't need to generate ACK to peer to update window,
1842		 * since ACK will be generated on return to TCP.
1843		 */
1844		if (!(flags & MSG_SOCALLBCK) &&
1845		    (pr->pr_flags & PR_WANTRCVD)) {
1846			SOCKBUF_UNLOCK(&so->so_rcv);
1847			VNET_SO_ASSERT(so);
1848			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1849			SOCKBUF_LOCK(&so->so_rcv);
1850		}
1851	}
1852	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1853	if (orig_resid == uio->uio_resid && orig_resid &&
1854	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1855		SOCKBUF_UNLOCK(&so->so_rcv);
1856		goto restart;
1857	}
1858	SOCKBUF_UNLOCK(&so->so_rcv);
1859
1860	if (flagsp != NULL)
1861		*flagsp |= flags;
1862release:
1863	sbunlock(&so->so_rcv);
1864	return (error);
1865}
1866
1867/*
1868 * Optimized version of soreceive() for stream (TCP) sockets.
1869 * XXXAO: (MSG_WAITALL | MSG_PEEK) isn't properly handled.
1870 */
1871int
1872soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
1873    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1874{
1875	int len = 0, error = 0, flags, oresid;
1876	struct sockbuf *sb;
1877	struct mbuf *m, *n = NULL;
1878
1879	/* We only do stream sockets. */
1880	if (so->so_type != SOCK_STREAM)
1881		return (EINVAL);
1882	if (psa != NULL)
1883		*psa = NULL;
1884	if (controlp != NULL)
1885		return (EINVAL);
1886	if (flagsp != NULL)
1887		flags = *flagsp &~ MSG_EOR;
1888	else
1889		flags = 0;
1890	if (flags & MSG_OOB)
1891		return (soreceive_rcvoob(so, uio, flags));
1892	if (mp0 != NULL)
1893		*mp0 = NULL;
1894
1895	sb = &so->so_rcv;
1896
1897	/* Prevent other readers from entering the socket. */
1898	error = sblock(sb, SBLOCKWAIT(flags));
1899	if (error)
1900		goto out;
1901	SOCKBUF_LOCK(sb);
1902
1903	/* Easy one, no space to copyout anything. */
1904	if (uio->uio_resid == 0) {
1905		error = EINVAL;
1906		goto out;
1907	}
1908	oresid = uio->uio_resid;
1909
1910	/* We will never ever get anything unless we are or were connected. */
1911	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
1912		error = ENOTCONN;
1913		goto out;
1914	}
1915
1916restart:
1917	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1918
1919	/* Abort if socket has reported problems. */
1920	if (so->so_error) {
1921		if (sb->sb_cc > 0)
1922			goto deliver;
1923		if (oresid > uio->uio_resid)
1924			goto out;
1925		error = so->so_error;
1926		if (!(flags & MSG_PEEK))
1927			so->so_error = 0;
1928		goto out;
1929	}
1930
1931	/* Door is closed.  Deliver what is left, if any. */
1932	if (sb->sb_state & SBS_CANTRCVMORE) {
1933		if (sb->sb_cc > 0)
1934			goto deliver;
1935		else
1936			goto out;
1937	}
1938
1939	/* Socket buffer is empty and we shall not block. */
1940	if (sb->sb_cc == 0 &&
1941	    ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
1942		error = EAGAIN;
1943		goto out;
1944	}
1945
1946	/* Socket buffer got some data that we shall deliver now. */
1947	if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
1948	    ((so->so_state & SS_NBIO) ||
1949	     (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
1950	     sb->sb_cc >= sb->sb_lowat ||
1951	     sb->sb_cc >= uio->uio_resid ||
	     sb->sb_cc >= sb->sb_hiwat)) {
1953		goto deliver;
1954	}
1955
1956	/* On MSG_WAITALL we must wait until all data or error arrives. */
1957	if ((flags & MSG_WAITALL) &&
1958	    (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_hiwat))
1959		goto deliver;
1960
1961	/*
1962	 * Wait and block until (more) data comes in.
1963	 * NB: Drops the sockbuf lock during wait.
1964	 */
1965	error = sbwait(sb);
1966	if (error)
1967		goto out;
1968	goto restart;
1969
1970deliver:
1971	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1972	KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
1973	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
1974
1975	/* Statistics. */
1976	if (uio->uio_td)
1977		uio->uio_td->td_ru.ru_msgrcv++;
1978
1979	/* Fill uio until full or current end of socket buffer is reached. */
1980	len = min(uio->uio_resid, sb->sb_cc);
1981	if (mp0 != NULL) {
1982		/* Dequeue as many mbufs as possible. */
1983		if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
1984			if (*mp0 == NULL)
1985				*mp0 = sb->sb_mb;
1986			else
1987				m_cat(*mp0, sb->sb_mb);
1988			for (m = sb->sb_mb;
1989			     m != NULL && m->m_len <= len;
1990			     m = m->m_next) {
1991				len -= m->m_len;
1992				uio->uio_resid -= m->m_len;
1993				sbfree(sb, m);
1994				n = m;
1995			}
1996			n->m_next = NULL;
1997			sb->sb_mb = m;
1998			sb->sb_lastrecord = sb->sb_mb;
1999			if (sb->sb_mb == NULL)
2000				SB_EMPTY_FIXUP(sb);
2001		}
2002		/* Copy the remainder. */
2003		if (len > 0) {
2004			KASSERT(sb->sb_mb != NULL,
2005			    ("%s: len > 0 && sb->sb_mb empty", __func__));
2006
2007			m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
2008			if (m == NULL)
2009				len = 0;	/* Don't flush data from sockbuf. */
2010			else
2011				uio->uio_resid -= len;
2012			if (*mp0 != NULL)
2013				m_cat(*mp0, m);
2014			else
2015				*mp0 = m;
2016			if (*mp0 == NULL) {
2017				error = ENOBUFS;
2018				goto out;
2019			}
2020		}
2021	} else {
2022		/* NB: Must unlock socket buffer as uiomove may sleep. */
2023		SOCKBUF_UNLOCK(sb);
2024		error = m_mbuftouio(uio, sb->sb_mb, len);
2025		SOCKBUF_LOCK(sb);
2026		if (error)
2027			goto out;
2028	}
2029	SBLASTRECORDCHK(sb);
2030	SBLASTMBUFCHK(sb);
2031
2032	/*
2033	 * Remove the delivered data from the socket buffer unless we
2034	 * were only peeking.
2035	 */
2036	if (!(flags & MSG_PEEK)) {
2037		if (len > 0)
2038			sbdrop_locked(sb, len);
2039
2040		/* Notify protocol that we drained some data. */
2041		if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
2042		    (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
2043		     !(flags & MSG_SOCALLBCK))) {
2044			SOCKBUF_UNLOCK(sb);
2045			VNET_SO_ASSERT(so);
2046			(*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
2047			SOCKBUF_LOCK(sb);
2048		}
2049	}
2050
2051	/*
2052	 * For MSG_WAITALL we may have to loop again and wait for
2053	 * more data to come in.
2054	 */
2055	if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
2056		goto restart;
2057out:
2058	SOCKBUF_LOCK_ASSERT(sb);
2059	SBLASTRECORDCHK(sb);
2060	SBLASTMBUFCHK(sb);
2061	SOCKBUF_UNLOCK(sb);
2062	sbunlock(sb);
2063	return (error);
2064}
2065
2066/*
2067 * Optimized version of soreceive() for simple datagram cases from userspace.
2068 * Unlike in the stream case, we're able to drop a datagram if copyout()
2069 * fails, and because we handle datagrams atomically, we don't need to use a
2070 * sleep lock to prevent I/O interlacing.
2071 */
2072int
2073soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
2074    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2075{
2076	struct mbuf *m, *m2;
2077	int flags, error;
2078	ssize_t len;
2079	struct protosw *pr = so->so_proto;
2080	struct mbuf *nextrecord;
2081
2082	if (psa != NULL)
2083		*psa = NULL;
2084	if (controlp != NULL)
2085		*controlp = NULL;
2086	if (flagsp != NULL)
2087		flags = *flagsp &~ MSG_EOR;
2088	else
2089		flags = 0;
2090
2091	/*
2092	 * For any complicated cases, fall back to the full
2093	 * soreceive_generic().
2094	 */
2095	if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
2096		return (soreceive_generic(so, psa, uio, mp0, controlp,
2097		    flagsp));
2098
2099	/*
2100	 * Enforce restrictions on use.
2101	 */
2102	KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
2103	    ("soreceive_dgram: wantrcvd"));
2104	KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
2105	KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
2106	    ("soreceive_dgram: SBS_RCVATMARK"));
2107	KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
2108	    ("soreceive_dgram: P_CONNREQUIRED"));
2109
2110	/*
2111	 * Loop blocking while waiting for a datagram.
2112	 */
2113	SOCKBUF_LOCK(&so->so_rcv);
2114	while ((m = so->so_rcv.sb_mb) == NULL) {
2115		KASSERT(so->so_rcv.sb_cc == 0,
2116		    ("soreceive_dgram: sb_mb NULL but sb_cc %u",
2117		    so->so_rcv.sb_cc));
2118		if (so->so_error) {
2119			error = so->so_error;
2120			so->so_error = 0;
2121			SOCKBUF_UNLOCK(&so->so_rcv);
2122			return (error);
2123		}
2124		if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
2125		    uio->uio_resid == 0) {
2126			SOCKBUF_UNLOCK(&so->so_rcv);
2127			return (0);
2128		}
2129		if ((so->so_state & SS_NBIO) ||
2130		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2131			SOCKBUF_UNLOCK(&so->so_rcv);
2132			return (EWOULDBLOCK);
2133		}
2134		SBLASTRECORDCHK(&so->so_rcv);
2135		SBLASTMBUFCHK(&so->so_rcv);
2136		error = sbwait(&so->so_rcv);
2137		if (error) {
2138			SOCKBUF_UNLOCK(&so->so_rcv);
2139			return (error);
2140		}
2141	}
2142	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2143
2144	if (uio->uio_td)
2145		uio->uio_td->td_ru.ru_msgrcv++;
2146	SBLASTRECORDCHK(&so->so_rcv);
2147	SBLASTMBUFCHK(&so->so_rcv);
2148	nextrecord = m->m_nextpkt;
2149	if (nextrecord == NULL) {
2150		KASSERT(so->so_rcv.sb_lastrecord == m,
2151		    ("soreceive_dgram: lastrecord != m"));
2152	}
2153
2154	KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
2155	    ("soreceive_dgram: m_nextpkt != nextrecord"));
2156
2157	/*
2158	 * Pull 'm' and its chain off the front of the packet queue.
2159	 */
2160	so->so_rcv.sb_mb = NULL;
2161	sockbuf_pushsync(&so->so_rcv, nextrecord);
2162
2163	/*
2164	 * Walk 'm's chain and free that many bytes from the socket buffer.
2165	 */
2166	for (m2 = m; m2 != NULL; m2 = m2->m_next)
2167		sbfree(&so->so_rcv, m2);
2168
2169	/*
2170	 * Do a few last checks before we let go of the lock.
2171	 */
2172	SBLASTRECORDCHK(&so->so_rcv);
2173	SBLASTMBUFCHK(&so->so_rcv);
2174	SOCKBUF_UNLOCK(&so->so_rcv);
2175
2176	if (pr->pr_flags & PR_ADDR) {
2177		KASSERT(m->m_type == MT_SONAME,
2178		    ("m->m_type == %d", m->m_type));
2179		if (psa != NULL)
2180			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
2181			    M_NOWAIT);
2182		m = m_free(m);
2183	}
2184	if (m == NULL) {
2185		/* XXXRW: Can this happen? */
2186		return (0);
2187	}
2188
2189	/*
2190	 * Packet to copyout() is now in 'm' and it is disconnected from the
2191	 * queue.
2192	 *
2193	 * Process one or more MT_CONTROL mbufs present before any data mbufs
2194	 * in the first mbuf chain on the socket buffer.  We call into the
2195	 * protocol to perform externalization (or freeing if controlp ==
2196	 * NULL). In some cases there can be only MT_CONTROL mbufs without
2197	 * MT_DATA mbufs.
2198	 */
2199	if (m->m_type == MT_CONTROL) {
2200		struct mbuf *cm = NULL, *cmn;
2201		struct mbuf **cme = &cm;
2202
2203		do {
2204			m2 = m->m_next;
2205			m->m_next = NULL;
2206			*cme = m;
2207			cme = &(*cme)->m_next;
2208			m = m2;
2209		} while (m != NULL && m->m_type == MT_CONTROL);
2210		while (cm != NULL) {
2211			cmn = cm->m_next;
2212			cm->m_next = NULL;
2213			if (pr->pr_domain->dom_externalize != NULL) {
2214				error = (*pr->pr_domain->dom_externalize)
2215				    (cm, controlp, flags);
2216			} else if (controlp != NULL)
2217				*controlp = cm;
2218			else
2219				m_freem(cm);
2220			if (controlp != NULL) {
2221				while (*controlp != NULL)
2222					controlp = &(*controlp)->m_next;
2223			}
2224			cm = cmn;
2225		}
2226	}
2227	KASSERT(m == NULL || m->m_type == MT_DATA,
2228	    ("soreceive_dgram: !data"));
2229	while (m != NULL && uio->uio_resid > 0) {
2230		len = uio->uio_resid;
2231		if (len > m->m_len)
2232			len = m->m_len;
2233		error = uiomove(mtod(m, char *), (int)len, uio);
2234		if (error) {
2235			m_freem(m);
2236			return (error);
2237		}
2238		if (len == m->m_len)
2239			m = m_free(m);
2240		else {
2241			m->m_data += len;
2242			m->m_len -= len;
2243		}
2244	}
2245	if (m != NULL) {
2246		flags |= MSG_TRUNC;
2247		m_freem(m);
2248	}
2249	if (flagsp != NULL)
2250		*flagsp |= flags;
2251	return (0);
2252}
2253
2254int
2255soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
2256    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2257{
2258	int error;
2259
2260	CURVNET_SET(so->so_vnet);
2261	error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
2262	    controlp, flagsp));
2263	CURVNET_RESTORE();
2264	return (error);
2265}
2266
2267int
2268soshutdown(struct socket *so, int how)
2269{
2270	struct protosw *pr = so->so_proto;
2271	int error;
2272
2273	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2274		return (EINVAL);
2275
2276	CURVNET_SET(so->so_vnet);
2277	if (pr->pr_usrreqs->pru_flush != NULL)
2278		(*pr->pr_usrreqs->pru_flush)(so, how);
2279	if (how != SHUT_WR)
2280		sorflush(so);
2281	if (how != SHUT_RD) {
2282		error = (*pr->pr_usrreqs->pru_shutdown)(so);
2283		wakeup(&so->so_timeo);
2284		CURVNET_RESTORE();
2285		return (error);
2286	}
2287	wakeup(&so->so_timeo);
2288	CURVNET_RESTORE();
2289	return (0);
2290}
2291
2292void
2293sorflush(struct socket *so)
2294{
2295	struct sockbuf *sb = &so->so_rcv;
2296	struct protosw *pr = so->so_proto;
2297	struct socket aso;
2298
2299	VNET_SO_ASSERT(so);
2300
2301	/*
2302	 * In order to avoid calling dom_dispose with the socket buffer mutex
2303	 * held, and in order to generally avoid holding the lock for a long
2304	 * time, we make a copy of the socket buffer and clear the original
2305	 * (except locks, state).  The new socket buffer copy won't have
2306	 * initialized locks so we can only call routines that won't use or
2307	 * assert those locks.
2308	 *
2309	 * Dislodge threads currently blocked in receive and wait to acquire
2310	 * a lock against other simultaneous readers before clearing the
2311	 * socket buffer.  Don't let our acquire be interrupted by a signal
2312	 * despite any existing socket disposition on interruptable waiting.
2313	 */
2314	socantrcvmore(so);
2315	(void) sblock(sb, SBL_WAIT | SBL_NOINTR);
2316
2317	/*
2318	 * Invalidate/clear most of the sockbuf structure, but leave selinfo
2319	 * and mutex data unchanged.
2320	 */
2321	SOCKBUF_LOCK(sb);
2322	bzero(&aso, sizeof(aso));
2323	aso.so_pcb = so->so_pcb;
2324	bcopy(&sb->sb_startzero, &aso.so_rcv.sb_startzero,
2325	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2326	bzero(&sb->sb_startzero,
2327	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2328	SOCKBUF_UNLOCK(sb);
2329	sbunlock(sb);
2330
2331	/*
2332	 * Dispose of special rights and flush the copied socket.  Don't call
2333	 * any unsafe routines (that rely on locks being initialized) on aso.
2334	 */
2335	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) {
2336		if (pr->pr_domain->dom_family == AF_LOCAL)
2337			unp_dispose_so(&aso);
2338		else
2339			(*pr->pr_domain->dom_dispose)(aso.so_rcv.sb_mb);
2340	}
2341	sbrelease_internal(&aso.so_rcv, so);
2342}
2343
2344/*
2345 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
2346 * additional variant to handle the case where the option value needs to be
2347 * some kind of integer, but not a specific size.  In addition to their use
2348 * here, these functions are also called by the protocol-level pr_ctloutput()
2349 * routines.
2350 */
2351int
2352sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2353{
2354	size_t	valsize;
2355
2356	/*
2357	 * If the user gives us more than we wanted, we ignore it, but if we
2358	 * don't get the minimum length the caller wants, we return EINVAL.
2359	 * On success, sopt->sopt_valsize is set to however much we actually
2360	 * retrieved.
2361	 */
2362	if ((valsize = sopt->sopt_valsize) < minlen)
2363		return EINVAL;
2364	if (valsize > len)
2365		sopt->sopt_valsize = valsize = len;
2366
2367	if (sopt->sopt_td != NULL)
2368		return (copyin(sopt->sopt_val, buf, valsize));
2369
2370	bcopy(sopt->sopt_val, buf, valsize);
2371	return (0);
2372}
2373
2374/*
2375 * Kernel version of setsockopt(2).
2376 *
2377 * XXX: optlen is size_t, not socklen_t
2378 */
2379int
2380so_setsockopt(struct socket *so, int level, int optname, void *optval,
2381    size_t optlen)
2382{
2383	struct sockopt sopt;
2384
2385	sopt.sopt_level = level;
2386	sopt.sopt_name = optname;
2387	sopt.sopt_dir = SOPT_SET;
2388	sopt.sopt_val = optval;
2389	sopt.sopt_valsize = optlen;
2390	sopt.sopt_td = NULL;
2391	return (sosetopt(so, &sopt));
2392}
2393
2394int
2395sosetopt(struct socket *so, struct sockopt *sopt)
2396{
2397	int	error, optval;
2398	struct	linger l;
2399	struct	timeval tv;
2400	sbintime_t val;
2401	uint32_t val32;
2402#ifdef MAC
2403	struct mac extmac;
2404#endif
2405
2406	CURVNET_SET(so->so_vnet);
2407	error = 0;
2408	if (sopt->sopt_level != SOL_SOCKET) {
2409		if (so->so_proto->pr_ctloutput != NULL) {
2410			error = (*so->so_proto->pr_ctloutput)(so, sopt);
2411			CURVNET_RESTORE();
2412			return (error);
2413		}
2414		error = ENOPROTOOPT;
2415	} else {
2416		switch (sopt->sopt_name) {
2417#ifdef INET
2418		case SO_ACCEPTFILTER:
2419			error = do_setopt_accept_filter(so, sopt);
2420			if (error)
2421				goto bad;
2422			break;
2423#endif
2424		case SO_LINGER:
2425			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
2426			if (error)
2427				goto bad;
2428
2429			SOCK_LOCK(so);
2430			so->so_linger = l.l_linger;
2431			if (l.l_onoff)
2432				so->so_options |= SO_LINGER;
2433			else
2434				so->so_options &= ~SO_LINGER;
2435			SOCK_UNLOCK(so);
2436			break;
2437
2438		case SO_DEBUG:
2439		case SO_KEEPALIVE:
2440		case SO_DONTROUTE:
2441		case SO_USELOOPBACK:
2442		case SO_BROADCAST:
2443		case SO_REUSEADDR:
2444		case SO_REUSEPORT:
2445		case SO_OOBINLINE:
2446		case SO_TIMESTAMP:
2447		case SO_BINTIME:
2448		case SO_NOSIGPIPE:
2449		case SO_NO_DDP:
2450		case SO_NO_OFFLOAD:
2451			error = sooptcopyin(sopt, &optval, sizeof optval,
2452			    sizeof optval);
2453			if (error)
2454				goto bad;
2455			SOCK_LOCK(so);
2456			if (optval)
2457				so->so_options |= sopt->sopt_name;
2458			else
2459				so->so_options &= ~sopt->sopt_name;
2460			SOCK_UNLOCK(so);
2461			break;
2462
2463		case SO_SETFIB:
2464			error = sooptcopyin(sopt, &optval, sizeof optval,
2465			    sizeof optval);
2466			if (error)
2467				goto bad;
2468
2469			if (optval < 0 || optval >= rt_numfibs) {
2470				error = EINVAL;
2471				goto bad;
2472			}
2473			if (((so->so_proto->pr_domain->dom_family == PF_INET) ||
2474			   (so->so_proto->pr_domain->dom_family == PF_INET6) ||
2475			   (so->so_proto->pr_domain->dom_family == PF_ROUTE)))
2476				so->so_fibnum = optval;
2477			else
2478				so->so_fibnum = 0;
2479			break;
2480
2481		case SO_USER_COOKIE:
2482			error = sooptcopyin(sopt, &val32, sizeof val32,
2483			    sizeof val32);
2484			if (error)
2485				goto bad;
2486			so->so_user_cookie = val32;
2487			break;
2488
2489		case SO_SNDBUF:
2490		case SO_RCVBUF:
2491		case SO_SNDLOWAT:
2492		case SO_RCVLOWAT:
2493			error = sooptcopyin(sopt, &optval, sizeof optval,
2494			    sizeof optval);
2495			if (error)
2496				goto bad;
2497
2498			/*
2499			 * Values < 1 make no sense for any of these options,
2500			 * so disallow them.
2501			 */
2502			if (optval < 1) {
2503				error = EINVAL;
2504				goto bad;
2505			}
2506
2507			switch (sopt->sopt_name) {
2508			case SO_SNDBUF:
2509			case SO_RCVBUF:
2510				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
2511				    &so->so_snd : &so->so_rcv, (u_long)optval,
2512				    so, curthread) == 0) {
2513					error = ENOBUFS;
2514					goto bad;
2515				}
2516				(sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
2517				    &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
2518				break;
2519
2520			/*
2521			 * Make sure the low-water is never greater than the
2522			 * high-water.
2523			 */
2524			case SO_SNDLOWAT:
2525				SOCKBUF_LOCK(&so->so_snd);
2526				so->so_snd.sb_lowat =
2527				    (optval > so->so_snd.sb_hiwat) ?
2528				    so->so_snd.sb_hiwat : optval;
2529				SOCKBUF_UNLOCK(&so->so_snd);
2530				break;
2531			case SO_RCVLOWAT:
2532				SOCKBUF_LOCK(&so->so_rcv);
2533				so->so_rcv.sb_lowat =
2534				    (optval > so->so_rcv.sb_hiwat) ?
2535				    so->so_rcv.sb_hiwat : optval;
2536				SOCKBUF_UNLOCK(&so->so_rcv);
2537				break;
2538			}
2539			break;
2540
2541		case SO_SNDTIMEO:
2542		case SO_RCVTIMEO:
2543#ifdef COMPAT_FREEBSD32
2544			if (SV_CURPROC_FLAG(SV_ILP32)) {
2545				struct timeval32 tv32;
2546
2547				error = sooptcopyin(sopt, &tv32, sizeof tv32,
2548				    sizeof tv32);
2549				CP(tv32, tv, tv_sec);
2550				CP(tv32, tv, tv_usec);
2551			} else
2552#endif
2553				error = sooptcopyin(sopt, &tv, sizeof tv,
2554				    sizeof tv);
2555			if (error)
2556				goto bad;
2557			if (tv.tv_sec < 0 || tv.tv_usec < 0 ||
2558			    tv.tv_usec >= 1000000) {
2559				error = EDOM;
2560				goto bad;
2561			}
2562			if (tv.tv_sec > INT32_MAX)
2563				val = SBT_MAX;
2564			else
2565				val = tvtosbt(tv);
2566			switch (sopt->sopt_name) {
2567			case SO_SNDTIMEO:
2568				so->so_snd.sb_timeo = val;
2569				break;
2570			case SO_RCVTIMEO:
2571				so->so_rcv.sb_timeo = val;
2572				break;
2573			}
2574			break;
2575
2576		case SO_LABEL:
2577#ifdef MAC
2578			error = sooptcopyin(sopt, &extmac, sizeof extmac,
2579			    sizeof extmac);
2580			if (error)
2581				goto bad;
2582			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2583			    so, &extmac);
2584#else
2585			error = EOPNOTSUPP;
2586#endif
2587			break;
2588
2589		default:
2590			error = ENOPROTOOPT;
2591			break;
2592		}
2593		if (error == 0 && so->so_proto->pr_ctloutput != NULL)
2594			(void)(*so->so_proto->pr_ctloutput)(so, sopt);
2595	}
2596bad:
2597	CURVNET_RESTORE();
2598	return (error);
2599}
2600
2601/*
2602 * Helper routine for getsockopt.
2603 */
2604int
2605sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2606{
2607	int	error;
2608	size_t	valsize;
2609
2610	error = 0;
2611
2612	/*
2613	 * Documented get behavior is that we always return a value, possibly
2614	 * truncated to fit in the user's buffer.  Traditional behavior is
2615	 * that we always tell the user precisely how much we copied, rather
2616	 * than something useful like the total amount we had available for
	 * her.  Note that this interface is not idempotent; the entire
	 * answer must be generated ahead of time.
2619	 */
2620	valsize = min(len, sopt->sopt_valsize);
2621	sopt->sopt_valsize = valsize;
2622	if (sopt->sopt_val != NULL) {
2623		if (sopt->sopt_td != NULL)
2624			error = copyout(buf, sopt->sopt_val, valsize);
2625		else
2626			bcopy(buf, sopt->sopt_val, valsize);
2627	}
2628	return (error);
2629}
2630
2631int
2632sogetopt(struct socket *so, struct sockopt *sopt)
2633{
2634	int	error, optval;
2635	struct	linger l;
2636	struct	timeval tv;
2637#ifdef MAC
2638	struct mac extmac;
2639#endif
2640
2641	CURVNET_SET(so->so_vnet);
2642	error = 0;
2643	if (sopt->sopt_level != SOL_SOCKET) {
2644		if (so->so_proto->pr_ctloutput != NULL)
2645			error = (*so->so_proto->pr_ctloutput)(so, sopt);
2646		else
2647			error = ENOPROTOOPT;
2648		CURVNET_RESTORE();
2649		return (error);
2650	} else {
2651		switch (sopt->sopt_name) {
2652#ifdef INET
2653		case SO_ACCEPTFILTER:
2654			error = do_getopt_accept_filter(so, sopt);
2655			break;
2656#endif
2657		case SO_LINGER:
2658			SOCK_LOCK(so);
2659			l.l_onoff = so->so_options & SO_LINGER;
2660			l.l_linger = so->so_linger;
2661			SOCK_UNLOCK(so);
2662			error = sooptcopyout(sopt, &l, sizeof l);
2663			break;
2664
2665		case SO_USELOOPBACK:
2666		case SO_DONTROUTE:
2667		case SO_DEBUG:
2668		case SO_KEEPALIVE:
2669		case SO_REUSEADDR:
2670		case SO_REUSEPORT:
2671		case SO_BROADCAST:
2672		case SO_OOBINLINE:
2673		case SO_ACCEPTCONN:
2674		case SO_TIMESTAMP:
2675		case SO_BINTIME:
2676		case SO_NOSIGPIPE:
2677			optval = so->so_options & sopt->sopt_name;
2678integer:
2679			error = sooptcopyout(sopt, &optval, sizeof optval);
2680			break;
2681
2682		case SO_TYPE:
2683			optval = so->so_type;
2684			goto integer;
2685
2686		case SO_PROTOCOL:
2687			optval = so->so_proto->pr_protocol;
2688			goto integer;
2689
2690		case SO_ERROR:
2691			SOCK_LOCK(so);
2692			optval = so->so_error;
2693			so->so_error = 0;
2694			SOCK_UNLOCK(so);
2695			goto integer;
2696
2697		case SO_SNDBUF:
2698			optval = so->so_snd.sb_hiwat;
2699			goto integer;
2700
2701		case SO_RCVBUF:
2702			optval = so->so_rcv.sb_hiwat;
2703			goto integer;
2704
2705		case SO_SNDLOWAT:
2706			optval = so->so_snd.sb_lowat;
2707			goto integer;
2708
2709		case SO_RCVLOWAT:
2710			optval = so->so_rcv.sb_lowat;
2711			goto integer;
2712
2713		case SO_SNDTIMEO:
2714		case SO_RCVTIMEO:
2715			tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ?
2716			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2717#ifdef COMPAT_FREEBSD32
2718			if (SV_CURPROC_FLAG(SV_ILP32)) {
2719				struct timeval32 tv32;
2720
2721				CP(tv, tv32, tv_sec);
2722				CP(tv, tv32, tv_usec);
2723				error = sooptcopyout(sopt, &tv32, sizeof tv32);
2724			} else
2725#endif
2726				error = sooptcopyout(sopt, &tv, sizeof tv);
2727			break;
2728
2729		case SO_LABEL:
2730#ifdef MAC
2731			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2732			    sizeof(extmac));
2733			if (error)
2734				goto bad;
2735			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
2736			    so, &extmac);
2737			if (error)
2738				goto bad;
2739			error = sooptcopyout(sopt, &extmac, sizeof extmac);
2740#else
2741			error = EOPNOTSUPP;
2742#endif
2743			break;
2744
2745		case SO_PEERLABEL:
2746#ifdef MAC
2747			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2748			    sizeof(extmac));
2749			if (error)
2750				goto bad;
2751			error = mac_getsockopt_peerlabel(
2752			    sopt->sopt_td->td_ucred, so, &extmac);
2753			if (error)
2754				goto bad;
2755			error = sooptcopyout(sopt, &extmac, sizeof extmac);
2756#else
2757			error = EOPNOTSUPP;
2758#endif
2759			break;
2760
2761		case SO_LISTENQLIMIT:
2762			optval = so->so_qlimit;
2763			goto integer;
2764
2765		case SO_LISTENQLEN:
2766			optval = so->so_qlen;
2767			goto integer;
2768
2769		case SO_LISTENINCQLEN:
2770			optval = so->so_incqlen;
2771			goto integer;
2772
2773		default:
2774			error = ENOPROTOOPT;
2775			break;
2776		}
2777	}
2778#ifdef MAC
2779bad:
2780#endif
2781	CURVNET_RESTORE();
2782	return (error);
2783}
2784
2785int
2786soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2787{
2788	struct mbuf *m, *m_prev;
2789	int sopt_size = sopt->sopt_valsize;
2790
2791	MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2792	if (m == NULL)
2793		return ENOBUFS;
2794	if (sopt_size > MLEN) {
2795		MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
2796		if ((m->m_flags & M_EXT) == 0) {
2797			m_free(m);
2798			return ENOBUFS;
2799		}
2800		m->m_len = min(MCLBYTES, sopt_size);
2801	} else {
2802		m->m_len = min(MLEN, sopt_size);
2803	}
2804	sopt_size -= m->m_len;
2805	*mp = m;
2806	m_prev = m;
2807
2808	while (sopt_size) {
2809		MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2810		if (m == NULL) {
2811			m_freem(*mp);
2812			return ENOBUFS;
2813		}
2814		if (sopt_size > MLEN) {
2815			MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
2816			    M_NOWAIT);
2817			if ((m->m_flags & M_EXT) == 0) {
2818				m_freem(m);
2819				m_freem(*mp);
2820				return ENOBUFS;
2821			}
2822			m->m_len = min(MCLBYTES, sopt_size);
2823		} else {
2824			m->m_len = min(MLEN, sopt_size);
2825		}
2826		sopt_size -= m->m_len;
2827		m_prev->m_next = m;
2828		m_prev = m;
2829	}
2830	return (0);
2831}
2832
2833int
2834soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2835{
2836	struct mbuf *m0 = m;
2837
2838	if (sopt->sopt_val == NULL)
2839		return (0);
2840	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2841		if (sopt->sopt_td != NULL) {
2842			int error;
2843
2844			error = copyin(sopt->sopt_val, mtod(m, char *),
2845			    m->m_len);
2846			if (error != 0) {
2847				m_freem(m0);
2848				return(error);
2849			}
2850		} else
2851			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2852		sopt->sopt_valsize -= m->m_len;
2853		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2854		m = m->m_next;
2855	}
	if (m != NULL) /* chain should have been sized by soopt_getm() */
2857		panic("ip6_sooptmcopyin");
2858	return (0);
2859}
2860
2861int
2862soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2863{
2864	struct mbuf *m0 = m;
2865	size_t valsize = 0;
2866
2867	if (sopt->sopt_val == NULL)
2868		return (0);
2869	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2870		if (sopt->sopt_td != NULL) {
2871			int error;
2872
2873			error = copyout(mtod(m, char *), sopt->sopt_val,
2874			    m->m_len);
2875			if (error != 0) {
2876				m_freem(m0);
2877				return(error);
2878			}
2879		} else
2880			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2881		sopt->sopt_valsize -= m->m_len;
2882		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2883		valsize += m->m_len;
2884		m = m->m_next;
2885	}
2886	if (m != NULL) {
		/* the user-supplied buffer should have been large enough */
2888		m_freem(m0);
2889		return(EINVAL);
2890	}
2891	sopt->sopt_valsize = valsize;
2892	return (0);
2893}
2894
2895/*
2896 * sohasoutofband(): protocol notifies socket layer of the arrival of new
2897 * out-of-band data, which will then notify socket consumers.
2898 */
2899void
2900sohasoutofband(struct socket *so)
2901{
2902
2903	if (so->so_sigio != NULL)
2904		pgsigio(&so->so_sigio, SIGURG, 0);
2905	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2906}
2907
2908int
2909sopoll(struct socket *so, int events, struct ucred *active_cred,
2910    struct thread *td)
2911{
2912
2913	/*
2914	 * We do not need to set or assert curvnet as long as everyone uses
2915	 * sopoll_generic().
2916	 */
2917	return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
2918	    td));
2919}
2920
2921int
2922sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
2923    struct thread *td)
2924{
2925	int revents = 0;
2926
2927	SOCKBUF_LOCK(&so->so_snd);
2928	SOCKBUF_LOCK(&so->so_rcv);
2929	if (events & (POLLIN | POLLRDNORM))
2930		if (soreadabledata(so))
2931			revents |= events & (POLLIN | POLLRDNORM);
2932
2933	if (events & (POLLOUT | POLLWRNORM))
2934		if (sowriteable(so))
2935			revents |= events & (POLLOUT | POLLWRNORM);
2936
2937	if (events & (POLLPRI | POLLRDBAND))
2938		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
2939			revents |= events & (POLLPRI | POLLRDBAND);
2940
2941	if ((events & POLLINIGNEOF) == 0) {
2942		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2943			revents |= events & (POLLIN | POLLRDNORM);
2944			if (so->so_snd.sb_state & SBS_CANTSENDMORE)
2945				revents |= POLLHUP;
2946		}
2947	}
2948
2949	if (revents == 0) {
2950		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
2951			selrecord(td, &so->so_rcv.sb_sel);
2952			so->so_rcv.sb_flags |= SB_SEL;
2953		}
2954
2955		if (events & (POLLOUT | POLLWRNORM)) {
2956			selrecord(td, &so->so_snd.sb_sel);
2957			so->so_snd.sb_flags |= SB_SEL;
2958		}
2959	}
2960
2961	SOCKBUF_UNLOCK(&so->so_rcv);
2962	SOCKBUF_UNLOCK(&so->so_snd);
2963	return (revents);
2964}
2965
2966int
2967soo_kqfilter(struct file *fp, struct knote *kn)
2968{
2969	struct socket *so = kn->kn_fp->f_data;
2970	struct sockbuf *sb;
2971
2972	switch (kn->kn_filter) {
2973	case EVFILT_READ:
2974		if (so->so_options & SO_ACCEPTCONN)
2975			kn->kn_fop = &solisten_filtops;
2976		else
2977			kn->kn_fop = &soread_filtops;
2978		sb = &so->so_rcv;
2979		break;
2980	case EVFILT_WRITE:
2981		kn->kn_fop = &sowrite_filtops;
2982		sb = &so->so_snd;
2983		break;
2984	default:
2985		return (EINVAL);
2986	}
2987
2988	SOCKBUF_LOCK(sb);
2989	knlist_add(&sb->sb_sel.si_note, kn, 1);
2990	sb->sb_flags |= SB_KNOTE;
2991	SOCKBUF_UNLOCK(sb);
2992	return (0);
2993}
2994
2995/*
2996 * Some routines that return EOPNOTSUPP for entry points that are not
2997 * supported by a protocol.  Fill in as needed.
2998 */
2999int
3000pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
3001{
3002
3003	return EOPNOTSUPP;
3004}
3005
3006int
3007pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
3008{
3009
3010	return EOPNOTSUPP;
3011}
3012
3013int
3014pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3015{
3016
3017	return EOPNOTSUPP;
3018}
3019
3020int
3021pru_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3022    struct thread *td)
3023{
3024
3025	return EOPNOTSUPP;
3026}
3027
3028int
3029pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3030{
3031
3032	return EOPNOTSUPP;
3033}
3034
3035int
3036pru_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3037    struct thread *td)
3038{
3039
3040	return EOPNOTSUPP;
3041}
3042
3043int
3044pru_connect2_notsupp(struct socket *so1, struct socket *so2)
3045{
3046
3047	return EOPNOTSUPP;
3048}
3049
3050int
3051pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
3052    struct ifnet *ifp, struct thread *td)
3053{
3054
3055	return EOPNOTSUPP;
3056}
3057
3058int
3059pru_disconnect_notsupp(struct socket *so)
3060{
3061
3062	return EOPNOTSUPP;
3063}
3064
3065int
3066pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
3067{
3068
3069	return EOPNOTSUPP;
3070}
3071
3072int
3073pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
3074{
3075
3076	return EOPNOTSUPP;
3077}
3078
3079int
3080pru_rcvd_notsupp(struct socket *so, int flags)
3081{
3082
3083	return EOPNOTSUPP;
3084}
3085
3086int
3087pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
3088{
3089
3090	return EOPNOTSUPP;
3091}
3092
3093int
3094pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
3095    struct sockaddr *addr, struct mbuf *control, struct thread *td)
3096{
3097
3098	return EOPNOTSUPP;
3099}
3100
3101/*
3102 * This isn't really a ``null'' operation, but it's the default one and
3103 * doesn't do anything destructive.
3104 */
3105int
3106pru_sense_null(struct socket *so, struct stat *sb)
3107{
3108
3109	sb->st_blksize = so->so_snd.sb_hiwat;
3110	return 0;
3111}
3112
3113int
3114pru_shutdown_notsupp(struct socket *so)
3115{
3116
3117	return EOPNOTSUPP;
3118}
3119
3120int
3121pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
3122{
3123
3124	return EOPNOTSUPP;
3125}
3126
3127int
3128pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
3129    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
3130{
3131
3132	return EOPNOTSUPP;
3133}
3134
3135int
3136pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
3137    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3138{
3139
3140	return EOPNOTSUPP;
3141}
3142
3143int
3144pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
3145    struct thread *td)
3146{
3147
3148	return EOPNOTSUPP;
3149}
3150
3151static void
3152filt_sordetach(struct knote *kn)
3153{
3154	struct socket *so = kn->kn_fp->f_data;
3155
3156	SOCKBUF_LOCK(&so->so_rcv);
3157	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
3158	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
3159		so->so_rcv.sb_flags &= ~SB_KNOTE;
3160	SOCKBUF_UNLOCK(&so->so_rcv);
3161}
3162
3163/*ARGSUSED*/
3164static int
3165filt_soread(struct knote *kn, long hint)
3166{
3167	struct socket *so;
3168
3169	so = kn->kn_fp->f_data;
3170	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3171
3172	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
3173	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3174		kn->kn_flags |= EV_EOF;
3175		kn->kn_fflags = so->so_error;
3176		return (1);
3177	} else if (so->so_error)	/* temporary udp error */
3178		return (1);
3179	else if (kn->kn_sfflags & NOTE_LOWAT)
3180		return (kn->kn_data >= kn->kn_sdata);
3181	else
3182		return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
3183}
3184
3185static void
3186filt_sowdetach(struct knote *kn)
3187{
3188	struct socket *so = kn->kn_fp->f_data;
3189
3190	SOCKBUF_LOCK(&so->so_snd);
3191	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
3192	if (knlist_empty(&so->so_snd.sb_sel.si_note))
3193		so->so_snd.sb_flags &= ~SB_KNOTE;
3194	SOCKBUF_UNLOCK(&so->so_snd);
3195}
3196
3197/*ARGSUSED*/
3198static int
3199filt_sowrite(struct knote *kn, long hint)
3200{
3201	struct socket *so;
3202
3203	so = kn->kn_fp->f_data;
3204	SOCKBUF_LOCK_ASSERT(&so->so_snd);
3205	kn->kn_data = sbspace(&so->so_snd);
3206	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
3207		kn->kn_flags |= EV_EOF;
3208		kn->kn_fflags = so->so_error;
3209		return (1);
3210	} else if (so->so_error)	/* temporary udp error */
3211		return (1);
3212	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
3213	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
3214		return (0);
3215	else if (kn->kn_sfflags & NOTE_LOWAT)
3216		return (kn->kn_data >= kn->kn_sdata);
3217	else
3218		return (kn->kn_data >= so->so_snd.sb_lowat);
3219}
3220
3221/*ARGSUSED*/
3222static int
3223filt_solisten(struct knote *kn, long hint)
3224{
3225	struct socket *so = kn->kn_fp->f_data;
3226
3227	kn->kn_data = so->so_qlen;
3228	return (!TAILQ_EMPTY(&so->so_comp));
3229}
3230
3231int
3232socheckuid(struct socket *so, uid_t uid)
3233{
3234
3235	if (so == NULL)
3236		return (EPERM);
3237	if (so->so_cred->cr_uid != uid)
3238		return (EPERM);
3239	return (0);
3240}
3241
3242/*
3243 * These functions are used by protocols to notify the socket layer (and its
3244 * consumers) of state changes in the sockets driven by protocol-side events.
3245 */
3246
3247/*
3248 * Procedures to manipulate state flags of socket and do appropriate wakeups.
3249 *
3250 * Normal sequence from the active (originating) side is that
3251 * soisconnecting() is called during processing of connect() call, resulting
3252 * in an eventual call to soisconnected() if/when the connection is
3253 * established.  When the connection is torn down soisdisconnecting() is
3254 * called during processing of disconnect() call, and soisdisconnected() is
3255 * called when the connection to the peer is totally severed.  The semantics
3256 * of these routines are such that connectionless protocols can call
3257 * soisconnected() and soisdisconnected() only, bypassing the in-progress
3258 * calls when setting up a ``connection'' takes no time.
3259 *
3260 * From the passive side, a socket is created with two queues of sockets:
3261 * so_incomp for connections in progress and so_comp for connections already
3262 * made and awaiting user acceptance.  As a protocol is preparing incoming
3263 * connections, it creates a socket structure queued on so_incomp by calling
3264 * sonewconn().  When the connection is established, soisconnected() is
3265 * called, and transfers the socket structure to so_comp, making it available
3266 * to accept().
3267 *
3268 * If a socket is closed with sockets on either so_incomp or so_comp, these
3269 * sockets are dropped.
3270 *
3271 * If higher-level protocols are implemented in the kernel, the wakeups done
3272 * here will sometimes cause software-interrupt process scheduling.
3273 */
3274void
3275soisconnecting(struct socket *so)
3276{
3277
3278	SOCK_LOCK(so);
3279	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
3280	so->so_state |= SS_ISCONNECTING;
3281	SOCK_UNLOCK(so);
3282}
3283
3284void
3285soisconnected(struct socket *so)
3286{
3287	struct socket *head;
3288	int ret;
3289
3290restart:
3291	ACCEPT_LOCK();
3292	SOCK_LOCK(so);
3293	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
3294	so->so_state |= SS_ISCONNECTED;
3295	head = so->so_head;
3296	if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
3297		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
3298			SOCK_UNLOCK(so);
3299			TAILQ_REMOVE(&head->so_incomp, so, so_list);
3300			head->so_incqlen--;
3301			so->so_qstate &= ~SQ_INCOMP;
3302			TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
3303			head->so_qlen++;
3304			so->so_qstate |= SQ_COMP;
3305			ACCEPT_UNLOCK();
3306			sorwakeup(head);
3307			wakeup_one(&head->so_timeo);
3308		} else {
3309			ACCEPT_UNLOCK();
3310			soupcall_set(so, SO_RCV,
3311			    head->so_accf->so_accept_filter->accf_callback,
3312			    head->so_accf->so_accept_filter_arg);
3313			so->so_options &= ~SO_ACCEPTFILTER;
3314			ret = head->so_accf->so_accept_filter->accf_callback(so,
3315			    head->so_accf->so_accept_filter_arg, M_NOWAIT);
3316			if (ret == SU_ISCONNECTED)
3317				soupcall_clear(so, SO_RCV);
3318			SOCK_UNLOCK(so);
3319			if (ret == SU_ISCONNECTED)
3320				goto restart;
3321		}
3322		return;
3323	}
3324	SOCK_UNLOCK(so);
3325	ACCEPT_UNLOCK();
3326	wakeup(&so->so_timeo);
3327	sorwakeup(so);
3328	sowwakeup(so);
3329}
3330
3331void
3332soisdisconnecting(struct socket *so)
3333{
3334
3335	/*
3336	 * Note: This code assumes that SOCK_LOCK(so) and
3337	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3338	 */
3339	SOCKBUF_LOCK(&so->so_rcv);
3340	so->so_state &= ~SS_ISCONNECTING;
3341	so->so_state |= SS_ISDISCONNECTING;
3342	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
3343	sorwakeup_locked(so);
3344	SOCKBUF_LOCK(&so->so_snd);
3345	so->so_snd.sb_state |= SBS_CANTSENDMORE;
3346	sowwakeup_locked(so);
3347	wakeup(&so->so_timeo);
3348}
3349
3350void
3351soisdisconnected(struct socket *so)
3352{
3353
3354	/*
3355	 * Note: This code assumes that SOCK_LOCK(so) and
3356	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3357	 */
3358	SOCKBUF_LOCK(&so->so_rcv);
3359	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
3360	so->so_state |= SS_ISDISCONNECTED;
3361	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
3362	sorwakeup_locked(so);
3363	SOCKBUF_LOCK(&so->so_snd);
3364	so->so_snd.sb_state |= SBS_CANTSENDMORE;
3365	sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
3366	sowwakeup_locked(so);
3367	wakeup(&so->so_timeo);
3368}
3369
3370/*
3371 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
3372 */
3373struct sockaddr *
3374sodupsockaddr(const struct sockaddr *sa, int mflags)
3375{
3376	struct sockaddr *sa2;
3377
3378	sa2 = malloc(sa->sa_len, M_SONAME, mflags);
3379	if (sa2)
3380		bcopy(sa, sa2, sa->sa_len);
3381	return sa2;
3382}
3383
3384/*
3385 * Register per-socket buffer upcalls.
3386 */
3387void
3388soupcall_set(struct socket *so, int which,
3389    int (*func)(struct socket *, void *, int), void *arg)
3390{
3391	struct sockbuf *sb;
3392
3393	switch (which) {
3394	case SO_RCV:
3395		sb = &so->so_rcv;
3396		break;
3397	case SO_SND:
3398		sb = &so->so_snd;
3399		break;
3400	default:
3401		panic("soupcall_set: bad which");
3402	}
3403	SOCKBUF_LOCK_ASSERT(sb);
3404#if 0
3405	/* XXX: accf_http actually wants to do this on purpose. */
3406	KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall"));
3407#endif
3408	sb->sb_upcall = func;
3409	sb->sb_upcallarg = arg;
3410	sb->sb_flags |= SB_UPCALL;
3411}
3412
3413void
3414soupcall_clear(struct socket *so, int which)
3415{
3416	struct sockbuf *sb;
3417
3418	switch (which) {
3419	case SO_RCV:
3420		sb = &so->so_rcv;
3421		break;
3422	case SO_SND:
3423		sb = &so->so_snd;
3424		break;
3425	default:
3426		panic("soupcall_clear: bad which");
3427	}
3428	SOCKBUF_LOCK_ASSERT(sb);
3429	KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear"));
3430	sb->sb_upcall = NULL;
3431	sb->sb_upcallarg = NULL;
3432	sb->sb_flags &= ~SB_UPCALL;
3433}
3434
3435/*
3436 * Create an external-format (``xsocket'') structure using the information in
3437 * the kernel-format socket structure pointed to by so.  This is done to
3438 * reduce the spew of irrelevant information over this interface, to isolate
3439 * user code from changes in the kernel structure, and potentially to provide
3440 * information-hiding if we decide that some of this information should be
3441 * hidden from users.
3442 */
3443void
3444sotoxsocket(struct socket *so, struct xsocket *xso)
3445{
3446
3447	xso->xso_len = sizeof *xso;
3448	xso->xso_so = so;
3449	xso->so_type = so->so_type;
3450	xso->so_options = so->so_options;
3451	xso->so_linger = so->so_linger;
3452	xso->so_state = so->so_state;
3453	xso->so_pcb = so->so_pcb;
3454	xso->xso_protocol = so->so_proto->pr_protocol;
3455	xso->xso_family = so->so_proto->pr_domain->dom_family;
3456	xso->so_qlen = so->so_qlen;
3457	xso->so_incqlen = so->so_incqlen;
3458	xso->so_qlimit = so->so_qlimit;
3459	xso->so_timeo = so->so_timeo;
3460	xso->so_error = so->so_error;
3461	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
3462	xso->so_oobmark = so->so_oobmark;
3463	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
3464	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
3465	xso->so_uid = so->so_cred->cr_uid;
3466}
3467
3468
3469/*
3470 * Socket accessor functions to provide external consumers with
3471 * a safe interface to socket state
3472 *
3473 */
3474
3475void
3476so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *),
3477    void *arg)
3478{
3479
3480	TAILQ_FOREACH(so, &so->so_comp, so_list)
3481		func(so, arg);
3482}
3483
3484struct sockbuf *
3485so_sockbuf_rcv(struct socket *so)
3486{
3487
3488	return (&so->so_rcv);
3489}
3490
3491struct sockbuf *
3492so_sockbuf_snd(struct socket *so)
3493{
3494
3495	return (&so->so_snd);
3496}
3497
3498int
3499so_state_get(const struct socket *so)
3500{
3501
3502	return (so->so_state);
3503}
3504
3505void
3506so_state_set(struct socket *so, int val)
3507{
3508
3509	so->so_state = val;
3510}
3511
3512int
3513so_options_get(const struct socket *so)
3514{
3515
3516	return (so->so_options);
3517}
3518
3519void
3520so_options_set(struct socket *so, int val)
3521{
3522
3523	so->so_options = val;
3524}
3525
3526int
3527so_error_get(const struct socket *so)
3528{
3529
3530	return (so->so_error);
3531}
3532
3533void
3534so_error_set(struct socket *so, int val)
3535{
3536
3537	so->so_error = val;
3538}
3539
3540int
3541so_linger_get(const struct socket *so)
3542{
3543
3544	return (so->so_linger);
3545}
3546
3547void
3548so_linger_set(struct socket *so, int val)
3549{
3550
3551	so->so_linger = val;
3552}
3553
3554struct protosw *
3555so_protosw_get(const struct socket *so)
3556{
3557
3558	return (so->so_proto);
3559}
3560
3561void
3562so_protosw_set(struct socket *so, struct protosw *val)
3563{
3564
3565	so->so_proto = val;
3566}
3567
3568void
3569so_sorwakeup(struct socket *so)
3570{
3571
3572	sorwakeup(so);
3573}
3574
3575void
3576so_sowwakeup(struct socket *so)
3577{
3578
3579	sowwakeup(so);
3580}
3581
3582void
3583so_sorwakeup_locked(struct socket *so)
3584{
3585
3586	sorwakeup_locked(so);
3587}
3588
3589void
3590so_sowwakeup_locked(struct socket *so)
3591{
3592
3593	sowwakeup_locked(so);
3594}
3595
3596void
3597so_lock(struct socket *so)
3598{
3599
3600	SOCK_LOCK(so);
3601}
3602
3603void
3604so_unlock(struct socket *so)
3605{
3606
3607	SOCK_UNLOCK(so);
3608}
3609