uipc_socket.c (275326) → uipc_socket.c (275329)
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 * The Regents of the University of California.
4 * Copyright (c) 2004 The FreeBSD Foundation
5 * Copyright (c) 2004-2008 Robert N. M. Watson
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
33 */
34
35/*
36 * Comments on the socket life cycle:
37 *
 38 * soalloc() sets up socket layer state for a socket, called only by
39 * socreate() and sonewconn(). Socket layer private.
40 *
41 * sodealloc() tears down socket layer state for a socket, called only by
42 * sofree() and sonewconn(). Socket layer private.
43 *
44 * pru_attach() associates protocol layer state with an allocated socket;
45 * called only once, may fail, aborting socket allocation. This is called
46 * from socreate() and sonewconn(). Socket layer private.
47 *
48 * pru_detach() disassociates protocol layer state from an attached socket,
49 * and will be called exactly once for sockets in which pru_attach() has
50 * been successfully called. If pru_attach() returned an error,
51 * pru_detach() will not be called. Socket layer private.
52 *
53 * pru_abort() and pru_close() notify the protocol layer that the last
54 * consumer of a socket is starting to tear down the socket, and that the
55 * protocol should terminate the connection. Historically, pru_abort() also
56 * detached protocol state from the socket state, but this is no longer the
57 * case.
58 *
59 * socreate() creates a socket and attaches protocol state. This is a public
60 * interface that may be used by socket layer consumers to create new
61 * sockets.
62 *
63 * sonewconn() creates a socket and attaches protocol state. This is a
64 * public interface that may be used by protocols to create new sockets when
65 * a new connection is received and will be available for accept() on a
66 * listen socket.
67 *
68 * soclose() destroys a socket after possibly waiting for it to disconnect.
69 * This is a public interface that socket consumers should use to close and
70 * release a socket when done with it.
71 *
72 * soabort() destroys a socket without waiting for it to disconnect (used
73 * only for incoming connections that are already partially or fully
74 * connected). This is used internally by the socket layer when clearing
75 * listen socket queues (due to overflow or close on the listen socket), but
76 * is also a public interface protocols may use to abort connections in
77 * their incomplete listen queues should they no longer be required. Sockets
78 * placed in completed connection listen queues should not be aborted for
79 * reasons described in the comment above the soclose() implementation. This
80 * is not a general purpose close routine, and except in the specific
81 * circumstances described here, should not be used.
82 *
83 * sofree() will free a socket and its protocol state if all references on
84 * the socket have been released, and is the public interface to attempt to
85 * free a socket when a reference is removed. This is a socket layer private
86 * interface.
87 *
88 * NOTE: In addition to socreate() and soclose(), which provide a single
89 * socket reference to the consumer to be managed as required, there are two
90 * calls to explicitly manage socket references, soref(), and sorele().
91 * Currently, these are generally required only when transitioning a socket
92 * from a listen queue to a file descriptor, in order to prevent garbage
93 * collection of the socket at an untimely moment. For a number of reasons,
94 * these interfaces are not preferred, and should be avoided.
95 *
96 * NOTE: With regard to VNETs the general rule is that callers do not set
97 * curvnet. Exceptions to this rule include soabort(), sodisconnect(),
98 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
99 * and sorflush(), which are usually called from a pre-set VNET context.
100 * sopoll() currently does not need a VNET context to be set.
101 */
102
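To make the life-cycle comment above concrete, here is a minimal sketch of an in-kernel consumer of the public interfaces it describes. The helper name example_udp_socket() and the bind-before-return flow are illustrative only; the socreate(), sobind() and soclose() signatures are the ones defined later in this file.

/* Illustrative sketch only; not part of uipc_socket.c. */
static int
example_udp_socket(struct sockaddr *sa, struct thread *td, struct socket **sop)
{
	struct socket *so;
	int error;

	/* socreate() hands back a socket holding a single reference. */
	error = socreate(AF_INET, &so, SOCK_DGRAM, IPPROTO_UDP,
	    td->td_ucred, td);
	if (error != 0)
		return (error);
	error = sobind(so, sa, td);
	if (error != 0) {
		soclose(so);		/* Drops the reference from socreate(). */
		return (error);
	}
	*sop = so;			/* Caller eventually calls soclose(). */
	return (0);
}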
103#include <sys/cdefs.h>
104__FBSDID("$FreeBSD: head/sys/kern/uipc_socket.c 275326 2014-11-30 12:52:33Z glebius $");
104__FBSDID("$FreeBSD: head/sys/kern/uipc_socket.c 275329 2014-11-30 13:24:21Z glebius $");
105
106#include "opt_inet.h"
107#include "opt_inet6.h"
108#include "opt_compat.h"
109
110#include <sys/param.h>
111#include <sys/systm.h>
112#include <sys/fcntl.h>
113#include <sys/limits.h>
114#include <sys/lock.h>
115#include <sys/mac.h>
116#include <sys/malloc.h>
117#include <sys/mbuf.h>
118#include <sys/mutex.h>
119#include <sys/domain.h>
120#include <sys/file.h> /* for struct knote */
121#include <sys/hhook.h>
122#include <sys/kernel.h>
123#include <sys/khelp.h>
124#include <sys/event.h>
125#include <sys/eventhandler.h>
126#include <sys/poll.h>
127#include <sys/proc.h>
128#include <sys/protosw.h>
129#include <sys/socket.h>
130#include <sys/socketvar.h>
131#include <sys/resourcevar.h>
132#include <net/route.h>
133#include <sys/signalvar.h>
134#include <sys/stat.h>
135#include <sys/sx.h>
136#include <sys/sysctl.h>
137#include <sys/uio.h>
138#include <sys/jail.h>
139#include <sys/syslog.h>
140#include <netinet/in.h>
141
142#include <net/vnet.h>
143
144#include <security/mac/mac_framework.h>
145
146#include <vm/uma.h>
147
148#ifdef COMPAT_FREEBSD32
149#include <sys/mount.h>
150#include <sys/sysent.h>
151#include <compat/freebsd32/freebsd32.h>
152#endif
153
154static int soreceive_rcvoob(struct socket *so, struct uio *uio,
155 int flags);
156
157static void filt_sordetach(struct knote *kn);
158static int filt_soread(struct knote *kn, long hint);
159static void filt_sowdetach(struct knote *kn);
160static int filt_sowrite(struct knote *kn, long hint);
161static int filt_solisten(struct knote *kn, long hint);
162static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id);
163fo_kqfilter_t soo_kqfilter;
164
165static struct filterops solisten_filtops = {
166 .f_isfd = 1,
167 .f_detach = filt_sordetach,
168 .f_event = filt_solisten,
169};
170static struct filterops soread_filtops = {
171 .f_isfd = 1,
172 .f_detach = filt_sordetach,
173 .f_event = filt_soread,
174};
175static struct filterops sowrite_filtops = {
176 .f_isfd = 1,
177 .f_detach = filt_sowdetach,
178 .f_event = filt_sowrite,
179};
180
181so_gen_t so_gencnt; /* generation count for sockets */
182
183MALLOC_DEFINE(M_SONAME, "soname", "socket name");
184MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
185
186#define VNET_SO_ASSERT(so) \
187 VNET_ASSERT(curvnet != NULL, \
188 ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));
189
190VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
191#define V_socket_hhh VNET(socket_hhh)
192
193/*
194 * Limit on the number of connections in the listen queue waiting
195 * for accept(2).
 196 * NB: The original sysctl somaxconn is still available but hidden
197 * to prevent confusion about the actual purpose of this number.
198 */
199static int somaxconn = SOMAXCONN;
200
201static int
202sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
203{
204 int error;
205 int val;
206
207 val = somaxconn;
208 error = sysctl_handle_int(oidp, &val, 0, req);
209 if (error || !req->newptr )
210 return (error);
211
212 if (val < 1 || val > USHRT_MAX)
213 return (EINVAL);
214
215 somaxconn = val;
216 return (0);
217}
218SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW,
219 0, sizeof(int), sysctl_somaxconn, "I",
220 "Maximum listen socket pending connection accept queue size");
221SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
222 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP,
223 0, sizeof(int), sysctl_somaxconn, "I",
224 "Maximum listen socket pending connection accept queue size (compat)");
225
226static int numopensockets;
227SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
228 &numopensockets, 0, "Number of open sockets");
229
230/*
231 * accept_mtx locks down per-socket fields relating to accept queues. See
232 * socketvar.h for an annotation of the protected fields of struct socket.
233 */
234struct mtx accept_mtx;
235MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
236
237/*
238 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
239 * so_gencnt field.
240 */
241static struct mtx so_global_mtx;
242MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
243
244/*
245 * General IPC sysctl name space, used by sockets and a variety of other IPC
246 * types.
247 */
248SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
249
250/*
251 * Initialize the socket subsystem and set up the socket
252 * memory allocator.
253 */
254static uma_zone_t socket_zone;
255int maxsockets;
256
257static void
258socket_zone_change(void *tag)
259{
260
261 maxsockets = uma_zone_set_max(socket_zone, maxsockets);
262}
263
264static void
265socket_hhook_register(int subtype)
266{
267
268 if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype,
269 &V_socket_hhh[subtype],
270 HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
271 printf("%s: WARNING: unable to register hook\n", __func__);
272}
273
274static void
275socket_hhook_deregister(int subtype)
276{
277
278 if (hhook_head_deregister(V_socket_hhh[subtype]) != 0)
279 printf("%s: WARNING: unable to deregister hook\n", __func__);
280}
281
282static void
283socket_init(void *tag)
284{
285
286 socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
287 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
288 maxsockets = uma_zone_set_max(socket_zone, maxsockets);
289 uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
290 EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
291 EVENTHANDLER_PRI_FIRST);
292}
293SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);
294
295static void
296socket_vnet_init(const void *unused __unused)
297{
298 int i;
299
300 /* We expect a contiguous range */
301 for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
302 socket_hhook_register(i);
303}
304VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
305 socket_vnet_init, NULL);
306
307static void
308socket_vnet_uninit(const void *unused __unused)
309{
310 int i;
311
312 for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
313 socket_hhook_deregister(i);
314}
315VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
316 socket_vnet_uninit, NULL);
317
318/*
319 * Initialise maxsockets. This SYSINIT must be run after
320 * tunable_mbinit().
321 */
322static void
323init_maxsockets(void *ignored)
324{
325
326 TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
327 maxsockets = imax(maxsockets, maxfiles);
328}
329SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
330
331/*
332 * Sysctl to get and set the maximum global sockets limit. Notify protocols
333 * of the change so that they can update their dependent limits as required.
334 */
335static int
336sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
337{
338 int error, newmaxsockets;
339
340 newmaxsockets = maxsockets;
341 error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
342 if (error == 0 && req->newptr) {
343 if (newmaxsockets > maxsockets &&
344 newmaxsockets <= maxfiles) {
345 maxsockets = newmaxsockets;
346 EVENTHANDLER_INVOKE(maxsockets_change);
347 } else
348 error = EINVAL;
349 }
350 return (error);
351}
352SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
353 &maxsockets, 0, sysctl_maxsockets, "IU",
354 "Maximum number of sockets avaliable");
355
356/*
357 * Socket operation routines. These routines are called by the routines in
358 * sys_socket.c or from a system process, and implement the semantics of
359 * socket operations by switching out to the protocol specific routines.
360 */
361
362/*
363 * Get a socket structure from our zone, and initialize it. Note that it
364 * would probably be better to allocate socket and PCB at the same time, but
365 * I'm not convinced that all the protocols can be easily modified to do
366 * this.
367 *
368 * soalloc() returns a socket with a ref count of 0.
369 */
370static struct socket *
371soalloc(struct vnet *vnet)
372{
373 struct socket *so;
374
375 so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
376 if (so == NULL)
377 return (NULL);
378#ifdef MAC
379 if (mac_socket_init(so, M_NOWAIT) != 0) {
380 uma_zfree(socket_zone, so);
381 return (NULL);
382 }
383#endif
384 if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) {
385 uma_zfree(socket_zone, so);
386 return (NULL);
387 }
388
389 SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
390 SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
391 sx_init(&so->so_snd.sb_sx, "so_snd_sx");
392 sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
393 TAILQ_INIT(&so->so_aiojobq);
394#ifdef VIMAGE
395 VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
396 __func__, __LINE__, so));
397 so->so_vnet = vnet;
398#endif
399 /* We shouldn't need the so_global_mtx */
400 if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) {
401 /* Do we need more comprehensive error returns? */
402 uma_zfree(socket_zone, so);
403 return (NULL);
404 }
405 mtx_lock(&so_global_mtx);
406 so->so_gencnt = ++so_gencnt;
407 ++numopensockets;
408#ifdef VIMAGE
409 vnet->vnet_sockcnt++;
410#endif
411 mtx_unlock(&so_global_mtx);
412
413 return (so);
414}
415
416/*
417 * Free the storage associated with a socket at the socket layer, tear down
418 * locks, labels, etc. All protocol state is assumed already to have been
419 * torn down (and possibly never set up) by the caller.
420 */
421static void
422sodealloc(struct socket *so)
423{
424
425 KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
426 KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
427
428 mtx_lock(&so_global_mtx);
429 so->so_gencnt = ++so_gencnt;
430 --numopensockets; /* Could be below, but faster here. */
431#ifdef VIMAGE
432 VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
433 __func__, __LINE__, so));
434 so->so_vnet->vnet_sockcnt--;
435#endif
436 mtx_unlock(&so_global_mtx);
437 if (so->so_rcv.sb_hiwat)
438 (void)chgsbsize(so->so_cred->cr_uidinfo,
439 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
440 if (so->so_snd.sb_hiwat)
441 (void)chgsbsize(so->so_cred->cr_uidinfo,
442 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
 443 /* Remove accept filter if one is present. */
444 if (so->so_accf != NULL)
445 do_setopt_accept_filter(so, NULL);
446#ifdef MAC
447 mac_socket_destroy(so);
448#endif
449 hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE);
450
451 crfree(so->so_cred);
452 khelp_destroy_osd(&so->osd);
453 sx_destroy(&so->so_snd.sb_sx);
454 sx_destroy(&so->so_rcv.sb_sx);
455 SOCKBUF_LOCK_DESTROY(&so->so_snd);
456 SOCKBUF_LOCK_DESTROY(&so->so_rcv);
457 uma_zfree(socket_zone, so);
458}
459
460/*
461 * socreate returns a socket with a ref count of 1. The socket should be
462 * closed with soclose().
463 */
464int
465socreate(int dom, struct socket **aso, int type, int proto,
466 struct ucred *cred, struct thread *td)
467{
468 struct protosw *prp;
469 struct socket *so;
470 int error;
471
472 if (proto)
473 prp = pffindproto(dom, proto, type);
474 else
475 prp = pffindtype(dom, type);
476
477 if (prp == NULL) {
478 /* No support for domain. */
479 if (pffinddomain(dom) == NULL)
480 return (EAFNOSUPPORT);
481 /* No support for socket type. */
482 if (proto == 0 && type != 0)
483 return (EPROTOTYPE);
484 return (EPROTONOSUPPORT);
485 }
486 if (prp->pr_usrreqs->pru_attach == NULL ||
487 prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
488 return (EPROTONOSUPPORT);
489
490 if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
491 return (EPROTONOSUPPORT);
492
493 if (prp->pr_type != type)
494 return (EPROTOTYPE);
495 so = soalloc(CRED_TO_VNET(cred));
496 if (so == NULL)
497 return (ENOBUFS);
498
499 TAILQ_INIT(&so->so_incomp);
500 TAILQ_INIT(&so->so_comp);
501 so->so_type = type;
502 so->so_cred = crhold(cred);
503 if ((prp->pr_domain->dom_family == PF_INET) ||
504 (prp->pr_domain->dom_family == PF_INET6) ||
505 (prp->pr_domain->dom_family == PF_ROUTE))
506 so->so_fibnum = td->td_proc->p_fibnum;
507 else
508 so->so_fibnum = 0;
509 so->so_proto = prp;
510#ifdef MAC
511 mac_socket_create(cred, so);
512#endif
513 knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
514 knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
515 so->so_count = 1;
516 /*
517 * Auto-sizing of socket buffers is managed by the protocols and
518 * the appropriate flags must be set in the pru_attach function.
519 */
520 CURVNET_SET(so->so_vnet);
521 error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
522 CURVNET_RESTORE();
523 if (error) {
524 KASSERT(so->so_count == 1, ("socreate: so_count %d",
525 so->so_count));
526 so->so_count = 0;
527 sodealloc(so);
528 return (error);
529 }
530 *aso = so;
531 return (0);
532}
533
534#ifdef REGRESSION
535static int regression_sonewconn_earlytest = 1;
536SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
537 &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
538#endif
539
540/*
541 * When an attempt at a new connection is noted on a socket which accepts
542 * connections, sonewconn is called. If the connection is possible (subject
 543 * to space constraints, etc.) then we allocate a new structure, properly
544 * linked into the data structure of the original socket, and return this.
545 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
546 *
547 * Note: the ref count on the socket is 0 on return.
548 */
549struct socket *
550sonewconn(struct socket *head, int connstatus)
551{
552 static struct timeval lastover;
553 static struct timeval overinterval = { 60, 0 };
554 static int overcount;
555
556 struct socket *so;
557 int over;
558
559 ACCEPT_LOCK();
560 over = (head->so_qlen > 3 * head->so_qlimit / 2);
561 ACCEPT_UNLOCK();
562#ifdef REGRESSION
563 if (regression_sonewconn_earlytest && over) {
564#else
565 if (over) {
566#endif
567 overcount++;
568
569 if (ratecheck(&lastover, &overinterval)) {
570 log(LOG_DEBUG, "%s: pcb %p: Listen queue overflow: "
571 "%i already in queue awaiting acceptance "
572 "(%d occurrences)\n",
573 __func__, head->so_pcb, head->so_qlen, overcount);
574
575 overcount = 0;
576 }
577
578 return (NULL);
579 }
580 VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
581 __func__, __LINE__, head));
582 so = soalloc(head->so_vnet);
583 if (so == NULL) {
584 log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
585 "limit reached or out of memory\n",
586 __func__, head->so_pcb);
587 return (NULL);
588 }
589 if ((head->so_options & SO_ACCEPTFILTER) != 0)
590 connstatus = 0;
591 so->so_head = head;
592 so->so_type = head->so_type;
593 so->so_options = head->so_options &~ SO_ACCEPTCONN;
594 so->so_linger = head->so_linger;
595 so->so_state = head->so_state | SS_NOFDREF;
596 so->so_fibnum = head->so_fibnum;
597 so->so_proto = head->so_proto;
598 so->so_cred = crhold(head->so_cred);
599#ifdef MAC
600 mac_socket_newconn(head, so);
601#endif
602 knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
603 knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
604 VNET_SO_ASSERT(head);
605 if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
606 sodealloc(so);
607 log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
608 __func__, head->so_pcb);
609 return (NULL);
610 }
611 if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
612 sodealloc(so);
613 log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
614 __func__, head->so_pcb);
615 return (NULL);
616 }
617 so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
618 so->so_snd.sb_lowat = head->so_snd.sb_lowat;
619 so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
620 so->so_snd.sb_timeo = head->so_snd.sb_timeo;
621 so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
622 so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
623 so->so_state |= connstatus;
624 ACCEPT_LOCK();
625 /*
626 * The accept socket may be tearing down but we just
627 * won a race on the ACCEPT_LOCK.
628 * However, if sctp_peeloff() is called on a 1-to-many
629 * style socket, the SO_ACCEPTCONN doesn't need to be set.
630 */
631 if (!(head->so_options & SO_ACCEPTCONN) &&
632 ((head->so_proto->pr_protocol != IPPROTO_SCTP) ||
633 (head->so_type != SOCK_SEQPACKET))) {
634 SOCK_LOCK(so);
635 so->so_head = NULL;
636 sofree(so); /* NB: returns ACCEPT_UNLOCK'ed. */
637 return (NULL);
638 }
639 if (connstatus) {
640 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
641 so->so_qstate |= SQ_COMP;
642 head->so_qlen++;
643 } else {
644 /*
645 * Keep removing sockets from the head until there's room for
646 * us to insert on the tail. In pre-locking revisions, this
647 * was a simple if(), but as we could be racing with other
648 * threads and soabort() requires dropping locks, we must
649 * loop waiting for the condition to be true.
650 */
651 while (head->so_incqlen > head->so_qlimit) {
652 struct socket *sp;
653 sp = TAILQ_FIRST(&head->so_incomp);
654 TAILQ_REMOVE(&head->so_incomp, sp, so_list);
655 head->so_incqlen--;
656 sp->so_qstate &= ~SQ_INCOMP;
657 sp->so_head = NULL;
658 ACCEPT_UNLOCK();
659 soabort(sp);
660 ACCEPT_LOCK();
661 }
662 TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
663 so->so_qstate |= SQ_INCOMP;
664 head->so_incqlen++;
665 }
666 ACCEPT_UNLOCK();
667 if (connstatus) {
668 sorwakeup(head);
669 wakeup_one(&head->so_timeo);
670 }
671 return (so);
672}
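As a rough illustration of the caller side, a protocol's input path typically uses sonewconn() like the sketch below when an inbound connection completes; the function name is hypothetical and protocol-specific PCB setup is elided.

/* Illustrative sketch only; not part of uipc_socket.c. */
static void
example_input_new_connection(struct socket *head)
{
	struct socket *so;

	/*
	 * NULL means the listen queue overflowed or allocation failed;
	 * the protocol simply drops the new connection in that case.
	 */
	so = sonewconn(head, SS_ISCONNECTED);
	if (so == NULL)
		return;
	/* Protocol-specific state for 'so' would be initialized here. */
}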
673
674int
675sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
676{
677 int error;
678
679 CURVNET_SET(so->so_vnet);
680 error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
681 CURVNET_RESTORE();
682 return (error);
683}
684
685int
686sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
687{
688 int error;
689
690 CURVNET_SET(so->so_vnet);
691 error = (*so->so_proto->pr_usrreqs->pru_bindat)(fd, so, nam, td);
692 CURVNET_RESTORE();
693 return (error);
694}
695
696/*
697 * solisten() transitions a socket from a non-listening state to a listening
698 * state, but can also be used to update the listen queue depth on an
699 * existing listen socket. The protocol will call back into the sockets
700 * layer using solisten_proto_check() and solisten_proto() to check and set
701 * socket-layer listen state. Call backs are used so that the protocol can
702 * acquire both protocol and socket layer locks in whatever order is required
703 * by the protocol.
704 *
705 * Protocol implementors are advised to hold the socket lock across the
706 * socket-layer test and set to avoid races at the socket layer.
707 */
708int
709solisten(struct socket *so, int backlog, struct thread *td)
710{
711 int error;
712
713 CURVNET_SET(so->so_vnet);
714 error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
715 CURVNET_RESTORE();
716 return (error);
717}
718
719int
720solisten_proto_check(struct socket *so)
721{
722
723 SOCK_LOCK_ASSERT(so);
724
725 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
726 SS_ISDISCONNECTING))
727 return (EINVAL);
728 return (0);
729}
730
731void
732solisten_proto(struct socket *so, int backlog)
733{
734
735 SOCK_LOCK_ASSERT(so);
736
737 if (backlog < 0 || backlog > somaxconn)
738 backlog = somaxconn;
739 so->so_qlimit = backlog;
740 so->so_options |= SO_ACCEPTCONN;
741}
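The check/set callback pattern described in the solisten() comment usually takes the following shape inside a protocol's pru_listen method; this is a hedged sketch for a protocol with no extra listen-time state, and the function name is hypothetical.

/* Illustrative sketch only; not part of uipc_socket.c. */
static int
example_pru_listen(struct socket *so, int backlog, struct thread *td)
{
	int error;

	SOCK_LOCK(so);
	error = solisten_proto_check(so);
	if (error == 0)
		solisten_proto(so, backlog);
	SOCK_UNLOCK(so);
	return (error);
}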
742
743/*
744 * Evaluate the reference count and named references on a socket; if no
745 * references remain, free it. This should be called whenever a reference is
746 * released, such as in sorele(), but also when named reference flags are
747 * cleared in socket or protocol code.
748 *
749 * sofree() will free the socket if:
750 *
751 * - There are no outstanding file descriptor references or related consumers
752 * (so_count == 0).
753 *
754 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
755 *
756 * - The protocol does not have an outstanding strong reference on the socket
757 * (SS_PROTOREF).
758 *
759 * - The socket is not in a completed connection queue, so a process has been
760 * notified that it is present. If it is removed, the user process may
761 * block in accept() despite select() saying the socket was ready.
762 */
763void
764sofree(struct socket *so)
765{
766 struct protosw *pr = so->so_proto;
767 struct socket *head;
768
769 ACCEPT_LOCK_ASSERT();
770 SOCK_LOCK_ASSERT(so);
771
772 if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
773 (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
774 SOCK_UNLOCK(so);
775 ACCEPT_UNLOCK();
776 return;
777 }
778
779 head = so->so_head;
780 if (head != NULL) {
781 KASSERT((so->so_qstate & SQ_COMP) != 0 ||
782 (so->so_qstate & SQ_INCOMP) != 0,
783 ("sofree: so_head != NULL, but neither SQ_COMP nor "
784 "SQ_INCOMP"));
785 KASSERT((so->so_qstate & SQ_COMP) == 0 ||
786 (so->so_qstate & SQ_INCOMP) == 0,
787 ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
788 TAILQ_REMOVE(&head->so_incomp, so, so_list);
789 head->so_incqlen--;
790 so->so_qstate &= ~SQ_INCOMP;
791 so->so_head = NULL;
792 }
793 KASSERT((so->so_qstate & SQ_COMP) == 0 &&
794 (so->so_qstate & SQ_INCOMP) == 0,
795 ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
796 so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
797 if (so->so_options & SO_ACCEPTCONN) {
798 KASSERT((TAILQ_EMPTY(&so->so_comp)),
799 ("sofree: so_comp populated"));
800 KASSERT((TAILQ_EMPTY(&so->so_incomp)),
801 ("sofree: so_incomp populated"));
802 }
803 SOCK_UNLOCK(so);
804 ACCEPT_UNLOCK();
805
806 VNET_SO_ASSERT(so);
807 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
808 (*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
809 if (pr->pr_usrreqs->pru_detach != NULL)
810 (*pr->pr_usrreqs->pru_detach)(so);
811
812 /*
813 * From this point on, we assume that no other references to this
814 * socket exist anywhere else in the stack. Therefore, no locks need
815 * to be acquired or held.
816 *
817 * We used to do a lot of socket buffer and socket locking here, as
818 * well as invoke sorflush() and perform wakeups. The direct call to
819 * dom_dispose() and sbrelease_internal() are an inlining of what was
820 * necessary from sorflush().
821 *
822 * Notice that the socket buffer and kqueue state are torn down
 823 * before calling pru_detach. This means that protocols should not
824 * assume they can perform socket wakeups, etc, in their detach code.
825 */
826 sbdestroy(&so->so_snd, so);
827 sbdestroy(&so->so_rcv, so);
828 seldrain(&so->so_snd.sb_sel);
829 seldrain(&so->so_rcv.sb_sel);
830 knlist_destroy(&so->so_rcv.sb_sel.si_note);
831 knlist_destroy(&so->so_snd.sb_sel.si_note);
832 sodealloc(so);
833}
834
835/*
836 * Close a socket on last file table reference removal. Initiate disconnect
837 * if connected. Free socket when disconnect complete.
838 *
839 * This function will sorele() the socket. Note that soclose() may be called
840 * prior to the ref count reaching zero. The actual socket structure will
841 * not be freed until the ref count reaches zero.
842 */
843int
844soclose(struct socket *so)
845{
846 int error = 0;
847
848 KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
849
850 CURVNET_SET(so->so_vnet);
851 funsetown(&so->so_sigio);
852 if (so->so_state & SS_ISCONNECTED) {
853 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
854 error = sodisconnect(so);
855 if (error) {
856 if (error == ENOTCONN)
857 error = 0;
858 goto drop;
859 }
860 }
861 if (so->so_options & SO_LINGER) {
862 if ((so->so_state & SS_ISDISCONNECTING) &&
863 (so->so_state & SS_NBIO))
864 goto drop;
865 while (so->so_state & SS_ISCONNECTED) {
866 error = tsleep(&so->so_timeo,
867 PSOCK | PCATCH, "soclos",
868 so->so_linger * hz);
869 if (error)
870 break;
871 }
872 }
873 }
874
875drop:
876 if (so->so_proto->pr_usrreqs->pru_close != NULL)
877 (*so->so_proto->pr_usrreqs->pru_close)(so);
878 ACCEPT_LOCK();
879 if (so->so_options & SO_ACCEPTCONN) {
880 struct socket *sp;
881 /*
882 * Prevent new additions to the accept queues due
883 * to ACCEPT_LOCK races while we are draining them.
884 */
885 so->so_options &= ~SO_ACCEPTCONN;
886 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
887 TAILQ_REMOVE(&so->so_incomp, sp, so_list);
888 so->so_incqlen--;
889 sp->so_qstate &= ~SQ_INCOMP;
890 sp->so_head = NULL;
891 ACCEPT_UNLOCK();
892 soabort(sp);
893 ACCEPT_LOCK();
894 }
895 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
896 TAILQ_REMOVE(&so->so_comp, sp, so_list);
897 so->so_qlen--;
898 sp->so_qstate &= ~SQ_COMP;
899 sp->so_head = NULL;
900 ACCEPT_UNLOCK();
901 soabort(sp);
902 ACCEPT_LOCK();
903 }
904 KASSERT((TAILQ_EMPTY(&so->so_comp)),
905 ("%s: so_comp populated", __func__));
906 KASSERT((TAILQ_EMPTY(&so->so_incomp)),
907 ("%s: so_incomp populated", __func__));
908 }
909 SOCK_LOCK(so);
910 KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
911 so->so_state |= SS_NOFDREF;
912 sorele(so); /* NB: Returns with ACCEPT_UNLOCK(). */
913 CURVNET_RESTORE();
914 return (error);
915}
916
917/*
918 * soabort() is used to abruptly tear down a connection, such as when a
919 * resource limit is reached (listen queue depth exceeded), or if a listen
920 * socket is closed while there are sockets waiting to be accepted.
921 *
922 * This interface is tricky, because it is called on an unreferenced socket,
923 * and must be called only by a thread that has actually removed the socket
924 * from the listen queue it was on, or races with other threads are risked.
925 *
926 * This interface will call into the protocol code, so must not be called
927 * with any socket locks held. Protocols do call it while holding their own
928 * recursible protocol mutexes, but this is something that should be subject
929 * to review in the future.
930 */
931void
932soabort(struct socket *so)
933{
934
935 /*
936 * In as much as is possible, assert that no references to this
937 * socket are held. This is not quite the same as asserting that the
938 * current thread is responsible for arranging for no references, but
939 * is as close as we can get for now.
940 */
941 KASSERT(so->so_count == 0, ("soabort: so_count"));
942 KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
943 KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
 944 KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
 945 KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));
946 VNET_SO_ASSERT(so);
947
948 if (so->so_proto->pr_usrreqs->pru_abort != NULL)
949 (*so->so_proto->pr_usrreqs->pru_abort)(so);
950 ACCEPT_LOCK();
951 SOCK_LOCK(so);
952 sofree(so);
953}
954
955int
956soaccept(struct socket *so, struct sockaddr **nam)
957{
958 int error;
959
960 SOCK_LOCK(so);
961 KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
962 so->so_state &= ~SS_NOFDREF;
963 SOCK_UNLOCK(so);
964
965 CURVNET_SET(so->so_vnet);
966 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
967 CURVNET_RESTORE();
968 return (error);
969}
970
971int
972soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
973{
974
975 return (soconnectat(AT_FDCWD, so, nam, td));
976}
977
978int
979soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
980{
981 int error;
982
983 if (so->so_options & SO_ACCEPTCONN)
984 return (EOPNOTSUPP);
985
986 CURVNET_SET(so->so_vnet);
987 /*
988 * If protocol is connection-based, can only connect once.
989 * Otherwise, if connected, try to disconnect first. This allows
990 * user to disconnect by connecting to, e.g., a null address.
991 */
992 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
993 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
994 (error = sodisconnect(so)))) {
995 error = EISCONN;
996 } else {
997 /*
998 * Prevent accumulated error from previous connection from
999 * biting us.
1000 */
1001 so->so_error = 0;
1002 if (fd == AT_FDCWD) {
1003 error = (*so->so_proto->pr_usrreqs->pru_connect)(so,
1004 nam, td);
1005 } else {
1006 error = (*so->so_proto->pr_usrreqs->pru_connectat)(fd,
1007 so, nam, td);
1008 }
1009 }
1010 CURVNET_RESTORE();
1011
1012 return (error);
1013}
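Kernel consumers that need a blocking connect usually pair soconnect()/soconnectat() with a sleep on so_timeo until SS_ISCONNECTING clears, much as the syscall layer does. The sketch below approximates that pattern; the helper name is hypothetical and the SOCK_MTX()/msleep() details are assumptions rather than anything defined in this file.

/* Illustrative sketch only; not part of uipc_socket.c. */
static int
example_connect_wait(struct socket *so, struct sockaddr *sa, struct thread *td)
{
	int error;

	error = soconnect(so, sa, td);
	if (error != 0)
		return (error);
	SOCK_LOCK(so);
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
		    "excon", 0);
		if (error != 0)
			break;
	}
	if (error == 0 && so->so_error != 0) {
		error = so->so_error;	/* Pick up the asynchronous error. */
		so->so_error = 0;
	}
	SOCK_UNLOCK(so);
	return (error);
}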
1014
1015int
1016soconnect2(struct socket *so1, struct socket *so2)
1017{
1018 int error;
1019
1020 CURVNET_SET(so1->so_vnet);
1021 error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
1022 CURVNET_RESTORE();
1023 return (error);
1024}
1025
1026int
1027sodisconnect(struct socket *so)
1028{
1029 int error;
1030
1031 if ((so->so_state & SS_ISCONNECTED) == 0)
1032 return (ENOTCONN);
1033 if (so->so_state & SS_ISDISCONNECTING)
1034 return (EALREADY);
1035 VNET_SO_ASSERT(so);
1036 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
1037 return (error);
1038}
1039
1040#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
1041
1042int
1043sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
1044 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1045{
1046 long space;
1047 ssize_t resid;
1048 int clen = 0, error, dontroute;
1049
1050 KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
1051 KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
1052 ("sosend_dgram: !PR_ATOMIC"));
1053
1054 if (uio != NULL)
1055 resid = uio->uio_resid;
1056 else
1057 resid = top->m_pkthdr.len;
1058 /*
1059 * In theory resid should be unsigned. However, space must be
1060 * signed, as it might be less than 0 if we over-committed, and we
1061 * must use a signed comparison of space and resid. On the other
1062 * hand, a negative resid causes us to loop sending 0-length
1063 * segments to the protocol.
1064 */
1065 if (resid < 0) {
1066 error = EINVAL;
1067 goto out;
1068 }
1069
1070 dontroute =
1071 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
1072 if (td != NULL)
1073 td->td_ru.ru_msgsnd++;
1074 if (control != NULL)
1075 clen = control->m_len;
1076
1077 SOCKBUF_LOCK(&so->so_snd);
1078 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1079 SOCKBUF_UNLOCK(&so->so_snd);
1080 error = EPIPE;
1081 goto out;
1082 }
1083 if (so->so_error) {
1084 error = so->so_error;
1085 so->so_error = 0;
1086 SOCKBUF_UNLOCK(&so->so_snd);
1087 goto out;
1088 }
1089 if ((so->so_state & SS_ISCONNECTED) == 0) {
1090 /*
 1091 * `sendto' and `sendmsg' are allowed on a connection-based
1092 * socket if it supports implied connect. Return ENOTCONN if
1093 * not connected and no address is supplied.
1094 */
1095 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1096 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1097 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1098 !(resid == 0 && clen != 0)) {
1099 SOCKBUF_UNLOCK(&so->so_snd);
1100 error = ENOTCONN;
1101 goto out;
1102 }
1103 } else if (addr == NULL) {
1104 if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1105 error = ENOTCONN;
1106 else
1107 error = EDESTADDRREQ;
1108 SOCKBUF_UNLOCK(&so->so_snd);
1109 goto out;
1110 }
1111 }
1112
1113 /*
1114 * Do we need MSG_OOB support in SOCK_DGRAM? Signs here may be a
1115 * problem and need fixing.
1116 */
1117 space = sbspace(&so->so_snd);
1118 if (flags & MSG_OOB)
1119 space += 1024;
1120 space -= clen;
1121 SOCKBUF_UNLOCK(&so->so_snd);
1122 if (resid > space) {
1123 error = EMSGSIZE;
1124 goto out;
1125 }
1126 if (uio == NULL) {
1127 resid = 0;
1128 if (flags & MSG_EOR)
1129 top->m_flags |= M_EOR;
1130 } else {
1131 /*
1132 * Copy the data from userland into a mbuf chain.
1133 * If no data is to be copied in, a single empty mbuf
1134 * is returned.
1135 */
1136 top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
1137 (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
1138 if (top == NULL) {
1139 error = EFAULT; /* only possible error */
1140 goto out;
1141 }
1142 space -= resid - uio->uio_resid;
1143 resid = uio->uio_resid;
1144 }
1145 KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
1146 /*
1147 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
1148 * than with.
1149 */
1150 if (dontroute) {
1151 SOCK_LOCK(so);
1152 so->so_options |= SO_DONTROUTE;
1153 SOCK_UNLOCK(so);
1154 }
1155 /*
1156 * XXX all the SBS_CANTSENDMORE checks previously done could be out
 1157 * of date. We could have received a reset packet in an interrupt or
1158 * maybe we slept while doing page faults in uiomove() etc. We could
1159 * probably recheck again inside the locking protection here, but
1160 * there are probably other places that this also happens. We must
1161 * rethink this.
1162 */
1163 VNET_SO_ASSERT(so);
1164 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1165 (flags & MSG_OOB) ? PRUS_OOB :
1166 /*
 1167 * If the user set MSG_EOF, the protocol understands this flag, and there
 1168 * is nothing left to send, then use PRU_SEND_EOF instead of PRU_SEND.
1169 */
1170 ((flags & MSG_EOF) &&
1171 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1172 (resid <= 0)) ?
1173 PRUS_EOF :
1174 /* If there is more to send set PRUS_MORETOCOME */
1175 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1176 top, addr, control, td);
1177 if (dontroute) {
1178 SOCK_LOCK(so);
1179 so->so_options &= ~SO_DONTROUTE;
1180 SOCK_UNLOCK(so);
1181 }
1182 clen = 0;
1183 control = NULL;
1184 top = NULL;
1185out:
1186 if (top != NULL)
1187 m_freem(top);
1188 if (control != NULL)
1189 m_freem(control);
1190 return (error);
1191}
1192
1193/*
1194 * Send on a socket. If send must go all at once and message is larger than
1195 * send buffering, then hard error. Lock against other senders. If must go
1196 * all at once and not enough room now, then inform user that this would
1197 * block and do nothing. Otherwise, if nonblocking, send as much as
1198 * possible. The data to be sent is described by "uio" if nonzero, otherwise
1199 * by the mbuf chain "top" (which must be null if uio is not). Data provided
1200 * in mbuf chain must be small enough to send all at once.
1201 *
1202 * Returns nonzero on error, timeout or signal; callers must check for short
1203 * counts if EINTR/ERESTART are returned. Data and control buffers are freed
1204 * on return.
1205 */
1206int
1207sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
1208 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1209{
1210 long space;
1211 ssize_t resid;
1212 int clen = 0, error, dontroute;
1213 int atomic = sosendallatonce(so) || top;
1214
1215 if (uio != NULL)
1216 resid = uio->uio_resid;
1217 else
1218 resid = top->m_pkthdr.len;
1219 /*
1220 * In theory resid should be unsigned. However, space must be
1221 * signed, as it might be less than 0 if we over-committed, and we
1222 * must use a signed comparison of space and resid. On the other
1223 * hand, a negative resid causes us to loop sending 0-length
1224 * segments to the protocol.
1225 *
1226 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
1227 * type sockets since that's an error.
1228 */
1229 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
1230 error = EINVAL;
1231 goto out;
1232 }
1233
1234 dontroute =
1235 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
1236 (so->so_proto->pr_flags & PR_ATOMIC);
1237 if (td != NULL)
1238 td->td_ru.ru_msgsnd++;
1239 if (control != NULL)
1240 clen = control->m_len;
1241
1242 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
1243 if (error)
1244 goto out;
1245
1246restart:
1247 do {
1248 SOCKBUF_LOCK(&so->so_snd);
1249 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1250 SOCKBUF_UNLOCK(&so->so_snd);
1251 error = EPIPE;
1252 goto release;
1253 }
1254 if (so->so_error) {
1255 error = so->so_error;
1256 so->so_error = 0;
1257 SOCKBUF_UNLOCK(&so->so_snd);
1258 goto release;
1259 }
1260 if ((so->so_state & SS_ISCONNECTED) == 0) {
1261 /*
 1262 * `sendto' and `sendmsg' are allowed on a connection-
1263 * based socket if it supports implied connect.
1264 * Return ENOTCONN if not connected and no address is
1265 * supplied.
1266 */
1267 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1268 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1269 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1270 !(resid == 0 && clen != 0)) {
1271 SOCKBUF_UNLOCK(&so->so_snd);
1272 error = ENOTCONN;
1273 goto release;
1274 }
1275 } else if (addr == NULL) {
1276 SOCKBUF_UNLOCK(&so->so_snd);
1277 if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1278 error = ENOTCONN;
1279 else
1280 error = EDESTADDRREQ;
1281 goto release;
1282 }
1283 }
1284 space = sbspace(&so->so_snd);
1285 if (flags & MSG_OOB)
1286 space += 1024;
1287 if ((atomic && resid > so->so_snd.sb_hiwat) ||
1288 clen > so->so_snd.sb_hiwat) {
1289 SOCKBUF_UNLOCK(&so->so_snd);
1290 error = EMSGSIZE;
1291 goto release;
1292 }
1293 if (space < resid + clen &&
1294 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
1295 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
1296 SOCKBUF_UNLOCK(&so->so_snd);
1297 error = EWOULDBLOCK;
1298 goto release;
1299 }
1300 error = sbwait(&so->so_snd);
1301 SOCKBUF_UNLOCK(&so->so_snd);
1302 if (error)
1303 goto release;
1304 goto restart;
1305 }
1306 SOCKBUF_UNLOCK(&so->so_snd);
1307 space -= clen;
1308 do {
1309 if (uio == NULL) {
1310 resid = 0;
1311 if (flags & MSG_EOR)
1312 top->m_flags |= M_EOR;
1313 } else if (resid > 0) {
1314 /*
1315 * Copy the data from userland into a mbuf
1316 * chain. If no data is to be copied in,
1317 * a single empty mbuf is returned.
1318 */
1319 top = m_uiotombuf(uio, M_WAITOK, space,
1320 (atomic ? max_hdr : 0),
1321 (atomic ? M_PKTHDR : 0) |
1322 ((flags & MSG_EOR) ? M_EOR : 0));
1323 if (top == NULL) {
1324 error = EFAULT; /* only possible error */
1325 goto release;
1326 }
1327 space -= resid - uio->uio_resid;
1328 resid = uio->uio_resid;
1329 }
1330 if (dontroute) {
1331 SOCK_LOCK(so);
1332 so->so_options |= SO_DONTROUTE;
1333 SOCK_UNLOCK(so);
1334 }
1335 /*
1336 * XXX all the SBS_CANTSENDMORE checks previously
 1337 * done could be out of date. We could have received
1338 * a reset packet in an interrupt or maybe we slept
1339 * while doing page faults in uiomove() etc. We
1340 * could probably recheck again inside the locking
1341 * protection here, but there are probably other
1342 * places that this also happens. We must rethink
1343 * this.
1344 */
1345 VNET_SO_ASSERT(so);
1346 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1347 (flags & MSG_OOB) ? PRUS_OOB :
1348 /*
1349 * If the user set MSG_EOF, the protocol understands
 1350 * this flag, and there is nothing left to send, then use
1351 * PRU_SEND_EOF instead of PRU_SEND.
1352 */
1353 ((flags & MSG_EOF) &&
1354 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1355 (resid <= 0)) ?
1356 PRUS_EOF :
1357 /* If there is more to send set PRUS_MORETOCOME. */
1358 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1359 top, addr, control, td);
1360 if (dontroute) {
1361 SOCK_LOCK(so);
1362 so->so_options &= ~SO_DONTROUTE;
1363 SOCK_UNLOCK(so);
1364 }
1365 clen = 0;
1366 control = NULL;
1367 top = NULL;
1368 if (error)
1369 goto release;
1370 } while (resid && space > 0);
1371 } while (resid);
1372
1373release:
1374 sbunlock(&so->so_snd);
1375out:
1376 if (top != NULL)
1377 m_freem(top);
1378 if (control != NULL)
1379 m_freem(control);
1380 return (error);
1381}
1382
1383int
1384sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
1385 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1386{
1387 int error;
1388
1389 CURVNET_SET(so->so_vnet);
1390 error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
1391 control, flags, td);
1392 CURVNET_RESTORE();
1393 return (error);
1394}
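For callers that already hold a fully built mbuf chain, sosend() is invoked with a NULL uio and the chain as 'top'; a minimal sketch follows (the helper name is hypothetical). Note the comment above sosend_generic(): data and control buffers are freed on return even when an error is reported.

/* Illustrative sketch only; not part of uipc_socket.c. */
static int
example_send_mbuf(struct socket *so, struct sockaddr *dst, struct mbuf *m,
    struct thread *td)
{

	/* sosend() always consumes 'm', so the caller must not free it. */
	return (sosend(so, dst, NULL, m, NULL, 0, td));
}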
1395
1396/*
1397 * The part of soreceive() that implements reading non-inline out-of-band
1398 * data from a socket. For more complete comments, see soreceive(), from
1399 * which this code originated.
1400 *
1401 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
1402 * unable to return an mbuf chain to the caller.
1403 */
1404static int
1405soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
1406{
1407 struct protosw *pr = so->so_proto;
1408 struct mbuf *m;
1409 int error;
1410
1411 KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
1412 VNET_SO_ASSERT(so);
1413
1414 m = m_get(M_WAITOK, MT_DATA);
1415 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
1416 if (error)
1417 goto bad;
1418 do {
1419 error = uiomove(mtod(m, void *),
1420 (int) min(uio->uio_resid, m->m_len), uio);
1421 m = m_free(m);
1422 } while (uio->uio_resid && error == 0 && m);
1423bad:
1424 if (m != NULL)
1425 m_freem(m);
1426 return (error);
1427}
1428
1429/*
1430 * Following replacement or removal of the first mbuf on the first mbuf chain
1431 * of a socket buffer, push necessary state changes back into the socket
1432 * buffer so that other consumers see the values consistently. 'nextrecord'
1433 * is the callers locally stored value of the original value of
1434 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
1435 * NOTE: 'nextrecord' may be NULL.
1436 */
1437static __inline void
1438sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
1439{
1440
1441 SOCKBUF_LOCK_ASSERT(sb);
1442 /*
1443 * First, update for the new value of nextrecord. If necessary, make
1444 * it the first record.
1445 */
1446 if (sb->sb_mb != NULL)
1447 sb->sb_mb->m_nextpkt = nextrecord;
1448 else
1449 sb->sb_mb = nextrecord;
1450
1451 /*
1452 * Now update any dependent socket buffer fields to reflect the new
1453 * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the
1454 * addition of a second clause that takes care of the case where
1455 * sb_mb has been updated, but remains the last record.
1456 */
1457 if (sb->sb_mb == NULL) {
1458 sb->sb_mbtail = NULL;
1459 sb->sb_lastrecord = NULL;
1460 } else if (sb->sb_mb->m_nextpkt == NULL)
1461 sb->sb_lastrecord = sb->sb_mb;
1462}
1463
1464/*
1465 * Implement receive operations on a socket. We depend on the way that
1466 * records are added to the sockbuf by sbappend. In particular, each record
1467 * (mbufs linked through m_next) must begin with an address if the protocol
1468 * so specifies, followed by an optional mbuf or mbufs containing ancillary
1469 * data, and then zero or more mbufs of data. In order to allow parallelism
1470 * between network receive and copying to user space, as well as avoid
1471 * sleeping with a mutex held, we release the socket buffer mutex during the
1472 * user space copy. Although the sockbuf is locked, new data may still be
1473 * appended, and thus we must maintain consistency of the sockbuf during that
1474 * time.
1475 *
1476 * The caller may receive the data as a single mbuf chain by supplying an
1477 * mbuf **mp0 for use in returning the chain. The uio is then used only for
1478 * the count in uio_resid.
1479 */
1480int
1481soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
1482 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1483{
1484 struct mbuf *m, **mp;
1485 int flags, error, offset;
1486 ssize_t len;
1487 struct protosw *pr = so->so_proto;
1488 struct mbuf *nextrecord;
1489 int moff, type = 0;
1490 ssize_t orig_resid = uio->uio_resid;
1491
1492 mp = mp0;
1493 if (psa != NULL)
1494 *psa = NULL;
1495 if (controlp != NULL)
1496 *controlp = NULL;
1497 if (flagsp != NULL)
1498 flags = *flagsp &~ MSG_EOR;
1499 else
1500 flags = 0;
1501 if (flags & MSG_OOB)
1502 return (soreceive_rcvoob(so, uio, flags));
1503 if (mp != NULL)
1504 *mp = NULL;
1505 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
1506 && uio->uio_resid) {
1507 VNET_SO_ASSERT(so);
1508 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
1509 }
1510
1511 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1512 if (error)
1513 return (error);
1514
1515restart:
1516 SOCKBUF_LOCK(&so->so_rcv);
1517 m = so->so_rcv.sb_mb;
1518 /*
1519 * If we have less data than requested, block awaiting more (subject
1520 * to any timeout) if:
1521 * 1. the current count is less than the low water mark, or
1522 * 2. MSG_DONTWAIT is not set
1523 */
1524 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1525 sbavail(&so->so_rcv) < uio->uio_resid) &&
1526 sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
1527 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1528 KASSERT(m != NULL || !sbavail(&so->so_rcv),
1529 ("receive: m == %p sbavail == %u",
1530 m, sbavail(&so->so_rcv)));
1531 if (so->so_error) {
1532 if (m != NULL)
1533 goto dontblock;
1534 error = so->so_error;
1535 if ((flags & MSG_PEEK) == 0)
1536 so->so_error = 0;
1537 SOCKBUF_UNLOCK(&so->so_rcv);
1538 goto release;
1539 }
1540 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1541 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1542 if (m == NULL) {
1543 SOCKBUF_UNLOCK(&so->so_rcv);
1544 goto release;
1545 } else
1546 goto dontblock;
1547 }
1548 for (; m != NULL; m = m->m_next)
1549 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1550 m = so->so_rcv.sb_mb;
1551 goto dontblock;
1552 }
1553 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1554 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1555 SOCKBUF_UNLOCK(&so->so_rcv);
1556 error = ENOTCONN;
1557 goto release;
1558 }
1559 if (uio->uio_resid == 0) {
1560 SOCKBUF_UNLOCK(&so->so_rcv);
1561 goto release;
1562 }
1563 if ((so->so_state & SS_NBIO) ||
1564 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1565 SOCKBUF_UNLOCK(&so->so_rcv);
1566 error = EWOULDBLOCK;
1567 goto release;
1568 }
1569 SBLASTRECORDCHK(&so->so_rcv);
1570 SBLASTMBUFCHK(&so->so_rcv);
1571 error = sbwait(&so->so_rcv);
1572 SOCKBUF_UNLOCK(&so->so_rcv);
1573 if (error)
1574 goto release;
1575 goto restart;
1576 }
1577dontblock:
1578 /*
1579 * From this point onward, we maintain 'nextrecord' as a cache of the
1580 * pointer to the next record in the socket buffer. We must keep the
1581 * various socket buffer pointers and local stack versions of the
1582 * pointers in sync, pushing out modifications before dropping the
1583 * socket buffer mutex, and re-reading them when picking it up.
1584 *
1585 * Otherwise, we will race with the network stack appending new data
1586 * or records onto the socket buffer by using inconsistent/stale
1587 * versions of the field, possibly resulting in socket buffer
1588 * corruption.
1589 *
1590 * By holding the high-level sblock(), we prevent simultaneous
1591 * readers from pulling off the front of the socket buffer.
1592 */
1593 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1594 if (uio->uio_td)
1595 uio->uio_td->td_ru.ru_msgrcv++;
1596 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1597 SBLASTRECORDCHK(&so->so_rcv);
1598 SBLASTMBUFCHK(&so->so_rcv);
1599 nextrecord = m->m_nextpkt;
1600 if (pr->pr_flags & PR_ADDR) {
1601 KASSERT(m->m_type == MT_SONAME,
1602 ("m->m_type == %d", m->m_type));
1603 orig_resid = 0;
1604 if (psa != NULL)
1605 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
1606 M_NOWAIT);
1607 if (flags & MSG_PEEK) {
1608 m = m->m_next;
1609 } else {
1610 sbfree(&so->so_rcv, m);
1611 so->so_rcv.sb_mb = m_free(m);
1612 m = so->so_rcv.sb_mb;
1613 sockbuf_pushsync(&so->so_rcv, nextrecord);
1614 }
1615 }
1616
1617 /*
1618 * Process one or more MT_CONTROL mbufs present before any data mbufs
1619 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
1620 * just copy the data; if !MSG_PEEK, we call into the protocol to
1621 * perform externalization (or freeing if controlp == NULL).
1622 */
1623 if (m != NULL && m->m_type == MT_CONTROL) {
1624 struct mbuf *cm = NULL, *cmn;
1625 struct mbuf **cme = &cm;
1626
1627 do {
1628 if (flags & MSG_PEEK) {
1629 if (controlp != NULL) {
1630 *controlp = m_copy(m, 0, m->m_len);
1631 controlp = &(*controlp)->m_next;
1632 }
1633 m = m->m_next;
1634 } else {
1635 sbfree(&so->so_rcv, m);
1636 so->so_rcv.sb_mb = m->m_next;
1637 m->m_next = NULL;
1638 *cme = m;
1639 cme = &(*cme)->m_next;
1640 m = so->so_rcv.sb_mb;
1641 }
1642 } while (m != NULL && m->m_type == MT_CONTROL);
1643 if ((flags & MSG_PEEK) == 0)
1644 sockbuf_pushsync(&so->so_rcv, nextrecord);
1645 while (cm != NULL) {
1646 cmn = cm->m_next;
1647 cm->m_next = NULL;
1648 if (pr->pr_domain->dom_externalize != NULL) {
1649 SOCKBUF_UNLOCK(&so->so_rcv);
1650 VNET_SO_ASSERT(so);
1651 error = (*pr->pr_domain->dom_externalize)
1652 (cm, controlp, flags);
1653 SOCKBUF_LOCK(&so->so_rcv);
1654 } else if (controlp != NULL)
1655 *controlp = cm;
1656 else
1657 m_freem(cm);
1658 if (controlp != NULL) {
1659 orig_resid = 0;
1660 while (*controlp != NULL)
1661 controlp = &(*controlp)->m_next;
1662 }
1663 cm = cmn;
1664 }
1665 if (m != NULL)
1666 nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1667 else
1668 nextrecord = so->so_rcv.sb_mb;
1669 orig_resid = 0;
1670 }
1671 if (m != NULL) {
1672 if ((flags & MSG_PEEK) == 0) {
1673 KASSERT(m->m_nextpkt == nextrecord,
1674 ("soreceive: post-control, nextrecord !sync"));
1675 if (nextrecord == NULL) {
1676 KASSERT(so->so_rcv.sb_mb == m,
1677 ("soreceive: post-control, sb_mb!=m"));
1678 KASSERT(so->so_rcv.sb_lastrecord == m,
1679 ("soreceive: post-control, lastrecord!=m"));
1680 }
1681 }
1682 type = m->m_type;
1683 if (type == MT_OOBDATA)
1684 flags |= MSG_OOB;
1685 } else {
1686 if ((flags & MSG_PEEK) == 0) {
1687 KASSERT(so->so_rcv.sb_mb == nextrecord,
1688 ("soreceive: sb_mb != nextrecord"));
1689 if (so->so_rcv.sb_mb == NULL) {
1690 KASSERT(so->so_rcv.sb_lastrecord == NULL,
1691 				    ("soreceive: sb_lastrecord != NULL"));
1692 }
1693 }
1694 }
1695 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1696 SBLASTRECORDCHK(&so->so_rcv);
1697 SBLASTMBUFCHK(&so->so_rcv);
1698
1699 /*
1700 * Now continue to read any data mbufs off of the head of the socket
1701 * buffer until the read request is satisfied. Note that 'type' is
1702 * used to store the type of any mbuf reads that have happened so far
1703 * such that soreceive() can stop reading if the type changes, which
1704 * causes soreceive() to return only one of regular data and inline
1705 * out-of-band data in a single socket receive operation.
1706 */
1707 moff = 0;
1708 offset = 0;
1709 while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
1710 && error == 0) {
1711 /*
1712 * If the type of mbuf has changed since the last mbuf
1713 * examined ('type'), end the receive operation.
1714 */
1715 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1716 if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
1717 if (type != m->m_type)
1718 break;
1719 } else if (type == MT_OOBDATA)
1720 break;
1721 else
1722 KASSERT(m->m_type == MT_DATA,
1723 ("m->m_type == %d", m->m_type));
1724 so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1725 len = uio->uio_resid;
1726 if (so->so_oobmark && len > so->so_oobmark - offset)
1727 len = so->so_oobmark - offset;
1728 if (len > m->m_len - moff)
1729 len = m->m_len - moff;
1730 /*
1731		 * If mp is set, just pass back the mbufs.  Otherwise copy
1732		 * them out via the uio, then free.  The sockbuf must be
1733		 * consistent here (sb_mb points to the current mbuf and
1734		 * m_nextpkt to the next record) when we drop the sockbuf lock;
1735		 * we must note any additions to the sockbuf when we reacquire it.
1736 */
1737 if (mp == NULL) {
1738 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1739 SBLASTRECORDCHK(&so->so_rcv);
1740 SBLASTMBUFCHK(&so->so_rcv);
1741 SOCKBUF_UNLOCK(&so->so_rcv);
1742 error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1743 SOCKBUF_LOCK(&so->so_rcv);
1744 if (error) {
1745 /*
1746 * The MT_SONAME mbuf has already been removed
1747 * from the record, so it is necessary to
1748 * remove the data mbufs, if any, to preserve
1749 * the invariant in the case of PR_ADDR that
1750 * requires MT_SONAME mbufs at the head of
1751 * each record.
1752 */
1753 if (m && pr->pr_flags & PR_ATOMIC &&
1754 ((flags & MSG_PEEK) == 0))
1755 (void)sbdroprecord_locked(&so->so_rcv);
1756 SOCKBUF_UNLOCK(&so->so_rcv);
1757 goto release;
1758 }
1759 } else
1760 uio->uio_resid -= len;
1761 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1762 if (len == m->m_len - moff) {
1763 if (m->m_flags & M_EOR)
1764 flags |= MSG_EOR;
1765 if (flags & MSG_PEEK) {
1766 m = m->m_next;
1767 moff = 0;
1768 } else {
1769 nextrecord = m->m_nextpkt;
1770 sbfree(&so->so_rcv, m);
1771 if (mp != NULL) {
1772 m->m_nextpkt = NULL;
1773 *mp = m;
1774 mp = &m->m_next;
1775 so->so_rcv.sb_mb = m = m->m_next;
1776 *mp = NULL;
1777 } else {
1778 so->so_rcv.sb_mb = m_free(m);
1779 m = so->so_rcv.sb_mb;
1780 }
1781 sockbuf_pushsync(&so->so_rcv, nextrecord);
1782 SBLASTRECORDCHK(&so->so_rcv);
1783 SBLASTMBUFCHK(&so->so_rcv);
1784 }
1785 } else {
1786 if (flags & MSG_PEEK)
1787 moff += len;
1788 else {
1789 if (mp != NULL) {
1790 if (flags & MSG_DONTWAIT) {
1791 *mp = m_copym(m, 0, len,
1792 M_NOWAIT);
1793 if (*mp == NULL) {
1794 /*
1795 * m_copym() couldn't
1796 * allocate an mbuf.
1797 * Adjust uio_resid back
1798 * (it was adjusted
1799 * down by len bytes,
1800 * which we didn't end
1801 * up "copying" over).
1802 */
1803 uio->uio_resid += len;
1804 break;
1805 }
1806 } else {
1807 SOCKBUF_UNLOCK(&so->so_rcv);
1808 *mp = m_copym(m, 0, len,
1809 M_WAITOK);
1810 SOCKBUF_LOCK(&so->so_rcv);
1811 }
1812 }
1813 sbcut_locked(&so->so_rcv, len);
1814 }
1815 }
1816 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1817 if (so->so_oobmark) {
1818 if ((flags & MSG_PEEK) == 0) {
1819 so->so_oobmark -= len;
1820 if (so->so_oobmark == 0) {
1821 so->so_rcv.sb_state |= SBS_RCVATMARK;
1822 break;
1823 }
1824 } else {
1825 offset += len;
1826 if (offset == so->so_oobmark)
1827 break;
1828 }
1829 }
1830 if (flags & MSG_EOR)
1831 break;
1832 /*
1833		 * If the MSG_WAITALL flag is set (for a non-atomic socket), we
1834		 * must not quit until "uio->uio_resid == 0" or an error
1835		 * terminates the transfer.  If a signal/timeout occurs, return with a
1836 * short count but without error. Keep sockbuf locked
1837 * against other readers.
1838 */
1839 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1840 !sosendallatonce(so) && nextrecord == NULL) {
1841 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1842 if (so->so_error ||
1843 so->so_rcv.sb_state & SBS_CANTRCVMORE)
1844 break;
1845 /*
1846 * Notify the protocol that some data has been
1847 * drained before blocking.
1848 */
1849 if (pr->pr_flags & PR_WANTRCVD) {
1850 SOCKBUF_UNLOCK(&so->so_rcv);
1851 VNET_SO_ASSERT(so);
1852 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1853 SOCKBUF_LOCK(&so->so_rcv);
1854 }
1855 SBLASTRECORDCHK(&so->so_rcv);
1856 SBLASTMBUFCHK(&so->so_rcv);
1857 /*
1858			 * We could receive some data while we were notifying
1859 * the protocol. Skip blocking in this case.
1860 */
1861 if (so->so_rcv.sb_mb == NULL) {
1862 error = sbwait(&so->so_rcv);
1863 if (error) {
1864 SOCKBUF_UNLOCK(&so->so_rcv);
1865 goto release;
1866 }
1867 }
1868 m = so->so_rcv.sb_mb;
1869 if (m != NULL)
1870 nextrecord = m->m_nextpkt;
1871 }
1872 }
1873
1874 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1875 if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1876 flags |= MSG_TRUNC;
1877 if ((flags & MSG_PEEK) == 0)
1878 (void) sbdroprecord_locked(&so->so_rcv);
1879 }
1880 if ((flags & MSG_PEEK) == 0) {
1881 if (m == NULL) {
1882 /*
1883 * First part is an inline SB_EMPTY_FIXUP(). Second
1884 * part makes sure sb_lastrecord is up-to-date if
1885 * there is still data in the socket buffer.
1886 */
1887 so->so_rcv.sb_mb = nextrecord;
1888 if (so->so_rcv.sb_mb == NULL) {
1889 so->so_rcv.sb_mbtail = NULL;
1890 so->so_rcv.sb_lastrecord = NULL;
1891 } else if (nextrecord->m_nextpkt == NULL)
1892 so->so_rcv.sb_lastrecord = nextrecord;
1893 }
1894 SBLASTRECORDCHK(&so->so_rcv);
1895 SBLASTMBUFCHK(&so->so_rcv);
1896 /*
1897 * If soreceive() is being done from the socket callback,
1897		 * then we don't need to generate an ACK to the peer to update
1899		 * the window, since the ACK will be generated on return to TCP.
1900 */
1901 if (!(flags & MSG_SOCALLBCK) &&
1902 (pr->pr_flags & PR_WANTRCVD)) {
1903 SOCKBUF_UNLOCK(&so->so_rcv);
1904 VNET_SO_ASSERT(so);
1905 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1906 SOCKBUF_LOCK(&so->so_rcv);
1907 }
1908 }
1909 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1910 if (orig_resid == uio->uio_resid && orig_resid &&
1911 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1912 SOCKBUF_UNLOCK(&so->so_rcv);
1913 goto restart;
1914 }
1915 SOCKBUF_UNLOCK(&so->so_rcv);
1916
1917 if (flagsp != NULL)
1918 *flagsp |= flags;
1919release:
1920 sbunlock(&so->so_rcv);
1921 return (error);
1922}
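
/*
 * Sketch: how an in-kernel consumer might use the mp0 interface described
 * above to take delivery as an mbuf chain instead of copying through a
 * uio.  When mp0 is non-NULL only uio_resid is consulted for the count.
 * The helper name below is hypothetical; the chain is returned for the
 * caller to free.
 */
#if 0	/* illustrative sketch; not part of the build */
static int
example_recv_mbufs(struct socket *so, struct mbuf **mp, ssize_t maxlen)
{
	struct uio auio;
	int flags;

	bzero(&auio, sizeof(auio));
	auio.uio_resid = maxlen;	/* only the byte count is used */
	flags = MSG_DONTWAIT;		/* fail with EWOULDBLOCK if empty */
	*mp = NULL;
	return (soreceive(so, NULL, &auio, mp, NULL, &flags));
}
#endif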
1923
1924/*
1925 * Optimized version of soreceive() for stream (TCP) sockets.
1926 * XXXAO: (MSG_WAITALL | MSG_PEEK) isn't properly handled.
1927 */
1928int
1929soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
1930 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1931{
1932 int len = 0, error = 0, flags, oresid;
1933 struct sockbuf *sb;
1934 struct mbuf *m, *n = NULL;
1935
1936 /* We only do stream sockets. */
1937 if (so->so_type != SOCK_STREAM)
1938 return (EINVAL);
1939 if (psa != NULL)
1940 *psa = NULL;
1941 if (controlp != NULL)
1942 return (EINVAL);
1943 if (flagsp != NULL)
1944 flags = *flagsp &~ MSG_EOR;
1945 else
1946 flags = 0;
1947 if (flags & MSG_OOB)
1948 return (soreceive_rcvoob(so, uio, flags));
1949 if (mp0 != NULL)
1950 *mp0 = NULL;
1951
1952 sb = &so->so_rcv;
1953
1954 /* Prevent other readers from entering the socket. */
1955 error = sblock(sb, SBLOCKWAIT(flags));
1956 if (error)
1957 goto out;
1958 SOCKBUF_LOCK(sb);
1959
1960 /* Easy one, no space to copyout anything. */
1961 if (uio->uio_resid == 0) {
1962 error = EINVAL;
1963 goto out;
1964 }
1965 oresid = uio->uio_resid;
1966
1967 /* We will never ever get anything unless we are or were connected. */
1968 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
1969 error = ENOTCONN;
1970 goto out;
1971 }
1972
1973restart:
1974 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1975
1976 /* Abort if socket has reported problems. */
1977 if (so->so_error) {
1978 if (sbavail(sb) > 0)
1979 goto deliver;
1980 if (oresid > uio->uio_resid)
1981 goto out;
1982 error = so->so_error;
1983 if (!(flags & MSG_PEEK))
1984 so->so_error = 0;
1985 goto out;
1986 }
1987
1988 /* Door is closed. Deliver what is left, if any. */
1989 if (sb->sb_state & SBS_CANTRCVMORE) {
1990 if (sbavail(sb) > 0)
1991 goto deliver;
1992 else
1993 goto out;
1994 }
1995
1996 /* Socket buffer is empty and we shall not block. */
1997 if (sbavail(sb) == 0 &&
1998 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
1999 error = EAGAIN;
2000 goto out;
2001 }
2002
2003 /* Socket buffer got some data that we shall deliver now. */
2004 if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
2005	    ((so->so_state & SS_NBIO) ||
2006 (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
2007 sbavail(sb) >= sb->sb_lowat ||
2008 sbavail(sb) >= uio->uio_resid ||
2009 sbavail(sb) >= sb->sb_hiwat) ) {
2010 goto deliver;
2011 }
2012
2013 /* On MSG_WAITALL we must wait until all data or error arrives. */
2014 if ((flags & MSG_WAITALL) &&
2015 (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
2016 goto deliver;
2017
2018 /*
2019 * Wait and block until (more) data comes in.
2020 * NB: Drops the sockbuf lock during wait.
2021 */
2022 error = sbwait(sb);
2023 if (error)
2024 goto out;
2025 goto restart;
2026
2027deliver:
2028 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2029 KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
2030 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
2031
2032 /* Statistics. */
2033 if (uio->uio_td)
2034 uio->uio_td->td_ru.ru_msgrcv++;
2035
2036 /* Fill uio until full or current end of socket buffer is reached. */
2037 len = min(uio->uio_resid, sbavail(sb));
2038 if (mp0 != NULL) {
2039 /* Dequeue as many mbufs as possible. */
2040 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
2041 if (*mp0 == NULL)
2042 *mp0 = sb->sb_mb;
2043 else
2044 m_cat(*mp0, sb->sb_mb);
2045 for (m = sb->sb_mb;
2046 m != NULL && m->m_len <= len;
2047 m = m->m_next) {
2048 KASSERT(!(m->m_flags & M_NOTAVAIL),
2049 ("%s: m %p not available", __func__, m));
2050 len -= m->m_len;
2051 uio->uio_resid -= m->m_len;
2052 sbfree(sb, m);
2053 n = m;
2054 }
2055 n->m_next = NULL;
2056 sb->sb_mb = m;
2057 sb->sb_lastrecord = sb->sb_mb;
2058 if (sb->sb_mb == NULL)
2059 SB_EMPTY_FIXUP(sb);
2060 }
2061 /* Copy the remainder. */
2062 if (len > 0) {
2063 KASSERT(sb->sb_mb != NULL,
2064 ("%s: len > 0 && sb->sb_mb empty", __func__));
2065
2066 m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
2067 if (m == NULL)
2068 len = 0; /* Don't flush data from sockbuf. */
2069 else
2070 uio->uio_resid -= len;
2071 if (*mp0 != NULL)
2072 m_cat(*mp0, m);
2073 else
2074 *mp0 = m;
2075 if (*mp0 == NULL) {
2076 error = ENOBUFS;
2077 goto out;
2078 }
2079 }
2080 } else {
2081 /* NB: Must unlock socket buffer as uiomove may sleep. */
2082 SOCKBUF_UNLOCK(sb);
2083 error = m_mbuftouio(uio, sb->sb_mb, len);
2084 SOCKBUF_LOCK(sb);
2085 if (error)
2086 goto out;
2087 }
2088 SBLASTRECORDCHK(sb);
2089 SBLASTMBUFCHK(sb);
2090
2091 /*
2092 * Remove the delivered data from the socket buffer unless we
2093 * were only peeking.
2094 */
2095 if (!(flags & MSG_PEEK)) {
2096 if (len > 0)
2097 sbdrop_locked(sb, len);
2098
2099 /* Notify protocol that we drained some data. */
2100 if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
2101 (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
2102 !(flags & MSG_SOCALLBCK))) {
2103 SOCKBUF_UNLOCK(sb);
2104 VNET_SO_ASSERT(so);
2105 (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
2106 SOCKBUF_LOCK(sb);
2107 }
2108 }
2109
2110 /*
2111 * For MSG_WAITALL we may have to loop again and wait for
2112 * more data to come in.
2113 */
2114 if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
2115 goto restart;
2116out:
2117 SOCKBUF_LOCK_ASSERT(sb);
2118 SBLASTRECORDCHK(sb);
2119 SBLASTMBUFCHK(sb);
2120 SOCKBUF_UNLOCK(sb);
2121 sbunlock(sb);
2122 return (error);
2123}
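
/*
 * Sketch: a stream protocol selects this optimized path by pointing
 * pru_soreceive at soreceive_stream in its pr_usrreqs (TCP can be switched
 * to it at boot time via a loader tunable).  The structure below is
 * hypothetical and abbreviated; a real protocol fills in every entry point.
 */
#if 0	/* illustrative sketch; not part of the build */
struct pr_usrreqs example_stream_usrreqs = {
	.pru_soreceive =	soreceive_stream,
	.pru_sosend =		sosend_generic,
	/* ... remaining pru_* entry points ... */
};
#endif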
2124
2125/*
2126 * Optimized version of soreceive() for simple datagram cases from userspace.
2127 * Unlike in the stream case, we're able to drop a datagram if copyout()
2128 * fails, and because we handle datagrams atomically, we don't need to use a
2129 * sleep lock to prevent I/O interlacing.
2130 */
2131int
2132soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
2133 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2134{
2135 struct mbuf *m, *m2;
2136 int flags, error;
2137 ssize_t len;
2138 struct protosw *pr = so->so_proto;
2139 struct mbuf *nextrecord;
2140
2141 if (psa != NULL)
2142 *psa = NULL;
2143 if (controlp != NULL)
2144 *controlp = NULL;
2145 if (flagsp != NULL)
2146 flags = *flagsp &~ MSG_EOR;
2147 else
2148 flags = 0;
2149
2150 /*
2151 * For any complicated cases, fall back to the full
2152 * soreceive_generic().
2153 */
2154 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
2155 return (soreceive_generic(so, psa, uio, mp0, controlp,
2156 flagsp));
2157
2158 /*
2159 * Enforce restrictions on use.
2160 */
2161 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
2162 ("soreceive_dgram: wantrcvd"));
2163 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
2164 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
2165 ("soreceive_dgram: SBS_RCVATMARK"));
2166 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
2167	    ("soreceive_dgram: PR_CONNREQUIRED"));
2168
2169 /*
2170 * Loop blocking while waiting for a datagram.
2171 */
2172 SOCKBUF_LOCK(&so->so_rcv);
2173 while ((m = so->so_rcv.sb_mb) == NULL) {
2174 KASSERT(sbavail(&so->so_rcv) == 0,
2175 ("soreceive_dgram: sb_mb NULL but sbavail %u",
2176 sbavail(&so->so_rcv)));
2177 if (so->so_error) {
2178 error = so->so_error;
2179 so->so_error = 0;
2180 SOCKBUF_UNLOCK(&so->so_rcv);
2181 return (error);
2182 }
2183 if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
2184 uio->uio_resid == 0) {
2185 SOCKBUF_UNLOCK(&so->so_rcv);
2186 return (0);
2187 }
2188 if ((so->so_state & SS_NBIO) ||
2189 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2190 SOCKBUF_UNLOCK(&so->so_rcv);
2191 return (EWOULDBLOCK);
2192 }
2193 SBLASTRECORDCHK(&so->so_rcv);
2194 SBLASTMBUFCHK(&so->so_rcv);
2195 error = sbwait(&so->so_rcv);
2196 if (error) {
2197 SOCKBUF_UNLOCK(&so->so_rcv);
2198 return (error);
2199 }
2200 }
2201 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2202
2203 if (uio->uio_td)
2204 uio->uio_td->td_ru.ru_msgrcv++;
2205 SBLASTRECORDCHK(&so->so_rcv);
2206 SBLASTMBUFCHK(&so->so_rcv);
2207 nextrecord = m->m_nextpkt;
2208 if (nextrecord == NULL) {
2209 KASSERT(so->so_rcv.sb_lastrecord == m,
2210 ("soreceive_dgram: lastrecord != m"));
2211 }
2212
2213 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
2214 ("soreceive_dgram: m_nextpkt != nextrecord"));
2215
2216 /*
2217 * Pull 'm' and its chain off the front of the packet queue.
2218 */
2219 so->so_rcv.sb_mb = NULL;
2220 sockbuf_pushsync(&so->so_rcv, nextrecord);
2221
2222 /*
2223 * Walk 'm's chain and free that many bytes from the socket buffer.
2224 */
2225 for (m2 = m; m2 != NULL; m2 = m2->m_next)
2226 sbfree(&so->so_rcv, m2);
2227
2228 /*
2229 * Do a few last checks before we let go of the lock.
2230 */
2231 SBLASTRECORDCHK(&so->so_rcv);
2232 SBLASTMBUFCHK(&so->so_rcv);
2233 SOCKBUF_UNLOCK(&so->so_rcv);
2234
2235 if (pr->pr_flags & PR_ADDR) {
2236 KASSERT(m->m_type == MT_SONAME,
2237 ("m->m_type == %d", m->m_type));
2238 if (psa != NULL)
2239 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
2240 M_NOWAIT);
2241 m = m_free(m);
2242 }
2243 if (m == NULL) {
2244 /* XXXRW: Can this happen? */
2245 return (0);
2246 }
2247
2248 /*
2249 * Packet to copyout() is now in 'm' and it is disconnected from the
2250 * queue.
2251 *
2252 * Process one or more MT_CONTROL mbufs present before any data mbufs
2253 * in the first mbuf chain on the socket buffer. We call into the
2254 * protocol to perform externalization (or freeing if controlp ==
2255 * NULL).
2256 */
2257 if (m->m_type == MT_CONTROL) {
2258 struct mbuf *cm = NULL, *cmn;
2259 struct mbuf **cme = &cm;
2260
2261 do {
2262 m2 = m->m_next;
2263 m->m_next = NULL;
2264 *cme = m;
2265 cme = &(*cme)->m_next;
2266 m = m2;
2267 } while (m != NULL && m->m_type == MT_CONTROL);
2268 while (cm != NULL) {
2269 cmn = cm->m_next;
2270 cm->m_next = NULL;
2271 if (pr->pr_domain->dom_externalize != NULL) {
2272 error = (*pr->pr_domain->dom_externalize)
2273 (cm, controlp, flags);
2274 } else if (controlp != NULL)
2275 *controlp = cm;
2276 else
2277 m_freem(cm);
2278 if (controlp != NULL) {
2279 while (*controlp != NULL)
2280 controlp = &(*controlp)->m_next;
2281 }
2282 cm = cmn;
2283 }
2284 }
2285 KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data"));
2286
2287 while (m != NULL && uio->uio_resid > 0) {
2288 len = uio->uio_resid;
2289 if (len > m->m_len)
2290 len = m->m_len;
2291 error = uiomove(mtod(m, char *), (int)len, uio);
2292 if (error) {
2293 m_freem(m);
2294 return (error);
2295 }
2296 if (len == m->m_len)
2297 m = m_free(m);
2298 else {
2299 m->m_data += len;
2300 m->m_len -= len;
2301 }
2302 }
2303 if (m != NULL)
2304 flags |= MSG_TRUNC;
2305 m_freem(m);
2306 if (flagsp != NULL)
2307 *flagsp |= flags;
2308 return (0);
2309}
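
/*
 * Sketch: receiving a single datagram into a kernel buffer and using the
 * returned flags to detect truncation.  The uio/iovec setup is the usual
 * UIO_SYSSPACE pattern; the helper name is hypothetical.
 */
#if 0	/* illustrative sketch; not part of the build */
static int
example_recv_datagram(struct socket *so, void *buf, size_t buflen,
    struct sockaddr **from, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	int error, flags;

	aiov.iov_base = buf;
	aiov.iov_len = buflen;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;
	flags = MSG_DONTWAIT;
	error = soreceive(so, from, &auio, NULL, NULL, &flags);
	if (error == 0 && (flags & MSG_TRUNC))
		error = EMSGSIZE;	/* datagram did not fit in buf */
	return (error);
}
#endif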
2310
2311int
2312soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
2313 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2314{
2315 int error;
2316
2317 CURVNET_SET(so->so_vnet);
2318 error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
2319 controlp, flagsp));
2320 CURVNET_RESTORE();
2321 return (error);
2322}
2323
2324int
2325soshutdown(struct socket *so, int how)
2326{
2327 struct protosw *pr = so->so_proto;
2328 int error;
2329
2330 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2331 return (EINVAL);
2332
2333 CURVNET_SET(so->so_vnet);
2334 if (pr->pr_usrreqs->pru_flush != NULL)
2335 (*pr->pr_usrreqs->pru_flush)(so, how);
2336 if (how != SHUT_WR)
2337 sorflush(so);
2338 if (how != SHUT_RD) {
2339 error = (*pr->pr_usrreqs->pru_shutdown)(so);
2340 wakeup(&so->so_timeo);
2341 CURVNET_RESTORE();
2342 return (error);
2343 }
2344 wakeup(&so->so_timeo);
2345 CURVNET_RESTORE();
2346 return (0);
2347}
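
/*
 * Sketch: an in-kernel consumer that has finished transmitting but still
 * expects to read replies performs a half-close; SHUT_RDWR would close
 * both directions.  The helper name is hypothetical.
 */
#if 0	/* illustrative sketch; not part of the build */
static void
example_halfclose(struct socket *so)
{

	(void)soshutdown(so, SHUT_WR);	/* stop sending, keep receiving */
}
#endif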
2348
2349void
2350sorflush(struct socket *so)
2351{
2352 struct sockbuf *sb = &so->so_rcv;
2353 struct protosw *pr = so->so_proto;
2354 struct sockbuf asb;
2355
2356 VNET_SO_ASSERT(so);
2357
2358 /*
2359 * In order to avoid calling dom_dispose with the socket buffer mutex
2360 * held, and in order to generally avoid holding the lock for a long
2361 * time, we make a copy of the socket buffer and clear the original
2362 * (except locks, state). The new socket buffer copy won't have
2363 * initialized locks so we can only call routines that won't use or
2364 * assert those locks.
2365 *
2366 * Dislodge threads currently blocked in receive and wait to acquire
2367 * a lock against other simultaneous readers before clearing the
2368 * socket buffer. Don't let our acquire be interrupted by a signal
2369	 * despite any existing socket disposition on interruptible waiting.
2370 */
2371 socantrcvmore(so);
2372 (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
2373
2374 /*
2375 * Invalidate/clear most of the sockbuf structure, but leave selinfo
2376 * and mutex data unchanged.
2377 */
2378 SOCKBUF_LOCK(sb);
2379 bzero(&asb, offsetof(struct sockbuf, sb_startzero));
2380 bcopy(&sb->sb_startzero, &asb.sb_startzero,
2381 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2382 bzero(&sb->sb_startzero,
2383 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2384 SOCKBUF_UNLOCK(sb);
2385 sbunlock(sb);
2386
2387 /*
2388 * Dispose of special rights and flush the socket buffer. Don't call
2389 * any unsafe routines (that rely on locks being initialized) on asb.
2390 */
2391 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
2392 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
2393 sbrelease_internal(&asb, so);
2394}
2395
2396/*
2397 * Wrapper for Socket established helper hook.
2398 * Parameters: socket, context of the hook point, hook id.
2399 */
2400static int inline
2401hhook_run_socket(struct socket *so, void *hctx, int32_t h_id)
2402{
2403 struct socket_hhook_data hhook_data = {
2404 .so = so,
2405 .hctx = hctx,
2406 .m = NULL,
2407 .status = 0
2408 };
2409
2410 CURVNET_SET(so->so_vnet);
2411 HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd);
2412 CURVNET_RESTORE();
2413
2414 /* Ugly but needed, since hhooks return void for now */
2415 return (hhook_data.status);
2416}
2417
2418/*
2419 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
2420 * additional variant to handle the case where the option value needs to be
2421 * some kind of integer, but not a specific size. In addition to their use
2422 * here, these functions are also called by the protocol-level pr_ctloutput()
2423 * routines.
2424 */
2425int
2426sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2427{
2428 size_t valsize;
2429
2430 /*
2431 * If the user gives us more than we wanted, we ignore it, but if we
2432 * don't get the minimum length the caller wants, we return EINVAL.
2433 * On success, sopt->sopt_valsize is set to however much we actually
2434 * retrieved.
2435 */
2436 if ((valsize = sopt->sopt_valsize) < minlen)
2437 return EINVAL;
2438 if (valsize > len)
2439 sopt->sopt_valsize = valsize = len;
2440
2441 if (sopt->sopt_td != NULL)
2442 return (copyin(sopt->sopt_val, buf, valsize));
2443
2444 bcopy(sopt->sopt_val, buf, valsize);
2445 return (0);
2446}
2447
2448/*
2449 * Kernel version of setsockopt(2).
2450 *
2451 * XXX: optlen is size_t, not socklen_t
2452 */
2453int
2454so_setsockopt(struct socket *so, int level, int optname, void *optval,
2455 size_t optlen)
2456{
2457 struct sockopt sopt;
2458
2459 sopt.sopt_level = level;
2460 sopt.sopt_name = optname;
2461 sopt.sopt_dir = SOPT_SET;
2462 sopt.sopt_val = optval;
2463 sopt.sopt_valsize = optlen;
2464 sopt.sopt_td = NULL;
2465 return (sosetopt(so, &sopt));
2466}
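
/*
 * Sketch: kernel consumers typically use the wrapper above to flip a
 * simple integer option without building a struct sockopt by hand.  The
 * helper name is hypothetical.
 */
#if 0	/* illustrative sketch; not part of the build */
static int
example_enable_keepalive(struct socket *so)
{
	int one = 1;

	return (so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &one,
	    sizeof(one)));
}
#endif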
2467
2468int
2469sosetopt(struct socket *so, struct sockopt *sopt)
2470{
2471 int error, optval;
2472 struct linger l;
2473 struct timeval tv;
2474 sbintime_t val;
2475 uint32_t val32;
2476#ifdef MAC
2477 struct mac extmac;
2478#endif
2479
2480 CURVNET_SET(so->so_vnet);
2481 error = 0;
2482 if (sopt->sopt_level != SOL_SOCKET) {
2483 if (so->so_proto->pr_ctloutput != NULL) {
2484 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2485 CURVNET_RESTORE();
2486 return (error);
2487 }
2488 error = ENOPROTOOPT;
2489 } else {
2490 switch (sopt->sopt_name) {
2491 case SO_ACCEPTFILTER:
2492 error = do_setopt_accept_filter(so, sopt);
2493 if (error)
2494 goto bad;
2495 break;
2496
2497 case SO_LINGER:
2498 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
2499 if (error)
2500 goto bad;
2501
2502 SOCK_LOCK(so);
2503 so->so_linger = l.l_linger;
2504 if (l.l_onoff)
2505 so->so_options |= SO_LINGER;
2506 else
2507 so->so_options &= ~SO_LINGER;
2508 SOCK_UNLOCK(so);
2509 break;
2510
2511 case SO_DEBUG:
2512 case SO_KEEPALIVE:
2513 case SO_DONTROUTE:
2514 case SO_USELOOPBACK:
2515 case SO_BROADCAST:
2516 case SO_REUSEADDR:
2517 case SO_REUSEPORT:
2518 case SO_OOBINLINE:
2519 case SO_TIMESTAMP:
2520 case SO_BINTIME:
2521 case SO_NOSIGPIPE:
2522 case SO_NO_DDP:
2523 case SO_NO_OFFLOAD:
2524 error = sooptcopyin(sopt, &optval, sizeof optval,
2525 sizeof optval);
2526 if (error)
2527 goto bad;
2528 SOCK_LOCK(so);
2529 if (optval)
2530 so->so_options |= sopt->sopt_name;
2531 else
2532 so->so_options &= ~sopt->sopt_name;
2533 SOCK_UNLOCK(so);
2534 break;
2535
2536 case SO_SETFIB:
2537 error = sooptcopyin(sopt, &optval, sizeof optval,
2538 sizeof optval);
2539 if (error)
2540 goto bad;
2541
2542 if (optval < 0 || optval >= rt_numfibs) {
2543 error = EINVAL;
2544 goto bad;
2545 }
2546 if (((so->so_proto->pr_domain->dom_family == PF_INET) ||
2547 (so->so_proto->pr_domain->dom_family == PF_INET6) ||
2548 (so->so_proto->pr_domain->dom_family == PF_ROUTE)))
2549 so->so_fibnum = optval;
2550 else
2551 so->so_fibnum = 0;
2552 break;
2553
2554 case SO_USER_COOKIE:
2555 error = sooptcopyin(sopt, &val32, sizeof val32,
2556 sizeof val32);
2557 if (error)
2558 goto bad;
2559 so->so_user_cookie = val32;
2560 break;
2561
2562 case SO_SNDBUF:
2563 case SO_RCVBUF:
2564 case SO_SNDLOWAT:
2565 case SO_RCVLOWAT:
2566 error = sooptcopyin(sopt, &optval, sizeof optval,
2567 sizeof optval);
2568 if (error)
2569 goto bad;
2570
2571 /*
2572 * Values < 1 make no sense for any of these options,
2573 * so disallow them.
2574 */
2575 if (optval < 1) {
2576 error = EINVAL;
2577 goto bad;
2578 }
2579
2580 switch (sopt->sopt_name) {
2581 case SO_SNDBUF:
2582 case SO_RCVBUF:
2583 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
2584 &so->so_snd : &so->so_rcv, (u_long)optval,
2585 so, curthread) == 0) {
2586 error = ENOBUFS;
2587 goto bad;
2588 }
2589 (sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
2590 &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
2591 break;
2592
2593 /*
2594 * Make sure the low-water is never greater than the
2595 * high-water.
2596 */
2597 case SO_SNDLOWAT:
2598 SOCKBUF_LOCK(&so->so_snd);
2599 so->so_snd.sb_lowat =
2600 (optval > so->so_snd.sb_hiwat) ?
2601 so->so_snd.sb_hiwat : optval;
2602 SOCKBUF_UNLOCK(&so->so_snd);
2603 break;
2604 case SO_RCVLOWAT:
2605 SOCKBUF_LOCK(&so->so_rcv);
2606 so->so_rcv.sb_lowat =
2607 (optval > so->so_rcv.sb_hiwat) ?
2608 so->so_rcv.sb_hiwat : optval;
2609 SOCKBUF_UNLOCK(&so->so_rcv);
2610 break;
2611 }
2612 break;
2613
2614 case SO_SNDTIMEO:
2615 case SO_RCVTIMEO:
2616#ifdef COMPAT_FREEBSD32
2617 if (SV_CURPROC_FLAG(SV_ILP32)) {
2618 struct timeval32 tv32;
2619
2620 error = sooptcopyin(sopt, &tv32, sizeof tv32,
2621 sizeof tv32);
2622 CP(tv32, tv, tv_sec);
2623 CP(tv32, tv, tv_usec);
2624 } else
2625#endif
2626 error = sooptcopyin(sopt, &tv, sizeof tv,
2627 sizeof tv);
2628 if (error)
2629 goto bad;
2630 if (tv.tv_sec < 0 || tv.tv_usec < 0 ||
2631 tv.tv_usec >= 1000000) {
2632 error = EDOM;
2633 goto bad;
2634 }
2635 if (tv.tv_sec > INT32_MAX)
2636 val = SBT_MAX;
2637 else
2638 val = tvtosbt(tv);
2639 switch (sopt->sopt_name) {
2640 case SO_SNDTIMEO:
2641 so->so_snd.sb_timeo = val;
2642 break;
2643 case SO_RCVTIMEO:
2644 so->so_rcv.sb_timeo = val;
2645 break;
2646 }
2647 break;
2648
2649 case SO_LABEL:
2650#ifdef MAC
2651 error = sooptcopyin(sopt, &extmac, sizeof extmac,
2652 sizeof extmac);
2653 if (error)
2654 goto bad;
2655 error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2656 so, &extmac);
2657#else
2658 error = EOPNOTSUPP;
2659#endif
2660 break;
2661
2662 default:
2663 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
2664 error = hhook_run_socket(so, sopt,
2665 HHOOK_SOCKET_OPT);
2666 else
2667 error = ENOPROTOOPT;
2668 break;
2669 }
2670 if (error == 0 && so->so_proto->pr_ctloutput != NULL)
2671 (void)(*so->so_proto->pr_ctloutput)(so, sopt);
2672 }
2673bad:
2674 CURVNET_RESTORE();
2675 return (error);
2676}
2677
2678/*
2679 * Helper routine for getsockopt.
2680 */
2681int
2682sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2683{
2684 int error;
2685 size_t valsize;
2686
2687 error = 0;
2688
2689 /*
2690 * Documented get behavior is that we always return a value, possibly
2691 * truncated to fit in the user's buffer. Traditional behavior is
2692 * that we always tell the user precisely how much we copied, rather
2693 * than something useful like the total amount we had available for
2694 * her. Note that this interface is not idempotent; the entire
2695	 * answer must be generated ahead of time.
2696 */
2697 valsize = min(len, sopt->sopt_valsize);
2698 sopt->sopt_valsize = valsize;
2699 if (sopt->sopt_val != NULL) {
2700 if (sopt->sopt_td != NULL)
2701 error = copyout(buf, sopt->sopt_val, valsize);
2702 else
2703 bcopy(buf, sopt->sopt_val, valsize);
2704 }
2705 return (error);
2706}
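
/*
 * Sketch: the usual shape of a protocol-level pr_ctloutput() handler for
 * a simple integer option, built on sooptcopyin()/sooptcopyout() above.
 * The protocol name, pcb type, and option field are hypothetical.
 */
#if 0	/* illustrative sketch; not part of the build */
static int
example_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct example_pcb *pcb = so->so_pcb;
	int error, optval;

	error = ENOPROTOOPT;
	switch (sopt->sopt_dir) {
	case SOPT_SET:
		error = sooptcopyin(sopt, &optval, sizeof(optval),
		    sizeof(optval));
		if (error == 0)
			pcb->exp_option = optval;
		break;
	case SOPT_GET:
		optval = pcb->exp_option;
		error = sooptcopyout(sopt, &optval, sizeof(optval));
		break;
	}
	return (error);
}
#endif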
2707
2708int
2709sogetopt(struct socket *so, struct sockopt *sopt)
2710{
2711 int error, optval;
2712 struct linger l;
2713 struct timeval tv;
2714#ifdef MAC
2715 struct mac extmac;
2716#endif
2717
2718 CURVNET_SET(so->so_vnet);
2719 error = 0;
2720 if (sopt->sopt_level != SOL_SOCKET) {
2721 if (so->so_proto->pr_ctloutput != NULL)
2722 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2723 else
2724 error = ENOPROTOOPT;
2725 CURVNET_RESTORE();
2726 return (error);
2727 } else {
2728 switch (sopt->sopt_name) {
2729 case SO_ACCEPTFILTER:
2730 error = do_getopt_accept_filter(so, sopt);
2731 break;
2732
2733 case SO_LINGER:
2734 SOCK_LOCK(so);
2735 l.l_onoff = so->so_options & SO_LINGER;
2736 l.l_linger = so->so_linger;
2737 SOCK_UNLOCK(so);
2738 error = sooptcopyout(sopt, &l, sizeof l);
2739 break;
2740
2741 case SO_USELOOPBACK:
2742 case SO_DONTROUTE:
2743 case SO_DEBUG:
2744 case SO_KEEPALIVE:
2745 case SO_REUSEADDR:
2746 case SO_REUSEPORT:
2747 case SO_BROADCAST:
2748 case SO_OOBINLINE:
2749 case SO_ACCEPTCONN:
2750 case SO_TIMESTAMP:
2751 case SO_BINTIME:
2752 case SO_NOSIGPIPE:
2753 optval = so->so_options & sopt->sopt_name;
2754integer:
2755 error = sooptcopyout(sopt, &optval, sizeof optval);
2756 break;
2757
2758 case SO_TYPE:
2759 optval = so->so_type;
2760 goto integer;
2761
2762 case SO_PROTOCOL:
2763 optval = so->so_proto->pr_protocol;
2764 goto integer;
2765
2766 case SO_ERROR:
2767 SOCK_LOCK(so);
2768 optval = so->so_error;
2769 so->so_error = 0;
2770 SOCK_UNLOCK(so);
2771 goto integer;
2772
2773 case SO_SNDBUF:
2774 optval = so->so_snd.sb_hiwat;
2775 goto integer;
2776
2777 case SO_RCVBUF:
2778 optval = so->so_rcv.sb_hiwat;
2779 goto integer;
2780
2781 case SO_SNDLOWAT:
2782 optval = so->so_snd.sb_lowat;
2783 goto integer;
2784
2785 case SO_RCVLOWAT:
2786 optval = so->so_rcv.sb_lowat;
2787 goto integer;
2788
2789 case SO_SNDTIMEO:
2790 case SO_RCVTIMEO:
2791 tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ?
2792 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2793#ifdef COMPAT_FREEBSD32
2794 if (SV_CURPROC_FLAG(SV_ILP32)) {
2795 struct timeval32 tv32;
2796
2797 CP(tv, tv32, tv_sec);
2798 CP(tv, tv32, tv_usec);
2799 error = sooptcopyout(sopt, &tv32, sizeof tv32);
2800 } else
2801#endif
2802 error = sooptcopyout(sopt, &tv, sizeof tv);
2803 break;
2804
2805 case SO_LABEL:
2806#ifdef MAC
2807 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2808 sizeof(extmac));
2809 if (error)
2810 goto bad;
2811 error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
2812 so, &extmac);
2813 if (error)
2814 goto bad;
2815 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2816#else
2817 error = EOPNOTSUPP;
2818#endif
2819 break;
2820
2821 case SO_PEERLABEL:
2822#ifdef MAC
2823 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2824 sizeof(extmac));
2825 if (error)
2826 goto bad;
2827 error = mac_getsockopt_peerlabel(
2828 sopt->sopt_td->td_ucred, so, &extmac);
2829 if (error)
2830 goto bad;
2831 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2832#else
2833 error = EOPNOTSUPP;
2834#endif
2835 break;
2836
2837 case SO_LISTENQLIMIT:
2838 optval = so->so_qlimit;
2839 goto integer;
2840
2841 case SO_LISTENQLEN:
2842 optval = so->so_qlen;
2843 goto integer;
2844
2845 case SO_LISTENINCQLEN:
2846 optval = so->so_incqlen;
2847 goto integer;
2848
2849 default:
2850 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
2851 error = hhook_run_socket(so, sopt,
2852 HHOOK_SOCKET_OPT);
2853 else
2854 error = ENOPROTOOPT;
2855 break;
2856 }
2857 }
2858#ifdef MAC
2859bad:
2860#endif
2861 CURVNET_RESTORE();
2862 return (error);
2863}
2864
2865int
2866soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2867{
2868 struct mbuf *m, *m_prev;
2869 int sopt_size = sopt->sopt_valsize;
2870
2871 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2872 if (m == NULL)
2873 return ENOBUFS;
2874 if (sopt_size > MLEN) {
2875 MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
2876 if ((m->m_flags & M_EXT) == 0) {
2877 m_free(m);
2878 return ENOBUFS;
2879 }
2880 m->m_len = min(MCLBYTES, sopt_size);
2881 } else {
2882 m->m_len = min(MLEN, sopt_size);
2883 }
2884 sopt_size -= m->m_len;
2885 *mp = m;
2886 m_prev = m;
2887
2888 while (sopt_size) {
2889 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2890 if (m == NULL) {
2891 m_freem(*mp);
2892 return ENOBUFS;
2893 }
2894 if (sopt_size > MLEN) {
2895 MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
2896 M_NOWAIT);
2897 if ((m->m_flags & M_EXT) == 0) {
2898 m_freem(m);
2899 m_freem(*mp);
2900 return ENOBUFS;
2901 }
2902 m->m_len = min(MCLBYTES, sopt_size);
2903 } else {
2904 m->m_len = min(MLEN, sopt_size);
2905 }
2906 sopt_size -= m->m_len;
2907 m_prev->m_next = m;
2908 m_prev = m;
2909 }
2910 return (0);
2911}
2912
2913int
2914soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2915{
2916 struct mbuf *m0 = m;
2917
2918 if (sopt->sopt_val == NULL)
2919 return (0);
2920 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2921 if (sopt->sopt_td != NULL) {
2922 int error;
2923
2924 error = copyin(sopt->sopt_val, mtod(m, char *),
2925 m->m_len);
2926 if (error != 0) {
2927 m_freem(m0);
2928 return(error);
2929 }
2930 } else
2931 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2932 sopt->sopt_valsize -= m->m_len;
2933 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2934 m = m->m_next;
2935 }
2936	if (m != NULL) /* should have been allocated large enough at ip6_sooptmcopyin() */
2937 panic("ip6_sooptmcopyin");
2938 return (0);
2939}
2940
2941int
2942soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2943{
2944 struct mbuf *m0 = m;
2945 size_t valsize = 0;
2946
2947 if (sopt->sopt_val == NULL)
2948 return (0);
2949 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2950 if (sopt->sopt_td != NULL) {
2951 int error;
2952
2953 error = copyout(mtod(m, char *), sopt->sopt_val,
2954 m->m_len);
2955 if (error != 0) {
2956 m_freem(m0);
2957 return(error);
2958 }
2959 } else
2960 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2961 sopt->sopt_valsize -= m->m_len;
2962 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2963 valsize += m->m_len;
2964 m = m->m_next;
2965 }
2966 if (m != NULL) {
2967		/* enough soopt buffer should have been given from user-land */
2968 m_freem(m0);
2969 return(EINVAL);
2970 }
2971 sopt->sopt_valsize = valsize;
2972 return (0);
2973}
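
/*
 * Sketch: protocols that want the option value as an mbuf chain (the IPv6
 * code uses this pattern) combine soopt_getm() with soopt_mcopyin(); the
 * latter frees the chain itself if the copyin fails.  The helper name is
 * hypothetical.
 */
#if 0	/* illustrative sketch; not part of the build */
static int
example_opt_to_mbufs(struct sockopt *sopt, struct mbuf **mp)
{
	int error;

	error = soopt_getm(sopt, mp);		/* allocate the chain */
	if (error == 0)
		error = soopt_mcopyin(sopt, *mp);	/* copy the value in */
	return (error);
}
#endif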
2974
2975/*
2976 * sohasoutofband(): protocol notifies socket layer of the arrival of new
2977 * out-of-band data, which will then notify socket consumers.
2978 */
2979void
2980sohasoutofband(struct socket *so)
2981{
2982
2983 if (so->so_sigio != NULL)
2984 pgsigio(&so->so_sigio, SIGURG, 0);
2985 selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2986}
2987
2988int
2989sopoll(struct socket *so, int events, struct ucred *active_cred,
2990 struct thread *td)
2991{
2992
2993 /*
2994 * We do not need to set or assert curvnet as long as everyone uses
2995 * sopoll_generic().
2996 */
2997 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
2998 td));
2999}
3000
3001int
3002sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
3003 struct thread *td)
3004{
3005 int revents = 0;
3006
3007 SOCKBUF_LOCK(&so->so_snd);
3008 SOCKBUF_LOCK(&so->so_rcv);
3009 if (events & (POLLIN | POLLRDNORM))
3010 if (soreadabledata(so))
3011 revents |= events & (POLLIN | POLLRDNORM);
3012
3013 if (events & (POLLOUT | POLLWRNORM))
3014 if (sowriteable(so))
3015 revents |= events & (POLLOUT | POLLWRNORM);
3016
3017 if (events & (POLLPRI | POLLRDBAND))
3018 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
3019 revents |= events & (POLLPRI | POLLRDBAND);
3020
3021 if ((events & POLLINIGNEOF) == 0) {
3022 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3023 revents |= events & (POLLIN | POLLRDNORM);
3024 if (so->so_snd.sb_state & SBS_CANTSENDMORE)
3025 revents |= POLLHUP;
3026 }
3027 }
3028
3029 if (revents == 0) {
3030 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
3031 selrecord(td, &so->so_rcv.sb_sel);
3032 so->so_rcv.sb_flags |= SB_SEL;
3033 }
3034
3035 if (events & (POLLOUT | POLLWRNORM)) {
3036 selrecord(td, &so->so_snd.sb_sel);
3037 so->so_snd.sb_flags |= SB_SEL;
3038 }
3039 }
3040
3041 SOCKBUF_UNLOCK(&so->so_rcv);
3042 SOCKBUF_UNLOCK(&so->so_snd);
3043 return (revents);
3044}
3045
3046int
3047soo_kqfilter(struct file *fp, struct knote *kn)
3048{
3049 struct socket *so = kn->kn_fp->f_data;
3050 struct sockbuf *sb;
3051
3052 switch (kn->kn_filter) {
3053 case EVFILT_READ:
3054 if (so->so_options & SO_ACCEPTCONN)
3055 kn->kn_fop = &solisten_filtops;
3056 else
3057 kn->kn_fop = &soread_filtops;
3058 sb = &so->so_rcv;
3059 break;
3060 case EVFILT_WRITE:
3061 kn->kn_fop = &sowrite_filtops;
3062 sb = &so->so_snd;
3063 break;
3064 default:
3065 return (EINVAL);
3066 }
3067
3068 SOCKBUF_LOCK(sb);
3069 knlist_add(&sb->sb_sel.si_note, kn, 1);
3070 sb->sb_flags |= SB_KNOTE;
3071 SOCKBUF_UNLOCK(sb);
3072 return (0);
3073}
3074
3075/*
3076 * Some routines that return EOPNOTSUPP for entry points that are not
3077 * supported by a protocol. Fill in as needed.
3078 */
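
/*
 * Sketch: a hypothetical protocol wires the stubs below into the entry
 * points it does not implement, for example:
 */
#if 0	/* illustrative sketch; not part of the build */
struct pr_usrreqs example_usrreqs = {
	.pru_accept =		pru_accept_notsupp,
	.pru_connect2 =		pru_connect2_notsupp,
	.pru_listen =		pru_listen_notsupp,
	.pru_rcvoob =		pru_rcvoob_notsupp,
	/* ... implemented entry points such as pru_attach and pru_send ... */
};
#endif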
3079int
3080pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
3081{
3082
3083 return EOPNOTSUPP;
3084}
3085
3086int
3087pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
3088{
3089
3090 return EOPNOTSUPP;
3091}
3092
3093int
3094pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3095{
3096
3097 return EOPNOTSUPP;
3098}
3099
3100int
3101pru_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3102 struct thread *td)
3103{
3104
3105 return EOPNOTSUPP;
3106}
3107
3108int
3109pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3110{
3111
3112 return EOPNOTSUPP;
3113}
3114
3115int
3116pru_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3117 struct thread *td)
3118{
3119
3120 return EOPNOTSUPP;
3121}
3122
3123int
3124pru_connect2_notsupp(struct socket *so1, struct socket *so2)
3125{
3126
3127 return EOPNOTSUPP;
3128}
3129
3130int
3131pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
3132 struct ifnet *ifp, struct thread *td)
3133{
3134
3135 return EOPNOTSUPP;
3136}
3137
3138int
3139pru_disconnect_notsupp(struct socket *so)
3140{
3141
3142 return EOPNOTSUPP;
3143}
3144
3145int
3146pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
3147{
3148
3149 return EOPNOTSUPP;
3150}
3151
3152int
3153pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
3154{
3155
3156 return EOPNOTSUPP;
3157}
3158
3159int
3160pru_rcvd_notsupp(struct socket *so, int flags)
3161{
3162
3163 return EOPNOTSUPP;
3164}
3165
3166int
3167pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
3168{
3169
3170 return EOPNOTSUPP;
3171}
3172
3173int
3174pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
3175 struct sockaddr *addr, struct mbuf *control, struct thread *td)
3176{
3177
3178 return EOPNOTSUPP;
3179}
3180
105
106#include "opt_inet.h"
107#include "opt_inet6.h"
108#include "opt_compat.h"
109
110#include <sys/param.h>
111#include <sys/systm.h>
112#include <sys/fcntl.h>
113#include <sys/limits.h>
114#include <sys/lock.h>
115#include <sys/mac.h>
116#include <sys/malloc.h>
117#include <sys/mbuf.h>
118#include <sys/mutex.h>
119#include <sys/domain.h>
120#include <sys/file.h> /* for struct knote */
121#include <sys/hhook.h>
122#include <sys/kernel.h>
123#include <sys/khelp.h>
124#include <sys/event.h>
125#include <sys/eventhandler.h>
126#include <sys/poll.h>
127#include <sys/proc.h>
128#include <sys/protosw.h>
129#include <sys/socket.h>
130#include <sys/socketvar.h>
131#include <sys/resourcevar.h>
132#include <net/route.h>
133#include <sys/signalvar.h>
134#include <sys/stat.h>
135#include <sys/sx.h>
136#include <sys/sysctl.h>
137#include <sys/uio.h>
138#include <sys/jail.h>
139#include <sys/syslog.h>
140#include <netinet/in.h>
141
142#include <net/vnet.h>
143
144#include <security/mac/mac_framework.h>
145
146#include <vm/uma.h>
147
148#ifdef COMPAT_FREEBSD32
149#include <sys/mount.h>
150#include <sys/sysent.h>
151#include <compat/freebsd32/freebsd32.h>
152#endif
153
154static int soreceive_rcvoob(struct socket *so, struct uio *uio,
155 int flags);
156
157static void filt_sordetach(struct knote *kn);
158static int filt_soread(struct knote *kn, long hint);
159static void filt_sowdetach(struct knote *kn);
160static int filt_sowrite(struct knote *kn, long hint);
161static int filt_solisten(struct knote *kn, long hint);
162static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id);
163fo_kqfilter_t soo_kqfilter;
164
165static struct filterops solisten_filtops = {
166 .f_isfd = 1,
167 .f_detach = filt_sordetach,
168 .f_event = filt_solisten,
169};
170static struct filterops soread_filtops = {
171 .f_isfd = 1,
172 .f_detach = filt_sordetach,
173 .f_event = filt_soread,
174};
175static struct filterops sowrite_filtops = {
176 .f_isfd = 1,
177 .f_detach = filt_sowdetach,
178 .f_event = filt_sowrite,
179};
180
181so_gen_t so_gencnt; /* generation count for sockets */
182
183MALLOC_DEFINE(M_SONAME, "soname", "socket name");
184MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
185
186#define VNET_SO_ASSERT(so) \
187 VNET_ASSERT(curvnet != NULL, \
188 ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));
189
190VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
191#define V_socket_hhh VNET(socket_hhh)
192
193/*
194 * Limit on the number of connections in the listen queue waiting
195 * for accept(2).
196 * NB: The original sysctl somaxconn is still available but hidden
197 * to prevent confusion about the actual purpose of this number.
198 */
199static int somaxconn = SOMAXCONN;
200
201static int
202sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
203{
204 int error;
205 int val;
206
207 val = somaxconn;
208 error = sysctl_handle_int(oidp, &val, 0, req);
209 if (error || !req->newptr )
210 return (error);
211
212 if (val < 1 || val > USHRT_MAX)
213 return (EINVAL);
214
215 somaxconn = val;
216 return (0);
217}
218SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW,
219 0, sizeof(int), sysctl_somaxconn, "I",
220 "Maximum listen socket pending connection accept queue size");
221SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
222 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP,
223 0, sizeof(int), sysctl_somaxconn, "I",
224 "Maximum listen socket pending connection accept queue size (compat)");
225
226static int numopensockets;
227SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
228 &numopensockets, 0, "Number of open sockets");
229
230/*
231 * accept_mtx locks down per-socket fields relating to accept queues. See
232 * socketvar.h for an annotation of the protected fields of struct socket.
233 */
234struct mtx accept_mtx;
235MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
236
237/*
238 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
239 * so_gencnt field.
240 */
241static struct mtx so_global_mtx;
242MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
243
244/*
245 * General IPC sysctl name space, used by sockets and a variety of other IPC
246 * types.
247 */
248SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
249
250/*
251 * Initialize the socket subsystem and set up the socket
252 * memory allocator.
253 */
254static uma_zone_t socket_zone;
255int maxsockets;
256
257static void
258socket_zone_change(void *tag)
259{
260
261 maxsockets = uma_zone_set_max(socket_zone, maxsockets);
262}
263
264static void
265socket_hhook_register(int subtype)
266{
267
268 if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype,
269 &V_socket_hhh[subtype],
270 HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
271 printf("%s: WARNING: unable to register hook\n", __func__);
272}
273
274static void
275socket_hhook_deregister(int subtype)
276{
277
278 if (hhook_head_deregister(V_socket_hhh[subtype]) != 0)
279 printf("%s: WARNING: unable to deregister hook\n", __func__);
280}
281
282static void
283socket_init(void *tag)
284{
285
286 socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
287 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
288 maxsockets = uma_zone_set_max(socket_zone, maxsockets);
289 uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
290 EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
291 EVENTHANDLER_PRI_FIRST);
292}
293SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);
294
295static void
296socket_vnet_init(const void *unused __unused)
297{
298 int i;
299
300 /* We expect a contiguous range */
301 for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
302 socket_hhook_register(i);
303}
304VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
305 socket_vnet_init, NULL);
306
307static void
308socket_vnet_uninit(const void *unused __unused)
309{
310 int i;
311
312 for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
313 socket_hhook_deregister(i);
314}
315VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
316 socket_vnet_uninit, NULL);
317
318/*
319 * Initialise maxsockets. This SYSINIT must be run after
320 * tunable_mbinit().
321 */
322static void
323init_maxsockets(void *ignored)
324{
325
326 TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
327 maxsockets = imax(maxsockets, maxfiles);
328}
329SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
330
331/*
332 * Sysctl to get and set the maximum global sockets limit. Notify protocols
333 * of the change so that they can update their dependent limits as required.
334 */
335static int
336sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
337{
338 int error, newmaxsockets;
339
340 newmaxsockets = maxsockets;
341 error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
342 if (error == 0 && req->newptr) {
343 if (newmaxsockets > maxsockets &&
344 newmaxsockets <= maxfiles) {
345 maxsockets = newmaxsockets;
346 EVENTHANDLER_INVOKE(maxsockets_change);
347 } else
348 error = EINVAL;
349 }
350 return (error);
351}
352SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
353 &maxsockets, 0, sysctl_maxsockets, "IU",
354	"Maximum number of sockets available");
355
356/*
357 * Socket operation routines. These routines are called by the routines in
358 * sys_socket.c or from a system process, and implement the semantics of
359 * socket operations by switching out to the protocol specific routines.
360 */
361
362/*
363 * Get a socket structure from our zone, and initialize it. Note that it
364 * would probably be better to allocate socket and PCB at the same time, but
365 * I'm not convinced that all the protocols can be easily modified to do
366 * this.
367 *
368 * soalloc() returns a socket with a ref count of 0.
369 */
370static struct socket *
371soalloc(struct vnet *vnet)
372{
373 struct socket *so;
374
375 so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
376 if (so == NULL)
377 return (NULL);
378#ifdef MAC
379 if (mac_socket_init(so, M_NOWAIT) != 0) {
380 uma_zfree(socket_zone, so);
381 return (NULL);
382 }
383#endif
384 if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) {
385 uma_zfree(socket_zone, so);
386 return (NULL);
387 }
388
389 SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
390 SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
391 sx_init(&so->so_snd.sb_sx, "so_snd_sx");
392 sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
393 TAILQ_INIT(&so->so_aiojobq);
394#ifdef VIMAGE
395 VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
396 __func__, __LINE__, so));
397 so->so_vnet = vnet;
398#endif
399 /* We shouldn't need the so_global_mtx */
400 if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) {
401 /* Do we need more comprehensive error returns? */
402 uma_zfree(socket_zone, so);
403 return (NULL);
404 }
405 mtx_lock(&so_global_mtx);
406 so->so_gencnt = ++so_gencnt;
407 ++numopensockets;
408#ifdef VIMAGE
409 vnet->vnet_sockcnt++;
410#endif
411 mtx_unlock(&so_global_mtx);
412
413 return (so);
414}
415
416/*
417 * Free the storage associated with a socket at the socket layer, tear down
418 * locks, labels, etc. All protocol state is assumed already to have been
419 * torn down (and possibly never set up) by the caller.
420 */
421static void
422sodealloc(struct socket *so)
423{
424
425 KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
426 KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
427
428 mtx_lock(&so_global_mtx);
429 so->so_gencnt = ++so_gencnt;
430 --numopensockets; /* Could be below, but faster here. */
431#ifdef VIMAGE
432 VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
433 __func__, __LINE__, so));
434 so->so_vnet->vnet_sockcnt--;
435#endif
436 mtx_unlock(&so_global_mtx);
437 if (so->so_rcv.sb_hiwat)
438 (void)chgsbsize(so->so_cred->cr_uidinfo,
439 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
440 if (so->so_snd.sb_hiwat)
441 (void)chgsbsize(so->so_cred->cr_uidinfo,
442 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
443	/* remove accept filter if one is present. */
444 if (so->so_accf != NULL)
445 do_setopt_accept_filter(so, NULL);
446#ifdef MAC
447 mac_socket_destroy(so);
448#endif
449 hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE);
450
451 crfree(so->so_cred);
452 khelp_destroy_osd(&so->osd);
453 sx_destroy(&so->so_snd.sb_sx);
454 sx_destroy(&so->so_rcv.sb_sx);
455 SOCKBUF_LOCK_DESTROY(&so->so_snd);
456 SOCKBUF_LOCK_DESTROY(&so->so_rcv);
457 uma_zfree(socket_zone, so);
458}
459
460/*
461 * socreate returns a socket with a ref count of 1. The socket should be
462 * closed with soclose().
463 */
464int
465socreate(int dom, struct socket **aso, int type, int proto,
466 struct ucred *cred, struct thread *td)
467{
468 struct protosw *prp;
469 struct socket *so;
470 int error;
471
472 if (proto)
473 prp = pffindproto(dom, proto, type);
474 else
475 prp = pffindtype(dom, type);
476
477 if (prp == NULL) {
478 /* No support for domain. */
479 if (pffinddomain(dom) == NULL)
480 return (EAFNOSUPPORT);
481 /* No support for socket type. */
482 if (proto == 0 && type != 0)
483 return (EPROTOTYPE);
484 return (EPROTONOSUPPORT);
485 }
486 if (prp->pr_usrreqs->pru_attach == NULL ||
487 prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
488 return (EPROTONOSUPPORT);
489
490 if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
491 return (EPROTONOSUPPORT);
492
493 if (prp->pr_type != type)
494 return (EPROTOTYPE);
495 so = soalloc(CRED_TO_VNET(cred));
496 if (so == NULL)
497 return (ENOBUFS);
498
499 TAILQ_INIT(&so->so_incomp);
500 TAILQ_INIT(&so->so_comp);
501 so->so_type = type;
502 so->so_cred = crhold(cred);
503 if ((prp->pr_domain->dom_family == PF_INET) ||
504 (prp->pr_domain->dom_family == PF_INET6) ||
505 (prp->pr_domain->dom_family == PF_ROUTE))
506 so->so_fibnum = td->td_proc->p_fibnum;
507 else
508 so->so_fibnum = 0;
509 so->so_proto = prp;
510#ifdef MAC
511 mac_socket_create(cred, so);
512#endif
513 knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
514 knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
515 so->so_count = 1;
516 /*
517 * Auto-sizing of socket buffers is managed by the protocols and
518 * the appropriate flags must be set in the pru_attach function.
519 */
520 CURVNET_SET(so->so_vnet);
521 error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
522 CURVNET_RESTORE();
523 if (error) {
524 KASSERT(so->so_count == 1, ("socreate: so_count %d",
525 so->so_count));
526 so->so_count = 0;
527 sodealloc(so);
528 return (error);
529 }
530 *aso = so;
531 return (0);
532}
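
/*
 * Sketch: creating a kernel-owned TCP socket.  The credential and thread
 * normally come from the calling context; the helper name is hypothetical
 * and the socket must eventually be released with soclose().
 */
#if 0	/* illustrative sketch; not part of the build */
static int
example_create_tcp(struct socket **sop, struct thread *td)
{
	int error;

	error = socreate(AF_INET, sop, SOCK_STREAM, IPPROTO_TCP,
	    td->td_ucred, td);
	if (error != 0)
		return (error);
	/* ... sobind()/soconnect() as required ... */
	return (0);
}
#endif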
533
534#ifdef REGRESSION
535static int regression_sonewconn_earlytest = 1;
536SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
537 &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
538#endif
539
540/*
541 * When an attempt at a new connection is noted on a socket which accepts
542 * connections, sonewconn is called. If the connection is possible (subject
543 * to space constraints, etc.) then we allocate a new structure, properly
544 * linked into the data structure of the original socket, and return this.
545 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
546 *
547 * Note: the ref count on the socket is 0 on return.
548 */
549struct socket *
550sonewconn(struct socket *head, int connstatus)
551{
552 static struct timeval lastover;
553 static struct timeval overinterval = { 60, 0 };
554 static int overcount;
555
556 struct socket *so;
557 int over;
558
559 ACCEPT_LOCK();
560 over = (head->so_qlen > 3 * head->so_qlimit / 2);
561 ACCEPT_UNLOCK();
562#ifdef REGRESSION
563 if (regression_sonewconn_earlytest && over) {
564#else
565 if (over) {
566#endif
567 overcount++;
568
569 if (ratecheck(&lastover, &overinterval)) {
570 log(LOG_DEBUG, "%s: pcb %p: Listen queue overflow: "
571 "%i already in queue awaiting acceptance "
572 "(%d occurrences)\n",
573 __func__, head->so_pcb, head->so_qlen, overcount);
574
575 overcount = 0;
576 }
577
578 return (NULL);
579 }
580 VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
581 __func__, __LINE__, head));
582 so = soalloc(head->so_vnet);
583 if (so == NULL) {
584 log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
585 "limit reached or out of memory\n",
586 __func__, head->so_pcb);
587 return (NULL);
588 }
589 if ((head->so_options & SO_ACCEPTFILTER) != 0)
590 connstatus = 0;
591 so->so_head = head;
592 so->so_type = head->so_type;
593 so->so_options = head->so_options &~ SO_ACCEPTCONN;
594 so->so_linger = head->so_linger;
595 so->so_state = head->so_state | SS_NOFDREF;
596 so->so_fibnum = head->so_fibnum;
597 so->so_proto = head->so_proto;
598 so->so_cred = crhold(head->so_cred);
599#ifdef MAC
600 mac_socket_newconn(head, so);
601#endif
602 knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
603 knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
604 VNET_SO_ASSERT(head);
605 if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
606 sodealloc(so);
607 log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
608 __func__, head->so_pcb);
609 return (NULL);
610 }
611 if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
612 sodealloc(so);
613 log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
614 __func__, head->so_pcb);
615 return (NULL);
616 }
617 so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
618 so->so_snd.sb_lowat = head->so_snd.sb_lowat;
619 so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
620 so->so_snd.sb_timeo = head->so_snd.sb_timeo;
621 so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
622 so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
623 so->so_state |= connstatus;
624 ACCEPT_LOCK();
625 /*
626 * The accept socket may be tearing down but we just
627 * won a race on the ACCEPT_LOCK.
628 * However, if sctp_peeloff() is called on a 1-to-many
629	 * style socket, SO_ACCEPTCONN need not be set.
630 */
631 if (!(head->so_options & SO_ACCEPTCONN) &&
632 ((head->so_proto->pr_protocol != IPPROTO_SCTP) ||
633 (head->so_type != SOCK_SEQPACKET))) {
634 SOCK_LOCK(so);
635 so->so_head = NULL;
636 sofree(so); /* NB: returns ACCEPT_UNLOCK'ed. */
637 return (NULL);
638 }
639 if (connstatus) {
640 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
641 so->so_qstate |= SQ_COMP;
642 head->so_qlen++;
643 } else {
644 /*
645 * Keep removing sockets from the head until there's room for
646 * us to insert on the tail. In pre-locking revisions, this
647 * was a simple if(), but as we could be racing with other
648 * threads and soabort() requires dropping locks, we must
649 * loop waiting for the condition to be true.
650 */
651 while (head->so_incqlen > head->so_qlimit) {
652 struct socket *sp;
653 sp = TAILQ_FIRST(&head->so_incomp);
654 TAILQ_REMOVE(&head->so_incomp, sp, so_list);
655 head->so_incqlen--;
656 sp->so_qstate &= ~SQ_INCOMP;
657 sp->so_head = NULL;
658 ACCEPT_UNLOCK();
659 soabort(sp);
660 ACCEPT_LOCK();
661 }
662 TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
663 so->so_qstate |= SQ_INCOMP;
664 head->so_incqlen++;
665 }
666 ACCEPT_UNLOCK();
667 if (connstatus) {
668 sorwakeup(head);
669 wakeup_one(&head->so_timeo);
670 }
671 return (so);
672}
673
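/*
 * Illustrative sketch (not part of this file), assuming a protocol that has
 * just completed an incoming handshake on the listen socket "head": it asks
 * the socket layer for a new socket and, once its own state is attached,
 * marks the socket connected so that accept() can return it.  Error handling
 * and protocol locking are elided.
 *
 *	struct socket *so;
 *
 *	so = sonewconn(head, 0);
 *	if (so == NULL)
 *		return;			(queue overflow or allocation failure)
 *	... attach protocol state hanging off so ...
 *	soisconnected(so);		(moves so to the complete queue)
 */
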
674int
675sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
676{
677 int error;
678
679 CURVNET_SET(so->so_vnet);
680 error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
681 CURVNET_RESTORE();
682 return (error);
683}
684
685int
686sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
687{
688 int error;
689
690 CURVNET_SET(so->so_vnet);
691 error = (*so->so_proto->pr_usrreqs->pru_bindat)(fd, so, nam, td);
692 CURVNET_RESTORE();
693 return (error);
694}
695
696/*
697 * solisten() transitions a socket from a non-listening state to a listening
698 * state, but can also be used to update the listen queue depth on an
699 * existing listen socket. The protocol will call back into the sockets
700 * layer using solisten_proto_check() and solisten_proto() to check and set
701	 * socket-layer listen state.  Callbacks are used so that the protocol can
702 * acquire both protocol and socket layer locks in whatever order is required
703 * by the protocol.
704 *
705 * Protocol implementors are advised to hold the socket lock across the
706 * socket-layer test and set to avoid races at the socket layer.
707 */
708int
709solisten(struct socket *so, int backlog, struct thread *td)
710{
711 int error;
712
713 CURVNET_SET(so->so_vnet);
714 error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
715 CURVNET_RESTORE();
716 return (error);
717}
718
719int
720solisten_proto_check(struct socket *so)
721{
722
723 SOCK_LOCK_ASSERT(so);
724
725 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
726 SS_ISDISCONNECTING))
727 return (EINVAL);
728 return (0);
729}
730
731void
732solisten_proto(struct socket *so, int backlog)
733{
734
735 SOCK_LOCK_ASSERT(so);
736
737 if (backlog < 0 || backlog > somaxconn)
738 backlog = somaxconn;
739 so->so_qlimit = backlog;
740 so->so_options |= SO_ACCEPTCONN;
741}
742
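/*
 * Illustrative sketch (not part of this file) of the check/set pattern
 * described above, as a protocol's pru_listen method might use it.  The
 * protocol lock shown here (INP_WLOCK/INP_WUNLOCK) is an assumption for the
 * example; the point is that the socket lock is held across both the check
 * and the commit.
 *
 *	INP_WLOCK(inp);
 *	SOCK_LOCK(so);
 *	error = solisten_proto_check(so);
 *	if (error == 0) {
 *		... protocol-specific listen setup, e.g. implicit bind ...
 *		solisten_proto(so, backlog);
 *	}
 *	SOCK_UNLOCK(so);
 *	INP_WUNLOCK(inp);
 */
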
743/*
744 * Evaluate the reference count and named references on a socket; if no
745 * references remain, free it. This should be called whenever a reference is
746 * released, such as in sorele(), but also when named reference flags are
747 * cleared in socket or protocol code.
748 *
749 * sofree() will free the socket if:
750 *
751 * - There are no outstanding file descriptor references or related consumers
752 * (so_count == 0).
753 *
754 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
755 *
756 * - The protocol does not have an outstanding strong reference on the socket
757 * (SS_PROTOREF).
758 *
759	 * - The socket is not in a completed connection queue, where a process may
760	 *   have already been notified that it is present; removing it from there
761	 *   could block that process in accept() despite select() saying it was ready.
762 */
763void
764sofree(struct socket *so)
765{
766 struct protosw *pr = so->so_proto;
767 struct socket *head;
768
769 ACCEPT_LOCK_ASSERT();
770 SOCK_LOCK_ASSERT(so);
771
772 if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
773 (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
774 SOCK_UNLOCK(so);
775 ACCEPT_UNLOCK();
776 return;
777 }
778
779 head = so->so_head;
780 if (head != NULL) {
781 KASSERT((so->so_qstate & SQ_COMP) != 0 ||
782 (so->so_qstate & SQ_INCOMP) != 0,
783 ("sofree: so_head != NULL, but neither SQ_COMP nor "
784 "SQ_INCOMP"));
785 KASSERT((so->so_qstate & SQ_COMP) == 0 ||
786 (so->so_qstate & SQ_INCOMP) == 0,
787 ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
788 TAILQ_REMOVE(&head->so_incomp, so, so_list);
789 head->so_incqlen--;
790 so->so_qstate &= ~SQ_INCOMP;
791 so->so_head = NULL;
792 }
793 KASSERT((so->so_qstate & SQ_COMP) == 0 &&
794 (so->so_qstate & SQ_INCOMP) == 0,
795 ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
796 so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
797 if (so->so_options & SO_ACCEPTCONN) {
798 KASSERT((TAILQ_EMPTY(&so->so_comp)),
799 ("sofree: so_comp populated"));
800 KASSERT((TAILQ_EMPTY(&so->so_incomp)),
801 ("sofree: so_incomp populated"));
802 }
803 SOCK_UNLOCK(so);
804 ACCEPT_UNLOCK();
805
806 VNET_SO_ASSERT(so);
807 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
808 (*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
809 if (pr->pr_usrreqs->pru_detach != NULL)
810 (*pr->pr_usrreqs->pru_detach)(so);
811
812 /*
813 * From this point on, we assume that no other references to this
814 * socket exist anywhere else in the stack. Therefore, no locks need
815 * to be acquired or held.
816 *
817 * We used to do a lot of socket buffer and socket locking here, as
818 * well as invoke sorflush() and perform wakeups. The direct call to
819 * dom_dispose() and sbrelease_internal() are an inlining of what was
820 * necessary from sorflush().
821 *
822 * Notice that the socket buffer and kqueue state are torn down
823	 * before calling pru_detach.  This means that protocols should not
824 * assume they can perform socket wakeups, etc, in their detach code.
825 */
826 sbdestroy(&so->so_snd, so);
827 sbdestroy(&so->so_rcv, so);
828 seldrain(&so->so_snd.sb_sel);
829 seldrain(&so->so_rcv.sb_sel);
830 knlist_destroy(&so->so_rcv.sb_sel.si_note);
831 knlist_destroy(&so->so_snd.sb_sel.si_note);
832 sodealloc(so);
833}
834
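/*
 * Illustrative sketch (not part of this file): protocol code that holds a
 * strong reference (SS_PROTOREF) typically drops it as follows, taking the
 * accept and socket locks that sofree() asserts.  sofree() returns with both
 * locks released whether or not the socket was actually freed.
 *
 *	ACCEPT_LOCK();
 *	SOCK_LOCK(so);
 *	so->so_state &= ~SS_PROTOREF;
 *	sofree(so);
 */
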
835/*
836 * Close a socket on last file table reference removal. Initiate disconnect
837 * if connected. Free socket when disconnect complete.
838 *
839 * This function will sorele() the socket. Note that soclose() may be called
840 * prior to the ref count reaching zero. The actual socket structure will
841 * not be freed until the ref count reaches zero.
842 */
843int
844soclose(struct socket *so)
845{
846 int error = 0;
847
848 KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
849
850 CURVNET_SET(so->so_vnet);
851 funsetown(&so->so_sigio);
852 if (so->so_state & SS_ISCONNECTED) {
853 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
854 error = sodisconnect(so);
855 if (error) {
856 if (error == ENOTCONN)
857 error = 0;
858 goto drop;
859 }
860 }
861 if (so->so_options & SO_LINGER) {
862 if ((so->so_state & SS_ISDISCONNECTING) &&
863 (so->so_state & SS_NBIO))
864 goto drop;
865 while (so->so_state & SS_ISCONNECTED) {
866 error = tsleep(&so->so_timeo,
867 PSOCK | PCATCH, "soclos",
868 so->so_linger * hz);
869 if (error)
870 break;
871 }
872 }
873 }
874
875drop:
876 if (so->so_proto->pr_usrreqs->pru_close != NULL)
877 (*so->so_proto->pr_usrreqs->pru_close)(so);
878 ACCEPT_LOCK();
879 if (so->so_options & SO_ACCEPTCONN) {
880 struct socket *sp;
881 /*
882 * Prevent new additions to the accept queues due
883 * to ACCEPT_LOCK races while we are draining them.
884 */
885 so->so_options &= ~SO_ACCEPTCONN;
886 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
887 TAILQ_REMOVE(&so->so_incomp, sp, so_list);
888 so->so_incqlen--;
889 sp->so_qstate &= ~SQ_INCOMP;
890 sp->so_head = NULL;
891 ACCEPT_UNLOCK();
892 soabort(sp);
893 ACCEPT_LOCK();
894 }
895 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
896 TAILQ_REMOVE(&so->so_comp, sp, so_list);
897 so->so_qlen--;
898 sp->so_qstate &= ~SQ_COMP;
899 sp->so_head = NULL;
900 ACCEPT_UNLOCK();
901 soabort(sp);
902 ACCEPT_LOCK();
903 }
904 KASSERT((TAILQ_EMPTY(&so->so_comp)),
905 ("%s: so_comp populated", __func__));
906 KASSERT((TAILQ_EMPTY(&so->so_incomp)),
907 ("%s: so_incomp populated", __func__));
908 }
909 SOCK_LOCK(so);
910 KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
911 so->so_state |= SS_NOFDREF;
912 sorele(so); /* NB: Returns with ACCEPT_UNLOCK(). */
913 CURVNET_RESTORE();
914 return (error);
915}
916
917/*
918 * soabort() is used to abruptly tear down a connection, such as when a
919 * resource limit is reached (listen queue depth exceeded), or if a listen
920 * socket is closed while there are sockets waiting to be accepted.
921 *
922 * This interface is tricky, because it is called on an unreferenced socket,
923 * and must be called only by a thread that has actually removed the socket
924 * from the listen queue it was on, or races with other threads are risked.
925 *
926 * This interface will call into the protocol code, so must not be called
927 * with any socket locks held. Protocols do call it while holding their own
928 * recursible protocol mutexes, but this is something that should be subject
929 * to review in the future.
930 */
931void
932soabort(struct socket *so)
933{
934
935 /*
936	 * Insofar as possible, assert that no references to this
937 * socket are held. This is not quite the same as asserting that the
938 * current thread is responsible for arranging for no references, but
939 * is as close as we can get for now.
940 */
941 KASSERT(so->so_count == 0, ("soabort: so_count"));
942 KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
943 KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
944	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
945	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));
946 VNET_SO_ASSERT(so);
947
948 if (so->so_proto->pr_usrreqs->pru_abort != NULL)
949 (*so->so_proto->pr_usrreqs->pru_abort)(so);
950 ACCEPT_LOCK();
951 SOCK_LOCK(so);
952 sofree(so);
953}
954
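/*
 * Illustrative sketch (not part of this file), restating the calling
 * discipline above: the caller unlinks the socket from its listen queue
 * under ACCEPT_LOCK, drops that lock, and only then calls soabort(), which
 * consumes the socket.  This mirrors the drain loops in sonewconn() and
 * soclose().
 *
 *	ACCEPT_LOCK();
 *	sp = TAILQ_FIRST(&head->so_incomp);
 *	TAILQ_REMOVE(&head->so_incomp, sp, so_list);
 *	head->so_incqlen--;
 *	sp->so_qstate &= ~SQ_INCOMP;
 *	sp->so_head = NULL;
 *	ACCEPT_UNLOCK();
 *	soabort(sp);
 */
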
955int
956soaccept(struct socket *so, struct sockaddr **nam)
957{
958 int error;
959
960 SOCK_LOCK(so);
961 KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
962 so->so_state &= ~SS_NOFDREF;
963 SOCK_UNLOCK(so);
964
965 CURVNET_SET(so->so_vnet);
966 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
967 CURVNET_RESTORE();
968 return (error);
969}
970
971int
972soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
973{
974
975 return (soconnectat(AT_FDCWD, so, nam, td));
976}
977
978int
979soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
980{
981 int error;
982
983 if (so->so_options & SO_ACCEPTCONN)
984 return (EOPNOTSUPP);
985
986 CURVNET_SET(so->so_vnet);
987 /*
988 * If protocol is connection-based, can only connect once.
989 * Otherwise, if connected, try to disconnect first. This allows
990 * user to disconnect by connecting to, e.g., a null address.
991 */
992 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
993 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
994 (error = sodisconnect(so)))) {
995 error = EISCONN;
996 } else {
997 /*
998 * Prevent accumulated error from previous connection from
999 * biting us.
1000 */
1001 so->so_error = 0;
1002 if (fd == AT_FDCWD) {
1003 error = (*so->so_proto->pr_usrreqs->pru_connect)(so,
1004 nam, td);
1005 } else {
1006 error = (*so->so_proto->pr_usrreqs->pru_connectat)(fd,
1007 so, nam, td);
1008 }
1009 }
1010 CURVNET_RESTORE();
1011
1012 return (error);
1013}
1014
1015int
1016soconnect2(struct socket *so1, struct socket *so2)
1017{
1018 int error;
1019
1020 CURVNET_SET(so1->so_vnet);
1021 error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
1022 CURVNET_RESTORE();
1023 return (error);
1024}
1025
1026int
1027sodisconnect(struct socket *so)
1028{
1029 int error;
1030
1031 if ((so->so_state & SS_ISCONNECTED) == 0)
1032 return (ENOTCONN);
1033 if (so->so_state & SS_ISDISCONNECTING)
1034 return (EALREADY);
1035 VNET_SO_ASSERT(so);
1036 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
1037 return (error);
1038}
1039
1040#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
1041
1042int
1043sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
1044 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1045{
1046 long space;
1047 ssize_t resid;
1048 int clen = 0, error, dontroute;
1049
1050 KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
1051 KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
1052 ("sosend_dgram: !PR_ATOMIC"));
1053
1054 if (uio != NULL)
1055 resid = uio->uio_resid;
1056 else
1057 resid = top->m_pkthdr.len;
1058 /*
1059 * In theory resid should be unsigned. However, space must be
1060 * signed, as it might be less than 0 if we over-committed, and we
1061 * must use a signed comparison of space and resid. On the other
1062 * hand, a negative resid causes us to loop sending 0-length
1063 * segments to the protocol.
1064 */
1065 if (resid < 0) {
1066 error = EINVAL;
1067 goto out;
1068 }
1069
1070 dontroute =
1071 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
1072 if (td != NULL)
1073 td->td_ru.ru_msgsnd++;
1074 if (control != NULL)
1075 clen = control->m_len;
1076
1077 SOCKBUF_LOCK(&so->so_snd);
1078 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1079 SOCKBUF_UNLOCK(&so->so_snd);
1080 error = EPIPE;
1081 goto out;
1082 }
1083 if (so->so_error) {
1084 error = so->so_error;
1085 so->so_error = 0;
1086 SOCKBUF_UNLOCK(&so->so_snd);
1087 goto out;
1088 }
1089 if ((so->so_state & SS_ISCONNECTED) == 0) {
1090 /*
1091		 * `sendto' and `sendmsg' are allowed on a connection-based
1092 * socket if it supports implied connect. Return ENOTCONN if
1093 * not connected and no address is supplied.
1094 */
1095 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1096 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1097 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1098 !(resid == 0 && clen != 0)) {
1099 SOCKBUF_UNLOCK(&so->so_snd);
1100 error = ENOTCONN;
1101 goto out;
1102 }
1103 } else if (addr == NULL) {
1104 if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1105 error = ENOTCONN;
1106 else
1107 error = EDESTADDRREQ;
1108 SOCKBUF_UNLOCK(&so->so_snd);
1109 goto out;
1110 }
1111 }
1112
1113 /*
1114 * Do we need MSG_OOB support in SOCK_DGRAM? Signs here may be a
1115 * problem and need fixing.
1116 */
1117 space = sbspace(&so->so_snd);
1118 if (flags & MSG_OOB)
1119 space += 1024;
1120 space -= clen;
1121 SOCKBUF_UNLOCK(&so->so_snd);
1122 if (resid > space) {
1123 error = EMSGSIZE;
1124 goto out;
1125 }
1126 if (uio == NULL) {
1127 resid = 0;
1128 if (flags & MSG_EOR)
1129 top->m_flags |= M_EOR;
1130 } else {
1131 /*
1132 * Copy the data from userland into a mbuf chain.
1133 * If no data is to be copied in, a single empty mbuf
1134 * is returned.
1135 */
1136 top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
1137 (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
1138 if (top == NULL) {
1139 error = EFAULT; /* only possible error */
1140 goto out;
1141 }
1142 space -= resid - uio->uio_resid;
1143 resid = uio->uio_resid;
1144 }
1145 KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
1146 /*
1147 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
1148 * than with.
1149 */
1150 if (dontroute) {
1151 SOCK_LOCK(so);
1152 so->so_options |= SO_DONTROUTE;
1153 SOCK_UNLOCK(so);
1154 }
1155 /*
1156 * XXX all the SBS_CANTSENDMORE checks previously done could be out
1157	 * of date.  We could have received a reset packet in an interrupt or
1158 * maybe we slept while doing page faults in uiomove() etc. We could
1159 * probably recheck again inside the locking protection here, but
1160 * there are probably other places that this also happens. We must
1161 * rethink this.
1162 */
1163 VNET_SO_ASSERT(so);
1164 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1165 (flags & MSG_OOB) ? PRUS_OOB :
1166 /*
1167	 * If the user set MSG_EOF, the protocol understands this flag, and
1168	 * there is nothing left to send, then use PRU_SEND_EOF instead of PRU_SEND.
1169 */
1170 ((flags & MSG_EOF) &&
1171 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1172 (resid <= 0)) ?
1173 PRUS_EOF :
1174 /* If there is more to send set PRUS_MORETOCOME */
1175 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1176 top, addr, control, td);
1177 if (dontroute) {
1178 SOCK_LOCK(so);
1179 so->so_options &= ~SO_DONTROUTE;
1180 SOCK_UNLOCK(so);
1181 }
1182 clen = 0;
1183 control = NULL;
1184 top = NULL;
1185out:
1186 if (top != NULL)
1187 m_freem(top);
1188 if (control != NULL)
1189 m_freem(control);
1190 return (error);
1191}
1192
1193/*
1194 * Send on a socket. If send must go all at once and message is larger than
1195 * send buffering, then hard error. Lock against other senders. If must go
1196 * all at once and not enough room now, then inform user that this would
1197 * block and do nothing. Otherwise, if nonblocking, send as much as
1198 * possible. The data to be sent is described by "uio" if nonzero, otherwise
1199 * by the mbuf chain "top" (which must be null if uio is not). Data provided
1200 * in mbuf chain must be small enough to send all at once.
1201 *
1202 * Returns nonzero on error, timeout or signal; callers must check for short
1203 * counts if EINTR/ERESTART are returned. Data and control buffers are freed
1204 * on return.
1205 */
1206int
1207sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
1208 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1209{
1210 long space;
1211 ssize_t resid;
1212 int clen = 0, error, dontroute;
1213 int atomic = sosendallatonce(so) || top;
1214
1215 if (uio != NULL)
1216 resid = uio->uio_resid;
1217 else
1218 resid = top->m_pkthdr.len;
1219 /*
1220 * In theory resid should be unsigned. However, space must be
1221 * signed, as it might be less than 0 if we over-committed, and we
1222 * must use a signed comparison of space and resid. On the other
1223 * hand, a negative resid causes us to loop sending 0-length
1224 * segments to the protocol.
1225 *
1226 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
1227 * type sockets since that's an error.
1228 */
1229 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
1230 error = EINVAL;
1231 goto out;
1232 }
1233
1234 dontroute =
1235 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
1236 (so->so_proto->pr_flags & PR_ATOMIC);
1237 if (td != NULL)
1238 td->td_ru.ru_msgsnd++;
1239 if (control != NULL)
1240 clen = control->m_len;
1241
1242 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
1243 if (error)
1244 goto out;
1245
1246restart:
1247 do {
1248 SOCKBUF_LOCK(&so->so_snd);
1249 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1250 SOCKBUF_UNLOCK(&so->so_snd);
1251 error = EPIPE;
1252 goto release;
1253 }
1254 if (so->so_error) {
1255 error = so->so_error;
1256 so->so_error = 0;
1257 SOCKBUF_UNLOCK(&so->so_snd);
1258 goto release;
1259 }
1260 if ((so->so_state & SS_ISCONNECTED) == 0) {
1261 /*
1262			 * `sendto' and `sendmsg' are allowed on a connection-
1263 * based socket if it supports implied connect.
1264 * Return ENOTCONN if not connected and no address is
1265 * supplied.
1266 */
1267 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1268 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1269 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1270 !(resid == 0 && clen != 0)) {
1271 SOCKBUF_UNLOCK(&so->so_snd);
1272 error = ENOTCONN;
1273 goto release;
1274 }
1275 } else if (addr == NULL) {
1276 SOCKBUF_UNLOCK(&so->so_snd);
1277 if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1278 error = ENOTCONN;
1279 else
1280 error = EDESTADDRREQ;
1281 goto release;
1282 }
1283 }
1284 space = sbspace(&so->so_snd);
1285 if (flags & MSG_OOB)
1286 space += 1024;
1287 if ((atomic && resid > so->so_snd.sb_hiwat) ||
1288 clen > so->so_snd.sb_hiwat) {
1289 SOCKBUF_UNLOCK(&so->so_snd);
1290 error = EMSGSIZE;
1291 goto release;
1292 }
1293 if (space < resid + clen &&
1294 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
1295 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
1296 SOCKBUF_UNLOCK(&so->so_snd);
1297 error = EWOULDBLOCK;
1298 goto release;
1299 }
1300 error = sbwait(&so->so_snd);
1301 SOCKBUF_UNLOCK(&so->so_snd);
1302 if (error)
1303 goto release;
1304 goto restart;
1305 }
1306 SOCKBUF_UNLOCK(&so->so_snd);
1307 space -= clen;
1308 do {
1309 if (uio == NULL) {
1310 resid = 0;
1311 if (flags & MSG_EOR)
1312 top->m_flags |= M_EOR;
1313 } else if (resid > 0) {
1314 /*
1315 * Copy the data from userland into a mbuf
1316 * chain. If no data is to be copied in,
1317 * a single empty mbuf is returned.
1318 */
1319 top = m_uiotombuf(uio, M_WAITOK, space,
1320 (atomic ? max_hdr : 0),
1321 (atomic ? M_PKTHDR : 0) |
1322 ((flags & MSG_EOR) ? M_EOR : 0));
1323 if (top == NULL) {
1324 error = EFAULT; /* only possible error */
1325 goto release;
1326 }
1327 space -= resid - uio->uio_resid;
1328 resid = uio->uio_resid;
1329 }
1330 if (dontroute) {
1331 SOCK_LOCK(so);
1332 so->so_options |= SO_DONTROUTE;
1333 SOCK_UNLOCK(so);
1334 }
1335 /*
1336 * XXX all the SBS_CANTSENDMORE checks previously
1337			 * done could be out of date.  We could have received
1338 * a reset packet in an interrupt or maybe we slept
1339 * while doing page faults in uiomove() etc. We
1340 * could probably recheck again inside the locking
1341 * protection here, but there are probably other
1342 * places that this also happens. We must rethink
1343 * this.
1344 */
1345 VNET_SO_ASSERT(so);
1346 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1347 (flags & MSG_OOB) ? PRUS_OOB :
1348 /*
1349 * If the user set MSG_EOF, the protocol understands
1350			 * this flag, and there is nothing left to send, then use
1351 * PRU_SEND_EOF instead of PRU_SEND.
1352 */
1353 ((flags & MSG_EOF) &&
1354 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1355 (resid <= 0)) ?
1356 PRUS_EOF :
1357 /* If there is more to send set PRUS_MORETOCOME. */
1358 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1359 top, addr, control, td);
1360 if (dontroute) {
1361 SOCK_LOCK(so);
1362 so->so_options &= ~SO_DONTROUTE;
1363 SOCK_UNLOCK(so);
1364 }
1365 clen = 0;
1366 control = NULL;
1367 top = NULL;
1368 if (error)
1369 goto release;
1370 } while (resid && space > 0);
1371 } while (resid);
1372
1373release:
1374 sbunlock(&so->so_snd);
1375out:
1376 if (top != NULL)
1377 m_freem(top);
1378 if (control != NULL)
1379 m_freem(control);
1380 return (error);
1381}
1382
1383int
1384sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
1385 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1386{
1387 int error;
1388
1389 CURVNET_SET(so->so_vnet);
1390 error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
1391 control, flags, td);
1392 CURVNET_RESTORE();
1393 return (error);
1394}
1395
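/*
 * Illustrative sketch (not part of this file): an in-kernel consumer that
 * wants to send a plain buffer usually wraps it in a uio and calls sosend()
 * with no mbuf chain.  "buf", "len", "so" and "td" are assumed to be
 * supplied by the caller; error handling is elided.
 *
 *	struct uio auio;
 *	struct iovec aiov;
 *
 *	aiov.iov_base = buf;
 *	aiov.iov_len = len;
 *	auio.uio_iov = &aiov;
 *	auio.uio_iovcnt = 1;
 *	auio.uio_offset = 0;
 *	auio.uio_resid = len;
 *	auio.uio_segflg = UIO_SYSSPACE;
 *	auio.uio_rw = UIO_WRITE;
 *	auio.uio_td = td;
 *	error = sosend(so, NULL, &auio, NULL, NULL, 0, td);
 */
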
1396/*
1397 * The part of soreceive() that implements reading non-inline out-of-band
1398 * data from a socket. For more complete comments, see soreceive(), from
1399 * which this code originated.
1400 *
1401 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
1402 * unable to return an mbuf chain to the caller.
1403 */
1404static int
1405soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
1406{
1407 struct protosw *pr = so->so_proto;
1408 struct mbuf *m;
1409 int error;
1410
1411 KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
1412 VNET_SO_ASSERT(so);
1413
1414 m = m_get(M_WAITOK, MT_DATA);
1415 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
1416 if (error)
1417 goto bad;
1418 do {
1419 error = uiomove(mtod(m, void *),
1420 (int) min(uio->uio_resid, m->m_len), uio);
1421 m = m_free(m);
1422 } while (uio->uio_resid && error == 0 && m);
1423bad:
1424 if (m != NULL)
1425 m_freem(m);
1426 return (error);
1427}
1428
1429/*
1430 * Following replacement or removal of the first mbuf on the first mbuf chain
1431 * of a socket buffer, push necessary state changes back into the socket
1432 * buffer so that other consumers see the values consistently. 'nextrecord'
1433 * is the callers locally stored value of the original value of
1434 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
1435 * NOTE: 'nextrecord' may be NULL.
1436 */
1437static __inline void
1438sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
1439{
1440
1441 SOCKBUF_LOCK_ASSERT(sb);
1442 /*
1443 * First, update for the new value of nextrecord. If necessary, make
1444 * it the first record.
1445 */
1446 if (sb->sb_mb != NULL)
1447 sb->sb_mb->m_nextpkt = nextrecord;
1448 else
1449 sb->sb_mb = nextrecord;
1450
1451 /*
1452 * Now update any dependent socket buffer fields to reflect the new
1453 * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the
1454 * addition of a second clause that takes care of the case where
1455 * sb_mb has been updated, but remains the last record.
1456 */
1457 if (sb->sb_mb == NULL) {
1458 sb->sb_mbtail = NULL;
1459 sb->sb_lastrecord = NULL;
1460 } else if (sb->sb_mb->m_nextpkt == NULL)
1461 sb->sb_lastrecord = sb->sb_mb;
1462}
1463
1464/*
1465 * Implement receive operations on a socket. We depend on the way that
1466 * records are added to the sockbuf by sbappend. In particular, each record
1467 * (mbufs linked through m_next) must begin with an address if the protocol
1468 * so specifies, followed by an optional mbuf or mbufs containing ancillary
1469 * data, and then zero or more mbufs of data. In order to allow parallelism
1470 * between network receive and copying to user space, as well as avoid
1471 * sleeping with a mutex held, we release the socket buffer mutex during the
1472 * user space copy. Although the sockbuf is locked, new data may still be
1473 * appended, and thus we must maintain consistency of the sockbuf during that
1474 * time.
1475 *
1476 * The caller may receive the data as a single mbuf chain by supplying an
1477 * mbuf **mp0 for use in returning the chain. The uio is then used only for
1478 * the count in uio_resid.
1479 */
1480int
1481soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
1482 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1483{
1484 struct mbuf *m, **mp;
1485 int flags, error, offset;
1486 ssize_t len;
1487 struct protosw *pr = so->so_proto;
1488 struct mbuf *nextrecord;
1489 int moff, type = 0;
1490 ssize_t orig_resid = uio->uio_resid;
1491
1492 mp = mp0;
1493 if (psa != NULL)
1494 *psa = NULL;
1495 if (controlp != NULL)
1496 *controlp = NULL;
1497 if (flagsp != NULL)
1498 flags = *flagsp &~ MSG_EOR;
1499 else
1500 flags = 0;
1501 if (flags & MSG_OOB)
1502 return (soreceive_rcvoob(so, uio, flags));
1503 if (mp != NULL)
1504 *mp = NULL;
1505 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
1506 && uio->uio_resid) {
1507 VNET_SO_ASSERT(so);
1508 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
1509 }
1510
1511 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1512 if (error)
1513 return (error);
1514
1515restart:
1516 SOCKBUF_LOCK(&so->so_rcv);
1517 m = so->so_rcv.sb_mb;
1518 /*
1519 * If we have less data than requested, block awaiting more (subject
1520 * to any timeout) if:
1521 * 1. the current count is less than the low water mark, or
1522 * 2. MSG_DONTWAIT is not set
1523 */
1524 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1525 sbavail(&so->so_rcv) < uio->uio_resid) &&
1526 sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
1527 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1528 KASSERT(m != NULL || !sbavail(&so->so_rcv),
1529 ("receive: m == %p sbavail == %u",
1530 m, sbavail(&so->so_rcv)));
1531 if (so->so_error) {
1532 if (m != NULL)
1533 goto dontblock;
1534 error = so->so_error;
1535 if ((flags & MSG_PEEK) == 0)
1536 so->so_error = 0;
1537 SOCKBUF_UNLOCK(&so->so_rcv);
1538 goto release;
1539 }
1540 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1541 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1542 if (m == NULL) {
1543 SOCKBUF_UNLOCK(&so->so_rcv);
1544 goto release;
1545 } else
1546 goto dontblock;
1547 }
1548 for (; m != NULL; m = m->m_next)
1549 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1550 m = so->so_rcv.sb_mb;
1551 goto dontblock;
1552 }
1553 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1554 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1555 SOCKBUF_UNLOCK(&so->so_rcv);
1556 error = ENOTCONN;
1557 goto release;
1558 }
1559 if (uio->uio_resid == 0) {
1560 SOCKBUF_UNLOCK(&so->so_rcv);
1561 goto release;
1562 }
1563 if ((so->so_state & SS_NBIO) ||
1564 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1565 SOCKBUF_UNLOCK(&so->so_rcv);
1566 error = EWOULDBLOCK;
1567 goto release;
1568 }
1569 SBLASTRECORDCHK(&so->so_rcv);
1570 SBLASTMBUFCHK(&so->so_rcv);
1571 error = sbwait(&so->so_rcv);
1572 SOCKBUF_UNLOCK(&so->so_rcv);
1573 if (error)
1574 goto release;
1575 goto restart;
1576 }
1577dontblock:
1578 /*
1579 * From this point onward, we maintain 'nextrecord' as a cache of the
1580 * pointer to the next record in the socket buffer. We must keep the
1581 * various socket buffer pointers and local stack versions of the
1582 * pointers in sync, pushing out modifications before dropping the
1583 * socket buffer mutex, and re-reading them when picking it up.
1584 *
1585 * Otherwise, we will race with the network stack appending new data
1586 * or records onto the socket buffer by using inconsistent/stale
1587 * versions of the field, possibly resulting in socket buffer
1588 * corruption.
1589 *
1590 * By holding the high-level sblock(), we prevent simultaneous
1591 * readers from pulling off the front of the socket buffer.
1592 */
1593 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1594 if (uio->uio_td)
1595 uio->uio_td->td_ru.ru_msgrcv++;
1596 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1597 SBLASTRECORDCHK(&so->so_rcv);
1598 SBLASTMBUFCHK(&so->so_rcv);
1599 nextrecord = m->m_nextpkt;
1600 if (pr->pr_flags & PR_ADDR) {
1601 KASSERT(m->m_type == MT_SONAME,
1602 ("m->m_type == %d", m->m_type));
1603 orig_resid = 0;
1604 if (psa != NULL)
1605 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
1606 M_NOWAIT);
1607 if (flags & MSG_PEEK) {
1608 m = m->m_next;
1609 } else {
1610 sbfree(&so->so_rcv, m);
1611 so->so_rcv.sb_mb = m_free(m);
1612 m = so->so_rcv.sb_mb;
1613 sockbuf_pushsync(&so->so_rcv, nextrecord);
1614 }
1615 }
1616
1617 /*
1618 * Process one or more MT_CONTROL mbufs present before any data mbufs
1619 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
1620 * just copy the data; if !MSG_PEEK, we call into the protocol to
1621 * perform externalization (or freeing if controlp == NULL).
1622 */
1623 if (m != NULL && m->m_type == MT_CONTROL) {
1624 struct mbuf *cm = NULL, *cmn;
1625 struct mbuf **cme = &cm;
1626
1627 do {
1628 if (flags & MSG_PEEK) {
1629 if (controlp != NULL) {
1630 *controlp = m_copy(m, 0, m->m_len);
1631 controlp = &(*controlp)->m_next;
1632 }
1633 m = m->m_next;
1634 } else {
1635 sbfree(&so->so_rcv, m);
1636 so->so_rcv.sb_mb = m->m_next;
1637 m->m_next = NULL;
1638 *cme = m;
1639 cme = &(*cme)->m_next;
1640 m = so->so_rcv.sb_mb;
1641 }
1642 } while (m != NULL && m->m_type == MT_CONTROL);
1643 if ((flags & MSG_PEEK) == 0)
1644 sockbuf_pushsync(&so->so_rcv, nextrecord);
1645 while (cm != NULL) {
1646 cmn = cm->m_next;
1647 cm->m_next = NULL;
1648 if (pr->pr_domain->dom_externalize != NULL) {
1649 SOCKBUF_UNLOCK(&so->so_rcv);
1650 VNET_SO_ASSERT(so);
1651 error = (*pr->pr_domain->dom_externalize)
1652 (cm, controlp, flags);
1653 SOCKBUF_LOCK(&so->so_rcv);
1654 } else if (controlp != NULL)
1655 *controlp = cm;
1656 else
1657 m_freem(cm);
1658 if (controlp != NULL) {
1659 orig_resid = 0;
1660 while (*controlp != NULL)
1661 controlp = &(*controlp)->m_next;
1662 }
1663 cm = cmn;
1664 }
1665 if (m != NULL)
1666 nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1667 else
1668 nextrecord = so->so_rcv.sb_mb;
1669 orig_resid = 0;
1670 }
1671 if (m != NULL) {
1672 if ((flags & MSG_PEEK) == 0) {
1673 KASSERT(m->m_nextpkt == nextrecord,
1674 ("soreceive: post-control, nextrecord !sync"));
1675 if (nextrecord == NULL) {
1676 KASSERT(so->so_rcv.sb_mb == m,
1677 ("soreceive: post-control, sb_mb!=m"));
1678 KASSERT(so->so_rcv.sb_lastrecord == m,
1679 ("soreceive: post-control, lastrecord!=m"));
1680 }
1681 }
1682 type = m->m_type;
1683 if (type == MT_OOBDATA)
1684 flags |= MSG_OOB;
1685 } else {
1686 if ((flags & MSG_PEEK) == 0) {
1687 KASSERT(so->so_rcv.sb_mb == nextrecord,
1688 ("soreceive: sb_mb != nextrecord"));
1689 if (so->so_rcv.sb_mb == NULL) {
1690 KASSERT(so->so_rcv.sb_lastrecord == NULL,
1691				    ("soreceive: sb_lastrecord != NULL"));
1692 }
1693 }
1694 }
1695 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1696 SBLASTRECORDCHK(&so->so_rcv);
1697 SBLASTMBUFCHK(&so->so_rcv);
1698
1699 /*
1700 * Now continue to read any data mbufs off of the head of the socket
1701 * buffer until the read request is satisfied. Note that 'type' is
1702 * used to store the type of any mbuf reads that have happened so far
1703 * such that soreceive() can stop reading if the type changes, which
1704 * causes soreceive() to return only one of regular data and inline
1705 * out-of-band data in a single socket receive operation.
1706 */
1707 moff = 0;
1708 offset = 0;
1709 while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
1710 && error == 0) {
1711 /*
1712 * If the type of mbuf has changed since the last mbuf
1713 * examined ('type'), end the receive operation.
1714 */
1715 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1716 if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
1717 if (type != m->m_type)
1718 break;
1719 } else if (type == MT_OOBDATA)
1720 break;
1721 else
1722 KASSERT(m->m_type == MT_DATA,
1723 ("m->m_type == %d", m->m_type));
1724 so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1725 len = uio->uio_resid;
1726 if (so->so_oobmark && len > so->so_oobmark - offset)
1727 len = so->so_oobmark - offset;
1728 if (len > m->m_len - moff)
1729 len = m->m_len - moff;
1730 /*
1731 * If mp is set, just pass back the mbufs. Otherwise copy
1732 * them out via the uio, then free. Sockbuf must be
1733		 * consistent here (pointing to the current mbuf and to the next
1734		 * record) when we drop the lock; we must note any additions
1735		 * to the sockbuf when we reacquire it.
1736 */
1737 if (mp == NULL) {
1738 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1739 SBLASTRECORDCHK(&so->so_rcv);
1740 SBLASTMBUFCHK(&so->so_rcv);
1741 SOCKBUF_UNLOCK(&so->so_rcv);
1742 error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1743 SOCKBUF_LOCK(&so->so_rcv);
1744 if (error) {
1745 /*
1746 * The MT_SONAME mbuf has already been removed
1747 * from the record, so it is necessary to
1748 * remove the data mbufs, if any, to preserve
1749 * the invariant in the case of PR_ADDR that
1750 * requires MT_SONAME mbufs at the head of
1751 * each record.
1752 */
1753 if (m && pr->pr_flags & PR_ATOMIC &&
1754 ((flags & MSG_PEEK) == 0))
1755 (void)sbdroprecord_locked(&so->so_rcv);
1756 SOCKBUF_UNLOCK(&so->so_rcv);
1757 goto release;
1758 }
1759 } else
1760 uio->uio_resid -= len;
1761 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1762 if (len == m->m_len - moff) {
1763 if (m->m_flags & M_EOR)
1764 flags |= MSG_EOR;
1765 if (flags & MSG_PEEK) {
1766 m = m->m_next;
1767 moff = 0;
1768 } else {
1769 nextrecord = m->m_nextpkt;
1770 sbfree(&so->so_rcv, m);
1771 if (mp != NULL) {
1772 m->m_nextpkt = NULL;
1773 *mp = m;
1774 mp = &m->m_next;
1775 so->so_rcv.sb_mb = m = m->m_next;
1776 *mp = NULL;
1777 } else {
1778 so->so_rcv.sb_mb = m_free(m);
1779 m = so->so_rcv.sb_mb;
1780 }
1781 sockbuf_pushsync(&so->so_rcv, nextrecord);
1782 SBLASTRECORDCHK(&so->so_rcv);
1783 SBLASTMBUFCHK(&so->so_rcv);
1784 }
1785 } else {
1786 if (flags & MSG_PEEK)
1787 moff += len;
1788 else {
1789 if (mp != NULL) {
1790 if (flags & MSG_DONTWAIT) {
1791 *mp = m_copym(m, 0, len,
1792 M_NOWAIT);
1793 if (*mp == NULL) {
1794 /*
1795 * m_copym() couldn't
1796 * allocate an mbuf.
1797 * Adjust uio_resid back
1798 * (it was adjusted
1799 * down by len bytes,
1800 * which we didn't end
1801 * up "copying" over).
1802 */
1803 uio->uio_resid += len;
1804 break;
1805 }
1806 } else {
1807 SOCKBUF_UNLOCK(&so->so_rcv);
1808 *mp = m_copym(m, 0, len,
1809 M_WAITOK);
1810 SOCKBUF_LOCK(&so->so_rcv);
1811 }
1812 }
1813 sbcut_locked(&so->so_rcv, len);
1814 }
1815 }
1816 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1817 if (so->so_oobmark) {
1818 if ((flags & MSG_PEEK) == 0) {
1819 so->so_oobmark -= len;
1820 if (so->so_oobmark == 0) {
1821 so->so_rcv.sb_state |= SBS_RCVATMARK;
1822 break;
1823 }
1824 } else {
1825 offset += len;
1826 if (offset == so->so_oobmark)
1827 break;
1828 }
1829 }
1830 if (flags & MSG_EOR)
1831 break;
1832 /*
1833		 * If the MSG_WAITALL flag is set (for a non-atomic socket), we
1834 * must not quit until "uio->uio_resid == 0" or an error
1835 * termination. If a signal/timeout occurs, return with a
1836 * short count but without error. Keep sockbuf locked
1837 * against other readers.
1838 */
1839 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1840 !sosendallatonce(so) && nextrecord == NULL) {
1841 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1842 if (so->so_error ||
1843 so->so_rcv.sb_state & SBS_CANTRCVMORE)
1844 break;
1845 /*
1846 * Notify the protocol that some data has been
1847 * drained before blocking.
1848 */
1849 if (pr->pr_flags & PR_WANTRCVD) {
1850 SOCKBUF_UNLOCK(&so->so_rcv);
1851 VNET_SO_ASSERT(so);
1852 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1853 SOCKBUF_LOCK(&so->so_rcv);
1854 }
1855 SBLASTRECORDCHK(&so->so_rcv);
1856 SBLASTMBUFCHK(&so->so_rcv);
1857 /*
1858			 * We could have received some data while we were notifying
1859 * the protocol. Skip blocking in this case.
1860 */
1861 if (so->so_rcv.sb_mb == NULL) {
1862 error = sbwait(&so->so_rcv);
1863 if (error) {
1864 SOCKBUF_UNLOCK(&so->so_rcv);
1865 goto release;
1866 }
1867 }
1868 m = so->so_rcv.sb_mb;
1869 if (m != NULL)
1870 nextrecord = m->m_nextpkt;
1871 }
1872 }
1873
1874 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1875 if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1876 flags |= MSG_TRUNC;
1877 if ((flags & MSG_PEEK) == 0)
1878 (void) sbdroprecord_locked(&so->so_rcv);
1879 }
1880 if ((flags & MSG_PEEK) == 0) {
1881 if (m == NULL) {
1882 /*
1883 * First part is an inline SB_EMPTY_FIXUP(). Second
1884 * part makes sure sb_lastrecord is up-to-date if
1885 * there is still data in the socket buffer.
1886 */
1887 so->so_rcv.sb_mb = nextrecord;
1888 if (so->so_rcv.sb_mb == NULL) {
1889 so->so_rcv.sb_mbtail = NULL;
1890 so->so_rcv.sb_lastrecord = NULL;
1891 } else if (nextrecord->m_nextpkt == NULL)
1892 so->so_rcv.sb_lastrecord = nextrecord;
1893 }
1894 SBLASTRECORDCHK(&so->so_rcv);
1895 SBLASTMBUFCHK(&so->so_rcv);
1896 /*
1897 * If soreceive() is being done from the socket callback,
1898 * then don't need to generate ACK to peer to update window,
1899 * since ACK will be generated on return to TCP.
1900 */
1901 if (!(flags & MSG_SOCALLBCK) &&
1902 (pr->pr_flags & PR_WANTRCVD)) {
1903 SOCKBUF_UNLOCK(&so->so_rcv);
1904 VNET_SO_ASSERT(so);
1905 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1906 SOCKBUF_LOCK(&so->so_rcv);
1907 }
1908 }
1909 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1910 if (orig_resid == uio->uio_resid && orig_resid &&
1911 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1912 SOCKBUF_UNLOCK(&so->so_rcv);
1913 goto restart;
1914 }
1915 SOCKBUF_UNLOCK(&so->so_rcv);
1916
1917 if (flagsp != NULL)
1918 *flagsp |= flags;
1919release:
1920 sbunlock(&so->so_rcv);
1921 return (error);
1922}
1923
1924/*
1925 * Optimized version of soreceive() for stream (TCP) sockets.
1926 * XXXAO: (MSG_WAITALL | MSG_PEEK) isn't properly handled.
1927 */
1928int
1929soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
1930 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1931{
1932 int len = 0, error = 0, flags, oresid;
1933 struct sockbuf *sb;
1934 struct mbuf *m, *n = NULL;
1935
1936 /* We only do stream sockets. */
1937 if (so->so_type != SOCK_STREAM)
1938 return (EINVAL);
1939 if (psa != NULL)
1940 *psa = NULL;
1941 if (controlp != NULL)
1942 return (EINVAL);
1943 if (flagsp != NULL)
1944 flags = *flagsp &~ MSG_EOR;
1945 else
1946 flags = 0;
1947 if (flags & MSG_OOB)
1948 return (soreceive_rcvoob(so, uio, flags));
1949 if (mp0 != NULL)
1950 *mp0 = NULL;
1951
1952 sb = &so->so_rcv;
1953
1954 /* Prevent other readers from entering the socket. */
1955 error = sblock(sb, SBLOCKWAIT(flags));
1956 if (error)
1957 goto out;
1958 SOCKBUF_LOCK(sb);
1959
1960 /* Easy one, no space to copyout anything. */
1961 if (uio->uio_resid == 0) {
1962 error = EINVAL;
1963 goto out;
1964 }
1965 oresid = uio->uio_resid;
1966
1967 /* We will never ever get anything unless we are or were connected. */
1968 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
1969 error = ENOTCONN;
1970 goto out;
1971 }
1972
1973restart:
1974 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1975
1976 /* Abort if socket has reported problems. */
1977 if (so->so_error) {
1978 if (sbavail(sb) > 0)
1979 goto deliver;
1980 if (oresid > uio->uio_resid)
1981 goto out;
1982 error = so->so_error;
1983 if (!(flags & MSG_PEEK))
1984 so->so_error = 0;
1985 goto out;
1986 }
1987
1988 /* Door is closed. Deliver what is left, if any. */
1989 if (sb->sb_state & SBS_CANTRCVMORE) {
1990 if (sbavail(sb) > 0)
1991 goto deliver;
1992 else
1993 goto out;
1994 }
1995
1996 /* Socket buffer is empty and we shall not block. */
1997 if (sbavail(sb) == 0 &&
1998 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
1999 error = EAGAIN;
2000 goto out;
2001 }
2002
2003 /* Socket buffer got some data that we shall deliver now. */
2004 if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
2005	    ((so->so_state & SS_NBIO) ||
2006 (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
2007 sbavail(sb) >= sb->sb_lowat ||
2008 sbavail(sb) >= uio->uio_resid ||
2009	     sbavail(sb) >= sb->sb_hiwat)) {
2010 goto deliver;
2011 }
2012
2013 /* On MSG_WAITALL we must wait until all data or error arrives. */
2014 if ((flags & MSG_WAITALL) &&
2015 (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
2016 goto deliver;
2017
2018 /*
2019 * Wait and block until (more) data comes in.
2020 * NB: Drops the sockbuf lock during wait.
2021 */
2022 error = sbwait(sb);
2023 if (error)
2024 goto out;
2025 goto restart;
2026
2027deliver:
2028 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2029 KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
2030 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
2031
2032 /* Statistics. */
2033 if (uio->uio_td)
2034 uio->uio_td->td_ru.ru_msgrcv++;
2035
2036 /* Fill uio until full or current end of socket buffer is reached. */
2037 len = min(uio->uio_resid, sbavail(sb));
2038 if (mp0 != NULL) {
2039 /* Dequeue as many mbufs as possible. */
2040 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
2041 if (*mp0 == NULL)
2042 *mp0 = sb->sb_mb;
2043 else
2044 m_cat(*mp0, sb->sb_mb);
2045 for (m = sb->sb_mb;
2046 m != NULL && m->m_len <= len;
2047 m = m->m_next) {
2048 KASSERT(!(m->m_flags & M_NOTAVAIL),
2049 ("%s: m %p not available", __func__, m));
2050 len -= m->m_len;
2051 uio->uio_resid -= m->m_len;
2052 sbfree(sb, m);
2053 n = m;
2054 }
2055 n->m_next = NULL;
2056 sb->sb_mb = m;
2057 sb->sb_lastrecord = sb->sb_mb;
2058 if (sb->sb_mb == NULL)
2059 SB_EMPTY_FIXUP(sb);
2060 }
2061 /* Copy the remainder. */
2062 if (len > 0) {
2063 KASSERT(sb->sb_mb != NULL,
2064 ("%s: len > 0 && sb->sb_mb empty", __func__));
2065
2066 m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
2067 if (m == NULL)
2068 len = 0; /* Don't flush data from sockbuf. */
2069 else
2070 uio->uio_resid -= len;
2071 if (*mp0 != NULL)
2072 m_cat(*mp0, m);
2073 else
2074 *mp0 = m;
2075 if (*mp0 == NULL) {
2076 error = ENOBUFS;
2077 goto out;
2078 }
2079 }
2080 } else {
2081 /* NB: Must unlock socket buffer as uiomove may sleep. */
2082 SOCKBUF_UNLOCK(sb);
2083 error = m_mbuftouio(uio, sb->sb_mb, len);
2084 SOCKBUF_LOCK(sb);
2085 if (error)
2086 goto out;
2087 }
2088 SBLASTRECORDCHK(sb);
2089 SBLASTMBUFCHK(sb);
2090
2091 /*
2092 * Remove the delivered data from the socket buffer unless we
2093 * were only peeking.
2094 */
2095 if (!(flags & MSG_PEEK)) {
2096 if (len > 0)
2097 sbdrop_locked(sb, len);
2098
2099 /* Notify protocol that we drained some data. */
2100 if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
2101 (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
2102 !(flags & MSG_SOCALLBCK))) {
2103 SOCKBUF_UNLOCK(sb);
2104 VNET_SO_ASSERT(so);
2105 (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
2106 SOCKBUF_LOCK(sb);
2107 }
2108 }
2109
2110 /*
2111 * For MSG_WAITALL we may have to loop again and wait for
2112 * more data to come in.
2113 */
2114 if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
2115 goto restart;
2116out:
2117 SOCKBUF_LOCK_ASSERT(sb);
2118 SBLASTRECORDCHK(sb);
2119 SBLASTMBUFCHK(sb);
2120 SOCKBUF_UNLOCK(sb);
2121 sbunlock(sb);
2122 return (error);
2123}
2124
2125/*
2126 * Optimized version of soreceive() for simple datagram cases from userspace.
2127 * Unlike in the stream case, we're able to drop a datagram if copyout()
2128 * fails, and because we handle datagrams atomically, we don't need to use a
2129 * sleep lock to prevent I/O interlacing.
2130 */
2131int
2132soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
2133 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2134{
2135 struct mbuf *m, *m2;
2136 int flags, error;
2137 ssize_t len;
2138 struct protosw *pr = so->so_proto;
2139 struct mbuf *nextrecord;
2140
2141 if (psa != NULL)
2142 *psa = NULL;
2143 if (controlp != NULL)
2144 *controlp = NULL;
2145 if (flagsp != NULL)
2146 flags = *flagsp &~ MSG_EOR;
2147 else
2148 flags = 0;
2149
2150 /*
2151 * For any complicated cases, fall back to the full
2152 * soreceive_generic().
2153 */
2154 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
2155 return (soreceive_generic(so, psa, uio, mp0, controlp,
2156 flagsp));
2157
2158 /*
2159 * Enforce restrictions on use.
2160 */
2161 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
2162 ("soreceive_dgram: wantrcvd"));
2163 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
2164 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
2165 ("soreceive_dgram: SBS_RCVATMARK"));
2166 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
2167	    ("soreceive_dgram: PR_CONNREQUIRED"));
2168
2169 /*
2170 * Loop blocking while waiting for a datagram.
2171 */
2172 SOCKBUF_LOCK(&so->so_rcv);
2173 while ((m = so->so_rcv.sb_mb) == NULL) {
2174 KASSERT(sbavail(&so->so_rcv) == 0,
2175 ("soreceive_dgram: sb_mb NULL but sbavail %u",
2176 sbavail(&so->so_rcv)));
2177 if (so->so_error) {
2178 error = so->so_error;
2179 so->so_error = 0;
2180 SOCKBUF_UNLOCK(&so->so_rcv);
2181 return (error);
2182 }
2183 if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
2184 uio->uio_resid == 0) {
2185 SOCKBUF_UNLOCK(&so->so_rcv);
2186 return (0);
2187 }
2188 if ((so->so_state & SS_NBIO) ||
2189 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2190 SOCKBUF_UNLOCK(&so->so_rcv);
2191 return (EWOULDBLOCK);
2192 }
2193 SBLASTRECORDCHK(&so->so_rcv);
2194 SBLASTMBUFCHK(&so->so_rcv);
2195 error = sbwait(&so->so_rcv);
2196 if (error) {
2197 SOCKBUF_UNLOCK(&so->so_rcv);
2198 return (error);
2199 }
2200 }
2201 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2202
2203 if (uio->uio_td)
2204 uio->uio_td->td_ru.ru_msgrcv++;
2205 SBLASTRECORDCHK(&so->so_rcv);
2206 SBLASTMBUFCHK(&so->so_rcv);
2207 nextrecord = m->m_nextpkt;
2208 if (nextrecord == NULL) {
2209 KASSERT(so->so_rcv.sb_lastrecord == m,
2210 ("soreceive_dgram: lastrecord != m"));
2211 }
2212
2213 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
2214 ("soreceive_dgram: m_nextpkt != nextrecord"));
2215
2216 /*
2217 * Pull 'm' and its chain off the front of the packet queue.
2218 */
2219 so->so_rcv.sb_mb = NULL;
2220 sockbuf_pushsync(&so->so_rcv, nextrecord);
2221
2222 /*
2223 * Walk 'm's chain and free that many bytes from the socket buffer.
2224 */
2225 for (m2 = m; m2 != NULL; m2 = m2->m_next)
2226 sbfree(&so->so_rcv, m2);
2227
2228 /*
2229 * Do a few last checks before we let go of the lock.
2230 */
2231 SBLASTRECORDCHK(&so->so_rcv);
2232 SBLASTMBUFCHK(&so->so_rcv);
2233 SOCKBUF_UNLOCK(&so->so_rcv);
2234
2235 if (pr->pr_flags & PR_ADDR) {
2236 KASSERT(m->m_type == MT_SONAME,
2237 ("m->m_type == %d", m->m_type));
2238 if (psa != NULL)
2239 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
2240 M_NOWAIT);
2241 m = m_free(m);
2242 }
2243 if (m == NULL) {
2244 /* XXXRW: Can this happen? */
2245 return (0);
2246 }
2247
2248 /*
2249 * Packet to copyout() is now in 'm' and it is disconnected from the
2250 * queue.
2251 *
2252 * Process one or more MT_CONTROL mbufs present before any data mbufs
2253 * in the first mbuf chain on the socket buffer. We call into the
2254 * protocol to perform externalization (or freeing if controlp ==
2255 * NULL).
2256 */
2257 if (m->m_type == MT_CONTROL) {
2258 struct mbuf *cm = NULL, *cmn;
2259 struct mbuf **cme = &cm;
2260
2261 do {
2262 m2 = m->m_next;
2263 m->m_next = NULL;
2264 *cme = m;
2265 cme = &(*cme)->m_next;
2266 m = m2;
2267 } while (m != NULL && m->m_type == MT_CONTROL);
2268 while (cm != NULL) {
2269 cmn = cm->m_next;
2270 cm->m_next = NULL;
2271 if (pr->pr_domain->dom_externalize != NULL) {
2272 error = (*pr->pr_domain->dom_externalize)
2273 (cm, controlp, flags);
2274 } else if (controlp != NULL)
2275 *controlp = cm;
2276 else
2277 m_freem(cm);
2278 if (controlp != NULL) {
2279 while (*controlp != NULL)
2280 controlp = &(*controlp)->m_next;
2281 }
2282 cm = cmn;
2283 }
2284 }
2285 KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data"));
2286
2287 while (m != NULL && uio->uio_resid > 0) {
2288 len = uio->uio_resid;
2289 if (len > m->m_len)
2290 len = m->m_len;
2291 error = uiomove(mtod(m, char *), (int)len, uio);
2292 if (error) {
2293 m_freem(m);
2294 return (error);
2295 }
2296 if (len == m->m_len)
2297 m = m_free(m);
2298 else {
2299 m->m_data += len;
2300 m->m_len -= len;
2301 }
2302 }
2303 if (m != NULL)
2304 flags |= MSG_TRUNC;
2305 m_freem(m);
2306 if (flagsp != NULL)
2307 *flagsp |= flags;
2308 return (0);
2309}
2310
2311int
2312soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
2313 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2314{
2315 int error;
2316
2317 CURVNET_SET(so->so_vnet);
2318 error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
2319 controlp, flagsp));
2320 CURVNET_RESTORE();
2321 return (error);
2322}
2323
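/*
 * Illustrative sketch (not part of this file): the receive-side analogue of
 * the sosend() example earlier.  A uio describing a kernel buffer is filled
 * in and the flags word is passed by reference so that MSG_TRUNC or MSG_EOR
 * can be observed on return; "buf", "len", "so" and "td" are assumed to be
 * supplied by the caller.
 *
 *	struct uio auio;
 *	struct iovec aiov;
 *	int flags = MSG_DONTWAIT;
 *
 *	aiov.iov_base = buf;
 *	aiov.iov_len = len;
 *	auio.uio_iov = &aiov;
 *	auio.uio_iovcnt = 1;
 *	auio.uio_offset = 0;
 *	auio.uio_resid = len;
 *	auio.uio_segflg = UIO_SYSSPACE;
 *	auio.uio_rw = UIO_READ;
 *	auio.uio_td = td;
 *	error = soreceive(so, NULL, &auio, NULL, NULL, &flags);
 */
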
2324int
2325soshutdown(struct socket *so, int how)
2326{
2327 struct protosw *pr = so->so_proto;
2328 int error;
2329
2330 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2331 return (EINVAL);
2332
2333 CURVNET_SET(so->so_vnet);
2334 if (pr->pr_usrreqs->pru_flush != NULL)
2335 (*pr->pr_usrreqs->pru_flush)(so, how);
2336 if (how != SHUT_WR)
2337 sorflush(so);
2338 if (how != SHUT_RD) {
2339 error = (*pr->pr_usrreqs->pru_shutdown)(so);
2340 wakeup(&so->so_timeo);
2341 CURVNET_RESTORE();
2342 return (error);
2343 }
2344 wakeup(&so->so_timeo);
2345 CURVNET_RESTORE();
2346 return (0);
2347}
2348
2349void
2350sorflush(struct socket *so)
2351{
2352 struct sockbuf *sb = &so->so_rcv;
2353 struct protosw *pr = so->so_proto;
2354 struct sockbuf asb;
2355
2356 VNET_SO_ASSERT(so);
2357
2358 /*
2359 * In order to avoid calling dom_dispose with the socket buffer mutex
2360 * held, and in order to generally avoid holding the lock for a long
2361 * time, we make a copy of the socket buffer and clear the original
2362 * (except locks, state). The new socket buffer copy won't have
2363 * initialized locks so we can only call routines that won't use or
2364 * assert those locks.
2365 *
2366 * Dislodge threads currently blocked in receive and wait to acquire
2367 * a lock against other simultaneous readers before clearing the
2368 * socket buffer. Don't let our acquire be interrupted by a signal
2369 * despite any existing socket disposition on interruptable waiting.
2370 */
2371 socantrcvmore(so);
2372 (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
2373
2374 /*
2375 * Invalidate/clear most of the sockbuf structure, but leave selinfo
2376 * and mutex data unchanged.
2377 */
2378 SOCKBUF_LOCK(sb);
2379 bzero(&asb, offsetof(struct sockbuf, sb_startzero));
2380 bcopy(&sb->sb_startzero, &asb.sb_startzero,
2381 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2382 bzero(&sb->sb_startzero,
2383 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2384 SOCKBUF_UNLOCK(sb);
2385 sbunlock(sb);
2386
2387 /*
2388 * Dispose of special rights and flush the socket buffer. Don't call
2389 * any unsafe routines (that rely on locks being initialized) on asb.
2390 */
2391 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
2392 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
2393 sbrelease_internal(&asb, so);
2394}
2395
2396/*
2397 * Wrapper for Socket established helper hook.
2398 * Parameters: socket, context of the hook point, hook id.
2399 */
2400static inline int
2401hhook_run_socket(struct socket *so, void *hctx, int32_t h_id)
2402{
2403 struct socket_hhook_data hhook_data = {
2404 .so = so,
2405 .hctx = hctx,
2406 .m = NULL,
2407 .status = 0
2408 };
2409
2410 CURVNET_SET(so->so_vnet);
2411 HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd);
2412 CURVNET_RESTORE();
2413
2414 /* Ugly but needed, since hhooks return void for now */
2415 return (hhook_data.status);
2416}
2417
2418/*
2419 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
2420 * additional variant to handle the case where the option value needs to be
2421 * some kind of integer, but not a specific size. In addition to their use
2422 * here, these functions are also called by the protocol-level pr_ctloutput()
2423 * routines.
2424 */
2425int
2426sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2427{
2428 size_t valsize;
2429
2430 /*
2431 * If the user gives us more than we wanted, we ignore it, but if we
2432 * don't get the minimum length the caller wants, we return EINVAL.
2433 * On success, sopt->sopt_valsize is set to however much we actually
2434 * retrieved.
2435 */
2436 if ((valsize = sopt->sopt_valsize) < minlen)
2437		return (EINVAL);
2438 if (valsize > len)
2439 sopt->sopt_valsize = valsize = len;
2440
2441 if (sopt->sopt_td != NULL)
2442 return (copyin(sopt->sopt_val, buf, valsize));
2443
2444 bcopy(sopt->sopt_val, buf, valsize);
2445 return (0);
2446}
2447
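/*
 * Illustrative sketch (not part of this file): a protocol-level
 * pr_ctloutput() routine handling a SOPT_SET request for an integer-valued
 * option typically fetches the value with sooptcopyin(); the surrounding
 * switch and the option itself are assumptions for the example.
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *	... validate optval and apply it to protocol state ...
 */
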
2448/*
2449 * Kernel version of setsockopt(2).
2450 *
2451 * XXX: optlen is size_t, not socklen_t
2452 */
2453int
2454so_setsockopt(struct socket *so, int level, int optname, void *optval,
2455 size_t optlen)
2456{
2457 struct sockopt sopt;
2458
2459 sopt.sopt_level = level;
2460 sopt.sopt_name = optname;
2461 sopt.sopt_dir = SOPT_SET;
2462 sopt.sopt_val = optval;
2463 sopt.sopt_valsize = optlen;
2464 sopt.sopt_td = NULL;
2465 return (sosetopt(so, &sopt));
2466}
2467
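/*
 * Illustrative sketch (not part of this file): kernel code that owns a
 * socket can get the effect of setsockopt(2) by calling so_setsockopt()
 * directly, for example to enable SO_REUSEADDR:
 *
 *	int on = 1;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
 */
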
2468int
2469sosetopt(struct socket *so, struct sockopt *sopt)
2470{
2471 int error, optval;
2472 struct linger l;
2473 struct timeval tv;
2474 sbintime_t val;
2475 uint32_t val32;
2476#ifdef MAC
2477 struct mac extmac;
2478#endif
2479
2480 CURVNET_SET(so->so_vnet);
2481 error = 0;
2482 if (sopt->sopt_level != SOL_SOCKET) {
2483 if (so->so_proto->pr_ctloutput != NULL) {
2484 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2485 CURVNET_RESTORE();
2486 return (error);
2487 }
2488 error = ENOPROTOOPT;
2489 } else {
2490 switch (sopt->sopt_name) {
2491 case SO_ACCEPTFILTER:
2492 error = do_setopt_accept_filter(so, sopt);
2493 if (error)
2494 goto bad;
2495 break;
2496
2497 case SO_LINGER:
2498 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
2499 if (error)
2500 goto bad;
2501
2502 SOCK_LOCK(so);
2503 so->so_linger = l.l_linger;
2504 if (l.l_onoff)
2505 so->so_options |= SO_LINGER;
2506 else
2507 so->so_options &= ~SO_LINGER;
2508 SOCK_UNLOCK(so);
2509 break;
2510
2511 case SO_DEBUG:
2512 case SO_KEEPALIVE:
2513 case SO_DONTROUTE:
2514 case SO_USELOOPBACK:
2515 case SO_BROADCAST:
2516 case SO_REUSEADDR:
2517 case SO_REUSEPORT:
2518 case SO_OOBINLINE:
2519 case SO_TIMESTAMP:
2520 case SO_BINTIME:
2521 case SO_NOSIGPIPE:
2522 case SO_NO_DDP:
2523 case SO_NO_OFFLOAD:
2524 error = sooptcopyin(sopt, &optval, sizeof optval,
2525 sizeof optval);
2526 if (error)
2527 goto bad;
2528 SOCK_LOCK(so);
2529 if (optval)
2530 so->so_options |= sopt->sopt_name;
2531 else
2532 so->so_options &= ~sopt->sopt_name;
2533 SOCK_UNLOCK(so);
2534 break;
2535
2536 case SO_SETFIB:
2537 error = sooptcopyin(sopt, &optval, sizeof optval,
2538 sizeof optval);
2539 if (error)
2540 goto bad;
2541
2542 if (optval < 0 || optval >= rt_numfibs) {
2543 error = EINVAL;
2544 goto bad;
2545 }
2546 if (((so->so_proto->pr_domain->dom_family == PF_INET) ||
2547 (so->so_proto->pr_domain->dom_family == PF_INET6) ||
2548 (so->so_proto->pr_domain->dom_family == PF_ROUTE)))
2549 so->so_fibnum = optval;
2550 else
2551 so->so_fibnum = 0;
2552 break;
2553
2554 case SO_USER_COOKIE:
2555 error = sooptcopyin(sopt, &val32, sizeof val32,
2556 sizeof val32);
2557 if (error)
2558 goto bad;
2559 so->so_user_cookie = val32;
2560 break;
2561
2562 case SO_SNDBUF:
2563 case SO_RCVBUF:
2564 case SO_SNDLOWAT:
2565 case SO_RCVLOWAT:
2566 error = sooptcopyin(sopt, &optval, sizeof optval,
2567 sizeof optval);
2568 if (error)
2569 goto bad;
2570
2571 /*
2572 * Values < 1 make no sense for any of these options,
2573 * so disallow them.
2574 */
2575 if (optval < 1) {
2576 error = EINVAL;
2577 goto bad;
2578 }
2579
2580 switch (sopt->sopt_name) {
2581 case SO_SNDBUF:
2582 case SO_RCVBUF:
2583 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
2584 &so->so_snd : &so->so_rcv, (u_long)optval,
2585 so, curthread) == 0) {
2586 error = ENOBUFS;
2587 goto bad;
2588 }
2589 (sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
2590 &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
2591 break;
2592
2593 /*
2594 * Make sure the low-water is never greater than the
2595 * high-water.
2596 */
2597 case SO_SNDLOWAT:
2598 SOCKBUF_LOCK(&so->so_snd);
2599 so->so_snd.sb_lowat =
2600 (optval > so->so_snd.sb_hiwat) ?
2601 so->so_snd.sb_hiwat : optval;
2602 SOCKBUF_UNLOCK(&so->so_snd);
2603 break;
2604 case SO_RCVLOWAT:
2605 SOCKBUF_LOCK(&so->so_rcv);
2606 so->so_rcv.sb_lowat =
2607 (optval > so->so_rcv.sb_hiwat) ?
2608 so->so_rcv.sb_hiwat : optval;
2609 SOCKBUF_UNLOCK(&so->so_rcv);
2610 break;
2611 }
2612 break;
2613
2614 case SO_SNDTIMEO:
2615 case SO_RCVTIMEO:
2616#ifdef COMPAT_FREEBSD32
2617 if (SV_CURPROC_FLAG(SV_ILP32)) {
2618 struct timeval32 tv32;
2619
2620 error = sooptcopyin(sopt, &tv32, sizeof tv32,
2621 sizeof tv32);
2622 CP(tv32, tv, tv_sec);
2623 CP(tv32, tv, tv_usec);
2624 } else
2625#endif
2626 error = sooptcopyin(sopt, &tv, sizeof tv,
2627 sizeof tv);
2628 if (error)
2629 goto bad;
2630 if (tv.tv_sec < 0 || tv.tv_usec < 0 ||
2631 tv.tv_usec >= 1000000) {
2632 error = EDOM;
2633 goto bad;
2634 }
2635 if (tv.tv_sec > INT32_MAX)
2636 val = SBT_MAX;
2637 else
2638 val = tvtosbt(tv);
2639 switch (sopt->sopt_name) {
2640 case SO_SNDTIMEO:
2641 so->so_snd.sb_timeo = val;
2642 break;
2643 case SO_RCVTIMEO:
2644 so->so_rcv.sb_timeo = val;
2645 break;
2646 }
2647 break;
2648
2649 case SO_LABEL:
2650#ifdef MAC
2651 error = sooptcopyin(sopt, &extmac, sizeof extmac,
2652 sizeof extmac);
2653 if (error)
2654 goto bad;
2655 error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2656 so, &extmac);
2657#else
2658 error = EOPNOTSUPP;
2659#endif
2660 break;
2661
2662 default:
2663 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
2664 error = hhook_run_socket(so, sopt,
2665 HHOOK_SOCKET_OPT);
2666 else
2667 error = ENOPROTOOPT;
2668 break;
2669 }
2670 if (error == 0 && so->so_proto->pr_ctloutput != NULL)
2671 (void)(*so->so_proto->pr_ctloutput)(so, sopt);
2672 }
2673bad:
2674 CURVNET_RESTORE();
2675 return (error);
2676}
2677
2678/*
2679 * Helper routine for getsockopt.
2680 */
2681int
2682sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2683{
2684 int error;
2685 size_t valsize;
2686
2687 error = 0;
2688
2689 /*
2690 * Documented get behavior is that we always return a value, possibly
2691 * truncated to fit in the user's buffer. Traditional behavior is
2692 * that we always tell the user precisely how much we copied, rather
2693 * than something useful like the total amount we had available for
2694 * her. Note that this interface is not idempotent; the entire
2695	 * answer must be generated ahead of time.
2696 */
2697 valsize = min(len, sopt->sopt_valsize);
2698 sopt->sopt_valsize = valsize;
2699 if (sopt->sopt_val != NULL) {
2700 if (sopt->sopt_td != NULL)
2701 error = copyout(buf, sopt->sopt_val, valsize);
2702 else
2703 bcopy(buf, sopt->sopt_val, valsize);
2704 }
2705 return (error);
2706}
2707
2708int
2709sogetopt(struct socket *so, struct sockopt *sopt)
2710{
2711 int error, optval;
2712 struct linger l;
2713 struct timeval tv;
2714#ifdef MAC
2715 struct mac extmac;
2716#endif
2717
2718 CURVNET_SET(so->so_vnet);
2719 error = 0;
2720 if (sopt->sopt_level != SOL_SOCKET) {
2721 if (so->so_proto->pr_ctloutput != NULL)
2722 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2723 else
2724 error = ENOPROTOOPT;
2725 CURVNET_RESTORE();
2726 return (error);
2727 } else {
2728 switch (sopt->sopt_name) {
2729 case SO_ACCEPTFILTER:
2730 error = do_getopt_accept_filter(so, sopt);
2731 break;
2732
2733 case SO_LINGER:
2734 SOCK_LOCK(so);
2735 l.l_onoff = so->so_options & SO_LINGER;
2736 l.l_linger = so->so_linger;
2737 SOCK_UNLOCK(so);
2738 error = sooptcopyout(sopt, &l, sizeof l);
2739 break;
2740
2741 case SO_USELOOPBACK:
2742 case SO_DONTROUTE:
2743 case SO_DEBUG:
2744 case SO_KEEPALIVE:
2745 case SO_REUSEADDR:
2746 case SO_REUSEPORT:
2747 case SO_BROADCAST:
2748 case SO_OOBINLINE:
2749 case SO_ACCEPTCONN:
2750 case SO_TIMESTAMP:
2751 case SO_BINTIME:
2752 case SO_NOSIGPIPE:
2753 optval = so->so_options & sopt->sopt_name;
2754integer:
2755 error = sooptcopyout(sopt, &optval, sizeof optval);
2756 break;
2757
2758 case SO_TYPE:
2759 optval = so->so_type;
2760 goto integer;
2761
2762 case SO_PROTOCOL:
2763 optval = so->so_proto->pr_protocol;
2764 goto integer;
2765
2766 case SO_ERROR:
2767 SOCK_LOCK(so);
2768 optval = so->so_error;
2769 so->so_error = 0;
2770 SOCK_UNLOCK(so);
2771 goto integer;
2772
2773 case SO_SNDBUF:
2774 optval = so->so_snd.sb_hiwat;
2775 goto integer;
2776
2777 case SO_RCVBUF:
2778 optval = so->so_rcv.sb_hiwat;
2779 goto integer;
2780
2781 case SO_SNDLOWAT:
2782 optval = so->so_snd.sb_lowat;
2783 goto integer;
2784
2785 case SO_RCVLOWAT:
2786 optval = so->so_rcv.sb_lowat;
2787 goto integer;
2788
2789 case SO_SNDTIMEO:
2790 case SO_RCVTIMEO:
2791 tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ?
2792 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2793#ifdef COMPAT_FREEBSD32
2794 if (SV_CURPROC_FLAG(SV_ILP32)) {
2795 struct timeval32 tv32;
2796
2797 CP(tv, tv32, tv_sec);
2798 CP(tv, tv32, tv_usec);
2799 error = sooptcopyout(sopt, &tv32, sizeof tv32);
2800 } else
2801#endif
2802 error = sooptcopyout(sopt, &tv, sizeof tv);
2803 break;
2804
2805 case SO_LABEL:
2806#ifdef MAC
2807 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2808 sizeof(extmac));
2809 if (error)
2810 goto bad;
2811 error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
2812 so, &extmac);
2813 if (error)
2814 goto bad;
2815 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2816#else
2817 error = EOPNOTSUPP;
2818#endif
2819 break;
2820
2821 case SO_PEERLABEL:
2822#ifdef MAC
2823 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2824 sizeof(extmac));
2825 if (error)
2826 goto bad;
2827 error = mac_getsockopt_peerlabel(
2828 sopt->sopt_td->td_ucred, so, &extmac);
2829 if (error)
2830 goto bad;
2831 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2832#else
2833 error = EOPNOTSUPP;
2834#endif
2835 break;
2836
2837 case SO_LISTENQLIMIT:
2838 optval = so->so_qlimit;
2839 goto integer;
2840
2841 case SO_LISTENQLEN:
2842 optval = so->so_qlen;
2843 goto integer;
2844
2845 case SO_LISTENINCQLEN:
2846 optval = so->so_incqlen;
2847 goto integer;
2848
2849 default:
2850 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
2851 error = hhook_run_socket(so, sopt,
2852 HHOOK_SOCKET_OPT);
2853 else
2854 error = ENOPROTOOPT;
2855 break;
2856 }
2857 }
2858#ifdef MAC
2859bad:
2860#endif
2861 CURVNET_RESTORE();
2862 return (error);
2863}
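/*
 * Illustrative sketch, not part of this file: there is no so_getsockopt()
 * wrapper, so an in-kernel caller of sogetopt() builds the struct sockopt
 * itself, mirroring so_setsockopt() above but with SOPT_GET.
 */
static int
example_get_sndbuf(struct socket *so, int *sizep)
{
	struct sockopt sopt;

	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_SNDBUF;
	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_val = sizep;
	sopt.sopt_valsize = sizeof(*sizep);
	sopt.sopt_td = NULL;		/* kernel buffer, no copyout */
	return (sogetopt(so, &sopt));
}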
2864
2865int
2866soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2867{
2868 struct mbuf *m, *m_prev;
2869 int sopt_size = sopt->sopt_valsize;
2870
2871 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2872 if (m == NULL)
2873 return ENOBUFS;
2874 if (sopt_size > MLEN) {
2875 MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
2876 if ((m->m_flags & M_EXT) == 0) {
2877 m_free(m);
2878 return ENOBUFS;
2879 }
2880 m->m_len = min(MCLBYTES, sopt_size);
2881 } else {
2882 m->m_len = min(MLEN, sopt_size);
2883 }
2884 sopt_size -= m->m_len;
2885 *mp = m;
2886 m_prev = m;
2887
2888 while (sopt_size) {
2889 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2890 if (m == NULL) {
2891 m_freem(*mp);
2892 return ENOBUFS;
2893 }
2894 if (sopt_size > MLEN) {
2895 MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
2896 M_NOWAIT);
2897 if ((m->m_flags & M_EXT) == 0) {
2898 m_freem(m);
2899 m_freem(*mp);
2900 return ENOBUFS;
2901 }
2902 m->m_len = min(MCLBYTES, sopt_size);
2903 } else {
2904 m->m_len = min(MLEN, sopt_size);
2905 }
2906 sopt_size -= m->m_len;
2907 m_prev->m_next = m;
2908 m_prev = m;
2909 }
2910 return (0);
2911}
2912
2913int
2914soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2915{
2916 struct mbuf *m0 = m;
2917
2918 if (sopt->sopt_val == NULL)
2919 return (0);
2920 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2921 if (sopt->sopt_td != NULL) {
2922 int error;
2923
2924 error = copyin(sopt->sopt_val, mtod(m, char *),
2925 m->m_len);
2926 if (error != 0) {
2927 m_freem(m0);
2928 return(error);
2929 }
2930 } else
2931 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2932 sopt->sopt_valsize -= m->m_len;
2933 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2934 m = m->m_next;
2935 }
2936	if (m != NULL)	/* should have been allocated with enough space at ip6_sooptmcopyin() */
2937 panic("ip6_sooptmcopyin");
2938 return (0);
2939}
2940
2941int
2942soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2943{
2944 struct mbuf *m0 = m;
2945 size_t valsize = 0;
2946
2947 if (sopt->sopt_val == NULL)
2948 return (0);
2949 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2950 if (sopt->sopt_td != NULL) {
2951 int error;
2952
2953 error = copyout(mtod(m, char *), sopt->sopt_val,
2954 m->m_len);
2955 if (error != 0) {
2956 m_freem(m0);
2957 return(error);
2958 }
2959 } else
2960 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2961 sopt->sopt_valsize -= m->m_len;
2962 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2963 valsize += m->m_len;
2964 m = m->m_next;
2965 }
2966 if (m != NULL) {
2967		/* the user-supplied soopt buffer should be large enough */
2968 m_freem(m0);
2969 return(EINVAL);
2970 }
2971 sopt->sopt_valsize = valsize;
2972 return (0);
2973}
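/*
 * Illustrative sketch, not part of this file: the set-side round trip for
 * protocols that want the option value in an mbuf chain.  soopt_getm()
 * sizes the chain and soopt_mcopyin() fills it (freeing it on failure);
 * for the get direction the protocol fills the chain and calls
 * soopt_mcopyout() instead.  The function name is hypothetical.
 */
static int
example_setopt_via_mbufs(struct sockopt *sopt)
{
	struct mbuf *m = NULL;
	int error;

	error = soopt_getm(sopt, &m);	/* chain sized to sopt_valsize */
	if (error != 0)
		return (error);
	error = soopt_mcopyin(sopt, m);	/* frees the chain on error */
	if (error != 0)
		return (error);
	/* ... parse the option data now sitting in the chain ... */
	m_freem(m);
	return (0);
}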
2974
2975/*
2976 * sohasoutofband(): protocol notifies socket layer of the arrival of new
2977 * out-of-band data, which will then notify socket consumers.
2978 */
2979void
2980sohasoutofband(struct socket *so)
2981{
2982
2983 if (so->so_sigio != NULL)
2984 pgsigio(&so->so_sigio, SIGURG, 0);
2985 selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2986}
2987
2988int
2989sopoll(struct socket *so, int events, struct ucred *active_cred,
2990 struct thread *td)
2991{
2992
2993 /*
2994 * We do not need to set or assert curvnet as long as everyone uses
2995 * sopoll_generic().
2996 */
2997 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
2998 td));
2999}
3000
3001int
3002sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
3003 struct thread *td)
3004{
3005 int revents = 0;
3006
3007 SOCKBUF_LOCK(&so->so_snd);
3008 SOCKBUF_LOCK(&so->so_rcv);
3009 if (events & (POLLIN | POLLRDNORM))
3010 if (soreadabledata(so))
3011 revents |= events & (POLLIN | POLLRDNORM);
3012
3013 if (events & (POLLOUT | POLLWRNORM))
3014 if (sowriteable(so))
3015 revents |= events & (POLLOUT | POLLWRNORM);
3016
3017 if (events & (POLLPRI | POLLRDBAND))
3018 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
3019 revents |= events & (POLLPRI | POLLRDBAND);
3020
3021 if ((events & POLLINIGNEOF) == 0) {
3022 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3023 revents |= events & (POLLIN | POLLRDNORM);
3024 if (so->so_snd.sb_state & SBS_CANTSENDMORE)
3025 revents |= POLLHUP;
3026 }
3027 }
3028
3029 if (revents == 0) {
3030 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
3031 selrecord(td, &so->so_rcv.sb_sel);
3032 so->so_rcv.sb_flags |= SB_SEL;
3033 }
3034
3035 if (events & (POLLOUT | POLLWRNORM)) {
3036 selrecord(td, &so->so_snd.sb_sel);
3037 so->so_snd.sb_flags |= SB_SEL;
3038 }
3039 }
3040
3041 SOCKBUF_UNLOCK(&so->so_rcv);
3042 SOCKBUF_UNLOCK(&so->so_snd);
3043 return (revents);
3044}
3045
3046int
3047soo_kqfilter(struct file *fp, struct knote *kn)
3048{
3049 struct socket *so = kn->kn_fp->f_data;
3050 struct sockbuf *sb;
3051
3052 switch (kn->kn_filter) {
3053 case EVFILT_READ:
3054 if (so->so_options & SO_ACCEPTCONN)
3055 kn->kn_fop = &solisten_filtops;
3056 else
3057 kn->kn_fop = &soread_filtops;
3058 sb = &so->so_rcv;
3059 break;
3060 case EVFILT_WRITE:
3061 kn->kn_fop = &sowrite_filtops;
3062 sb = &so->so_snd;
3063 break;
3064 default:
3065 return (EINVAL);
3066 }
3067
3068 SOCKBUF_LOCK(sb);
3069 knlist_add(&sb->sb_sel.si_note, kn, 1);
3070 sb->sb_flags |= SB_KNOTE;
3071 SOCKBUF_UNLOCK(sb);
3072 return (0);
3073}
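/*
 * Illustrative userland counterpart, not part of this file: registering
 * EVFILT_READ with NOTE_LOWAT arms the filt_soread() path above, with the
 * kevent data field becoming kn_sdata, the low-water mark it checks.
 */
#if 0	/* userland illustration only */
#include <sys/event.h>

static void
example_wait_for_data(int kq, int sockfd)
{
	struct kevent kev;

	/* Deliver EVFILT_READ only once at least 128 bytes are readable. */
	EV_SET(&kev, sockfd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
}
#endif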
3074
3075/*
3076 * Some routines that return EOPNOTSUPP for entry points that are not
3077 * supported by a protocol. Fill in as needed.
3078 */
3079int
3080pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
3081{
3082
3083 return EOPNOTSUPP;
3084}
3085
3086int
3087pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
3088{
3089
3090 return EOPNOTSUPP;
3091}
3092
3093int
3094pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3095{
3096
3097 return EOPNOTSUPP;
3098}
3099
3100int
3101pru_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3102 struct thread *td)
3103{
3104
3105 return EOPNOTSUPP;
3106}
3107
3108int
3109pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3110{
3111
3112 return EOPNOTSUPP;
3113}
3114
3115int
3116pru_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3117 struct thread *td)
3118{
3119
3120 return EOPNOTSUPP;
3121}
3122
3123int
3124pru_connect2_notsupp(struct socket *so1, struct socket *so2)
3125{
3126
3127 return EOPNOTSUPP;
3128}
3129
3130int
3131pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
3132 struct ifnet *ifp, struct thread *td)
3133{
3134
3135 return EOPNOTSUPP;
3136}
3137
3138int
3139pru_disconnect_notsupp(struct socket *so)
3140{
3141
3142 return EOPNOTSUPP;
3143}
3144
3145int
3146pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
3147{
3148
3149 return EOPNOTSUPP;
3150}
3151
3152int
3153pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
3154{
3155
3156 return EOPNOTSUPP;
3157}
3158
3159int
3160pru_rcvd_notsupp(struct socket *so, int flags)
3161{
3162
3163 return EOPNOTSUPP;
3164}
3165
3166int
3167pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
3168{
3169
3170 return EOPNOTSUPP;
3171}
3172
3173int
3174pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
3175 struct sockaddr *addr, struct mbuf *control, struct thread *td)
3176{
3177
3178 return EOPNOTSUPP;
3179}
3180
3181int
3182pru_ready_notsupp(struct socket *so, struct mbuf *m, int count)
3183{
3184
3185 return (EOPNOTSUPP);
3186}
3187
3188/*
3189 * This isn't really a ``null'' operation, but it's the default one and
3190 * doesn't do anything destructive.
3191 */
3192int
3193pru_sense_null(struct socket *so, struct stat *sb)
3194{
3195
3196 sb->st_blksize = so->so_snd.sb_hiwat;
3197 return 0;
3198}
3199
3200int
3201pru_shutdown_notsupp(struct socket *so)
3202{
3203
3204 return EOPNOTSUPP;
3205}
3206
3207int
3208pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
3209{
3210
3211 return EOPNOTSUPP;
3212}
3213
3214int
3215pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
3216 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
3217{
3218
3219 return EOPNOTSUPP;
3220}
3221
3222int
3223pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
3224 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3225{
3226
3227 return EOPNOTSUPP;
3228}
3229
3230int
3231pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
3232 struct thread *td)
3233{
3234
3235 return EOPNOTSUPP;
3236}
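/*
 * Illustrative sketch, not part of this file: a protocol points the entry
 * points it does not implement at the stubs above when filling in its
 * pr_usrreqs.  The "example_usrreqs" name is hypothetical; only a few
 * representative fields are shown.
 */
static struct pr_usrreqs example_usrreqs = {
	.pru_accept =		pru_accept_notsupp,
	.pru_connect2 =		pru_connect2_notsupp,
	.pru_rcvoob =		pru_rcvoob_notsupp,
	.pru_shutdown =		pru_shutdown_notsupp,
	/* ... plus the entry points the protocol actually provides ... */
};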
3237
3238static void
3239filt_sordetach(struct knote *kn)
3240{
3241 struct socket *so = kn->kn_fp->f_data;
3242
3243 SOCKBUF_LOCK(&so->so_rcv);
3244 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
3245 if (knlist_empty(&so->so_rcv.sb_sel.si_note))
3246 so->so_rcv.sb_flags &= ~SB_KNOTE;
3247 SOCKBUF_UNLOCK(&so->so_rcv);
3248}
3249
3250/*ARGSUSED*/
3251static int
3252filt_soread(struct knote *kn, long hint)
3253{
3254 struct socket *so;
3255
3256 so = kn->kn_fp->f_data;
3257 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3258
3259 kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
3260 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3261 kn->kn_flags |= EV_EOF;
3262 kn->kn_fflags = so->so_error;
3263 return (1);
3264 } else if (so->so_error) /* temporary udp error */
3265 return (1);
3266
3267 if (kn->kn_sfflags & NOTE_LOWAT) {
3268 if (kn->kn_data >= kn->kn_sdata)
3269 return 1;
3270 } else {
3271 if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
3272 return 1;
3273 }
3274
3275	/* This hook returning non-zero indicates an event, not an error */
3276 return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD));
3277}
3278
3279static void
3280filt_sowdetach(struct knote *kn)
3281{
3282 struct socket *so = kn->kn_fp->f_data;
3283
3284 SOCKBUF_LOCK(&so->so_snd);
3285 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
3286 if (knlist_empty(&so->so_snd.sb_sel.si_note))
3287 so->so_snd.sb_flags &= ~SB_KNOTE;
3288 SOCKBUF_UNLOCK(&so->so_snd);
3289}
3290
3291/*ARGSUSED*/
3292static int
3293filt_sowrite(struct knote *kn, long hint)
3294{
3295 struct socket *so;
3296
3297 so = kn->kn_fp->f_data;
3298 SOCKBUF_LOCK_ASSERT(&so->so_snd);
3299 kn->kn_data = sbspace(&so->so_snd);
3300
3301 hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE);
3302
3303 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
3304 kn->kn_flags |= EV_EOF;
3305 kn->kn_fflags = so->so_error;
3306 return (1);
3307 } else if (so->so_error) /* temporary udp error */
3308 return (1);
3309 else if (((so->so_state & SS_ISCONNECTED) == 0) &&
3310 (so->so_proto->pr_flags & PR_CONNREQUIRED))
3311 return (0);
3312 else if (kn->kn_sfflags & NOTE_LOWAT)
3313 return (kn->kn_data >= kn->kn_sdata);
3314 else
3315 return (kn->kn_data >= so->so_snd.sb_lowat);
3316}
3317
3318/*ARGSUSED*/
3319static int
3320filt_solisten(struct knote *kn, long hint)
3321{
3322 struct socket *so = kn->kn_fp->f_data;
3323
3324 kn->kn_data = so->so_qlen;
3325 return (!TAILQ_EMPTY(&so->so_comp));
3326}
3327
3328int
3329socheckuid(struct socket *so, uid_t uid)
3330{
3331
3332 if (so == NULL)
3333 return (EPERM);
3334 if (so->so_cred->cr_uid != uid)
3335 return (EPERM);
3336 return (0);
3337}
3338
3339/*
3340 * These functions are used by protocols to notify the socket layer (and its
3341 * consumers) of state changes in the sockets driven by protocol-side events.
3342 */
3343
3344/*
3345 * Procedures to manipulate state flags of socket and do appropriate wakeups.
3346 *
3347 * Normal sequence from the active (originating) side is that
3348 * soisconnecting() is called during processing of connect() call, resulting
3349 * in an eventual call to soisconnected() if/when the connection is
3350 * established. When the connection is torn down soisdisconnecting() is
3351 * called during processing of disconnect() call, and soisdisconnected() is
3352 * called when the connection to the peer is totally severed. The semantics
3353 * of these routines are such that connectionless protocols can call
3354 * soisconnected() and soisdisconnected() only, bypassing the in-progress
3355 * calls when setting up a ``connection'' takes no time.
3356 *
3357 * From the passive side, a socket is created with two queues of sockets:
3358 * so_incomp for connections in progress and so_comp for connections already
3359 * made and awaiting user acceptance. As a protocol is preparing incoming
3360 * connections, it creates a socket structure queued on so_incomp by calling
3361 * sonewconn(). When the connection is established, soisconnected() is
3362 * called, and transfers the socket structure to so_comp, making it available
3363 * to accept().
3364 *
3365 * If a socket is closed with sockets on either so_incomp or so_comp, these
3366 * sockets are dropped.
3367 *
3368 * If higher-level protocols are implemented in the kernel, the wakeups done
3369 * here will sometimes cause software-interrupt process scheduling.
3370 */
3371void
3372soisconnecting(struct socket *so)
3373{
3374
3375 SOCK_LOCK(so);
3376 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
3377 so->so_state |= SS_ISCONNECTING;
3378 SOCK_UNLOCK(so);
3379}
3380
3381void
3382soisconnected(struct socket *so)
3383{
3384 struct socket *head;
3385 int ret;
3386
3387restart:
3388 ACCEPT_LOCK();
3389 SOCK_LOCK(so);
3390 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
3391 so->so_state |= SS_ISCONNECTED;
3392 head = so->so_head;
3393 if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
3394 if ((so->so_options & SO_ACCEPTFILTER) == 0) {
3395 SOCK_UNLOCK(so);
3396 TAILQ_REMOVE(&head->so_incomp, so, so_list);
3397 head->so_incqlen--;
3398 so->so_qstate &= ~SQ_INCOMP;
3399 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
3400 head->so_qlen++;
3401 so->so_qstate |= SQ_COMP;
3402 ACCEPT_UNLOCK();
3403 sorwakeup(head);
3404 wakeup_one(&head->so_timeo);
3405 } else {
3406 ACCEPT_UNLOCK();
3407 soupcall_set(so, SO_RCV,
3408 head->so_accf->so_accept_filter->accf_callback,
3409 head->so_accf->so_accept_filter_arg);
3410 so->so_options &= ~SO_ACCEPTFILTER;
3411 ret = head->so_accf->so_accept_filter->accf_callback(so,
3412 head->so_accf->so_accept_filter_arg, M_NOWAIT);
3413 if (ret == SU_ISCONNECTED)
3414 soupcall_clear(so, SO_RCV);
3415 SOCK_UNLOCK(so);
3416 if (ret == SU_ISCONNECTED)
3417 goto restart;
3418 }
3419 return;
3420 }
3421 SOCK_UNLOCK(so);
3422 ACCEPT_UNLOCK();
3423 wakeup(&so->so_timeo);
3424 sorwakeup(so);
3425 sowwakeup(so);
3426}
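/*
 * Illustrative sketch, not part of this file: as the comment block above
 * notes, a connectionless protocol's connect path can skip the
 * in-progress state entirely and call soisconnected() directly.  The
 * function name and PCB handling are hypothetical.
 */
static int
example_datagram_connect(struct socket *so, struct sockaddr *nam,
    struct thread *td)
{
	/* ... record nam as the default peer in the protocol's PCB ... */
	soisconnected(so);		/* no SS_ISCONNECTING phase needed */
	return (0);
}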
3427
3428void
3429soisdisconnecting(struct socket *so)
3430{
3431
3432 /*
3433 * Note: This code assumes that SOCK_LOCK(so) and
3434 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3435 */
3436 SOCKBUF_LOCK(&so->so_rcv);
3437 so->so_state &= ~SS_ISCONNECTING;
3438 so->so_state |= SS_ISDISCONNECTING;
3439 so->so_rcv.sb_state |= SBS_CANTRCVMORE;
3440 sorwakeup_locked(so);
3441 SOCKBUF_LOCK(&so->so_snd);
3442 so->so_snd.sb_state |= SBS_CANTSENDMORE;
3443 sowwakeup_locked(so);
3444 wakeup(&so->so_timeo);
3445}
3446
3447void
3448soisdisconnected(struct socket *so)
3449{
3450
3451 /*
3452 * Note: This code assumes that SOCK_LOCK(so) and
3453 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3454 */
3455 SOCKBUF_LOCK(&so->so_rcv);
3456 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
3457 so->so_state |= SS_ISDISCONNECTED;
3458 so->so_rcv.sb_state |= SBS_CANTRCVMORE;
3459 sorwakeup_locked(so);
3460 SOCKBUF_LOCK(&so->so_snd);
3461 so->so_snd.sb_state |= SBS_CANTSENDMORE;
3462 sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
3463 sowwakeup_locked(so);
3464 wakeup(&so->so_timeo);
3465}
3466
3467/*
3468 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
3469 */
3470struct sockaddr *
3471sodupsockaddr(const struct sockaddr *sa, int mflags)
3472{
3473 struct sockaddr *sa2;
3474
3475 sa2 = malloc(sa->sa_len, M_SONAME, mflags);
3476 if (sa2)
3477 bcopy(sa, sa2, sa->sa_len);
3478 return sa2;
3479}
3480
3481/*
3482 * Register per-socket buffer upcalls.
3483 */
3484void
3485soupcall_set(struct socket *so, int which,
3486 int (*func)(struct socket *, void *, int), void *arg)
3487{
3488 struct sockbuf *sb;
3489
3490 switch (which) {
3491 case SO_RCV:
3492 sb = &so->so_rcv;
3493 break;
3494 case SO_SND:
3495 sb = &so->so_snd;
3496 break;
3497 default:
3498 panic("soupcall_set: bad which");
3499 }
3500 SOCKBUF_LOCK_ASSERT(sb);
3501#if 0
3502 /* XXX: accf_http actually wants to do this on purpose. */
3503 KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall"));
3504#endif
3505 sb->sb_upcall = func;
3506 sb->sb_upcallarg = arg;
3507 sb->sb_flags |= SB_UPCALL;
3508}
3509
3510void
3511soupcall_clear(struct socket *so, int which)
3512{
3513 struct sockbuf *sb;
3514
3515 switch (which) {
3516 case SO_RCV:
3517 sb = &so->so_rcv;
3518 break;
3519 case SO_SND:
3520 sb = &so->so_snd;
3521 break;
3522 default:
3523 panic("soupcall_clear: bad which");
3524 }
3525 SOCKBUF_LOCK_ASSERT(sb);
3526 KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear"));
3527 sb->sb_upcall = NULL;
3528 sb->sb_upcallarg = NULL;
3529 sb->sb_flags &= ~SB_UPCALL;
3530}
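/*
 * Illustrative sketch, not part of this file: registering a receive-buffer
 * upcall, the interface accept filters use via soisconnected() above.  The
 * upcall runs with the socket-buffer lock held; SU_OK and SU_ISCONNECTED
 * are its defined return values.  The "example_" names are hypothetical.
 */
static int
example_rcv_upcall(struct socket *so, void *arg, int waitflag)
{
	/* ... look at so->so_rcv and wake whatever is waiting on it ... */
	return (SU_OK);
}

static void
example_register_upcall(struct socket *so)
{
	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, example_rcv_upcall, NULL);
	SOCKBUF_UNLOCK(&so->so_rcv);
}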
3531
3532/*
3533 * Create an external-format (``xsocket'') structure using the information in
3534 * the kernel-format socket structure pointed to by so. This is done to
3535 * reduce the spew of irrelevant information over this interface, to isolate
3536 * user code from changes in the kernel structure, and potentially to provide
3537 * information-hiding if we decide that some of this information should be
3538 * hidden from users.
3539 */
3540void
3541sotoxsocket(struct socket *so, struct xsocket *xso)
3542{
3543
3544 xso->xso_len = sizeof *xso;
3545 xso->xso_so = so;
3546 xso->so_type = so->so_type;
3547 xso->so_options = so->so_options;
3548 xso->so_linger = so->so_linger;
3549 xso->so_state = so->so_state;
3550 xso->so_pcb = so->so_pcb;
3551 xso->xso_protocol = so->so_proto->pr_protocol;
3552 xso->xso_family = so->so_proto->pr_domain->dom_family;
3553 xso->so_qlen = so->so_qlen;
3554 xso->so_incqlen = so->so_incqlen;
3555 xso->so_qlimit = so->so_qlimit;
3556 xso->so_timeo = so->so_timeo;
3557 xso->so_error = so->so_error;
3558 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
3559 xso->so_oobmark = so->so_oobmark;
3560 sbtoxsockbuf(&so->so_snd, &xso->so_snd);
3561 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
3562 xso->so_uid = so->so_cred->cr_uid;
3563}
3564
3565
3566/*
3567 * Socket accessor functions to provide external consumers with
3568 * a safe interface to socket state
3569 *
3570 */
3571
3572void
3573so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *),
3574 void *arg)
3575{
3576
3577 TAILQ_FOREACH(so, &so->so_comp, so_list)
3578 func(so, arg);
3579}
3580
3581struct sockbuf *
3582so_sockbuf_rcv(struct socket *so)
3583{
3584
3585 return (&so->so_rcv);
3586}
3587
3588struct sockbuf *
3589so_sockbuf_snd(struct socket *so)
3590{
3591
3592 return (&so->so_snd);
3593}
3594
3595int
3596so_state_get(const struct socket *so)
3597{
3598
3599 return (so->so_state);
3600}
3601
3602void
3603so_state_set(struct socket *so, int val)
3604{
3605
3606 so->so_state = val;
3607}
3608
3609int
3610so_options_get(const struct socket *so)
3611{
3612
3613 return (so->so_options);
3614}
3615
3616void
3617so_options_set(struct socket *so, int val)
3618{
3619
3620 so->so_options = val;
3621}
3622
3623int
3624so_error_get(const struct socket *so)
3625{
3626
3627 return (so->so_error);
3628}
3629
3630void
3631so_error_set(struct socket *so, int val)
3632{
3633
3634 so->so_error = val;
3635}
3636
3637int
3638so_linger_get(const struct socket *so)
3639{
3640
3641 return (so->so_linger);
3642}
3643
3644void
3645so_linger_set(struct socket *so, int val)
3646{
3647
3648 so->so_linger = val;
3649}
3650
3651struct protosw *
3652so_protosw_get(const struct socket *so)
3653{
3654
3655 return (so->so_proto);
3656}
3657
3658void
3659so_protosw_set(struct socket *so, struct protosw *val)
3660{
3661
3662 so->so_proto = val;
3663}
3664
3665void
3666so_sorwakeup(struct socket *so)
3667{
3668
3669 sorwakeup(so);
3670}
3671
3672void
3673so_sowwakeup(struct socket *so)
3674{
3675
3676 sowwakeup(so);
3677}
3678
3679void
3680so_sorwakeup_locked(struct socket *so)
3681{
3682
3683 sorwakeup_locked(so);
3684}
3685
3686void
3687so_sowwakeup_locked(struct socket *so)
3688{
3689
3690 sowwakeup_locked(so);
3691}
3692
3693void
3694so_lock(struct socket *so)
3695{
3696
3697 SOCK_LOCK(so);
3698}
3699
3700void
3701so_unlock(struct socket *so)
3702{
3703
3704 SOCK_UNLOCK(so);
3705}