1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
4 * Copyright (c) 2004 The FreeBSD Foundation
5 * Copyright (c) 2004-2006 Robert N. M. Watson
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 4. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
32 */
33
34/*
35 * Comments on the socket life cycle:
36 *
37 * soalloc() sets up socket layer state for a socket, called only by
38 * socreate() and sonewconn(). Socket layer private.
39 *
40 * sodealloc() tears down socket layer state for a socket, called only by
41 * sofree() and sonewconn(). Socket layer private.
42 *
43 * pru_attach() associates protocol layer state with an allocated socket;
44 * called only once, may fail, aborting socket allocation. This is called
45 * from socreate() and sonewconn(). Socket layer private.
46 *
47 * pru_detach() disassociates protocol layer state from an attached socket,
48 * and will be called exactly once for sockets in which pru_attach() has
49 * been successfully called. If pru_attach() returned an error,
50 * pru_detach() will not be called. Socket layer private.
51 *
52 * socreate() creates a socket and attaches protocol state. This is a public
53 * interface that may be used by socket layer consumers to create new
54 * sockets.
55 *
56 * sonewconn() creates a socket and attaches protocol state. This is a
57 * public interface that may be used by protocols to create a new socket
58 * when a new connection is received; the socket will then be available for
59 * accept() on the listen socket.
60 *
61 * soclose() destroys a socket after possibly waiting for it to disconnect.
62 * This is a public interface that socket consumers should use to close and
63 * release a socket when done with it.
64 *
65 * soabort() destroys a socket without waiting for it to disconnect (used
66 * only for incoming connections that are already partially or fully
67 * connected). This is used internally by the socket layer when clearing
68 * listen socket queues (due to overflow or close on the listen socket), but
69 * is also a public interface protocols may use to abort connections in
70 * their incomplete listen queues should they no longer be required. Sockets
71 * placed in completed connection listen queues should not be aborted.
72 *
73 * sofree() will free a socket and its protocol state if all references on
74 * the socket have been released, and is the public interface to attempt to
75 * free a socket when a reference is removed. This is a socket layer private
76 * interface.
77 *
78 * NOTE: In addition to socreate() and soclose(), which provide a single
79 * socket reference to the consumer to be managed as required, there are two
80 * calls to explicitly manage socket references: soref() and sorele().
81 * Currently, these are generally required only when transitioning a socket
82 * from a listen queue to a file descriptor, in order to prevent garbage
83 * collection of the socket at an untimely moment. For a number of reasons,
84 * these interfaces are not preferred, and should be avoided.
85 *
86 * XXXRW: The behavior of sockets after soclose() but before the last
87 * sorele() is poorly defined. We can probably entirely eliminate them with
88 * a little work, since consumers are managing references anyway.
89 */
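/*
 * Illustrative sketch (not part of this file's build): a minimal in-kernel
 * consumer of the public life-cycle interfaces above. Error handling is
 * abbreviated, and the destination sockaddr is assumed to be prepared by
 * the caller.
 */
#if 0
static int
example_socket_consumer(struct sockaddr *sa, struct thread *td)
{
	struct socket *so;
	int error;

	/* socreate() hands back a socket holding a single reference. */
	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
	    td->td_ucred, td);
	if (error)
		return (error);
	error = soconnect(so, sa, td);
	if (error) {
		/* soclose() releases the reference socreate() gave us. */
		(void)soclose(so);
		return (error);
	}
	return (soclose(so));
}
#endif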
90
91#include <sys/cdefs.h>
92__FBSDID("$FreeBSD: head/sys/kern/uipc_socket.c 157359 2006-04-01 10:45:52Z rwatson $");
93
94#include "opt_inet.h"
95#include "opt_mac.h"
96#include "opt_zero.h"
97#include "opt_compat.h"
98
99#include <sys/param.h>
100#include <sys/systm.h>
101#include <sys/fcntl.h>
102#include <sys/limits.h>
103#include <sys/lock.h>
104#include <sys/mac.h>
105#include <sys/malloc.h>
106#include <sys/mbuf.h>
107#include <sys/mutex.h>
108#include <sys/domain.h>
109#include <sys/file.h> /* for struct knote */
110#include <sys/kernel.h>
111#include <sys/event.h>
112#include <sys/poll.h>
113#include <sys/proc.h>
114#include <sys/protosw.h>
115#include <sys/socket.h>
116#include <sys/socketvar.h>
117#include <sys/resourcevar.h>
118#include <sys/signalvar.h>
119#include <sys/sysctl.h>
120#include <sys/uio.h>
121#include <sys/jail.h>
122
123#include <vm/uma.h>
124
125#ifdef COMPAT_IA32
126#include <sys/mount.h>
127#include <compat/freebsd32/freebsd32.h>
128
129extern struct sysentvec ia32_freebsd_sysvec;
130#endif
131
132static int soreceive_rcvoob(struct socket *so, struct uio *uio,
133 int flags);
134
135static void filt_sordetach(struct knote *kn);
136static int filt_soread(struct knote *kn, long hint);
137static void filt_sowdetach(struct knote *kn);
138static int filt_sowrite(struct knote *kn, long hint);
139static int filt_solisten(struct knote *kn, long hint);
140
141static struct filterops solisten_filtops =
142 { 1, NULL, filt_sordetach, filt_solisten };
143static struct filterops soread_filtops =
144 { 1, NULL, filt_sordetach, filt_soread };
145static struct filterops sowrite_filtops =
146 { 1, NULL, filt_sowdetach, filt_sowrite };
147
148uma_zone_t socket_zone;
149so_gen_t so_gencnt; /* generation count for sockets */
150
151MALLOC_DEFINE(M_SONAME, "soname", "socket name");
152MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
153
154SYSCTL_DECL(_kern_ipc);
155
156static int somaxconn = SOMAXCONN;
157static int somaxconn_sysctl(SYSCTL_HANDLER_ARGS);
158/* XXX: we don't have SYSCTL_USHORT */
159SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
160 0, sizeof(int), somaxconn_sysctl, "I", "Maximum pending socket connection "
161 "queue size");
162static int numopensockets;
163SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
164 &numopensockets, 0, "Number of open sockets");
165#ifdef ZERO_COPY_SOCKETS
166/* These aren't static because they're used in other files. */
167int so_zero_copy_send = 1;
168int so_zero_copy_receive = 1;
169SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
170 "Zero copy controls");
171SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
172 &so_zero_copy_receive, 0, "Enable zero copy receive");
173SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
174 &so_zero_copy_send, 0, "Enable zero copy send");
175#endif /* ZERO_COPY_SOCKETS */
176
177/*
178 * accept_mtx locks down per-socket fields relating to accept queues. See
179 * socketvar.h for an annotation of the protected fields of struct socket.
180 */
181struct mtx accept_mtx;
182MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
183
184/*
185 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
186 * so_gencnt field.
187 */
188static struct mtx so_global_mtx;
189MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
190
191/*
192 * Socket operation routines.
193 * These routines are called by the routines in
194 * sys_socket.c or from a system process, and
195 * implement the semantics of socket operations by
196 * switching out to the protocol specific routines.
197 */
198
199/*
200 * Get a socket structure from our zone, and initialize it.
201 * Note that it would probably be better to allocate socket
202 * and PCB at the same time, but I'm not convinced that all
203 * the protocols can be easily modified to do this.
204 *
205 * soalloc() returns a socket with a ref count of 0.
206 */
207struct socket *
208soalloc(int mflags)
209{
210 struct socket *so;
211
212 so = uma_zalloc(socket_zone, mflags | M_ZERO);
213 if (so != NULL) {
214#ifdef MAC
215 if (mac_init_socket(so, mflags) != 0) {
216 uma_zfree(socket_zone, so);
217 return (NULL);
218 }
219#endif
220 SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
221 SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
222 TAILQ_INIT(&so->so_aiojobq);
223 mtx_lock(&so_global_mtx);
224 so->so_gencnt = ++so_gencnt;
225 ++numopensockets;
226 mtx_unlock(&so_global_mtx);
227 }
228 return (so);
229}
230
231/*
232 * socreate returns a socket with a ref count of 1. The socket should be
233 * closed with soclose().
234 */
235int
236socreate(dom, aso, type, proto, cred, td)
237 int dom;
238 struct socket **aso;
239 int type;
240 int proto;
241 struct ucred *cred;
242 struct thread *td;
243{
244 struct protosw *prp;
245 struct socket *so;
246 int error;
247
248 if (proto)
249 prp = pffindproto(dom, proto, type);
250 else
251 prp = pffindtype(dom, type);
252
253 if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
254 prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
255 return (EPROTONOSUPPORT);
256
257 if (jailed(cred) && jail_socket_unixiproute_only &&
258 prp->pr_domain->dom_family != PF_LOCAL &&
259 prp->pr_domain->dom_family != PF_INET &&
260 prp->pr_domain->dom_family != PF_ROUTE) {
261 return (EPROTONOSUPPORT);
262 }
263
264 if (prp->pr_type != type)
265 return (EPROTOTYPE);
266 so = soalloc(M_WAITOK);
267 if (so == NULL)
268 return (ENOBUFS);
269
270 TAILQ_INIT(&so->so_incomp);
271 TAILQ_INIT(&so->so_comp);
272 so->so_type = type;
273 so->so_cred = crhold(cred);
274 so->so_proto = prp;
275#ifdef MAC
276 mac_create_socket(cred, so);
277#endif
278 knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
279 NULL, NULL, NULL);
280 knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
281 NULL, NULL, NULL);
282 so->so_count = 1;
283 error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
284 if (error) {
285 ACCEPT_LOCK();
286 SOCK_LOCK(so);
287 so->so_state |= SS_NOFDREF;
288 sorele(so);
289 return (error);
290 }
291 *aso = so;
292 return (0);
293}
294
295int
296sobind(so, nam, td)
297 struct socket *so;
298 struct sockaddr *nam;
299 struct thread *td;
300{
301
302 return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
303}
304
305void
306sodealloc(struct socket *so)
307{
308
309 KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
310 KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
311
312 mtx_lock(&so_global_mtx);
313 so->so_gencnt = ++so_gencnt;
314 mtx_unlock(&so_global_mtx);
315 if (so->so_rcv.sb_hiwat)
316 (void)chgsbsize(so->so_cred->cr_uidinfo,
317 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
318 if (so->so_snd.sb_hiwat)
319 (void)chgsbsize(so->so_cred->cr_uidinfo,
320 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
321#ifdef INET
322 /* remove accept filter if one is present. */
323 if (so->so_accf != NULL)
324 do_setopt_accept_filter(so, NULL);
325#endif
326#ifdef MAC
327 mac_destroy_socket(so);
328#endif
329 crfree(so->so_cred);
330 SOCKBUF_LOCK_DESTROY(&so->so_snd);
331 SOCKBUF_LOCK_DESTROY(&so->so_rcv);
332 uma_zfree(socket_zone, so);
333 mtx_lock(&so_global_mtx);
334 --numopensockets;
335 mtx_unlock(&so_global_mtx);
336}
337
338/*
339 * solisten() transitions a socket from a non-listening state to a listening
340 * state, but can also be used to update the listen queue depth on an
341 * existing listen socket. The protocol will call back into the socket
342 * layer using solisten_proto_check() and solisten_proto() to check and set
343 * socket-layer listen state. Callbacks are used so that the protocol can
344 * acquire both protocol and socket layer locks in whatever order is required
345 * by the protocol.
346 *
347 * Protocol implementors are advised to hold the socket lock across the
348 * socket-layer test and set to avoid races at the socket layer.
349 */
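/*
 * Illustrative sketch (not part of this file): the call-back pattern a
 * protocol's pru_listen implementation is expected to follow. The pcb lock
 * macros are hypothetical; each protocol substitutes its own locking.
 */
#if 0
static int
example_pru_listen(struct socket *so, int backlog, struct thread *td)
{
	int error;

	EXAMPLE_PCB_LOCK(so->so_pcb);	/* protocol lock first... */
	SOCK_LOCK(so);			/* ...then the socket lock */
	error = solisten_proto_check(so);
	if (error == 0)
		solisten_proto(so, backlog);	/* test and set atomically */
	SOCK_UNLOCK(so);
	EXAMPLE_PCB_UNLOCK(so->so_pcb);
	return (error);
}
#endif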
350int
351solisten(so, backlog, td)
352 struct socket *so;
353 int backlog;
354 struct thread *td;
355{
356
357 return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
358}
359
360int
361solisten_proto_check(so)
362 struct socket *so;
363{
364
365 SOCK_LOCK_ASSERT(so);
366
367 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
368 SS_ISDISCONNECTING))
369 return (EINVAL);
370 return (0);
371}
372
373void
374solisten_proto(so, backlog)
375 struct socket *so;
376 int backlog;
377{
378
379 SOCK_LOCK_ASSERT(so);
380
381 if (backlog < 0 || backlog > somaxconn)
382 backlog = somaxconn;
383 so->so_qlimit = backlog;
384 so->so_options |= SO_ACCEPTCONN;
385}
386
387/*
388 * Attempt to free a socket. This should really be sotryfree().
389 *
390 * We free the socket if the protocol is no longer interested in the socket,
391 * there's no file descriptor reference, and the refcount is 0. While the
392 * calling macro sotryfree() tests the refcount, sofree() has to test it
393 * again as it's possible to race with an accept()ing thread if the socket is
394 * in a listen queue of a listen socket, as being in the listen queue
395 * doesn't elevate the reference count. sofree() acquires the accept mutex
396 * early for this test in order to avoid that race.
397 */
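/*
 * Illustrative sketch (not part of this file): how callers reach sofree()
 * via the sotryfree() macro mentioned above. Both locks are acquired first;
 * sotryfree() invokes sofree() only if so_count is already 0, and otherwise
 * simply drops the two locks.
 */
#if 0
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sotryfree(so);		/* may free 'so'; do not touch it afterward */
#endif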
398void
399sofree(so)
400 struct socket *so;
401{
402 struct socket *head;
403
404 ACCEPT_LOCK_ASSERT();
405 SOCK_LOCK_ASSERT(so);
406
407 if (so->so_pcb != NULL || (so->so_state & SS_NOFDREF) == 0 ||
408 so->so_count != 0 || (so->so_state & SS_PROTOREF)) {
409 SOCK_UNLOCK(so);
410 ACCEPT_UNLOCK();
411 return;
412 }
413
414 head = so->so_head;
415 if (head != NULL) {
416 KASSERT((so->so_qstate & SQ_COMP) != 0 ||
417 (so->so_qstate & SQ_INCOMP) != 0,
418 ("sofree: so_head != NULL, but neither SQ_COMP nor "
419 "SQ_INCOMP"));
420 KASSERT((so->so_qstate & SQ_COMP) == 0 ||
421 (so->so_qstate & SQ_INCOMP) == 0,
422 ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
423 /*
424 * accept(2) is responsible for draining the completed
425 * connection queue and freeing those sockets, so
426 * we just return here if this socket is currently
427 * on the completed connection queue. Otherwise,
428 * accept(2) may hang after select(2) has indicated
429 * that a listening socket was ready. If it's an
430 * incomplete connection, we remove it from the queue
431 * and free it; otherwise, it won't be released until
432 * the listening socket is closed.
433 */
434 if ((so->so_qstate & SQ_COMP) != 0) {
435 SOCK_UNLOCK(so);
436 ACCEPT_UNLOCK();
437 return;
438 }
439 TAILQ_REMOVE(&head->so_incomp, so, so_list);
440 head->so_incqlen--;
441 so->so_qstate &= ~SQ_INCOMP;
442 so->so_head = NULL;
443 }
444 KASSERT((so->so_qstate & SQ_COMP) == 0 &&
445 (so->so_qstate & SQ_INCOMP) == 0,
446 ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
447 so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
448 SOCK_UNLOCK(so);
449 ACCEPT_UNLOCK();
450 SOCKBUF_LOCK(&so->so_snd);
451 so->so_snd.sb_flags |= SB_NOINTR;
452 (void)sblock(&so->so_snd, M_WAITOK);
453 /*
454 * socantsendmore_locked() drops the socket buffer mutex so that it
455 * can safely perform wakeups. Re-acquire the mutex before
456 * continuing.
457 */
458 socantsendmore_locked(so);
459 SOCKBUF_LOCK(&so->so_snd);
460 sbunlock(&so->so_snd);
461 sbrelease_locked(&so->so_snd, so);
462 SOCKBUF_UNLOCK(&so->so_snd);
463 sorflush(so);
464 knlist_destroy(&so->so_rcv.sb_sel.si_note);
465 knlist_destroy(&so->so_snd.sb_sel.si_note);
466 sodealloc(so);
467}
468
469/*
470 * Close a socket on last file table reference removal.
471 * Initiate disconnect if connected.
472 * Free socket when disconnect complete.
473 *
474 * This function will sorele() the socket. Note that soclose() may be
475 * called prior to the ref count reaching zero. The actual socket
476 * structure will not be freed until the ref count reaches zero.
477 */
478int
479soclose(so)
480 struct socket *so;
481{
482 int error = 0;
483
484 KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
485
486 funsetown(&so->so_sigio);
487 if (so->so_options & SO_ACCEPTCONN) {
488 struct socket *sp;
489 ACCEPT_LOCK();
490 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
491 TAILQ_REMOVE(&so->so_incomp, sp, so_list);
492 so->so_incqlen--;
493 sp->so_qstate &= ~SQ_INCOMP;
494 sp->so_head = NULL;
495 ACCEPT_UNLOCK();
496 soabort(sp);
497 ACCEPT_LOCK();
498 }
499 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
500 TAILQ_REMOVE(&so->so_comp, sp, so_list);
501 so->so_qlen--;
502 sp->so_qstate &= ~SQ_COMP;
503 sp->so_head = NULL;
504 ACCEPT_UNLOCK();
505 soabort(sp);
506 ACCEPT_LOCK();
507 }
508 ACCEPT_UNLOCK();
509 }
510 if (so->so_pcb == NULL)
511 goto discard;
512 if (so->so_state & SS_ISCONNECTED) {
513 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
514 error = sodisconnect(so);
515 if (error)
516 goto drop;
517 }
518 if (so->so_options & SO_LINGER) {
519 if ((so->so_state & SS_ISDISCONNECTING) &&
520 (so->so_state & SS_NBIO))
521 goto drop;
522 while (so->so_state & SS_ISCONNECTED) {
523 error = tsleep(&so->so_timeo,
524 PSOCK | PCATCH, "soclos", so->so_linger * hz);
525 if (error)
526 break;
527 }
528 }
529 }
530drop:
531 if (so->so_pcb != NULL) {
532 int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
533 if (error == 0)
534 error = error2;
535 }
536discard:
537 ACCEPT_LOCK();
538 SOCK_LOCK(so);
539 KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
540 so->so_state |= SS_NOFDREF;
541 sorele(so);
542 return (error);
543}
544
545/*
546 * soabort() must not be called with any socket locks held, as it calls
547 * into the protocol, which will call back into the socket code causing
548 * it to acquire additional socket locks that may cause recursion or lock
549 * order reversals.
550 */
551void
552soabort(so)
553 struct socket *so;
554{
555 int error;
556
557 error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
558 if (error) {
559 ACCEPT_LOCK();
560 SOCK_LOCK(so);
561 sotryfree(so); /* note: does not decrement the ref count */
562 }
563}
564
565int
566soaccept(so, nam)
567 struct socket *so;
568 struct sockaddr **nam;
569{
570 int error;
571
572 SOCK_LOCK(so);
573 KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
574 so->so_state &= ~SS_NOFDREF;
575 SOCK_UNLOCK(so);
576 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
577 return (error);
578}
579
580int
581soconnect(so, nam, td)
582 struct socket *so;
583 struct sockaddr *nam;
584 struct thread *td;
585{
586 int error;
587
588 if (so->so_options & SO_ACCEPTCONN)
589 return (EOPNOTSUPP);
590 /*
591 * If protocol is connection-based, can only connect once.
592 * Otherwise, if connected, try to disconnect first.
593 * This allows user to disconnect by connecting to, e.g.,
594 * a null address.
595 */
596 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
597 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
598 (error = sodisconnect(so)))) {
599 error = EISCONN;
600 } else {
601 /*
602 * Prevent accumulated error from previous connection
603 * from biting us.
604 */
605 so->so_error = 0;
606 error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
607 }
608
609 return (error);
610}
611
612int
613soconnect2(so1, so2)
614 struct socket *so1;
615 struct socket *so2;
616{
617
618 return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
619}
620
621int
622sodisconnect(so)
623 struct socket *so;
624{
625 int error;
626
627 if ((so->so_state & SS_ISCONNECTED) == 0)
628 return (ENOTCONN);
629 if (so->so_state & SS_ISDISCONNECTING)
630 return (EALREADY);
631 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
632 return (error);
633}
634
635#ifdef ZERO_COPY_SOCKETS
636struct so_zerocopy_stats{
637 int size_ok;
638 int align_ok;
639 int found_ifp;
640};
641struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
642#include <netinet/in.h>
643#include <net/route.h>
644#include <netinet/in_pcb.h>
645#include <vm/vm.h>
646#include <vm/vm_page.h>
647#include <vm/vm_object.h>
648#endif /*ZERO_COPY_SOCKETS*/
649
650/*
651 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
652 * all of the data referenced by the uio. If desired, it uses zero-copy.
653 * *space will be updated to reflect data copied in.
654 *
655 * NB: If atomic I/O is requested, the caller must already have checked that
656 * space can hold resid bytes.
657 *
658 * NB: In the event of an error, the caller may need to free the partial
659 * chain pointed to by *retmp. The contents of both *uio and *space may be
660 * modified even in the case of an error.
661 */
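/*
 * Illustrative sketch (not part of this file) of the caller contract
 * described above: on error, any partial chain handed back through the
 * mbuf pointer must be freed by the caller.
 */
#if 0
	error = sosend_copyin(uio, &top, atomic, &space, flags);
	if (error) {
		if (top != NULL)
			m_freem(top);	/* free the partial chain */
		goto out;
	}
#endif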
662static int
663sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
664 int flags)
665{
666 struct mbuf *m, **mp, *top;
667 long len, resid;
668 int error;
669#ifdef ZERO_COPY_SOCKETS
670 int cow_send;
671#endif
672
673 *retmp = top = NULL;
674 mp = &top;
675 len = 0;
676 resid = uio->uio_resid;
677 error = 0;
678 do {
679#ifdef ZERO_COPY_SOCKETS
680 cow_send = 0;
681#endif /* ZERO_COPY_SOCKETS */
682 if (resid >= MINCLSIZE) {
683#ifdef ZERO_COPY_SOCKETS
684 if (top == NULL) {
685 MGETHDR(m, M_TRYWAIT, MT_DATA);
686 if (m == NULL) {
687 error = ENOBUFS;
688 goto out;
689 }
690 m->m_pkthdr.len = 0;
691 m->m_pkthdr.rcvif = NULL;
692 } else {
693 MGET(m, M_TRYWAIT, MT_DATA);
694 if (m == NULL) {
695 error = ENOBUFS;
696 goto out;
697 }
698 }
699 if (so_zero_copy_send &&
700 resid>=PAGE_SIZE &&
701 *space>=PAGE_SIZE &&
702 uio->uio_iov->iov_len>=PAGE_SIZE) {
703 so_zerocp_stats.size_ok++;
704 so_zerocp_stats.align_ok++;
705 cow_send = socow_setup(m, uio);
706 len = cow_send;
707 }
708 if (!cow_send) {
709 MCLGET(m, M_TRYWAIT);
710 if ((m->m_flags & M_EXT) == 0) {
711 m_free(m);
712 m = NULL;
713 } else {
714 len = min(min(MCLBYTES, resid),
715 *space);
716 }
717 }
718#else /* ZERO_COPY_SOCKETS */
719 if (top == NULL) {
720 m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
721 m->m_pkthdr.len = 0;
722 m->m_pkthdr.rcvif = NULL;
723 } else
724 m = m_getcl(M_TRYWAIT, MT_DATA, 0);
725 len = min(min(MCLBYTES, resid), *space);
726#endif /* ZERO_COPY_SOCKETS */
727 } else {
728 if (top == NULL) {
729 m = m_gethdr(M_TRYWAIT, MT_DATA);
730 m->m_pkthdr.len = 0;
731 m->m_pkthdr.rcvif = NULL;
732
733 len = min(min(MHLEN, resid), *space);
734 /*
735 * For datagram protocols, leave room
736 * for protocol headers in first mbuf.
737 */
738 if (atomic && m && len < MHLEN)
739 MH_ALIGN(m, len);
740 } else {
741 m = m_get(M_TRYWAIT, MT_DATA);
742 len = min(min(MLEN, resid), *space);
743 }
744 }
745 if (m == NULL) {
746 error = ENOBUFS;
747 goto out;
748 }
749
750 *space -= len;
751#ifdef ZERO_COPY_SOCKETS
752 if (cow_send)
753 error = 0;
754 else
755#endif /* ZERO_COPY_SOCKETS */
756 error = uiomove(mtod(m, void *), (int)len, uio);
757 resid = uio->uio_resid;
758 m->m_len = len;
759 *mp = m;
760 top->m_pkthdr.len += len;
761 if (error)
762 goto out;
763 mp = &m->m_next;
764 if (resid <= 0) {
765 if (flags & MSG_EOR)
766 top->m_flags |= M_EOR;
767 break;
768 }
769 } while (*space > 0 && atomic);
770out:
771 *retmp = top;
772 return (error);
773}
774
775#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
776
777int
778sosend_dgram(so, addr, uio, top, control, flags, td)
779 struct socket *so;
780 struct sockaddr *addr;
781 struct uio *uio;
782 struct mbuf *top;
783 struct mbuf *control;
784 int flags;
785 struct thread *td;
786{
787 long space, resid;
788 int clen = 0, error, dontroute;
789 int atomic = sosendallatonce(so) || top;
790
791 KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
792 KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
793 ("sosend_dgram: !PR_ATOMIC"));
794
795 if (uio != NULL)
796 resid = uio->uio_resid;
797 else
798 resid = top->m_pkthdr.len;
799 /*
800 * In theory resid should be unsigned.
801 * However, space must be signed, as it might be less than 0
802 * if we over-committed, and we must use a signed comparison
803 * of space and resid. On the other hand, a negative resid
804 * causes us to loop sending 0-length segments to the protocol.
805 *
806 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
807 * type sockets since that's an error.
808 */
809 if (resid < 0) {
810 error = EINVAL;
811 goto out;
812 }
813
814 dontroute =
815 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
816 if (td != NULL)
817 td->td_proc->p_stats->p_ru.ru_msgsnd++;
818 if (control != NULL)
819 clen = control->m_len;
820
821 SOCKBUF_LOCK(&so->so_snd);
822 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
823 SOCKBUF_UNLOCK(&so->so_snd);
824 error = EPIPE;
825 goto out;
826 }
827 if (so->so_error) {
828 error = so->so_error;
829 so->so_error = 0;
830 SOCKBUF_UNLOCK(&so->so_snd);
831 goto out;
832 }
833 if ((so->so_state & SS_ISCONNECTED) == 0) {
834 /*
835 * `sendto' and `sendmsg' are allowed on a connection-
836 * based socket if it supports implied connect.
837 * Return ENOTCONN if not connected and no address is
838 * supplied.
839 */
840 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
841 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
842 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
843 !(resid == 0 && clen != 0)) {
844 SOCKBUF_UNLOCK(&so->so_snd);
845 error = ENOTCONN;
846 goto out;
847 }
848 } else if (addr == NULL) {
849 if (so->so_proto->pr_flags & PR_CONNREQUIRED)
850 error = ENOTCONN;
851 else
852 error = EDESTADDRREQ;
853 SOCKBUF_UNLOCK(&so->so_snd);
854 goto out;
855 }
856 }
857
858 /*
859 * Do we need MSG_OOB support in SOCK_DGRAM? Signs here may be a
860 * problem and need fixing.
861 */
862 space = sbspace(&so->so_snd);
863 if (flags & MSG_OOB)
864 space += 1024;
865 space -= clen;
866 if (resid > space) {
867 error = EMSGSIZE;
868 goto out;
869 }
870 SOCKBUF_UNLOCK(&so->so_snd);
871 if (uio == NULL) {
872 resid = 0;
873 if (flags & MSG_EOR)
874 top->m_flags |= M_EOR;
875 } else {
876 error = sosend_copyin(uio, &top, atomic, &space, flags);
877 if (error)
878 goto out;
879 resid = uio->uio_resid;
880 }
881 KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
882 /*
883 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
884 * than with.
885 */
886 if (dontroute) {
887 SOCK_LOCK(so);
888 so->so_options |= SO_DONTROUTE;
889 SOCK_UNLOCK(so);
890 }
891 /*
892 * XXX all the SBS_CANTSENDMORE checks previously
893 * done could be out of date. We could have received
894 * a reset packet in an interrupt or maybe we slept
895 * while doing page faults in uiomove() etc. We could
896 * probably recheck again inside the locking protection
897 * here, but there are probably other places that this
898 * also happens. We must rethink this.
899 */
900 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
901 (flags & MSG_OOB) ? PRUS_OOB :
902 /*
903 * If the user set MSG_EOF, the protocol
904 * understands this flag, and there is nothing
905 * left to send, use PRU_SEND_EOF instead of PRU_SEND.
906 */
907 ((flags & MSG_EOF) &&
908 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
909 (resid <= 0)) ?
910 PRUS_EOF :
911 /* If there is more to send set PRUS_MORETOCOME */
912 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
913 top, addr, control, td);
914 if (dontroute) {
915 SOCK_LOCK(so);
916 so->so_options &= ~SO_DONTROUTE;
917 SOCK_UNLOCK(so);
918 }
919 clen = 0;
920 control = NULL;
921 top = NULL;
922out:
923 if (top != NULL)
924 m_freem(top);
925 if (control != NULL)
926 m_freem(control);
927 return (error);
928}
929
930/*
931 * Send on a socket.
932 * If send must go all at once and message is larger than
933 * send buffering, then hard error.
934 * Lock against other senders.
935 * If must go all at once and not enough room now, then
936 * inform user that this would block and do nothing.
937 * Otherwise, if nonblocking, send as much as possible.
938 * The data to be sent is described by "uio" if nonzero,
939 * otherwise by the mbuf chain "top" (which must be null
940 * if uio is not). Data provided in mbuf chain must be small
941 * enough to send all at once.
942 *
943 * Returns nonzero on error, timeout or signal; callers
944 * must check for short counts if EINTR/ERESTART are returned.
945 * Data and control buffers are freed on return.
946 */
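/*
 * Illustrative sketch (not part of this file): a caller handling the
 * short-count cases named above. 'resid' is assumed to hold the byte
 * count from before the call.
 */
#if 0
	error = sosend(so, NULL, uio, NULL, NULL, 0, td);
	if ((error == EINTR || error == ERESTART) &&
	    uio->uio_resid != resid)
		error = 0;	/* a partial write was performed */
#endif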
947#define snderr(errno) { error = (errno); goto release; }
948int
949sosend(so, addr, uio, top, control, flags, td)
950 struct socket *so;
951 struct sockaddr *addr;
952 struct uio *uio;
953 struct mbuf *top;
954 struct mbuf *control;
955 int flags;
956 struct thread *td;
957{
958 long space, resid;
959 int clen = 0, error, dontroute;
960 int atomic = sosendallatonce(so) || top;
961
962 if (uio != NULL)
963 resid = uio->uio_resid;
964 else
965 resid = top->m_pkthdr.len;
966 /*
967 * In theory resid should be unsigned.
968 * However, space must be signed, as it might be less than 0
969 * if we over-committed, and we must use a signed comparison
970 * of space and resid. On the other hand, a negative resid
971 * causes us to loop sending 0-length segments to the protocol.
972 *
973 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
974 * type sockets since that's an error.
975 */
976 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
977 error = EINVAL;
978 goto out;
979 }
980
981 dontroute =
982 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
983 (so->so_proto->pr_flags & PR_ATOMIC);
984 if (td != NULL)
985 td->td_proc->p_stats->p_ru.ru_msgsnd++;
986 if (control != NULL)
987 clen = control->m_len;
988
989 SOCKBUF_LOCK(&so->so_snd);
990restart:
991 SOCKBUF_LOCK_ASSERT(&so->so_snd);
992 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
993 if (error)
994 goto out_locked;
995 do {
996 SOCKBUF_LOCK_ASSERT(&so->so_snd);
997 if (so->so_snd.sb_state & SBS_CANTSENDMORE)
998 snderr(EPIPE);
999 if (so->so_error) {
1000 error = so->so_error;
1001 so->so_error = 0;
1002 goto release;
1003 }
1004 if ((so->so_state & SS_ISCONNECTED) == 0) {
1005 /*
1006 * `sendto' and `sendmsg' are allowed on a connection-
1007 * based socket if it supports implied connect.
1008 * Return ENOTCONN if not connected and no address is
1009 * supplied.
1010 */
1011 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1012 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1013 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1014 !(resid == 0 && clen != 0))
1015 snderr(ENOTCONN);
1016 } else if (addr == NULL)
1017 snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
1018 ENOTCONN : EDESTADDRREQ);
1019 }
1020 space = sbspace(&so->so_snd);
1021 if (flags & MSG_OOB)
1022 space += 1024;
1023 if ((atomic && resid > so->so_snd.sb_hiwat) ||
1024 clen > so->so_snd.sb_hiwat)
1025 snderr(EMSGSIZE);
1026 if (space < resid + clen &&
1027 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
1028 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO))
1029 snderr(EWOULDBLOCK);
1030 sbunlock(&so->so_snd);
1031 error = sbwait(&so->so_snd);
1032 if (error)
1033 goto out_locked;
1034 goto restart;
1035 }
1036 SOCKBUF_UNLOCK(&so->so_snd);
1037 space -= clen;
1038 do {
1039 if (uio == NULL) {
1040 resid = 0;
1041 if (flags & MSG_EOR)
1042 top->m_flags |= M_EOR;
1043 } else {
1044 error = sosend_copyin(uio, &top, atomic,
1045 &space, flags);
1046 if (error != 0) {
1047 SOCKBUF_LOCK(&so->so_snd);
1048 goto release;
1049 }
1050 resid = uio->uio_resid;
1051 }
1052 if (dontroute) {
1053 SOCK_LOCK(so);
1054 so->so_options |= SO_DONTROUTE;
1055 SOCK_UNLOCK(so);
1056 }
1057 /*
1058 * XXX all the SBS_CANTSENDMORE checks previously
1059 * done could be out of date. We could have received
1060 * a reset packet in an interrupt or maybe we slept
1061 * while doing page faults in uiomove() etc. We could
1062 * probably recheck again inside the locking protection
1063 * here, but there are probably other places that this
1064 * also happens. We must rethink this.
1065 */
1066 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1067 (flags & MSG_OOB) ? PRUS_OOB :
1068 /*
1069 * If the user set MSG_EOF, the protocol
1070 * understands this flag, and there is nothing
1071 * left to send, use PRU_SEND_EOF instead of PRU_SEND.
1072 */
1073 ((flags & MSG_EOF) &&
1074 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1075 (resid <= 0)) ?
1076 PRUS_EOF :
1077 /* If there is more to send set PRUS_MORETOCOME */
1078 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1079 top, addr, control, td);
1080 if (dontroute) {
1081 SOCK_LOCK(so);
1082 so->so_options &= ~SO_DONTROUTE;
1083 SOCK_UNLOCK(so);
1084 }
1085 clen = 0;
1086 control = NULL;
1087 top = NULL;
1088 if (error) {
1089 SOCKBUF_LOCK(&so->so_snd);
1090 goto release;
1091 }
1092 } while (resid && space > 0);
1093 SOCKBUF_LOCK(&so->so_snd);
1094 } while (resid);
1095
1096release:
1097 SOCKBUF_LOCK_ASSERT(&so->so_snd);
1098 sbunlock(&so->so_snd);
1099out_locked:
1100 SOCKBUF_LOCK_ASSERT(&so->so_snd);
1101 SOCKBUF_UNLOCK(&so->so_snd);
1102out:
1103 if (top != NULL)
1104 m_freem(top);
1105 if (control != NULL)
1106 m_freem(control);
1107 return (error);
1108}
1109#undef snderr
1110
1111/*
1112 * The part of soreceive() that implements reading non-inline out-of-band
1113 * data from a socket. For more complete comments, see soreceive(), from
1114 * which this code originated.
1115 *
1116 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
1117 * unable to return an mbuf chain to the caller.
1118 */
1119static int
1120soreceive_rcvoob(so, uio, flags)
1121 struct socket *so;
1122 struct uio *uio;
1123 int flags;
1124{
1125 struct protosw *pr = so->so_proto;
1126 struct mbuf *m;
1127 int error;
1128
1129 KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
1130
1131 m = m_get(M_TRYWAIT, MT_DATA);
1132 if (m == NULL)
1133 return (ENOBUFS);
1134 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
1135 if (error)
1136 goto bad;
1137 do {
1138#ifdef ZERO_COPY_SOCKETS
1139 if (so_zero_copy_receive) {
1140 int disposable;
1141
1142 if ((m->m_flags & M_EXT)
1143 && (m->m_ext.ext_type == EXT_DISPOSABLE))
1144 disposable = 1;
1145 else
1146 disposable = 0;
1147
1148 error = uiomoveco(mtod(m, void *),
1149 min(uio->uio_resid, m->m_len),
1150 uio, disposable);
1151 } else
1152#endif /* ZERO_COPY_SOCKETS */
1153 error = uiomove(mtod(m, void *),
1154 (int) min(uio->uio_resid, m->m_len), uio);
1155 m = m_free(m);
1156 } while (uio->uio_resid && error == 0 && m);
1157bad:
1158 if (m != NULL)
1159 m_freem(m);
1160 return (error);
1161}
1162
1163/*
1164 * Following replacement or removal of the first mbuf on the first mbuf chain
1165 * of a socket buffer, push necessary state changes back into the socket
1166 * buffer so that other consumers see the values consistently. 'nextrecord'
1167 * is the caller's locally stored value of the original value of
1168 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
1169 * NOTE: 'nextrecord' may be NULL.
1170 */
1171static __inline void
1172sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
1173{
1174
1175 SOCKBUF_LOCK_ASSERT(sb);
1176 /*
1177 * First, update for the new value of nextrecord. If necessary, make
1178 * it the first record.
1179 */
1180 if (sb->sb_mb != NULL)
1181 sb->sb_mb->m_nextpkt = nextrecord;
1182 else
1183 sb->sb_mb = nextrecord;
1184
1185 /*
1186 * Now update any dependent socket buffer fields to reflect the new
1187 * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the
1188 * addition of a second clause that takes care of the case where
1189 * sb_mb has been updated, but remains the last record.
1190 */
1191 if (sb->sb_mb == NULL) {
1192 sb->sb_mbtail = NULL;
1193 sb->sb_lastrecord = NULL;
1194 } else if (sb->sb_mb->m_nextpkt == NULL)
1195 sb->sb_lastrecord = sb->sb_mb;
1196}
1197
1198
1199/*
1200 * Implement receive operations on a socket.
1201 * We depend on the way that records are added to the sockbuf
1202 * by sbappend*. In particular, each record (mbufs linked through m_next)
1203 * must begin with an address if the protocol so specifies,
1204 * followed by an optional mbuf or mbufs containing ancillary data,
1205 * and then zero or more mbufs of data.
1206 * In order to avoid blocking network interrupts for the entire time here,
1207 * we splx() while doing the actual copy to user space.
1208 * Although the sockbuf is locked, new data may still be appended,
1209 * and thus we must maintain consistency of the sockbuf during that time.
1210 *
1211 * The caller may receive the data as a single mbuf chain by supplying
1212 * an mbuf **mp0 for use in returning the chain. The uio is then used
1213 * only for the count in uio_resid.
1214 */
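/*
 * Illustrative sketch (not part of this file): receiving a whole record as
 * an mbuf chain via mp0, with the uio supplying only the byte count. The
 * 1 MB limit is an arbitrary example value.
 */
#if 0
	struct uio auio;
	struct mbuf *m0 = NULL;
	int error;

	bzero(&auio, sizeof(auio));
	auio.uio_resid = 1024 * 1024;	/* upper bound on bytes received */
	auio.uio_td = td;
	error = soreceive(so, NULL, &auio, &m0, NULL, NULL);
	if (error == 0 && m0 != NULL)
		m_freem(m0);		/* the caller owns the chain */
#endif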
1215int
1216soreceive(so, psa, uio, mp0, controlp, flagsp)
1217 struct socket *so;
1218 struct sockaddr **psa;
1219 struct uio *uio;
1220 struct mbuf **mp0;
1221 struct mbuf **controlp;
1222 int *flagsp;
1223{
1224 struct mbuf *m, **mp;
1225 int flags, len, error, offset;
1226 struct protosw *pr = so->so_proto;
1227 struct mbuf *nextrecord;
1228 int moff, type = 0;
1229 int orig_resid = uio->uio_resid;
1230
1231 mp = mp0;
1232 if (psa != NULL)
1233 *psa = NULL;
1234 if (controlp != NULL)
1235 *controlp = NULL;
1236 if (flagsp != NULL)
1237 flags = *flagsp &~ MSG_EOR;
1238 else
1239 flags = 0;
1240 if (flags & MSG_OOB)
1241 return (soreceive_rcvoob(so, uio, flags));
1242 if (mp != NULL)
1243 *mp = NULL;
1244 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
1245 && uio->uio_resid)
1246 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
1247
1248 SOCKBUF_LOCK(&so->so_rcv);
1249restart:
1250 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1251 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1252 if (error)
1253 goto out;
1254
1255 m = so->so_rcv.sb_mb;
1256 /*
1257 * If we have less data than requested, block awaiting more
1258 * (subject to any timeout) if:
1259 * 1. the current count is less than the low water mark, or
1260 * 2. MSG_WAITALL is set, and it is possible to do the entire
1261 * receive operation at once if we block (resid <= hiwat).
1262 * 3. MSG_DONTWAIT is not set
1263 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1264 * we have to do the receive in sections, and thus risk returning
1265 * a short count if a timeout or signal occurs after we start.
1266 */
1267 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1268 so->so_rcv.sb_cc < uio->uio_resid) &&
1269 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
1270 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
1271 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1272 KASSERT(m != NULL || !so->so_rcv.sb_cc,
1273 ("receive: m == %p so->so_rcv.sb_cc == %u",
1274 m, so->so_rcv.sb_cc));
1275 if (so->so_error) {
1276 if (m != NULL)
1277 goto dontblock;
1278 error = so->so_error;
1279 if ((flags & MSG_PEEK) == 0)
1280 so->so_error = 0;
1281 goto release;
1282 }
1283 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1284 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1285 if (m)
1286 goto dontblock;
1287 else
1288 goto release;
1289 }
1290 for (; m != NULL; m = m->m_next)
1291 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1292 m = so->so_rcv.sb_mb;
1293 goto dontblock;
1294 }
1295 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1296 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1297 error = ENOTCONN;
1298 goto release;
1299 }
1300 if (uio->uio_resid == 0)
1301 goto release;
1302 if ((so->so_state & SS_NBIO) ||
1303 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1304 error = EWOULDBLOCK;
1305 goto release;
1306 }
1307 SBLASTRECORDCHK(&so->so_rcv);
1308 SBLASTMBUFCHK(&so->so_rcv);
1309 sbunlock(&so->so_rcv);
1310 error = sbwait(&so->so_rcv);
1311 if (error)
1312 goto out;
1313 goto restart;
1314 }
1315dontblock:
1316 /*
1317 * From this point onward, we maintain 'nextrecord' as a cache of the
1318 * pointer to the next record in the socket buffer. We must keep the
1319 * various socket buffer pointers and local stack versions of the
1320 * pointers in sync, pushing out modifications before dropping the
1321 * socket buffer mutex, and re-reading them when picking it up.
1322 *
1323 * Otherwise, we will race with the network stack appending new data
1324 * or records onto the socket buffer by using inconsistent/stale
1325 * versions of the field, possibly resulting in socket buffer
1326 * corruption.
1327 *
1328 * By holding the high-level sblock(), we prevent simultaneous
1329 * readers from pulling off the front of the socket buffer.
1330 */
1331 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1332 if (uio->uio_td)
1333 uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
1334 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1335 SBLASTRECORDCHK(&so->so_rcv);
1336 SBLASTMBUFCHK(&so->so_rcv);
1337 nextrecord = m->m_nextpkt;
1338 if (pr->pr_flags & PR_ADDR) {
1339 KASSERT(m->m_type == MT_SONAME,
1340 ("m->m_type == %d", m->m_type));
1341 orig_resid = 0;
1342 if (psa != NULL)
1343 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
1344 M_NOWAIT);
1345 if (flags & MSG_PEEK) {
1346 m = m->m_next;
1347 } else {
1348 sbfree(&so->so_rcv, m);
1349 so->so_rcv.sb_mb = m_free(m);
1350 m = so->so_rcv.sb_mb;
1351 sockbuf_pushsync(&so->so_rcv, nextrecord);
1352 }
1353 }
1354
1355 /*
1356 * Process one or more MT_CONTROL mbufs present before any data mbufs
1357 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
1358 * just copy the data; if !MSG_PEEK, we call into the protocol to
1359 * perform externalization (or freeing if controlp == NULL).
1360 */
1361 if (m != NULL && m->m_type == MT_CONTROL) {
1362 struct mbuf *cm = NULL, *cmn;
1363 struct mbuf **cme = &cm;
1364
1365 do {
1366 if (flags & MSG_PEEK) {
1367 if (controlp != NULL) {
1368 *controlp = m_copy(m, 0, m->m_len);
1369 controlp = &(*controlp)->m_next;
1370 }
1371 m = m->m_next;
1372 } else {
1373 sbfree(&so->so_rcv, m);
1374 so->so_rcv.sb_mb = m->m_next;
1375 m->m_next = NULL;
1376 *cme = m;
1377 cme = &(*cme)->m_next;
1378 m = so->so_rcv.sb_mb;
1379 }
1380 } while (m != NULL && m->m_type == MT_CONTROL);
1381 if ((flags & MSG_PEEK) == 0)
1382 sockbuf_pushsync(&so->so_rcv, nextrecord);
1383 while (cm != NULL) {
1384 cmn = cm->m_next;
1385 cm->m_next = NULL;
1386 if (pr->pr_domain->dom_externalize != NULL) {
1387 SOCKBUF_UNLOCK(&so->so_rcv);
1388 error = (*pr->pr_domain->dom_externalize)
1389 (cm, controlp);
1390 SOCKBUF_LOCK(&so->so_rcv);
1391 } else if (controlp != NULL)
1392 *controlp = cm;
1393 else
1394 m_freem(cm);
1395 if (controlp != NULL) {
1396 orig_resid = 0;
1397 while (*controlp != NULL)
1398 controlp = &(*controlp)->m_next;
1399 }
1400 cm = cmn;
1401 }
1402 if (so->so_rcv.sb_mb)
1403 nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1404 else
1405 nextrecord = NULL;
1406 orig_resid = 0;
1407 }
1408 if (m != NULL) {
1409 if ((flags & MSG_PEEK) == 0) {
1410 KASSERT(m->m_nextpkt == nextrecord,
1411 ("soreceive: post-control, nextrecord !sync"));
1412 if (nextrecord == NULL) {
1413 KASSERT(so->so_rcv.sb_mb == m,
1414 ("soreceive: post-control, sb_mb!=m"));
1415 KASSERT(so->so_rcv.sb_lastrecord == m,
1416 ("soreceive: post-control, lastrecord!=m"));
1417 }
1418 }
1419 type = m->m_type;
1420 if (type == MT_OOBDATA)
1421 flags |= MSG_OOB;
1422 } else {
1423 if ((flags & MSG_PEEK) == 0) {
1424 KASSERT(so->so_rcv.sb_mb == nextrecord,
1425 ("soreceive: sb_mb != nextrecord"));
1426 if (so->so_rcv.sb_mb == NULL) {
1427 KASSERT(so->so_rcv.sb_lastrecord == NULL,
1428 ("soreceive: sb_lastercord != NULL"));
1429 }
1430 }
1431 }
1432 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1433 SBLASTRECORDCHK(&so->so_rcv);
1434 SBLASTMBUFCHK(&so->so_rcv);
1435
1436 /*
1437 * Now continue to read any data mbufs off of the head of the socket
1438 * buffer until the read request is satisfied. Note that 'type' is
1439 * used to store the type of any mbuf reads that have happened so far
1440 * such that soreceive() can stop reading if the type changes, which
1441 * causes soreceive() to return only one of regular data and inline
1442 * out-of-band data in a single socket receive operation.
1443 */
1444 moff = 0;
1445 offset = 0;
1446 while (m != NULL && uio->uio_resid > 0 && error == 0) {
1447 /*
1448 * If the type of mbuf has changed since the last mbuf
1449 * examined ('type'), end the receive operation.
1450 */
1451 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1452 if (m->m_type == MT_OOBDATA) {
1453 if (type != MT_OOBDATA)
1454 break;
1455 } else if (type == MT_OOBDATA)
1456 break;
1457 else
1458 KASSERT(m->m_type == MT_DATA,
1459 ("m->m_type == %d", m->m_type));
1460 so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1461 len = uio->uio_resid;
1462 if (so->so_oobmark && len > so->so_oobmark - offset)
1463 len = so->so_oobmark - offset;
1464 if (len > m->m_len - moff)
1465 len = m->m_len - moff;
1466 /*
1467 * If mp is set, just pass back the mbufs.
1468 * Otherwise copy them out via the uio, then free.
1469 * The sockbuf must be consistent here (sb_mb points to the current
1470 * mbuf, nextrecord to the next record) when we drop priority;
1471 * we must note any additions to the sockbuf when we
1472 * block interrupts again.
1473 */
1474 if (mp == NULL) {
1475 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1476 SBLASTRECORDCHK(&so->so_rcv);
1477 SBLASTMBUFCHK(&so->so_rcv);
1478 SOCKBUF_UNLOCK(&so->so_rcv);
1479#ifdef ZERO_COPY_SOCKETS
1480 if (so_zero_copy_receive) {
1481 int disposable;
1482
1483 if ((m->m_flags & M_EXT)
1484 && (m->m_ext.ext_type == EXT_DISPOSABLE))
1485 disposable = 1;
1486 else
1487 disposable = 0;
1488
1489 error = uiomoveco(mtod(m, char *) + moff,
1490 (int)len, uio,
1491 disposable);
1492 } else
1493#endif /* ZERO_COPY_SOCKETS */
1494 error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1495 SOCKBUF_LOCK(&so->so_rcv);
1496 if (error)
1497 goto release;
1498 } else
1499 uio->uio_resid -= len;
1500 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1501 if (len == m->m_len - moff) {
1502 if (m->m_flags & M_EOR)
1503 flags |= MSG_EOR;
1504 if (flags & MSG_PEEK) {
1505 m = m->m_next;
1506 moff = 0;
1507 } else {
1508 nextrecord = m->m_nextpkt;
1509 sbfree(&so->so_rcv, m);
1510 if (mp != NULL) {
1511 *mp = m;
1512 mp = &m->m_next;
1513 so->so_rcv.sb_mb = m = m->m_next;
1514 *mp = NULL;
1515 } else {
1516 so->so_rcv.sb_mb = m_free(m);
1517 m = so->so_rcv.sb_mb;
1518 }
1519 sockbuf_pushsync(&so->so_rcv, nextrecord);
1520 SBLASTRECORDCHK(&so->so_rcv);
1521 SBLASTMBUFCHK(&so->so_rcv);
1522 }
1523 } else {
1524 if (flags & MSG_PEEK)
1525 moff += len;
1526 else {
1527 if (mp != NULL) {
1528 int copy_flag;
1529
1530 if (flags & MSG_DONTWAIT)
1531 copy_flag = M_DONTWAIT;
1532 else
1533 copy_flag = M_TRYWAIT;
1534 if (copy_flag == M_TRYWAIT)
1535 SOCKBUF_UNLOCK(&so->so_rcv);
1536 *mp = m_copym(m, 0, len, copy_flag);
1537 if (copy_flag == M_TRYWAIT)
1538 SOCKBUF_LOCK(&so->so_rcv);
1539 if (*mp == NULL) {
1540 /*
1541 * m_copym() couldn't allocate an mbuf.
1542 * Adjust uio_resid back (it was adjusted
1543 * down by len bytes, which we didn't end
1544 * up "copying" over).
1545 */
1546 uio->uio_resid += len;
1547 break;
1548 }
1549 }
1550 m->m_data += len;
1551 m->m_len -= len;
1552 so->so_rcv.sb_cc -= len;
1553 }
1554 }
1555 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1556 if (so->so_oobmark) {
1557 if ((flags & MSG_PEEK) == 0) {
1558 so->so_oobmark -= len;
1559 if (so->so_oobmark == 0) {
1560 so->so_rcv.sb_state |= SBS_RCVATMARK;
1561 break;
1562 }
1563 } else {
1564 offset += len;
1565 if (offset == so->so_oobmark)
1566 break;
1567 }
1568 }
1569 if (flags & MSG_EOR)
1570 break;
1571 /*
1572 * If the MSG_WAITALL flag is set (for non-atomic socket),
1573 * we must not quit until "uio->uio_resid == 0" or an error
1574 * termination. If a signal/timeout occurs, return
1575 * with a short count but without error.
1576 * Keep sockbuf locked against other readers.
1577 */
1578 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1579 !sosendallatonce(so) && nextrecord == NULL) {
1580 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1581 if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
1582 break;
1583 /*
1584 * Notify the protocol that some data has been
1585 * drained before blocking.
1586 */
1587 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb != NULL) {
1588 SOCKBUF_UNLOCK(&so->so_rcv);
1589 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1590 SOCKBUF_LOCK(&so->so_rcv);
1591 }
1592 SBLASTRECORDCHK(&so->so_rcv);
1593 SBLASTMBUFCHK(&so->so_rcv);
1594 error = sbwait(&so->so_rcv);
1595 if (error)
1596 goto release;
1597 m = so->so_rcv.sb_mb;
1598 if (m != NULL)
1599 nextrecord = m->m_nextpkt;
1600 }
1601 }
1602
1603 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1604 if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1605 flags |= MSG_TRUNC;
1606 if ((flags & MSG_PEEK) == 0)
1607 (void) sbdroprecord_locked(&so->so_rcv);
1608 }
1609 if ((flags & MSG_PEEK) == 0) {
1610 if (m == NULL) {
1611 /*
1612 * First part is an inline SB_EMPTY_FIXUP(). Second
1613 * part makes sure sb_lastrecord is up-to-date if
1614 * there is still data in the socket buffer.
1615 */
1616 so->so_rcv.sb_mb = nextrecord;
1617 if (so->so_rcv.sb_mb == NULL) {
1618 so->so_rcv.sb_mbtail = NULL;
1619 so->so_rcv.sb_lastrecord = NULL;
1620 } else if (nextrecord->m_nextpkt == NULL)
1621 so->so_rcv.sb_lastrecord = nextrecord;
1622 }
1623 SBLASTRECORDCHK(&so->so_rcv);
1624 SBLASTMBUFCHK(&so->so_rcv);
1625 /*
1626 * If soreceive() is being done from the socket callback, then
1627 * we don't need to generate an ACK to the peer to update the
1628 * window, since the ACK will be generated on return to TCP.
1629 */
1630 if (!(flags & MSG_SOCALLBCK) &&
1631 (pr->pr_flags & PR_WANTRCVD) && so->so_pcb) {
1632 SOCKBUF_UNLOCK(&so->so_rcv);
1633 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1634 SOCKBUF_LOCK(&so->so_rcv);
1635 }
1636 }
1637 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1638 if (orig_resid == uio->uio_resid && orig_resid &&
1639 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1640 sbunlock(&so->so_rcv);
1641 goto restart;
1642 }
1643
1644 if (flagsp != NULL)
1645 *flagsp |= flags;
1646release:
1647 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1648 sbunlock(&so->so_rcv);
1649out:
1650 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1651 SOCKBUF_UNLOCK(&so->so_rcv);
1652 return (error);
1653}
1654
1655int
1656soshutdown(so, how)
1657 struct socket *so;
1658 int how;
1659{
1660 struct protosw *pr = so->so_proto;
1661
1662 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
1663 return (EINVAL);
1664
1665 if (how != SHUT_WR)
1666 sorflush(so);
1667 if (how != SHUT_RD)
1668 return ((*pr->pr_usrreqs->pru_shutdown)(so));
1669 return (0);
1670}
1671
1672void
1673sorflush(so)
1674 struct socket *so;
1675{
1676 struct sockbuf *sb = &so->so_rcv;
1677 struct protosw *pr = so->so_proto;
1678 struct sockbuf asb;
1679
1680 /*
1681 * XXXRW: This is quite ugly. Previously, this code made a copy of
1682 * the socket buffer, then zero'd the original to clear the buffer
1683 * fields. However, with mutexes in the socket buffer, this causes
1684 * problems. We only clear the zeroable bits of the original;
1685 * however, we have to initialize and destroy the mutex in the copy
1686 * so that dom_dispose() and sbrelease() can lock it as needed.
1687 */
1688 SOCKBUF_LOCK(sb);
1689 sb->sb_flags |= SB_NOINTR;
1690 (void) sblock(sb, M_WAITOK);
1691 /*
1692 * socantrcvmore_locked() drops the socket buffer mutex so that it
1693 * can safely perform wakeups. Re-acquire the mutex before
1694 * continuing.
1695 */
1696 socantrcvmore_locked(so);
1697 SOCKBUF_LOCK(sb);
1698 sbunlock(sb);
1699 /*
1700 * Invalidate/clear most of the sockbuf structure, but leave
1701 * selinfo and mutex data unchanged.
1702 */
1703 bzero(&asb, offsetof(struct sockbuf, sb_startzero));
1704 bcopy(&sb->sb_startzero, &asb.sb_startzero,
1705 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
1706 bzero(&sb->sb_startzero,
1707 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
1708 SOCKBUF_UNLOCK(sb);
1709
1710 SOCKBUF_LOCK_INIT(&asb, "so_rcv");
1711 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
1712 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
1713 sbrelease(&asb, so);
1714 SOCKBUF_LOCK_DESTROY(&asb);
1715}
1716
1717/*
1718 * Perhaps this routine, and sooptcopyout(), below, ought to come in
1719 * an additional variant to handle the case where the option value needs
1720 * to be some kind of integer, but not a specific size.
1721 * In addition to their use here, these functions are also called by the
1722 * protocol-level pr_ctloutput() routines.
1723 */
1724int
1725sooptcopyin(sopt, buf, len, minlen)
1726 struct sockopt *sopt;
1727 void *buf;
1728 size_t len;
1729 size_t minlen;
1730{
1731 size_t valsize;
1732
1733 /*
1734 * If the user gives us more than we wanted, we ignore it,
1735 * but if we don't get the minimum length the caller
1736 * wants, we return EINVAL. On success, sopt->sopt_valsize
1737 * is set to however much we actually retrieved.
1738 */
1739 if ((valsize = sopt->sopt_valsize) < minlen)
1740 return EINVAL;
1741 if (valsize > len)
1742 sopt->sopt_valsize = valsize = len;
1743
1744 if (sopt->sopt_td != NULL)
1745 return (copyin(sopt->sopt_val, buf, valsize));
1746
1747 bcopy(sopt->sopt_val, buf, valsize);
1748 return (0);
1749}
1750
1751/*
1752 * Kernel version of setsockopt(2).
1753 * XXX: optlen is size_t, not socklen_t
1754 */
1755int
1756so_setsockopt(struct socket *so, int level, int optname, void *optval,
1757 size_t optlen)
1758{
1759 struct sockopt sopt;
1760
1761 sopt.sopt_level = level;
1762 sopt.sopt_name = optname;
1763 sopt.sopt_dir = SOPT_SET;
1764 sopt.sopt_val = optval;
1765 sopt.sopt_valsize = optlen;
1766 sopt.sopt_td = NULL;
1767 return (sosetopt(so, &sopt));
1768}
1769
1770int
1771sosetopt(so, sopt)
1772 struct socket *so;
1773 struct sockopt *sopt;
1774{
1775 int error, optval;
1776 struct linger l;
1777 struct timeval tv;
1778 u_long val;
1779#ifdef MAC
1780 struct mac extmac;
1781#endif
1782
1783 error = 0;
1784 if (sopt->sopt_level != SOL_SOCKET) {
1785 if (so->so_proto && so->so_proto->pr_ctloutput)
1786 return ((*so->so_proto->pr_ctloutput)
1787 (so, sopt));
1788 error = ENOPROTOOPT;
1789 } else {
1790 switch (sopt->sopt_name) {
1791#ifdef INET
1792 case SO_ACCEPTFILTER:
1793 error = do_setopt_accept_filter(so, sopt);
1794 if (error)
1795 goto bad;
1796 break;
1797#endif
1798 case SO_LINGER:
1799 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
1800 if (error)
1801 goto bad;
1802
1803 SOCK_LOCK(so);
1804 so->so_linger = l.l_linger;
1805 if (l.l_onoff)
1806 so->so_options |= SO_LINGER;
1807 else
1808 so->so_options &= ~SO_LINGER;
1809 SOCK_UNLOCK(so);
1810 break;
1811
1812 case SO_DEBUG:
1813 case SO_KEEPALIVE:
1814 case SO_DONTROUTE:
1815 case SO_USELOOPBACK:
1816 case SO_BROADCAST:
1817 case SO_REUSEADDR:
1818 case SO_REUSEPORT:
1819 case SO_OOBINLINE:
1820 case SO_TIMESTAMP:
1821 case SO_BINTIME:
1822 case SO_NOSIGPIPE:
1823 error = sooptcopyin(sopt, &optval, sizeof optval,
1824 sizeof optval);
1825 if (error)
1826 goto bad;
1827 SOCK_LOCK(so);
1828 if (optval)
1829 so->so_options |= sopt->sopt_name;
1830 else
1831 so->so_options &= ~sopt->sopt_name;
1832 SOCK_UNLOCK(so);
1833 break;
1834
1835 case SO_SNDBUF:
1836 case SO_RCVBUF:
1837 case SO_SNDLOWAT:
1838 case SO_RCVLOWAT:
1839 error = sooptcopyin(sopt, &optval, sizeof optval,
1840 sizeof optval);
1841 if (error)
1842 goto bad;
1843
1844 /*
1845 * Values < 1 make no sense for any of these
1846 * options, so disallow them.
1847 */
1848 if (optval < 1) {
1849 error = EINVAL;
1850 goto bad;
1851 }
1852
1853 switch (sopt->sopt_name) {
1854 case SO_SNDBUF:
1855 case SO_RCVBUF:
1856 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
1857 &so->so_snd : &so->so_rcv, (u_long)optval,
1858 so, curthread) == 0) {
1859 error = ENOBUFS;
1860 goto bad;
1861 }
1862 break;
1863
1864 /*
1865 * Make sure the low-water is never greater than
1866 * the high-water.
1867 */
1868 case SO_SNDLOWAT:
1869 SOCKBUF_LOCK(&so->so_snd);
1870 so->so_snd.sb_lowat =
1871 (optval > so->so_snd.sb_hiwat) ?
1872 so->so_snd.sb_hiwat : optval;
1873 SOCKBUF_UNLOCK(&so->so_snd);
1874 break;
1875 case SO_RCVLOWAT:
1876 SOCKBUF_LOCK(&so->so_rcv);
1877 so->so_rcv.sb_lowat =
1878 (optval > so->so_rcv.sb_hiwat) ?
1879 so->so_rcv.sb_hiwat : optval;
1880 SOCKBUF_UNLOCK(&so->so_rcv);
1881 break;
1882 }
1883 break;
1884
1885 case SO_SNDTIMEO:
1886 case SO_RCVTIMEO:
1887#ifdef COMPAT_IA32
1888 if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
1889 struct timeval32 tv32;
1890
1891 error = sooptcopyin(sopt, &tv32, sizeof tv32,
1892 sizeof tv32);
1893 CP(tv32, tv, tv_sec);
1894 CP(tv32, tv, tv_usec);
1895 } else
1896#endif
1897 error = sooptcopyin(sopt, &tv, sizeof tv,
1898 sizeof tv);
1899 if (error)
1900 goto bad;
1901
1902 /* assert(hz > 0); */
1903 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
1904 tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
1905 error = EDOM;
1906 goto bad;
1907 }
1908 /* assert(tick > 0); */
1909 /* assert(ULONG_MAX - INT_MAX >= 1000000); */
1910 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
1911 if (val > INT_MAX) {
1912 error = EDOM;
1913 goto bad;
1914 }
1915 if (val == 0 && tv.tv_usec != 0)
1916 val = 1;
1917
1918 switch (sopt->sopt_name) {
1919 case SO_SNDTIMEO:
1920 so->so_snd.sb_timeo = val;
1921 break;
1922 case SO_RCVTIMEO:
1923 so->so_rcv.sb_timeo = val;
1924 break;
1925 }
1926 break;
1927
1928 case SO_LABEL:
1929#ifdef MAC
1930 error = sooptcopyin(sopt, &extmac, sizeof extmac,
1931 sizeof extmac);
1932 if (error)
1933 goto bad;
1934 error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
1935 so, &extmac);
1936#else
1937 error = EOPNOTSUPP;
1938#endif
1939 break;
1940
1941 default:
1942 error = ENOPROTOOPT;
1943 break;
1944 }
1945 if (error == 0 && so->so_proto != NULL &&
1946 so->so_proto->pr_ctloutput != NULL) {
1947 (void) ((*so->so_proto->pr_ctloutput)
1948 (so, sopt));
1949 }
1950 }
1951bad:
1952 return (error);
1953}
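
/*
 * Worked example for the SO_SNDTIMEO/SO_RCVTIMEO conversion above: with
 * hz = 1000, tick is 1000 microseconds, so { tv_sec = 2, tv_usec = 500000 }
 * becomes 2 * 1000 + 500000 / 1000 = 2500 scheduler ticks in sb_timeo.
 * The sketch below (not part of the original file) repeats the same
 * range checks as a standalone helper.
 */
#if 0
static int
sotimeo_to_ticks(const struct timeval *tv, u_long *ticksp)
{
	u_long val;

	if (tv->tv_sec < 0 || tv->tv_sec > INT_MAX / hz ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EDOM);
	val = (u_long)(tv->tv_sec * hz) + tv->tv_usec / tick;
	if (val > INT_MAX)
		return (EDOM);
	if (val == 0 && tv->tv_usec != 0)
		val = 1;	/* round a tiny non-zero timeout up to 1 tick */
	*ticksp = val;
	return (0);
}
#endif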
1954
1955/* Helper routine for getsockopt */
1956int
1957sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
1958{
1959 int error;
1960 size_t valsize;
1961
1962 error = 0;
1963
1964 /*
1965 * Documented get behavior is that we always return a value,
1966 * possibly truncated to fit in the user's buffer.
1967 * Traditional behavior is that we always tell the user
1968 * precisely how much we copied, rather than something useful
1969 * like the total amount we had available for her.
1970 	 * Note that this interface is not idempotent; the entire answer must
1971 	 * be generated ahead of time.
1972 */
1973 valsize = min(len, sopt->sopt_valsize);
1974 sopt->sopt_valsize = valsize;
1975 if (sopt->sopt_val != NULL) {
1976 if (sopt->sopt_td != NULL)
1977 error = copyout(buf, sopt->sopt_val, valsize);
1978 else
1979 bcopy(buf, sopt->sopt_val, valsize);
1980 }
1981 return (error);
1982}
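
/*
 * Illustrative sketch, not part of the original file: the GET side of a
 * hypothetical pr_ctloutput() handler.  Per the truncation contract
 * documented above, a too-small caller buffer gets a truncated copy and
 * sopt->sopt_valsize reports how much was actually copied.
 * MYPROTO_OPT_FOO and myproto_get_foo() are hypothetical.
 */
#if 0
static int
myproto_ctloutput_get(struct socket *so, struct sockopt *sopt)
{
	int optval;

	switch (sopt->sopt_name) {
	case MYPROTO_OPT_FOO:
		optval = myproto_get_foo(so);
		return (sooptcopyout(sopt, &optval, sizeof optval));
	default:
		return (ENOPROTOOPT);
	}
}
#endif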
1983
1984int
1985 sogetopt(struct socket *so, struct sockopt *sopt)
1988 {
1989 int error, optval;
1990 struct linger l;
1991 struct timeval tv;
1992#ifdef MAC
1993 struct mac extmac;
1994#endif
1995
1996 error = 0;
1997 if (sopt->sopt_level != SOL_SOCKET) {
1998 if (so->so_proto && so->so_proto->pr_ctloutput) {
1999 return ((*so->so_proto->pr_ctloutput)
2000 (so, sopt));
2001 } else
2002 return (ENOPROTOOPT);
2003 } else {
2004 switch (sopt->sopt_name) {
2005#ifdef INET
2006 case SO_ACCEPTFILTER:
2007 error = do_getopt_accept_filter(so, sopt);
2008 break;
2009#endif
2010 case SO_LINGER:
2011 SOCK_LOCK(so);
2012 l.l_onoff = so->so_options & SO_LINGER;
2013 l.l_linger = so->so_linger;
2014 SOCK_UNLOCK(so);
2015 error = sooptcopyout(sopt, &l, sizeof l);
2016 break;
2017
2018 case SO_USELOOPBACK:
2019 case SO_DONTROUTE:
2020 case SO_DEBUG:
2021 case SO_KEEPALIVE:
2022 case SO_REUSEADDR:
2023 case SO_REUSEPORT:
2024 case SO_BROADCAST:
2025 case SO_OOBINLINE:
2026 case SO_ACCEPTCONN:
2027 case SO_TIMESTAMP:
2028 case SO_BINTIME:
2029 case SO_NOSIGPIPE:
2030 optval = so->so_options & sopt->sopt_name;
2031integer:
2032 error = sooptcopyout(sopt, &optval, sizeof optval);
2033 break;
2034
2035 case SO_TYPE:
2036 optval = so->so_type;
2037 goto integer;
2038
2039 case SO_ERROR:
2040 optval = so->so_error;
2041 so->so_error = 0;
2042 goto integer;
2043
2044 case SO_SNDBUF:
2045 optval = so->so_snd.sb_hiwat;
2046 goto integer;
2047
2048 case SO_RCVBUF:
2049 optval = so->so_rcv.sb_hiwat;
2050 goto integer;
2051
2052 case SO_SNDLOWAT:
2053 optval = so->so_snd.sb_lowat;
2054 goto integer;
2055
2056 case SO_RCVLOWAT:
2057 optval = so->so_rcv.sb_lowat;
2058 goto integer;
2059
2060 case SO_SNDTIMEO:
2061 case SO_RCVTIMEO:
2062 optval = (sopt->sopt_name == SO_SNDTIMEO ?
2063 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2064
2065 tv.tv_sec = optval / hz;
2066 tv.tv_usec = (optval % hz) * tick;
2067#ifdef COMPAT_IA32
2068 if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
2069 struct timeval32 tv32;
2070
2071 CP(tv, tv32, tv_sec);
2072 CP(tv, tv32, tv_usec);
2073 error = sooptcopyout(sopt, &tv32, sizeof tv32);
2074 } else
2075#endif
2076 error = sooptcopyout(sopt, &tv, sizeof tv);
2077 break;
2078
2079 case SO_LABEL:
2080#ifdef MAC
2081 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2082 sizeof(extmac));
2083 if (error)
2084 return (error);
2085 error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
2086 so, &extmac);
2087 if (error)
2088 return (error);
2089 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2090#else
2091 error = EOPNOTSUPP;
2092#endif
2093 break;
2094
2095 case SO_PEERLABEL:
2096#ifdef MAC
2097 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2098 sizeof(extmac));
2099 if (error)
2100 return (error);
2101 error = mac_getsockopt_peerlabel(
2102 sopt->sopt_td->td_ucred, so, &extmac);
2103 if (error)
2104 return (error);
2105 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2106#else
2107 error = EOPNOTSUPP;
2108#endif
2109 break;
2110
2111 case SO_LISTENQLIMIT:
2112 optval = so->so_qlimit;
2113 goto integer;
2114
2115 case SO_LISTENQLEN:
2116 optval = so->so_qlen;
2117 goto integer;
2118
2119 case SO_LISTENINCQLEN:
2120 optval = so->so_incqlen;
2121 goto integer;
2122
2123 default:
2124 error = ENOPROTOOPT;
2125 break;
2126 }
2127 return (error);
2128 }
2129}
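
/*
 * This file provides so_setsockopt() but no kernel wrapper for the GET
 * direction.  A hypothetical so_getsockopt(), sketched below, would
 * mirror it with SOPT_GET and an in/out length parameter.
 */
#if 0
int
so_getsockopt(struct socket *so, int level, int optname, void *optval,
    size_t *optlenp)
{
	struct sockopt sopt;
	int error;

	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_val = optval;
	sopt.sopt_valsize = *optlenp;
	sopt.sopt_td = NULL;	/* kernel caller: bcopy, not copyout */
	error = sogetopt(so, &sopt);
	if (error == 0)
		*optlenp = sopt.sopt_valsize;	/* amount actually copied */
	return (error);
}
#endif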
2130
2131/* XXX: prepare mbuf for (__FreeBSD__ < 3) routines. */
2132int
2133soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2134{
2135 struct mbuf *m, *m_prev;
2136 int sopt_size = sopt->sopt_valsize;
2137
2138 MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
2139 if (m == NULL)
2140 return ENOBUFS;
2141 if (sopt_size > MLEN) {
2142 MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
2143 if ((m->m_flags & M_EXT) == 0) {
2144 m_free(m);
2145 return ENOBUFS;
2146 }
2147 m->m_len = min(MCLBYTES, sopt_size);
2148 } else {
2149 m->m_len = min(MLEN, sopt_size);
2150 }
2151 sopt_size -= m->m_len;
2152 *mp = m;
2153 m_prev = m;
2154
2155 while (sopt_size) {
2156 MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
2157 if (m == NULL) {
2158 m_freem(*mp);
2159 return ENOBUFS;
2160 }
2161 if (sopt_size > MLEN) {
2162 MCLGET(m, sopt->sopt_td != NULL ? M_TRYWAIT :
2163 M_DONTWAIT);
2164 if ((m->m_flags & M_EXT) == 0) {
2165 m_freem(m);
2166 m_freem(*mp);
2167 return ENOBUFS;
2168 }
2169 m->m_len = min(MCLBYTES, sopt_size);
2170 } else {
2171 m->m_len = min(MLEN, sopt_size);
2172 }
2173 sopt_size -= m->m_len;
2174 m_prev->m_next = m;
2175 m_prev = m;
2176 }
2177 return (0);
2178}
2179
2180/* XXX: copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
2181int
2182soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2183{
2184 struct mbuf *m0 = m;
2185
2186 if (sopt->sopt_val == NULL)
2187 return (0);
2188 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2189 if (sopt->sopt_td != NULL) {
2190 int error;
2191
2192 error = copyin(sopt->sopt_val, mtod(m, char *),
2193 m->m_len);
2194 if (error != 0) {
2195 m_freem(m0);
2196 				return (error);
2197 }
2198 } else
2199 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2200 sopt->sopt_valsize -= m->m_len;
2201 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2202 m = m->m_next;
2203 }
2204 	if (m != NULL)	/* soopt_getm() should have sized the chain */
2205 		panic("soopt_mcopyin");
2206 return (0);
2207}
2208
2209/* XXX: copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
2210int
2211soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2212{
2213 struct mbuf *m0 = m;
2214 size_t valsize = 0;
2215
2216 if (sopt->sopt_val == NULL)
2217 return (0);
2218 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2219 if (sopt->sopt_td != NULL) {
2220 int error;
2221
2222 error = copyout(mtod(m, char *), sopt->sopt_val,
2223 m->m_len);
2224 if (error != 0) {
2225 m_freem(m0);
2226 				return (error);
2227 }
2228 } else
2229 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2230 sopt->sopt_valsize -= m->m_len;
2231 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2232 valsize += m->m_len;
2233 m = m->m_next;
2234 }
2235 if (m != NULL) {
2236 		/* The caller should have supplied a large enough buffer. */
2237 		m_freem(m0);
2238 		return (EINVAL);
2239 }
2240 sopt->sopt_valsize = valsize;
2241 return (0);
2242}
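
/*
 * Illustrative sketch, not part of the original file: one plausible
 * round trip through the three mbuf helpers above, in the style of the
 * legacy ctloutput paths they serve.  Both soopt_mcopyin() and
 * soopt_mcopyout() advance sopt_val and consume sopt_valsize, so the
 * caller rewinds between the two; myproto_handle_mbuf() is hypothetical.
 */
#if 0
	struct mbuf *m = NULL;
	void *val = sopt->sopt_val;
	size_t valsize = sopt->sopt_valsize;
	int error;

	error = soopt_getm(sopt, &m);	/* chain sized from sopt_valsize */
	if (error != 0)
		return (error);
	error = soopt_mcopyin(sopt, m);	/* frees the chain itself on error */
	if (error != 0)
		return (error);
	error = myproto_handle_mbuf(so, sopt, m);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	sopt->sopt_val = val;		/* rewind for the copy out */
	sopt->sopt_valsize = valsize;
	error = soopt_mcopyout(sopt, m);	/* frees m only on failure */
	if (error == 0)
		m_freem(m);
	return (error);
#endif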
2243
2244void
2245 sohasoutofband(struct socket *so)
2247 {
2248 if (so->so_sigio != NULL)
2249 pgsigio(&so->so_sigio, SIGURG, 0);
2250 selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2251}
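
/*
 * Illustrative sketch, not part of the original file: a protocol's input
 * path (TCP's, for instance) calls sohasoutofband() when it notes new
 * urgent data, so SIGURG reaches the socket's owner and any select(2)
 * or poll(2) waiters on the receive buffer wake up.  The condition and
 * offset below are hypothetical.
 */
#if 0
	if (urgent_data_arrived) {
		so->so_oobmark = so->so_rcv.sb_cc + urgent_offset;
		if (so->so_oobmark == 0)
			so->so_rcv.sb_state |= SBS_RCVATMARK;
		sohasoutofband(so);
	}
#endif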
2252
2253int
2254sopoll(struct socket *so, int events, struct ucred *active_cred,
2255 struct thread *td)
2256{
2257 int revents = 0;
2258
2259 SOCKBUF_LOCK(&so->so_snd);
2260 SOCKBUF_LOCK(&so->so_rcv);
2261 if (events & (POLLIN | POLLRDNORM))
2262 if (soreadable(so))
2263 revents |= events & (POLLIN | POLLRDNORM);
2264
2265 if (events & POLLINIGNEOF)
2266 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
2267 !TAILQ_EMPTY(&so->so_comp) || so->so_error)
2268 revents |= POLLINIGNEOF;
2269
2270 if (events & (POLLOUT | POLLWRNORM))
2271 if (sowriteable(so))
2272 revents |= events & (POLLOUT | POLLWRNORM);
2273
2274 if (events & (POLLPRI | POLLRDBAND))
2275 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
2276 revents |= events & (POLLPRI | POLLRDBAND);
2277
2278 if (revents == 0) {
2279 if (events &
2280 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
2281 POLLRDBAND)) {
2282 selrecord(td, &so->so_rcv.sb_sel);
2283 so->so_rcv.sb_flags |= SB_SEL;
2284 }
2285
2286 if (events & (POLLOUT | POLLWRNORM)) {
2287 selrecord(td, &so->so_snd.sb_sel);
2288 so->so_snd.sb_flags |= SB_SEL;
2289 }
2290 }
2291
2292 SOCKBUF_UNLOCK(&so->so_rcv);
2293 SOCKBUF_UNLOCK(&so->so_snd);
2294 return (revents);
2295}
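
/*
 * Illustrative sketch, not part of the original file: the userland view
 * of the logic above.  POLLINIGNEOF is the FreeBSD-specific variant of
 * POLLIN that, as sopoll() shows, reports readability only for buffered
 * data, a completed connection, or an error, and not for bare EOF;
 * POLLPRI fires at or before the out-of-band mark.
 */
#if 0
#include <poll.h>

static int
wait_readable(int fd)
{
	struct pollfd pfd;

	pfd.fd = fd;
	pfd.events = POLLINIGNEOF | POLLPRI;
	if (poll(&pfd, 1, 5000) <= 0)		/* five-second timeout */
		return (-1);
	return (pfd.revents);
}
#endif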
2296
2297int
2298soo_kqfilter(struct file *fp, struct knote *kn)
2299{
2300 struct socket *so = kn->kn_fp->f_data;
2301 struct sockbuf *sb;
2302
2303 switch (kn->kn_filter) {
2304 case EVFILT_READ:
2305 if (so->so_options & SO_ACCEPTCONN)
2306 kn->kn_fop = &solisten_filtops;
2307 else
2308 kn->kn_fop = &soread_filtops;
2309 sb = &so->so_rcv;
2310 break;
2311 case EVFILT_WRITE:
2312 kn->kn_fop = &sowrite_filtops;
2313 sb = &so->so_snd;
2314 break;
2315 default:
2316 return (EINVAL);
2317 }
2318
2319 SOCKBUF_LOCK(sb);
2320 knlist_add(&sb->sb_sel.si_note, kn, 1);
2321 sb->sb_flags |= SB_KNOTE;
2322 SOCKBUF_UNLOCK(sb);
2323 return (0);
2324}
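
/*
 * Illustrative sketch, not part of the original file: the userland
 * registration that reaches soo_kqfilter().  On a listening socket an
 * EVFILT_READ knote is routed to solisten_filtops; on a connected
 * socket, NOTE_LOWAT with a data value requests the low-water behavior
 * implemented in filt_soread() below.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
watch_socket(int kq, int fd)
{
	struct kevent kev;

	/* Fire only once at least 128 bytes are buffered. */
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
	return (kevent(kq, &kev, 1, NULL, 0, NULL));
}
#endif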
2325
2326static void
2327filt_sordetach(struct knote *kn)
2328{
2329 struct socket *so = kn->kn_fp->f_data;
2330
2331 SOCKBUF_LOCK(&so->so_rcv);
2332 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
2333 if (knlist_empty(&so->so_rcv.sb_sel.si_note))
2334 so->so_rcv.sb_flags &= ~SB_KNOTE;
2335 SOCKBUF_UNLOCK(&so->so_rcv);
2336}
2337
2338/*ARGSUSED*/
2339static int
2340filt_soread(struct knote *kn, long hint)
2341{
2342 struct socket *so;
2343
2344 so = kn->kn_fp->f_data;
2345 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2346
2347 kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
2348 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2349 kn->kn_flags |= EV_EOF;
2350 kn->kn_fflags = so->so_error;
2351 return (1);
2352 } else if (so->so_error) /* temporary udp error */
2353 return (1);
2354 else if (kn->kn_sfflags & NOTE_LOWAT)
2355 return (kn->kn_data >= kn->kn_sdata);
2356 else
2357 return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
2358}
2359
2360static void
2361filt_sowdetach(struct knote *kn)
2362{
2363 struct socket *so = kn->kn_fp->f_data;
2364
2365 SOCKBUF_LOCK(&so->so_snd);
2366 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
2367 if (knlist_empty(&so->so_snd.sb_sel.si_note))
2368 so->so_snd.sb_flags &= ~SB_KNOTE;
2369 SOCKBUF_UNLOCK(&so->so_snd);
2370}
2371
2372/*ARGSUSED*/
2373static int
2374filt_sowrite(struct knote *kn, long hint)
2375{
2376 struct socket *so;
2377
2378 so = kn->kn_fp->f_data;
2379 SOCKBUF_LOCK_ASSERT(&so->so_snd);
2380 kn->kn_data = sbspace(&so->so_snd);
2381 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2382 kn->kn_flags |= EV_EOF;
2383 kn->kn_fflags = so->so_error;
2384 return (1);
2385 } else if (so->so_error) /* temporary udp error */
2386 return (1);
2387 else if (((so->so_state & SS_ISCONNECTED) == 0) &&
2388 (so->so_proto->pr_flags & PR_CONNREQUIRED))
2389 return (0);
2390 else if (kn->kn_sfflags & NOTE_LOWAT)
2391 return (kn->kn_data >= kn->kn_sdata);
2392 else
2393 return (kn->kn_data >= so->so_snd.sb_lowat);
2394}
2395
2396/*ARGSUSED*/
2397static int
2398filt_solisten(struct knote *kn, long hint)
2399{
2400 struct socket *so = kn->kn_fp->f_data;
2401
2402 kn->kn_data = so->so_qlen;
2403 	return (!TAILQ_EMPTY(&so->so_comp));
2404}
2405
2406int
2407socheckuid(struct socket *so, uid_t uid)
2408{
2409
2410 if (so == NULL)
2411 return (EPERM);
2412 if (so->so_cred->cr_uid != uid)
2413 return (EPERM);
2414 return (0);
2415}
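
/*
 * Illustrative sketch, not part of the original file: socheckuid() is a
 * convenience for matching a socket against a credential, e.g. in a
 * packet-filter-style rule keyed on the owning uid.  The rule structure
 * is hypothetical.
 */
#if 0
	if (socheckuid(so, rule->r_uid) == 0)
		match = 1;	/* socket exists and is owned by r_uid */
#endif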
2416
2417static int
2418somaxconn_sysctl(SYSCTL_HANDLER_ARGS)
2419{
2420 int error;
2421 int val;
2422
2423 val = somaxconn;
2424 error = sysctl_handle_int(oidp, &val, sizeof(int), req);
2425 	if (error || !req->newptr)
2426 return (error);
2427
2428 if (val < 1 || val > USHRT_MAX)
2429 return (EINVAL);
2430
2431 somaxconn = val;
2432 return (0);
2433}
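
/*
 * The handler above backs the kern.ipc.somaxconn sysctl and clamps new
 * values to the range 1..USHRT_MAX.  An illustrative userland sketch
 * (not part of the original file) using sysctlbyname(3):
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
set_somaxconn(int newval)
{
	int oldval;
	size_t oldlen = sizeof(oldval);

	/* Fails with EINVAL if newval is outside 1..USHRT_MAX. */
	return (sysctlbyname("kern.ipc.somaxconn", &oldval, &oldlen,
	    &newval, sizeof(newval)));
}
#endif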