/*
 * Copyright (c) 1998-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/protosw.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/ev.h>
#include <kern/locks.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <sys/kdebug.h>
#include <libkern/OSAtomic.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#include <mach/vm_param.h>

/* TODO: this should be in a header file somewhere */
extern void postevent(struct socket *, struct sockbuf *, int);

#define	DBG_FNC_SBDROP		NETDBG_CODE(DBG_NETSOCK, 4)
#define	DBG_FNC_SBAPPEND	NETDBG_CODE(DBG_NETSOCK, 5)

static inline void sbcompress(struct sockbuf *, struct mbuf *, struct mbuf *);
static struct socket *sonewconn_internal(struct socket *, int);
static int sbappendaddr_internal(struct sockbuf *, struct sockaddr *,
    struct mbuf *, struct mbuf *);
static int sbappendcontrol_internal(struct sockbuf *, struct mbuf *,
    struct mbuf *);
static void soevent_ifdenied(struct socket *);

/*
 * Primitive routines for operating on sockets and socket buffers
 */
static int soqlimitcompat = 1;
static int soqlencomp = 0;

/*
 * Based on the number of mbuf clusters configured, high_sb_max and sb_max can
 * get scaled up or down to suit that memory configuration. high_sb_max is a
 * higher limit on sb_max that is checked when sb_max gets set through sysctl.
 */

u_int32_t	sb_max = SB_MAX;		/* XXX should be static */
u_int32_t	high_sb_max = SB_MAX;

static	u_int32_t sb_efficiency = 8;	/* parameter for sbreserve() */
__private_extern__ int32_t total_sbmb_cnt = 0;

/* Control whether to throttle sockets eligible to be throttled */
__private_extern__ u_int32_t net_io_policy_throttled = 0;
static int sysctl_io_policy_throttled SYSCTL_HANDLER_ARGS;

u_int32_t net_io_policy_log = 0;	/* log socket policy changes */
#if CONFIG_PROC_UUID_POLICY
u_int32_t net_io_policy_uuid = 1;	/* enable UUID socket policy */
#endif /* CONFIG_PROC_UUID_POLICY */

/*
 * Procedures to manipulate state flags of socket
 * and do appropriate wakeups.  Normal sequence from the
 * active (originating) side is that soisconnecting() is
 * called during processing of connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of disconnect() call,
 * and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and soisdisconnected()
 * only, bypassing the in-progress calls when setting up a ``connection''
 * takes no time.
 *
 * From the passive side, a socket is created with
 * two queues of sockets: so_incomp for connections in progress
 * and so_comp for connections already made and awaiting user acceptance.
 * As a protocol is preparing incoming connections, it creates a socket
 * structure queued on so_incomp by calling sonewconn().  When the connection
 * is established, soisconnected() is called, and transfers the
 * socket structure to so_comp, making it available to accept().
 *
 * If a socket is closed with sockets on either
 * so_incomp or so_comp, these sockets are dropped.
 *
 * If higher level protocols are implemented in
 * the kernel, the wakeups done here will sometimes
 * cause software-interrupt process scheduling.
 */
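/*
 * Illustrative sketch (not part of the build): the calling sequence an
 * active-side, connection-oriented protocol is expected to follow.  The
 * helper name below is hypothetical; only the soisconnect*() routines
 * are real entry points.
 */
#if 0
static void
example_active_open(struct socket *so)
{
	soisconnecting(so);	/* during processing of connect() */
	/* ... protocol handshake runs ... */
	soisconnected(so);	/* wakes threads sleeping in connect() */
}
#endif
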
void
soisconnecting(struct socket *so)
{

	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;

	sflt_notify(so, sock_evt_connecting, NULL);
}

void
soisconnected(struct socket *so)
{
	struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;

	sflt_notify(so, sock_evt_connected, NULL);

	if (head && (so->so_state & SS_INCOMP)) {
		so->so_state &= ~SS_INCOMP;
		so->so_state |= SS_COMP;
		if (head->so_proto->pr_getlock != NULL) {
			socket_unlock(so, 0);
			socket_lock(head, 1);
		}
		postevent(head, 0, EV_RCONN);
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		sorwakeup(head);
		wakeup_one((caddr_t)&head->so_timeo);
		if (head->so_proto->pr_getlock != NULL) {
			socket_unlock(head, 1);
			socket_lock(so, 0);
		}
	} else {
		postevent(so, 0, EV_WCONN);
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
		soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CONNECTED |
		    SO_FILT_HINT_CONNINFO_UPDATED);
	}
}

void
soisdisconnecting(struct socket *so)
{
	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	soevent(so, SO_FILT_HINT_LOCKED);
	sflt_notify(so, sock_evt_disconnecting, NULL);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED |
	    SO_FILT_HINT_CONNINFO_UPDATED);
	sflt_notify(so, sock_evt_disconnected, NULL);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

/*
 * This function will issue a wakeup like soisdisconnected but it will not
 * notify the socket filters. This will avoid unlocking the socket
 * in the midst of closing it.
 */
void
sodisconnectwakeup(struct socket *so)
{
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED |
	    SO_FILT_HINT_CONNINFO_UPDATED);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return it.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 */
static struct socket *
sonewconn_internal(struct socket *head, int connstatus)
{
	int so_qlen, error = 0;
	struct socket *so;
	lck_mtx_t *mutex_held;

	if (head->so_proto->pr_getlock != NULL)
		mutex_held = (*head->so_proto->pr_getlock)(head, 0);
	else
		mutex_held = head->so_proto->pr_domain->dom_mtx;
	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

	if (!soqlencomp) {
		/*
		 * This is the default case; so_qlen represents the
		 * sum of both incomplete and completed queues.
		 */
		so_qlen = head->so_qlen;
	} else {
		/*
		 * When kern.ipc.soqlencomp is set to 1, so_qlen
		 * represents only the completed queue.  Since we
		 * cannot let the incomplete queue go unbounded
		 * (in case of SYN flood), we cap the incomplete
		 * queue length at somaxconn, and use that
		 * as so_qlen so that we fail immediately below.
		 */
		so_qlen = head->so_qlen - head->so_incqlen;
		if (head->so_incqlen > somaxconn)
			so_qlen = somaxconn;
	}

	if (so_qlen >=
	    (soqlimitcompat ? head->so_qlimit : (3 * head->so_qlimit / 2)))
		return ((struct socket *)0);
	so = soalloc(1, SOCK_DOM(head), head->so_type);
	if (so == NULL)
		return ((struct socket *)0);
	/* check if head was closed during the soalloc */
	if (head->so_proto == NULL) {
		sodealloc(so);
		return ((struct socket *)0);
	}

	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid  = head->so_pgid;
	kauth_cred_ref(head->so_cred);
	so->so_cred = head->so_cred;
	so->last_pid = head->last_pid;
	so->last_upid = head->last_upid;
	memcpy(so->last_uuid, head->last_uuid, sizeof (so->last_uuid));
	if (head->so_flags & SOF_DELEGATED) {
		so->e_pid = head->e_pid;
		so->e_upid = head->e_upid;
		memcpy(so->e_uuid, head->e_uuid, sizeof (so->e_uuid));
	}
	/* inherit socket options stored in so_flags */
	so->so_flags = head->so_flags &
	    (SOF_NOSIGPIPE | SOF_NOADDRAVAIL | SOF_REUSESHAREUID |
	    SOF_NOTIFYCONFLICT | SOF_BINDRANDOMPORT | SOF_NPX_SETOPTSHUT |
	    SOF_NODEFUNCT | SOF_PRIVILEGED_TRAFFIC_CLASS | SOF_NOTSENT_LOWAT |
	    SOF_USELRO | SOF_DELEGATED);
	so->so_usecount = 1;
	so->next_lock_lr = 0;
	so->next_unlock_lr = 0;

	so->so_rcv.sb_flags |= SB_RECV;	/* XXX */
	so->so_rcv.sb_so = so->so_snd.sb_so = so;
	TAILQ_INIT(&so->so_evlist);

#if CONFIG_MACF_SOCKET
	mac_socket_label_associate_accept(head, so);
#endif

	/* inherit traffic management properties of listener */
	so->so_traffic_mgt_flags =
	    head->so_traffic_mgt_flags & (TRAFFIC_MGT_SO_BACKGROUND);
	so->so_background_thread = head->so_background_thread;
	so->so_traffic_class = head->so_traffic_class;

	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
		sodealloc(so);
		return ((struct socket *)0);
	}
	so->so_rcv.sb_flags |= (head->so_rcv.sb_flags & SB_USRSIZE);
	so->so_snd.sb_flags |= (head->so_snd.sb_flags & SB_USRSIZE);

	/*
	 * Must be done with head unlocked to avoid deadlock
	 * for protocol with per socket mutexes.
	 */
	if (head->so_proto->pr_unlock)
		socket_unlock(head, 0);
	if (((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL) != 0) ||
	    error) {
		sodealloc(so);
		if (head->so_proto->pr_unlock)
			socket_lock(head, 0);
		return ((struct socket *)0);
	}
	if (head->so_proto->pr_unlock) {
		socket_lock(head, 0);
		/*
		 * Radar 7385998: Recheck that the head is still accepting
		 * to avoid race condition when head is getting closed.
		 */
		if ((head->so_options & SO_ACCEPTCONN) == 0) {
			so->so_state &= ~SS_NOFDREF;
			soclose(so);
			return ((struct socket *)0);
		}
	}

	atomic_add_32(&so->so_proto->pr_domain->dom_refs, 1);

	/* Insert in head appropriate lists */
	so->so_head = head;

	/*
	 * Since this socket is going to be inserted into the incomp
	 * queue, it can be picked up by another thread in
	 * tcp_dropdropablreq and get dropped before it is set up.
	 * To prevent this race, set the in-progress flag, which can be
	 * cleared later.
	 */
	so->so_flags |= SOF_INCOMP_INPROGRESS;

	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_state |= SS_COMP;
	} else {
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_state |= SS_INCOMP;
		head->so_incqlen++;
	}
	head->so_qlen++;

	/* Attach socket filters for this protocol */
	sflt_initsock(so);

	if (connstatus) {
		so->so_state |= connstatus;
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	}
	return (so);
}


struct socket *
sonewconn(struct socket *head, int connstatus, const struct sockaddr *from)
{
	int error = sflt_connectin(head, from);
	if (error) {
		return (NULL);
	}

	return (sonewconn_internal(head, connstatus));
}
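
/*
 * Illustrative sketch (not part of the build): how a protocol's input
 * path typically uses sonewconn() on the passive side.  The helper name
 * is hypothetical; sonewconn() and soisconnected() are the real calls.
 */
#if 0
static void
example_passive_open(struct socket *head, struct sockaddr *from)
{
	struct socket *so;

	/* connstatus == 0: the child is queued on head->so_incomp */
	so = sonewconn(head, 0, from);
	if (so == NULL)
		return;		/* listen queue full, or no memory */
	/* ... handshake completes ... */
	soisconnected(so);	/* child moves to head->so_comp for accept() */
}
#endif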

/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it is normally applied to a socket by the protocol code
 * when the user informs the system that no more data is to be sent
 * (in the case of PRU_SHUTDOWN).  Socantrcvmore indicates that no more
 * data will be received, and will normally be applied to the socket by a
 * protocol when it detects that the peer will send no more data.
 * Data queued for reading in the socket may yet be read.
 */
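
/*
 * Illustrative note (not part of the build): how these two routines
 * relate to shutdown(2).  A SHUT_WR from user space reaches the protocol
 * as PRU_SHUTDOWN, which ends up calling socantsendmore() on the local
 * socket; when the peer's protocol sees the resulting end-of-data
 * indication (e.g. a TCP FIN), it calls socantrcvmore() on its side.
 */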

void
socantsendmore(struct socket *so)
{
	so->so_state |= SS_CANTSENDMORE;
	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CANTSENDMORE);
	sflt_notify(so, sock_evt_cantsendmore, NULL);
	sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{
	so->so_state |= SS_CANTRCVMORE;
	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CANTRCVMORE);
	sflt_notify(so, sock_evt_cantrecvmore, NULL);
	sorwakeup(so);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{
	boolean_t nointr = (sb->sb_flags & SB_NOINTR);
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;
	lck_mtx_t *mutex_held;
	struct timespec ts;
	int error = 0;

	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 1) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_proto->pr_getlock != NULL)
		mutex_held = (*so->so_proto->pr_getlock)(so, 0);
	else
		mutex_held = so->so_proto->pr_domain->dom_mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

	ts.tv_sec = sb->sb_timeo.tv_sec;
	ts.tv_nsec = sb->sb_timeo.tv_usec * 1000;

	sb->sb_waiters++;
	VERIFY(sb->sb_waiters != 0);

	error = msleep((caddr_t)&sb->sb_cc, mutex_held,
	    nointr ? PSOCK : PSOCK | PCATCH,
	    nointr ? "sbwait_nointr" : "sbwait", &ts);

	VERIFY(sb->sb_waiters != 0);
	sb->sb_waiters--;

	if (so->so_usecount < 1) {
		panic("%s: 2 sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if ((so->so_state & SS_DRAINING) || (so->so_flags & SOF_DEFUNCT)) {
		error = EBADF;
		if (so->so_flags & SOF_DEFUNCT) {
			SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
			    "(%d)\n", __func__, proc_selfpid(),
			    (uint64_t)VM_KERNEL_ADDRPERM(so),
			    SOCK_DOM(so), SOCK_TYPE(so), error));
		}
	}

	return (error);
}

void
sbwakeup(struct sockbuf *sb)
{
	if (sb->sb_waiters > 0)
		wakeup((caddr_t)&sb->sb_cc);
}

/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket has the SS_ASYNC flag set.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	if (so->so_flags & SOF_DEFUNCT) {
		SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] si 0x%x, "
		    "fl 0x%x [%s]\n", __func__, proc_selfpid(),
		    (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
		    SOCK_TYPE(so), (uint32_t)sb->sb_sel.si_flags, sb->sb_flags,
		    (sb->sb_flags & SB_RECV) ? "rcv" : "snd"));
	}

	sb->sb_flags &= ~SB_SEL;
	selwakeup(&sb->sb_sel);
	sbwakeup(sb);
	if (so->so_state & SS_ASYNC) {
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0)
			proc_signal(so->so_pgid, SIGIO);
	}
	if (sb->sb_flags & SB_KNOTE) {
		KNOTE(&sb->sb_sel.si_note, SO_FILT_HINT_LOCKED);
	}
	if (sb->sb_flags & SB_UPCALL) {
		void (*sb_upcall)(struct socket *, void *, int);
		caddr_t sb_upcallarg;

		sb_upcall = sb->sb_upcall;
		sb_upcallarg = sb->sb_upcallarg;
		/* Let close know that we're about to do an upcall */
		so->so_upcallusecount++;

		socket_unlock(so, 0);
		(*sb_upcall)(so, sb_upcallarg, M_DONTWAIT);
		socket_lock(so, 0);

		so->so_upcallusecount--;
		/* Tell close that it's safe to proceed */
		if ((so->so_flags & SOF_CLOSEWAIT) &&
		    so->so_upcallusecount == 0)
			wakeup((caddr_t)&so->so_upcallusecount);
	}
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing select() statements and notification
 * on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field. The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling sbrelease() when the socket is destroyed.
 */
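
/*
 * Illustrative sketch (not part of the build): walking the two-level
 * structure described above -- records are linked through m_nextpkt,
 * and the mbufs of one record are linked through m_next.
 */
#if 0
static void
example_walk_records(struct sockbuf *sb)
{
	struct mbuf *rec, *m;

	for (rec = sb->sb_mb; rec != NULL; rec = rec->m_nextpkt)
		for (m = rec; m != NULL; m = m->m_next)
			printf("record %p: mbuf %p len %d\n", rec, m, m->m_len);
}
#endif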

/*
 * Returns:	0			Success
 *		ENOBUFS
 */
int
soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc)
{

	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	else
		so->so_snd.sb_idealsize = sndcc;

	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	else
		so->so_rcv.sb_idealsize = rcvcc;

	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	so->so_snd.sb_flags &= ~SB_SEL;
	selthreadclear(&so->so_snd.sb_sel);
	sbrelease(&so->so_snd);
bad:
	return (ENOBUFS);
}

/*
 * Allot mbufs to a sockbuf.
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 */
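/*
 * Worked example (illustrative only), assuming the default sb_efficiency
 * of 8: reserving cc = 64 KB gives sb_hiwat = 64 KB of data and
 * sb_mbmax = min(64 KB * 8, sb_max) of mbuf storage, so the per-mbuf
 * overhead (MSIZE each, plus cluster space for M_EXT mbufs) does not
 * become the limiting factor when buffering is reasonably efficient.
 */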
int
sbreserve(struct sockbuf *sb, u_int32_t cc)
{
	if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES))
		return (0);
	sb->sb_hiwat = cc;
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
/*  WARNING needs to do selthreadclear() before calling this */
void
sbrelease(struct sockbuf *sb)
{
	sbflush(sb);
	sb->sb_hiwat = 0;
	sb->sb_mbmax = 0;
}

/*
 * Routines to add and remove
 * data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to
 * append new mbufs to a socket buffer, after checking that adequate
 * space is available, comparing the function sbspace() with the amount
 * of data to be added.  sbappendrecord() differs from sbappend() in
 * that data supplied is treated as the beginning of a new record.
 * To place a sender's address, optional access rights, and data in a
 * socket receive buffer, sbappendaddr() should be used.  To place
 * access rights and data in a socket receive buffer, sbappendrights()
 * should be used.  In either case, the new data begins a new record.
 * Note that unlike sbappend() and sbappendrecord(), these routines check
 * for the caller that there will be enough space to store the data.
 * Each fails if there is not enough space, or if it cannot find mbufs
 * to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data
 * awaiting acknowledgement.  Such data is normally copied from the
 * socket send buffer with m_copy for output to a peer, and then
 * removed from the socket buffer with sbdrop() or sbdroprecord()
 * once it has been acknowledged by the peer.
 */
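
/*
 * Illustrative sketch (not part of the build): the typical delivery
 * pattern for a protocol handing data to a receive buffer.  The helper
 * name is hypothetical; sbspace(), sbappend() and sorwakeup() are the
 * real entry points.
 */
#if 0
static void
example_deliver(struct socket *so, struct mbuf *m)
{
	if (sbspace(&so->so_rcv) < m_pktlen(m)) {
		m_freem(m);	/* no room: drop (a real protocol may queue) */
		return;
	}
	if (sbappend(&so->so_rcv, m))
		sorwakeup(so);	/* wake readers, select(), kqueue */
}
#endif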

/*
 * Append mbuf chain m to the last record in the
 * socket buffer sb.  The additional space associated with the
 * mbuf chain is recorded in sb.  Empty mbufs are
 * discarded and mbufs are compacted where possible.
 */
int
sbappend(struct sockbuf *sb, struct mbuf *m)
{
	struct socket *so = sb->sb_so;

	if (m == NULL || (sb->sb_flags & SB_DROP)) {
		if (m != NULL)
			m_freem(m);
		return (0);
	}

	SBLASTRECORDCHK(sb, "sbappend 1");

	if (sb->sb_lastrecord != NULL && (sb->sb_mbtail->m_flags & M_EOR))
		return (sbappendrecord(sb, m));

	if (sb->sb_flags & SB_RECV) {
		int error = sflt_data_in(so, NULL, &m, NULL, 0);
		SBLASTRECORDCHK(sb, "sbappend 2");
		if (error != 0) {
			if (error != EJUSTRETURN)
				m_freem(m);
			return (0);
		}
	}

	/* If this is the first record, it's also the last record */
	if (sb->sb_lastrecord == NULL)
		sb->sb_lastrecord = m;

	sbcompress(sb, m, sb->sb_mbtail);
	SBLASTRECORDCHK(sb, "sbappend 3");
	return (1);
}

/*
 * Similar to sbappend, except that this is optimized for stream sockets.
 */
int
sbappendstream(struct sockbuf *sb, struct mbuf *m)
{
	struct socket *so = sb->sb_so;

	if (m == NULL || (sb->sb_flags & SB_DROP)) {
		if (m != NULL)
			m_freem(m);
		return (0);
	}

	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) {
		panic("sbappendstream: nextpkt %p || mb %p != lastrecord %p\n",
		    m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);
		/* NOTREACHED */
	}

	SBLASTMBUFCHK(sb, __func__);

	if (sb->sb_flags & SB_RECV) {
		int error = sflt_data_in(so, NULL, &m, NULL, 0);
		SBLASTRECORDCHK(sb, "sbappendstream 1");
		if (error != 0) {
			if (error != EJUSTRETURN)
				m_freem(m);
			return (0);
		}
	}

	sbcompress(sb, m, sb->sb_mbtail);
	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, "sbappendstream 2");
	return (1);
}

#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb)
{
	struct mbuf *m;
	struct mbuf *n = 0;
	u_int32_t len = 0, mbcnt = 0;
	lck_mtx_t *mutex_held;

	if (sb->sb_so->so_proto->pr_getlock != NULL)
		mutex_held = (*sb->sb_so->so_proto->pr_getlock)(sb->sb_so, 0);
	else
		mutex_held = sb->sb_so->so_proto->pr_domain->dom_mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

	if (sbchecking == 0)
		return;

	for (m = sb->sb_mb; m; m = n) {
		n = m->m_nextpkt;
		for (; m; m = m->m_next) {
			len += m->m_len;
			mbcnt += MSIZE;
			/* XXX pretty sure this is bogus */
			if (m->m_flags & M_EXT)
				mbcnt += m->m_ext.ext_size;
		}
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		panic("cc %ld != %ld || mbcnt %ld != %ld\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
	}
}
#endif

void
sblastrecordchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("sblastrecordchk: mb %p lastrecord %p last %p\n",
		    sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("sblastrecordchk from %s", where);
	}
}

void
sblastmbufchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("sblastmbufchk: mb %p mbtail %p last %p\n",
		    sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("sblastmbufchk from %s", where);
	}
}

/*
 * Similar to sbappend, except the mbuf chain begins a new record.
 */
int
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;
	int space = 0;

	if (m0 == NULL || (sb->sb_flags & SB_DROP)) {
		if (m0 != NULL)
			m_freem(m0);
		return (0);
	}

	for (m = m0; m != NULL; m = m->m_next)
		space += m->m_len;

	if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX)) {
		m_freem(m0);
		return (0);
	}

	if (sb->sb_flags & SB_RECV) {
		int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
		    sock_data_filt_flag_record);
		if (error != 0) {
			SBLASTRECORDCHK(sb, "sbappendrecord 1");
			if (error != EJUSTRETURN)
				m_freem(m0);
			return (0);
		}
	}

	/*
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 2");
	if (sb->sb_lastrecord != NULL) {
		sb->sb_lastrecord->m_nextpkt = m0;
	} else {
		sb->sb_mb = m0;
	}
	sb->sb_lastrecord = m0;
	sb->sb_mbtail = m0;

	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 3");
	return (1);
}

/*
 * As above except that OOB data
 * is inserted at the beginning of the sockbuf,
 * but after any other OOB data.
 */
int
sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;
	struct mbuf **mp;

	if (m0 == 0)
		return (0);

	SBLASTRECORDCHK(sb, "sbinsertoob 1");

	if ((sb->sb_flags & SB_RECV) != 0) {
		int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
		    sock_data_filt_flag_oob);

		SBLASTRECORDCHK(sb, "sbinsertoob 2");
		if (error) {
			if (error != EJUSTRETURN) {
				m_freem(m0);
			}
			return (0);
		}
	}

	for (mp = &sb->sb_mb; *mp; mp = &((*mp)->m_nextpkt)) {
		m = *mp;
again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;		/* WANT next train */

		case MT_CONTROL:
			m = m->m_next;
			if (m)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	if (*mp == NULL) {
		/* m0 is actually the new tail */
		sb->sb_lastrecord = m0;
	}
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
	SBLASTRECORDCHK(sb, "sbinsertoob 3");
	return (1);
}

/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 *
 * Returns:	0			No space/out of mbufs
 *		1			Success
 */
static int
sbappendaddr_internal(struct sockbuf *sb, struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *m, *n, *nlast;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");

	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;

	SBLASTRECORDCHK(sb, "sbappendaddr 1");

	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;

	if (sb->sb_lastrecord != NULL) {
		sb->sb_lastrecord->m_nextpkt = m;
	} else {
		sb->sb_mb = m;
	}
	sb->sb_lastrecord = m;
	sb->sb_mbtail = nlast;

	SBLASTMBUFCHK(sb, __func__);
	SBLASTRECORDCHK(sb, "sbappendaddr 2");

	postevent(0, sb, EV_RWBYTES);
	return (1);
}

/*
 * Returns:	0			Error: No space/out of mbufs/etc.
 *		1			Success
 *
 * Imputed:	(*error_out)		errno for error
 *		ENOBUFS
 *	sflt_data_in:???		[whatever a filter author chooses]
 */
int
sbappendaddr(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0,
    struct mbuf *control, int *error_out)
{
	int result = 0;
	boolean_t sb_unix = (sb->sb_flags & SB_UNIX);

	if (error_out)
		*error_out = 0;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddrorfree");

	if (sb->sb_flags & SB_DROP) {
		if (m0 != NULL)
			m_freem(m0);
		if (control != NULL && !sb_unix)
			m_freem(control);
		if (error_out != NULL)
			*error_out = EINVAL;
		return (0);
	}

	/* Call socket data in filters */
	if ((sb->sb_flags & SB_RECV) != 0) {
		int error;
		error = sflt_data_in(sb->sb_so, asa, &m0, &control, 0);
		SBLASTRECORDCHK(sb, __func__);
		if (error) {
			if (error != EJUSTRETURN) {
				if (m0)
					m_freem(m0);
				if (control != NULL && !sb_unix)
					m_freem(control);
				if (error_out)
					*error_out = error;
			}
			return (0);
		}
	}

	result = sbappendaddr_internal(sb, asa, m0, control);
	if (result == 0) {
		if (m0)
			m_freem(m0);
		if (control != NULL && !sb_unix)
			m_freem(control);
		if (error_out)
			*error_out = ENOBUFS;
	}

	return (result);
}

static int
sbappendcontrol_internal(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
	struct mbuf *m, *mlast, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");

	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX))
		return (0);
	n->m_next = m0;			/* concatenate data to control */
	SBLASTRECORDCHK(sb, "sbappendcontrol 1");

	for (m = control; m->m_next != NULL; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;

	if (sb->sb_lastrecord != NULL) {
		sb->sb_lastrecord->m_nextpkt = control;
	} else {
		sb->sb_mb = control;
	}
	sb->sb_lastrecord = control;
	sb->sb_mbtail = mlast;

	SBLASTMBUFCHK(sb, __func__);
	SBLASTRECORDCHK(sb, "sbappendcontrol 2");

	postevent(0, sb, EV_RWBYTES);
	return (1);
}

int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control,
    int *error_out)
{
	int result = 0;
	boolean_t sb_unix = (sb->sb_flags & SB_UNIX);

	if (error_out)
		*error_out = 0;

	if (sb->sb_flags & SB_DROP) {
		if (m0 != NULL)
			m_freem(m0);
		if (control != NULL && !sb_unix)
			m_freem(control);
		if (error_out != NULL)
			*error_out = EINVAL;
		return (0);
	}

	if (sb->sb_flags & SB_RECV) {
		int error;

		error = sflt_data_in(sb->sb_so, NULL, &m0, &control, 0);
		SBLASTRECORDCHK(sb, __func__);
		if (error) {
			if (error != EJUSTRETURN) {
				if (m0)
					m_freem(m0);
				if (control != NULL && !sb_unix)
					m_freem(control);
				if (error_out)
					*error_out = error;
			}
			return (0);
		}
	}

	result = sbappendcontrol_internal(sb, m0, control);
	if (result == 0) {
		if (m0)
			m_freem(m0);
		if (control != NULL && !sb_unix)
			m_freem(control);
		if (error_out)
			*error_out = ENOBUFS;
	}

	return (result);
}

/*
 * Append a contiguous TCP data blob with TCP sequence number as control data
 * as a new msg to the receive socket buffer.
 */
int
sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum,
    int unordered)
{
	struct mbuf *m_eor = NULL;
	u_int32_t data_len = 0;
	int ret = 0;
	struct socket *so = sb->sb_so;

	VERIFY((m->m_flags & M_PKTHDR) && m_pktlen(m) > 0);
	VERIFY(so->so_msg_state != NULL);
	VERIFY(sb->sb_flags & SB_RECV);

	/* Keep the TCP sequence number in the mbuf pkthdr */
	m->m_pkthdr.msg_seq = seqnum;

	/* find last mbuf and set M_EOR */
	for (m_eor = m; ; m_eor = m_eor->m_next) {
		/*
		 * If the msg is unordered, we need to account for
		 * these bytes in the receive socket buffer size.
		 * Otherwise, the advertised receive window will shrink
		 * because of the additional unordered bytes added to
		 * the receive buffer.
		 */
		if (unordered) {
			m_eor->m_flags |= M_UNORDERED_DATA;
			data_len += m_eor->m_len;
			so->so_msg_state->msg_uno_bytes += m_eor->m_len;
		} else {
			m_eor->m_flags &= ~M_UNORDERED_DATA;
		}

		if (m_eor->m_next == NULL)
			break;
	}

	/* set EOR flag at end of byte blob */
	m_eor->m_flags |= M_EOR;

	/* expand the receive socket buffer to allow unordered data */
	if (unordered && !sbreserve(sb, sb->sb_hiwat + data_len)) {
		/*
		 * Could not allocate memory for the unordered data;
		 * this packet will have to be delivered in order.
		 */
		printf("%s: could not reserve space for unordered data\n",
		    __func__);
	}

	ret = sbappendrecord(sb, m);
	return (ret);
}

/*
 * A TCP stream may have message-based out-of-order delivery support, or
 * Multipath TCP support, or be a regular TCP socket; dispatch to the
 * matching receive-buffer append routine accordingly.
 */
int
sbappendstream_rcvdemux(struct socket *so, struct mbuf *m, uint32_t seqnum,
    int unordered)
{
	int ret = 0;

	if ((m != NULL) && (m_pktlen(m) <= 0)) {
		m_freem(m);
		return (ret);
	}

	if (so->so_flags & SOF_ENABLE_MSGS) {
		ret = sbappendmsgstream_rcv(&so->so_rcv, m, seqnum, unordered);
	}
#if MPTCP
	else if (so->so_flags & SOF_MPTCP_TRUE) {
		ret = sbappendmptcpstream_rcv(&so->so_rcv, m);
	}
#endif /* MPTCP */
	else {
		ret = sbappendstream(&so->so_rcv, m);
	}
	return (ret);
}

#if MPTCP
int
sbappendmptcpstream_rcv(struct sockbuf *sb, struct mbuf *m)
{
	struct socket *so = sb->sb_so;

	VERIFY(m == NULL || (m->m_flags & M_PKTHDR));
	/* SB_NOCOMPRESS must be set to prevent loss of M_PKTHDR data */
	VERIFY((sb->sb_flags & (SB_RECV|SB_NOCOMPRESS)) ==
	    (SB_RECV|SB_NOCOMPRESS));

	if (m == NULL || m_pktlen(m) == 0 || (sb->sb_flags & SB_DROP) ||
	    (so->so_state & SS_CANTRCVMORE)) {
		if (m != NULL)
			m_freem(m);
		return (0);
	}
	/* the socket is not closed, so SOF_MP_SUBFLOW must be set */
	VERIFY(so->so_flags & SOF_MP_SUBFLOW);

	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) {
		panic("%s: nextpkt %p || mb %p != lastrecord %p\n", __func__,
		    m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);
		/* NOTREACHED */
	}

	SBLASTMBUFCHK(sb, __func__);

	mptcp_adj_rmap(so, m);

	/* No filter support (SB_RECV) on mptcp subflow sockets */

	sbcompress(sb, m, sb->sb_mbtail);
	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, __func__);
	return (1);
}
#endif /* MPTCP */

/*
 * Append message to send socket buffer based on priority.
 */
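/*
 * Rough picture (illustrative only) of the per-priority layout used
 * below: each message is a chain of mbufs ending in M_EOR, messages are
 * linked through the m_nextpkt field of their first mbuf, and msgq_tail
 * points at the last mbuf overall:
 *
 *	msgq_head -> mA1 -> mA2 (M_EOR)
 *	   |m_nextpkt
 *	   v
 *	  mB1 -> mB2 (M_EOR)		<- msgq_lastmsg; msgq_tail = mB2
 */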
int
sbappendmsg_snd(struct sockbuf *sb, struct mbuf *m)
{
	struct socket *so = sb->sb_so;
	struct msg_priq *priq;
	int set_eor = 0;

	VERIFY(so->so_msg_state != NULL);

	if (m == NULL || (sb->sb_flags & SB_DROP) || so->so_msg_state == NULL) {
		if (m != NULL)
			m_freem(m);
		return (0);
	}

	/* m is known to be non-NULL after the check above */
	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord))
		panic("sbappendmsg_snd: nextpkt %p || mb %p != lastrecord %p\n",
		    m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);

	SBLASTMBUFCHK(sb, __func__);

	priq = &so->so_msg_state->msg_priq[m->m_pkthdr.msg_pri];

	/* note if we need to propagate M_EOR to the last mbuf */
	if (m->m_flags & M_EOR) {
		set_eor = 1;

		/* Reset M_EOR from the first mbuf */
		m->m_flags &= ~(M_EOR);
	}

	if (priq->msgq_head == NULL) {
		VERIFY(priq->msgq_tail == NULL && priq->msgq_lastmsg == NULL);
		priq->msgq_head = priq->msgq_lastmsg = m;
	} else {
		VERIFY(priq->msgq_tail->m_next == NULL);

		/* Check if the last message has M_EOR flag set */
		if (priq->msgq_tail->m_flags & M_EOR) {
			/* Insert as a new message */
			priq->msgq_lastmsg->m_nextpkt = m;

			/* move the lastmsg pointer */
			priq->msgq_lastmsg = m;
		} else {
			/* Append to the existing message */
			priq->msgq_tail->m_next = m;
		}
	}

	/* Update accounting and the queue tail pointer */

	while (m->m_next != NULL) {
		sballoc(sb, m);
		priq->msgq_bytes += m->m_len;
		m = m->m_next;
	}
	sballoc(sb, m);
	priq->msgq_bytes += m->m_len;

	if (set_eor) {
		m->m_flags |= M_EOR;

		/*
		 * Since user space cannot write a new msg
		 * without completing the previous one, we can
		 * reset this flag to start sending again.
		 */
		priq->msgq_flags &= ~(MSGQ_MSG_NOTDONE);
	}

	priq->msgq_tail = m;

	SBLASTRECORDCHK(sb, "sbappendmsg_snd 2");
	postevent(0, sb, EV_RWBYTES);
	return (1);
}

/*
 * Pull data from priority queues to the serial snd queue
 * right before sending.
 */
void
sbpull_unordered_data(struct socket *so, int32_t off, int32_t len)
{
	int32_t topull, i;
	struct msg_priq *priq = NULL;

	VERIFY(so->so_msg_state != NULL);

	topull = (off + len) - so->so_msg_state->msg_serial_bytes;

	i = MSG_PRI_MAX;
	while (i >= MSG_PRI_MIN && topull > 0) {
		struct mbuf *m = NULL, *mqhead = NULL, *mend = NULL;
		priq = &so->so_msg_state->msg_priq[i];
		if ((priq->msgq_flags & MSGQ_MSG_NOTDONE) &&
		    priq->msgq_head == NULL) {
			/*
			 * We were in the middle of sending
			 * a message and we have not seen the
			 * end of it.
			 */
			VERIFY(priq->msgq_lastmsg == NULL &&
			    priq->msgq_tail == NULL);
			return;
		}
		if (priq->msgq_head != NULL) {
			int32_t bytes = 0, topull_tmp = topull;
			/*
			 * We found a msg while scanning the priority
			 * queue from high to low priority.
			 */
			m = priq->msgq_head;
			mqhead = m;
			mend = m;

			/*
			 * Move bytes from the priority queue to the
			 * serial queue. Compute the number of bytes
			 * being added.
			 */
			while (mqhead->m_next != NULL && topull_tmp > 0) {
				bytes += mqhead->m_len;
				topull_tmp -= mqhead->m_len;
				mend = mqhead;
				mqhead = mqhead->m_next;
			}

			if (mqhead->m_next == NULL) {
				/*
				 * If we have only one more mbuf left,
				 * move the last mbuf of this message to
				 * serial queue and set the head of the
				 * queue to be the next message.
				 */
				bytes += mqhead->m_len;
				mend = mqhead;
				mqhead = m->m_nextpkt;
				if (!(mend->m_flags & M_EOR)) {
					/*
					 * We have not seen the end of
					 * this message, so we cannot
					 * pull any more.
					 */
					priq->msgq_flags |= MSGQ_MSG_NOTDONE;
				} else {
					/* Reset M_EOR */
					mend->m_flags &= ~(M_EOR);
				}
			} else {
				/* propagate the next msg pointer */
				mqhead->m_nextpkt = m->m_nextpkt;
			}
			priq->msgq_head = mqhead;

			/*
			 * If the lastmsg pointer points to
			 * the mbuf that is being dequeued, update
			 * it to point to the new head.
			 */
			if (priq->msgq_lastmsg == m)
				priq->msgq_lastmsg = priq->msgq_head;

			m->m_nextpkt = NULL;
			mend->m_next = NULL;

			if (priq->msgq_head == NULL) {
				/* Moved all messages, update tail */
				priq->msgq_tail = NULL;
				VERIFY(priq->msgq_lastmsg == NULL);
			}

			/* Move it to serial sb_mb queue */
			if (so->so_snd.sb_mb == NULL) {
				so->so_snd.sb_mb = m;
			} else {
				so->so_snd.sb_mbtail->m_next = m;
			}

			priq->msgq_bytes -= bytes;
			VERIFY(priq->msgq_bytes >= 0);
			sbwakeup(&so->so_snd);

			so->so_msg_state->msg_serial_bytes += bytes;
			so->so_snd.sb_mbtail = mend;
			so->so_snd.sb_lastrecord = so->so_snd.sb_mb;

			topull =
			    (off + len) - so->so_msg_state->msg_serial_bytes;

			if (priq->msgq_flags & MSGQ_MSG_NOTDONE)
				break;
		} else {
			--i;
		}
	}
	sblastrecordchk(&so->so_snd, "sbpull_unordered_data");
	sblastmbufchk(&so->so_snd, "sbpull_unordered_data");
}

/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.
 */
static inline void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor = 0, compress = (!(sb->sb_flags & SB_NOCOMPRESS));
	struct mbuf *o;

	if (m == NULL) {
		/* There is nothing to compress; just update the tail */
		for (; n->m_next != NULL; n = n->m_next)
			;
		sb->sb_mbtail = n;
		goto done;
	}

	while (m != NULL) {
		eor |= m->m_flags & M_EOR;
		if (compress && m->m_len == 0 && (eor == 0 ||
		    (((o = m->m_next) || (o = n)) && o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (compress && n != NULL && (n->m_flags & M_EOR) == 0 &&
#ifndef __APPLE__
		    M_WRITABLE(n) &&
#endif
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
			    m->m_type != MT_OOBDATA) {
				/* XXX: Probably don't need */
				sb->sb_ctl += m->m_len;
			}
			m = m_free(m);
			continue;
		}
		if (n != NULL)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = NULL;
	}
	if (eor != 0) {
		if (n != NULL)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
done:
	SBLASTMBUFCHK(sb, __func__);
	postevent(0, sb, EV_RWBYTES);
}
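
/*
 * Illustrative sketch (not part of the build) of the compression above:
 * a small mbuf m that fits in the trailing space of tail mbuf n is
 * copied into n and freed, e.g.
 *
 *	before:	n[len=100] -> m[len=20]
 *	after:	n[len=120]
 *
 * Empty mbufs are freed outright unless their M_EOR state cannot be
 * carried by a neighbouring mbuf of the same type.
 */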

void
sb_empty_assert(struct sockbuf *sb, const char *where)
{
	if (!(sb->sb_cc == 0 && sb->sb_mb == NULL && sb->sb_mbcnt == 0 &&
	    sb->sb_mbtail == NULL && sb->sb_lastrecord == NULL)) {
		panic("%s: sb %p so %p cc %d mbcnt %d mb %p mbtail %p "
		    "lastrecord %p\n", where, sb, sb->sb_so, sb->sb_cc,
		    sb->sb_mbcnt, sb->sb_mb, sb->sb_mbtail,
		    sb->sb_lastrecord);
		/* NOTREACHED */
	}
}

static void
sbflush_priq(struct msg_priq *priq)
{
	struct mbuf *m;
	m = priq->msgq_head;
	if (m != NULL)
		m_freem_list(m);
	priq->msgq_head = priq->msgq_tail = priq->msgq_lastmsg = NULL;
	priq->msgq_bytes = priq->msgq_flags = 0;
}

/*
 * Free all mbufs in a sockbuf.
 * Check that all resources are reclaimed.
 */
void
sbflush(struct sockbuf *sb)
{
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;
#ifdef notyet
	lck_mtx_t *mutex_held;
#endif
	u_int32_t i;

	/* so_usecount may be 0 if we get here from sofreelastref() */
	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 0) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}
#ifdef notyet
	/*
	 * XXX: This code is currently commented out, because we may get here
	 * as part of sofreelastref(), and at that time, pr_getlock() may no
	 * longer be able to return us the lock; this will be fixed in the
	 * future.
	 */
	if (so->so_proto->pr_getlock != NULL)
		mutex_held = (*so->so_proto->pr_getlock)(so, 0);
	else
		mutex_held = so->so_proto->pr_domain->dom_mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
#endif

	/*
	 * Obtain lock on the socket buffer (SB_LOCK).  This is required
	 * to prevent the socket buffer from being unexpectedly altered
	 * while it is used by another thread in socket send/receive.
	 *
	 * sblock() must not fail here, hence the assertion.
	 */
	(void) sblock(sb, SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT);
	VERIFY(sb->sb_flags & SB_LOCK);

	while (sb->sb_mbcnt > 0) {
		/*
		 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever.  Break out instead; the
		 * sb_empty_assert() below will panic on the inconsistency.
		 */
		if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		sbdrop(sb, (int)sb->sb_cc);
	}

	if (!(sb->sb_flags & SB_RECV) && (so->so_flags & SOF_ENABLE_MSGS)) {
		VERIFY(so->so_msg_state != NULL);
		for (i = MSG_PRI_MIN; i <= MSG_PRI_MAX; ++i) {
			sbflush_priq(&so->so_msg_state->msg_priq[i]);
		}
		so->so_msg_state->msg_serial_bytes = 0;
		so->so_msg_state->msg_uno_bytes = 0;
	}

	sb_empty_assert(sb, __func__);
	postevent(0, sb, EV_RWBYTES);

	sbunlock(sb, TRUE);	/* keep socket locked */
}

/*
 * Drop data from (the front of) a sockbuf.
 *
 * Use m_freem_list to free the mbuf structures under a single lock.
 * This is done by pruning the top of the chain from the body: we keep
 * track of how far into the chain we get, zero the two pertinent
 * pointers (m_nextpkt and m_next) at that point, update the socket
 * buffer to point at the new top of the chain, and release the pruned
 * area via m_freem_list.
 */
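/*
 * Illustrative sketch (not part of the build): sbdrop(sb, 5) on
 *
 *	sb_mb -> m1[len=3] -> m2[len=4]
 *
 * frees m1 through m_freem_list() and leaves sb_mb -> m2 with m_data
 * advanced by 2 and m_len reduced to 2.
 */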
void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *m, *free_list, *ml;
	struct mbuf *next, *last;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
#if MPTCP
	if ((m != NULL) && (len > 0) &&
	    (!(sb->sb_flags & SB_RECV)) &&
	    ((sb->sb_so->so_flags & SOF_MP_SUBFLOW) ||
	    ((SOCK_CHECK_DOM(sb->sb_so, PF_MULTIPATH)) &&
	    (SOCK_CHECK_PROTO(sb->sb_so, IPPROTO_TCP))))) {
		mptcp_preproc_sbdrop(m, (unsigned int)len);
	}
#endif /* MPTCP */
	KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_START), sb, len, 0, 0, 0);

	free_list = last = m;
	ml = (struct mbuf *)0;

	while (len > 0) {
		if (m == 0) {
			if (next == 0) {
				/*
				 * Temporarily replacing this panic with a
				 * printf, because it occurs occasionally
				 * when closing a socket and there is no
				 * harm in ignoring it.  This problem will
				 * be investigated further.
				 */
				/* panic("sbdrop"); */
				printf("sbdrop - count not zero\n");
				len = 0;
				/*
				 * Zero the counts.  If we have no mbufs,
				 * we have no data (PR-2986815)
				 */
				sb->sb_cc = 0;
				sb->sb_mbcnt = 0;
				if (!(sb->sb_flags & SB_RECV) &&
				    (sb->sb_so->so_flags & SOF_ENABLE_MSGS)) {
					sb->sb_so->so_msg_state->
					    msg_serial_bytes = 0;
				}
				break;
			}
			m = last = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
			    m->m_type != MT_OOBDATA)
				sb->sb_ctl -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);

		ml = m;
		m = m->m_next;
	}
	while (m && m->m_len == 0) {
		sbfree(sb, m);

		ml = m;
		m = m->m_next;
	}
	if (ml) {
		ml->m_next = (struct mbuf *)0;
		last->m_nextpkt = (struct mbuf *)0;
		m_freem_list(free_list);
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else {
		sb->sb_mb = next;
	}

	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part
	 * makes sure sb_lastrecord is up-to-date if we dropped
	 * part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL) {
		sb->sb_lastrecord = m;
	}

	postevent(0, sb, EV_RWBYTES);

	KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_END), sb, 0, 0, 0, 0);
}

/*
 * Drop a record off the front of a sockbuf
 * and move the next record to the front.
 */
void
sbdroprecord(struct sockbuf *sb)
{
	struct mbuf *m, *mn;

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			MFREE(m, mn);
			m = mn;
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
	postevent(0, sb, EV_RWBYTES);
}

/*
 * Create a "control" mbuf containing the specified data
 * with the specified type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MLEN)
		return ((struct mbuf *)NULL);
	if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL)
		return ((struct mbuf *)NULL);
	cp = mtod(m, struct cmsghdr *);
	VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
	/* XXX check size? */
	(void) memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
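
/*
 * Illustrative sketch (not part of the build): building a control mbuf
 * that carries, e.g., a TOS byte at the IP level.  The values here are
 * hypothetical; sbcreatecontrol() is the real call.
 */
#if 0
	u_char tos = 0x10;
	struct mbuf *control;

	control = sbcreatecontrol((caddr_t)&tos, sizeof (tos),
	    IP_TOS, IPPROTO_IP);
	if (control == NULL)
		return (ENOBUFS);	/* no mbuf, or data too big for MLEN */
#endif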
1858
1859struct mbuf **
1860sbcreatecontrol_mbuf(caddr_t p, int size, int type, int level, struct mbuf **mp)
1861{
1862	struct mbuf *m;
1863	struct cmsghdr *cp;
1864
1865	if (*mp == NULL) {
1866		*mp = sbcreatecontrol(p, size, type, level);
1867		return (mp);
1868	}
1869
1870	if (CMSG_SPACE((u_int)size) + (*mp)->m_len > MLEN) {
1871		mp = &(*mp)->m_next;
1872		*mp = sbcreatecontrol(p, size, type, level);
1873		return (mp);
1874	}
1875
1876	m = *mp;
1877
1878	cp = (struct cmsghdr *)(void *)(mtod(m, char *) + m->m_len);
1879	/* CMSG_SPACE ensures 32-bit alignment */
1880	VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
1881	m->m_len += CMSG_SPACE(size);
1882
1883	/* XXX check size? */
1884	(void) memcpy(CMSG_DATA(cp), p, size);
1885	cp->cmsg_len = CMSG_LEN(size);
1886	cp->cmsg_level = level;
1887	cp->cmsg_type = type;
1888
1889	return (mp);
1890}

/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
int
pru_abort_notsupp(struct socket *so)
{
#pragma unused(so)
	return (EOPNOTSUPP);
}

int
pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
{
#pragma unused(so, nam)
	return (EOPNOTSUPP);
}

int
pru_attach_notsupp(struct socket *so, int proto, struct proc *p)
{
#pragma unused(so, proto, p)
	return (EOPNOTSUPP);
}

int
pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(so, nam, p)
	return (EOPNOTSUPP);
}

int
pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(so, nam, p)
	return (EOPNOTSUPP);
}

int
pru_connect2_notsupp(struct socket *so1, struct socket *so2)
{
#pragma unused(so1, so2)
	return (EOPNOTSUPP);
}

int
pru_connectx_notsupp(struct socket *so, struct sockaddr_list **src_sl,
    struct sockaddr_list **dst_sl, struct proc *p, uint32_t ifscope,
    associd_t aid, connid_t *pcid, uint32_t flags, void *arg,
    uint32_t arglen)
{
#pragma unused(so, src_sl, dst_sl, p, ifscope, aid, pcid, flags, arg, arglen)
	return (EOPNOTSUPP);
}

int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, cmd, data, ifp, p)
	return (EOPNOTSUPP);
}

int
pru_detach_notsupp(struct socket *so)
{
#pragma unused(so)
	return (EOPNOTSUPP);
}

int
pru_disconnect_notsupp(struct socket *so)
{
#pragma unused(so)
	return (EOPNOTSUPP);
}

int
pru_disconnectx_notsupp(struct socket *so, associd_t aid, connid_t cid)
{
#pragma unused(so, aid, cid)
	return (EOPNOTSUPP);
}

int
pru_listen_notsupp(struct socket *so, struct proc *p)
{
#pragma unused(so, p)
	return (EOPNOTSUPP);
}

int
pru_peeloff_notsupp(struct socket *so, associd_t aid, struct socket **psop)
{
#pragma unused(so, aid, psop)
	return (EOPNOTSUPP);
}

int
pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
{
#pragma unused(so, nam)
	return (EOPNOTSUPP);
}

int
pru_rcvd_notsupp(struct socket *so, int flags)
{
#pragma unused(so, flags)
	return (EOPNOTSUPP);
}

int
pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
{
#pragma unused(so, m, flags)
	return (EOPNOTSUPP);
}

int
pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct proc *p)
{
#pragma unused(so, flags, m, addr, control, p)
	return (EOPNOTSUPP);
}

/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
int
pru_sense_null(struct socket *so, void *ub, int isstat64)
{
	if (isstat64 != 0) {
		struct stat64 *sb64;

		sb64 = (struct stat64 *)ub;
		sb64->st_blksize = so->so_snd.sb_hiwat;
	} else {
		struct stat *sb;

		sb = (struct stat *)ub;
		sb->st_blksize = so->so_snd.sb_hiwat;
	}

	return (0);
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags)
{
#pragma unused(so, addr, uio, top, control, flags)
	return (EOPNOTSUPP);
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
#pragma unused(so, paddr, uio, mp0, controlp, flagsp)
	return (EOPNOTSUPP);
}

int
pru_shutdown_notsupp(struct socket *so)
{
#pragma unused(so)
	return (EOPNOTSUPP);
}

int
pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
{
#pragma unused(so, nam)
	return (EOPNOTSUPP);
}

int
pru_sopoll_notsupp(struct socket *so, int events, kauth_cred_t cred, void *wql)
{
#pragma unused(so, events, cred, wql)
	return (EOPNOTSUPP);
}

int
pru_socheckopt_null(struct socket *so, struct sockopt *sopt)
{
#pragma unused(so, sopt)
	/*
	 * Allow all options for set/get by default.
	 */
	return (0);
}

void
pru_sanitize(struct pr_usrreqs *pru)
{
#define	DEFAULT(foo, bar)	if ((foo) == NULL) (foo) = (bar)
	DEFAULT(pru->pru_abort, pru_abort_notsupp);
	DEFAULT(pru->pru_accept, pru_accept_notsupp);
	DEFAULT(pru->pru_attach, pru_attach_notsupp);
	DEFAULT(pru->pru_bind, pru_bind_notsupp);
	DEFAULT(pru->pru_connect, pru_connect_notsupp);
	DEFAULT(pru->pru_connect2, pru_connect2_notsupp);
	DEFAULT(pru->pru_connectx, pru_connectx_notsupp);
	DEFAULT(pru->pru_control, pru_control_notsupp);
	DEFAULT(pru->pru_detach, pru_detach_notsupp);
	DEFAULT(pru->pru_disconnect, pru_disconnect_notsupp);
	DEFAULT(pru->pru_disconnectx, pru_disconnectx_notsupp);
	DEFAULT(pru->pru_listen, pru_listen_notsupp);
	DEFAULT(pru->pru_peeloff, pru_peeloff_notsupp);
	DEFAULT(pru->pru_peeraddr, pru_peeraddr_notsupp);
	DEFAULT(pru->pru_rcvd, pru_rcvd_notsupp);
	DEFAULT(pru->pru_rcvoob, pru_rcvoob_notsupp);
	DEFAULT(pru->pru_send, pru_send_notsupp);
	DEFAULT(pru->pru_sense, pru_sense_null);
	DEFAULT(pru->pru_shutdown, pru_shutdown_notsupp);
	DEFAULT(pru->pru_sockaddr, pru_sockaddr_notsupp);
	DEFAULT(pru->pru_sopoll, pru_sopoll_notsupp);
	DEFAULT(pru->pru_soreceive, pru_soreceive_notsupp);
	DEFAULT(pru->pru_sosend, pru_sosend_notsupp);
	DEFAULT(pru->pru_socheckopt, pru_socheckopt_null);
#undef DEFAULT
}
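
/*
 * Illustrative sketch (hypothetical protocol, not part of the original
 * source): a protocol that implements only a handful of entry points
 * can leave the rest NULL and let pru_sanitize() substitute the stubs
 * above:
 *
 *	static struct pr_usrreqs foo_usrreqs = {
 *		.pru_attach =	foo_attach,
 *		.pru_detach =	foo_detach,
 *		.pru_send =	foo_send,
 *	};
 *
 *	pru_sanitize(&foo_usrreqs);
 *
 * Afterwards every unset entry (accept, bind, connect, ...) points at
 * its *_notsupp counterpart, so callers get EOPNOTSUPP instead of
 * dereferencing a NULL handler.
 */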

/*
 * The following are implemented as macros on BSD but as functions
 * on Darwin.
 */

/*
 * Do we need to notify the other side when I/O is possible?
 */

int
sb_notify(struct sockbuf *sb)
{
	return (sb->sb_waiters > 0 ||
	    (sb->sb_flags & (SB_SEL|SB_ASYNC|SB_UPCALL|SB_KNOTE)));
}

/*
 * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
 * This is problematic if the fields are unsigned, as the difference can
 * wrap and appear negative (cc > hiwat or mbcnt > mbmax); we guard
 * against that by clamping a negative result to 0.
 */
int
sbspace(struct sockbuf *sb)
{
	int space = imin((int)(sb->sb_hiwat - sb->sb_cc),
	    (int)(sb->sb_mbmax - sb->sb_mbcnt));
	if (space < 0)
		space = 0;

	return (space);
}
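
/*
 * Worked example (illustrative numbers): with sb_hiwat = 8192,
 * sb_cc = 6000, sb_mbmax = 65536 and sb_mbcnt = 64000, sbspace()
 * returns imin(8192 - 6000, 65536 - 64000) = imin(2192, 1536) = 1536,
 * i.e. mbuf overhead, not just data, can be the limiting factor.
 */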

/*
 * If this socket has priority queues, check if there is enough
 * space in the priority queue for this msg.
 */
int
msgq_sbspace(struct socket *so, struct mbuf *control)
{
	int space = 0, error;
	u_int32_t msgpri;
	VERIFY(so->so_type == SOCK_STREAM && SOCK_PROTO(so) == IPPROTO_TCP &&
	    control != NULL);
	error = tcp_get_msg_priority(control, &msgpri);
	if (error)
		return (0);
	space = (so->so_snd.sb_idealsize / MSG_PRI_COUNT) -
	    so->so_msg_state->msg_priq[msgpri].msgq_bytes;
	if (space < 0)
		space = 0;
	return (space);
}

/* do we have to send all at once on a socket? */
int
sosendallatonce(struct socket *so)
{
	return (so->so_proto->pr_flags & PR_ATOMIC);
}

/* can we read something from so? */
int
soreadable(struct socket *so)
{
	return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
	    (so->so_state & SS_CANTRCVMORE) ||
	    so->so_comp.tqh_first || so->so_error);
}

/* can we write something to so? */
int
sowriteable(struct socket *so)
{
	return ((!so_wait_for_if_feedback(so) &&
	    sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat &&
	    ((so->so_state & SS_ISCONNECTED) ||
	    (so->so_proto->pr_flags & PR_CONNREQUIRED) == 0)) ||
	    (so->so_state & SS_CANTSENDMORE) ||
	    so->so_error);
}

/* adjust counters in sb reflecting allocation of m */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{
	u_int32_t cnt = 1;
	sb->sb_cc += m->m_len;
	if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
	    m->m_type != MT_OOBDATA)
		sb->sb_ctl += m->m_len;
	sb->sb_mbcnt += MSIZE;

	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt += m->m_ext.ext_size;
		cnt += (m->m_ext.ext_size >> MSIZESHIFT);
	}
	OSAddAtomic(cnt, &total_sbmb_cnt);
	VERIFY(total_sbmb_cnt > 0);
}
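
/*
 * Worked example (illustrative numbers, assuming the usual 256-byte
 * MSIZE and hence MSIZESHIFT = 8): appending a 1500-byte packet stored
 * in one mbuf with a 2048-byte external cluster adds 1500 to sb_cc and
 * MSIZE + 2048 = 2304 to sb_mbcnt, and raises total_sbmb_cnt by
 * 1 + (2048 >> 8) = 9.
 */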

/* adjust counters in sb reflecting freeing of m */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{
	int cnt = -1;

	sb->sb_cc -= m->m_len;
	if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
	    m->m_type != MT_OOBDATA)
		sb->sb_ctl -= m->m_len;
	sb->sb_mbcnt -= MSIZE;
	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt -= m->m_ext.ext_size;
		cnt -= (m->m_ext.ext_size >> MSIZESHIFT);
	}
	OSAddAtomic(cnt, &total_sbmb_cnt);
	VERIFY(total_sbmb_cnt >= 0);
}

/*
 * Set lock on sockbuf sb; sleep if lock is already held.
 * Unless SB_NOINTR is set on the sockbuf, the sleep is interruptible.
 * Returns with an error, and without the lock held, if the sleep is
 * interrupted.
 */
int
sblock(struct sockbuf *sb, uint32_t flags)
{
	boolean_t nointr = ((sb->sb_flags & SB_NOINTR) || (flags & SBL_NOINTR));
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;
	void *wchan;
	int error = 0;

	VERIFY((flags & SBL_VALID) == flags);

	/* so_usecount may be 0 if we get here from sofreelastref() */
	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 0) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if ((sb->sb_flags & SB_LOCK) && !(flags & SBL_WAIT))
		return (EWOULDBLOCK);

	/*
	 * We may get here from sorflush(), in which case "sb" may not
	 * point to the real socket buffer.  Use the actual socket buffer
	 * address from the socket instead.
	 */
	wchan = (sb->sb_flags & SB_RECV) ?
	    &so->so_rcv.sb_flags : &so->so_snd.sb_flags;

	while (sb->sb_flags & SB_LOCK) {
		lck_mtx_t *mutex_held;

		/*
		 * XXX: This code should be moved up above outside of this
		 * loop; however, we may get here as part of sofreelastref(),
		 * and at that time pr_getlock() may no longer be able to
		 * return us the lock.  This will be fixed in the future.
		 */
		if (so->so_proto->pr_getlock != NULL)
			mutex_held = (*so->so_proto->pr_getlock)(so, 0);
		else
			mutex_held = so->so_proto->pr_domain->dom_mtx;

		lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

		sb->sb_wantlock++;
		VERIFY(sb->sb_wantlock != 0);

		error = msleep(wchan, mutex_held,
		    nointr ? PSOCK : PSOCK | PCATCH,
		    nointr ? "sb_lock_nointr" : "sb_lock", NULL);

		VERIFY(sb->sb_wantlock != 0);
		sb->sb_wantlock--;

		if (error == 0 && (so->so_flags & SOF_DEFUNCT) &&
		    !(flags & SBL_IGNDEFUNCT)) {
			error = EBADF;
			SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
			    "(%d)\n", __func__, proc_selfpid(),
			    (uint64_t)VM_KERNEL_ADDRPERM(so),
			    SOCK_DOM(so), SOCK_TYPE(so), error));
		}

		if (error != 0)
			return (error);
	}
	sb->sb_flags |= SB_LOCK;
	return (0);
}
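
/*
 * Illustrative sketch (assumption, not part of the original source):
 * a typical caller holds the sockbuf lock for the duration of an
 * operation and then releases only the sockbuf lock, keeping the
 * socket mutex held, by passing TRUE for keeplocked:
 *
 *	error = sblock(&so->so_snd, SBL_WAIT);
 *	if (error != 0)
 *		return (error);
 *	...manipulate so->so_snd...
 *	sbunlock(&so->so_snd, TRUE);
 */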

/*
 * Release lock on sockbuf sb
 */
void
sbunlock(struct sockbuf *sb, boolean_t keeplocked)
{
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;

	/* so_usecount may be 0 if we get here from sofreelastref() */
	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 0) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	VERIFY(sb->sb_flags & SB_LOCK);
	sb->sb_flags &= ~SB_LOCK;

	if (sb->sb_wantlock > 0) {
		/*
		 * We may get here from sorflush(), in which case "sb" may not
		 * point to the real socket buffer.  Use the actual socket
		 * buffer address from the socket instead.
		 */
		wakeup((sb->sb_flags & SB_RECV) ? &so->so_rcv.sb_flags :
		    &so->so_snd.sb_flags);
	}

	if (!keeplocked) {	/* unlock on exit */
		lck_mtx_t *mutex_held;

		if (so->so_proto->pr_getlock != NULL)
			mutex_held = (*so->so_proto->pr_getlock)(so, 0);
		else
			mutex_held = so->so_proto->pr_domain->dom_mtx;

		lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

		VERIFY(so->so_usecount != 0);
		so->so_usecount--;
		so->unlock_lr[so->next_unlock_lr] = lr_saved;
		so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
		lck_mtx_unlock(mutex_held);
	}
}

void
sorwakeup(struct socket *so)
{
	if (sb_notify(&so->so_rcv))
		sowakeup(so, &so->so_rcv);
}

void
sowwakeup(struct socket *so)
{
	if (sb_notify(&so->so_snd))
		sowakeup(so, &so->so_snd);
}

void
soevent(struct socket *so, long hint)
{
	if (so->so_flags & SOF_KNOTE)
		KNOTE(&so->so_klist, hint);

	soevupcall(so, hint);

	/* Don't post an event if this is a subflow socket */
	if ((hint & SO_FILT_HINT_IFDENIED) && !(so->so_flags & SOF_MP_SUBFLOW))
		soevent_ifdenied(so);
}

void
soevupcall(struct socket *so, u_int32_t hint)
{
	void (*so_event)(struct socket *, void *, uint32_t);

	if ((so_event = so->so_event) != NULL) {
		caddr_t so_eventarg = so->so_eventarg;

		hint &= so->so_eventmask;
		if (hint != 0) {
			socket_unlock(so, 0);
			/*
			 * Use the snapshot taken above; so_event may be
			 * cleared once the socket has been unlocked.
			 */
			so_event(so, so_eventarg, hint);
			socket_lock(so, 0);
		}
	}
}

static void
soevent_ifdenied(struct socket *so)
{
	struct kev_netpolicy_ifdenied ev_ifdenied;

	bzero(&ev_ifdenied, sizeof (ev_ifdenied));
	/*
	 * The event consumer is interested in the effective {upid,pid,uuid}
	 * info, which can differ from that of the process that most recently
	 * performed a system call on the socket, e.g. when the socket is
	 * delegated.
	 */
	if (so->so_flags & SOF_DELEGATED) {
		ev_ifdenied.ev_data.eupid = so->e_upid;
		ev_ifdenied.ev_data.epid = so->e_pid;
		uuid_copy(ev_ifdenied.ev_data.euuid, so->e_uuid);
	} else {
		ev_ifdenied.ev_data.eupid = so->last_upid;
		ev_ifdenied.ev_data.epid = so->last_pid;
		uuid_copy(ev_ifdenied.ev_data.euuid, so->last_uuid);
	}

	if (++so->so_ifdenied_notifies > 1) {
		/*
		 * Allow for at most one kernel event to be generated per
		 * socket; so_ifdenied_notifies is reset upon changes in
		 * the UUID policy.  See comments in inp_update_policy.
		 */
		if (net_io_policy_log) {
			uuid_string_t buf;

			uuid_unparse(ev_ifdenied.ev_data.euuid, buf);
			log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %d "
			    "euuid %s%s has %d redundant events suppressed\n",
			    __func__, so->last_pid,
			    (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
			    SOCK_TYPE(so), ev_ifdenied.ev_data.epid, buf,
			    ((so->so_flags & SOF_DELEGATED) ?
			    " [delegated]" : ""), so->so_ifdenied_notifies);
		}
	} else {
		if (net_io_policy_log) {
			uuid_string_t buf;

			uuid_unparse(ev_ifdenied.ev_data.euuid, buf);
			log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %d "
			    "euuid %s%s event posted\n", __func__,
			    so->last_pid, (uint64_t)VM_KERNEL_ADDRPERM(so),
			    SOCK_DOM(so), SOCK_TYPE(so),
			    ev_ifdenied.ev_data.epid, buf,
			    ((so->so_flags & SOF_DELEGATED) ?
			    " [delegated]" : ""));
		}
		netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data,
		    sizeof (ev_ifdenied));
	}
}

/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
 */
struct sockaddr *
dup_sockaddr(struct sockaddr *sa, int canwait)
{
	struct sockaddr *sa2;

	MALLOC(sa2, struct sockaddr *, sa->sa_len, M_SONAME,
	    canwait ? M_WAITOK : M_NOWAIT);
	if (sa2)
		bcopy(sa, sa2, sa->sa_len);
	return (sa2);
}
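
/*
 * Illustrative sketch (assumption, not part of the original source):
 * callers typically duplicate a peer address so that it outlives the
 * mbuf chain it arrived in, and release the copy with FREE() when done:
 *
 *	struct sockaddr *sa2;
 *
 *	if ((sa2 = dup_sockaddr(sa, 1)) == NULL)
 *		return (ENOMEM);
 *	...
 *	FREE(sa2, M_SONAME);
 */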

/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
	xso->xso_len = sizeof (*xso);
	xso->xso_so = (_XSOCKET_PTR(struct socket *))VM_KERNEL_ADDRPERM(so);
	xso->so_type = so->so_type;
	xso->so_options = (short)(so->so_options & 0xffff);
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = (_XSOCKET_PTR(caddr_t))VM_KERNEL_ADDRPERM(so->so_pcb);
	if (so->so_proto) {
		xso->xso_protocol = SOCK_PROTO(so);
		xso->xso_family = SOCK_DOM(so);
	} else {
		xso->xso_protocol = xso->xso_family = 0;
	}
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_pgid;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = kauth_cred_getuid(so->so_cred);
}

void
sotoxsocket64(struct socket *so, struct xsocket64 *xso)
{
	xso->xso_len = sizeof (*xso);
	xso->xso_so = (u_int64_t)VM_KERNEL_ADDRPERM(so);
	xso->so_type = so->so_type;
	xso->so_options = (short)(so->so_options & 0xffff);
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = (u_int64_t)VM_KERNEL_ADDRPERM(so->so_pcb);
	if (so->so_proto) {
		xso->xso_protocol = SOCK_PROTO(so);
		xso->xso_family = SOCK_DOM(so);
	} else {
		xso->xso_protocol = xso->xso_family = 0;
	}
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_pgid;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = kauth_cred_getuid(so->so_cred);
}

/*
 * This does the same for sockbufs.  Note that the xsockbuf structure,
 * since it is always embedded in a socket, does not include a self
 * pointer nor a length.  We make this entry point public in case
 * some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{
	xsb->sb_cc = sb->sb_cc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = (short)
	    ((sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick);
	if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0)
		xsb->sb_timeo = 1;
}
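
/*
 * Worked example (illustrative numbers, assuming hz = 100 and thus
 * tick = 10000 microseconds): a timeout of 1.5 seconds (tv_sec = 1,
 * tv_usec = 500000) is exported as (1 * 100) + 500000 / 10000 = 150
 * ticks, while a sub-tick timeout such as tv_usec = 5000 would round
 * to 0 and is therefore reported as 1.
 */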

/*
 * Based on the policy set by an all-knowing decision maker, throttle
 * sockets that have been marked as belonging to a "background" process.
 */
int
soisthrottled(struct socket *so)
{
	/*
	 * On non-embedded platforms we rely on implicit throttling by the
	 * application, as we're missing the system-wide "decision maker".
	 */
	return (so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND);
}

int
soisprivilegedtraffic(struct socket *so)
{
	return ((so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS) ? 1 : 0);
}

int
soissrcbackground(struct socket *so)
{
	return ((so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND) ||
	    IS_SO_TC_BACKGROUND(so->so_traffic_class));
}

/*
 * Here is the definition of some of the basic objects in the kern.ipc
 * branch of the MIB.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc,
	CTLFLAG_RW|CTLFLAG_LOCKED|CTLFLAG_ANYBODY, 0, "IPC");

/* Check that the maximum socket buffer size is within a range */
static int
sysctl_sb_max SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	u_int32_t new_value;
	int changed = 0;
	int error = sysctl_io_number(req, sb_max, sizeof (u_int32_t),
	    &new_value, &changed);
	if (!error && changed) {
		if (new_value > LOW_SB_MAX && new_value <= high_sb_max) {
			sb_max = new_value;
		} else {
			error = ERANGE;
		}
	}
	return (error);
}

static int
sysctl_io_policy_throttled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int i, err;

	i = net_io_policy_throttled;

	err = sysctl_handle_int(oidp, &i, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL)
		return (err);

	if (i != net_io_policy_throttled)
		SOTHROTTLELOG(("throttle: network IO policy throttling is "
		    "now %s\n", i ? "ON" : "OFF"));

	net_io_policy_throttled = i;

	return (err);
}

SYSCTL_PROC(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&sb_max, 0, &sysctl_sb_max, "IU", "Maximum socket buffer size");
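
/*
 * Illustrative usage from userland (assumption, not part of the
 * original source): the handler above runs whenever the tunable is
 * read or written through sysctl(8), e.g.:
 *
 *	sysctl kern.ipc.maxsockbuf
 *	sysctl -w kern.ipc.maxsockbuf=4194304
 *
 * Writes outside the range (LOW_SB_MAX, high_sb_max] fail with ERANGE.
 */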

SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets,
	CTLFLAG_RD | CTLFLAG_LOCKED, &maxsockets, 0,
	"Maximum number of sockets available");

SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor,
	CTLFLAG_RW | CTLFLAG_LOCKED, &sb_efficiency, 0, "");

SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters,
	CTLFLAG_RD | CTLFLAG_LOCKED, &nmbclusters, 0, "");

SYSCTL_INT(_kern_ipc, OID_AUTO, njcl,
	CTLFLAG_RD | CTLFLAG_LOCKED, &njcl, 0, "");

SYSCTL_INT(_kern_ipc, OID_AUTO, njclbytes,
	CTLFLAG_RD | CTLFLAG_LOCKED, &njclbytes, 0, "");

SYSCTL_INT(_kern_ipc, KIPC_SOQLIMITCOMPAT, soqlimitcompat,
	CTLFLAG_RW | CTLFLAG_LOCKED, &soqlimitcompat, 1,
	"Enable socket queue limit compatibility");

SYSCTL_INT(_kern_ipc, OID_AUTO, soqlencomp, CTLFLAG_RW | CTLFLAG_LOCKED,
	&soqlencomp, 0, "Listen backlog represents only complete queue");

SYSCTL_NODE(_kern_ipc, OID_AUTO, io_policy, CTLFLAG_RW, 0, "network IO policy");

SYSCTL_PROC(_kern_ipc_io_policy, OID_AUTO, throttled,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &net_io_policy_throttled, 0,
	sysctl_io_policy_throttled, "I", "");

SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED,
	&net_io_policy_log, 0, "");

#if CONFIG_PROC_UUID_POLICY
SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, uuid, CTLFLAG_RW | CTLFLAG_LOCKED,
	&net_io_policy_uuid, 0, "");
#endif /* CONFIG_PROC_UUID_POLICY */