1/*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1982, 1986, 1988, 1990, 1993
31 *	The Regents of the University of California.  All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 *    notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 *    notice, this list of conditions and the following disclaimer in the
40 *    documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 *    must display the following acknowledgement:
43 *	This product includes software developed by the University of
44 *	California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 *    may be used to endorse or promote products derived from this software
47 *    without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
62 */
63/*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections.  This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/domain.h>
73#include <sys/kernel.h>
74#include <sys/proc_internal.h>
75#include <sys/kauth.h>
76#include <sys/malloc.h>
77#include <sys/mbuf.h>
78#include <sys/mcache.h>
79#include <sys/protosw.h>
80#include <sys/stat.h>
81#include <sys/socket.h>
82#include <sys/socketvar.h>
83#include <sys/signalvar.h>
84#include <sys/sysctl.h>
85#include <sys/syslog.h>
86#include <sys/ev.h>
87#include <kern/locks.h>
88#include <net/route.h>
89#include <net/content_filter.h>
90#include <netinet/in.h>
91#include <netinet/in_pcb.h>
92#include <sys/kdebug.h>
93#include <libkern/OSAtomic.h>
94
95#if CONFIG_MACF
96#include <security/mac_framework.h>
97#endif
98
99#include <mach/vm_param.h>
100
101#if MPTCP
102#include <netinet/mptcp_var.h>
103#endif
104
105#define	DBG_FNC_SBDROP		NETDBG_CODE(DBG_NETSOCK, 4)
106#define	DBG_FNC_SBAPPEND	NETDBG_CODE(DBG_NETSOCK, 5)
107
108static inline void sbcompress(struct sockbuf *, struct mbuf *, struct mbuf *);
109static struct socket *sonewconn_internal(struct socket *, int);
110static int sbappendaddr_internal(struct sockbuf *, struct sockaddr *,
111    struct mbuf *, struct mbuf *);
112static int sbappendcontrol_internal(struct sockbuf *, struct mbuf *,
113    struct mbuf *);
114static void soevent_ifdenied(struct socket *);
115
116/*
117 * Primitive routines for operating on sockets and socket buffers
118 */
119static int soqlimitcompat = 1;
120static int soqlencomp = 0;
121
122/*
123 * Based on the number of mbuf clusters configured, high_sb_max and sb_max can
124 * get scaled up or down to suit that memory configuration. high_sb_max is a
125 * higher limit on sb_max that is checked when sb_max gets set through sysctl.
126 */
127
128u_int32_t	sb_max = SB_MAX;		/* XXX should be static */
129u_int32_t	high_sb_max = SB_MAX;
130
131static	u_int32_t sb_efficiency = 8;	/* parameter for sbreserve() */
132int32_t total_sbmb_cnt __attribute__((aligned(8))) = 0;
133int32_t total_sbmb_cnt_peak __attribute__((aligned(8))) = 0;
134int64_t sbmb_limreached __attribute__((aligned(8))) = 0;
135
136/* Control whether to throttle sockets eligible to be throttled */
137__private_extern__ u_int32_t net_io_policy_throttled = 0;
138static int sysctl_io_policy_throttled SYSCTL_HANDLER_ARGS;
139
140u_int32_t net_io_policy_log = 0;	/* log socket policy changes */
141#if CONFIG_PROC_UUID_POLICY
142u_int32_t net_io_policy_uuid = 1;	/* enable UUID socket policy */
143#endif /* CONFIG_PROC_UUID_POLICY */
144
145/*
146 * Procedures to manipulate state flags of socket
147 * and do appropriate wakeups.  Normal sequence from the
148 * active (originating) side is that soisconnecting() is
149 * called during processing of connect() call,
150 * resulting in an eventual call to soisconnected() if/when the
151 * connection is established.  When the connection is torn down
152 * soisdisconnecting() is called during processing of disconnect() call,
153 * and soisdisconnected() is called when the connection to the peer
154 * is totally severed.  The semantics of these routines are such that
155 * connectionless protocols can call soisconnected() and soisdisconnected()
156 * only, bypassing the in-progress calls when setting up a ``connection''
157 * takes no time.
158 *
159 * From the passive side, a socket is created with
160 * two queues of sockets: so_incomp for connections in progress
161 * and so_comp for connections already made and awaiting user acceptance.
162 * As a protocol is preparing incoming connections, it creates a socket
163 * structure queued on so_incomp by calling sonewconn().  When the connection
164 * is established, soisconnected() is called, and transfers the
165 * socket structure to so_comp, making it available to accept().
166 *
167 * If a socket is closed with sockets on either
168 * so_incomp or so_comp, these sockets are dropped.
169 *
170 * If higher level protocols are implemented in
171 * the kernel, the wakeups done here will sometimes
172 * cause software-interrupt process scheduling.
173 */
174void
175soisconnecting(struct socket *so)
176{
177
178	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
179	so->so_state |= SS_ISCONNECTING;
180
181	sflt_notify(so, sock_evt_connecting, NULL);
182}
183
184void
185soisconnected(struct socket *so)
186{
187	struct socket *head = so->so_head;
188
189	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
190	so->so_state |= SS_ISCONNECTED;
191
192	sflt_notify(so, sock_evt_connected, NULL);
193
194	if (head && (so->so_state & SS_INCOMP)) {
195		so->so_state &= ~SS_INCOMP;
196		so->so_state |= SS_COMP;
197		if (head->so_proto->pr_getlock != NULL) {
198			socket_unlock(so, 0);
199			socket_lock(head, 1);
200		}
201		postevent(head, 0, EV_RCONN);
202		TAILQ_REMOVE(&head->so_incomp, so, so_list);
203		head->so_incqlen--;
204		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
205		sorwakeup(head);
206		wakeup_one((caddr_t)&head->so_timeo);
207		if (head->so_proto->pr_getlock != NULL) {
208			socket_unlock(head, 1);
209			socket_lock(so, 0);
210		}
211	} else {
212		postevent(so, 0, EV_WCONN);
213		wakeup((caddr_t)&so->so_timeo);
214		sorwakeup(so);
215		sowwakeup(so);
216		soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CONNECTED |
217		    SO_FILT_HINT_CONNINFO_UPDATED);
218	}
219}
220
221void
222soisdisconnecting(struct socket *so)
223{
224	so->so_state &= ~SS_ISCONNECTING;
225	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
226	soevent(so, SO_FILT_HINT_LOCKED);
227	sflt_notify(so, sock_evt_disconnecting, NULL);
228	wakeup((caddr_t)&so->so_timeo);
229	sowwakeup(so);
230	sorwakeup(so);
231}
232
233void
234soisdisconnected(struct socket *so)
235{
236	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
237	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
238	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED |
239	    SO_FILT_HINT_CONNINFO_UPDATED);
240	sflt_notify(so, sock_evt_disconnected, NULL);
241	wakeup((caddr_t)&so->so_timeo);
242	sowwakeup(so);
243	sorwakeup(so);
244
245#if CONTENT_FILTER
246	/* Notify content filters as soon as we cannot send/receive data */
247	cfil_sock_notify_shutdown(so, SHUT_RDWR);
248#endif /* CONTENT_FILTER */
249}
250
251/*
252 * This function will issue a wakeup like soisdisconnected but it will not
253 * notify the socket filters. This will avoid unlocking the socket
254 * in the midst of closing it.
255 */
256void
257sodisconnectwakeup(struct socket *so)
258{
259	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
260	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
261	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED |
262	    SO_FILT_HINT_CONNINFO_UPDATED);
263	wakeup((caddr_t)&so->so_timeo);
264	sowwakeup(so);
265	sorwakeup(so);
266
267#if CONTENT_FILTER
268	/* Notify content filters as soon as we cannot send/receive data */
269	cfil_sock_notify_shutdown(so, SHUT_RDWR);
270#endif /* CONTENT_FILTER */
271}
272
273/*
274 * When an attempt at a new connection is noted on a socket
275 * which accepts connections, sonewconn is called.  If the
276 * connection is possible (subject to space constraints, etc.)
277 * then we allocate a new structure, propoerly linked into the
278 * data structure of the original socket, and return this.
279 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
280 */
281static struct socket *
282sonewconn_internal(struct socket *head, int connstatus)
283{
284	int so_qlen, error = 0;
285	struct socket *so;
286	lck_mtx_t *mutex_held;
287
288	if (head->so_proto->pr_getlock != NULL)
289		mutex_held = (*head->so_proto->pr_getlock)(head, 0);
290	else
291		mutex_held = head->so_proto->pr_domain->dom_mtx;
292	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
293
294	if (!soqlencomp) {
295		/*
296		 * This is the default case; so_qlen represents the
297		 * sum of both incomplete and completed queues.
298		 */
299		so_qlen = head->so_qlen;
300	} else {
301		/*
302		 * When kern.ipc.soqlencomp is set to 1, so_qlen
303		 * represents only the completed queue.  Since we
304		 * cannot let the incomplete queue goes unbounded
305		 * (in case of SYN flood), we cap the incomplete
306		 * queue length to at most somaxconn, and use that
307		 * as so_qlen so that we fail immediately below.
308		 */
309		so_qlen = head->so_qlen - head->so_incqlen;
310		if (head->so_incqlen > somaxconn)
311			so_qlen = somaxconn;
312	}
313
314	if (so_qlen >=
315	    (soqlimitcompat ? head->so_qlimit : (3 * head->so_qlimit / 2)))
316		return ((struct socket *)0);
317	so = soalloc(1, SOCK_DOM(head), head->so_type);
318	if (so == NULL)
319		return ((struct socket *)0);
320	/* check if head was closed during the soalloc */
321	if (head->so_proto == NULL) {
322		sodealloc(so);
323		return ((struct socket *)0);
324	}
325
326	so->so_type = head->so_type;
327	so->so_options = head->so_options &~ SO_ACCEPTCONN;
328	so->so_linger = head->so_linger;
329	so->so_state = head->so_state | SS_NOFDREF;
330	so->so_proto = head->so_proto;
331	so->so_timeo = head->so_timeo;
332	so->so_pgid  = head->so_pgid;
333	kauth_cred_ref(head->so_cred);
334	so->so_cred = head->so_cred;
335	so->last_pid = head->last_pid;
336	so->last_upid = head->last_upid;
337	memcpy(so->last_uuid, head->last_uuid, sizeof (so->last_uuid));
338	if (head->so_flags & SOF_DELEGATED) {
339		so->e_pid = head->e_pid;
340		so->e_upid = head->e_upid;
341		memcpy(so->e_uuid, head->e_uuid, sizeof (so->e_uuid));
342	}
343	/* inherit socket options stored in so_flags */
344	so->so_flags = head->so_flags &
345	    (SOF_NOSIGPIPE | SOF_NOADDRAVAIL | SOF_REUSESHAREUID |
346	    SOF_NOTIFYCONFLICT | SOF_BINDRANDOMPORT | SOF_NPX_SETOPTSHUT |
347	    SOF_NODEFUNCT | SOF_PRIVILEGED_TRAFFIC_CLASS| SOF_NOTSENT_LOWAT |
348	    SOF_USELRO | SOF_DELEGATED);
349	so->so_usecount = 1;
350	so->next_lock_lr = 0;
351	so->next_unlock_lr = 0;
352
353	so->so_rcv.sb_flags |= SB_RECV;	/* XXX */
354	so->so_rcv.sb_so = so->so_snd.sb_so = so;
355	TAILQ_INIT(&so->so_evlist);
356
357#if CONFIG_MACF_SOCKET
358	mac_socket_label_associate_accept(head, so);
359#endif
360
361	/* inherit traffic management properties of listener */
362	so->so_traffic_mgt_flags =
363	    head->so_traffic_mgt_flags & (TRAFFIC_MGT_SO_BACKGROUND);
364	so->so_background_thread = head->so_background_thread;
365	so->so_traffic_class = head->so_traffic_class;
366
367	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
368		sodealloc(so);
369		return ((struct socket *)0);
370	}
371	so->so_rcv.sb_flags |= (head->so_rcv.sb_flags & SB_USRSIZE);
372	so->so_snd.sb_flags |= (head->so_snd.sb_flags & SB_USRSIZE);
373
374	/*
375	 * Must be done with head unlocked to avoid deadlock
376	 * for protocol with per socket mutexes.
377	 */
378	if (head->so_proto->pr_unlock)
379		socket_unlock(head, 0);
380	if (((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL) != 0) ||
381	    error) {
382		sodealloc(so);
383		if (head->so_proto->pr_unlock)
384			socket_lock(head, 0);
385		return ((struct socket *)0);
386	}
387	if (head->so_proto->pr_unlock) {
388		socket_lock(head, 0);
389		/*
390		 * Radar 7385998 Recheck that the head is still accepting
391		 * to avoid race condition when head is getting closed.
392		 */
393		if ((head->so_options & SO_ACCEPTCONN) == 0) {
394			so->so_state &= ~SS_NOFDREF;
395			soclose(so);
396			return ((struct socket *)0);
397		}
398	}
399
400	atomic_add_32(&so->so_proto->pr_domain->dom_refs, 1);
401
402	/* Insert in head appropriate lists */
403	so->so_head = head;
404
405	/*
406	 * Since this socket is going to be inserted into the incomp
407	 * queue, it can be picked up by another thread in
408	 * tcp_dropdropablreq to get dropped before it is setup..
409	 * To prevent this race, set in-progress flag which can be
410	 * cleared later
411	 */
412	so->so_flags |= SOF_INCOMP_INPROGRESS;
413
414	if (connstatus) {
415		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
416		so->so_state |= SS_COMP;
417	} else {
418		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
419		so->so_state |= SS_INCOMP;
420		head->so_incqlen++;
421	}
422	head->so_qlen++;
423
424	/* Attach socket filters for this protocol */
425	sflt_initsock(so);
426
427	if (connstatus) {
428		so->so_state |= connstatus;
429		sorwakeup(head);
430		wakeup((caddr_t)&head->so_timeo);
431	}
432	return (so);
433}
434
435
436struct socket *
437sonewconn(struct socket *head, int connstatus, const struct sockaddr *from)
438{
439	int error = sflt_connectin(head, from);
440	if (error) {
441		return (NULL);
442	}
443
444	return (sonewconn_internal(head, connstatus));
445}
446
447/*
448 * Socantsendmore indicates that no more data will be sent on the
449 * socket; it would normally be applied to a socket when the user
450 * informs the system that no more data is to be sent, by the protocol
451 * code (in case PRU_SHUTDOWN).  Socantrcvmore indicates that no more data
452 * will be received, and will normally be applied to the socket by a
453 * protocol when it detects that the peer will send no more data.
454 * Data queued for reading in the socket may yet be read.
455 */
456
457void
458socantsendmore(struct socket *so)
459{
460	so->so_state |= SS_CANTSENDMORE;
461	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CANTSENDMORE);
462	sflt_notify(so, sock_evt_cantsendmore, NULL);
463	sowwakeup(so);
464}
465
466void
467socantrcvmore(struct socket *so)
468{
469	so->so_state |= SS_CANTRCVMORE;
470	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CANTRCVMORE);
471	sflt_notify(so, sock_evt_cantrecvmore, NULL);
472	sorwakeup(so);
473}
474
475/*
476 * Wait for data to arrive at/drain from a socket buffer.
477 */
478int
479sbwait(struct sockbuf *sb)
480{
481	boolean_t nointr = (sb->sb_flags & SB_NOINTR);
482	void *lr_saved = __builtin_return_address(0);
483	struct socket *so = sb->sb_so;
484	lck_mtx_t *mutex_held;
485	struct timespec ts;
486	int error = 0;
487
488	if (so == NULL) {
489		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
490		    __func__, sb, sb->sb_flags, lr_saved);
491		/* NOTREACHED */
492	} else if (so->so_usecount < 1) {
493		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
494		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
495		    so->so_usecount, lr_saved, solockhistory_nr(so));
496		/* NOTREACHED */
497	}
498
499	if (so->so_proto->pr_getlock != NULL)
500		mutex_held = (*so->so_proto->pr_getlock)(so, 0);
501	else
502		mutex_held = so->so_proto->pr_domain->dom_mtx;
503
504	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
505
506	ts.tv_sec = sb->sb_timeo.tv_sec;
507	ts.tv_nsec = sb->sb_timeo.tv_usec * 1000;
508
509	sb->sb_waiters++;
510	VERIFY(sb->sb_waiters != 0);
511
512	error = msleep((caddr_t)&sb->sb_cc, mutex_held,
513	    nointr ? PSOCK : PSOCK | PCATCH,
514	    nointr ? "sbwait_nointr" : "sbwait", &ts);
515
516	VERIFY(sb->sb_waiters != 0);
517	sb->sb_waiters--;
518
519	if (so->so_usecount < 1) {
520		panic("%s: 2 sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
521		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
522		    so->so_usecount, lr_saved, solockhistory_nr(so));
523		/* NOTREACHED */
524	}
525
526	if ((so->so_state & SS_DRAINING) || (so->so_flags & SOF_DEFUNCT)) {
527		error = EBADF;
528		if (so->so_flags & SOF_DEFUNCT) {
529			SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
530			    "(%d)\n", __func__, proc_selfpid(),
531			    (uint64_t)VM_KERNEL_ADDRPERM(so),
532			    SOCK_DOM(so), SOCK_TYPE(so), error));
533		}
534	}
535
536	return (error);
537}
538
539void
540sbwakeup(struct sockbuf *sb)
541{
542	if (sb->sb_waiters > 0)
543		wakeup((caddr_t)&sb->sb_cc);
544}
545
546/*
547 * Wakeup processes waiting on a socket buffer.
548 * Do asynchronous notification via SIGIO
549 * if the socket has the SS_ASYNC flag set.
550 */
551void
552sowakeup(struct socket *so, struct sockbuf *sb)
553{
554	if (so->so_flags & SOF_DEFUNCT) {
555		SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] si 0x%x, "
556		    "fl 0x%x [%s]\n", __func__, proc_selfpid(),
557		    (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
558		    SOCK_TYPE(so), (uint32_t)sb->sb_sel.si_flags, sb->sb_flags,
559		    (sb->sb_flags & SB_RECV) ? "rcv" : "snd"));
560	}
561
562	sb->sb_flags &= ~SB_SEL;
563	selwakeup(&sb->sb_sel);
564	sbwakeup(sb);
565	if (so->so_state & SS_ASYNC) {
566		if (so->so_pgid < 0)
567			gsignal(-so->so_pgid, SIGIO);
568		else if (so->so_pgid > 0)
569			proc_signal(so->so_pgid, SIGIO);
570	}
571	if (sb->sb_flags & SB_KNOTE) {
572		KNOTE(&sb->sb_sel.si_note, SO_FILT_HINT_LOCKED);
573	}
574	if (sb->sb_flags & SB_UPCALL) {
575		void (*sb_upcall)(struct socket *, void *, int);
576		caddr_t sb_upcallarg;
577
578		sb_upcall = sb->sb_upcall;
579		sb_upcallarg = sb->sb_upcallarg;
580		/* Let close know that we're about to do an upcall */
581		so->so_upcallusecount++;
582
583		socket_unlock(so, 0);
584		(*sb_upcall)(so, sb_upcallarg, M_DONTWAIT);
585		socket_lock(so, 0);
586
587		so->so_upcallusecount--;
588		/* Tell close that it's safe to proceed */
589		if ((so->so_flags & SOF_CLOSEWAIT) &&
590		    so->so_upcallusecount == 0)
591			wakeup((caddr_t)&so->so_upcallusecount);
592	}
593#if CONTENT_FILTER
594	/*
595	 * Trap disconnection events for content filters
596	 */
597	if ((so->so_flags & SOF_CONTENT_FILTER) != 0) {
598		if ((sb->sb_flags & SB_RECV)) {
599			if (so->so_state & (SS_CANTRCVMORE))
600				cfil_sock_notify_shutdown(so, SHUT_RD);
601		} else {
602			if (so->so_state & (SS_CANTSENDMORE))
603				cfil_sock_notify_shutdown(so, SHUT_WR);
604		}
605	}
606#endif /* CONTENT_FILTER */
607}
608
609/*
610 * Socket buffer (struct sockbuf) utility routines.
611 *
612 * Each socket contains two socket buffers: one for sending data and
613 * one for receiving data.  Each buffer contains a queue of mbufs,
614 * information about the number of mbufs and amount of data in the
615 * queue, and other fields allowing select() statements and notification
616 * on data availability to be implemented.
617 *
618 * Data stored in a socket buffer is maintained as a list of records.
619 * Each record is a list of mbufs chained together with the m_next
620 * field.  Records are chained together with the m_nextpkt field. The upper
621 * level routine soreceive() expects the following conventions to be
622 * observed when placing information in the receive buffer:
623 *
624 * 1. If the protocol requires each message be preceded by the sender's
625 *    name, then a record containing that name must be present before
626 *    any associated data (mbuf's must be of type MT_SONAME).
627 * 2. If the protocol supports the exchange of ``access rights'' (really
628 *    just additional data associated with the message), and there are
629 *    ``rights'' to be received, then a record containing this data
630 *    should be present (mbuf's must be of type MT_RIGHTS).
631 * 3. If a name or rights record exists, then it must be followed by
632 *    a data record, perhaps of zero length.
633 *
634 * Before using a new socket structure it is first necessary to reserve
635 * buffer space to the socket, by calling sbreserve().  This should commit
636 * some of the available buffer space in the system buffer pool for the
637 * socket (currently, it does nothing but enforce limits).  The space
638 * should be released by calling sbrelease() when the socket is destroyed.
639 */
640
641/*
642 * Returns:	0			Success
643 *		ENOBUFS
644 */
645int
646soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc)
647{
648
649	if (sbreserve(&so->so_snd, sndcc) == 0)
650		goto bad;
651	else
652		so->so_snd.sb_idealsize = sndcc;
653
654	if (sbreserve(&so->so_rcv, rcvcc) == 0)
655		goto bad2;
656	else
657		so->so_rcv.sb_idealsize = rcvcc;
658
659	if (so->so_rcv.sb_lowat == 0)
660		so->so_rcv.sb_lowat = 1;
661	if (so->so_snd.sb_lowat == 0)
662		so->so_snd.sb_lowat = MCLBYTES;
663	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
664		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
665	return (0);
666bad2:
667	so->so_snd.sb_flags &= ~SB_SEL;
668	selthreadclear(&so->so_snd.sb_sel);
669	sbrelease(&so->so_snd);
670bad:
671	return (ENOBUFS);
672}
673
674/*
675 * Allot mbufs to a sockbuf.
676 * Attempt to scale mbmax so that mbcnt doesn't become limiting
677 * if buffering efficiency is near the normal case.
678 */
679int
680sbreserve(struct sockbuf *sb, u_int32_t cc)
681{
682	if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES))
683		return (0);
684	sb->sb_hiwat = cc;
685	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
686	if (sb->sb_lowat > sb->sb_hiwat)
687		sb->sb_lowat = sb->sb_hiwat;
688	return (1);
689}
690
691/*
692 * Free mbufs held by a socket, and reserved mbuf space.
693 */
694/*  WARNING needs to do selthreadclear() before calling this */
695void
696sbrelease(struct sockbuf *sb)
697{
698	sbflush(sb);
699	sb->sb_hiwat = 0;
700	sb->sb_mbmax = 0;
701}
702
703/*
704 * Routines to add and remove
705 * data from an mbuf queue.
706 *
707 * The routines sbappend() or sbappendrecord() are normally called to
708 * append new mbufs to a socket buffer, after checking that adequate
709 * space is available, comparing the function sbspace() with the amount
710 * of data to be added.  sbappendrecord() differs from sbappend() in
711 * that data supplied is treated as the beginning of a new record.
712 * To place a sender's address, optional access rights, and data in a
713 * socket receive buffer, sbappendaddr() should be used.  To place
714 * access rights and data in a socket receive buffer, sbappendrights()
715 * should be used.  In either case, the new data begins a new record.
716 * Note that unlike sbappend() and sbappendrecord(), these routines check
717 * for the caller that there will be enough space to store the data.
718 * Each fails if there is not enough space, or if it cannot find mbufs
719 * to store additional information in.
720 *
721 * Reliable protocols may use the socket send buffer to hold data
722 * awaiting acknowledgement.  Data is normally copied from a socket
723 * send buffer in a protocol with m_copy for output to a peer,
724 * and then removing the data from the socket buffer with sbdrop()
725 * or sbdroprecord() when the data is acknowledged by the peer.
726 */
727
728/*
729 * Append mbuf chain m to the last record in the
730 * socket buffer sb.  The additional space associated
731 * the mbuf chain is recorded in sb.  Empty mbufs are
732 * discarded and mbufs are compacted where possible.
733 */
734int
735sbappend(struct sockbuf *sb, struct mbuf *m)
736{
737	struct socket *so = sb->sb_so;
738
739	if (m == NULL || (sb->sb_flags & SB_DROP)) {
740		if (m != NULL)
741			m_freem(m);
742		return (0);
743	}
744
745	SBLASTRECORDCHK(sb, "sbappend 1");
746
747	if (sb->sb_lastrecord != NULL && (sb->sb_mbtail->m_flags & M_EOR))
748		return (sbappendrecord(sb, m));
749
750	if (sb->sb_flags & SB_RECV && !(m && m->m_flags & M_SKIPCFIL)) {
751		int error = sflt_data_in(so, NULL, &m, NULL, 0);
752		SBLASTRECORDCHK(sb, "sbappend 2");
753
754#if CONTENT_FILTER
755		if (error == 0)
756			error = cfil_sock_data_in(so, NULL, m, NULL, 0);
757#endif /* CONTENT_FILTER */
758
759		if (error != 0) {
760			if (error != EJUSTRETURN)
761				m_freem(m);
762			return (0);
763		}
764	} else if (m) {
765		m->m_flags &= ~M_SKIPCFIL;
766	}
767
768	/* If this is the first record, it's also the last record */
769	if (sb->sb_lastrecord == NULL)
770		sb->sb_lastrecord = m;
771
772	sbcompress(sb, m, sb->sb_mbtail);
773	SBLASTRECORDCHK(sb, "sbappend 3");
774	return (1);
775}
776
777/*
778 * Similar to sbappend, except that this is optimized for stream sockets.
779 */
780int
781sbappendstream(struct sockbuf *sb, struct mbuf *m)
782{
783	struct socket *so = sb->sb_so;
784
785	if (m == NULL || (sb->sb_flags & SB_DROP)) {
786		if (m != NULL)
787			m_freem(m);
788		return (0);
789	}
790
791	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) {
792		panic("sbappendstream: nexpkt %p || mb %p != lastrecord %p\n",
793		    m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);
794		/* NOTREACHED */
795	}
796
797	SBLASTMBUFCHK(sb, __func__);
798
799	if (sb->sb_flags & SB_RECV && !(m && m->m_flags & M_SKIPCFIL)) {
800		int error = sflt_data_in(so, NULL, &m, NULL, 0);
801		SBLASTRECORDCHK(sb, "sbappendstream 1");
802
803#if CONTENT_FILTER
804		if (error == 0)
805			error = cfil_sock_data_in(so, NULL, m, NULL, 0);
806#endif /* CONTENT_FILTER */
807
808		if (error != 0) {
809			if (error != EJUSTRETURN)
810				m_freem(m);
811			return (0);
812		}
813	} else if (m) {
814		m->m_flags &= ~M_SKIPCFIL;
815	}
816
817	sbcompress(sb, m, sb->sb_mbtail);
818	sb->sb_lastrecord = sb->sb_mb;
819	SBLASTRECORDCHK(sb, "sbappendstream 2");
820	return (1);
821}
822
823#ifdef SOCKBUF_DEBUG
824void
825sbcheck(struct sockbuf *sb)
826{
827	struct mbuf *m;
828	struct mbuf *n = 0;
829	u_int32_t len = 0, mbcnt = 0;
830	lck_mtx_t *mutex_held;
831
832	if (sb->sb_so->so_proto->pr_getlock != NULL)
833		mutex_held = (*sb->sb_so->so_proto->pr_getlock)(sb->sb_so, 0);
834	else
835		mutex_held = sb->sb_so->so_proto->pr_domain->dom_mtx;
836
837	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
838
839	if (sbchecking == 0)
840		return;
841
842	for (m = sb->sb_mb; m; m = n) {
843		n = m->m_nextpkt;
844		for (; m; m = m->m_next) {
845			len += m->m_len;
846			mbcnt += MSIZE;
847			/* XXX pretty sure this is bogus */
848			if (m->m_flags & M_EXT)
849				mbcnt += m->m_ext.ext_size;
850		}
851	}
852	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
853		panic("cc %ld != %ld || mbcnt %ld != %ld\n", len, sb->sb_cc,
854		    mbcnt, sb->sb_mbcnt);
855	}
856}
857#endif
858
859void
860sblastrecordchk(struct sockbuf *sb, const char *where)
861{
862	struct mbuf *m = sb->sb_mb;
863
864	while (m && m->m_nextpkt)
865		m = m->m_nextpkt;
866
867	if (m != sb->sb_lastrecord) {
868		printf("sblastrecordchk: mb 0x%llx lastrecord 0x%llx "
869		    "last 0x%llx\n",
870		    (uint64_t)VM_KERNEL_ADDRPERM(sb->sb_mb),
871		    (uint64_t)VM_KERNEL_ADDRPERM(sb->sb_lastrecord),
872		    (uint64_t)VM_KERNEL_ADDRPERM(m));
873		printf("packet chain:\n");
874		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
875			printf("\t0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(m));
876		panic("sblastrecordchk from %s", where);
877	}
878}
879
880void
881sblastmbufchk(struct sockbuf *sb, const char *where)
882{
883	struct mbuf *m = sb->sb_mb;
884	struct mbuf *n;
885
886	while (m && m->m_nextpkt)
887		m = m->m_nextpkt;
888
889	while (m && m->m_next)
890		m = m->m_next;
891
892	if (m != sb->sb_mbtail) {
893		printf("sblastmbufchk: mb 0x%llx mbtail 0x%llx last 0x%llx\n",
894		    (uint64_t)VM_KERNEL_ADDRPERM(sb->sb_mb),
895		    (uint64_t)VM_KERNEL_ADDRPERM(sb->sb_mbtail),
896		    (uint64_t)VM_KERNEL_ADDRPERM(m));
897		printf("packet tree:\n");
898		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
899			printf("\t");
900			for (n = m; n != NULL; n = n->m_next)
901				printf("0x%llx ", (uint64_t)VM_KERNEL_ADDRPERM(n));
902			printf("\n");
903		}
904		panic("sblastmbufchk from %s", where);
905	}
906}
907
908/*
909 * Similar to sbappend, except the mbuf chain begins a new record.
910 */
911int
912sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
913{
914	struct mbuf *m;
915	int space = 0;
916
917	if (m0 == NULL || (sb->sb_flags & SB_DROP)) {
918		if (m0 != NULL)
919			m_freem(m0);
920		return (0);
921	}
922
923	for (m = m0; m != NULL; m = m->m_next)
924		space += m->m_len;
925
926	if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX)) {
927		m_freem(m0);
928		return (0);
929	}
930
931	if (sb->sb_flags & SB_RECV && !(m0 && m0->m_flags & M_SKIPCFIL)) {
932		int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
933		    sock_data_filt_flag_record);
934
935#if CONTENT_FILTER
936		if (error == 0)
937			error = cfil_sock_data_in(sb->sb_so, NULL, m0, NULL, 0);
938#endif /* CONTENT_FILTER */
939
940		if (error != 0) {
941			SBLASTRECORDCHK(sb, "sbappendrecord 1");
942			if (error != EJUSTRETURN)
943				m_freem(m0);
944			return (0);
945		}
946	} else if (m0) {
947		m0->m_flags &= ~M_SKIPCFIL;
948	}
949
950	/*
951	 * Note this permits zero length records.
952	 */
953	sballoc(sb, m0);
954	SBLASTRECORDCHK(sb, "sbappendrecord 2");
955	if (sb->sb_lastrecord != NULL) {
956		sb->sb_lastrecord->m_nextpkt = m0;
957	} else {
958		sb->sb_mb = m0;
959	}
960	sb->sb_lastrecord = m0;
961	sb->sb_mbtail = m0;
962
963	m = m0->m_next;
964	m0->m_next = 0;
965	if (m && (m0->m_flags & M_EOR)) {
966		m0->m_flags &= ~M_EOR;
967		m->m_flags |= M_EOR;
968	}
969	sbcompress(sb, m, m0);
970	SBLASTRECORDCHK(sb, "sbappendrecord 3");
971	return (1);
972}
973
974/*
975 * As above except that OOB data
976 * is inserted at the beginning of the sockbuf,
977 * but after any other OOB data.
978 */
979int
980sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
981{
982	struct mbuf *m;
983	struct mbuf **mp;
984
985	if (m0 == 0)
986		return (0);
987
988	SBLASTRECORDCHK(sb, "sbinsertoob 1");
989
990	if ((sb->sb_flags & SB_RECV && !(m0->m_flags & M_SKIPCFIL)) != 0) {
991		int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
992		    sock_data_filt_flag_oob);
993
994		SBLASTRECORDCHK(sb, "sbinsertoob 2");
995
996#if CONTENT_FILTER
997		if (error == 0)
998			error = cfil_sock_data_in(sb->sb_so, NULL, m0, NULL, 0);
999#endif /* CONTENT_FILTER */
1000
1001		if (error) {
1002			if (error != EJUSTRETURN) {
1003				m_freem(m0);
1004			}
1005			return (0);
1006		}
1007	} else if (m0) {
1008		m0->m_flags &= ~M_SKIPCFIL;
1009	}
1010
1011	for (mp = &sb->sb_mb; *mp; mp = &((*mp)->m_nextpkt)) {
1012		m = *mp;
1013again:
1014		switch (m->m_type) {
1015
1016		case MT_OOBDATA:
1017			continue;		/* WANT next train */
1018
1019		case MT_CONTROL:
1020			m = m->m_next;
1021			if (m)
1022				goto again;	/* inspect THIS train further */
1023		}
1024		break;
1025	}
1026	/*
1027	 * Put the first mbuf on the queue.
1028	 * Note this permits zero length records.
1029	 */
1030	sballoc(sb, m0);
1031	m0->m_nextpkt = *mp;
1032	if (*mp == NULL) {
1033		/* m0 is actually the new tail */
1034		sb->sb_lastrecord = m0;
1035	}
1036	*mp = m0;
1037	m = m0->m_next;
1038	m0->m_next = 0;
1039	if (m && (m0->m_flags & M_EOR)) {
1040		m0->m_flags &= ~M_EOR;
1041		m->m_flags |= M_EOR;
1042	}
1043	sbcompress(sb, m, m0);
1044	SBLASTRECORDCHK(sb, "sbinsertoob 3");
1045	return (1);
1046}
1047
1048/*
1049 * Append address and data, and optionally, control (ancillary) data
1050 * to the receive queue of a socket.  If present,
1051 * m0 must include a packet header with total length.
1052 * Returns 0 if no space in sockbuf or insufficient mbufs.
1053 *
1054 * Returns:	0			No space/out of mbufs
1055 *		1			Success
1056 */
1057static int
1058sbappendaddr_internal(struct sockbuf *sb, struct sockaddr *asa,
1059    struct mbuf *m0, struct mbuf *control)
1060{
1061	struct mbuf *m, *n, *nlast;
1062	int space = asa->sa_len;
1063
1064	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
1065		panic("sbappendaddr");
1066
1067	if (m0)
1068		space += m0->m_pkthdr.len;
1069	for (n = control; n; n = n->m_next) {
1070		space += n->m_len;
1071		if (n->m_next == 0)	/* keep pointer to last control buf */
1072			break;
1073	}
1074	if (space > sbspace(sb))
1075		return (0);
1076	if (asa->sa_len > MLEN)
1077		return (0);
1078	MGET(m, M_DONTWAIT, MT_SONAME);
1079	if (m == 0)
1080		return (0);
1081	m->m_len = asa->sa_len;
1082	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
1083	if (n)
1084		n->m_next = m0;		/* concatenate data to control */
1085	else
1086		control = m0;
1087	m->m_next = control;
1088
1089	SBLASTRECORDCHK(sb, "sbappendadddr 1");
1090
1091	for (n = m; n->m_next != NULL; n = n->m_next)
1092		sballoc(sb, n);
1093	sballoc(sb, n);
1094	nlast = n;
1095
1096	if (sb->sb_lastrecord != NULL) {
1097		sb->sb_lastrecord->m_nextpkt = m;
1098	} else {
1099		sb->sb_mb = m;
1100	}
1101	sb->sb_lastrecord = m;
1102	sb->sb_mbtail = nlast;
1103
1104	SBLASTMBUFCHK(sb, __func__);
1105	SBLASTRECORDCHK(sb, "sbappendadddr 2");
1106
1107	postevent(0, sb, EV_RWBYTES);
1108	return (1);
1109}
1110
1111/*
1112 * Returns:	0			Error: No space/out of mbufs/etc.
1113 *		1			Success
1114 *
1115 * Imputed:	(*error_out)		errno for error
1116 *		ENOBUFS
1117 *	sflt_data_in:???		[whatever a filter author chooses]
1118 */
1119int
1120sbappendaddr(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0,
1121    struct mbuf *control, int *error_out)
1122{
1123	int result = 0;
1124	boolean_t sb_unix = (sb->sb_flags & SB_UNIX);
1125
1126	if (error_out)
1127		*error_out = 0;
1128
1129	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
1130		panic("sbappendaddrorfree");
1131
1132	if (sb->sb_flags & SB_DROP) {
1133		if (m0 != NULL)
1134			m_freem(m0);
1135		if (control != NULL && !sb_unix)
1136			m_freem(control);
1137		if (error_out != NULL)
1138			*error_out = EINVAL;
1139		return (0);
1140	}
1141
1142	/* Call socket data in filters */
1143	if (sb->sb_flags & SB_RECV && !(m0 && m0->m_flags & M_SKIPCFIL)) {
1144		int error;
1145		error = sflt_data_in(sb->sb_so, asa, &m0, &control, 0);
1146		SBLASTRECORDCHK(sb, __func__);
1147
1148#if CONTENT_FILTER
1149		if (error == 0)
1150			error = cfil_sock_data_in(sb->sb_so, asa, m0, control, 0);
1151#endif /* CONTENT_FILTER */
1152
1153		if (error) {
1154			if (error != EJUSTRETURN) {
1155				if (m0)
1156					m_freem(m0);
1157				if (control != NULL && !sb_unix)
1158					m_freem(control);
1159				if (error_out)
1160					*error_out = error;
1161			}
1162			return (0);
1163		}
1164	} else if (m0) {
1165		m0->m_flags &= ~M_SKIPCFIL;
1166	}
1167
1168	result = sbappendaddr_internal(sb, asa, m0, control);
1169	if (result == 0) {
1170		if (m0)
1171			m_freem(m0);
1172		if (control != NULL && !sb_unix)
1173			m_freem(control);
1174		if (error_out)
1175			*error_out = ENOBUFS;
1176	}
1177
1178	return (result);
1179}
1180
1181static int
1182sbappendcontrol_internal(struct sockbuf *sb, struct mbuf *m0,
1183    struct mbuf *control)
1184{
1185	struct mbuf *m, *mlast, *n;
1186	int space = 0;
1187
1188	if (control == 0)
1189		panic("sbappendcontrol");
1190
1191	for (m = control; ; m = m->m_next) {
1192		space += m->m_len;
1193		if (m->m_next == 0)
1194			break;
1195	}
1196	n = m;			/* save pointer to last control buffer */
1197	for (m = m0; m; m = m->m_next)
1198		space += m->m_len;
1199	if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX))
1200		return (0);
1201	n->m_next = m0;			/* concatenate data to control */
1202	SBLASTRECORDCHK(sb, "sbappendcontrol 1");
1203
1204	for (m = control; m->m_next != NULL; m = m->m_next)
1205		sballoc(sb, m);
1206	sballoc(sb, m);
1207	mlast = m;
1208
1209	if (sb->sb_lastrecord != NULL) {
1210		sb->sb_lastrecord->m_nextpkt = control;
1211	} else {
1212		sb->sb_mb = control;
1213	}
1214	sb->sb_lastrecord = control;
1215	sb->sb_mbtail = mlast;
1216
1217	SBLASTMBUFCHK(sb, __func__);
1218	SBLASTRECORDCHK(sb, "sbappendcontrol 2");
1219
1220	postevent(0, sb, EV_RWBYTES);
1221	return (1);
1222}
1223
1224int
1225sbappendcontrol(struct sockbuf *sb, struct mbuf	*m0, struct mbuf *control,
1226    int *error_out)
1227{
1228	int result = 0;
1229	boolean_t sb_unix = (sb->sb_flags & SB_UNIX);
1230
1231	if (error_out)
1232		*error_out = 0;
1233
1234	if (sb->sb_flags & SB_DROP) {
1235		if (m0 != NULL)
1236			m_freem(m0);
1237		if (control != NULL && !sb_unix)
1238			m_freem(control);
1239		if (error_out != NULL)
1240			*error_out = EINVAL;
1241		return (0);
1242	}
1243
1244	if (sb->sb_flags & SB_RECV && !(m0 && m0->m_flags & M_SKIPCFIL)) {
1245		int error;
1246
1247		error = sflt_data_in(sb->sb_so, NULL, &m0, &control, 0);
1248		SBLASTRECORDCHK(sb, __func__);
1249
1250#if CONTENT_FILTER
1251		if (error == 0)
1252			error = cfil_sock_data_in(sb->sb_so, NULL, m0, control, 0);
1253#endif /* CONTENT_FILTER */
1254
1255		if (error) {
1256			if (error != EJUSTRETURN) {
1257				if (m0)
1258					m_freem(m0);
1259				if (control != NULL && !sb_unix)
1260					m_freem(control);
1261				if (error_out)
1262					*error_out = error;
1263			}
1264			return (0);
1265		}
1266	} else if (m0) {
1267		m0->m_flags &= ~M_SKIPCFIL;
1268	}
1269
1270	result = sbappendcontrol_internal(sb, m0, control);
1271	if (result == 0) {
1272		if (m0)
1273			m_freem(m0);
1274		if (control != NULL && !sb_unix)
1275			m_freem(control);
1276		if (error_out)
1277			*error_out = ENOBUFS;
1278	}
1279
1280	return (result);
1281}
1282
1283/*
1284 * Append a contiguous TCP data blob with TCP sequence number as control data
1285 * as a new msg to the receive socket buffer.
1286 */
1287int
1288sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum,
1289    int unordered)
1290{
1291	struct mbuf *m_eor = NULL;
1292	u_int32_t data_len = 0;
1293	int ret = 0;
1294	struct socket *so = sb->sb_so;
1295
1296	VERIFY((m->m_flags & M_PKTHDR) && m_pktlen(m) > 0);
1297	VERIFY(so->so_msg_state != NULL);
1298	VERIFY(sb->sb_flags & SB_RECV);
1299
1300	/* Keep the TCP sequence number in the mbuf pkthdr */
1301	m->m_pkthdr.msg_seq = seqnum;
1302
1303	/* find last mbuf and set M_EOR */
1304	for (m_eor = m; ; m_eor = m_eor->m_next) {
1305		/*
1306		 * If the msg is unordered, we need to account for
1307		 * these bytes in receive socket buffer size. Otherwise,
1308		 * the receive window advertised will shrink because
1309		 * of the additional unordered bytes added to the
1310		 * receive buffer.
1311		 */
1312		if (unordered) {
1313			m_eor->m_flags |= M_UNORDERED_DATA;
1314			data_len += m_eor->m_len;
1315			so->so_msg_state->msg_uno_bytes += m_eor->m_len;
1316		} else {
1317			m_eor->m_flags &= ~M_UNORDERED_DATA;
1318		}
1319		if (m_eor->m_next == NULL)
1320			break;
1321	}
1322
1323	/* set EOR flag at end of byte blob */
1324	m_eor->m_flags |= M_EOR;
1325
1326	/* expand the receive socket buffer to allow unordered data */
1327	if (unordered && !sbreserve(sb, sb->sb_hiwat + data_len)) {
1328		/*
1329		 * Could not allocate memory for unordered data, it
1330		 * means this packet will have to be delivered in order
1331		 */
1332		printf("%s: could not reserve space for unordered data\n",
1333		    __func__);
1334	}
1335
1336	if (!unordered && (sb->sb_mbtail != NULL) &&
1337		!(sb->sb_mbtail->m_flags & M_UNORDERED_DATA)) {
1338		sb->sb_mbtail->m_flags &= ~M_EOR;
1339		sbcompress(sb, m, sb->sb_mbtail);
1340		ret = 1;
1341	} else {
1342		ret = sbappendrecord(sb, m);
1343	}
1344	VERIFY(sb->sb_mbtail->m_flags & M_EOR);
1345	return (ret);
1346}
1347
1348/*
1349 * TCP streams have message based out of order delivery support, or have
1350 * Multipath TCP support, or are regular TCP sockets
1351 */
1352int
1353sbappendstream_rcvdemux(struct socket *so, struct mbuf *m, uint32_t seqnum,
1354	int unordered)
1355{
1356	int ret = 0;
1357
1358	if ((m != NULL) && (m_pktlen(m) <= 0)) {
1359		m_freem(m);
1360		return (ret);
1361	}
1362
1363	if (so->so_flags & SOF_ENABLE_MSGS) {
1364		ret = sbappendmsgstream_rcv(&so->so_rcv, m, seqnum, unordered);
1365	}
1366#if MPTCP
1367	else if (so->so_flags & SOF_MPTCP_TRUE) {
1368		ret = sbappendmptcpstream_rcv(&so->so_rcv, m);
1369	}
1370#endif /* MPTCP */
1371	else {
1372		ret = sbappendstream(&so->so_rcv, m);
1373	}
1374	return (ret);
1375}
1376
1377#if MPTCP
1378int
1379sbappendmptcpstream_rcv(struct sockbuf *sb, struct mbuf *m)
1380{
1381	struct socket *so = sb->sb_so;
1382
1383	VERIFY(m == NULL || (m->m_flags & M_PKTHDR));
1384	/* SB_NOCOMPRESS must be set prevent loss of M_PKTHDR data */
1385	VERIFY((sb->sb_flags & (SB_RECV|SB_NOCOMPRESS)) ==
1386	    (SB_RECV|SB_NOCOMPRESS));
1387
1388	if (m == NULL || m_pktlen(m) == 0 || (sb->sb_flags & SB_DROP) ||
1389	    (so->so_state & SS_CANTRCVMORE)) {
1390		if (m != NULL)
1391			m_freem(m);
1392		return (0);
1393	}
1394	/* the socket is not closed, so SOF_MP_SUBFLOW must be set */
1395	VERIFY(so->so_flags & SOF_MP_SUBFLOW);
1396
1397	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) {
1398		panic("%s: nexpkt %p || mb %p != lastrecord %p\n", __func__,
1399		    m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);
1400		/* NOTREACHED */
1401	}
1402
1403	SBLASTMBUFCHK(sb, __func__);
1404
1405	if (mptcp_adj_rmap(so, m) != 0)
1406		return (0);
1407
1408	/* No filter support (SB_RECV) on mptcp subflow sockets */
1409
1410	sbcompress(sb, m, sb->sb_mbtail);
1411	sb->sb_lastrecord = sb->sb_mb;
1412	SBLASTRECORDCHK(sb, __func__);
1413	return (1);
1414}
1415#endif /* MPTCP */
1416
1417/*
1418 * Append message to send socket buffer based on priority.
1419 */
1420int
1421sbappendmsg_snd(struct sockbuf *sb, struct mbuf *m)
1422{
1423	struct socket *so = sb->sb_so;
1424	struct msg_priq *priq;
1425	int set_eor = 0;
1426
1427	VERIFY(so->so_msg_state != NULL);
1428
1429	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord))
1430		panic("sbappendstream: nexpkt %p || mb %p != lastrecord %p\n",
1431		    m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);
1432
1433	SBLASTMBUFCHK(sb, __func__);
1434
1435	if (m == NULL || (sb->sb_flags & SB_DROP) || so->so_msg_state == NULL) {
1436		if (m != NULL)
1437			m_freem(m);
1438		return (0);
1439	}
1440
1441	priq = &so->so_msg_state->msg_priq[m->m_pkthdr.msg_pri];
1442
1443	/* note if we need to propogate M_EOR to the last mbuf */
1444	if (m->m_flags & M_EOR) {
1445		set_eor = 1;
1446
1447		/* Reset M_EOR from the first mbuf */
1448		m->m_flags &= ~(M_EOR);
1449	}
1450
1451	if (priq->msgq_head == NULL) {
1452		VERIFY(priq->msgq_tail == NULL && priq->msgq_lastmsg == NULL);
1453		priq->msgq_head = priq->msgq_lastmsg = m;
1454	} else {
1455		VERIFY(priq->msgq_tail->m_next == NULL);
1456
1457		/* Check if the last message has M_EOR flag set */
1458		if (priq->msgq_tail->m_flags & M_EOR) {
1459			/* Insert as a new message */
1460			priq->msgq_lastmsg->m_nextpkt = m;
1461
1462			/* move the lastmsg pointer */
1463			priq->msgq_lastmsg = m;
1464		} else {
1465			/* Append to the existing message */
1466			priq->msgq_tail->m_next = m;
1467		}
1468	}
1469
1470	/* Update accounting and the queue tail pointer */
1471
1472	while (m->m_next != NULL) {
1473		sballoc(sb, m);
1474		priq->msgq_bytes += m->m_len;
1475		m = m->m_next;
1476	}
1477	sballoc(sb, m);
1478	priq->msgq_bytes += m->m_len;
1479
1480	if (set_eor) {
1481		m->m_flags |= M_EOR;
1482
1483		/*
1484		 * Since the user space can not write a new msg
1485		 * without completing the previous one, we can
1486		 * reset this flag to start sending again.
1487		 */
1488		priq->msgq_flags &= ~(MSGQ_MSG_NOTDONE);
1489	}
1490
1491	priq->msgq_tail = m;
1492
1493	SBLASTRECORDCHK(sb, "sbappendstream 2");
1494	postevent(0, sb, EV_RWBYTES);
1495	return (1);
1496}
1497
1498/*
1499 * Pull data from priority queues to the serial snd queue
1500 * right before sending.
1501 */
1502void
1503sbpull_unordered_data(struct socket *so, int32_t off, int32_t len)
1504{
1505	int32_t topull, i;
1506	struct msg_priq *priq = NULL;
1507
1508	VERIFY(so->so_msg_state != NULL);
1509
1510	topull = (off + len) - so->so_msg_state->msg_serial_bytes;
1511
1512	i = MSG_PRI_MAX;
1513	while (i >= MSG_PRI_MIN && topull > 0) {
1514		struct mbuf *m = NULL, *mqhead = NULL, *mend = NULL;
1515		priq = &so->so_msg_state->msg_priq[i];
1516		if ((priq->msgq_flags & MSGQ_MSG_NOTDONE) &&
1517		    priq->msgq_head == NULL) {
1518			/*
1519			 * We were in the middle of sending
1520			 * a message and we have not seen the
1521			 * end of it.
1522			 */
1523			VERIFY(priq->msgq_lastmsg == NULL &&
1524			    priq->msgq_tail == NULL);
1525			return;
1526		}
1527		if (priq->msgq_head != NULL) {
1528			int32_t bytes = 0, topull_tmp = topull;
1529			/*
1530			 * We found a msg while scanning the priority
1531			 * queue from high to low priority.
1532			 */
1533			m = priq->msgq_head;
1534			mqhead = m;
1535			mend = m;
1536
1537			/*
1538			 * Move bytes from the priority queue to the
1539			 * serial queue. Compute the number of bytes
1540			 * being added.
1541			 */
1542			while (mqhead->m_next != NULL && topull_tmp > 0) {
1543				bytes += mqhead->m_len;
1544				topull_tmp -= mqhead->m_len;
1545				mend = mqhead;
1546				mqhead = mqhead->m_next;
1547			}
1548
1549			if (mqhead->m_next == NULL) {
1550				/*
1551				 * If we have only one more mbuf left,
1552				 * move the last mbuf of this message to
1553				 * serial queue and set the head of the
1554				 * queue to be the next message.
1555				 */
1556				bytes += mqhead->m_len;
1557				mend = mqhead;
1558				mqhead = m->m_nextpkt;
1559				if (!(mend->m_flags & M_EOR)) {
1560					/*
1561					 * We have not seen the end of
1562					 * this message, so we can not
1563					 * pull anymore.
1564					 */
1565					priq->msgq_flags |= MSGQ_MSG_NOTDONE;
1566				} else {
1567					/* Reset M_EOR */
1568					mend->m_flags &= ~(M_EOR);
1569				}
1570			} else {
1571				/* propogate the next msg pointer */
1572				mqhead->m_nextpkt = m->m_nextpkt;
1573			}
1574			priq->msgq_head = mqhead;
1575
1576			/*
1577			 * if the lastmsg pointer points to
1578			 * the mbuf that is being dequeued, update
1579			 * it to point to the new head.
1580			 */
1581			if (priq->msgq_lastmsg == m)
1582				priq->msgq_lastmsg = priq->msgq_head;
1583
1584			m->m_nextpkt = NULL;
1585			mend->m_next = NULL;
1586
1587			if (priq->msgq_head == NULL) {
1588				/* Moved all messages, update tail */
1589				priq->msgq_tail = NULL;
1590				VERIFY(priq->msgq_lastmsg == NULL);
1591			}
1592
1593			/* Move it to serial sb_mb queue */
1594			if (so->so_snd.sb_mb == NULL) {
1595				so->so_snd.sb_mb = m;
1596			} else {
1597				so->so_snd.sb_mbtail->m_next = m;
1598			}
1599
1600			priq->msgq_bytes -= bytes;
1601			VERIFY(priq->msgq_bytes >= 0);
1602			sbwakeup(&so->so_snd);
1603
1604			so->so_msg_state->msg_serial_bytes += bytes;
1605			so->so_snd.sb_mbtail = mend;
1606			so->so_snd.sb_lastrecord = so->so_snd.sb_mb;
1607
1608			topull =
1609			    (off + len) - so->so_msg_state->msg_serial_bytes;
1610
1611			if (priq->msgq_flags & MSGQ_MSG_NOTDONE)
1612				break;
1613		} else {
1614			--i;
1615		}
1616	}
1617	sblastrecordchk(&so->so_snd, "sbpull_unordered_data");
1618	sblastmbufchk(&so->so_snd, "sbpull_unordered_data");
1619}
1620
1621/*
1622 * Compress mbuf chain m into the socket
1623 * buffer sb following mbuf n.  If n
1624 * is null, the buffer is presumed empty.
1625 */
1626static inline void
1627sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
1628{
1629	int eor = 0, compress = (!(sb->sb_flags & SB_NOCOMPRESS));
1630	struct mbuf *o;
1631
1632	if (m == NULL) {
1633		/* There is nothing to compress; just update the tail */
1634		for (; n->m_next != NULL; n = n->m_next)
1635			;
1636		sb->sb_mbtail = n;
1637		goto done;
1638	}
1639
1640	while (m != NULL) {
1641		eor |= m->m_flags & M_EOR;
1642		if (compress && m->m_len == 0 && (eor == 0 ||
1643		    (((o = m->m_next) || (o = n)) && o->m_type == m->m_type))) {
1644			if (sb->sb_lastrecord == m)
1645				sb->sb_lastrecord = m->m_next;
1646			m = m_free(m);
1647			continue;
1648		}
1649		if (compress && n != NULL && (n->m_flags & M_EOR) == 0 &&
1650#ifndef __APPLE__
1651		    M_WRITABLE(n) &&
1652#endif
1653		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
1654		    m->m_len <= M_TRAILINGSPACE(n) &&
1655		    n->m_type == m->m_type) {
1656			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
1657			    (unsigned)m->m_len);
1658			n->m_len += m->m_len;
1659			sb->sb_cc += m->m_len;
1660			if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
1661			    m->m_type != MT_OOBDATA) {
1662				/* XXX: Probably don't need */
1663				sb->sb_ctl += m->m_len;
1664			}
1665			m = m_free(m);
1666			continue;
1667		}
1668		if (n != NULL)
1669			n->m_next = m;
1670		else
1671			sb->sb_mb = m;
1672		sb->sb_mbtail = m;
1673		sballoc(sb, m);
1674		n = m;
1675		m->m_flags &= ~M_EOR;
1676		m = m->m_next;
1677		n->m_next = NULL;
1678	}
1679	if (eor != 0) {
1680		if (n != NULL)
1681			n->m_flags |= eor;
1682		else
1683			printf("semi-panic: sbcompress\n");
1684	}
1685done:
1686	SBLASTMBUFCHK(sb, __func__);
1687	postevent(0, sb, EV_RWBYTES);
1688}
1689
1690void
1691sb_empty_assert(struct sockbuf *sb, const char *where)
1692{
1693	if (!(sb->sb_cc == 0 && sb->sb_mb == NULL && sb->sb_mbcnt == 0 &&
1694	    sb->sb_mbtail == NULL && sb->sb_lastrecord == NULL)) {
1695		panic("%s: sb %p so %p cc %d mbcnt %d mb %p mbtail %p "
1696		    "lastrecord %p\n", where, sb, sb->sb_so, sb->sb_cc,
1697		    sb->sb_mbcnt, sb->sb_mb, sb->sb_mbtail,
1698		    sb->sb_lastrecord);
1699		/* NOTREACHED */
1700	}
1701}
1702
1703static void
1704sbflush_priq(struct msg_priq *priq)
1705{
1706	struct mbuf *m;
1707	m = priq->msgq_head;
1708	if (m != NULL)
1709		m_freem_list(m);
1710	priq->msgq_head = priq->msgq_tail = priq->msgq_lastmsg = NULL;
1711	priq->msgq_bytes = priq->msgq_flags = 0;
1712}
1713
1714/*
1715 * Free all mbufs in a sockbuf.
1716 * Check that all resources are reclaimed.
1717 */
1718void
1719sbflush(struct sockbuf *sb)
1720{
1721	void *lr_saved = __builtin_return_address(0);
1722	struct socket *so = sb->sb_so;
1723#ifdef notyet
1724	lck_mtx_t *mutex_held;
1725#endif
1726	u_int32_t i;
1727
1728	/* so_usecount may be 0 if we get here from sofreelastref() */
1729	if (so == NULL) {
1730		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
1731		    __func__, sb, sb->sb_flags, lr_saved);
1732		/* NOTREACHED */
1733	} else if (so->so_usecount < 0) {
1734		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
1735		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
1736		    so->so_usecount, lr_saved, solockhistory_nr(so));
1737		/* NOTREACHED */
1738	}
1739#ifdef notyet
1740	/*
1741	 * XXX: This code is currently commented out, because we may get here
1742	 * as part of sofreelastref(), and at that time, pr_getlock() may no
1743	 * longer be able to return us the lock; this will be fixed in future.
1744	 */
1745	if (so->so_proto->pr_getlock != NULL)
1746		mutex_held = (*so->so_proto->pr_getlock)(so, 0);
1747	else
1748		mutex_held = so->so_proto->pr_domain->dom_mtx;
1749
1750	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
1751#endif
1752
1753	/*
1754	 * Obtain lock on the socket buffer (SB_LOCK).  This is required
1755	 * to prevent the socket buffer from being unexpectedly altered
1756	 * while it is used by another thread in socket send/receive.
1757	 *
1758	 * sblock() must not fail here, hence the assertion.
1759	 */
1760	(void) sblock(sb, SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT);
1761	VERIFY(sb->sb_flags & SB_LOCK);
1762
1763	while (sb->sb_mbcnt > 0) {
1764		/*
1765		 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
1766		 * we would loop forever. Panic instead.
1767		 */
1768		if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
1769			break;
1770		sbdrop(sb, (int)sb->sb_cc);
1771	}
1772
1773	if (!(sb->sb_flags & SB_RECV) && (so->so_flags & SOF_ENABLE_MSGS)) {
1774		VERIFY(so->so_msg_state != NULL);
1775		for (i = MSG_PRI_MIN; i <= MSG_PRI_MAX; ++i) {
1776			sbflush_priq(&so->so_msg_state->msg_priq[i]);
1777		}
1778		so->so_msg_state->msg_serial_bytes = 0;
1779		so->so_msg_state->msg_uno_bytes = 0;
1780	}
1781
1782	sb_empty_assert(sb, __func__);
1783	postevent(0, sb, EV_RWBYTES);
1784
1785	sbunlock(sb, TRUE);	/* keep socket locked */
1786}
1787
1788/*
1789 * Drop data from (the front of) a sockbuf.
1790 * use m_freem_list to free the mbuf structures
1791 * under a single lock... this is done by pruning
1792 * the top of the tree from the body by keeping track
1793 * of where we get to in the tree and then zeroing the
1794 * two pertinent pointers m_nextpkt and m_next
1795 * the socket buffer is then updated to point at the new
1796 * top of the tree and the pruned area is released via
1797 * m_freem_list.
1798 */
1799void
1800sbdrop(struct sockbuf *sb, int len)
1801{
1802	struct mbuf *m, *free_list, *ml;
1803	struct mbuf *next, *last;
1804
1805	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
1806#if MPTCP
1807	if ((m != NULL) && (len > 0) &&
1808	    (!(sb->sb_flags & SB_RECV)) &&
1809	    ((sb->sb_so->so_flags & SOF_MP_SUBFLOW) ||
1810	    ((SOCK_CHECK_DOM(sb->sb_so, PF_MULTIPATH)) &&
1811	    (SOCK_CHECK_PROTO(sb->sb_so, IPPROTO_TCP)))) &&
1812	    (!(sb->sb_so->so_flags1 & SOF1_POST_FALLBACK_SYNC))) {
1813		mptcp_preproc_sbdrop(m, (unsigned int)len);
1814	}
1815#endif /* MPTCP */
1816	KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_START), sb, len, 0, 0, 0);
1817
1818	free_list = last = m;
1819	ml = (struct mbuf *)0;
1820
1821	while (len > 0) {
1822		if (m == 0) {
1823			if (next == 0) {
1824				/*
1825				 * temporarily replacing this panic with printf
1826				 * because it occurs occasionally when closing
1827				 * a socket when there is no harm in ignoring
1828				 * it. This problem will be investigated
1829				 * further.
1830				 */
1831				/* panic("sbdrop"); */
1832				printf("sbdrop - count not zero\n");
1833				len = 0;
1834				/*
1835				 * zero the counts. if we have no mbufs,
1836				 * we have no data (PR-2986815)
1837				 */
1838				sb->sb_cc = 0;
1839				sb->sb_mbcnt = 0;
1840				if (!(sb->sb_flags & SB_RECV) &&
1841				    (sb->sb_so->so_flags & SOF_ENABLE_MSGS)) {
1842					sb->sb_so->so_msg_state->
1843					    msg_serial_bytes = 0;
1844				}
1845				break;
1846			}
1847			m = last = next;
1848			next = m->m_nextpkt;
1849			continue;
1850		}
1851		if (m->m_len > len) {
1852			m->m_len -= len;
1853			m->m_data += len;
1854			sb->sb_cc -= len;
1855			if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
1856			    m->m_type != MT_OOBDATA)
1857				sb->sb_ctl -= len;
1858			break;
1859		}
1860		len -= m->m_len;
1861		sbfree(sb, m);
1862
1863		ml = m;
1864		m = m->m_next;
1865	}
1866	while (m && m->m_len == 0) {
1867		sbfree(sb, m);
1868
1869		ml = m;
1870		m = m->m_next;
1871	}
1872	if (ml) {
1873		ml->m_next = (struct mbuf *)0;
1874		last->m_nextpkt = (struct mbuf *)0;
1875		m_freem_list(free_list);
1876	}
1877	if (m) {
1878		sb->sb_mb = m;
1879		m->m_nextpkt = next;
1880	} else {
1881		sb->sb_mb = next;
1882	}
1883
1884	/*
1885	 * First part is an inline SB_EMPTY_FIXUP().  Second part
1886	 * makes sure sb_lastrecord is up-to-date if we dropped
1887	 * part of the last record.
1888	 */
1889	m = sb->sb_mb;
1890	if (m == NULL) {
1891		sb->sb_mbtail = NULL;
1892		sb->sb_lastrecord = NULL;
1893	} else if (m->m_nextpkt == NULL) {
1894		sb->sb_lastrecord = m;
1895	}
1896
1897#if CONTENT_FILTER
1898	cfil_sock_buf_update(sb);
1899#endif /* CONTENT_FILTER */
1900
1901	postevent(0, sb, EV_RWBYTES);
1902
1903	KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_END), sb, 0, 0, 0, 0);
1904}
1905
1906/*
1907 * Drop a record off the front of a sockbuf
1908 * and move the next record to the front.
1909 */
1910void
1911sbdroprecord(struct sockbuf *sb)
1912{
1913	struct mbuf *m, *mn;
1914
1915	m = sb->sb_mb;
1916	if (m) {
1917		sb->sb_mb = m->m_nextpkt;
1918		do {
1919			sbfree(sb, m);
1920			MFREE(m, mn);
1921			m = mn;
1922		} while (m);
1923	}
1924	SB_EMPTY_FIXUP(sb);
1925	postevent(0, sb, EV_RWBYTES);
1926}
1927
1928/*
1929 * Create a "control" mbuf containing the specified data
1930 * with the specified type for presentation on a socket buffer.
1931 */
1932struct mbuf *
1933sbcreatecontrol(caddr_t p, int size, int type, int level)
1934{
1935	struct cmsghdr *cp;
1936	struct mbuf *m;
1937
1938	if (CMSG_SPACE((u_int)size) > MLEN)
1939		return ((struct mbuf *)NULL);
1940	if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL)
1941		return ((struct mbuf *)NULL);
1942	cp = mtod(m, struct cmsghdr *);
1943	VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
1944	/* XXX check size? */
1945	(void) memcpy(CMSG_DATA(cp), p, size);
1946	m->m_len = CMSG_SPACE(size);
1947	cp->cmsg_len = CMSG_LEN(size);
1948	cp->cmsg_level = level;
1949	cp->cmsg_type = type;
1950	return (m);
1951}
1952
1953struct mbuf **
1954sbcreatecontrol_mbuf(caddr_t p, int size, int type, int level, struct mbuf **mp)
1955{
1956	struct mbuf *m;
1957	struct cmsghdr *cp;
1958
1959	if (*mp == NULL) {
1960		*mp = sbcreatecontrol(p, size, type, level);
1961		return (mp);
1962	}
1963
1964	if (CMSG_SPACE((u_int)size) + (*mp)->m_len > MLEN) {
1965		mp = &(*mp)->m_next;
1966		*mp = sbcreatecontrol(p, size, type, level);
1967		return (mp);
1968	}
1969
1970	m = *mp;
1971
1972	cp = (struct cmsghdr *)(void *)(mtod(m, char *) + m->m_len);
1973	/* CMSG_SPACE ensures 32-bit alignment */
1974	VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
1975	m->m_len += CMSG_SPACE(size);
1976
1977	/* XXX check size? */
1978	(void) memcpy(CMSG_DATA(cp), p, size);
1979	cp->cmsg_len = CMSG_LEN(size);
1980	cp->cmsg_level = level;
1981	cp->cmsg_type = type;
1982
1983	return (mp);
1984}
1985
1986
1987/*
1988 * Some routines that return EOPNOTSUPP for entry points that are not
1989 * supported by a protocol.  Fill in as needed.
1990 */
1991int
1992pru_abort_notsupp(struct socket *so)
1993{
1994#pragma unused(so)
1995	return (EOPNOTSUPP);
1996}
1997
1998int
1999pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
2000{
2001#pragma unused(so, nam)
2002	return (EOPNOTSUPP);
2003}
2004
2005int
2006pru_attach_notsupp(struct socket *so, int proto, struct proc *p)
2007{
2008#pragma unused(so, proto, p)
2009	return (EOPNOTSUPP);
2010}
2011
2012int
2013pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
2014{
2015#pragma unused(so, nam, p)
2016	return (EOPNOTSUPP);
2017}
2018
2019int
2020pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
2021{
2022#pragma unused(so, nam, p)
2023	return (EOPNOTSUPP);
2024}
2025
2026int
2027pru_connect2_notsupp(struct socket *so1, struct socket *so2)
2028{
2029#pragma unused(so1, so2)
2030	return (EOPNOTSUPP);
2031}
2032
2033int
2034pru_connectx_notsupp(struct socket *so, struct sockaddr_list **src_sl,
2035    struct sockaddr_list **dst_sl, struct proc *p, uint32_t ifscope,
2036    associd_t aid, connid_t *pcid, uint32_t flags, void *arg,
2037    uint32_t arglen)
2038{
2039#pragma unused(so, src_sl, dst_sl, p, ifscope, aid, pcid, flags, arg, arglen)
2040	return (EOPNOTSUPP);
2041}
2042
2043int
2044pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
2045    struct ifnet *ifp, struct proc *p)
2046{
2047#pragma unused(so, cmd, data, ifp, p)
2048	return (EOPNOTSUPP);
2049}
2050
2051int
2052pru_detach_notsupp(struct socket *so)
2053{
2054#pragma unused(so)
2055	return (EOPNOTSUPP);
2056}
2057
2058int
2059pru_disconnect_notsupp(struct socket *so)
2060{
2061#pragma unused(so)
2062	return (EOPNOTSUPP);
2063}
2064
2065int
2066pru_disconnectx_notsupp(struct socket *so, associd_t aid, connid_t cid)
2067{
2068#pragma unused(so, aid, cid)
2069	return (EOPNOTSUPP);
2070}
2071
2072int
2073pru_listen_notsupp(struct socket *so, struct proc *p)
2074{
2075#pragma unused(so, p)
2076	return (EOPNOTSUPP);
2077}
2078
2079int
2080pru_peeloff_notsupp(struct socket *so, associd_t aid, struct socket **psop)
2081{
2082#pragma unused(so, aid, psop)
2083	return (EOPNOTSUPP);
2084}
2085
2086int
2087pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
2088{
2089#pragma unused(so, nam)
2090	return (EOPNOTSUPP);
2091}
2092
2093int
2094pru_rcvd_notsupp(struct socket *so, int flags)
2095{
2096#pragma unused(so, flags)
2097	return (EOPNOTSUPP);
2098}
2099
2100int
2101pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
2102{
2103#pragma unused(so, m, flags)
2104	return (EOPNOTSUPP);
2105}
2106
2107int
2108pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
2109    struct sockaddr *addr, struct mbuf *control, struct proc *p)
2110{
2111#pragma unused(so, flags, m, addr, control, p)
2112	return (EOPNOTSUPP);
2113}
2114
2115int
2116pru_send_list_notsupp(struct socket *so, int flags, struct mbuf *m,
2117    struct sockaddr *addr, struct mbuf *control, struct proc *p)
2118{
2119#pragma unused(so, flags, m, addr, control, p)
2120	return (EOPNOTSUPP);
2121}
2122
2123/*
2124 * This isn't really a ``null'' operation, but it's the default one
2125 * and doesn't do anything destructive.
2126 */
2127int
2128pru_sense_null(struct socket *so, void *ub, int isstat64)
2129{
2130	if (isstat64 != 0) {
2131		struct stat64 *sb64;
2132
2133		sb64 = (struct stat64 *)ub;
2134		sb64->st_blksize = so->so_snd.sb_hiwat;
2135	} else {
2136		struct stat *sb;
2137
2138		sb = (struct stat *)ub;
2139		sb->st_blksize = so->so_snd.sb_hiwat;
2140	}
2141
2142	return (0);
2143}
2144
2145
2146int
2147pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
2148    struct mbuf *top, struct mbuf *control, int flags)
2149{
2150#pragma unused(so, addr, uio, top, control, flags)
2151	return (EOPNOTSUPP);
2152}
2153
2154int
2155pru_sosend_list_notsupp(struct socket *so, struct sockaddr *addr, struct uio **uio,
2156    u_int uiocnt, struct mbuf *top, struct mbuf *control, int flags)
2157{
2158#pragma unused(so, addr, uio, uiocnt, top, control, flags)
2159	return (EOPNOTSUPP);
2160}
2161
2162int
2163pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
2164    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2165{
2166#pragma unused(so, paddr, uio, mp0, controlp, flagsp)
2167	return (EOPNOTSUPP);
2168}
2169
2170int
2171pru_soreceive_list_notsupp(struct socket *so, struct sockaddr **paddr,
2172    struct uio **uio, u_int uiocnt, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2173{
2174#pragma unused(so, paddr, uio, uiocnt, mp0, controlp, flagsp)
2175	return (EOPNOTSUPP);
2176}
2177
2178int
2179pru_shutdown_notsupp(struct socket *so)
2180{
2181#pragma unused(so)
2182	return (EOPNOTSUPP);
2183}
2184
2185int
2186pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
2187{
2188#pragma unused(so, nam)
2189	return (EOPNOTSUPP);
2190}
2191
2192int
2193pru_sopoll_notsupp(struct socket *so, int events, kauth_cred_t cred, void *wql)
2194{
2195#pragma unused(so, events, cred, wql)
2196	return (EOPNOTSUPP);
2197}
2198
2199int
2200pru_socheckopt_null(struct socket *so, struct sockopt *sopt)
2201{
2202#pragma unused(so, sopt)
2203	/*
2204	 * Allow all options for set/get by default.
2205	 */
2206	return (0);
2207}
2208
2209void
2210pru_sanitize(struct pr_usrreqs *pru)
2211{
2212#define	DEFAULT(foo, bar)	if ((foo) == NULL) (foo) = (bar)
2213	DEFAULT(pru->pru_abort, pru_abort_notsupp);
2214	DEFAULT(pru->pru_accept, pru_accept_notsupp);
2215	DEFAULT(pru->pru_attach, pru_attach_notsupp);
2216	DEFAULT(pru->pru_bind, pru_bind_notsupp);
2217	DEFAULT(pru->pru_connect, pru_connect_notsupp);
2218	DEFAULT(pru->pru_connect2, pru_connect2_notsupp);
2219	DEFAULT(pru->pru_connectx, pru_connectx_notsupp);
2220	DEFAULT(pru->pru_control, pru_control_notsupp);
2221	DEFAULT(pru->pru_detach, pru_detach_notsupp);
2222	DEFAULT(pru->pru_disconnect, pru_disconnect_notsupp);
2223	DEFAULT(pru->pru_disconnectx, pru_disconnectx_notsupp);
2224	DEFAULT(pru->pru_listen, pru_listen_notsupp);
2225	DEFAULT(pru->pru_peeloff, pru_peeloff_notsupp);
2226	DEFAULT(pru->pru_peeraddr, pru_peeraddr_notsupp);
2227	DEFAULT(pru->pru_rcvd, pru_rcvd_notsupp);
2228	DEFAULT(pru->pru_rcvoob, pru_rcvoob_notsupp);
2229	DEFAULT(pru->pru_send, pru_send_notsupp);
2230	DEFAULT(pru->pru_send_list, pru_send_list_notsupp);
2231	DEFAULT(pru->pru_sense, pru_sense_null);
2232	DEFAULT(pru->pru_shutdown, pru_shutdown_notsupp);
2233	DEFAULT(pru->pru_sockaddr, pru_sockaddr_notsupp);
2234	DEFAULT(pru->pru_sopoll, pru_sopoll_notsupp);
2235	DEFAULT(pru->pru_soreceive, pru_soreceive_notsupp);
2236	DEFAULT(pru->pru_soreceive_list, pru_soreceive_list_notsupp);
2237	DEFAULT(pru->pru_sosend, pru_sosend_notsupp);
2238	DEFAULT(pru->pru_sosend_list, pru_sosend_list_notsupp);
2239	DEFAULT(pru->pru_socheckopt, pru_socheckopt_null);
2240#undef DEFAULT
2241}
2242
2243/*
2244 * The following are macros on BSD and functions on Darwin
2245 */
2246
2247/*
2248 * Do we need to notify the other side when I/O is possible?
2249 */
2250
2251int
2252sb_notify(struct sockbuf *sb)
2253{
2254	return (sb->sb_waiters > 0 ||
2255	    (sb->sb_flags & (SB_SEL|SB_ASYNC|SB_UPCALL|SB_KNOTE)));
2256}
2257
2258/*
2259 * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
2260 * This is problematical if the fields are unsigned, as the space might
2261 * still be negative (cc > hiwat or mbcnt > mbmax).  Should detect
2262 * overflow and return 0.
2263 */
2264int
2265sbspace(struct sockbuf *sb)
2266{
2267	int pending = 0;
2268	int space = imin((int)(sb->sb_hiwat - sb->sb_cc),
2269	    (int)(sb->sb_mbmax - sb->sb_mbcnt));
2270	if (space < 0)
2271		space = 0;
2272
2273	/* Compensate for data being processed by content filters */
2274#if CONTENT_FILTER
2275	pending = cfil_sock_data_space(sb);
2276#endif /* CONTENT_FILTER */
2277	if (pending > space)
2278		space = 0;
2279	else
2280		space -= pending;
2281
2282	return (space);
2283}
2284
2285/*
2286 * If this socket has priority queues, check if there is enough
2287 * space in the priority queue for this msg.
2288 */
2289int
2290msgq_sbspace(struct socket *so, struct mbuf *control)
2291{
2292	int space = 0, error;
2293	u_int32_t msgpri;
2294	VERIFY(so->so_type == SOCK_STREAM &&
2295		SOCK_PROTO(so) == IPPROTO_TCP);
2296	if (control != NULL) {
2297		error = tcp_get_msg_priority(control, &msgpri);
2298		if (error)
2299			return (0);
2300	} else {
2301		msgpri = MSG_PRI_0;
2302	}
2303	space = (so->so_snd.sb_idealsize / MSG_PRI_COUNT) -
2304	    so->so_msg_state->msg_priq[msgpri].msgq_bytes;
2305	if (space < 0)
2306		space = 0;
2307	return (space);
2308}
2309
2310/* do we have to send all at once on a socket? */
2311int
2312sosendallatonce(struct socket *so)
2313{
2314	return (so->so_proto->pr_flags & PR_ATOMIC);
2315}
2316
2317/* can we read something from so? */
2318int
2319soreadable(struct socket *so)
2320{
2321	return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
2322	    ((so->so_state & SS_CANTRCVMORE)
2323#if CONTENT_FILTER
2324	    && cfil_sock_data_pending(&so->so_rcv) == 0
2325#endif /* CONTENT_FILTER */
2326            ) ||
2327	    so->so_comp.tqh_first || so->so_error);
2328}
2329
2330/* can we write something to so? */
2331
2332int
2333sowriteable(struct socket *so)
2334{
2335	if ((so->so_state & SS_CANTSENDMORE) ||
2336	    so->so_error > 0)
2337		return (1);
2338
2339	if (!so_wait_for_if_feedback(so) &&
2340	    sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat &&
2341	    ((so->so_state & SS_ISCONNECTED) ||
2342	    !(so->so_proto->pr_flags & PR_CONNREQUIRED))) {
2343		if (so->so_flags & SOF_NOTSENT_LOWAT) {
2344			if ((SOCK_DOM(so) == PF_INET6
2345			    || SOCK_DOM(so) == PF_INET)
2346			    && so->so_type == SOCK_STREAM) {
2347				return (tcp_notsent_lowat_check(so));
2348			}
2349#if MPTCP
2350			else if ((SOCK_DOM(so) == PF_MULTIPATH) &&
2351			    (SOCK_PROTO(so) == IPPROTO_TCP)) {
2352				return (mptcp_notsent_lowat_check(so));
2353			}
2354#endif
2355			else {
2356				return (1);
2357			}
2358		} else {
2359			return (1);
2360		}
2361	}
2362	return (0);
2363}
2364
2365/* adjust counters in sb reflecting allocation of m */
2366
2367void
2368sballoc(struct sockbuf *sb, struct mbuf *m)
2369{
2370	u_int32_t cnt = 1;
2371	sb->sb_cc += m->m_len;
2372	if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
2373	    m->m_type != MT_OOBDATA)
2374		sb->sb_ctl += m->m_len;
2375	sb->sb_mbcnt += MSIZE;
2376
2377	if (m->m_flags & M_EXT) {
2378		sb->sb_mbcnt += m->m_ext.ext_size;
2379		cnt += (m->m_ext.ext_size >> MSIZESHIFT);
2380	}
2381	OSAddAtomic(cnt, &total_sbmb_cnt);
2382	VERIFY(total_sbmb_cnt > 0);
2383	if (total_sbmb_cnt > total_sbmb_cnt_peak)
2384		total_sbmb_cnt_peak = total_sbmb_cnt;
2385}
2386
2387/* adjust counters in sb reflecting freeing of m */
2388void
2389sbfree(struct sockbuf *sb, struct mbuf *m)
2390{
2391	int cnt = -1;
2392
2393	sb->sb_cc -= m->m_len;
2394	if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
2395	    m->m_type != MT_OOBDATA)
2396		sb->sb_ctl -= m->m_len;
2397	sb->sb_mbcnt -= MSIZE;
2398	if (m->m_flags & M_EXT) {
2399		sb->sb_mbcnt -= m->m_ext.ext_size;
2400		cnt -= (m->m_ext.ext_size >> MSIZESHIFT);
2401	}
2402	OSAddAtomic(cnt, &total_sbmb_cnt);
2403	VERIFY(total_sbmb_cnt >= 0);
2404}
2405
2406/*
2407 * Set lock on sockbuf sb; sleep if lock is already held.
2408 * Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
2409 * Returns error without lock if sleep is interrupted.
2410 */
2411int
2412sblock(struct sockbuf *sb, uint32_t flags)
2413{
2414	boolean_t nointr = ((sb->sb_flags & SB_NOINTR) || (flags & SBL_NOINTR));
2415	void *lr_saved = __builtin_return_address(0);
2416	struct socket *so = sb->sb_so;
2417	void * wchan;
2418	int error = 0;
2419	thread_t tp = current_thread();
2420
2421	VERIFY((flags & SBL_VALID) == flags);
2422
2423	/* so_usecount may be 0 if we get here from sofreelastref() */
2424	if (so == NULL) {
2425		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
2426		    __func__, sb, sb->sb_flags, lr_saved);
2427		/* NOTREACHED */
2428	} else if (so->so_usecount < 0) {
2429		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
2430		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
2431		    so->so_usecount, lr_saved, solockhistory_nr(so));
2432		/* NOTREACHED */
2433	}
2434
2435	/*
2436	 * The content filter thread must hold the sockbuf lock
2437	 */
2438	if ((so->so_flags & SOF_CONTENT_FILTER) && sb->sb_cfil_thread == tp) {
2439		/*
2440		 * Don't panic if we are defunct because SB_LOCK has
2441		 * been cleared by sodefunct()
2442		 */
2443		if (!(so->so_flags & SOF_DEFUNCT) && !(sb->sb_flags & SB_LOCK))
2444			panic("%s: SB_LOCK not held for %p\n",
2445		   	 __func__, sb);
2446
2447		/* Keep the sockbuf locked */
2448		return (0);
2449	}
2450
2451	if ((sb->sb_flags & SB_LOCK) && !(flags & SBL_WAIT))
2452		return (EWOULDBLOCK);
2453	/*
2454	 * We may get here from sorflush(), in which case "sb" may not
2455	 * point to the real socket buffer.  Use the actual socket buffer
2456	 * address from the socket instead.
2457	 */
2458	wchan = (sb->sb_flags & SB_RECV) ?
2459	    &so->so_rcv.sb_flags : &so->so_snd.sb_flags;
2460
2461	/*
2462	 * A content filter thread has exclusive access to the sockbuf
2463	 * until it clears the
2464	 */
2465	while ((sb->sb_flags & SB_LOCK) ||
2466		((so->so_flags & SOF_CONTENT_FILTER) &&
2467		sb->sb_cfil_thread != NULL)) {
2468		lck_mtx_t *mutex_held;
2469
2470		/*
2471		 * XXX: This code should be moved up above outside of this loop;
2472		 * however, we may get here as part of sofreelastref(), and
2473		 * at that time pr_getlock() may no longer be able to return
2474		 * us the lock.  This will be fixed in future.
2475		 */
2476		if (so->so_proto->pr_getlock != NULL)
2477			mutex_held = (*so->so_proto->pr_getlock)(so, 0);
2478		else
2479			mutex_held = so->so_proto->pr_domain->dom_mtx;
2480
2481		lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2482
2483		sb->sb_wantlock++;
2484		VERIFY(sb->sb_wantlock != 0);
2485
2486		error = msleep(wchan, mutex_held,
2487		    nointr ? PSOCK : PSOCK | PCATCH,
2488		    nointr ? "sb_lock_nointr" : "sb_lock", NULL);
2489
2490		VERIFY(sb->sb_wantlock != 0);
2491		sb->sb_wantlock--;
2492
2493		if (error == 0 && (so->so_flags & SOF_DEFUNCT) &&
2494		    !(flags & SBL_IGNDEFUNCT)) {
2495			error = EBADF;
2496			SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
2497			    "(%d)\n", __func__, proc_selfpid(),
2498			    (uint64_t)VM_KERNEL_ADDRPERM(so),
2499			    SOCK_DOM(so), SOCK_TYPE(so), error));
2500		}
2501
2502		if (error != 0)
2503			return (error);
2504	}
2505	sb->sb_flags |= SB_LOCK;
2506	return (0);
2507}
2508
2509/*
2510 * Release lock on sockbuf sb
2511 */
2512void
2513sbunlock(struct sockbuf *sb, boolean_t keeplocked)
2514{
2515	void *lr_saved = __builtin_return_address(0);
2516	struct socket *so = sb->sb_so;
2517	thread_t tp = current_thread();
2518
2519	/* so_usecount may be 0 if we get here from sofreelastref() */
2520	if (so == NULL) {
2521		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
2522		    __func__, sb, sb->sb_flags, lr_saved);
2523		/* NOTREACHED */
2524	} else if (so->so_usecount < 0) {
2525		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
2526		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
2527		    so->so_usecount, lr_saved, solockhistory_nr(so));
2528		/* NOTREACHED */
2529	}
2530
2531	/*
2532	 * The content filter thread must hold the sockbuf lock
2533	 */
2534	if ((so->so_flags & SOF_CONTENT_FILTER) && sb->sb_cfil_thread == tp) {
2535		/*
2536		 * Don't panic if we are defunct because SB_LOCK has
2537		 * been cleared by sodefunct()
2538		 */
2539		if (!(so->so_flags & SOF_DEFUNCT) &&
2540	            !(sb->sb_flags & SB_LOCK) &&
2541		    !(so->so_state & SS_DEFUNCT) &&
2542		    !(so->so_flags1 & SOF1_DEFUNCTINPROG)) {
2543			panic("%s: SB_LOCK not held for %p\n",
2544				__func__, sb);
2545		}
2546		/* Keep the sockbuf locked and proceed*/
2547	} else {
2548		VERIFY((sb->sb_flags & SB_LOCK) ||
2549		       (so->so_state & SS_DEFUNCT) ||
2550		       (so->so_flags1 & SOF1_DEFUNCTINPROG));
2551
2552		sb->sb_flags &= ~SB_LOCK;
2553
2554		if (sb->sb_wantlock > 0) {
2555			/*
2556			 * We may get here from sorflush(), in which case "sb" may not
2557			 * point to the real socket buffer.  Use the actual socket
2558			 * buffer address from the socket instead.
2559			 */
2560			wakeup((sb->sb_flags & SB_RECV) ? &so->so_rcv.sb_flags :
2561			    &so->so_snd.sb_flags);
2562		}
2563	}
2564
2565	if (!keeplocked) {	/* unlock on exit */
2566		lck_mtx_t *mutex_held;
2567
2568		if (so->so_proto->pr_getlock != NULL)
2569			mutex_held = (*so->so_proto->pr_getlock)(so, 0);
2570		else
2571			mutex_held = so->so_proto->pr_domain->dom_mtx;
2572
2573		lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2574
2575		VERIFY(so->so_usecount != 0);
2576		so->so_usecount--;
2577		so->unlock_lr[so->next_unlock_lr] = lr_saved;
2578		so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2579		lck_mtx_unlock(mutex_held);
2580	}
2581}
2582
2583void
2584sorwakeup(struct socket *so)
2585{
2586	if (sb_notify(&so->so_rcv))
2587		sowakeup(so, &so->so_rcv);
2588}
2589
2590void
2591sowwakeup(struct socket *so)
2592{
2593	if (sb_notify(&so->so_snd))
2594		sowakeup(so, &so->so_snd);
2595}
2596
2597void
2598soevent(struct socket *so, long hint)
2599{
2600	if (so->so_flags & SOF_KNOTE)
2601		KNOTE(&so->so_klist, hint);
2602
2603	soevupcall(so, hint);
2604
2605	/*
2606	 * Don't post an event if this a subflow socket or
2607	 * the app has opted out of using cellular interface
2608	 */
2609	if ((hint & SO_FILT_HINT_IFDENIED) &&
2610	    !(so->so_flags & SOF_MP_SUBFLOW) &&
2611	    !(so->so_restrictions & SO_RESTRICT_DENY_CELLULAR) &&
2612	    !(so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE))
2613		soevent_ifdenied(so);
2614}
2615
2616void
2617soevupcall(struct socket *so, u_int32_t hint)
2618{
2619	if (so->so_event != NULL) {
2620		caddr_t so_eventarg = so->so_eventarg;
2621
2622		hint &= so->so_eventmask;
2623		if (hint != 0) {
2624			socket_unlock(so, 0);
2625			so->so_event(so, so_eventarg, hint);
2626			socket_lock(so, 0);
2627		}
2628	}
2629}
2630
2631static void
2632soevent_ifdenied(struct socket *so)
2633{
2634	struct kev_netpolicy_ifdenied ev_ifdenied;
2635
2636	bzero(&ev_ifdenied, sizeof (ev_ifdenied));
2637	/*
2638	 * The event consumer is interested about the effective {upid,pid,uuid}
2639	 * info which can be different than the those related to the process
2640	 * that recently performed a system call on the socket, i.e. when the
2641	 * socket is delegated.
2642	 */
2643	if (so->so_flags & SOF_DELEGATED) {
2644		ev_ifdenied.ev_data.eupid = so->e_upid;
2645		ev_ifdenied.ev_data.epid = so->e_pid;
2646		uuid_copy(ev_ifdenied.ev_data.euuid, so->e_uuid);
2647	} else {
2648		ev_ifdenied.ev_data.eupid = so->last_upid;
2649		ev_ifdenied.ev_data.epid = so->last_pid;
2650		uuid_copy(ev_ifdenied.ev_data.euuid, so->last_uuid);
2651	}
2652
2653	if (++so->so_ifdenied_notifies > 1) {
2654		/*
2655		 * Allow for at most one kernel event to be generated per
2656		 * socket; so_ifdenied_notifies is reset upon changes in
2657		 * the UUID policy.  See comments in inp_update_policy.
2658		 */
2659		if (net_io_policy_log) {
2660			uuid_string_t buf;
2661
2662			uuid_unparse(ev_ifdenied.ev_data.euuid, buf);
2663			log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %d "
2664			    "euuid %s%s has %d redundant events supressed\n",
2665			    __func__, so->last_pid,
2666			    (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
2667			    SOCK_TYPE(so), ev_ifdenied.ev_data.epid, buf,
2668			    ((so->so_flags & SOF_DELEGATED) ?
2669			    " [delegated]" : ""), so->so_ifdenied_notifies);
2670		}
2671	} else {
2672		if (net_io_policy_log) {
2673			uuid_string_t buf;
2674
2675			uuid_unparse(ev_ifdenied.ev_data.euuid, buf);
2676			log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %d "
2677			    "euuid %s%s event posted\n", __func__,
2678			    so->last_pid, (uint64_t)VM_KERNEL_ADDRPERM(so),
2679			    SOCK_DOM(so), SOCK_TYPE(so),
2680			    ev_ifdenied.ev_data.epid, buf,
2681			    ((so->so_flags & SOF_DELEGATED) ?
2682			    " [delegated]" : ""));
2683		}
2684		netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data,
2685		    sizeof (ev_ifdenied));
2686	}
2687}
2688
2689/*
2690 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
2691 */
2692struct sockaddr *
2693dup_sockaddr(struct sockaddr *sa, int canwait)
2694{
2695	struct sockaddr *sa2;
2696
2697	MALLOC(sa2, struct sockaddr *, sa->sa_len, M_SONAME,
2698	    canwait ? M_WAITOK : M_NOWAIT);
2699	if (sa2)
2700		bcopy(sa, sa2, sa->sa_len);
2701	return (sa2);
2702}
2703
2704/*
2705 * Create an external-format (``xsocket'') structure using the information
2706 * in the kernel-format socket structure pointed to by so.  This is done
2707 * to reduce the spew of irrelevant information over this interface,
2708 * to isolate user code from changes in the kernel structure, and
2709 * potentially to provide information-hiding if we decide that
2710 * some of this information should be hidden from users.
2711 */
2712void
2713sotoxsocket(struct socket *so, struct xsocket *xso)
2714{
2715	xso->xso_len = sizeof (*xso);
2716	xso->xso_so = (_XSOCKET_PTR(struct socket *))VM_KERNEL_ADDRPERM(so);
2717	xso->so_type = so->so_type;
2718	xso->so_options = (short)(so->so_options & 0xffff);
2719	xso->so_linger = so->so_linger;
2720	xso->so_state = so->so_state;
2721	xso->so_pcb = (_XSOCKET_PTR(caddr_t))VM_KERNEL_ADDRPERM(so->so_pcb);
2722	if (so->so_proto) {
2723		xso->xso_protocol = SOCK_PROTO(so);
2724		xso->xso_family = SOCK_DOM(so);
2725	} else {
2726		xso->xso_protocol = xso->xso_family = 0;
2727	}
2728	xso->so_qlen = so->so_qlen;
2729	xso->so_incqlen = so->so_incqlen;
2730	xso->so_qlimit = so->so_qlimit;
2731	xso->so_timeo = so->so_timeo;
2732	xso->so_error = so->so_error;
2733	xso->so_pgid = so->so_pgid;
2734	xso->so_oobmark = so->so_oobmark;
2735	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
2736	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
2737	xso->so_uid = kauth_cred_getuid(so->so_cred);
2738}
2739
2740
2741
2742void
2743sotoxsocket64(struct socket *so, struct xsocket64 *xso)
2744{
2745	xso->xso_len = sizeof (*xso);
2746	xso->xso_so = (u_int64_t)VM_KERNEL_ADDRPERM(so);
2747	xso->so_type = so->so_type;
2748	xso->so_options = (short)(so->so_options & 0xffff);
2749	xso->so_linger = so->so_linger;
2750	xso->so_state = so->so_state;
2751	xso->so_pcb = (u_int64_t)VM_KERNEL_ADDRPERM(so->so_pcb);
2752	if (so->so_proto) {
2753		xso->xso_protocol = SOCK_PROTO(so);
2754		xso->xso_family = SOCK_DOM(so);
2755	} else {
2756		xso->xso_protocol = xso->xso_family = 0;
2757	}
2758	xso->so_qlen = so->so_qlen;
2759	xso->so_incqlen = so->so_incqlen;
2760	xso->so_qlimit = so->so_qlimit;
2761	xso->so_timeo = so->so_timeo;
2762	xso->so_error = so->so_error;
2763	xso->so_pgid = so->so_pgid;
2764	xso->so_oobmark = so->so_oobmark;
2765	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
2766	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
2767	xso->so_uid = kauth_cred_getuid(so->so_cred);
2768}
2769
2770
2771/*
2772 * This does the same for sockbufs.  Note that the xsockbuf structure,
2773 * since it is always embedded in a socket, does not include a self
2774 * pointer nor a length.  We make this entry point public in case
2775 * some other mechanism needs it.
2776 */
2777void
2778sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
2779{
2780	xsb->sb_cc = sb->sb_cc;
2781	xsb->sb_hiwat = sb->sb_hiwat;
2782	xsb->sb_mbcnt = sb->sb_mbcnt;
2783	xsb->sb_mbmax = sb->sb_mbmax;
2784	xsb->sb_lowat = sb->sb_lowat;
2785	xsb->sb_flags = sb->sb_flags;
2786	xsb->sb_timeo = (short)
2787	    (sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick;
2788	if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0)
2789		xsb->sb_timeo = 1;
2790}
2791
2792/*
2793 * Based on the policy set by an all knowing decison maker, throttle sockets
2794 * that either have been marked as belonging to "background" process.
2795 */
2796int
2797soisthrottled(struct socket *so)
2798{
2799	/*
2800	 * On non-embedded, we rely on implicit throttling by the
2801	 * application, as we're missing the system wide "decision maker"
2802	 */
2803	return (
2804		(so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND));
2805}
2806
2807int
2808soisprivilegedtraffic(struct socket *so)
2809{
2810	return ((so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS) ? 1 : 0);
2811}
2812
2813int
2814soissrcbackground(struct socket *so)
2815{
2816	return ((so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND) ||
2817		IS_SO_TC_BACKGROUND(so->so_traffic_class));
2818}
2819
2820int
2821soissrcrealtime(struct socket *so)
2822{
2823	return (so->so_traffic_class >= SO_TC_AV);
2824}
2825
2826void
2827sonullevent(struct socket *so, void *arg, uint32_t hint)
2828{
2829#pragma unused(so, arg, hint)
2830}
2831
2832/*
2833 * Here is the definition of some of the basic objects in the kern.ipc
2834 * branch of the MIB.
2835 */
2836SYSCTL_NODE(_kern, KERN_IPC, ipc,
2837	CTLFLAG_RW|CTLFLAG_LOCKED|CTLFLAG_ANYBODY, 0, "IPC");
2838
2839/* Check that the maximum socket buffer size is within a range */
2840
2841static int
2842sysctl_sb_max SYSCTL_HANDLER_ARGS
2843{
2844#pragma unused(oidp, arg1, arg2)
2845	u_int32_t new_value;
2846	int changed = 0;
2847	int error = sysctl_io_number(req, sb_max, sizeof (u_int32_t),
2848	    &new_value, &changed);
2849	if (!error && changed) {
2850		if (new_value > LOW_SB_MAX && new_value <= high_sb_max) {
2851			sb_max = new_value;
2852		} else {
2853			error = ERANGE;
2854		}
2855	}
2856	return (error);
2857}
2858
2859static int
2860sysctl_io_policy_throttled SYSCTL_HANDLER_ARGS
2861{
2862#pragma unused(arg1, arg2)
2863	int i, err;
2864
2865	i = net_io_policy_throttled;
2866
2867	err = sysctl_handle_int(oidp, &i, 0, req);
2868	if (err != 0 || req->newptr == USER_ADDR_NULL)
2869		return (err);
2870
2871	if (i != net_io_policy_throttled)
2872		SOTHROTTLELOG(("throttle: network IO policy throttling is "
2873		    "now %s\n", i ? "ON" : "OFF"));
2874
2875	net_io_policy_throttled = i;
2876
2877	return (err);
2878}
2879
2880SYSCTL_PROC(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf,
2881	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2882	&sb_max, 0, &sysctl_sb_max, "IU", "Maximum socket buffer size");
2883
2884SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor,
2885	CTLFLAG_RW | CTLFLAG_LOCKED, &sb_efficiency, 0, "");
2886
2887SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters,
2888	CTLFLAG_RD | CTLFLAG_LOCKED, &nmbclusters, 0, "");
2889
2890SYSCTL_INT(_kern_ipc, OID_AUTO, njcl,
2891	CTLFLAG_RD | CTLFLAG_LOCKED, &njcl, 0, "");
2892
2893SYSCTL_INT(_kern_ipc, OID_AUTO, njclbytes,
2894	CTLFLAG_RD | CTLFLAG_LOCKED, &njclbytes, 0, "");
2895
2896SYSCTL_INT(_kern_ipc, KIPC_SOQLIMITCOMPAT, soqlimitcompat,
2897	CTLFLAG_RW | CTLFLAG_LOCKED, &soqlimitcompat, 1,
2898	"Enable socket queue limit compatibility");
2899
2900SYSCTL_INT(_kern_ipc, OID_AUTO, soqlencomp, CTLFLAG_RW | CTLFLAG_LOCKED,
2901	&soqlencomp, 0, "Listen backlog represents only complete queue");
2902
2903SYSCTL_NODE(_kern_ipc, OID_AUTO, io_policy, CTLFLAG_RW, 0, "network IO policy");
2904
2905SYSCTL_PROC(_kern_ipc_io_policy, OID_AUTO, throttled,
2906	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &net_io_policy_throttled, 0,
2907	sysctl_io_policy_throttled, "I", "");
2908
2909SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED,
2910	&net_io_policy_log, 0, "");
2911
2912#if CONFIG_PROC_UUID_POLICY
2913SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, uuid, CTLFLAG_RW | CTLFLAG_LOCKED,
2914	&net_io_policy_uuid, 0, "");
2915#endif /* CONFIG_PROC_UUID_POLICY */
2916