/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.0/sys/kern/uipc_sockbuf.c 268434 2014-07-08 21:55:27Z delphij $");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void	(*aio_swake)(struct socket *, struct sockbuf *);

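/*
 * Illustrative sketch (not compiled): how an AIO module might install the
 * hook above at load time and clear it on unload.  The callback and
 * function names here are hypothetical; the real registration lives in the
 * AIO module itself.
 */
#if 0
static void
example_aio_swake_cb(struct socket *so, struct sockbuf *sb)
{
	/* Kick any AIO requests waiting on this socket buffer. */
}

static void
example_aio_load(void)
{
	aio_swake = &example_aio_swake_cb;	/* module load */
}

static void
example_aio_unload(void)
{
	aio_swake = NULL;			/* module unload */
}
#endif
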
/*
 * Primitive routines for operating on socket buffers
 */

u_long	sb_max = SB_MAX;
u_long sb_max_adj =
       (quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */

static struct mbuf	*sbcut_internal(struct sockbuf *sb, int len);
static void	sbflush_internal(struct sockbuf *sb);

/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * is normally applied to a socket by the protocol code (in the PRU_SHUTDOWN
 * case) when the user informs the system that no more data is to be sent.
 * Socantrcvmore indicates that no more data will be received, and is
 * normally applied to the socket by a protocol when it detects that the
 * peer will send no more data.  Data queued for reading in the socket may
 * yet be read.
 */
void
socantsendmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_snd);

	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantsendmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_snd);
	socantsendmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantrcvmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

void
socantrcvmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_rcv);
	socantrcvmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

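/*
 * Illustrative sketch (not compiled): a protocol marks the send side done
 * in its shutdown handler and the receive side done when the peer signals
 * end-of-stream (e.g. a TCP FIN).  Both function names are hypothetical.
 */
#if 0
static int
example_pru_shutdown(struct socket *so)
{
	socantsendmore(so);
	return (0);
}

static void
example_peer_closed(struct socket *so)
{
	socantrcvmore(so);	/* queued data may still be read */
}
#endif
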
/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_flags |= SB_WAIT;
	return (msleep_sbt(&sb->sb_cc, &sb->sb_mtx,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
	    sb->sb_timeo, 0, 0));
}

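/*
 * Illustrative sketch (not compiled): the usual consumer pattern re-checks
 * its condition in a loop under the socket buffer lock, since sbwait() can
 * return for reasons other than data arrival (signals, timeouts).  The
 * function name example_wait_for_data is hypothetical.
 */
#if 0
static int
example_wait_for_data(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	int error = 0;

	SOCKBUF_LOCK(sb);
	while (sb->sb_cc == 0 && (sb->sb_state & SBS_CANTRCVMORE) == 0) {
		error = sbwait(sb);
		if (error)
			break;
	}
	SOCKBUF_UNLOCK(sb);
	return (error);
}
#endif
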
int
sblock(struct sockbuf *sb, int flags)
{

	KASSERT((flags & SBL_VALID) == flags,
	    ("sblock: flags invalid (0x%x)", flags));

	if (flags & SBL_WAIT) {
		if ((sb->sb_flags & SB_NOINTR) ||
		    (flags & SBL_NOINTR)) {
			sx_xlock(&sb->sb_sx);
			return (0);
		}
		return (sx_xlock_sig(&sb->sb_sx));
	} else {
		if (sx_try_xlock(&sb->sb_sx) == 0)
			return (EWOULDBLOCK);
		return (0);
	}
}

void
sbunlock(struct sockbuf *sb)
{

	sx_xunlock(&sb->sb_sx);
}

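/*
 * Illustrative sketch (not compiled): callers serialize whole I/O
 * operations on a socket buffer by bracketing them with sblock() and
 * sbunlock(), handling EINTR/ERESTART from an interruptible sleep and
 * EWOULDBLOCK from a non-blocking attempt.  example_locked_io is
 * hypothetical.
 */
#if 0
static int
example_locked_io(struct sockbuf *sb, int dontwait)
{
	int error;

	error = sblock(sb, dontwait ? 0 : SBL_WAIT);
	if (error)
		return (error);	/* EWOULDBLOCK, EINTR or ERESTART */
	/* ... operate on the socket buffer ... */
	sbunlock(sb);
	return (0);
}
#endif
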
/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	int ret;

	SOCKBUF_LOCK_ASSERT(sb);

	selwakeuppri(&sb->sb_sel, PSOCK);
	if (!SEL_WAITING(&sb->sb_sel))
		sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_cc);
	}
	KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
	if (sb->sb_upcall != NULL) {
		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
		if (ret == SU_ISCONNECTED) {
			KASSERT(sb == &so->so_rcv,
			    ("SO_SND upcall returned SU_ISCONNECTED"));
			soupcall_clear(so, SO_RCV);
		}
	} else
		ret = SU_OK;
	if (sb->sb_flags & SB_AIO)
		aio_swake(so, sb);
	SOCKBUF_UNLOCK(sb);
	if (ret == SU_ISCONNECTED)
		soisconnected(so);
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGIO, 0);
	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
}

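/*
 * Illustrative sketch (not compiled): an in-kernel consumer can register a
 * receive upcall with soupcall_set(); sowakeup() above then invokes it with
 * the socket buffer lock held, so the callback must not sleep.  Returning
 * SU_OK keeps the upcall installed.  example_rcv_upcall and
 * example_register are hypothetical.
 */
#if 0
static int
example_rcv_upcall(struct socket *so, void *arg, int waitflag)
{
	/* Note arrival without sleeping; defer real work to a task. */
	return (SU_OK);
}

static void
example_register(struct socket *so, void *arg)
{

	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, example_rcv_upcall, arg);
	SOCKBUF_UNLOCK(&so->so_rcv);
}
#endif
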
/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field. The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
	struct thread *td = curthread;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
		goto bad;
	if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);
bad2:
	sbrelease_locked(&so->so_snd, so);
bad:
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (ENOBUFS);
}

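/*
 * Illustrative sketch (not compiled): a protocol's attach routine
 * typically reserves its default send and receive space once per socket.
 * The function name and the 8192-byte defaults are hypothetical
 * placeholders.
 */
#if 0
static int
example_pru_attach(struct socket *so, int proto, struct thread *td)
{
	int error;

	error = soreserve(so, 8192, 8192);	/* assumed defaults */
	if (error)
		return (error);
	/* ... protocol-specific setup ... */
	return (0);
}
#endif
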
static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long tmp_sb_max = sb_max;

	error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
	if (error || !req->newptr)
		return (error);
	if (tmp_sb_max < MSIZE + MCLBYTES)
		return (EINVAL);
	sb_max = tmp_sb_max;
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}

/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
int
sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
	rlim_t sbsize_limit;

	SOCKBUF_LOCK_ASSERT(sb);

	/*
	 * When a thread is passed, we take into account the thread's socket
	 * buffer size limit.  The caller will generally pass curthread, but
	 * in the TCP input path, NULL will be passed to indicate that no
	 * appropriate thread resource limits are available.  In that case,
	 * we don't apply a process limit.
	 */
	if (cc > sb_max_adj)
		return (0);
	if (td != NULL) {
		PROC_LOCK(td->td_proc);
		sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
		PROC_UNLOCK(td->td_proc);
	} else
		sbsize_limit = RLIM_INFINITY;
	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
	    sbsize_limit))
		return (0);
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}

int
sbreserve(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
	int error;

	SOCKBUF_LOCK(sb);
	error = sbreserve_locked(sb, cc, so, td);
	SOCKBUF_UNLOCK(sb);
	return (error);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease_internal(struct sockbuf *sb, struct socket *so)
{

	sbflush_internal(sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
	    RLIM_INFINITY);
	sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sbrelease_internal(sb, so);
}

void
sbrelease(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK(sb);
	sbrelease_locked(sb, so);
	SOCKBUF_UNLOCK(sb);
}

void
sbdestroy(struct sockbuf *sb, struct socket *so)
{

	sbrelease_internal(sb, so);
}

/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available, comparing the function sbspace() with the amount of data to be
 * added.  sbappendrecord() differs from sbappend() in that data supplied is
 * treated as the beginning of a new record.  To place a sender's address,
 * optional access rights, and data in a socket receive buffer,
 * sbappendaddr() should be used.  To place access rights and data in a
 * socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check for the caller that there will be
 * enough space to store the data.  Each fails if there is not enough space,
 * or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer in a
 * protocol with m_copy for output to a peer, and then removed from the
 * socket buffer with sbdrop() or sbdroprecord() when the data is
 * acknowledged by the peer.
 */
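
/*
 * Illustrative sketch (not compiled): the space-check, append, wakeup
 * sequence described above, as a protocol input path might perform it for
 * stream data.  example_deliver is hypothetical.
 */
#if 0
static void
example_deliver(struct socket *so, struct mbuf *m)
{

	SOCKBUF_LOCK(&so->so_rcv);
	if (sbspace(&so->so_rcv) < m_length(m, NULL)) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(m);		/* no room; drop */
		return;
	}
	sbappend_locked(&so->so_rcv, m);
	sorwakeup_locked(so);		/* drops the lock */
}
#endif
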
#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
			__func__, sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("%s from %s:%u", __func__, file, line);
	}
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
			__func__, sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("%s from %s:%u", __func__, file, line);
	}
}
#endif /* SOCKBUF_DEBUG */

#define SBLINKRECORD(sb, m0) do {					\
	SOCKBUF_LOCK_ASSERT(sb);					\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m == 0)
		return;

	SBLASTRECORDCHK(sb);
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		if ((n = sb->sb_lastrecord) != NULL) {
			do {
				if (n->m_flags & M_EOR) {
					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
					return;
				}
			} while (n->m_next && (n = n->m_next));
		} else {
			/*
			 * If this is the first record in the socket buffer,
			 * it's also the last record.
			 */
			sb->sb_lastrecord = m;
		}
	}
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb);
}

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK(sb);
	sbappend_locked(sb, m);
	SOCKBUF_UNLOCK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m)
{
	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));
	KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));

	SBLASTMBUFCHK(sb);

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK(sb);
	sbappendstream_locked(sb, m);
	SOCKBUF_UNLOCK(sb);
}

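/*
 * Illustrative sketch (not compiled): a TCP-style send path appends user
 * data to the single send-buffer record while holding the socket buffer
 * lock, then hands off to the protocol's output routine.
 * example_stream_send is hypothetical.
 */
#if 0
static void
example_stream_send(struct socket *so, struct mbuf *m)
{

	SOCKBUF_LOCK(&so->so_snd);
	sbappendstream_locked(&so->so_snd, m);
	SOCKBUF_UNLOCK(&so->so_snd);
	/* ... call the protocol's output routine ... */
}
#endif
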
#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb)
{
	struct mbuf *m;
	struct mbuf *n = 0;
	u_long len = 0, mbcnt = 0;

	SOCKBUF_LOCK_ASSERT(sb);

	for (m = sb->sb_mb; m; m = n) {
	    n = m->m_nextpkt;
	    for (; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
			mbcnt += m->m_ext.ext_size;
	    }
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %lu != %u || mbcnt %lu != %u\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 == 0)
		return;
	/*
	 * Put the first mbuf on the queue.  Note this permits zero length
	 * records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb);
	SBLINKRECORD(sb, m0);
	sb->sb_mbtail = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	/* always call sbcompress() so it can do SBLASTMBUFCHK() */
	sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

	SOCKBUF_LOCK(sb);
	sbappendrecord_locked(sb, m0);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *m, *n, *nlast;
	int space = asa->sa_len;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr_locked");
	if (m0)
		space += m0->m_pkthdr.len;
	space += m_length(control, &n);

	if (space > sbspace(sb))
		return (0);
#if MSIZE <= 256
	if (asa->sa_len > MLEN)
		return (0);
#endif
	m = m_get(M_NOWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendaddr_locked(sb, asa, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}

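/*
 * Illustrative sketch (not compiled): a datagram protocol's input path
 * delivers a packet together with the sender's address; on failure the
 * packet is dropped.  example_dgram_input is hypothetical.
 */
#if 0
static void
example_dgram_input(struct socket *so, struct sockaddr *src, struct mbuf *m)
{

	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv, src, m, NULL) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(m);		/* no space or no mbufs */
	} else
		sorwakeup_locked(so);	/* drops the lock */
}
#endif
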
int
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
	struct mbuf *m, *n, *mlast;
	int space;

	SOCKBUF_LOCK_ASSERT(sb);

	if (control == 0)
		panic("sbappendcontrol_locked");
	space = m_length(control, &n) + m_length(m0, NULL);

	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */

	SBLASTRECORDCHK(sb);

	for (m = control; m->m_next; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}

int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendcontrol_locked(sb, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}

/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, and no merging of data types
 *     will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor = 0;
	struct mbuf *o;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				/* XXX: Probably don't need.*/
				sb->sb_ctl += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
		n->m_flags |= eor;
	}
	SBLASTMBUFCHK(sb);
}

/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
 */
static void
sbflush_internal(struct sockbuf *sb)
{

	while (sb->sb_mbcnt) {
		/*
		 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever. Panic instead.
		 */
		if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		m_freem(sbcut_internal(sb, (int)sb->sb_cc));
	}
	if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt)
		panic("sbflush_internal: cc %u || mb %p || mbcnt %u",
		    sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
}

void
sbflush_locked(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbflush_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
	struct mbuf *m, *n, *next, *mfree;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	mfree = NULL;

	while (len > 0) {
		if (m == 0) {
			if (next == 0)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			if (sb->sb_sndptroff != 0)
				sb->sb_sndptroff -= len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				sb->sb_ctl -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		n = m->m_next;
		m->m_next = mfree;
		mfree = m;
		m = n;
	}
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		n = m->m_next;
		m->m_next = mfree;
		mfree = m;
		m = n;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
	 * sb_lastrecord is up-to-date if we dropped part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL) {
		sb->sb_lastrecord = m;
	}

	return (mfree);
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *mfree;

	SOCKBUF_LOCK(sb);
	mfree = sbcut_internal(sb, len);
	SOCKBUF_UNLOCK(sb);

	m_freem(mfree);
}

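/*
 * Illustrative sketch (not compiled): a reliable protocol drops
 * acknowledged data from the front of the send buffer and wakes any
 * writers blocked on space.  example_ack is hypothetical.
 */
#if 0
static void
example_ack(struct socket *so, int acked)
{

	SOCKBUF_LOCK(&so->so_snd);
	sbdrop_locked(&so->so_snd, acked);
	sowwakeup_locked(so);		/* drops the lock */
}
#endif
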
/*
 * Maintain a pointer and offset pair into the socket buffer mbuf chain to
 * avoid traversal of the entire socket buffer for larger offsets.
 */
struct mbuf *
sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff)
{
	struct mbuf *m, *ret;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
	KASSERT(off + len <= sb->sb_cc, ("%s: beyond sb", __func__));
	KASSERT(sb->sb_sndptroff <= sb->sb_cc, ("%s: sndptroff broken", __func__));

	/*
	 * Is off below stored offset? Happens on retransmits.
	 * Just return, we can't help here.
	 */
	if (sb->sb_sndptroff > off) {
		*moff = off;
		return (sb->sb_mb);
	}

	/* Return closest mbuf in chain for current offset. */
	*moff = off - sb->sb_sndptroff;
	m = ret = sb->sb_sndptr ? sb->sb_sndptr : sb->sb_mb;
	if (*moff == m->m_len) {
		*moff = 0;
		sb->sb_sndptroff += m->m_len;
		m = ret = m->m_next;
		KASSERT(ret->m_len > 0,
		    ("mbuf %p in sockbuf %p chain has no valid data", ret, sb));
	}

	/* Advance by len to be as close as possible for the next transmit. */
	for (off = off - sb->sb_sndptroff + len - 1;
	     off > 0 && m != NULL && off >= m->m_len;
	     m = m->m_next) {
		sb->sb_sndptroff += m->m_len;
		off -= m->m_len;
	}
	if (off > 0 && m == NULL)
		panic("%s: sockbuf %p and mbuf %p clashing", __func__, sb, ret);
	sb->sb_sndptr = m;

	return (ret);
}

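/*
 * Illustrative sketch (not compiled): a transmit path uses sbsndptr() to
 * find the mbuf and offset for the segment it is about to send, instead of
 * walking the chain from sb_mb each time.  example_locate is hypothetical.
 */
#if 0
static struct mbuf *
example_locate(struct socket *so, u_int off, u_int len, u_int *moff)
{

	/*
	 * Returns the mbuf containing byte 'off' of the send buffer;
	 * *moff is set to the offset within that mbuf.  off + len must
	 * not extend past sb_cc.
	 */
	return (sbsndptr(&so->so_snd, off, len, moff));
}
#endif
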
/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			m = m_free(m);
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbdroprecord_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MCLBYTES)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE((u_int)size) > MLEN)
		m = m_getcl(M_NOWAIT, MT_CONTROL, 0);
	else
		m = m_get(M_NOWAIT, MT_CONTROL);
	if (m == NULL)
		return ((struct mbuf *) NULL);
	cp = mtod(m, struct cmsghdr *);
	m->m_len = 0;
	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	/*
	 * Don't leave the padding between the msg header and the
	 * cmsg data, or the padding after the cmsg data, uninitialized.
	 */
	bzero(cp, CMSG_SPACE((u_int)size));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}

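/*
 * Illustrative sketch (not compiled): building a SCM_TIMESTAMP control
 * message to deliver alongside a datagram, in the style of SO_TIMESTAMP
 * option handling.  example_make_timestamp is hypothetical.
 */
#if 0
static struct mbuf *
example_make_timestamp(void)
{
	struct timeval tv;

	microtime(&tv);
	return (sbcreatecontrol((caddr_t)&tv, sizeof(tv),
	    SCM_TIMESTAMP, SOL_SOCKET));
}
#endif
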
/*
 * This does the same for socket buffers that sotoxsocket does for sockets:
 * generate a user-format data structure describing the socket buffer.  Note
 * that the xsockbuf structure, since it is always embedded in a socket, does
 * not include a self pointer nor a length.  We make this entry point public
 * in case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

	xsb->sb_cc = sb->sb_cc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mcnt = sb->sb_mcnt;
	xsb->sb_ccnt = sb->sb_ccnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = sb->sb_timeo;
}

/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");
