/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/uipc_sockbuf.c 276058 2014-12-22 15:39:24Z glebius $");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void	(*aio_swake)(struct socket *, struct sockbuf *);

/*
 * Primitive routines for operating on socket buffers
 */

u_long	sb_max = SB_MAX;
u_long sb_max_adj =
       (quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */

static struct mbuf	*sbcut_internal(struct sockbuf *sb, int len);
static void	sbflush_internal(struct sockbuf *sb);

/*
 * Mark ready "count" mbufs starting with "m".
 */
int
sbready(struct sockbuf *sb, struct mbuf *m, int count)
{
	u_int blocker;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));

	blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;

	for (int i = 0; i < count; i++, m = m->m_next) {
		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));
		m->m_flags &= ~(M_NOTREADY | blocker);
		if (blocker)
			sb->sb_acc += m->m_len;
	}

	if (!blocker)
		return (EINPROGRESS);

	/* This one was blocking all the queue. */
	for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
		KASSERT(m->m_flags & M_BLOCKED,
		    ("%s: m %p !M_BLOCKED", __func__, m));
		m->m_flags &= ~M_BLOCKED;
		sb->sb_acc += m->m_len;
	}

	sb->sb_fnrdy = m;

	return (0);
}
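
/*
 * Illustrative sketch of the not-ready protocol (the surrounding code
 * is hypothetical): a producer that appends mbufs before their pages
 * are filled, such as an asynchronous file reader, appends them with
 * PRUS_NOTREADY and later marks them ready once the I/O completes:
 *
 *	SOCKBUF_LOCK(sb);
 *	sbappendstream_locked(sb, m, PRUS_NOTREADY);
 *	SOCKBUF_UNLOCK(sb);
 *	... asynchronous I/O into the mbufs completes ...
 *	SOCKBUF_LOCK(sb);
 *	error = sbready(sb, m, count);
 *	SOCKBUF_UNLOCK(sb);
 *
 * A return value of EINPROGRESS means mbufs earlier in the queue are
 * still not ready, so the readied data cannot be transmitted yet.
 */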

/*
 * Adjust sockbuf state reflecting allocation of m.
 */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_ccc += m->m_len;

	if (sb->sb_fnrdy == NULL) {
		if (m->m_flags & M_NOTREADY)
			sb->sb_fnrdy = m;
		else
			sb->sb_acc += m->m_len;
	} else
		m->m_flags |= M_BLOCKED;

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl += m->m_len;

	sb->sb_mbcnt += MSIZE;
	sb->sb_mcnt += 1;

	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt += m->m_ext.ext_size;
		sb->sb_ccnt += 1;
	}
}
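
/*
 * Worked example of the sb_ccc/sb_acc split: appending a ready
 * 100-byte mbuf, then a not-ready 50-byte mbuf, then another ready
 * 100-byte mbuf leaves sb_ccc = 250 but sb_acc = 100.  The second
 * mbuf becomes sb_fnrdy and the third is flagged M_BLOCKED, so its
 * length is credited to sb_acc only when sbready() clears the way.
 */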

/*
 * Adjust sockbuf state reflecting freeing of m.
 */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
	SOCKBUF_LOCK_ASSERT(sb);
#endif

	sb->sb_ccc -= m->m_len;

	if (!(m->m_flags & M_NOTAVAIL))
		sb->sb_acc -= m->m_len;

	if (m == sb->sb_fnrdy) {
		struct mbuf *n;

		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));

		n = m->m_next;
		while (n != NULL && !(n->m_flags & M_NOTREADY)) {
			n->m_flags &= ~M_BLOCKED;
			sb->sb_acc += n->m_len;
			n = n->m_next;
		}
		sb->sb_fnrdy = n;
	}

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl -= m->m_len;

	sb->sb_mbcnt -= MSIZE;
	sb->sb_mcnt -= 1;
	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt -= m->m_ext.ext_size;
		sb->sb_ccnt -= 1;
	}

	if (sb->sb_sndptr == m) {
		sb->sb_sndptr = NULL;
		sb->sb_sndptroff = 0;
	}
	if (sb->sb_sndptroff != 0)
		sb->sb_sndptroff -= m->m_len;
}

/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * is normally applied to a socket by the protocol code (the PRU_SHUTDOWN
 * case) when the user informs the system that no more data is to be sent.
 * Socantrcvmore indicates that no more data will be received, and will
 * normally be applied to the socket by a protocol when it detects that the
 * peer will send no more data.  Data queued for reading in the socket may
 * yet be read.
 */
void
socantsendmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_snd);

	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantsendmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_snd);
	socantsendmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantrcvmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

void
socantrcvmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_rcv);
	socantrcvmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_flags |= SB_WAIT;
	return (msleep_sbt(&sb->sb_acc, &sb->sb_mtx,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
	    sb->sb_timeo, 0, 0));
}
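
/*
 * Illustrative sketch (assuming the sbavail() accessor from
 * sys/socketvar.h): callers invoke sbwait() in a loop with the socket
 * buffer lock held, rechecking the condition after each wakeup:
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	while (sbavail(&so->so_rcv) == 0 &&
 *	    (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
 *		error = sbwait(&so->so_rcv);
 *		if (error)
 *			break;
 *	}
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 */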

int
sblock(struct sockbuf *sb, int flags)
{

	KASSERT((flags & SBL_VALID) == flags,
	    ("sblock: flags invalid (0x%x)", flags));

	if (flags & SBL_WAIT) {
		if ((sb->sb_flags & SB_NOINTR) ||
		    (flags & SBL_NOINTR)) {
			sx_xlock(&sb->sb_sx);
			return (0);
		}
		return (sx_xlock_sig(&sb->sb_sx));
	} else {
		if (sx_try_xlock(&sb->sb_sx) == 0)
			return (EWOULDBLOCK);
		return (0);
	}
}

void
sbunlock(struct sockbuf *sb)
{

	sx_xunlock(&sb->sb_sx);
}
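
/*
 * Illustrative sketch: consumers in the style of soreceive_generic()
 * serialize with other readers via sblock() before walking the chain:
 *
 *	error = sblock(&so->so_rcv, SBL_WAIT);
 *	if (error)
 *		return (error);
 *	... dequeue data from the receive buffer ...
 *	sbunlock(&so->so_rcv);
 */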

/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	int ret;

	SOCKBUF_LOCK_ASSERT(sb);

	selwakeuppri(&sb->sb_sel, PSOCK);
	if (!SEL_WAITING(&sb->sb_sel))
		sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_acc);
	}
	KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
	if (sb->sb_upcall != NULL) {
		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
		if (ret == SU_ISCONNECTED) {
			KASSERT(sb == &so->so_rcv,
			    ("SO_SND upcall returned SU_ISCONNECTED"));
			soupcall_clear(so, SO_RCV);
		}
	} else
		ret = SU_OK;
	if (sb->sb_flags & SB_AIO)
		aio_swake(so, sb);
	SOCKBUF_UNLOCK(sb);
	if (ret == SU_ISCONNECTED)
		soisconnected(so);
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGIO, 0);
	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field.  The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
	struct thread *td = curthread;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
		goto bad;
	if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);
bad2:
	sbrelease_locked(&so->so_snd, so);
bad:
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (ENOBUFS);
}
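
/*
 * Illustrative sketch: a protocol's attach routine typically reserves
 * default buffer space at socket creation time; the names and sizes
 * below are hypothetical:
 *
 *	static u_long my_sendspace = 8192;
 *	static u_long my_recvspace = 8192;
 *
 *	error = soreserve(so, my_sendspace, my_recvspace);
 *	if (error)
 *		return (error);
 */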

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long tmp_sb_max = sb_max;

	error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
	if (error || !req->newptr)
		return (error);
	if (tmp_sb_max < MSIZE + MCLBYTES)
		return (EINVAL);
	sb_max = tmp_sb_max;
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}

/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
int
sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
	rlim_t sbsize_limit;

	SOCKBUF_LOCK_ASSERT(sb);

	/*
	 * When a thread is passed, we take into account the thread's socket
	 * buffer size limit.  The caller will generally pass curthread, but
	 * in the TCP input path, NULL will be passed to indicate that no
	 * appropriate thread resource limits are available.  In that case,
	 * we don't apply a process limit.
	 */
	if (cc > sb_max_adj)
		return (0);
	if (td != NULL) {
		PROC_LOCK(td->td_proc);
		sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
		PROC_UNLOCK(td->td_proc);
	} else
		sbsize_limit = RLIM_INFINITY;
	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
	    sbsize_limit))
		return (0);
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}
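
/*
 * Worked example: with the default sb_efficiency of 8, reserving
 * cc = 65536 bytes of data space sets sb_mbmax to
 * min(8 * 65536, sb_max) = 524288 bytes of mbuf storage (assuming the
 * default 2MB sb_max), leaving headroom for the MSIZE and ext_size
 * overhead that sballoc() charges against sb_mbcnt per mbuf.
 */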

int
sbreserve(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
	int error;

	SOCKBUF_LOCK(sb);
	error = sbreserve_locked(sb, cc, so, td);
	SOCKBUF_UNLOCK(sb);
	return (error);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease_internal(struct sockbuf *sb, struct socket *so)
{

	sbflush_internal(sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
	    RLIM_INFINITY);
	sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sbrelease_internal(sb, so);
}

void
sbrelease(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK(sb);
	sbrelease_locked(sb, so);
	SOCKBUF_UNLOCK(sb);
}

void
sbdestroy(struct sockbuf *sb, struct socket *so)
{

	sbrelease_internal(sb, so);
}

/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available, comparing the function sbspace() with the amount of data to be
 * added.  sbappendrecord() differs from sbappend() in that data supplied is
 * treated as the beginning of a new record.  To place a sender's address,
 * optional access rights, and data in a socket receive buffer,
 * sbappendaddr() should be used.  To place access rights and data in a
 * socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check for the caller that there will be
 * enough space to store the data.  Each fails if there is not enough space,
 * or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer with
 * m_copy for output to a peer, and then removed from the socket buffer with
 * sbdrop() or sbdroprecord() once it is acknowledged by the peer.
 */
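
/*
 * Illustrative sketch: a typical producer checks sbspace() under the
 * socket buffer lock before appending and then wakes up readers (the
 * error handling here is hypothetical):
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
 *		SOCKBUF_UNLOCK(&so->so_rcv);
 *		m_freem(m);
 *		return;
 *	}
 *	sbappend_locked(&so->so_rcv, m);
 *	sorwakeup_locked(so);
 *
 * sorwakeup_locked() releases the socket buffer lock on return.
 */
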
#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
			__func__, sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("%s from %s:%u", __func__, file, line);
	}
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
			__func__, sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("%s from %s:%u", __func__, file, line);
	}
}
#endif /* SOCKBUF_DEBUG */

#define SBLINKRECORD(sb, m0) do {					\
	SOCKBUF_LOCK_ASSERT(sb);					\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m == 0)
		return;
	m_clrprotoflags(m);
	SBLASTRECORDCHK(sb);
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		if ((n = sb->sb_lastrecord) != NULL) {
			do {
				if (n->m_flags & M_EOR) {
					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
					return;
				}
			} while (n->m_next && (n = n->m_next));
		} else {
			/*
			 * If this is the first record in the socket buffer,
			 * it's also the last record.
			 */
			sb->sb_lastrecord = m;
		}
	}
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb);
}

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK(sb);
	sbappend_locked(sb, m);
	SOCKBUF_UNLOCK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));
	KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));

	SBLASTMBUFCHK(sb);

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappendstream_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}

#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m, *n, *fnrdy;
	u_long acc, ccc, mbcnt;

	SOCKBUF_LOCK_ASSERT(sb);

	acc = ccc = mbcnt = 0;
	fnrdy = NULL;

	for (m = sb->sb_mb; m; m = n) {
	    n = m->m_nextpkt;
	    for (; m; m = m->m_next) {
		if (m->m_len == 0) {
			printf("sb %p empty mbuf %p\n", sb, m);
			goto fail;
		}
		if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
			if (m != sb->sb_fnrdy) {
				printf("sb %p: fnrdy %p != m %p\n",
				    sb, sb->sb_fnrdy, m);
				goto fail;
			}
			fnrdy = m;
		}
		if (fnrdy) {
			if (!(m->m_flags & M_NOTAVAIL)) {
				printf("sb %p: fnrdy %p, m %p is avail\n",
				    sb, sb->sb_fnrdy, m);
				goto fail;
			}
		} else
			acc += m->m_len;
		ccc += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
			mbcnt += m->m_ext.ext_size;
	    }
	}
	if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
		printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
		    acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
		goto fail;
	}
	return;
fail:
	panic("%s from %s:%u", __func__, file, line);
}
#endif

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 == 0)
		return;
	m_clrprotoflags(m0);
	/*
	 * Put the first mbuf on the queue.  Note this permits zero length
	 * records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb);
	SBLINKRECORD(sb, m0);
	sb->sb_mbtail = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	/* always call sbcompress() so it can do SBLASTMBUFCHK() */
	sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

	SOCKBUF_LOCK(sb);
	sbappendrecord_locked(sb, m0);
	SOCKBUF_UNLOCK(sb);
}

/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
	struct mbuf *m, *n, *nlast;
#if MSIZE <= 256
	if (asa->sa_len > MLEN)
		return (0);
#endif
	m = m_get(M_NOWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (m0)
		m_clrprotoflags(m0);
	if (ctrl_last)
		ctrl_last->m_next = m0;	/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;
	int space = asa->sa_len;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr_locked");
	if (m0)
		space += m0->m_pkthdr.len;
	space += m_length(control, &ctrl_last);

	if (space > sbspace(sb))
		return (0);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if insufficient mbufs.  Does not validate space
 * on the receiving sockbuf.
 */
int
sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;

	SOCKBUF_LOCK_ASSERT(sb);

	ctrl_last = (control == NULL) ? NULL : m_last(control);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendaddr_locked(sb, asa, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}
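
/*
 * Illustrative sketch: a datagram protocol's input path might queue a
 * received packet together with the sender's address ("from" and "m"
 * are hypothetical):
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	if (sbappendaddr_locked(&so->so_rcv, from, m, NULL) == 0) {
 *		SOCKBUF_UNLOCK(&so->so_rcv);
 *		m_freem(m);
 *	} else
 *		sorwakeup_locked(so);
 */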

int
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
	struct mbuf *m, *n, *mlast;
	int space;

	SOCKBUF_LOCK_ASSERT(sb);

	if (control == 0)
		panic("sbappendcontrol_locked");
	space = m_length(control, &n) + m_length(m0, NULL);

	if (space > sbspace(sb))
		return (0);
	m_clrprotoflags(m0);
	n->m_next = m0;			/* concatenate data to control */

	SBLASTRECORDCHK(sb);

	for (m = control; m->m_next; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}

int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendcontrol_locked(sb, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}

/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, neither mbuf is marked
 *     not-ready, and no merging of data types will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor = 0;
	struct mbuf *o;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    !(m->m_flags & M_NOTREADY) &&
		    !(n->m_flags & M_NOTREADY) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_ccc += m->m_len;
			if (sb->sb_fnrdy == NULL)
				sb->sb_acc += m->m_len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				/* XXX: Probably don't need.*/
				sb->sb_ctl += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
		n->m_flags |= eor;
	}
	SBLASTMBUFCHK(sb);
}
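
/*
 * Worked example of case (2) above: if the tail mbuf "n" is a writable
 * 2k cluster of the same type holding 100 bytes, and the incoming mbuf
 * "m" carries 200 bytes (under both MCLBYTES / 4 and
 * M_TRAILINGSPACE(n)), sbcompress() copies the 200 bytes into "n",
 * grows n->m_len to 300, charges sb_ccc (and sb_acc when no not-ready
 * data precedes it), and frees "m", keeping the chain short.
 */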

/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
 */
static void
sbflush_internal(struct sockbuf *sb)
{

	while (sb->sb_mbcnt) {
		/*
		 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever. Panic instead.
		 */
		if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
	}
	KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
	    ("%s: ccc %u mb %p mbcnt %u", __func__,
	    sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
}

void
sbflush_locked(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbflush_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
	struct mbuf *m, *next, *mfree;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	mfree = NULL;

	while (len > 0) {
		if (m == NULL) {
			KASSERT(next, ("%s: no next, len %d", __func__, len));
			m = next;
			next = m->m_nextpkt;
		}
		if (m->m_len > len) {
			KASSERT(!(m->m_flags & M_NOTAVAIL),
			    ("%s: m %p M_NOTAVAIL", __func__, m));
			m->m_len -= len;
			m->m_data += len;
			sb->sb_ccc -= len;
			sb->sb_acc -= len;
			if (sb->sb_sndptroff != 0)
				sb->sb_sndptroff -= len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				sb->sb_ctl -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		/*
		 * Do not put M_NOTREADY buffers on the free list; they
		 * are referenced from outside.
		 */
		if (m->m_flags & M_NOTREADY)
			m = m->m_next;
		else {
			struct mbuf *n;

			n = m->m_next;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
	}
	/*
	 * Free any zero-length mbufs from the buffer.
	 * For SOCK_DGRAM sockets such mbufs represent empty records.
	 * XXX: For SOCK_STREAM sockets such mbufs can appear in the buffer,
	 * when sosend_generic() needs to send only control data.
	 */
	while (m && m->m_len == 0) {
		struct mbuf *n;

		sbfree(sb, m);
		n = m->m_next;
		m->m_next = mfree;
		mfree = m;
		m = n;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
	 * sb_lastrecord is up-to-date if we dropped part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL) {
		sb->sb_lastrecord = m;
	}

	return (mfree);
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *mfree;

	SOCKBUF_LOCK(sb);
	mfree = sbcut_internal(sb, len);
	SOCKBUF_UNLOCK(sb);

	m_freem(mfree);
}

/*
 * Maintain a pointer and offset pair into the socket buffer mbuf chain to
 * avoid traversal of the entire socket buffer for larger offsets.
 */
struct mbuf *
sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff)
{
	struct mbuf *m, *ret;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
	KASSERT(off + len <= sb->sb_acc, ("%s: beyond sb", __func__));
	KASSERT(sb->sb_sndptroff <= sb->sb_acc, ("%s: sndptroff broken", __func__));

	/*
	 * Is off below stored offset? Happens on retransmits.
	 * Just return, we can't help here.
	 */
	if (sb->sb_sndptroff > off) {
		*moff = off;
		return (sb->sb_mb);
	}

	/* Return closest mbuf in chain for current offset. */
	*moff = off - sb->sb_sndptroff;
	m = ret = sb->sb_sndptr ? sb->sb_sndptr : sb->sb_mb;
	if (*moff == m->m_len) {
		*moff = 0;
		sb->sb_sndptroff += m->m_len;
		m = ret = m->m_next;
		KASSERT(ret->m_len > 0,
		    ("mbuf %p in sockbuf %p chain has no valid data", ret, sb));
	}

	/* Advance by len to be as close as possible for the next transmit. */
	for (off = off - sb->sb_sndptroff + len - 1;
	     off > 0 && m != NULL && off >= m->m_len;
	     m = m->m_next) {
		sb->sb_sndptroff += m->m_len;
		off -= m->m_len;
	}
	if (off > 0 && m == NULL)
		panic("%s: sockbuf %p and mbuf %p clashing", __func__, sb, ret);
	sb->sb_sndptr = m;

	return (ret);
}
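
/*
 * Worked example: suppose the send buffer holds four 512-byte mbufs
 * and a previous call left sb_sndptr at the third mbuf with
 * sb_sndptroff = 1024.  A request for off = 1024, len = 512 returns
 * the third mbuf with *moff = 0 without walking the chain from sb_mb;
 * only a retransmit with off below sb_sndptroff falls back to the
 * start of the chain.
 */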

/*
 * Return the first mbuf and the mbuf data offset for the provided
 * send offset without changing the "sb_sndptroff" field.
 */
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));

	/*
	 * If the "off" is below the stored offset, which happens on
	 * retransmits, just use "sb_mb":
	 */
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		m = sb->sb_mb;
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	while (off > 0 && m != NULL) {
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	*moff = off;
	return (m);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			m = m_free(m);
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbdroprecord_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MCLBYTES)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE((u_int)size) > MLEN)
		m = m_getcl(M_NOWAIT, MT_CONTROL, 0);
	else
		m = m_get(M_NOWAIT, MT_CONTROL);
	if (m == NULL)
		return ((struct mbuf *) NULL);
	cp = mtod(m, struct cmsghdr *);
	m->m_len = 0;
	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	/*
	 * Don't leave the padding between the msg header and the
	 * cmsg data and the padding after the cmsg data un-initialized.
	 */
	bzero(cp, CMSG_SPACE((u_int)size));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
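
/*
 * Illustrative sketch: a protocol delivering a receive timestamp as
 * ancillary data might build the control mbuf like this ("tv" and
 * "control" are hypothetical locals):
 *
 *	struct timeval tv;
 *
 *	microtime(&tv);
 *	control = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
 *	    SCM_TIMESTAMP, SOL_SOCKET);
 *	if (control == NULL)
 *		... allocation failed; deliver the data without it ...
 */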

/*
 * This does the same for socket buffers that sotoxsocket does for sockets:
 * generate a user-format data structure describing the socket buffer.  Note
 * that the xsockbuf structure, since it is always embedded in a socket, does
 * not include a self pointer nor a length.  We make this entry point public
 * in case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

	xsb->sb_cc = sb->sb_ccc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mcnt = sb->sb_mcnt;
	xsb->sb_ccnt = sb->sb_ccnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = sb->sb_timeo;
}

/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");
1317