uipc_socket.c revision 6127
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $Id: uipc_socket.c,v 1.5 1994/10/02 17:35:32 phk Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
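
/*
 * Illustrative sketch of a typical caller (not part of this file; the
 * real callers live in the system-call layer).  Here "nam" stands for
 * an mbuf holding a struct sockaddr supplied by the user:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, 0);
 *	if (error == 0)
 *		error = sobind(so, nam);
 */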

/*ARGSUSED*/
int
socreate(dom, aso, type, proto)
	int dom;
	struct socket **aso;
	register int type;
	int proto;
{
	struct proc *p = curproc;		/* XXX */
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0 || prp->pr_usrreq == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = type;
	if (p->p_ucred->cr_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}
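
/*
 * Bind a name (address) to a socket by passing the request down to
 * the protocol's PRU_BIND entry.
 */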
int
sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}
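
/*
 * Mark a socket as willing to accept connections and set the limit
 * on its pending-connection queue.
 */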
int
solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, SOMAXCONN);
	splx(s);
	return (0);
}
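
/*
 * Release a socket's resources once it has neither a protocol control
 * block nor a file descriptor reference, removing it from its listening
 * socket's queues first if necessary.
 */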
void
sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0)
			(void) soabort(so->so_q0);
		while (so->so_q)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				/* so_linger is in seconds; tsleep wants ticks */
				error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, netcls,
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
int
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}
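
/*
 * Accept a connection already completed by the protocol; PRU_ACCEPT
 * lets the protocol return the peer's address in "nam".
 */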
int
soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}
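
/*
 * Initiate a connection to the address in "nam", first disconnecting
 * an existing association where the protocol allows it.
 */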
int
soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}
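
/*
 * Connect two sockets to each other (the socketpair(2) case).
 */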
int
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}
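
/*
 * Begin an orderly disconnect of a connected socket.
 */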
int
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_KERNEL : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
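
/*
 * Illustrative sketch (not part of this file): a caller that sends a
 * single datagram described by "uio" to the address mbuf "addr", with
 * no control data and no flags, would invoke
 *
 *	error = sosend(so, addr, uio, (struct mbuf *)0,
 *	    (struct mbuf *)0, 0);
 */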
int
sosend(so, addr, uio, top, control, flags)
	register struct socket *so;
	struct mbuf *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
{
	struct proc *p = curproc;		/* XXX */
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0)
		return (EINVAL);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			if (top == 0) {
				MGETHDR(m, M_WAIT, MT_DATA);
				mlen = MHLEN;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			} else {
				MGET(m, M_WAIT, MT_DATA);
				mlen = MLEN;
			}
			if (resid >= MINCLSIZE) {
				MCLGET(m, M_WAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto nopages;
				mlen = MCLBYTES;
				len = min(min(mlen, resid), space);
			} else {
nopages:
				len = min(min(mlen, resid), space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			space -= len;
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    s = splnet();				/* XXX */
		    error = (*so->so_proto->pr_usrreq)(so,
			(flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			top, addr, control);
		    splx(s);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    clen = 0;
		    control = 0;
		    top = 0;
		    mp = &top;
		    if (error)
			goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
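
/*
 * Illustrative sketch (not part of this file): a plain read-style
 * caller supplies only a uio and ignores addresses, control data and
 * flags:
 *
 *	error = soreceive(so, (struct mbuf **)0, uio, (struct mbuf **)0,
 *	    (struct mbuf **)0, (int *)0);
 */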
int
soreceive(so, paddr, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct mbuf **paddr;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (paddr)
		*paddr = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == 0 && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
				   error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must be consistent here (pointing to the
		 * current mbuf and to the next record) when we drop
		 * priority; we must note any additions to the sockbuf
		 * when we block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * terminates the loop.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep the sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)flags, (struct mbuf *)0);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
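
/*
 * Shut down part of a full-duplex connection.  "how" follows the
 * shutdown(2) convention (0 = no more receives, 1 = no more sends,
 * 2 = both); adding one turns it into an FREAD/FWRITE bit mask.
 */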
int
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}
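
/*
 * Throw away everything in a socket's receive buffer, disposing of
 * any in-transit rights (e.g. passed file descriptors) through the
 * domain's dispose routine before releasing the mbufs.
 */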
void
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}
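
/*
 * Set a socket option.  Options above the socket level are passed to
 * the protocol via pr_ctloutput; socket-level options are handled
 * here, and most are also handed to the protocol afterwards so it
 * can track changes.
 */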
int
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval *tv;
			short val;

			if (m == NULL || m->m_len < sizeof (*tv)) {
				error = EINVAL;
				goto bad;
			}
			tv = mtod(m, struct timeval *);
			if (tv->tv_sec > SHRT_MAX / hz - hz) {
				error = EDOM;
				goto bad;
			}
			/* convert the timeout to clock ticks */
			val = tv->tv_sec * hz + tv->tv_usec / tick;

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
			m = NULL;	/* freed by protocol */
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}
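
/*
 * Get a socket option.  Socket-level options are answered from the
 * socket structure itself; anything else is referred to the protocol,
 * which returns its answer in an mbuf through *mp.
 */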
int
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			int val = (optname == SO_SNDTIMEO ?
			     so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			/* sb_timeo is in ticks; convert back to a timeval */
			m->m_len = sizeof(struct timeval);
			mtod(m, struct timeval *)->tv_sec = val / hz;
			mtod(m, struct timeval *)->tv_usec =
			    (val % hz) * tick;
			break;
		    }

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}
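
/*
 * Notify the socket's owner that out-of-band data has arrived:
 * post SIGURG to the owning process or process group and wake up
 * any select(2)ers waiting on the receive buffer.
 */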
void
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgid < 0)
		gsignal(-so->so_pgid, SIGURG);
	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
		psignal(p, SIGURG);
	selwakeup(&so->so_rcv.sb_sel);
}