/* uipc_socket.c, revision 1817 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $Id$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>

void	sofree		__P((struct socket *));
void	sorflush	__P((struct socket *));

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
/*ARGSUSED*/
int
socreate(dom, aso, type, proto)
	int dom;
	struct socket **aso;
	register int type;
	int proto;
{
	struct proc *p = curproc;		/* XXX */
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0 || prp->pr_usrreq == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = type;
	if (p->p_ucred->cr_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

int
sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

int
solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, SOMAXCONN);
	splx(s);
	return (0);
}
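
#ifdef notdef
/*
 * Illustrative sketch only (hence "notdef"): one plausible way a
 * kernel-level caller could chain the routines above, modeled loosely
 * on the socket(2)/listen(2) system call paths (see uipc_syscalls.c).
 * The helper name and the bare AF_INET/SOCK_STREAM constants are
 * assumptions for the example, not part of this file.
 */
static int
example_stream_listener(aso)
	struct socket **aso;
{
	int error;

	error = socreate(AF_INET, aso, SOCK_STREAM, 0);
	if (error)
		return (error);
	/* A real caller would normally sobind() a name here. */
	error = solisten(*aso, 5);
	if (error)
		(void) soclose(*aso);
	return (error);
}
#endif /* notdef */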

void
sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0)
			(void) soabort(so->so_q0);
		while (so->so_q)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				if (error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, "netcls", so->so_linger))
					break;
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}
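
#ifdef notdef
/*
 * Illustrative sketch only: how the file layer is expected to reach
 * soclose() when the last file table reference is removed, modeled on
 * the soo_close() hook in sys_socket.c.  The helper name is an
 * assumption for the example, not part of this file.
 */
static int
example_soo_close(fp)
	struct file *fp;
{
	int error = 0;

	if (fp->f_data)
		error = soclose((struct socket *)fp->f_data);
	fp->f_data = 0;
	return (error);
}
#endif /* notdef */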

/*
 * Must be called at splnet...
 */
int
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

int
soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

int
soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}
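
#ifdef notdef
/*
 * Illustrative user-level sketch only (not kernel code): the
 * "disconnect by connecting to a null address" behavior described in
 * soconnect() above, as a datagram client might exercise it.  The
 * helper name is an assumption for the example.
 */
static void
example_undo_connect(s)
	int s;
{
	struct sockaddr sa;

	bzero((char *)&sa, sizeof(sa));
	sa.sa_len = sizeof(sa);
	sa.sa_family = AF_UNSPEC;
	/* The connect itself may fail; the side-effect disconnect is the point. */
	(void) connect(s, &sa, sizeof(sa));
}
#endif /* notdef */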
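
/*
 * Connect two sockets to each other; used, e.g., by the socketpair(2)
 * path for protocols that implement PRU_CONNECT2.
 */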
int
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

int
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(so, addr, uio, top, control, flags)
	register struct socket *so;
	struct mbuf *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
{
	struct proc *p = curproc;		/* XXX */
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0)
		return (EINVAL);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	if (error = sblock(&so->so_snd, SBLOCKWAIT(flags)))
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			if (top == 0) {
				MGETHDR(m, M_WAIT, MT_DATA);
				mlen = MHLEN;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			} else {
				MGET(m, M_WAIT, MT_DATA);
				mlen = MLEN;
			}
			if (resid >= MINCLSIZE) {
				MCLGET(m, M_WAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto nopages;
				mlen = MCLBYTES;
				len = min(min(mlen, resid), space);
			} else {
nopages:
				len = min(min(mlen, resid), space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			space -= len;
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    s = splnet();				/* XXX */
		    error = (*so->so_proto->pr_usrreq)(so,
			(flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			top, addr, control);
		    splx(s);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    clen = 0;
		    control = 0;
		    top = 0;
		    mp = &top;
		    if (error)
			goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
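
#ifdef notdef
/*
 * Illustrative sketch only: how a caller is expected to drive sosend()
 * with a uio, modeled loosely on the sendit() path in uipc_syscalls.c.
 * The helper name and its argument list are assumptions for the
 * example, not part of this file.
 */
static int
example_sosend(so, buf, len, p)
	struct socket *so;
	caddr_t buf;
	int len;
	struct proc *p;
{
	struct uio auio;
	struct iovec aiov;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_procp = p;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	return (sosend(so, (struct mbuf *)0, &auio, (struct mbuf *)0,
	    (struct mbuf *)0, 0));
}
#endif /* notdef */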

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(so, paddr, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct mbuf **paddr;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (paddr)
		*paddr = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0);

restart:
	if (error = sblock(&so->so_rcv, SBLOCKWAIT(flags)))
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || ((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0) {
#ifdef DIAGNOSTIC
		if (m == 0 && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
				   error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must stay consistent here (sb_mb points to
		 * the current mbuf, which links to the next record) while
		 * we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			if (m = so->so_rcv.sb_mb)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)flags, (struct mbuf *)0);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
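
#ifdef notdef
/*
 * Illustrative sketch only: how a caller is expected to drive
 * soreceive(), modeled loosely on the recvit() path in uipc_syscalls.c.
 * The helper name and its argument list are assumptions for the
 * example, not part of this file.
 */
static int
example_soreceive(so, buf, len, p)
	struct socket *so;
	caddr_t buf;
	int len;
	struct proc *p;
{
	struct uio auio;
	struct iovec aiov;
	struct mbuf *from = 0;
	int flags = 0;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_procp = p;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	error = soreceive(so, &from, &auio, (struct mbuf **)0,
	    (struct mbuf **)0, &flags);
	if (from)
		m_freem(from);
	return (error);
}
#endif /* notdef */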

int
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;
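
	/*
	 * how: 0 shuts down the receive side, 1 the send side, 2 both.
	 * how+1 maps this onto the FREAD (01) and FWRITE (10) bits.
	 */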
	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}
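
/*
 * Flush a socket's receive buffer and discard its contents, letting
 * the owning domain dispose of any in-transit access rights first.
 */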
void
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}
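
/*
 * Set a socket option.  SOL_SOCKET-level options are handled here;
 * anything else is passed through to the protocol's ctloutput routine.
 * The option mbuf m0 is consumed in either case.
 */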
int
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval *tv;
			short val;

			if (m == NULL || m->m_len < sizeof (*tv)) {
				error = EINVAL;
				goto bad;
			}
			tv = mtod(m, struct timeval *);
			if (tv->tv_sec > SHRT_MAX / hz - hz) {
				error = EDOM;
				goto bad;
			}
			val = tv->tv_sec * hz + tv->tv_usec / tick;

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
			m = NULL;	/* freed by protocol */
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}
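
/*
 * Get a socket option.  SOL_SOCKET-level options are answered here in a
 * freshly allocated mbuf returned via mp; others go to the protocol's
 * ctloutput routine.
 */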
int
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			int val = (optname == SO_SNDTIMEO ?
			     so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			m->m_len = sizeof(struct timeval);
			mtod(m, struct timeval *)->tv_sec = val / hz;
			mtod(m, struct timeval *)->tv_usec =
			    (val % hz) * tick;
			break;
		    }

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}
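
/*
 * Worked example of the timeout conversions above, under the assumption
 * hz = 100 (so tick = 10000 usec per clock tick): sosetopt() turns a
 * 2.5 second timeout into 2 * 100 + 500000 / 10000 = 250 ticks, and
 * sogetopt() recovers 250 / 100 = 2 sec plus (250 % 100) * 10000 =
 * 500000 usec.  sb_timeo is a short, hence the SHRT_MAX range check
 * in sosetopt().
 */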
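
/*
 * Notify the socket's owner (process or process group) that urgent,
 * out-of-band data has arrived, and wake up any select()ers on the
 * receive buffer.
 */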
void
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgid < 0)
		gsignal(-so->so_pgid, SIGURG);
	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
		psignal(p, SIGURG);
	selwakeup(&so->so_rcv.sb_sel);
}