Diff of sys/kern/uipc_socket.c between FreeBSD revisions 58225 (2000-03-18, fenner) and 59288 (2000-04-16, jlemon). Revision 59288 adds the kqueue hooks for sockets; lines it adds are marked with `+', the one line it replaces with `-', and unchanged context is shown once, unmarked.
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
- * $FreeBSD: head/sys/kern/uipc_socket.c 58225 2000-03-18 08:56:56Z fenner $
+ * $FreeBSD: head/sys/kern/uipc_socket.c 59288 2000-04-16 18:53:38Z jlemon $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
+#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/malloc.h>
+#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm_zone.h>

#include <machine/limits.h>

+static int	filt_sorattach(struct knote *kn);
+static void	filt_sordetach(struct knote *kn);
+static int	filt_soread(struct knote *kn, long hint);
+static int	filt_sowattach(struct knote *kn);
+static void	filt_sowdetach(struct knote *kn);
+static int	filt_sowrite(struct knote *kn, long hint);
+static int	filt_solisten(struct knote *kn, long hint);
+
+static struct filterops solisten_filtops =
+	{ 1, filt_sorattach, filt_sordetach, filt_solisten };
+
+struct filterops so_rwfiltops[] = {
+	{ 1, filt_sorattach, filt_sordetach, filt_soread },
+	{ 1, filt_sowattach, filt_sowdetach, filt_sowrite },
+};
+
struct vm_zone *socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(waitok)
	int waitok;
{
	struct socket *so;

	so = zalloci(socket_zone);
	if (so) {
		/* XXX race condition for reentrant kernel */
		bzero(so, sizeof *so);
		so->so_gencnt = ++so_gencnt;
		so->so_zone = socket_zone;
		TAILQ_INIT(&so->so_aiojobq);
	}
	return so;
}

int
socreate(dom, aso, type, proto, p)
	int dom;
	struct socket **aso;
	register int type;
	int proto;
	struct proc *p;
{
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0);
	if (so == 0)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = p->p_ucred;
	crhold(so->so_cred);
	so->so_proto = prp;
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

int
sobind(so, nam, p)
	struct socket *so;
	struct sockaddr *nam;
	struct proc *p;
{
	int s = splnet();
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
	splx(s);
	return (error);
}

void
sodealloc(so)
	struct socket *so;
{

	so->so_gencnt = ++so_gencnt;
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uid,
		    -(rlim_t)so->so_rcv.sb_hiwat);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uid,
		    -(rlim_t)so->so_snd.sb_hiwat);
	crfree(so->so_cred);
	zfreei(so->so_zone, so);
}

int
solisten(so, backlog, p)
	register struct socket *so;
	int backlog;
	struct proc *p;
{
	int s, error;

	s = splnet();
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
	if (error) {
		splx(s);
		return (error);
	}
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	splx(s);
	return (0);
}
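
/*
 * [Editorial illustration -- not part of the diff.]  The backlog clamp
 * above means listen(2) silently substitutes the kern.ipc.somaxconn
 * value for out-of-range requests, e.g.:
 *
 *	listen(s, -1);		-> so_qlimit = somaxconn
 *	listen(s, 1000000);	-> so_qlimit = somaxconn
 */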

void
sofree(so)
	register struct socket *so;
{
	struct socket *head = so->so_head;

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			return;
		} else {
			panic("sofree: not queued");
		}
		head->so_qlen--;
		so->so_state &= ~SS_INCOMP;
		so->so_head = NULL;
	}
	sbrelease(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	funsetown(so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;

		sp = TAILQ_FIRST(&so->so_incomp);
		for (; sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			(void) soabort(sp);
		}
		for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_state &= ~SS_COMP;
			sp->so_head = NULL;
			(void) soabort(sp);
		}
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}
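
/*
 * [Editorial illustration -- not part of the diff.]  With SO_LINGER set
 * on a blocking socket, the tsleep() above makes close(2) wait up to
 * so_linger seconds (so_linger * hz ticks) for the disconnect to finish.
 * Userland sketch (lift out of the #if 0 guard to compile):
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

void
lingering_close(int s)
{
	struct linger l;

	l.l_onoff = 1;			/* enable SO_LINGER */
	l.l_linger = 5;			/* wait at most 5 seconds */
	(void)setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof l);
	(void)close(s);			/* may sleep in "soclos" above */
}
#endif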

/*
 * Must be called at splnet...
 */
int
soabort(so)
	struct socket *so;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
	if (error) {
		sofree(so);
		return error;
	}
	return (0);
}

int
soaccept(so, nam)
	register struct socket *so;
	struct sockaddr **nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0)
		error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	else {
		if (nam)
			*nam = 0;
		error = 0;
	}
	splx(s);
	return (error);
}

int
soconnect(so, nam, p)
	register struct socket *so;
	struct sockaddr *nam;
	struct proc *p;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
	splx(s);
	return (error);
}

int
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
	splx(s);
	return (error);
}

int
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
bad:
	splx(s);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(so, addr, uio, top, control, flags, p)
	register struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
	struct proc *p;
{
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (p)
		p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			if (top == 0) {
				MGETHDR(m, M_WAIT, MT_DATA);
				if (m == NULL) {
					error = ENOBUFS;
					goto release;
				}
				mlen = MHLEN;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			} else {
				MGET(m, M_WAIT, MT_DATA);
				if (m == NULL) {
					error = ENOBUFS;
					goto release;
				}
				mlen = MLEN;
			}
			if (resid >= MINCLSIZE) {
				MCLGET(m, M_WAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto nopages;
				mlen = MCLBYTES;
				len = min(min(mlen, resid), space);
			} else {
nopages:
				len = min(min(mlen, resid), space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			space -= len;
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    s = splnet();				/* XXX */
		    /*
		     * XXX all the SS_CANTSENDMORE checks previously
		     * done could be out of date.  We could have received
		     * a reset packet in an interrupt or maybe we slept
		     * while doing page faults in uiomove() etc.  We could
		     * probably recheck again inside the splnet() protection
		     * here, but there are probably other places that this
		     * also happens.  We must rethink this.
		     */
		    error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			(flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol
			 * understands this flag, and there is nothing
			 * left to send, then use PRU_SEND_EOF instead
			 * of PRU_SEND.
			 */
			((flags & MSG_EOF) &&
			 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			 (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME */
			(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			top, addr, control, p);
		    splx(s);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    clen = 0;
		    control = 0;
		    top = 0;
		    mp = &top;
		    if (error)
			    goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
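
/*
 * [Editorial illustration -- not part of the diff.]  Two userland-visible
 * consequences of the checks above: on a PR_ATOMIC protocol such as UDP,
 * a message larger than the send buffer fails immediately with EMSGSIZE
 * instead of blocking, and a send with no destination on an unconnected
 * datagram socket fails with EDESTADDRREQ.  Sketch (lift out of the
 * #if 0 guard to compile):
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>

void
sosend_errors(int udp_connected, int udp_unconnected,
    const char *buf, size_t oversized)
{
	/* atomic && resid > sb_hiwat above -> fails with EMSGSIZE */
	(void)send(udp_connected, buf, oversized, 0);
	/* addr == 0 on a non-PR_CONNREQUIRED protocol -> EDESTADDRREQ */
	(void)send(udp_unconnected, "x", 1, 0);
}
#endif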

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct sockaddr **psa;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa)
		*psa = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *),
			    mp0 == 0);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must be consistent here (sb_mb points to
		 * the current mbuf, m_nextpkt to the next record) when
		 * we drop priority; we must note any additions to the
		 * sockbuf when we block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
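
/*
 * [Editorial illustration -- not part of the diff.]  MSG_PEEK takes the
 * non-consuming branches above (no sbfree()/MFREE()), so the data stays
 * queued, while MSG_WAITALL keeps the receive looping until uio_resid
 * reaches 0 or an error, EOF, or record boundary intervenes.  Sketch
 * (lift out of the #if 0 guard to compile):
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>

ssize_t
peek_then_fill(int s, void *buf, size_t len)
{
	(void)recv(s, buf, len, MSG_PEEK);	/* record stays queued */
	return (recv(s, buf, len, MSG_WAITALL)); /* short only on error/EOF */
}
#endif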

int
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}
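
/*
 * [Editorial illustration -- not part of the diff.]  The "how++" above
 * maps the shutdown(2) argument onto the file flag bits FREAD (1) and
 * FWRITE (2): how = 0 (no more receives) -> 1 = FREAD, how = 1 (no more
 * sends) -> 2 = FWRITE, and how = 2 (both) -> 3 = FREAD|FWRITE, which is
 * why both sorflush() and pru_shutdown() run in the how = 2 case.
 */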

void
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
}

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;
	void *buf;
	size_t len;
	size_t minlen;
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_p != 0)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
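
/*
 * [Editorial illustration -- not part of the diff.]  A hypothetical
 * protocol pr_ctloutput() handler would pull an integer option in like
 * this; sooptcopyin() returns EINVAL for values shorter than minlen and
 * quietly ignores any excess beyond len:
 */
#if 0
static int
example_ctloutput(struct socket *so, struct sockopt *sopt)
{
	int error, optval;

	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
	if (error)
		return (error);
	/* ... apply optval to protocol-private state ... */
	return (0);
}
#endif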

int
sosetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curproc) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > SHRT_MAX) {
				error = EDOM;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		}
	}
bad:
	return (error);
}
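
/*
 * [Editorial illustration -- not part of the diff.]  Worked example of
 * the SO_SNDTIMEO/SO_RCVTIMEO conversion above, assuming the common
 * hz = 100 (tick = 10000 us): tv = { 2, 500000 } yields
 * val = 2 * 100 + 500000 / 10000 = 250 ticks, which fits in the short
 * sb_timeo; any tv_sec above SHRT_MAX / hz = 327 is rejected with EDOM
 * before the multiplication can overflow.
 */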

/* Helper routine for getsockopt */
int
sooptcopyout(sopt, buf, len)
	struct sockopt *sopt;
	void *buf;
	size_t len;
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_p != 0)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;
}

int
sogetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
	if (m == 0)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == 0) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return 0;
}
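
/*
 * [Editorial illustration -- not part of the diff.]  Chain sizing for the
 * loop above, assuming MCLBYTES = 2048: a 5000-byte option value becomes
 * three cluster mbufs of 2048, 2048, and 904 bytes, since each pass takes
 * min(MCLBYTES, sopt_size) once sopt_size exceeds MLEN.
 */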

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_p != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
				       m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
		m = m->m_next;
	}
	if (m != NULL)	/* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
	return 0;
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_p != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
					m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should be given from user-land */
		m_freem(m0);
		return(EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(so)
	register struct socket *so;
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}

int
sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p)
{
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(p, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	splx(s);
	return (revents);
}
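
/*
 * [Editorial illustration -- not part of the diff.]  sopoll() is what
 * answers poll(2) (and select(2)) on a socket: POLLIN/POLLRDNORM when
 * soreadable(), POLLOUT/POLLWRNORM when sowriteable(), and
 * POLLPRI/POLLRDBAND at the out-of-band mark.  Sketch (lift out of the
 * #if 0 guard to compile):
 */
#if 0
#include <poll.h>

int
wait_io(int s)
{
	struct pollfd pfd;

	pfd.fd = s;
	pfd.events = POLLIN | POLLOUT | POLLPRI;
	return (poll(&pfd, 1, -1));	/* sleeps via selrecord() above */
}
#endif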
76struct vm_zone *socket_zone;
77so_gen_t so_gencnt; /* generation count for sockets */
78
79MALLOC_DEFINE(M_SONAME, "soname", "socket name");
80MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
81
82SYSCTL_DECL(_kern_ipc);
83
84static int somaxconn = SOMAXCONN;
85SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
86 &somaxconn, 0, "Maximum pending socket connection queue size");
87
88/*
89 * Socket operation routines.
90 * These routines are called by the routines in
91 * sys_socket.c or from a system process, and
92 * implement the semantics of socket operations by
93 * switching out to the protocol specific routines.
94 */
95
96/*
97 * Get a socket structure from our zone, and initialize it.
98 * We don't implement `waitok' yet (see comments in uipc_domain.c).
99 * Note that it would probably be better to allocate socket
100 * and PCB at the same time, but I'm not convinced that all
101 * the protocols can be easily modified to do this.
102 */
103struct socket *
104soalloc(waitok)
105 int waitok;
106{
107 struct socket *so;
108
109 so = zalloci(socket_zone);
110 if (so) {
111 /* XXX race condition for reentrant kernel */
112 bzero(so, sizeof *so);
113 so->so_gencnt = ++so_gencnt;
114 so->so_zone = socket_zone;
115 TAILQ_INIT(&so->so_aiojobq);
116 }
117 return so;
118}
119
120int
121socreate(dom, aso, type, proto, p)
122 int dom;
123 struct socket **aso;
124 register int type;
125 int proto;
126 struct proc *p;
127{
128 register struct protosw *prp;
129 register struct socket *so;
130 register int error;
131
132 if (proto)
133 prp = pffindproto(dom, proto, type);
134 else
135 prp = pffindtype(dom, type);
136 if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
137 return (EPROTONOSUPPORT);
138 if (prp->pr_type != type)
139 return (EPROTOTYPE);
140 so = soalloc(p != 0);
141 if (so == 0)
142 return (ENOBUFS);
143
144 TAILQ_INIT(&so->so_incomp);
145 TAILQ_INIT(&so->so_comp);
146 so->so_type = type;
147 so->so_cred = p->p_ucred;
148 crhold(so->so_cred);
149 so->so_proto = prp;
150 error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
151 if (error) {
152 so->so_state |= SS_NOFDREF;
153 sofree(so);
154 return (error);
155 }
156 *aso = so;
157 return (0);
158}
159
160int
161sobind(so, nam, p)
162 struct socket *so;
163 struct sockaddr *nam;
164 struct proc *p;
165{
166 int s = splnet();
167 int error;
168
169 error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
170 splx(s);
171 return (error);
172}
173
174void
175sodealloc(so)
176 struct socket *so;
177{
178
179 so->so_gencnt = ++so_gencnt;
180 if (so->so_rcv.sb_hiwat)
181 (void)chgsbsize(so->so_cred->cr_uid,
182 -(rlim_t)so->so_rcv.sb_hiwat);
183 if (so->so_snd.sb_hiwat)
184 (void)chgsbsize(so->so_cred->cr_uid,
185 -(rlim_t)so->so_snd.sb_hiwat);
186 crfree(so->so_cred);
187 zfreei(so->so_zone, so);
188}
189
190int
191solisten(so, backlog, p)
192 register struct socket *so;
193 int backlog;
194 struct proc *p;
195{
196 int s, error;
197
198 s = splnet();
199 error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
200 if (error) {
201 splx(s);
202 return (error);
203 }
204 if (TAILQ_EMPTY(&so->so_comp))
205 so->so_options |= SO_ACCEPTCONN;
206 if (backlog < 0 || backlog > somaxconn)
207 backlog = somaxconn;
208 so->so_qlimit = backlog;
209 splx(s);
210 return (0);
211}
212
213void
214sofree(so)
215 register struct socket *so;
216{
217 struct socket *head = so->so_head;
218
219 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
220 return;
221 if (head != NULL) {
222 if (so->so_state & SS_INCOMP) {
223 TAILQ_REMOVE(&head->so_incomp, so, so_list);
224 head->so_incqlen--;
225 } else if (so->so_state & SS_COMP) {
226 /*
227 * We must not decommission a socket that's
228 * on the accept(2) queue. If we do, then
229 * accept(2) may hang after select(2) indicated
230 * that the listening socket was ready.
231 */
232 return;
233 } else {
234 panic("sofree: not queued");
235 }
236 head->so_qlen--;
237 so->so_state &= ~SS_INCOMP;
238 so->so_head = NULL;
239 }
240 sbrelease(&so->so_snd, so);
241 sorflush(so);
242 sodealloc(so);
243}
244
245/*
246 * Close a socket on last file table reference removal.
247 * Initiate disconnect if connected.
248 * Free socket when disconnect complete.
249 */
250int
251soclose(so)
252 register struct socket *so;
253{
254 int s = splnet(); /* conservative */
255 int error = 0;
256
257 funsetown(so->so_sigio);
258 if (so->so_options & SO_ACCEPTCONN) {
259 struct socket *sp, *sonext;
260
261 sp = TAILQ_FIRST(&so->so_incomp);
262 for (; sp != NULL; sp = sonext) {
263 sonext = TAILQ_NEXT(sp, so_list);
264 (void) soabort(sp);
265 }
266 for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
267 sonext = TAILQ_NEXT(sp, so_list);
268 /* Dequeue from so_comp since sofree() won't do it */
269 TAILQ_REMOVE(&so->so_comp, sp, so_list);
270 so->so_qlen--;
271 sp->so_state &= ~SS_COMP;
272 sp->so_head = NULL;
273 (void) soabort(sp);
274 }
275 }
276 if (so->so_pcb == 0)
277 goto discard;
278 if (so->so_state & SS_ISCONNECTED) {
279 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
280 error = sodisconnect(so);
281 if (error)
282 goto drop;
283 }
284 if (so->so_options & SO_LINGER) {
285 if ((so->so_state & SS_ISDISCONNECTING) &&
286 (so->so_state & SS_NBIO))
287 goto drop;
288 while (so->so_state & SS_ISCONNECTED) {
289 error = tsleep((caddr_t)&so->so_timeo,
290 PSOCK | PCATCH, "soclos", so->so_linger * hz);
291 if (error)
292 break;
293 }
294 }
295 }
296drop:
297 if (so->so_pcb) {
298 int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
299 if (error == 0)
300 error = error2;
301 }
302discard:
303 if (so->so_state & SS_NOFDREF)
304 panic("soclose: NOFDREF");
305 so->so_state |= SS_NOFDREF;
306 sofree(so);
307 splx(s);
308 return (error);
309}
310
311/*
312 * Must be called at splnet...
313 */
314int
315soabort(so)
316 struct socket *so;
317{
318 int error;
319
320 error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
321 if (error) {
322 sofree(so);
323 return error;
324 }
325 return (0);
326}
327
328int
329soaccept(so, nam)
330 register struct socket *so;
331 struct sockaddr **nam;
332{
333 int s = splnet();
334 int error;
335
336 if ((so->so_state & SS_NOFDREF) == 0)
337 panic("soaccept: !NOFDREF");
338 so->so_state &= ~SS_NOFDREF;
339 if ((so->so_state & SS_ISDISCONNECTED) == 0)
340 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
341 else {
342 if (nam)
343 *nam = 0;
344 error = 0;
345 }
346 splx(s);
347 return (error);
348}
349
350int
351soconnect(so, nam, p)
352 register struct socket *so;
353 struct sockaddr *nam;
354 struct proc *p;
355{
356 int s;
357 int error;
358
359 if (so->so_options & SO_ACCEPTCONN)
360 return (EOPNOTSUPP);
361 s = splnet();
362 /*
363 * If protocol is connection-based, can only connect once.
364 * Otherwise, if connected, try to disconnect first.
365 * This allows user to disconnect by connecting to, e.g.,
366 * a null address.
367 */
368 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
369 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
370 (error = sodisconnect(so))))
371 error = EISCONN;
372 else
373 error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
374 splx(s);
375 return (error);
376}
377
378int
379soconnect2(so1, so2)
380 register struct socket *so1;
381 struct socket *so2;
382{
383 int s = splnet();
384 int error;
385
386 error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
387 splx(s);
388 return (error);
389}
390
391int
392sodisconnect(so)
393 register struct socket *so;
394{
395 int s = splnet();
396 int error;
397
398 if ((so->so_state & SS_ISCONNECTED) == 0) {
399 error = ENOTCONN;
400 goto bad;
401 }
402 if (so->so_state & SS_ISDISCONNECTING) {
403 error = EALREADY;
404 goto bad;
405 }
406 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
407bad:
408 splx(s);
409 return (error);
410}
411
412#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
413/*
414 * Send on a socket.
415 * If send must go all at once and message is larger than
416 * send buffering, then hard error.
417 * Lock against other senders.
418 * If must go all at once and not enough room now, then
419 * inform user that this would block and do nothing.
420 * Otherwise, if nonblocking, send as much as possible.
421 * The data to be sent is described by "uio" if nonzero,
422 * otherwise by the mbuf chain "top" (which must be null
423 * if uio is not). Data provided in mbuf chain must be small
424 * enough to send all at once.
425 *
426 * Returns nonzero on error, timeout or signal; callers
427 * must check for short counts if EINTR/ERESTART are returned.
428 * Data and control buffers are freed on return.
429 */
430int
431sosend(so, addr, uio, top, control, flags, p)
432 register struct socket *so;
433 struct sockaddr *addr;
434 struct uio *uio;
435 struct mbuf *top;
436 struct mbuf *control;
437 int flags;
438 struct proc *p;
439{
440 struct mbuf **mp;
441 register struct mbuf *m;
442 register long space, len, resid;
443 int clen = 0, error, s, dontroute, mlen;
444 int atomic = sosendallatonce(so) || top;
445
446 if (uio)
447 resid = uio->uio_resid;
448 else
449 resid = top->m_pkthdr.len;
450 /*
451 * In theory resid should be unsigned.
452 * However, space must be signed, as it might be less than 0
453 * if we over-committed, and we must use a signed comparison
454 * of space and resid. On the other hand, a negative resid
455 * causes us to loop sending 0-length segments to the protocol.
456 *
457 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
458 * type sockets since that's an error.
459 */
460 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
461 error = EINVAL;
462 goto out;
463 }
464
465 dontroute =
466 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
467 (so->so_proto->pr_flags & PR_ATOMIC);
468 if (p)
469 p->p_stats->p_ru.ru_msgsnd++;
470 if (control)
471 clen = control->m_len;
472#define snderr(errno) { error = errno; splx(s); goto release; }
473
474restart:
475 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
476 if (error)
477 goto out;
478 do {
479 s = splnet();
480 if (so->so_state & SS_CANTSENDMORE)
481 snderr(EPIPE);
482 if (so->so_error) {
483 error = so->so_error;
484 so->so_error = 0;
485 splx(s);
486 goto release;
487 }
488 if ((so->so_state & SS_ISCONNECTED) == 0) {
489 /*
490 * `sendto' and `sendmsg' is allowed on a connection-
491 * based socket if it supports implied connect.
492 * Return ENOTCONN if not connected and no address is
493 * supplied.
494 */
495 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
496 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
497 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
498 !(resid == 0 && clen != 0))
499 snderr(ENOTCONN);
500 } else if (addr == 0)
501 snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
502 ENOTCONN : EDESTADDRREQ);
503 }
504 space = sbspace(&so->so_snd);
505 if (flags & MSG_OOB)
506 space += 1024;
507 if ((atomic && resid > so->so_snd.sb_hiwat) ||
508 clen > so->so_snd.sb_hiwat)
509 snderr(EMSGSIZE);
510 if (space < resid + clen && uio &&
511 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
512 if (so->so_state & SS_NBIO)
513 snderr(EWOULDBLOCK);
514 sbunlock(&so->so_snd);
515 error = sbwait(&so->so_snd);
516 splx(s);
517 if (error)
518 goto out;
519 goto restart;
520 }
521 splx(s);
522 mp = &top;
523 space -= clen;
524 do {
525 if (uio == NULL) {
526 /*
527 * Data is prepackaged in "top".
528 */
529 resid = 0;
530 if (flags & MSG_EOR)
531 top->m_flags |= M_EOR;
532 } else do {
533 if (top == 0) {
534 MGETHDR(m, M_WAIT, MT_DATA);
535 if (m == NULL) {
536 error = ENOBUFS;
537 goto release;
538 }
539 mlen = MHLEN;
540 m->m_pkthdr.len = 0;
541 m->m_pkthdr.rcvif = (struct ifnet *)0;
542 } else {
543 MGET(m, M_WAIT, MT_DATA);
544 if (m == NULL) {
545 error = ENOBUFS;
546 goto release;
547 }
548 mlen = MLEN;
549 }
550 if (resid >= MINCLSIZE) {
551 MCLGET(m, M_WAIT);
552 if ((m->m_flags & M_EXT) == 0)
553 goto nopages;
554 mlen = MCLBYTES;
555 len = min(min(mlen, resid), space);
556 } else {
557nopages:
558 len = min(min(mlen, resid), space);
559 /*
560 * For datagram protocols, leave room
561 * for protocol headers in first mbuf.
562 */
563 if (atomic && top == 0 && len < mlen)
564 MH_ALIGN(m, len);
565 }
566 space -= len;
567 error = uiomove(mtod(m, caddr_t), (int)len, uio);
568 resid = uio->uio_resid;
569 m->m_len = len;
570 *mp = m;
571 top->m_pkthdr.len += len;
572 if (error)
573 goto release;
574 mp = &m->m_next;
575 if (resid <= 0) {
576 if (flags & MSG_EOR)
577 top->m_flags |= M_EOR;
578 break;
579 }
580 } while (space > 0 && atomic);
581 if (dontroute)
582 so->so_options |= SO_DONTROUTE;
583 s = splnet(); /* XXX */
584 /*
585 * XXX all the SS_CANTSENDMORE checks previously
586 * done could be out of date. We could have recieved
587 * a reset packet in an interrupt or maybe we slept
588 * while doing page faults in uiomove() etc. We could
589 * probably recheck again inside the splnet() protection
590 * here, but there are probably other places that this
591 * also happens. We must rethink this.
592 */
593 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
594 (flags & MSG_OOB) ? PRUS_OOB :
595 /*
596 * If the user set MSG_EOF, the protocol
597 * understands this flag and nothing left to
598 * send then use PRU_SEND_EOF instead of PRU_SEND.
599 */
600 ((flags & MSG_EOF) &&
601 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
602 (resid <= 0)) ?
603 PRUS_EOF :
604 /* If there is more to send set PRUS_MORETOCOME */
605 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
606 top, addr, control, p);
607 splx(s);
608 if (dontroute)
609 so->so_options &= ~SO_DONTROUTE;
610 clen = 0;
611 control = 0;
612 top = 0;
613 mp = &top;
614 if (error)
615 goto release;
616 } while (resid && space > 0);
617 } while (resid);
618
619release:
620 sbunlock(&so->so_snd);
621out:
622 if (top)
623 m_freem(top);
624 if (control)
625 m_freem(control);
626 return (error);
627}
628
629/*
630 * Implement receive operations on a socket.
631 * We depend on the way that records are added to the sockbuf
632 * by sbappend*. In particular, each record (mbufs linked through m_next)
633 * must begin with an address if the protocol so specifies,
634 * followed by an optional mbuf or mbufs containing ancillary data,
635 * and then zero or more mbufs of data.
636 * In order to avoid blocking network interrupts for the entire time here,
637 * we splx() while doing the actual copy to user space.
638 * Although the sockbuf is locked, new data may still be appended,
639 * and thus we must maintain consistency of the sockbuf during that time.
640 *
641 * The caller may receive the data as a single mbuf chain by supplying
642 * an mbuf **mp0 for use in returning the chain. The uio is then used
643 * only for the count in uio_resid.
644 */
645int
646soreceive(so, psa, uio, mp0, controlp, flagsp)
647 register struct socket *so;
648 struct sockaddr **psa;
649 struct uio *uio;
650 struct mbuf **mp0;
651 struct mbuf **controlp;
652 int *flagsp;
653{
654 register struct mbuf *m, **mp;
655 register int flags, len, error, s, offset;
656 struct protosw *pr = so->so_proto;
657 struct mbuf *nextrecord;
658 int moff, type = 0;
659 int orig_resid = uio->uio_resid;
660
661 mp = mp0;
662 if (psa)
663 *psa = 0;
664 if (controlp)
665 *controlp = 0;
666 if (flagsp)
667 flags = *flagsp &~ MSG_EOR;
668 else
669 flags = 0;
670 if (flags & MSG_OOB) {
671 m = m_get(M_WAIT, MT_DATA);
672 if (m == NULL)
673 return (ENOBUFS);
674 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
675 if (error)
676 goto bad;
677 do {
678 error = uiomove(mtod(m, caddr_t),
679 (int) min(uio->uio_resid, m->m_len), uio);
680 m = m_free(m);
681 } while (uio->uio_resid && error == 0 && m);
682bad:
683 if (m)
684 m_freem(m);
685 return (error);
686 }
687 if (mp)
688 *mp = (struct mbuf *)0;
689 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
690 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
691
692restart:
693 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
694 if (error)
695 return (error);
696 s = splnet();
697
698 m = so->so_rcv.sb_mb;
699 /*
700 * If we have less data than requested, block awaiting more
701 * (subject to any timeout) if:
702 * 1. the current count is less than the low water mark, or
703 * 2. MSG_WAITALL is set, and it is possible to do the entire
704 * receive operation at once if we block (resid <= hiwat).
705 * 3. MSG_DONTWAIT is not set
706 * If MSG_WAITALL is set but resid is larger than the receive buffer,
707 * we have to do the receive in sections, and thus risk returning
708 * a short count if a timeout or signal occurs after we start.
709 */
710 if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
711 so->so_rcv.sb_cc < uio->uio_resid) &&
712 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
713 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
714 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
715 KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
716 if (so->so_error) {
717 if (m)
718 goto dontblock;
719 error = so->so_error;
720 if ((flags & MSG_PEEK) == 0)
721 so->so_error = 0;
722 goto release;
723 }
724 if (so->so_state & SS_CANTRCVMORE) {
725 if (m)
726 goto dontblock;
727 else
728 goto release;
729 }
730 for (; m; m = m->m_next)
731 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
732 m = so->so_rcv.sb_mb;
733 goto dontblock;
734 }
735 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
736 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
737 error = ENOTCONN;
738 goto release;
739 }
740 if (uio->uio_resid == 0)
741 goto release;
742 if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
743 error = EWOULDBLOCK;
744 goto release;
745 }
746 sbunlock(&so->so_rcv);
747 error = sbwait(&so->so_rcv);
748 splx(s);
749 if (error)
750 return (error);
751 goto restart;
752 }
753dontblock:
754 if (uio->uio_procp)
755 uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
756 nextrecord = m->m_nextpkt;
757 if (pr->pr_flags & PR_ADDR) {
758 KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
759 orig_resid = 0;
760 if (psa)
761 *psa = dup_sockaddr(mtod(m, struct sockaddr *),
762 mp0 == 0);
763 if (flags & MSG_PEEK) {
764 m = m->m_next;
765 } else {
766 sbfree(&so->so_rcv, m);
767 MFREE(m, so->so_rcv.sb_mb);
768 m = so->so_rcv.sb_mb;
769 }
770 }
771 while (m && m->m_type == MT_CONTROL && error == 0) {
772 if (flags & MSG_PEEK) {
773 if (controlp)
774 *controlp = m_copy(m, 0, m->m_len);
775 m = m->m_next;
776 } else {
777 sbfree(&so->so_rcv, m);
778 if (controlp) {
779 if (pr->pr_domain->dom_externalize &&
780 mtod(m, struct cmsghdr *)->cmsg_type ==
781 SCM_RIGHTS)
782 error = (*pr->pr_domain->dom_externalize)(m);
783 *controlp = m;
784 so->so_rcv.sb_mb = m->m_next;
785 m->m_next = 0;
786 m = so->so_rcv.sb_mb;
787 } else {
788 MFREE(m, so->so_rcv.sb_mb);
789 m = so->so_rcv.sb_mb;
790 }
791 }
792 if (controlp) {
793 orig_resid = 0;
794 controlp = &(*controlp)->m_next;
795 }
796 }
797 if (m) {
798 if ((flags & MSG_PEEK) == 0)
799 m->m_nextpkt = nextrecord;
800 type = m->m_type;
801 if (type == MT_OOBDATA)
802 flags |= MSG_OOB;
803 }
804 moff = 0;
805 offset = 0;
806 while (m && uio->uio_resid > 0 && error == 0) {
807 if (m->m_type == MT_OOBDATA) {
808 if (type != MT_OOBDATA)
809 break;
810 } else if (type == MT_OOBDATA)
811 break;
812 else
813 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
814 ("receive 3"));
815 so->so_state &= ~SS_RCVATMARK;
816 len = uio->uio_resid;
817 if (so->so_oobmark && len > so->so_oobmark - offset)
818 len = so->so_oobmark - offset;
819 if (len > m->m_len - moff)
820 len = m->m_len - moff;
821 /*
822 * If mp is set, just pass back the mbufs.
823 * Otherwise copy them out via the uio, then free.
824 * The sockbuf must be consistent here (sb_mb points to the current
825 * mbuf, m_nextpkt to the next record) when we drop priority;
826 * we must note any additions to the sockbuf when we
827 * block interrupts again.
828 */
829 if (mp == 0) {
830 splx(s);
831 error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
832 s = splnet();
833 if (error)
834 goto release;
835 } else
836 uio->uio_resid -= len;
837 if (len == m->m_len - moff) {
838 if (m->m_flags & M_EOR)
839 flags |= MSG_EOR;
840 if (flags & MSG_PEEK) {
841 m = m->m_next;
842 moff = 0;
843 } else {
844 nextrecord = m->m_nextpkt;
845 sbfree(&so->so_rcv, m);
846 if (mp) {
847 *mp = m;
848 mp = &m->m_next;
849 so->so_rcv.sb_mb = m = m->m_next;
850 *mp = (struct mbuf *)0;
851 } else {
852 MFREE(m, so->so_rcv.sb_mb);
853 m = so->so_rcv.sb_mb;
854 }
855 if (m)
856 m->m_nextpkt = nextrecord;
857 }
858 } else {
859 if (flags & MSG_PEEK)
860 moff += len;
861 else {
862 if (mp)
863 *mp = m_copym(m, 0, len, M_WAIT);
864 m->m_data += len;
865 m->m_len -= len;
866 so->so_rcv.sb_cc -= len;
867 }
868 }
869 if (so->so_oobmark) {
870 if ((flags & MSG_PEEK) == 0) {
871 so->so_oobmark -= len;
872 if (so->so_oobmark == 0) {
873 so->so_state |= SS_RCVATMARK;
874 break;
875 }
876 } else {
877 offset += len;
878 if (offset == so->so_oobmark)
879 break;
880 }
881 }
882 if (flags & MSG_EOR)
883 break;
884 /*
885 * If the MSG_WAITALL flag is set (for a non-atomic socket),
886 * we must not quit until "uio->uio_resid == 0" or an error
887 * terminates the transfer. If a signal/timeout occurs, return
888 * with a short count but without error.
889 * Keep sockbuf locked against other readers.
890 */
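		/*
		 * Editorial note: e.g. recv(s, buf, 200, MSG_WAITALL)
		 * sleeps in the loop below until all 200 bytes arrive,
		 * the sender shuts down, or a signal/timeout intervenes.
		 */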
891 while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
892 !sosendallatonce(so) && !nextrecord) {
893 if (so->so_error || so->so_state & SS_CANTRCVMORE)
894 break;
895 error = sbwait(&so->so_rcv);
896 if (error) {
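				/* signal/timeout: short count, no error */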
897 sbunlock(&so->so_rcv);
898 splx(s);
899 return (0);
900 }
901 m = so->so_rcv.sb_mb;
902 if (m)
903 nextrecord = m->m_nextpkt;
904 }
905 }
906
907 if (m && pr->pr_flags & PR_ATOMIC) {
908 flags |= MSG_TRUNC;
909 if ((flags & MSG_PEEK) == 0)
910 (void) sbdroprecord(&so->so_rcv);
911 }
912 if ((flags & MSG_PEEK) == 0) {
913 if (m == 0)
914 so->so_rcv.sb_mb = nextrecord;
915 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
916 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
917 }
918 if (orig_resid == uio->uio_resid && orig_resid &&
919 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
920 sbunlock(&so->so_rcv);
921 splx(s);
922 goto restart;
923 }
924
925 if (flagsp)
926 *flagsp |= flags;
927release:
928 sbunlock(&so->so_rcv);
929 splx(s);
930 return (error);
931}
932
933int
934soshutdown(so, how)
935 register struct socket *so;
936 register int how;
937{
938 register struct protosw *pr = so->so_proto;
939
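	/*
	 * Editorial note: how is 0 (disallow receives), 1 (sends) or
	 * 2 (both); incrementing maps it onto the FREAD (0x1) and
	 * FWRITE (0x2) bits tested below.
	 */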
940 how++;
941 if (how & FREAD)
942 sorflush(so);
943 if (how & FWRITE)
944 return ((*pr->pr_usrreqs->pru_shutdown)(so));
945 return (0);
946}
947
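/*
 * Editorial note: sorflush() discards pending receive data by snapshotting
 * the sockbuf, zeroing the live copy at splimp(), and then releasing the
 * snapshot, disposing of any in-transit access rights first.
 */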
948void
949sorflush(so)
950 register struct socket *so;
951{
952 register struct sockbuf *sb = &so->so_rcv;
953 register struct protosw *pr = so->so_proto;
954 register int s;
955 struct sockbuf asb;
956
957 sb->sb_flags |= SB_NOINTR;
958 (void) sblock(sb, M_WAITOK);
959 s = splimp();
960 socantrcvmore(so);
961 sbunlock(sb);
962 asb = *sb;
963 bzero((caddr_t)sb, sizeof (*sb));
964 splx(s);
965 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
966 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
967 sbrelease(&asb, so);
968}
969
970/*
971 * Perhaps this routine, and sooptcopyout(), below, ought to come in
972 * an additional variant to handle the case where the option value needs
973 * to be some kind of integer, but not a specific size.
974 * In addition to their use here, these functions are also called by the
975 * protocol-level pr_ctloutput() routines.
976 */
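/*
 * Editorial sketch (assumed usage, mirroring sosetopt() below): a
 * pr_ctloutput() routine typically fetches a fixed-size integer option as
 *
 *	int optval;
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		return (error);
 */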
977int
978sooptcopyin(sopt, buf, len, minlen)
979 struct sockopt *sopt;
980 void *buf;
981 size_t len;
982 size_t minlen;
983{
984 size_t valsize;
985
986 /*
987 * If the user gives us more than we wanted, we ignore it,
988 * but if we don't get the minimum length the caller
989 * wants, we return EINVAL. On success, sopt->sopt_valsize
990 * is set to however much we actually retrieved.
991 */
992 if ((valsize = sopt->sopt_valsize) < minlen)
993 return EINVAL;
994 if (valsize > len)
995 sopt->sopt_valsize = valsize = len;
996
997 if (sopt->sopt_p != 0)
998 return (copyin(sopt->sopt_val, buf, valsize));
999
1000 bcopy(sopt->sopt_val, buf, valsize);
1001 return 0;
1002}
1003
1004int
1005sosetopt(so, sopt)
1006 struct socket *so;
1007 struct sockopt *sopt;
1008{
1009 int error, optval;
1010 struct linger l;
1011 struct timeval tv;
1012 u_long val;
1013
1014 error = 0;
1015 if (sopt->sopt_level != SOL_SOCKET) {
1016 if (so->so_proto && so->so_proto->pr_ctloutput)
1017 return ((*so->so_proto->pr_ctloutput)
1018 (so, sopt));
1019 error = ENOPROTOOPT;
1020 } else {
1021 switch (sopt->sopt_name) {
1022 case SO_LINGER:
1023 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
1024 if (error)
1025 goto bad;
1026
1027 so->so_linger = l.l_linger;
1028 if (l.l_onoff)
1029 so->so_options |= SO_LINGER;
1030 else
1031 so->so_options &= ~SO_LINGER;
1032 break;
1033
1034 case SO_DEBUG:
1035 case SO_KEEPALIVE:
1036 case SO_DONTROUTE:
1037 case SO_USELOOPBACK:
1038 case SO_BROADCAST:
1039 case SO_REUSEADDR:
1040 case SO_REUSEPORT:
1041 case SO_OOBINLINE:
1042 case SO_TIMESTAMP:
1043 error = sooptcopyin(sopt, &optval, sizeof optval,
1044 sizeof optval);
1045 if (error)
1046 goto bad;
1047 if (optval)
1048 so->so_options |= sopt->sopt_name;
1049 else
1050 so->so_options &= ~sopt->sopt_name;
1051 break;
1052
1053 case SO_SNDBUF:
1054 case SO_RCVBUF:
1055 case SO_SNDLOWAT:
1056 case SO_RCVLOWAT:
1057 error = sooptcopyin(sopt, &optval, sizeof optval,
1058 sizeof optval);
1059 if (error)
1060 goto bad;
1061
1062 /*
1063 * Values < 1 make no sense for any of these
1064 * options, so disallow them.
1065 */
1066 if (optval < 1) {
1067 error = EINVAL;
1068 goto bad;
1069 }
1070
1071 switch (sopt->sopt_name) {
1072 case SO_SNDBUF:
1073 case SO_RCVBUF:
1074 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
1075 &so->so_snd : &so->so_rcv, (u_long)optval,
1076 so, curproc) == 0) {
1077 error = ENOBUFS;
1078 goto bad;
1079 }
1080 break;
1081
1082 /*
1083 * Make sure the low-water is never greater than
1084 * the high-water.
1085 */
1086 case SO_SNDLOWAT:
1087 so->so_snd.sb_lowat =
1088 (optval > so->so_snd.sb_hiwat) ?
1089 so->so_snd.sb_hiwat : optval;
1090 break;
1091 case SO_RCVLOWAT:
1092 so->so_rcv.sb_lowat =
1093 (optval > so->so_rcv.sb_hiwat) ?
1094 so->so_rcv.sb_hiwat : optval;
1095 break;
1096 }
1097 break;
1098
1099 case SO_SNDTIMEO:
1100 case SO_RCVTIMEO:
1101 error = sooptcopyin(sopt, &tv, sizeof tv,
1102 sizeof tv);
1103 if (error)
1104 goto bad;
1105
1106 /* assert(hz > 0); */
1107 if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
1108 tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
1109 error = EDOM;
1110 goto bad;
1111 }
1112 /* assert(tick > 0); */
1113 /* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
1114 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
1115 if (val > SHRT_MAX) {
1116 error = EDOM;
1117 goto bad;
1118 }
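			/*
			 * Editorial example: with hz = 100 and tick = 10000,
			 * tv = { 2, 500000 } gives
			 * val = 2 * 100 + 500000 / 10000 = 250 ticks.
			 */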
1119
1120 switch (sopt->sopt_name) {
1121 case SO_SNDTIMEO:
1122 so->so_snd.sb_timeo = val;
1123 break;
1124 case SO_RCVTIMEO:
1125 so->so_rcv.sb_timeo = val;
1126 break;
1127 }
1128 break;
1129
1130 default:
1131 error = ENOPROTOOPT;
1132 break;
1133 }
1134 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
1135 (void) ((*so->so_proto->pr_ctloutput)
1136 (so, sopt));
1137 }
1138 }
1139bad:
1140 return (error);
1141}
1142
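/*
 * Editorial sketch (userland, standard sockets API): the SO_LINGER arm
 * above is reached through setsockopt(2), e.g.
 *
 *	struct linger l;
 *	l.l_onoff = 1;
 *	l.l_linger = 5;		(block close(2) for up to 5 seconds)
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof l);
 */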
1143/* Helper routine for getsockopt */
1144int
1145sooptcopyout(sopt, buf, len)
1146 struct sockopt *sopt;
1147 void *buf;
1148 size_t len;
1149{
1150 int error;
1151 size_t valsize;
1152
1153 error = 0;
1154
1155 /*
1156 * Documented get behavior is that we always return a value,
1157 * possibly truncated to fit in the user's buffer.
1158 * Traditional behavior is that we always tell the user
1159 * precisely how much we copied, rather than something useful
1160 * like the total amount we had available for her.
1161 * Note that this interface is not idempotent; the entire answer must
1162 * be generated ahead of time.
1163 */
1164 valsize = min(len, sopt->sopt_valsize);
1165 sopt->sopt_valsize = valsize;
1166 if (sopt->sopt_val != 0) {
1167 if (sopt->sopt_p != 0)
1168 error = copyout(buf, sopt->sopt_val, valsize);
1169 else
1170 bcopy(buf, sopt->sopt_val, valsize);
1171 }
1172 return error;
1173}
1174
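/*
 * Editorial sketch (assumed usage): protocol getsockopt handlers return
 * integer-valued options through this helper, much as sogetopt() does
 * below:
 *
 *	optval = so->so_type;
 *	error = sooptcopyout(sopt, &optval, sizeof optval);
 */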
1175int
1176sogetopt(so, sopt)
1177 struct socket *so;
1178 struct sockopt *sopt;
1179{
1180 int error, optval;
1181 struct linger l;
1182 struct timeval tv;
1183
1184 error = 0;
1185 if (sopt->sopt_level != SOL_SOCKET) {
1186 if (so->so_proto && so->so_proto->pr_ctloutput) {
1187 return ((*so->so_proto->pr_ctloutput)
1188 (so, sopt));
1189 } else
1190 return (ENOPROTOOPT);
1191 } else {
1192 switch (sopt->sopt_name) {
1193 case SO_LINGER:
1194 l.l_onoff = so->so_options & SO_LINGER;
1195 l.l_linger = so->so_linger;
1196 error = sooptcopyout(sopt, &l, sizeof l);
1197 break;
1198
1199 case SO_USELOOPBACK:
1200 case SO_DONTROUTE:
1201 case SO_DEBUG:
1202 case SO_KEEPALIVE:
1203 case SO_REUSEADDR:
1204 case SO_REUSEPORT:
1205 case SO_BROADCAST:
1206 case SO_OOBINLINE:
1207 case SO_TIMESTAMP:
1208 optval = so->so_options & sopt->sopt_name;
1209integer:
1210 error = sooptcopyout(sopt, &optval, sizeof optval);
1211 break;
1212
1213 case SO_TYPE:
1214 optval = so->so_type;
1215 goto integer;
1216
1217 case SO_ERROR:
1218 optval = so->so_error;
1219 so->so_error = 0;
1220 goto integer;
1221
1222 case SO_SNDBUF:
1223 optval = so->so_snd.sb_hiwat;
1224 goto integer;
1225
1226 case SO_RCVBUF:
1227 optval = so->so_rcv.sb_hiwat;
1228 goto integer;
1229
1230 case SO_SNDLOWAT:
1231 optval = so->so_snd.sb_lowat;
1232 goto integer;
1233
1234 case SO_RCVLOWAT:
1235 optval = so->so_rcv.sb_lowat;
1236 goto integer;
1237
1238 case SO_SNDTIMEO:
1239 case SO_RCVTIMEO:
1240 optval = (sopt->sopt_name == SO_SNDTIMEO ?
1241 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
1242
1243 tv.tv_sec = optval / hz;
1244 tv.tv_usec = (optval % hz) * tick;
1245 error = sooptcopyout(sopt, &tv, sizeof tv);
1246 break;
1247
1248 default:
1249 error = ENOPROTOOPT;
1250 break;
1251 }
1252 return (error);
1253 }
1254}
1255
1256 /* XXX: prepare an mbuf chain for the (__FreeBSD__ < 3) mbuf-based routines. */
1257int
1258soopt_getm(struct sockopt *sopt, struct mbuf **mp)
1259{
1260 struct mbuf *m, *m_prev;
1261 int sopt_size = sopt->sopt_valsize;
1262
1263 MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
1264 if (m == 0)
1265 return ENOBUFS;
1266 if (sopt_size > MLEN) {
1267 MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
1268 if ((m->m_flags & M_EXT) == 0) {
1269 m_free(m);
1270 return ENOBUFS;
1271 }
1272 m->m_len = min(MCLBYTES, sopt_size);
1273 } else {
1274 m->m_len = min(MLEN, sopt_size);
1275 }
1276 sopt_size -= m->m_len;
1277 *mp = m;
1278 m_prev = m;
1279
1280 while (sopt_size) {
1281 MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
1282 if (m == 0) {
1283 m_freem(*mp);
1284 return ENOBUFS;
1285 }
1286 if (sopt_size > MLEN) {
1287 MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
1288 if ((m->m_flags & M_EXT) == 0) {
1289 m_freem(*mp);
1290 return ENOBUFS;
1291 }
1292 m->m_len = min(MCLBYTES, sopt_size);
1293 } else {
1294 m->m_len = min(MLEN, sopt_size);
1295 }
1296 sopt_size -= m->m_len;
1297 m_prev->m_next = m;
1298 m_prev = m;
1299 }
1300 return 0;
1301}
1302
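/*
 * Editorial sketch (assumed usage): a compatibility caller pairs these
 * helpers roughly as
 *
 *	struct mbuf *m;
 *	error = soopt_getm(sopt, &m);		(size and allocate the chain)
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);	(fill it from sopt)
 */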
1303 /* XXX: copy sopt data into an mbuf chain for the (__FreeBSD__ < 3) routines. */
1304int
1305soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
1306{
1307 struct mbuf *m0 = m;
1308
1309 if (sopt->sopt_val == NULL)
1310 return 0;
1311 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
1312 if (sopt->sopt_p != NULL) {
1313 int error;
1314
1315 error = copyin(sopt->sopt_val, mtod(m, char *),
1316 m->m_len);
1317 if (error != 0) {
1318 m_freem(m0);
1319 return (error);
1320 }
1321 } else
1322 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
1323 sopt->sopt_valsize -= m->m_len;
1324 sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
1325 m = m->m_next;
1326 }
1327 if (m != NULL) /* chain should have been allocated large enough at ip6_sooptmcopyin() */
1328 panic("ip6_sooptmcopyin");
1329 return 0;
1330}
1331
1332 /* XXX: copy mbuf chain data out into sopt for the (__FreeBSD__ < 3) routines. */
1333int
1334soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
1335{
1336 struct mbuf *m0 = m;
1337 size_t valsize = 0;
1338
1339 if (sopt->sopt_val == NULL)
1340 return 0;
1341 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
1342 if (sopt->sopt_p != NULL) {
1343 int error;
1344
1345 error = copyout(mtod(m, char *), sopt->sopt_val,
1346 m->m_len);
1347 if (error != 0) {
1348 m_freem(m0);
1349 return (error);
1350 }
1351 } else
1352 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
1353 sopt->sopt_valsize -= m->m_len;
1354 sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
1355 valsize += m->m_len;
1356 m = m->m_next;
1357 }
1358 if (m != NULL) {
1359 /* the user should have supplied a large enough sopt buffer */
1360 m_freem(m0);
1361 return (EINVAL);
1362 }
1363 sopt->sopt_valsize = valsize;
1364 return 0;
1365}
1366
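/*
 * Editorial note: called by the protocol when out-of-band data arrives;
 * posts SIGURG to the socket's owner and wakes any select(2)ing readers.
 */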
1367void
1368sohasoutofband(so)
1369 register struct socket *so;
1370{
1371 if (so->so_sigio != NULL)
1372 pgsigio(so->so_sigio, SIGURG, 0);
1373 selwakeup(&so->so_rcv.sb_sel);
1374}
1375
1376int
1377sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p)
1378{
1379 int revents = 0;
1380 int s = splnet();
1381
1382 if (events & (POLLIN | POLLRDNORM))
1383 if (soreadable(so))
1384 revents |= events & (POLLIN | POLLRDNORM);
1385
1386 if (events & (POLLOUT | POLLWRNORM))
1387 if (sowriteable(so))
1388 revents |= events & (POLLOUT | POLLWRNORM);
1389
1390 if (events & (POLLPRI | POLLRDBAND))
1391 if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
1392 revents |= events & (POLLPRI | POLLRDBAND);
1393
1394 if (revents == 0) {
1395 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
1396 selrecord(p, &so->so_rcv.sb_sel);
1397 so->so_rcv.sb_flags |= SB_SEL;
1398 }
1399
1400 if (events & (POLLOUT | POLLWRNORM)) {
1401 selrecord(p, &so->so_snd.sb_sel);
1402 so->so_snd.sb_flags |= SB_SEL;
1403 }
1404 }
1405
1406 splx(s);
1407 return (revents);
1408}
1409
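/*
 * Editorial sketch (userland): sopoll() above backs poll(2), e.g.
 *
 *	struct pollfd pfd;
 *	pfd.fd = s;
 *	pfd.events = POLLIN | POLLPRI;
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLPRI))
 *		... urgent data is pending or we are at the mark ...
 */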
1410static int
1411filt_sorattach(struct knote *kn)
1412{
1413 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1414 int s = splnet();
1415
1416 if (so->so_options & SO_ACCEPTCONN)
1417 kn->kn_fop = &solisten_filtops;
1418 SLIST_INSERT_HEAD(&so->so_rcv.sb_sel.si_note, kn, kn_selnext);
1419 so->so_rcv.sb_flags |= SB_KNOTE;
1420 splx(s);
1421 return (0);
1422}
1423
1424static void
1425filt_sordetach(struct knote *kn)
1426{
1427 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1428 int s = splnet();
1429
1430 SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
1431 if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
1432 so->so_rcv.sb_flags &= ~SB_KNOTE;
1433 splx(s);
1434}
1435
1436/*ARGSUSED*/
1437static int
1438filt_soread(struct knote *kn, long hint)
1439{
1440 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1441
1442 kn->kn_data = so->so_rcv.sb_cc;
1443 if (so->so_state & SS_CANTRCVMORE) {
1444 kn->kn_flags |= EV_EOF;
1445 return (1);
1446 }
1447 return (kn->kn_data > 0);
1448}
1449
1450static int
1451filt_sowattach(struct knote *kn)
1452{
1453 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1454 int s = splnet();
1455
1456 SLIST_INSERT_HEAD(&so->so_snd.sb_sel.si_note, kn, kn_selnext);
1457 so->so_snd.sb_flags |= SB_KNOTE;
1458 splx(s);
1459 return (0);
1460}
1461
1462static void
1463filt_sowdetach(struct knote *kn)
1464{
1465 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1466 int s = splnet();
1467
1468 SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
1469 if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
1470 so->so_snd.sb_flags &= ~SB_KNOTE;
1471 splx(s);
1472}
1473
1474/*ARGSUSED*/
1475static int
1476filt_sowrite(struct knote *kn, long hint)
1477{
1478 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1479
1480 kn->kn_data = sbspace(&so->so_snd);
1481 if (so->so_state & SS_CANTSENDMORE) {
1482 kn->kn_flags |= EV_EOF;
1483 return (1);
1484 }
1485 if (((so->so_state & SS_ISCONNECTED) == 0) &&
1486 (so->so_proto->pr_flags & PR_CONNREQUIRED))
1487 return (0);
1488 return (kn->kn_data >= so->so_snd.sb_lowat);
1489}
1490
1491/*ARGSUSED*/
1492static int
1493filt_solisten(struct knote *kn, long hint)
1494{
1495 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1496
1497 kn->kn_data = so->so_qlen - so->so_incqlen;
1498 return (! TAILQ_EMPTY(&so->so_comp));
1499}