/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/uipc_usrreq.c 67708 2000-10-27 11:45:49Z phk $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/domain.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>		/* XXX must be before <sys/file.h> */
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/mbuf.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>

#include <vm/vm_zone.h>

static	struct vm_zone *unp_zone;
static	unp_gen_t unp_gencnt;
static	u_int unp_count;

static	struct unp_head unp_shead, unp_dhead;

/*
 * Unix communications domain.
 *
 * TODO:
 *	SEQPACKET, RDM
 *	rethink name space problems
 *	need a proper out-of-band
 *	lock pushdown
 */
static struct	sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
static ino_t	unp_ino;		/* prototype for fake inode numbers */

static int     unp_attach __P((struct socket *));
static void    unp_detach __P((struct unpcb *));
static int     unp_bind __P((struct unpcb *,struct sockaddr *, struct proc *));
static int     unp_connect __P((struct socket *,struct sockaddr *,
				struct proc *));
static void    unp_disconnect __P((struct unpcb *));
static void    unp_shutdown __P((struct unpcb *));
static void    unp_drop __P((struct unpcb *, int));
static void    unp_gc __P((void));
static void    unp_scan __P((struct mbuf *, void (*)(struct file *)));
static void    unp_mark __P((struct file *));
static void    unp_discard __P((struct file *));
static int     unp_internalize __P((struct mbuf *, struct proc *));

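/*
 * Protocol user-request handlers for the local (Unix) domain.  Each
 * uipc_*() routine below implements one pru_* entry point and is
 * collected into the uipc_usrreqs switch further down; operations that
 * are not supported (control, out-of-band receive) point at the generic
 * *_notsupp stubs instead.
 */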
static int
uipc_abort(struct socket *so)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	unp_drop(unp, ECONNABORTED);
	return 0;
}

static int
uipc_accept(struct socket *so, struct sockaddr **nam)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;

	/*
	 * Pass back name of connected socket,
	 * if it was bound and we are still connected
	 * (our peer may have closed already!).
	 */
	if (unp->unp_conn && unp->unp_conn->unp_addr) {
		*nam = dup_sockaddr((struct sockaddr *)unp->unp_conn->unp_addr,
				    1);
	} else {
		*nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1);
	}
	return 0;
}

static int
uipc_attach(struct socket *so, int proto, struct proc *p)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp != 0)
		return EISCONN;
	return unp_attach(so);
}

static int
uipc_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;

	return unp_bind(unp, nam, p);
}

static int
uipc_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	return unp_connect(so, nam, curproc);
}

static int
uipc_connect2(struct socket *so1, struct socket *so2)
{
	struct unpcb *unp = sotounpcb(so1);

	if (unp == 0)
		return EINVAL;

	return unp_connect2(so1, so2);
}

/* control is EOPNOTSUPP */

static int
uipc_detach(struct socket *so)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;

	unp_detach(unp);
	return 0;
}

static int
uipc_disconnect(struct socket *so)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	unp_disconnect(unp);
	return 0;
}

static int
uipc_listen(struct socket *so, struct proc *p)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0 || unp->unp_vnode == 0)
		return EINVAL;
	return 0;
}

static int
uipc_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	if (unp->unp_conn && unp->unp_conn->unp_addr)
		*nam = dup_sockaddr((struct sockaddr *)unp->unp_conn->unp_addr,
				    1);
	return 0;
}

static int
uipc_rcvd(struct socket *so, int flags)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	u_long newhiwat;

	if (unp == 0)
		return EINVAL;
	switch (so->so_type) {
	case SOCK_DGRAM:
		panic("uipc_rcvd DGRAM?");
		/*NOTREACHED*/

	case SOCK_STREAM:
		if (unp->unp_conn == 0)
			break;
		so2 = unp->unp_conn->unp_socket;
		/*
		 * Adjust backpressure on sender
		 * and wakeup any waiting to write.
		 */
		so2->so_snd.sb_mbmax += unp->unp_mbcnt - so->so_rcv.sb_mbcnt;
		unp->unp_mbcnt = so->so_rcv.sb_mbcnt;
		newhiwat = so2->so_snd.sb_hiwat + unp->unp_cc -
		    so->so_rcv.sb_cc;
		(void)chgsbsize(so2->so_cred->cr_uidinfo, &so2->so_snd.sb_hiwat,
		    newhiwat, RLIM_INFINITY);
		unp->unp_cc = so->so_rcv.sb_cc;
		sowwakeup(so2);
		break;

	default:
		panic("uipc_rcvd unknown socktype");
	}
	return 0;
}

/* pru_rcvoob is EOPNOTSUPP */

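/*
 * uipc_send() delivers data directly into the peer's receive buffer.
 * For SOCK_DGRAM an explicit destination may be supplied (a temporary
 * connect/disconnect is done around the append); for SOCK_STREAM the
 * sender's high-water marks are lowered to mirror the space consumed in
 * the peer's receive buffer, which is how backpressure is maintained.
 * Any control mbuf is first converted by unp_internalize().
 */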
static int
uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
	  struct mbuf *control, struct proc *p)
{
	int error = 0;
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	u_long newhiwat;

	if (unp == 0) {
		error = EINVAL;
		goto release;
	}
	if (flags & PRUS_OOB) {
		error = EOPNOTSUPP;
		goto release;
	}

	if (control && (error = unp_internalize(control, p)))
		goto release;

	switch (so->so_type) {
	case SOCK_DGRAM:
	{
		struct sockaddr *from;

		if (nam) {
			if (unp->unp_conn) {
				error = EISCONN;
				break;
			}
			error = unp_connect(so, nam, p);
			if (error)
				break;
		} else {
			if (unp->unp_conn == 0) {
				error = ENOTCONN;
				break;
			}
		}
		so2 = unp->unp_conn->unp_socket;
		if (unp->unp_addr)
			from = (struct sockaddr *)unp->unp_addr;
		else
			from = &sun_noname;
		if (sbappendaddr(&so2->so_rcv, from, m, control)) {
			sorwakeup(so2);
			m = 0;
			control = 0;
		} else
			error = ENOBUFS;
		if (nam)
			unp_disconnect(unp);
		break;
	}

	case SOCK_STREAM:
		/* Connect if not connected yet. */
		/*
		 * Note: A better implementation would complain
		 * if not equal to the peer's address.
		 */
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (nam) {
				error = unp_connect(so, nam, p);
				if (error)
					break;	/* XXX */
			} else {
				error = ENOTCONN;
				break;
			}
		}

		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			break;
		}
		if (unp->unp_conn == 0)
			panic("uipc_send connected but no connection?");
		so2 = unp->unp_conn->unp_socket;
		/*
		 * Send to paired receive port, and then reduce
		 * send buffer hiwater marks to maintain backpressure.
		 * Wake up readers.
		 */
		if (control) {
			if (sbappendcontrol(&so2->so_rcv, m, control))
				control = 0;
		} else
			sbappend(&so2->so_rcv, m);
		so->so_snd.sb_mbmax -=
			so2->so_rcv.sb_mbcnt - unp->unp_conn->unp_mbcnt;
		unp->unp_conn->unp_mbcnt = so2->so_rcv.sb_mbcnt;
		newhiwat = so->so_snd.sb_hiwat -
		    (so2->so_rcv.sb_cc - unp->unp_conn->unp_cc);
		(void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.sb_hiwat,
		    newhiwat, RLIM_INFINITY);
		unp->unp_conn->unp_cc = so2->so_rcv.sb_cc;
		sorwakeup(so2);
		m = 0;
		break;

	default:
		panic("uipc_send unknown socktype");
	}

	/*
	 * SEND_EOF is equivalent to a SEND followed by
	 * a SHUTDOWN.
	 */
	if (flags & PRUS_EOF) {
		socantsendmore(so);
		unp_shutdown(unp);
	}

	if (control && error != 0)
		unp_dispose(control);

release:
	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	return error;
}

static int
uipc_sense(struct socket *so, struct stat *sb)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;

	if (unp == 0)
		return EINVAL;
	sb->st_blksize = so->so_snd.sb_hiwat;
	if (so->so_type == SOCK_STREAM && unp->unp_conn != 0) {
		so2 = unp->unp_conn->unp_socket;
		sb->st_blksize += so2->so_rcv.sb_cc;
	}
	sb->st_dev = NOUDEV;
	if (unp->unp_ino == 0)
		unp->unp_ino = unp_ino++;
	sb->st_ino = unp->unp_ino;
	return (0);
}

static int
uipc_shutdown(struct socket *so)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	socantsendmore(so);
	unp_shutdown(unp);
	return 0;
}

static int
uipc_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	if (unp->unp_addr)
		*nam = dup_sockaddr((struct sockaddr *)unp->unp_addr, 1);
	return 0;
}

struct pr_usrreqs uipc_usrreqs = {
	uipc_abort, uipc_accept, uipc_attach, uipc_bind, uipc_connect,
	uipc_connect2, pru_control_notsupp, uipc_detach, uipc_disconnect,
	uipc_listen, uipc_peeraddr, uipc_rcvd, pru_rcvoob_notsupp,
	uipc_send, uipc_sense, uipc_shutdown, uipc_sockaddr,
	sosend, soreceive, sopoll
};

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#ifndef PIPSIZ
#define	PIPSIZ	8192
#endif
static u_long	unpst_sendspace = PIPSIZ;
static u_long	unpst_recvspace = PIPSIZ;
static u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
static u_long	unpdg_recvspace = 4*1024;

static int	unp_rights;			/* file descriptors in flight */

SYSCTL_DECL(_net_local_stream);
SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
	   &unpst_sendspace, 0, "");
SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
	   &unpst_recvspace, 0, "");
SYSCTL_DECL(_net_local_dgram);
SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
	   &unpdg_sendspace, 0, "");
SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
	   &unpdg_recvspace, 0, "");
SYSCTL_DECL(_net_local);
SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0, "");

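/*
 * Allocate a new unpcb for a freshly created socket: reserve default
 * buffer space if the caller has not already done so, zero the pcb,
 * record the current root directory (used for jail checks in
 * prison_unpcb()) and link the pcb onto the per-type global list.
 */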
static int
unp_attach(so)
	struct socket *so;
{
	register struct unpcb *unp;
	int error;

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		switch (so->so_type) {

		case SOCK_STREAM:
			error = soreserve(so, unpst_sendspace, unpst_recvspace);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			return (error);
	}
	unp = zalloc(unp_zone);
	if (unp == NULL)
		return (ENOBUFS);
	bzero(unp, sizeof *unp);
	unp->unp_gencnt = ++unp_gencnt;
	unp_count++;
	LIST_INIT(&unp->unp_refs);
	unp->unp_socket = so;
	unp->unp_rvnode = curproc->p_fd->fd_rdir;
	LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ? &unp_dhead
			 : &unp_shead, unp, unp_link);
	so->so_pcb = (caddr_t)unp;
	return (0);
}

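/*
 * Tear down a unpcb: unlink it from the global list, drop the bound
 * vnode (if any), disconnect from the peer and from any datagram
 * senders still referring to us, and finally free the pcb itself.
 */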
static void
unp_detach(unp)
	register struct unpcb *unp;
{
	LIST_REMOVE(unp, unp_link);
	unp->unp_gencnt = ++unp_gencnt;
	--unp_count;
	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = 0;
		vrele(unp->unp_vnode);
		unp->unp_vnode = 0;
	}
	if (unp->unp_conn)
		unp_disconnect(unp);
	while (!LIST_EMPTY(&unp->unp_refs))
		unp_drop(LIST_FIRST(&unp->unp_refs), ECONNRESET);
	soisdisconnected(unp->unp_socket);
	unp->unp_socket->so_pcb = 0;
	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(unp->unp_socket);
		unp_gc();
	}
	if (unp->unp_addr)
		FREE(unp->unp_addr, M_SONAME);
	zfree(unp_zone, unp);
}

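/*
 * Bind a name in the filesystem namespace to the socket: namei() the
 * path supplied in the sockaddr_un, create a VSOCK vnode for it (an
 * existing name yields EADDRINUSE) and cross-link vnode and socket.
 */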
static int
unp_bind(unp, nam, p)
	struct unpcb *unp;
	struct sockaddr *nam;
	struct proc *p;
{
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp;
	struct mount *mp;
	struct vattr vattr;
	int error, namelen;
	struct nameidata nd;
	char buf[SOCK_MAXADDRLEN];

	if (unp->unp_vnode != NULL)
		return (EINVAL);
	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0)
		return EINVAL;
	strncpy(buf, soun->sun_path, namelen);
	buf[namelen] = 0;	/* null-terminate the string */
restart:
	NDINIT(&nd, CREATE, NOFOLLOW | LOCKPARENT, UIO_SYSSPACE,
	    buf, p);
/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	error = namei(&nd);
	if (error)
		return (error);
	vp = nd.ni_vp;
	if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
		NDFREE(&nd, NDF_ONLY_PNBUF);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		if (vp != NULL) {
			vrele(vp);
			return (EADDRINUSE);
		}
		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
			return (error);
		goto restart;
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = (ACCESSPERMS & ~p->p_fd->fd_cmask);
	VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vput(nd.ni_dvp);
	if (error)
		return (error);
	vp = nd.ni_vp;
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam, 1);
	VOP_UNLOCK(vp, 0, p);
	vn_finished_write(mp);
	return (0);
}

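/*
 * Connect to a name in the filesystem namespace: look up the path,
 * check that the vnode is a socket we are allowed to write to, and for
 * connection-oriented types spawn a new server socket with sonewconn3()
 * before wiring the two pcbs together via unp_connect2().
 */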
static int
unp_connect(so, nam, p)
	struct socket *so;
	struct sockaddr *nam;
	struct proc *p;
{
	register struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	register struct vnode *vp;
	register struct socket *so2, *so3;
	struct unpcb *unp2, *unp3;
	int error, len;
	struct nameidata nd;
	char buf[SOCK_MAXADDRLEN];

	len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
	if (len <= 0)
		return EINVAL;
	strncpy(buf, soun->sun_path, len);
	buf[len] = 0;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, buf, p);
	error = namei(&nd);
	if (error)
		return (error);
	vp = nd.ni_vp;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto bad;
	}
	error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p);
	if (error)
		goto bad;
	so2 = vp->v_socket;
	if (so2 == 0) {
		error = ECONNREFUSED;
		goto bad;
	}
	if (so->so_type != so2->so_type) {
		error = EPROTOTYPE;
		goto bad;
	}
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn3(so2, 0, p)) == 0) {
			error = ECONNREFUSED;
			goto bad;
		}
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr)
			unp3->unp_addr = (struct sockaddr_un *)
				dup_sockaddr((struct sockaddr *)
					     unp2->unp_addr, 1);
		so2 = so3;
	}
	error = unp_connect2(so, so2);
bad:
	vput(vp);
	return (error);
}

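/*
 * Cross-link two pcbs of the same type.  Datagram sockets are only
 * connected one way (the sender is added to the receiver's unp_refs
 * list); stream sockets are connected symmetrically and both ends are
 * marked connected.
 */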
int
unp_connect2(so, so2)
	register struct socket *so;
	register struct socket *so2;
{
	register struct unpcb *unp = sotounpcb(so);
	register struct unpcb *unp2;

	if (so2->so_type != so->so_type)
		return (EPROTOTYPE);
	unp2 = sotounpcb(so2);
	unp->unp_conn = unp2;
	switch (so->so_type) {

	case SOCK_DGRAM:
		LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
		soisconnected(so);
		break;

	case SOCK_STREAM:
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect2");
	}
	return (0);
}

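/*
 * Undo unp_connect2(): detach from the peer and mark the affected
 * socket(s) disconnected, handling the asymmetric datagram case and
 * the symmetric stream case separately.
 */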
static void
unp_disconnect(unp)
	struct unpcb *unp;
{
	register struct unpcb *unp2 = unp->unp_conn;

	if (unp2 == 0)
		return;
	unp->unp_conn = 0;
	switch (unp->unp_socket->so_type) {

	case SOCK_DGRAM:
		LIST_REMOVE(unp, unp_reflink);
		unp->unp_socket->so_state &= ~SS_ISCONNECTED;
		break;

	case SOCK_STREAM:
		soisdisconnected(unp->unp_socket);
		unp2->unp_conn = 0;
		soisdisconnected(unp2->unp_socket);
		break;
	}
}

#ifdef notdef
void
unp_abort(unp)
	struct unpcb *unp;
{

	unp_detach(unp);
}
#endif

static int
prison_unpcb(struct proc *p, struct unpcb *unp)
{
	if (!p->p_prison)
		return (0);
	if (p->p_fd->fd_rdir == unp->unp_rvnode)
		return (0);
	return (1);
}

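/*
 * Sysctl handler that exports the list of active pcbs (datagram or
 * stream, selected by arg1) as a sequence of xunpcb structures,
 * bracketed by xunpgen records so userland can detect changes made
 * while the list was being copied out.  Jailed processes only see pcbs
 * created under their own root (see prison_unpcb()).
 */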
static int
unp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct unpcb *unp, **unp_list;
	unp_gen_t gencnt;
	struct xunpgen xug;
	struct unp_head *head;

	head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead);

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = unp_count;
		req->oldidx = 2 * (sizeof xug)
			+ (n + n/8) * sizeof(struct xunpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = unp_gencnt;
	n = unp_count;

	xug.xug_len = sizeof xug;
	xug.xug_count = n;
	xug.xug_gen = gencnt;
	xug.xug_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xug, sizeof xug);
	if (error)
		return error;

	unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
	if (unp_list == 0)
		return ENOMEM;

	for (unp = LIST_FIRST(head), i = 0; unp && i < n;
	     unp = LIST_NEXT(unp, unp_link)) {
		if (unp->unp_gencnt <= gencnt && !prison_unpcb(req->p, unp))
			unp_list[i++] = unp;
	}
	n = i;			/* in case we lost some during malloc */

	error = 0;
	for (i = 0; i < n; i++) {
		unp = unp_list[i];
		if (unp->unp_gencnt <= gencnt) {
			struct xunpcb xu;
			xu.xu_len = sizeof xu;
			xu.xu_unpp = unp;
			/*
			 * XXX - need more locking here to protect against
			 * connect/disconnect races for SMP.
			 */
			if (unp->unp_addr)
				bcopy(unp->unp_addr, &xu.xu_addr,
				      unp->unp_addr->sun_len);
			if (unp->unp_conn && unp->unp_conn->unp_addr)
				bcopy(unp->unp_conn->unp_addr,
				      &xu.xu_caddr,
				      unp->unp_conn->unp_addr->sun_len);
			bcopy(unp, &xu.xu_unp, sizeof *unp);
			sotoxsocket(unp->unp_socket, &xu.xu_socket);
			error = SYSCTL_OUT(req, &xu, sizeof xu);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		xug.xug_gen = unp_gencnt;
		xug.xug_sogen = so_gencnt;
		xug.xug_count = unp_count;
		error = SYSCTL_OUT(req, &xug, sizeof xug);
	}
	free(unp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
	    (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
	    "List of active local datagram sockets");
SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
	    (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
	    "List of active local stream sockets");

static void
unp_shutdown(unp)
	struct unpcb *unp;
{
	struct socket *so;

	if (unp->unp_socket->so_type == SOCK_STREAM && unp->unp_conn &&
	    (so = unp->unp_conn->unp_socket))
		socantrcvmore(so);
}

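/*
 * Record an error on the socket and disconnect it.  If the socket is
 * still sitting on a listen queue (so_head is set), dismantle the pcb
 * here as well and let sofree() reclaim the socket.
 */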
static void
unp_drop(unp, errno)
	struct unpcb *unp;
	int errno;
{
	struct socket *so = unp->unp_socket;

	so->so_error = errno;
	unp_disconnect(unp);
	if (so->so_head) {
		LIST_REMOVE(unp, unp_link);
		unp->unp_gencnt = ++unp_gencnt;
		unp_count--;
		so->so_pcb = (caddr_t) 0;
		if (unp->unp_addr)
			FREE(unp->unp_addr, M_SONAME);
		zfree(unp_zone, unp);
		sofree(so);
	}
}

#ifdef notdef
void
unp_drain()
{

}
#endif

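/*
 * unp_externalize() is the receive-side half of descriptor passing:
 * the struct file pointers carried in an SCM_RIGHTS control message
 * are turned back into file descriptors in the receiving process, or
 * discarded wholesale if that process has no room for them.
 */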
int
unp_externalize(rights)
	struct mbuf *rights;
{
	struct proc *p = curproc;		/* XXX */
	register int i;
	register struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	register int *fdp;
	register struct file **rp;
	register struct file *fp;
	int newfds = (cm->cmsg_len - (CMSG_DATA(cm) - (u_char *)cm))
		/ sizeof (struct file *);
	int f;

	/*
	 * if the new FD's will not fit, then we free them all
	 */
	if (!fdavail(p, newfds)) {
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			fp = *rp;
			/*
			 * zero the pointer before calling unp_discard,
			 * since it may end up in unp_gc()..
			 */
			*rp++ = 0;
			unp_discard(fp);
		}
		return (EMSGSIZE);
	}
	/*
	 * now change each pointer to an fd in the global table to
	 * an integer that is the index to the local fd table entry
	 * that we set up to point to the global one we are transferring.
	 * If sizeof (struct file *) is bigger than or equal to sizeof int,
	 * then do it in forward order. In that case, an integer will
	 * always come in the same place or before its corresponding
	 * struct file pointer.
	 * If sizeof (struct file *) is smaller than sizeof int, then
	 * do it in reverse order.
	 */
	if (sizeof (struct file *) >= sizeof (int)) {
		fdp = (int *)(cm + 1);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f))
				panic("unp_externalize");
			fp = *rp++;
			p->p_fd->fd_ofiles[f] = fp;
			fp->f_msgcount--;
			unp_rights--;
			*fdp++ = f;
		}
	} else {
		fdp = (int *)(cm + 1) + newfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + newfds - 1;
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f))
				panic("unp_externalize");
			fp = *rp--;
			p->p_fd->fd_ofiles[f] = fp;
			fp->f_msgcount--;
			unp_rights--;
			*fdp-- = f;
		}
	}

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differs.
	 */
	cm->cmsg_len = CMSG_LEN(newfds * sizeof(int));
	rights->m_len = cm->cmsg_len;
	return (0);
}

void
unp_init(void)
{
	unp_zone = zinit("unpcb", sizeof(struct unpcb), nmbclusters, 0, 0);
	if (unp_zone == 0)
		panic("unp_init");
	LIST_INIT(&unp_dhead);
	LIST_INIT(&unp_shead);
}

#ifndef MIN
#define	MIN(a,b) (((a)<(b))?(a):(b))
#endif

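/*
 * unp_internalize() is the send-side half: SCM_CREDS messages are
 * filled in with the sender's credentials, and SCM_RIGHTS messages
 * have their file descriptors validated and replaced (in place,
 * growing the mbuf to a cluster if needed) by referenced struct file
 * pointers.
 */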
static int
unp_internalize(control, p)
	struct mbuf *control;
	struct proc *p;
{
	struct filedesc *fdescp = p->p_fd;
	register struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	register struct file **rp;
	register struct file *fp;
	register int i, fd, *fdp;
	register struct cmsgcred *cmcred;
	int oldfds;
	u_int newlen;

	if ((cm->cmsg_type != SCM_RIGHTS && cm->cmsg_type != SCM_CREDS) ||
	    cm->cmsg_level != SOL_SOCKET || cm->cmsg_len != control->m_len)
		return (EINVAL);

	/*
	 * Fill in credential information.
	 */
	if (cm->cmsg_type == SCM_CREDS) {
		cmcred = (struct cmsgcred *)(cm + 1);
		cmcred->cmcred_pid = p->p_pid;
		cmcred->cmcred_uid = p->p_cred->p_ruid;
		cmcred->cmcred_gid = p->p_cred->p_rgid;
		cmcred->cmcred_euid = p->p_ucred->cr_uid;
		cmcred->cmcred_ngroups = MIN(p->p_ucred->cr_ngroups,
							CMGROUP_MAX);
		for (i = 0; i < cmcred->cmcred_ngroups; i++)
			cmcred->cmcred_groups[i] = p->p_ucred->cr_groups[i];
		return(0);
	}

	oldfds = (cm->cmsg_len - sizeof (*cm)) / sizeof (int);
	/*
	 * check that all the FDs passed in refer to legal OPEN files.
	 * If not, reject the entire operation.
	 */
	fdp = (int *)(cm + 1);
	for (i = 0; i < oldfds; i++) {
		fd = *fdp++;
		if ((unsigned)fd >= fdescp->fd_nfiles ||
		    fdescp->fd_ofiles[fd] == NULL)
			return (EBADF);
	}
	/*
	 * Now replace the integer FDs with pointers to
	 * the associated global file table entry..
	 * Allocate a bigger buffer as necessary. But if a cluster is not
	 * enough, return E2BIG.
	 */
	newlen = CMSG_LEN(oldfds * sizeof(struct file *));
	if (newlen > MCLBYTES)
		return (E2BIG);
	if (newlen - control->m_len > M_TRAILINGSPACE(control)) {
		if (control->m_flags & M_EXT)
			return (E2BIG);
		MCLGET(control, M_WAIT);
		if ((control->m_flags & M_EXT) == 0)
			return (ENOBUFS);

		/* copy the data to the cluster */
		memcpy(mtod(control, char *), cm, cm->cmsg_len);
		cm = mtod(control, struct cmsghdr *);
	}

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differs.
	 */
	control->m_len = cm->cmsg_len = newlen;

	/*
	 * Transform the file descriptors into struct file pointers.
	 * If sizeof (struct file *) is bigger than or equal to sizeof int,
	 * then do it in reverse order so that the int won't get
	 * overwritten until we're done.
	 * If sizeof (struct file *) is smaller than sizeof int, then
	 * do it in forward order.
	 */
	if (sizeof (struct file *) >= sizeof (int)) {
		fdp = (int *)(cm + 1) + oldfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + oldfds - 1;
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_ofiles[*fdp--];
			*rp-- = fp;
			fp->f_count++;
			fp->f_msgcount++;
			unp_rights++;
		}
	} else {
		fdp = (int *)(cm + 1);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_ofiles[*fdp++];
			*rp++ = fp;
			fp->f_count++;
			fp->f_msgcount++;
			unp_rights++;
		}
	}
	return (0);
}

static int	unp_defer, unp_gcing;

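/*
 * Garbage collector for descriptors in transit.  The mark phase
 * repeatedly walks the global file list, flagging every file that is
 * still reachable from user space (directly, or via rights queued on a
 * reachable socket).  The sweep phase then takes an extra reference to
 * each unreachable file, flushes the receive buffers of those that are
 * sockets, and closes them all, which breaks reference cycles between
 * sockets that only reference each other through in-flight messages;
 * see the long comment below for the history of this approach.
 */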
static void
unp_gc()
{
	register struct file *fp, *nextfp;
	register struct socket *so;
	struct file **extra_ref, **fpp;
	int nunref, i;

	if (unp_gcing)
		return;
	unp_gcing = 1;
	unp_defer = 0;
	/*
	 * before going through all this, set all FDs to
	 * be NOT deferred and NOT externally accessible
	 */
	LIST_FOREACH(fp, &filehead, f_list)
		fp->f_flag &= ~(FMARK|FDEFER);
	do {
		LIST_FOREACH(fp, &filehead, f_list) {
			/*
			 * If the file is not open, skip it
			 */
			if (fp->f_count == 0)
				continue;
			/*
			 * If we already marked it as 'defer' in a
			 * previous pass, then try to process it this time
			 * and un-mark it
			 */
			if (fp->f_flag & FDEFER) {
				fp->f_flag &= ~FDEFER;
				unp_defer--;
			} else {
				/*
				 * if it's not deferred, then check if it's
				 * already marked.. if so skip it
				 */
				if (fp->f_flag & FMARK)
					continue;
				/*
				 * If all references are from messages
				 * in transit, then skip it. it's not
				 * externally accessible.
				 */
				if (fp->f_count == fp->f_msgcount)
					continue;
				/*
				 * If it got this far then it must be
				 * externally accessible.
				 */
				fp->f_flag |= FMARK;
			}
			/*
			 * either it was deferred, or it is externally
			 * accessible and not already marked so.
			 * Now check if it is possibly one of OUR sockets.
			 */
			if (fp->f_type != DTYPE_SOCKET ||
			    (so = (struct socket *)fp->f_data) == 0)
				continue;
			if (so->so_proto->pr_domain != &localdomain ||
			    (so->so_proto->pr_flags&PR_RIGHTS) == 0)
				continue;
#ifdef notdef
			if (so->so_rcv.sb_flags & SB_LOCK) {
				/*
				 * This is problematical; it's not clear
				 * we need to wait for the sockbuf to be
				 * unlocked (on a uniprocessor, at least),
				 * and it's also not clear what to do
				 * if sbwait returns an error due to receipt
				 * of a signal.  If sbwait does return
				 * an error, we'll go into an infinite
				 * loop.  Delete all of this for now.
				 */
				(void) sbwait(&so->so_rcv);
				goto restart;
			}
#endif
			/*
			 * So, Ok, it's one of our sockets and it IS externally
			 * accessible (or was deferred). Now we look
			 * to see if we hold any file descriptors in its
			 * message buffers. Follow those links and mark them
			 * as accessible too.
			 */
			unp_scan(so->so_rcv.sb_mb, unp_mark);
		}
	} while (unp_defer);
	/*
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect to simply unp_discard each entry for f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights (unp_rights)
	 * is non-zero.  If during the sweep phase the gc code unp_discards,
	 * we end up doing a (full) closef on the descriptor.  A closef on A
	 * results in the following chain.  Closef calls soo_close, which
	 * calls soclose.   Soclose calls first (through the switch
	 * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
	 * returns because the previous instance had set unp_gcing, and
	 * we return all the way back to soclose, which marks the socket
	 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
	 * to free up the rights that are queued in messages on the socket A,
	 * i.e., the reference on B.  The sorflush calls via the dom_dispose
	 * switch unp_dispose, which unp_scans with unp_discard.  This second
	 * instance of unp_discard just calls closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is already
	 * being closed, and the descriptor has already been marked with
	 * SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, we call sorflush ourself, since we know
	 * it is a Unix domain socket anyhow.  After we destroy all the
	 * rights carried in messages, we do a last closef to get rid
	 * of our extra reference.  This is the last close, and the
	 * unp_detach etc will shut down the socket.
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	extra_ref = malloc(nfiles * sizeof(struct file *), M_FILE, M_WAITOK);
	for (nunref = 0, fp = LIST_FIRST(&filehead), fpp = extra_ref; fp != 0;
	    fp = nextfp) {
		nextfp = LIST_NEXT(fp, f_list);
		/*
		 * If it's not open, skip it
		 */
		if (fp->f_count == 0)
			continue;
		/*
		 * If all refs are from msgs, and it's not marked accessible
		 * then it must be referenced from some unreachable cycle
		 * of (shut-down) FDs, so include it in our
		 * list of FDs to remove
		 */
		if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
			*fpp++ = fp;
			nunref++;
			fp->f_count++;
		}
	}
	/*
	 * for each FD on our hit list, do the following two things
	 */
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) {
		struct file *tfp = *fpp;
		if (tfp->f_type == DTYPE_SOCKET && tfp->f_data != NULL)
			sorflush((struct socket *)(tfp->f_data));
	}
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
		closef(*fpp, (struct proc *) NULL);
	free((caddr_t)extra_ref, M_FILE);
	unp_gcing = 0;
}

void
unp_dispose(m)
	struct mbuf *m;
{

	if (m)
		unp_scan(m, unp_discard);
}

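/*
 * Walk a chain of mbufs and apply "op" (unp_mark or unp_discard) to
 * every struct file pointer found in SCM_RIGHTS control messages.
 */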
static void
unp_scan(m0, op)
	register struct mbuf *m0;
	void (*op) __P((struct file *));
{
	register struct mbuf *m;
	register struct file **rp;
	register struct cmsghdr *cm;
	register int i;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next)
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len -
					(CMSG_DATA(cm) - (u_char *)cm))
						/ sizeof (struct file *);
				rp = (struct file **)CMSG_DATA(cm);
				for (i = 0; i < qfds; i++)
					(*op)(*rp++);
				break;		/* XXX, but saves time */
			}
		m0 = m0->m_act;
	}
}

static void
unp_mark(fp)
	struct file *fp;
{

	if (fp->f_flag & FMARK)
		return;
	unp_defer++;
	fp->f_flag |= (FMARK|FDEFER);
}

static void
unp_discard(fp)
	struct file *fp;
{

	fp->f_msgcount--;
	unp_rights--;
	(void) closef(fp, (struct proc *)NULL);
}