svc_vc.c revision 193272
1/*	$NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $	*/
2
3/*
4 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
5 * unrestricted use provided that this legend is included on all tape
6 * media and as a part of the software program in whole or part.  Users
7 * may copy or modify Sun RPC without charge, but are not authorized
8 * to license or distribute it to anyone else except as part of a product or
9 * program developed by the user.
10 *
11 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
12 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
13 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
14 *
15 * Sun RPC is provided with no support and without any obligation on the
16 * part of Sun Microsystems, Inc. to assist in its use, correction,
17 * modification or enhancement.
18 *
19 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
20 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
21 * OR ANY PART THEREOF.
22 *
23 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
24 * or profits or other special, indirect and consequential damages, even if
25 * Sun has been advised of the possibility of such damages.
26 *
27 * Sun Microsystems, Inc.
28 * 2550 Garcia Avenue
29 * Mountain View, California  94043
30 */
31
32#if defined(LIBC_SCCS) && !defined(lint)
33static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
34static char *sccsid = "@(#)svc_tcp.c	2.2 88/08/01 4.0 RPCSRC";
35#endif
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD: head/sys/rpc/svc_vc.c 193272 2009-06-01 21:17:03Z jhb $");
38
39/*
40 * svc_vc.c, Server side for Connection Oriented based RPC.
41 *
42 * Actually implements two flavors of transporter -
 * a tcp rendezvouser (a listener and connection establisher)
44 * and a record/tcp stream.
45 */
46
47#include <sys/param.h>
48#include <sys/lock.h>
49#include <sys/kernel.h>
50#include <sys/malloc.h>
51#include <sys/mbuf.h>
52#include <sys/mutex.h>
53#include <sys/protosw.h>
54#include <sys/queue.h>
55#include <sys/socket.h>
56#include <sys/socketvar.h>
57#include <sys/sx.h>
58#include <sys/systm.h>
59#include <sys/uio.h>
60#include <netinet/tcp.h>
61
62#include <rpc/rpc.h>
63
64#include <rpc/rpc_com.h>
65
66static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *,
67    struct sockaddr **, struct mbuf **);
68static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
69static void svc_vc_rendezvous_destroy(SVCXPRT *);
70static bool_t svc_vc_null(void);
71static void svc_vc_destroy(SVCXPRT *);
72static enum xprt_stat svc_vc_stat(SVCXPRT *);
73static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *,
74    struct sockaddr **, struct mbuf **);
75static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *,
76    struct sockaddr *, struct mbuf *);
77static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
78static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
79    void *in);
80static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
81    struct sockaddr *raddr);
82static int svc_vc_accept(struct socket *head, struct socket **sop);
83static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
84
/*
 * Method table for a rendezvous (listening) transport.  A rendezvous
 * transport never sends replies, so xp_reply is wired to
 * svc_vc_null() (always FALSE), cast to the xp_reply signature.
 */
static struct xp_ops svc_vc_rendezvous_ops = {
	.xp_recv =	svc_vc_rendezvous_recv,
	.xp_stat =	svc_vc_rendezvous_stat,
	.xp_reply =	(bool_t (*)(SVCXPRT *, struct rpc_msg *,
		struct sockaddr *, struct mbuf *))svc_vc_null,
	.xp_destroy =	svc_vc_rendezvous_destroy,
	.xp_control =	svc_vc_rendezvous_control
};
93
/* Method table for a connected (record stream) transport. */
static struct xp_ops svc_vc_ops = {
	.xp_recv =	svc_vc_recv,
	.xp_stat =	svc_vc_stat,
	.xp_reply =	svc_vc_reply,
	.xp_destroy =	svc_vc_destroy,
	.xp_control =	svc_vc_control
};
101
struct cf_conn {  /* kept in xprt->xp_p1 for actual connection */
	enum xprt_stat strm_stat;	/* set to XPRT_DIED when the stream fails */
	struct mbuf *mpending;	/* unparsed data read from the socket */
	struct mbuf *mreq;	/* current record being built from mpending */
	uint32_t resid;		/* number of bytes needed for fragment */
	bool_t eor;		/* reading last fragment of current record */
};
109
110/*
111 * Usage:
112 *	xprt = svc_vc_create(sock, send_buf_size, recv_buf_size);
113 *
114 * Creates, registers, and returns a (rpc) tcp based transporter.
115 * Once *xprt is initialized, it is registered as a transporter
116 * see (svc.h, xprt_register).  This routine returns
117 * a NULL if a problem occurred.
118 *
119 * The filedescriptor passed in is expected to refer to a bound, but
120 * not yet connected socket.
121 *
122 * Since streams do buffered io similar to stdio, the caller can specify
123 * how big the send and receive buffers are via the second and third parms;
124 * 0 => use the system default.
125 */
126SVCXPRT *
127svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
128    size_t recvsize)
129{
130	SVCXPRT *xprt;
131	struct sockaddr* sa;
132	int error;
133
134	if (so->so_state & SS_ISCONNECTED) {
135		error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
136		if (error)
137			return (NULL);
138		xprt = svc_vc_create_conn(pool, so, sa);
139		free(sa, M_SONAME);
140		return (xprt);
141	}
142
143	xprt = svc_xprt_alloc();
144	sx_init(&xprt->xp_lock, "xprt->xp_lock");
145	xprt->xp_pool = pool;
146	xprt->xp_socket = so;
147	xprt->xp_p1 = NULL;
148	xprt->xp_p2 = NULL;
149	xprt->xp_ops = &svc_vc_rendezvous_ops;
150
151	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
152	if (error)
153		goto cleanup_svc_vc_create;
154
155	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
156	free(sa, M_SONAME);
157
158	xprt_register(xprt);
159
160	solisten(so, SOMAXCONN, curthread);
161
162	SOCKBUF_LOCK(&so->so_rcv);
163	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
164	SOCKBUF_UNLOCK(&so->so_rcv);
165
166	return (xprt);
167cleanup_svc_vc_create:
168	if (xprt)
169		svc_xprt_free(xprt);
170	return (NULL);
171}
172
/*
 * Create a new transport for a socket obtained via soaccept().
 */
176SVCXPRT *
177svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
178{
179	SVCXPRT *xprt = NULL;
180	struct cf_conn *cd = NULL;
181	struct sockaddr* sa = NULL;
182	struct sockopt opt;
183	int one = 1;
184	int error;
185
186	bzero(&opt, sizeof(struct sockopt));
187	opt.sopt_dir = SOPT_SET;
188	opt.sopt_level = SOL_SOCKET;
189	opt.sopt_name = SO_KEEPALIVE;
190	opt.sopt_val = &one;
191	opt.sopt_valsize = sizeof(one);
192	error = sosetopt(so, &opt);
193	if (error)
194		return (NULL);
195
196	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
197		bzero(&opt, sizeof(struct sockopt));
198		opt.sopt_dir = SOPT_SET;
199		opt.sopt_level = IPPROTO_TCP;
200		opt.sopt_name = TCP_NODELAY;
201		opt.sopt_val = &one;
202		opt.sopt_valsize = sizeof(one);
203		error = sosetopt(so, &opt);
204		if (error)
205			return (NULL);
206	}
207
208	cd = mem_alloc(sizeof(*cd));
209	cd->strm_stat = XPRT_IDLE;
210
211	xprt = svc_xprt_alloc();
212	sx_init(&xprt->xp_lock, "xprt->xp_lock");
213	xprt->xp_pool = pool;
214	xprt->xp_socket = so;
215	xprt->xp_p1 = cd;
216	xprt->xp_p2 = NULL;
217	xprt->xp_ops = &svc_vc_ops;
218
219	/*
220	 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
221	 * has a 5 minute timer, server has a 6 minute timer.
222	 */
223	xprt->xp_idletimeout = 6 * 60;
224
225	memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);
226
227	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
228	if (error)
229		goto cleanup_svc_vc_create;
230
231	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
232	free(sa, M_SONAME);
233
234	xprt_register(xprt);
235
236	SOCKBUF_LOCK(&so->so_rcv);
237	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
238	SOCKBUF_UNLOCK(&so->so_rcv);
239
240	/*
241	 * Throw the transport into the active list in case it already
242	 * has some data buffered.
243	 */
244	sx_xlock(&xprt->xp_lock);
245	xprt_active(xprt);
246	sx_xunlock(&xprt->xp_lock);
247
248	return (xprt);
249cleanup_svc_vc_create:
250	if (xprt) {
251		mem_free(xprt, sizeof(*xprt));
252	}
253	if (cd)
254		mem_free(cd, sizeof(*cd));
255	return (NULL);
256}
257
258/*
259 * This does all of the accept except the final call to soaccept. The
260 * caller will call soaccept after dropping its locks (soaccept may
261 * call malloc).
262 */
263int
264svc_vc_accept(struct socket *head, struct socket **sop)
265{
266	int error = 0;
267	struct socket *so;
268
269	if ((head->so_options & SO_ACCEPTCONN) == 0) {
270		error = EINVAL;
271		goto done;
272	}
273#ifdef MAC
274	SOCK_LOCK(head);
275	error = mac_socket_check_accept(td->td_ucred, head);
276	SOCK_UNLOCK(head);
277	if (error != 0)
278		goto done;
279#endif
280	ACCEPT_LOCK();
281	if (TAILQ_EMPTY(&head->so_comp)) {
282		ACCEPT_UNLOCK();
283		error = EWOULDBLOCK;
284		goto done;
285	}
286	so = TAILQ_FIRST(&head->so_comp);
287	KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
288	KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));
289
290	/*
291	 * Before changing the flags on the socket, we have to bump the
292	 * reference count.  Otherwise, if the protocol calls sofree(),
293	 * the socket will be released due to a zero refcount.
294	 * XXX might not need soref() since this is simpler than kern_accept.
295	 */
296	SOCK_LOCK(so);			/* soref() and so_state update */
297	soref(so);			/* file descriptor reference */
298
299	TAILQ_REMOVE(&head->so_comp, so, so_list);
300	head->so_qlen--;
301	so->so_state |= (head->so_state & SS_NBIO);
302	so->so_qstate &= ~SQ_COMP;
303	so->so_head = NULL;
304
305	SOCK_UNLOCK(so);
306	ACCEPT_UNLOCK();
307
308	*sop = so;
309
310	/* connection has been removed from the listen queue */
311	KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
312done:
313	return (error);
314}
315
/*ARGSUSED*/
/*
 * xp_recv method for a rendezvous transport: accept one pending
 * connection and wrap it in a new connected transport via
 * svc_vc_create_conn().  Always returns FALSE because a rendezvous
 * transport never yields an RPC message of its own; the msg, addrp
 * and mp arguments are unused.
 */
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct socket *so = NULL;
	struct sockaddr *sa = NULL;
	int error;

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to accept a
	 * connection from the socket and turn it into a new
	 * transport. If the accept fails, we have drained all pending
	 * connections so we call xprt_inactive().
	 */
	sx_xlock(&xprt->xp_lock);

	error = svc_vc_accept(xprt->xp_socket, &so);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for new connections after taking
		 * the lock to protect us in the case where a new
		 * connection arrives after our call to accept fails
		 * with EWOULDBLOCK. The pool lock protects us from
		 * racing the upcall after our TAILQ_EMPTY() call
		 * returns false.
		 */
		ACCEPT_LOCK();
		mtx_lock(&xprt->xp_pool->sp_lock);
		if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
			xprt_inactive_locked(xprt);
		mtx_unlock(&xprt->xp_pool->sp_lock);
		ACCEPT_UNLOCK();
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		/* Hard error: stop taking upcalls and go inactive. */
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		soupcall_clear(xprt->xp_socket, SO_RCV);
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	/* Finish the accept unlocked; soaccept() may call malloc. */
	sa = 0;
	error = soaccept(so, &sa);

	if (error) {
		/*
		 * XXX not sure if I need to call sofree or soclose here.
		 */
		if (sa)
			free(sa, M_SONAME);
		return (FALSE);
	}

	/*
	 * svc_vc_create_conn will call xprt_register - we don't need
	 * to do anything with the new connection.
	 */
	if (!svc_vc_create_conn(xprt->xp_pool, so, sa))
		soclose(so);

	free(sa, M_SONAME);

	return (FALSE); /* there is never an rpc msg to be processed */
}
389
/*ARGSUSED*/
/*
 * A rendezvous transport has no record stream of its own, so it
 * never reports pending requests.
 */
static enum xprt_stat
svc_vc_rendezvous_stat(SVCXPRT *xprt)
{

	return (XPRT_IDLE);
}
397
/*
 * Teardown shared by both transport flavours: clear the receive
 * upcall (so no further xprt_active() calls arrive), close the
 * socket, free the netid string and release the transport.
 */
static void
svc_vc_destroy_common(SVCXPRT *xprt)
{
	/* Remove the upcall before closing the socket it points at. */
	SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
	soupcall_clear(xprt->xp_socket, SO_RCV);
	SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);

	sx_destroy(&xprt->xp_lock);
	if (xprt->xp_socket)
		(void)soclose(xprt->xp_socket);

	if (xprt->xp_netid)
		(void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
	svc_xprt_free(xprt);
}
413
/*
 * Destroy a rendezvous transport.  There is no per-connection state
 * (xp_p1 is NULL for rendezvous transports), so the common teardown
 * is all that is needed.
 */
static void
svc_vc_rendezvous_destroy(SVCXPRT *xprt)
{

	svc_vc_destroy_common(xprt);
}
420
421static void
422svc_vc_destroy(SVCXPRT *xprt)
423{
424	struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
425
426	svc_vc_destroy_common(xprt);
427
428	if (cd->mreq)
429		m_freem(cd->mreq);
430	if (cd->mpending)
431		m_freem(cd->mpending);
432	mem_free(cd, sizeof(*cd));
433}
434
/*ARGSUSED*/
/* No control operations are supported on connected transports. */
static bool_t
svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	return (FALSE);
}
441
/* No control operations are supported on rendezvous transports. */
static bool_t
svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
{

	return (FALSE);
}
448
449static enum xprt_stat
450svc_vc_stat(SVCXPRT *xprt)
451{
452	struct cf_conn *cd;
453	struct mbuf *m;
454	size_t n;
455
456	cd = (struct cf_conn *)(xprt->xp_p1);
457
458	if (cd->strm_stat == XPRT_DIED)
459		return (XPRT_DIED);
460
461	/*
462	 * Return XPRT_MOREREQS if we have buffered data and we are
463	 * mid-record or if we have enough data for a record
464	 * marker. Since this is only a hint, we read mpending and
465	 * resid outside the lock. We do need to take the lock if we
466	 * have to traverse the mbuf chain.
467	 */
468	if (cd->mpending) {
469		if (cd->resid)
470			return (XPRT_MOREREQS);
471		n = 0;
472		sx_xlock(&xprt->xp_lock);
473		m = cd->mpending;
474		while (m && n < sizeof(uint32_t)) {
475			n += m->m_len;
476			m = m->m_next;
477		}
478		sx_xunlock(&xprt->xp_lock);
479		if (n >= sizeof(uint32_t))
480			return (XPRT_MOREREQS);
481	}
482
483	if (soreadable(xprt->xp_socket))
484		return (XPRT_MOREREQS);
485
486	return (XPRT_IDLE);
487}
488
489static bool_t
490svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
491    struct sockaddr **addrp, struct mbuf **mp)
492{
493	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
494	struct uio uio;
495	struct mbuf *m;
496	XDR xdrs;
497	int error, rcvflag;
498
499	/*
500	 * Serialise access to the socket and our own record parsing
501	 * state.
502	 */
503	sx_xlock(&xprt->xp_lock);
504
505	for (;;) {
506		/*
507		 * If we have an mbuf chain in cd->mpending, try to parse a
508		 * record from it, leaving the result in cd->mreq. If we don't
509		 * have a complete record, leave the partial result in
510		 * cd->mreq and try to read more from the socket.
511		 */
512		if (cd->mpending) {
513			/*
514			 * If cd->resid is non-zero, we have part of the
515			 * record already, otherwise we are expecting a record
516			 * marker.
517			 */
518			if (!cd->resid) {
519				/*
520				 * See if there is enough data buffered to
521				 * make up a record marker. Make sure we can
522				 * handle the case where the record marker is
523				 * split across more than one mbuf.
524				 */
525				size_t n = 0;
526				uint32_t header;
527
528				m = cd->mpending;
529				while (n < sizeof(uint32_t) && m) {
530					n += m->m_len;
531					m = m->m_next;
532				}
533				if (n < sizeof(uint32_t))
534					goto readmore;
535				if (cd->mpending->m_len < sizeof(uint32_t))
536					cd->mpending = m_pullup(cd->mpending,
537					    sizeof(uint32_t));
538				memcpy(&header, mtod(cd->mpending, uint32_t *),
539				    sizeof(header));
540				header = ntohl(header);
541				cd->eor = (header & 0x80000000) != 0;
542				cd->resid = header & 0x7fffffff;
543				m_adj(cd->mpending, sizeof(uint32_t));
544			}
545
546			/*
547			 * Start pulling off mbufs from cd->mpending
548			 * until we either have a complete record or
549			 * we run out of data. We use m_split to pull
550			 * data - it will pull as much as possible and
551			 * split the last mbuf if necessary.
552			 */
553			while (cd->mpending && cd->resid) {
554				m = cd->mpending;
555				if (cd->mpending->m_next
556				    || cd->mpending->m_len > cd->resid)
557					cd->mpending = m_split(cd->mpending,
558					    cd->resid, M_WAIT);
559				else
560					cd->mpending = NULL;
561				if (cd->mreq)
562					m_last(cd->mreq)->m_next = m;
563				else
564					cd->mreq = m;
565				while (m) {
566					cd->resid -= m->m_len;
567					m = m->m_next;
568				}
569			}
570
571			/*
572			 * If cd->resid is zero now, we have managed to
573			 * receive a record fragment from the stream. Check
574			 * for the end-of-record mark to see if we need more.
575			 */
576			if (cd->resid == 0) {
577				if (!cd->eor)
578					continue;
579
580				/*
581				 * Success - we have a complete record in
582				 * cd->mreq.
583				 */
584				xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
585				cd->mreq = NULL;
586				sx_xunlock(&xprt->xp_lock);
587
588				if (! xdr_callmsg(&xdrs, msg)) {
589					XDR_DESTROY(&xdrs);
590					return (FALSE);
591				}
592
593				*addrp = NULL;
594				*mp = xdrmbuf_getall(&xdrs);
595				XDR_DESTROY(&xdrs);
596
597				return (TRUE);
598			}
599		}
600
601	readmore:
602		/*
603		 * The socket upcall calls xprt_active() which will eventually
604		 * cause the server to call us here. We attempt to
605		 * read as much as possible from the socket and put
606		 * the result in cd->mpending. If the read fails,
607		 * we have drained both cd->mpending and the socket so
608		 * we can call xprt_inactive().
609		 */
610		uio.uio_resid = 1000000000;
611		uio.uio_td = curthread;
612		m = NULL;
613		rcvflag = MSG_DONTWAIT;
614		error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL,
615		    &rcvflag);
616
617		if (error == EWOULDBLOCK) {
618			/*
619			 * We must re-test for readability after
620			 * taking the lock to protect us in the case
621			 * where a new packet arrives on the socket
622			 * after our call to soreceive fails with
623			 * EWOULDBLOCK. The pool lock protects us from
624			 * racing the upcall after our soreadable()
625			 * call returns false.
626			 */
627			mtx_lock(&xprt->xp_pool->sp_lock);
628			if (!soreadable(xprt->xp_socket))
629				xprt_inactive_locked(xprt);
630			mtx_unlock(&xprt->xp_pool->sp_lock);
631			sx_xunlock(&xprt->xp_lock);
632			return (FALSE);
633		}
634
635		if (error) {
636			SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
637			soupcall_clear(xprt->xp_socket, SO_RCV);
638			SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
639			xprt_inactive(xprt);
640			cd->strm_stat = XPRT_DIED;
641			sx_xunlock(&xprt->xp_lock);
642			return (FALSE);
643		}
644
645		if (!m) {
646			/*
647			 * EOF - the other end has closed the socket.
648			 */
649			xprt_inactive(xprt);
650			cd->strm_stat = XPRT_DIED;
651			sx_xunlock(&xprt->xp_lock);
652			return (FALSE);
653		}
654
655		if (cd->mpending)
656			m_last(cd->mpending)->m_next = m;
657		else
658			cd->mpending = m;
659	}
660}
661
662static bool_t
663svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
664    struct sockaddr *addr, struct mbuf *m)
665{
666	XDR xdrs;
667	struct mbuf *mrep;
668	bool_t stat = TRUE;
669	int error;
670
671	/*
672	 * Leave space for record mark.
673	 */
674	MGETHDR(mrep, M_WAIT, MT_DATA);
675	mrep->m_len = 0;
676	mrep->m_data += sizeof(uint32_t);
677
678	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);
679
680	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
681	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
682		if (!xdr_replymsg(&xdrs, msg))
683			stat = FALSE;
684		else
685			xdrmbuf_append(&xdrs, m);
686	} else {
687		stat = xdr_replymsg(&xdrs, msg);
688	}
689
690	if (stat) {
691		m_fixhdr(mrep);
692
693		/*
694		 * Prepend a record marker containing the reply length.
695		 */
696		M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
697		*mtod(mrep, uint32_t *) =
698			htonl(0x80000000 | (mrep->m_pkthdr.len
699				- sizeof(uint32_t)));
700		error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
701		    0, curthread);
702		if (!error) {
703			stat = TRUE;
704		}
705	} else {
706		m_freem(mrep);
707	}
708
709	XDR_DESTROY(&xdrs);
710	xprt->xp_p2 = NULL;
711
712	return (stat);
713}
714
715static bool_t
716svc_vc_null()
717{
718
719	return (FALSE);
720}
721
722static int
723svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
724{
725	SVCXPRT *xprt = (SVCXPRT *) arg;
726
727	xprt_active(xprt);
728	return (SU_OK);
729}
730
#if 0
/*
 * Get the effective UID of the sending process. Used by rpcbind, keyserv
 * and rpc.yppasswdd on AF_LOCAL.
 *
 * NOTE(review): compiled out.  This references xp_fd and
 * getpeereid(), which look like leftovers from the userland RPC
 * sources rather than kernel interfaces — presumably kept here for
 * reference only; confirm before re-enabling.
 */
int
__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
	int sock, ret;
	gid_t egid;
	uid_t euid;
	struct sockaddr *sa;

	sock = transp->xp_fd;
	sa = (struct sockaddr *)transp->xp_rtaddr;
	if (sa->sa_family == AF_LOCAL) {
		ret = getpeereid(sock, &euid, &egid);
		if (ret == 0)
			*uid = euid;
		return (ret);
	} else
		return (-1);
}
#endif
754