/*	$NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc_tcp.c	2.2 88/08/01 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/9/sys/rpc/svc_vc.c 261057 2014-01-23 00:28:17Z mav $");

/*
 * svc_vc.c, Server side for connection oriented RPC.
 *
 * Actually implements two flavors of transport -
 * a tcp rendezvouser (a listener and connection establisher)
 * and a record/tcp stream.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <net/vnet.h>

#include <netinet/tcp.h>

#include <rpc/rpc.h>

#include <rpc/rpc_com.h>

#include <security/mac/mac_framework.h>

static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
static void svc_vc_rendezvous_destroy(SVCXPRT *);
static bool_t svc_vc_null(void);
static void svc_vc_destroy(SVCXPRT *);
static enum xprt_stat svc_vc_stat(SVCXPRT *);
static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *,
    struct sockaddr *, struct mbuf *);
static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
static bool_t svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq,
    void *in);
static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
    struct sockaddr *raddr);
static int svc_vc_accept(struct socket *head, struct socket **sop);
static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag);

static struct xp_ops svc_vc_rendezvous_ops = {
	.xp_recv =	svc_vc_rendezvous_recv,
	.xp_stat =	svc_vc_rendezvous_stat,
	.xp_reply =	(bool_t (*)(SVCXPRT *, struct rpc_msg *,
		struct sockaddr *, struct mbuf *))svc_vc_null,
	.xp_destroy =	svc_vc_rendezvous_destroy,
	.xp_control =	svc_vc_rendezvous_control
};

static struct xp_ops svc_vc_ops = {
	.xp_recv =	svc_vc_recv,
	.xp_stat =	svc_vc_stat,
	.xp_reply =	svc_vc_reply,
	.xp_destroy =	svc_vc_destroy,
	.xp_control =	svc_vc_control
};

struct cf_conn {  /* kept in xprt->xp_p1 for actual connection */
	enum xprt_stat strm_stat;
	struct mbuf *mpending;	/* unparsed data read from the socket */
	struct mbuf *mreq;	/* current record being built from mpending */
	uint32_t resid;		/* number of bytes needed for fragment */
	bool_t eor;		/* reading last fragment of current record */
};
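
/*
 * The fields above track RPC record marking (see RFC 5531): on a
 * stream transport, each record fragment is preceded by a four byte
 * big-endian header whose top bit is set on the last fragment of a
 * record and whose low 31 bits give the fragment length.  resid
 * counts the bytes still owed on the current fragment and eor
 * remembers whether that fragment ends the record.
 */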

/*
 * Usage:
 *	xprt = svc_vc_create(pool, sock, send_buf_size, recv_buf_size);
 *
 * Creates, registers, and returns a (rpc) tcp based transport.
 * Once *xprt is initialized, it is registered as a transport;
 * see (svc.h, xprt_register).  This routine returns
 * a NULL if a problem occurred.
 *
 * The socket passed in is expected to be bound, but not yet
 * connected.
 *
 * Since streams do buffered io similar to stdio, the caller can specify
 * how big the send and receive buffers are via the sendsize and recvsize
 * parameters; 0 => use the system default.
 */
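
/*
 * A minimal calling sketch, assuming a pool created with
 * svc_pool_create() and a bound listening socket "so" (both
 * hypothetical here, not part of this file):
 *
 *	SVCXPRT *xprt;
 *
 *	xprt = svc_vc_create(pool, so, 0, 0);
 *	if (xprt == NULL)
 *		return (ENOMEM);
 */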
SVCXPRT *
svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt;
	struct sockaddr *sa;
	int error;

	SOCK_LOCK(so);
	if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) {
		SOCK_UNLOCK(so);
		error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
		if (error)
			return (NULL);
		xprt = svc_vc_create_conn(pool, so, sa);
		free(sa, M_SONAME);
		return (xprt);
	}
	SOCK_UNLOCK(so);

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_rendezvous_ops;

	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error) {
		goto cleanup_svc_vc_create;
	}

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	solisten(so, SOMAXCONN, curthread);

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt) {
		sx_destroy(&xprt->xp_lock);
		svc_xprt_free(xprt);
	}
	return (NULL);
}

/*
 * Create a new transport for a socket obtained via soaccept().
 */
SVCXPRT *
svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
{
	SVCXPRT *xprt = NULL;
	struct cf_conn *cd = NULL;
	struct sockaddr *sa = NULL;
	struct sockopt opt;
	int one = 1;
	int error;

	bzero(&opt, sizeof(struct sockopt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = SOL_SOCKET;
	opt.sopt_name = SO_KEEPALIVE;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error) {
		return (NULL);
	}

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_TCP;
		opt.sopt_name = TCP_NODELAY;
		opt.sopt_val = &one;
		opt.sopt_valsize = sizeof(one);
		error = sosetopt(so, &opt);
		if (error) {
			return (NULL);
		}
	}

	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_ops;

	/*
	 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
	 * has a 5 minute timer, server has a 6 minute timer.
	 */
	xprt->xp_idletimeout = 6 * 60;

	memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);

	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error)
		goto cleanup_svc_vc_create;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Throw the transport into the active list in case it already
	 * has some data buffered.
	 */
	sx_xlock(&xprt->xp_lock);
	xprt_active(xprt);
	sx_xunlock(&xprt->xp_lock);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt) {
		sx_destroy(&xprt->xp_lock);
		svc_xprt_free(xprt);
	}
	if (cd)
		mem_free(cd, sizeof(*cd));
	return (NULL);
}

/*
 * This does all of the accept except the final call to soaccept. The
 * caller will call soaccept after dropping its locks (soaccept may
 * call malloc).
 */
int
svc_vc_accept(struct socket *head, struct socket **sop)
{
	int error = 0;
	struct socket *so;

	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto done;
	}
#ifdef MAC
	error = mac_socket_check_accept(curthread->td_ucred, head);
	if (error != 0)
		goto done;
#endif
	ACCEPT_LOCK();
	if (TAILQ_EMPTY(&head->so_comp)) {
		ACCEPT_UNLOCK();
		error = EWOULDBLOCK;
		goto done;
	}
	so = TAILQ_FIRST(&head->so_comp);
	KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
	KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));

	/*
	 * Before changing the flags on the socket, we have to bump the
	 * reference count.  Otherwise, if the protocol calls sofree(),
	 * the socket will be released due to a zero refcount.
	 * XXX might not need soref() since this is simpler than kern_accept.
	 */
	SOCK_LOCK(so);			/* soref() and so_state update */
	soref(so);			/* file descriptor reference */

	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_state |= (head->so_state & SS_NBIO);
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;

	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	*sop = so;

	/* connection has been removed from the listen queue */
	KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
done:
	return (error);
}

/*ARGSUSED*/
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct socket *so = NULL;
	struct sockaddr *sa = NULL;
	int error;
	SVCXPRT *new_xprt;

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to accept a
	 * connection from the socket and turn it into a new
	 * transport. If the accept fails, we have drained all pending
	 * connections so we call xprt_inactive().
	 */
	sx_xlock(&xprt->xp_lock);

	error = svc_vc_accept(xprt->xp_socket, &so);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for new connections after taking
		 * the lock to protect us in the case where a new
		 * connection arrives after our call to accept fails
		 * with EWOULDBLOCK. The pool lock protects us from
		 * racing the upcall after our TAILQ_EMPTY() call
		 * returns false.
		 */
		ACCEPT_LOCK();
		mtx_lock(&xprt->xp_pool->sp_lock);
		if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
			xprt_inactive_locked(xprt);
		mtx_unlock(&xprt->xp_pool->sp_lock);
		ACCEPT_UNLOCK();
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		if (xprt->xp_upcallset) {
			xprt->xp_upcallset = 0;
			soupcall_clear(xprt->xp_socket, SO_RCV);
		}
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	sa = NULL;
	error = soaccept(so, &sa);

	if (error) {
		/*
		 * XXX not sure if I need to call sofree or soclose here.
		 */
		if (sa)
			free(sa, M_SONAME);
		return (FALSE);
	}

	/*
	 * svc_vc_create_conn will call xprt_register - we don't need
	 * to do anything with the new connection except dereference it.
	 */
	new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
	if (!new_xprt) {
		soclose(so);
	} else {
		SVC_RELEASE(new_xprt);
	}

	free(sa, M_SONAME);

	return (FALSE); /* there is never an rpc msg to be processed */
}

/*ARGSUSED*/
static enum xprt_stat
svc_vc_rendezvous_stat(SVCXPRT *xprt)
{

	return (XPRT_IDLE);
}

static void
svc_vc_destroy_common(SVCXPRT *xprt)
{
	SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
	if (xprt->xp_upcallset) {
		xprt->xp_upcallset = 0;
		soupcall_clear(xprt->xp_socket, SO_RCV);
	}
	SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);

	sx_destroy(&xprt->xp_lock);
	if (xprt->xp_socket)
		(void)soclose(xprt->xp_socket);

	if (xprt->xp_netid)
		(void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
	svc_xprt_free(xprt);
}

static void
svc_vc_rendezvous_destroy(SVCXPRT *xprt)
{

	svc_vc_destroy_common(xprt);
}

static void
svc_vc_destroy(SVCXPRT *xprt)
{
	struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;

	svc_vc_destroy_common(xprt);

	if (cd->mreq)
		m_freem(cd->mreq);
	if (cd->mpending)
		m_freem(cd->mpending);
	mem_free(cd, sizeof(*cd));
}

/*ARGSUSED*/
static bool_t
svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	return (FALSE);
}

static bool_t
svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
{

	return (FALSE);
}

static enum xprt_stat
svc_vc_stat(SVCXPRT *xprt)
{
	struct cf_conn *cd;
	struct mbuf *m;
	size_t n;

	cd = (struct cf_conn *)(xprt->xp_p1);

	if (cd->strm_stat == XPRT_DIED)
		return (XPRT_DIED);

	/*
	 * Return XPRT_MOREREQS if we have buffered data and we are
	 * mid-record or if we have enough data for a record
	 * marker. Since this is only a hint, we read mpending and
	 * resid outside the lock. We do need to take the lock if we
	 * have to traverse the mbuf chain.
	 */
	if (cd->mpending) {
		if (cd->resid)
			return (XPRT_MOREREQS);
		n = 0;
		sx_xlock(&xprt->xp_lock);
		m = cd->mpending;
		while (m && n < sizeof(uint32_t)) {
			n += m->m_len;
			m = m->m_next;
		}
		sx_xunlock(&xprt->xp_lock);
		if (n >= sizeof(uint32_t))
			return (XPRT_MOREREQS);
	}

	if (soreadable(xprt->xp_socket))
		return (XPRT_MOREREQS);

	return (XPRT_IDLE);
}

static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct uio uio;
	struct mbuf *m;
	XDR xdrs;
	int error, rcvflag;

	/*
	 * Serialise access to the socket and our own record parsing
	 * state.
	 */
	sx_xlock(&xprt->xp_lock);

	for (;;) {
		/*
		 * If we have an mbuf chain in cd->mpending, try to parse a
		 * record from it, leaving the result in cd->mreq. If we don't
		 * have a complete record, leave the partial result in
		 * cd->mreq and try to read more from the socket.
		 */
		if (cd->mpending) {
			/*
			 * If cd->resid is non-zero, we have part of the
			 * record already, otherwise we are expecting a record
			 * marker.
			 */
			if (!cd->resid) {
				/*
				 * See if there is enough data buffered to
				 * make up a record marker. Make sure we can
				 * handle the case where the record marker is
				 * split across more than one mbuf.
				 */
				size_t n = 0;
				uint32_t header;

				m = cd->mpending;
				while (n < sizeof(uint32_t) && m) {
					n += m->m_len;
					m = m->m_next;
				}
				if (n < sizeof(uint32_t))
					goto readmore;
				m_copydata(cd->mpending, 0, sizeof(header),
				    (char *)&header);
				header = ntohl(header);
				cd->eor = (header & 0x80000000) != 0;
				cd->resid = header & 0x7fffffff;
				m_adj(cd->mpending, sizeof(uint32_t));
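				/*
				 * Worked example: a header of 0x8000001c
				 * (after ntohl) announces a final fragment
				 * of 0x1c == 28 bytes, so eor is set and
				 * resid becomes 28.
				 */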
			}

			/*
			 * Start pulling off mbufs from cd->mpending
			 * until we either have a complete record or
			 * we run out of data. We use m_split to pull
			 * data - it will pull as much as possible and
			 * split the last mbuf if necessary.
			 */
			while (cd->mpending && cd->resid) {
				m = cd->mpending;
				if (cd->mpending->m_next
				    || cd->mpending->m_len > cd->resid)
					cd->mpending = m_split(cd->mpending,
					    cd->resid, M_WAIT);
				else
					cd->mpending = NULL;
				if (cd->mreq)
					m_last(cd->mreq)->m_next = m;
				else
					cd->mreq = m;
				while (m) {
					cd->resid -= m->m_len;
					m = m->m_next;
				}
			}

			/*
			 * If cd->resid is zero now, we have managed to
			 * receive a record fragment from the stream. Check
			 * for the end-of-record mark to see if we need more.
			 */
			if (cd->resid == 0) {
				if (!cd->eor)
					continue;

				/*
				 * Success - we have a complete record in
				 * cd->mreq.
				 */
				xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
				cd->mreq = NULL;
				sx_xunlock(&xprt->xp_lock);

				if (! xdr_callmsg(&xdrs, msg)) {
					XDR_DESTROY(&xdrs);
					return (FALSE);
				}

				*addrp = NULL;
				*mp = xdrmbuf_getall(&xdrs);
				XDR_DESTROY(&xdrs);

				return (TRUE);
			}
		}

	readmore:
		/*
		 * The socket upcall calls xprt_active() which will eventually
		 * cause the server to call us here. We attempt to
		 * read as much as possible from the socket and put
		 * the result in cd->mpending. If the read fails,
		 * we have drained both cd->mpending and the socket so
		 * we can call xprt_inactive().
		 */
		uio.uio_resid = 1000000000;
		uio.uio_td = curthread;
		m = NULL;
		rcvflag = MSG_DONTWAIT;
		error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL,
		    &rcvflag);

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK. The pool lock protects us from
			 * racing the upcall after our soreadable()
			 * call returns false.
			 */
			mtx_lock(&xprt->xp_pool->sp_lock);
			if (!soreadable(xprt->xp_socket))
				xprt_inactive_locked(xprt);
			mtx_unlock(&xprt->xp_pool->sp_lock);
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (error) {
			SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
			if (xprt->xp_upcallset) {
				xprt->xp_upcallset = 0;
				soupcall_clear(xprt->xp_socket, SO_RCV);
			}
			SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
			xprt_inactive(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (!m) {
			/*
			 * EOF - the other end has closed the socket.
			 */
			xprt_inactive(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (cd->mpending)
			m_last(cd->mpending)->m_next = m;
		else
			cd->mpending = m;
	}
}

static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m)
{
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error;

	/*
	 * Leave space for record mark.
	 */
	MGETHDR(mrep, M_WAIT, MT_DATA);
	mrep->m_len = 0;
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
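		 * The top bit flags this as the last (here, the only)
		 * fragment of the record; the low 31 bits carry the
		 * length, which excludes the four byte marker itself.
		 * For example, a 124-byte reply gets the marker
		 * htonl(0x80000000 | 124).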
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
		*mtod(mrep, uint32_t *) =
			htonl(0x80000000 | (mrep->m_pkthdr.len
				- sizeof(uint32_t)));
		error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
		    0, curthread);
		if (!error) {
			stat = TRUE;
		}
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);
	xprt->xp_p2 = NULL;

	return (stat);
}

static bool_t
svc_vc_null(void)
{

	return (FALSE);
}

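/*
 * Socket receive upcall.  Runs when data or a new connection arrives;
 * it simply marks the transport active so that a service thread will
 * eventually call the transport's xp_recv method.
 */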
static int
svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	SVCXPRT *xprt = (SVCXPRT *) arg;

	xprt_active(xprt);
	return (SU_OK);
}

#if 0
/*
 * Get the effective UID of the sending process. Used by rpcbind, keyserv
 * and rpc.yppasswdd on AF_LOCAL.
 */
int
__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
	int sock, ret;
	gid_t egid;
	uid_t euid;
	struct sockaddr *sa;

	sock = transp->xp_fd;
	sa = (struct sockaddr *)transp->xp_rtaddr;
	if (sa->sa_family == AF_LOCAL) {
		ret = getpeereid(sock, &euid, &egid);
		if (ret == 0)
			*uid = euid;
		return (ret);
	} else
		return (-1);
}
#endif