sys/rpc/svc_vc.c: r184588 vs r193272
Deleted lines (r184588) are prefixed with "-", added lines (r193272) with "+"; unchanged context is shown once, with its r184588 line numbers.
1/* $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $ */
2
3/*
4 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
5 * unrestricted use provided that this legend is included on all tape
6 * media and as a part of the software program in whole or part. Users
7 * may copy or modify Sun RPC without charge, but are not authorized
8 * to license or distribute it to anyone else except as part of a product or
9 * program developed by the user.
10 *
11 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
12 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
13 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
14 *
15 * Sun RPC is provided with no support and without any obligation on the
16 * part of Sun Microsystems, Inc. to assist in its use, correction,
17 * modification or enhancement.
18 *
19 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
20 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
21 * OR ANY PART THEREOF.
22 *
23 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
24 * or profits or other special, indirect and consequential damages, even if
25 * Sun has been advised of the possibility of such damages.
26 *
27 * Sun Microsystems, Inc.
28 * 2550 Garcia Avenue
29 * Mountain View, California 94043
30 */
31
32#if defined(LIBC_SCCS) && !defined(lint)
33static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
34static char *sccsid = "@(#)svc_tcp.c 2.2 88/08/01 4.0 RPCSRC";
35#endif
36#include <sys/cdefs.h>
-37__FBSDID("$FreeBSD: head/sys/rpc/svc_vc.c 184588 2008-11-03 10:38:00Z dfr $");
+37__FBSDID("$FreeBSD: head/sys/rpc/svc_vc.c 193272 2009-06-01 21:17:03Z jhb $");
38
39/*
40 * svc_vc.c, Server side for Connection Oriented based RPC.
41 *
42 * Actually implements two flavors of transporter -
 43 * a tcp rendezvouser (a listener and connection establisher)
44 * and a record/tcp stream.
45 */
46
47#include <sys/param.h>
48#include <sys/lock.h>
49#include <sys/kernel.h>
50#include <sys/malloc.h>
51#include <sys/mbuf.h>
52#include <sys/mutex.h>
53#include <sys/protosw.h>
54#include <sys/queue.h>
55#include <sys/socket.h>
56#include <sys/socketvar.h>
57#include <sys/sx.h>
58#include <sys/systm.h>
59#include <sys/uio.h>
60#include <netinet/tcp.h>
61
62#include <rpc/rpc.h>
63
64#include <rpc/rpc_com.h>
65
66static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *,
67 struct sockaddr **, struct mbuf **);
68static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
69static void svc_vc_rendezvous_destroy(SVCXPRT *);
70static bool_t svc_vc_null(void);
71static void svc_vc_destroy(SVCXPRT *);
72static enum xprt_stat svc_vc_stat(SVCXPRT *);
73static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *,
74 struct sockaddr **, struct mbuf **);
75static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *,
76 struct sockaddr *, struct mbuf *);
77static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
78static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
79 void *in);
80static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
81 struct sockaddr *raddr);
82static int svc_vc_accept(struct socket *head, struct socket **sop);
-83static void svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
+83static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
84
85static struct xp_ops svc_vc_rendezvous_ops = {
86 .xp_recv = svc_vc_rendezvous_recv,
87 .xp_stat = svc_vc_rendezvous_stat,
88 .xp_reply = (bool_t (*)(SVCXPRT *, struct rpc_msg *,
89 struct sockaddr *, struct mbuf *))svc_vc_null,
90 .xp_destroy = svc_vc_rendezvous_destroy,
91 .xp_control = svc_vc_rendezvous_control
92};
93
94static struct xp_ops svc_vc_ops = {
95 .xp_recv = svc_vc_recv,
96 .xp_stat = svc_vc_stat,
97 .xp_reply = svc_vc_reply,
98 .xp_destroy = svc_vc_destroy,
99 .xp_control = svc_vc_control
100};
101
102struct cf_conn { /* kept in xprt->xp_p1 for actual connection */
103 enum xprt_stat strm_stat;
104 struct mbuf *mpending; /* unparsed data read from the socket */
105 struct mbuf *mreq; /* current record being built from mpending */
106 uint32_t resid; /* number of bytes needed for fragment */
107 bool_t eor; /* reading last fragment of current record */
108};
109
 110/*
 111 * Usage:
 112 *	xprt = svc_vc_create(pool, so, sendsize, recvsize);
 113 *
 114 * Creates, registers, and returns an (RPC) TCP-based transport.
 115 * Once *xprt is initialized, it is registered as a transport
 116 * (see svc.h, xprt_register). This routine returns
 117 * NULL if a problem occurred.
 118 *
 119 * The socket passed in is expected to be bound, but
 120 * not yet connected.
 121 *
 122 * Since streams do buffered I/O similar to stdio, the caller can specify
 123 * how big the send and receive buffers are via the sendsize and recvsize
 124 * parameters; 0 => use the system default.
 125 */
126SVCXPRT *
127svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
128 size_t recvsize)
129{
130 SVCXPRT *xprt;
131 struct sockaddr* sa;
132 int error;
133
134 if (so->so_state & SS_ISCONNECTED) {
135 error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
136 if (error)
137 return (NULL);
138 xprt = svc_vc_create_conn(pool, so, sa);
139 free(sa, M_SONAME);
140 return (xprt);
141 }
142
143 xprt = svc_xprt_alloc();
144 sx_init(&xprt->xp_lock, "xprt->xp_lock");
145 xprt->xp_pool = pool;
146 xprt->xp_socket = so;
147 xprt->xp_p1 = NULL;
148 xprt->xp_p2 = NULL;
149 xprt->xp_ops = &svc_vc_rendezvous_ops;
150
151 error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
152 if (error)
153 goto cleanup_svc_vc_create;
154
155 memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
156 free(sa, M_SONAME);
157
158 xprt_register(xprt);
159
160 solisten(so, SOMAXCONN, curthread);
161
162 SOCKBUF_LOCK(&so->so_rcv);
-163	so->so_upcallarg = xprt;
-164	so->so_upcall = svc_vc_soupcall;
-165	so->so_rcv.sb_flags |= SB_UPCALL;
+163	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
166 SOCKBUF_UNLOCK(&so->so_rcv);
167
168 return (xprt);
169cleanup_svc_vc_create:
170 if (xprt)
171 svc_xprt_free(xprt);
172 return (NULL);
173}
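
The soupcall_set() hunk above is the first of several in this diff that replace direct manipulation of so_upcallarg, so_upcall and SB_UPCALL with the soupcall_set()/soupcall_clear() helpers, under which an upcall returns a status code (SU_OK here) instead of void. A minimal sketch of the new pattern, assuming only what the diff itself shows; my_upcall, my_attach and my_detach are hypothetical names:

	/*
	 * Hypothetical receive upcall: runs when data arrives on so.
	 * Returning SU_OK keeps the upcall registered.
	 */
	static int
	my_upcall(struct socket *so, void *arg, int waitflag)
	{

		/* e.g. mark a transport active so a service thread runs. */
		return (SU_OK);
	}

	static void
	my_attach(struct socket *so, void *arg)
	{

		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_set(so, SO_RCV, my_upcall, arg);
		SOCKBUF_UNLOCK(&so->so_rcv);
	}

	static void
	my_detach(struct socket *so)
	{

		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_clear(so, SO_RCV);
		SOCKBUF_UNLOCK(&so->so_rcv);
	}

As in the hunks below, the receive sockbuf lock is held across both registration and removal.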
174
175/*
 176 * Create a new transport for a socket obtained via soaccept().
177 */
178SVCXPRT *
179svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
180{
181 SVCXPRT *xprt = NULL;
182 struct cf_conn *cd = NULL;
183 struct sockaddr* sa = NULL;
184 struct sockopt opt;
185 int one = 1;
186 int error;
187
188 bzero(&opt, sizeof(struct sockopt));
189 opt.sopt_dir = SOPT_SET;
190 opt.sopt_level = SOL_SOCKET;
191 opt.sopt_name = SO_KEEPALIVE;
192 opt.sopt_val = &one;
193 opt.sopt_valsize = sizeof(one);
194 error = sosetopt(so, &opt);
195 if (error)
196 return (NULL);
197
198 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
199 bzero(&opt, sizeof(struct sockopt));
200 opt.sopt_dir = SOPT_SET;
201 opt.sopt_level = IPPROTO_TCP;
202 opt.sopt_name = TCP_NODELAY;
203 opt.sopt_val = &one;
204 opt.sopt_valsize = sizeof(one);
205 error = sosetopt(so, &opt);
206 if (error)
207 return (NULL);
208 }
209
210 cd = mem_alloc(sizeof(*cd));
211 cd->strm_stat = XPRT_IDLE;
212
213 xprt = svc_xprt_alloc();
214 sx_init(&xprt->xp_lock, "xprt->xp_lock");
215 xprt->xp_pool = pool;
216 xprt->xp_socket = so;
217 xprt->xp_p1 = cd;
218 xprt->xp_p2 = NULL;
219 xprt->xp_ops = &svc_vc_ops;
220
221 /*
222 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
223 * has a 5 minute timer, server has a 6 minute timer.
224 */
225 xprt->xp_idletimeout = 6 * 60;
226
227 memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);
228
229 error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
230 if (error)
231 goto cleanup_svc_vc_create;
232
233 memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
234 free(sa, M_SONAME);
235
236 xprt_register(xprt);
237
238 SOCKBUF_LOCK(&so->so_rcv);
-239	so->so_upcallarg = xprt;
-240	so->so_upcall = svc_vc_soupcall;
-241	so->so_rcv.sb_flags |= SB_UPCALL;
+237	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
242 SOCKBUF_UNLOCK(&so->so_rcv);
243
244 /*
245 * Throw the transport into the active list in case it already
246 * has some data buffered.
247 */
248 sx_xlock(&xprt->xp_lock);
249 xprt_active(xprt);
250 sx_xunlock(&xprt->xp_lock);
251
252 return (xprt);
253cleanup_svc_vc_create:
254 if (xprt) {
255 mem_free(xprt, sizeof(*xprt));
256 }
257 if (cd)
258 mem_free(cd, sizeof(*cd));
259 return (NULL);
260}
261
262/*
263 * This does all of the accept except the final call to soaccept. The
264 * caller will call soaccept after dropping its locks (soaccept may
265 * call malloc).
266 */
267int
268svc_vc_accept(struct socket *head, struct socket **sop)
269{
270 int error = 0;
271 struct socket *so;
272
273 if ((head->so_options & SO_ACCEPTCONN) == 0) {
274 error = EINVAL;
275 goto done;
276 }
277#ifdef MAC
278 SOCK_LOCK(head);
279 error = mac_socket_check_accept(td->td_ucred, head);
280 SOCK_UNLOCK(head);
281 if (error != 0)
282 goto done;
283#endif
284 ACCEPT_LOCK();
285 if (TAILQ_EMPTY(&head->so_comp)) {
286 ACCEPT_UNLOCK();
287 error = EWOULDBLOCK;
288 goto done;
289 }
290 so = TAILQ_FIRST(&head->so_comp);
291 KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
292 KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));
293
294 /*
295 * Before changing the flags on the socket, we have to bump the
296 * reference count. Otherwise, if the protocol calls sofree(),
297 * the socket will be released due to a zero refcount.
298 * XXX might not need soref() since this is simpler than kern_accept.
299 */
300 SOCK_LOCK(so); /* soref() and so_state update */
301 soref(so); /* file descriptor reference */
302
303 TAILQ_REMOVE(&head->so_comp, so, so_list);
304 head->so_qlen--;
305 so->so_state |= (head->so_state & SS_NBIO);
306 so->so_qstate &= ~SQ_COMP;
307 so->so_head = NULL;
308
309 SOCK_UNLOCK(so);
310 ACCEPT_UNLOCK();
311
312 *sop = so;
313
314 /* connection has been removed from the listen queue */
315 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
316done:
317 return (error);
318}
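
svc_vc_accept() deliberately stops short of the final step: it only dequeues and references the completed connection, so the caller can drop its own locks before calling soaccept(), which may allocate memory. Reduced to a skeleton (error handling elided; svc_vc_rendezvous_recv() below is the real caller):

	struct socket *so;
	struct sockaddr *sa = NULL;
	int error;

	error = svc_vc_accept(head, &so);	/* dequeue only, no allocation */
	if (error == 0) {
		/* ... drop local locks here ... */
		error = soaccept(so, &sa);	/* may call malloc */
	}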
319
320/*ARGSUSED*/
321static bool_t
322svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
323 struct sockaddr **addrp, struct mbuf **mp)
324{
325 struct socket *so = NULL;
326 struct sockaddr *sa = NULL;
327 int error;
328
329 /*
330 * The socket upcall calls xprt_active() which will eventually
331 * cause the server to call us here. We attempt to accept a
332 * connection from the socket and turn it into a new
333 * transport. If the accept fails, we have drained all pending
334 * connections so we call xprt_inactive().
335 */
336 sx_xlock(&xprt->xp_lock);
337
338 error = svc_vc_accept(xprt->xp_socket, &so);
339
340 if (error == EWOULDBLOCK) {
341 /*
342 * We must re-test for new connections after taking
343 * the lock to protect us in the case where a new
344 * connection arrives after our call to accept fails
345 * with EWOULDBLOCK. The pool lock protects us from
346 * racing the upcall after our TAILQ_EMPTY() call
347 * returns false.
348 */
349 ACCEPT_LOCK();
350 mtx_lock(&xprt->xp_pool->sp_lock);
351 if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
352 xprt_inactive_locked(xprt);
353 mtx_unlock(&xprt->xp_pool->sp_lock);
354 ACCEPT_UNLOCK();
355 sx_xunlock(&xprt->xp_lock);
356 return (FALSE);
357 }
358
359 if (error) {
360 SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
-361	xprt->xp_socket->so_upcallarg = NULL;
-362	xprt->xp_socket->so_upcall = NULL;
-363	xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+357	soupcall_clear(xprt->xp_socket, SO_RCV);
364 SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
365 xprt_inactive(xprt);
366 sx_xunlock(&xprt->xp_lock);
367 return (FALSE);
368 }
369
370 sx_xunlock(&xprt->xp_lock);
371
372 sa = 0;
373 error = soaccept(so, &sa);
374
375 if (error) {
376 /*
377 * XXX not sure if I need to call sofree or soclose here.
378 */
379 if (sa)
380 free(sa, M_SONAME);
381 return (FALSE);
382 }
383
384 /*
385 * svc_vc_create_conn will call xprt_register - we don't need
386 * to do anything with the new connection.
387 */
388 if (!svc_vc_create_conn(xprt->xp_pool, so, sa))
389 soclose(so);
390
391 free(sa, M_SONAME);
392
393 return (FALSE); /* there is never an rpc msg to be processed */
394}
395
396/*ARGSUSED*/
397static enum xprt_stat
398svc_vc_rendezvous_stat(SVCXPRT *xprt)
399{
400
401 return (XPRT_IDLE);
402}
403
404static void
405svc_vc_destroy_common(SVCXPRT *xprt)
406{
407 SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
-408	xprt->xp_socket->so_upcallarg = NULL;
-409	xprt->xp_socket->so_upcall = NULL;
-410	xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+402	soupcall_clear(xprt->xp_socket, SO_RCV);
411 SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
412
413 sx_destroy(&xprt->xp_lock);
414 if (xprt->xp_socket)
415 (void)soclose(xprt->xp_socket);
416
417 if (xprt->xp_netid)
418 (void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
419 svc_xprt_free(xprt);
420}
421
422static void
423svc_vc_rendezvous_destroy(SVCXPRT *xprt)
424{
425
426 svc_vc_destroy_common(xprt);
427}
428
429static void
430svc_vc_destroy(SVCXPRT *xprt)
431{
432 struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
433
434 svc_vc_destroy_common(xprt);
435
436 if (cd->mreq)
437 m_freem(cd->mreq);
438 if (cd->mpending)
439 m_freem(cd->mpending);
440 mem_free(cd, sizeof(*cd));
441}
442
443/*ARGSUSED*/
444static bool_t
445svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
446{
447 return (FALSE);
448}
449
450static bool_t
451svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
452{
453
454 return (FALSE);
455}
456
457static enum xprt_stat
458svc_vc_stat(SVCXPRT *xprt)
459{
460 struct cf_conn *cd;
461 struct mbuf *m;
462 size_t n;
463
464 cd = (struct cf_conn *)(xprt->xp_p1);
465
466 if (cd->strm_stat == XPRT_DIED)
467 return (XPRT_DIED);
468
469 /*
470 * Return XPRT_MOREREQS if we have buffered data and we are
471 * mid-record or if we have enough data for a record
472 * marker. Since this is only a hint, we read mpending and
473 * resid outside the lock. We do need to take the lock if we
474 * have to traverse the mbuf chain.
475 */
476 if (cd->mpending) {
477 if (cd->resid)
478 return (XPRT_MOREREQS);
479 n = 0;
480 sx_xlock(&xprt->xp_lock);
481 m = cd->mpending;
482 while (m && n < sizeof(uint32_t)) {
483 n += m->m_len;
484 m = m->m_next;
485 }
486 sx_xunlock(&xprt->xp_lock);
487 if (n >= sizeof(uint32_t))
488 return (XPRT_MOREREQS);
489 }
490
491 if (soreadable(xprt->xp_socket))
492 return (XPRT_MOREREQS);
493
494 return (XPRT_IDLE);
495}
496
497static bool_t
498svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
499 struct sockaddr **addrp, struct mbuf **mp)
500{
501 struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
502 struct uio uio;
503 struct mbuf *m;
504 XDR xdrs;
505 int error, rcvflag;
506
507 /*
508 * Serialise access to the socket and our own record parsing
509 * state.
510 */
511 sx_xlock(&xprt->xp_lock);
512
513 for (;;) {
514 /*
515 * If we have an mbuf chain in cd->mpending, try to parse a
516 * record from it, leaving the result in cd->mreq. If we don't
517 * have a complete record, leave the partial result in
518 * cd->mreq and try to read more from the socket.
519 */
520 if (cd->mpending) {
521 /*
522 * If cd->resid is non-zero, we have part of the
523 * record already, otherwise we are expecting a record
524 * marker.
525 */
526 if (!cd->resid) {
527 /*
528 * See if there is enough data buffered to
529 * make up a record marker. Make sure we can
530 * handle the case where the record marker is
531 * split across more than one mbuf.
532 */
533 size_t n = 0;
534 uint32_t header;
535
536 m = cd->mpending;
537 while (n < sizeof(uint32_t) && m) {
538 n += m->m_len;
539 m = m->m_next;
540 }
541 if (n < sizeof(uint32_t))
542 goto readmore;
543 if (cd->mpending->m_len < sizeof(uint32_t))
544 cd->mpending = m_pullup(cd->mpending,
545 sizeof(uint32_t));
546 memcpy(&header, mtod(cd->mpending, uint32_t *),
547 sizeof(header));
548 header = ntohl(header);
549 cd->eor = (header & 0x80000000) != 0;
550 cd->resid = header & 0x7fffffff;
551 m_adj(cd->mpending, sizeof(uint32_t));
552 }
553
554 /*
555 * Start pulling off mbufs from cd->mpending
556 * until we either have a complete record or
557 * we run out of data. We use m_split to pull
558 * data - it will pull as much as possible and
559 * split the last mbuf if necessary.
560 */
561 while (cd->mpending && cd->resid) {
562 m = cd->mpending;
563 if (cd->mpending->m_next
564 || cd->mpending->m_len > cd->resid)
565 cd->mpending = m_split(cd->mpending,
566 cd->resid, M_WAIT);
567 else
568 cd->mpending = NULL;
569 if (cd->mreq)
570 m_last(cd->mreq)->m_next = m;
571 else
572 cd->mreq = m;
573 while (m) {
574 cd->resid -= m->m_len;
575 m = m->m_next;
576 }
577 }
578
579 /*
580 * If cd->resid is zero now, we have managed to
581 * receive a record fragment from the stream. Check
582 * for the end-of-record mark to see if we need more.
583 */
584 if (cd->resid == 0) {
585 if (!cd->eor)
586 continue;
587
588 /*
589 * Success - we have a complete record in
590 * cd->mreq.
591 */
592 xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
593 cd->mreq = NULL;
594 sx_xunlock(&xprt->xp_lock);
595
596 if (! xdr_callmsg(&xdrs, msg)) {
597 XDR_DESTROY(&xdrs);
598 return (FALSE);
599 }
600
601 *addrp = NULL;
602 *mp = xdrmbuf_getall(&xdrs);
603 XDR_DESTROY(&xdrs);
604
605 return (TRUE);
606 }
607 }
608
609 readmore:
610 /*
611 * The socket upcall calls xprt_active() which will eventually
612 * cause the server to call us here. We attempt to
613 * read as much as possible from the socket and put
614 * the result in cd->mpending. If the read fails,
615 * we have drained both cd->mpending and the socket so
616 * we can call xprt_inactive().
617 */
618 uio.uio_resid = 1000000000;
619 uio.uio_td = curthread;
620 m = NULL;
621 rcvflag = MSG_DONTWAIT;
622 error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL,
623 &rcvflag);
624
625 if (error == EWOULDBLOCK) {
626 /*
627 * We must re-test for readability after
628 * taking the lock to protect us in the case
629 * where a new packet arrives on the socket
630 * after our call to soreceive fails with
631 * EWOULDBLOCK. The pool lock protects us from
632 * racing the upcall after our soreadable()
633 * call returns false.
634 */
635 mtx_lock(&xprt->xp_pool->sp_lock);
636 if (!soreadable(xprt->xp_socket))
637 xprt_inactive_locked(xprt);
638 mtx_unlock(&xprt->xp_pool->sp_lock);
639 sx_xunlock(&xprt->xp_lock);
640 return (FALSE);
641 }
642
643 if (error) {
644 SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
-645	xprt->xp_socket->so_upcallarg = NULL;
-646	xprt->xp_socket->so_upcall = NULL;
-647	xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+637	soupcall_clear(xprt->xp_socket, SO_RCV);
648 SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
649 xprt_inactive(xprt);
650 cd->strm_stat = XPRT_DIED;
651 sx_xunlock(&xprt->xp_lock);
652 return (FALSE);
653 }
654
655 if (!m) {
656 /*
657 * EOF - the other end has closed the socket.
658 */
659 xprt_inactive(xprt);
660 cd->strm_stat = XPRT_DIED;
661 sx_xunlock(&xprt->xp_lock);
662 return (FALSE);
663 }
664
665 if (cd->mpending)
666 m_last(cd->mpending)->m_next = m;
667 else
668 cd->mpending = m;
669 }
670}
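
The marker handling in svc_vc_recv() follows ONC RPC record marking over TCP: each fragment of a record is preceded by a 4-byte big-endian word whose top bit flags the last fragment and whose low 31 bits give the fragment length. Isolated from the mbuf plumbing, the decode step is just this (decode_record_mark is an illustrative helper; the masks come straight from the code above):

	/*
	 * 'raw' is the 4 marker bytes in network order, exactly as
	 * svc_vc_recv() copies them out of the head of the mbuf chain.
	 */
	static void
	decode_record_mark(uint32_t raw, uint32_t *residp, bool_t *eorp)
	{
		uint32_t header = ntohl(raw);

		*eorp = (header & 0x80000000) != 0;	/* last fragment? */
		*residp = header & 0x7fffffff;		/* fragment length */
	}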
671
672static bool_t
673svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
674 struct sockaddr *addr, struct mbuf *m)
675{
676 XDR xdrs;
677 struct mbuf *mrep;
678 bool_t stat = TRUE;
679 int error;
680
681 /*
682 * Leave space for record mark.
683 */
684 MGETHDR(mrep, M_WAIT, MT_DATA);
685 mrep->m_len = 0;
686 mrep->m_data += sizeof(uint32_t);
687
688 xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);
689
690 if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
691 msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
692 if (!xdr_replymsg(&xdrs, msg))
693 stat = FALSE;
694 else
695 xdrmbuf_append(&xdrs, m);
696 } else {
697 stat = xdr_replymsg(&xdrs, msg);
698 }
699
700 if (stat) {
701 m_fixhdr(mrep);
702
703 /*
704 * Prepend a record marker containing the reply length.
705 */
706 M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
707 *mtod(mrep, uint32_t *) =
708 htonl(0x80000000 | (mrep->m_pkthdr.len
709 - sizeof(uint32_t)));
710 error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
711 0, curthread);
712 if (!error) {
713 stat = TRUE;
714 }
715 } else {
716 m_freem(mrep);
717 }
718
719 XDR_DESTROY(&xdrs);
720 xprt->xp_p2 = NULL;
721
722 return (stat);
723}
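
The send side is the mirror image: svc_vc_reply() leaves 4 bytes of headroom in the reply chain and, since this transport always emits the whole reply as a single fragment, fills it with the length plus the end-of-record bit. As a hypothetical one-line helper:

	/* Record mark for a one-fragment record of 'len' payload bytes. */
	static uint32_t
	encode_record_mark(uint32_t len)
	{

		return (htonl(0x80000000 | len));
	}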
724
725static bool_t
726svc_vc_null()
727{
728
729 return (FALSE);
730}
731
-732static void
+722static int
733svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
734{
735 SVCXPRT *xprt = (SVCXPRT *) arg;
736
737 xprt_active(xprt);
+728	return (SU_OK);
738}
739
740#if 0
741/*
742 * Get the effective UID of the sending process. Used by rpcbind, keyserv
743 * and rpc.yppasswdd on AF_LOCAL.
744 */
745int
746__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
747 int sock, ret;
748 gid_t egid;
749 uid_t euid;
750 struct sockaddr *sa;
751
752 sock = transp->xp_fd;
753 sa = (struct sockaddr *)transp->xp_rtaddr;
754 if (sa->sa_family == AF_LOCAL) {
755 ret = getpeereid(sock, &euid, &egid);
756 if (ret == 0)
757 *uid = euid;
758 return (ret);
759 } else
760 return (-1);
761}
762#endif