clnt_dg.c: diff of revision 99996 against revision 105189. In each changed hunk the revision 99996 (deleted) lines are listed first and the revision 105189 (added) lines second; the change replaces the poll(2)-based reply wait in clnt_dg_call() with a kqueue(2)/kevent(2)-based one and reroutes the error paths through a common out: label.
1/* $NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $ */
2
3/*
4 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
5 * unrestricted use provided that this legend is included on all tape
6 * media and as a part of the software program in whole or part. Users
7 * may copy or modify Sun RPC without charge, but are not authorized
8 * to license or distribute it to anyone else except as part of a product or
9 * program developed by the user.
10 *
11 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
12 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
13 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
14 *
15 * Sun RPC is provided with no support and without any obligation on the
16 * part of Sun Microsystems, Inc. to assist in its use, correction,
17 * modification or enhancement.
18 *
19 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
20 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
21 * OR ANY PART THEREOF.
22 *
23 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
24 * or profits or other special, indirect and consequential damages, even if
25 * Sun has been advised of the possibility of such damages.
26 *
27 * Sun Microsystems, Inc.
28 * 2550 Garcia Avenue
29 * Mountain View, California 94043
30 */
31/*
32 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
33 */
34
35/* #ident "@(#)clnt_dg.c 1.23 94/04/22 SMI" */
36
37#if !defined(lint) && defined(SCCSIDS)
38static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro";
39#endif
40#include <sys/cdefs.h>
41__FBSDID("$FreeBSD: head/lib/libc/rpc/clnt_dg.c 99996 2002-07-14 23:14:02Z alfred $");
41__FBSDID("$FreeBSD: head/lib/libc/rpc/clnt_dg.c 105189 2002-10-15 22:28:59Z iedowse $");
42
43/*
44 * Implements a connectionless client side RPC.
45 */
46
47#include "namespace.h"
48#include "reentrant.h"
49#include <sys/poll.h>
50#include <sys/types.h>
49#include <sys/types.h>
50#include <sys/event.h>
51#include <sys/time.h>
52#include <sys/socket.h>
53#include <sys/ioctl.h>
54#include <arpa/inet.h>
55#include <rpc/rpc.h>
56#include <errno.h>
57#include <stdlib.h>
58#include <string.h>
59#include <signal.h>
60#include <unistd.h>
61#include <err.h>
62#include "un-namespace.h"
63#include "rpc_com.h"
64
65
66#define RPC_MAX_BACKOFF 30 /* seconds */
67
68
69static struct clnt_ops *clnt_dg_ops(void);
70static bool_t time_not_ok(struct timeval *);
71static enum clnt_stat clnt_dg_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
72 xdrproc_t, void *, struct timeval);
73static void clnt_dg_geterr(CLIENT *, struct rpc_err *);
74static bool_t clnt_dg_freeres(CLIENT *, xdrproc_t, void *);
75static void clnt_dg_abort(CLIENT *);
76static bool_t clnt_dg_control(CLIENT *, u_int, void *);
77static void clnt_dg_destroy(CLIENT *);
78static int __rpc_timeval_to_msec(struct timeval *);
79
80
81
82
83/*
84 * This machinery implements per-fd locks for MT-safety. It is not
85 * sufficient to do per-CLIENT handle locks for MT-safety because a
86 * user may create more than one CLIENT handle with the same fd behind
 87 * it. Therefore, we allocate an array of flags (dg_fd_locks), protected
88 * by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
 89 * similarly protected. Dg_fd_lock[fd] == 1 => a call is active on some
90 * CLIENT handle created for that fd.
91 * The current implementation holds locks across the entire RPC and reply,
92 * including retransmissions. Yes, this is silly, and as soon as this
93 * code is proven to work, this should be the first thing fixed. One step
94 * at a time.
95 */
96static int *dg_fd_locks;
97extern mutex_t clnt_fd_lock;
98static cond_t *dg_cv;
99#define release_fd_lock(fd, mask) { \
100 mutex_lock(&clnt_fd_lock); \
101 dg_fd_locks[fd] = 0; \
102 mutex_unlock(&clnt_fd_lock); \
103 thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL); \
102 thr_sigsetmask(SIG_SETMASK, &(mask), NULL); \
104 cond_signal(&dg_cv[fd]); \
105}
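The comment block above describes the MT-safety scheme this file uses: one busy flag and one condition variable per file descriptor, both guarded by the single clnt_fd_lock mutex, with release_fd_lock() clearing the flag and waking any waiter. A minimal standalone sketch of that acquire/release pattern, using plain pthreads primitives instead of the reentrant.h wrappers and hypothetical names (acquire_fd_lock, fd_busy), might look like:

	/* Sketch only: per-fd lock built from one mutex plus one condvar per fd. */
	#include <pthread.h>

	#define MAXFD	1024

	static pthread_mutex_t fd_lock_mtx = PTHREAD_MUTEX_INITIALIZER;
	static int fd_busy[MAXFD];		/* 1 => a call is active on that fd */
	static pthread_cond_t fd_cv[MAXFD];	/* assumed initialized via pthread_cond_init() */

	static void
	acquire_fd_lock(int fd)
	{
		pthread_mutex_lock(&fd_lock_mtx);
		while (fd_busy[fd])
			pthread_cond_wait(&fd_cv[fd], &fd_lock_mtx);
		fd_busy[fd] = 1;
		pthread_mutex_unlock(&fd_lock_mtx);
	}

	static void
	release_fd_lock_sketch(int fd)
	{
		pthread_mutex_lock(&fd_lock_mtx);
		fd_busy[fd] = 0;
		pthread_mutex_unlock(&fd_lock_mtx);
		pthread_cond_signal(&fd_cv[fd]);
	}

The real code additionally blocks all signals around the critical sections (the sigfillset/thr_sigsetmask pairs below) and, as the comment concedes, keeps the flag set for the whole RPC including retransmissions.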
106
107static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";
108
109/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */
110
111/*
112 * Private data kept per client handle
113 */
114struct cu_data {
115 int cu_fd; /* connections fd */
116 bool_t cu_closeit; /* opened by library */
117 struct sockaddr_storage cu_raddr; /* remote address */
118 int cu_rlen;
119 struct timeval cu_wait; /* retransmit interval */
120 struct timeval cu_total; /* total time for the call */
121 struct rpc_err cu_error;
122 XDR cu_outxdrs;
123 u_int cu_xdrpos;
124 u_int cu_sendsz; /* send size */
125 char *cu_outbuf;
126 u_int cu_recvsz; /* recv size */
127 struct pollfd pfdp;
128 int cu_async;
129 int cu_connect; /* Use connect(). */
130 int cu_connected; /* Have done connect(). */
129 struct kevent cu_kin;
130 int cu_kq;
131 char cu_inbuf[1];
132};
133
134/*
135 * Connectionless client creation returns with client handle parameters.
136 * Default options are set, which the user can change using clnt_control().
137 * fd should be open and bound.
138 * NB: The rpch->cl_auth is initialized to null authentication.
139 * Caller may wish to set this to something more useful.
140 *
141 * sendsz and recvsz are the maximum allowable packet sizes that can be
142 * sent and received. Normally they are the same, but they can be
143 * changed to improve the program efficiency and buffer allocation.
144 * If they are 0, use the transport default.
145 *
146 * If svcaddr is NULL, returns NULL.
147 */
148CLIENT *
149clnt_dg_create(fd, svcaddr, program, version, sendsz, recvsz)
150 int fd; /* open file descriptor */
151 const struct netbuf *svcaddr; /* servers address */
152 rpcprog_t program; /* program number */
153 rpcvers_t version; /* version number */
154 u_int sendsz; /* buffer recv size */
155 u_int recvsz; /* buffer send size */
156{
157 CLIENT *cl = NULL; /* client handle */
158 struct cu_data *cu = NULL; /* private data */
159 struct timeval now;
160 struct rpc_msg call_msg;
161 sigset_t mask;
162 sigset_t newmask;
163 struct __rpc_sockinfo si;
164 int one = 1;
165
166 sigfillset(&newmask);
167 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
168 mutex_lock(&clnt_fd_lock);
169 if (dg_fd_locks == (int *) NULL) {
170 int cv_allocsz;
171 size_t fd_allocsz;
172 int dtbsize = __rpc_dtbsize();
173
174 fd_allocsz = dtbsize * sizeof (int);
175 dg_fd_locks = (int *) mem_alloc(fd_allocsz);
176 if (dg_fd_locks == (int *) NULL) {
177 mutex_unlock(&clnt_fd_lock);
178 thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
179 goto err1;
180 } else
181 memset(dg_fd_locks, '\0', fd_allocsz);
182
183 cv_allocsz = dtbsize * sizeof (cond_t);
184 dg_cv = (cond_t *) mem_alloc(cv_allocsz);
185 if (dg_cv == (cond_t *) NULL) {
186 mem_free(dg_fd_locks, fd_allocsz);
187 dg_fd_locks = (int *) NULL;
188 mutex_unlock(&clnt_fd_lock);
189 thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
190 goto err1;
191 } else {
192 int i;
193
194 for (i = 0; i < dtbsize; i++)
195 cond_init(&dg_cv[i], 0, (void *) 0);
196 }
197 }
198
199 mutex_unlock(&clnt_fd_lock);
200 thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
201
202 if (svcaddr == NULL) {
203 rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
204 return (NULL);
205 }
206
207 if (!__rpc_fd2sockinfo(fd, &si)) {
208 rpc_createerr.cf_stat = RPC_TLIERROR;
209 rpc_createerr.cf_error.re_errno = 0;
210 return (NULL);
211 }
212 /*
213 * Find the receive and the send size
214 */
215 sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
216 recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
217 if ((sendsz == 0) || (recvsz == 0)) {
218 rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
219 rpc_createerr.cf_error.re_errno = 0;
220 return (NULL);
221 }
222
223 if ((cl = mem_alloc(sizeof (CLIENT))) == NULL)
224 goto err1;
225 /*
226 * Should be multiple of 4 for XDR.
227 */
228 sendsz = ((sendsz + 3) / 4) * 4;
229 recvsz = ((recvsz + 3) / 4) * 4;
230 cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
231 if (cu == NULL)
232 goto err1;
233 (void) memcpy(&cu->cu_raddr, svcaddr->buf, (size_t)svcaddr->len);
234 cu->cu_rlen = svcaddr->len;
235 cu->cu_outbuf = &cu->cu_inbuf[recvsz];
236 /* Other values can also be set through clnt_control() */
237 cu->cu_wait.tv_sec = 15; /* heuristically chosen */
238 cu->cu_wait.tv_usec = 0;
239 cu->cu_total.tv_sec = -1;
240 cu->cu_total.tv_usec = -1;
241 cu->cu_sendsz = sendsz;
242 cu->cu_recvsz = recvsz;
243 cu->cu_async = FALSE;
244 cu->cu_connect = FALSE;
245 cu->cu_connected = FALSE;
246 (void) gettimeofday(&now, NULL);
247 call_msg.rm_xid = __RPC_GETXID(&now);
248 call_msg.rm_call.cb_prog = program;
249 call_msg.rm_call.cb_vers = version;
250 xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, sendsz, XDR_ENCODE);
251 if (! xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
252 rpc_createerr.cf_stat = RPC_CANTENCODEARGS; /* XXX */
253 rpc_createerr.cf_error.re_errno = 0;
254 goto err2;
255 }
256 cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));
257
258 /* XXX fvdl - do we still want this? */
259#if 0
260 (void)bindresvport_sa(fd, (struct sockaddr *)svcaddr->buf);
261#endif
262 _ioctl(fd, FIONBIO, (char *)(void *)&one);
263
264 /*
265 * By default, closeit is always FALSE. It is the user's responsibility
266 * to close the fd; alternatively, the user may use clnt_control
267 * to let clnt_destroy do it for him/her.
268 */
269 cu->cu_closeit = FALSE;
270 cu->cu_fd = fd;
271 cl->cl_ops = clnt_dg_ops();
272 cl->cl_private = (caddr_t)(void *)cu;
273 cl->cl_auth = authnone_create();
274 cl->cl_tp = NULL;
275 cl->cl_netid = NULL;
276 cu->pfdp.fd = cu->cu_fd;
277 cu->pfdp.events = POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND;
276 cu->cu_kq = -1;
277 EV_SET(&cu->cu_kin, cu->cu_fd, EVFILT_READ, EV_ADD, 0, 0, 0);
278 return (cl);
279err1:
280 warnx(mem_err_clnt_dg);
281 rpc_createerr.cf_stat = RPC_SYSTEMERROR;
282 rpc_createerr.cf_error.re_errno = errno;
283err2:
284 if (cl) {
285 mem_free(cl, sizeof (CLIENT));
286 if (cu)
287 mem_free(cu, sizeof (*cu) + sendsz + recvsz);
288 }
289 return (NULL);
290}
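The creation comment above spells out the contract: the descriptor must already be open (and normally bound), passing 0 for sendsz/recvsz selects the transport default, the new handle starts with null authentication, and the fd is not closed by the library unless CLSET_FD_CLOSE is requested. A hedged usage sketch; MYPROG, MYVERS, MYPROC and ping_server() are placeholders, and error handling is trimmed:

	/* Sketch: one null-procedure call over a UDP socket via clnt_dg_create(). */
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <rpc/rpc.h>

	#define MYPROG	((rpcprog_t)0x20000099)	/* hypothetical program number */
	#define MYVERS	((rpcvers_t)1)
	#define MYPROC	((rpcproc_t)0)		/* procedure 0: the null procedure */

	static enum clnt_stat
	ping_server(struct sockaddr_in *srv)	/* srv assumed already filled in */
	{
		struct timeval tv = { 25, 0 };
		struct netbuf nb;
		enum clnt_stat stat;
		CLIENT *clnt;
		int fd;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		nb.buf = srv;
		nb.len = nb.maxlen = sizeof(*srv);
		clnt = clnt_dg_create(fd, &nb, MYPROG, MYVERS, 0, 0);
		if (clnt == NULL)
			return (RPC_FAILED);
		clnt_control(clnt, CLSET_FD_CLOSE, NULL);	/* let clnt_destroy() close fd */
		stat = clnt_call(clnt, MYPROC, (xdrproc_t)xdr_void, NULL,
		    (xdrproc_t)xdr_void, NULL, tv);
		clnt_destroy(clnt);
		return (stat);
	}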
291
292static enum clnt_stat
293clnt_dg_call(cl, proc, xargs, argsp, xresults, resultsp, utimeout)
294 CLIENT *cl; /* client handle */
295 rpcproc_t proc; /* procedure number */
296 xdrproc_t xargs; /* xdr routine for args */
297 void *argsp; /* pointer to args */
298 xdrproc_t xresults; /* xdr routine for results */
299 void *resultsp; /* pointer to results */
300 struct timeval utimeout; /* seconds to wait before giving up */
301{
302 struct cu_data *cu = (struct cu_data *)cl->cl_private;
303 XDR *xdrs;
304 size_t outlen;
304 size_t outlen = 0;
305 struct rpc_msg reply_msg;
306 XDR reply_xdrs;
307 struct timeval time_waited;
308 bool_t ok;
309 int nrefreshes = 2; /* number of times to refresh cred */
310 struct timeval timeout;
311 struct timeval retransmit_time;
312 struct timeval startime, curtime;
313 int firsttimeout = 1;
311 struct timeval next_sendtime, starttime, time_waited, tv;
312 struct timespec ts;
313 struct kevent kv;
314 struct sockaddr *sa;
315 sigset_t mask;
316 sigset_t newmask;
317 socklen_t inlen, salen;
318 ssize_t recvlen = 0;
319 int rpc_lock_value;
319 int kin_len, n, rpc_lock_value;
320 u_int32_t xid;
321
322 outlen = 0;
323 sigfillset(&newmask);
324 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
325 mutex_lock(&clnt_fd_lock);
326 while (dg_fd_locks[cu->cu_fd])
327 cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
328 if (__isthreaded)
329 rpc_lock_value = 1;
330 else
331 rpc_lock_value = 0;
332 dg_fd_locks[cu->cu_fd] = rpc_lock_value;
333 mutex_unlock(&clnt_fd_lock);
334 if (cu->cu_total.tv_usec == -1) {
335 timeout = utimeout; /* use supplied timeout */
336 } else {
337 timeout = cu->cu_total; /* use default timeout */
338 }
339
340 if (cu->cu_connect && !cu->cu_connected) {
341 if (_connect(cu->cu_fd, (struct sockaddr *)&cu->cu_raddr,
342 cu->cu_rlen) < 0) {
343 release_fd_lock(cu->cu_fd, mask);
344 cu->cu_error.re_errno = errno;
345 return (cu->cu_error.re_status = RPC_CANTSEND);
344 cu->cu_error.re_status = RPC_CANTSEND;
345 goto out;
346 }
347 cu->cu_connected = 1;
348 }
349 if (cu->cu_connected) {
350 sa = NULL;
351 salen = 0;
352 } else {
353 sa = (struct sockaddr *)&cu->cu_raddr;
354 salen = cu->cu_rlen;
355 }
356 time_waited.tv_sec = 0;
357 time_waited.tv_usec = 0;
358 retransmit_time = cu->cu_wait;
358 retransmit_time = next_sendtime = cu->cu_wait;
359 gettimeofday(&starttime, NULL);
359
361 /* Clean up in case the last call ended in a longjmp(3) call. */
362 if (cu->cu_kq >= 0)
363 _close(cu->cu_kq);
364 if ((cu->cu_kq = kqueue()) < 0) {
365 cu->cu_error.re_errno = errno;
366 cu->cu_error.re_status = RPC_CANTSEND;
367 goto out;
368 }
369 kin_len = 1;
370
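The added block above, together with the EV_SET() call in clnt_dg_create() and the _kevent() wait further down, is the new waiting machinery: a fresh kqueue per call, the socket registered once with an EVFILT_READ filter, and the remaining time handed to kevent() as a timespec. A self-contained sketch of that wait-for-readable pattern, with a hypothetical wait_readable() helper that is not part of this file:

	/* Sketch: block until fd is readable using kqueue/kevent, with a timeout. */
	#include <sys/types.h>
	#include <sys/event.h>
	#include <sys/time.h>
	#include <errno.h>
	#include <unistd.h>

	/* Returns 1 when fd is readable, 0 on timeout, -1 on error (errno set). */
	static int
	wait_readable(int fd, const struct timespec *ts)
	{
		struct kevent kin, kout;
		int kq, n;

		if ((kq = kqueue()) < 0)
			return (-1);
		EV_SET(&kin, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
		n = kevent(kq, &kin, 1, &kout, 1, ts);	/* register and wait in one call */
		close(kq);
		if (n > 0 && (kout.flags & EV_ERROR)) {
			errno = kout.data;
			return (-1);
		}
		return (n);
	}

Unlike this sketch, clnt_dg_call() keeps the kqueue open across retransmissions of a single call and only closes it at the out: label (and again defensively in clnt_dg_destroy()).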
360call_again:
361 xdrs = &(cu->cu_outxdrs);
362 if (cu->cu_async == TRUE && xargs == NULL)
363 goto get_reply;
364 xdrs->x_op = XDR_ENCODE;
365 XDR_SETPOS(xdrs, cu->cu_xdrpos);
366 /*
367 * the transaction is the first thing in the out buffer
368 * XXX Yes, and it's in network byte order, so we should
369 * be careful when we increment it, shouldn't we.
370 */
371 xid = ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf));
372 xid++;
373 *(u_int32_t *)(void *)(cu->cu_outbuf) = htonl(xid);
374
375 if ((! XDR_PUTINT32(xdrs, &proc)) ||
376 (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
377 (! (*xargs)(xdrs, argsp))) {
378 release_fd_lock(cu->cu_fd, mask);
379 return (cu->cu_error.re_status = RPC_CANTENCODEARGS);
389 cu->cu_error.re_status = RPC_CANTENCODEARGS;
390 goto out;
380 }
381 outlen = (size_t)XDR_GETPOS(xdrs);
382
383send_again:
384 if (_sendto(cu->cu_fd, cu->cu_outbuf, outlen, 0, sa, salen) != outlen) {
385 cu->cu_error.re_errno = errno;
386 release_fd_lock(cu->cu_fd, mask);
387 return (cu->cu_error.re_status = RPC_CANTSEND);
397 cu->cu_error.re_status = RPC_CANTSEND;
398 goto out;
388 }
389
390 /*
391 * Hack to provide rpc-based message passing
392 */
393 if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
394 release_fd_lock(cu->cu_fd, mask);
395 return (cu->cu_error.re_status = RPC_TIMEDOUT);
405 cu->cu_error.re_status = RPC_TIMEDOUT;
406 goto out;
396 }
397
398get_reply:
399
400 /*
401 * sub-optimal code appears here because we have
402 * some clock time to spare while the packets are in flight.
403 * (We assume that this is actually only executed once.)
404 */
405 reply_msg.acpted_rply.ar_verf = _null_auth;
406 reply_msg.acpted_rply.ar_results.where = resultsp;
407 reply_msg.acpted_rply.ar_results.proc = xresults;
408
409
410 for (;;) {
411 switch (_poll(&cu->pfdp, 1,
412 __rpc_timeval_to_msec(&retransmit_time))) {
413 case 0:
414 time_waited.tv_sec += retransmit_time.tv_sec;
415 time_waited.tv_usec += retransmit_time.tv_usec;
416 while (time_waited.tv_usec >= 1000000) {
417 time_waited.tv_sec++;
418 time_waited.tv_usec -= 1000000;
419 }
420 /* update retransmit_time */
421 if (retransmit_time.tv_sec < RPC_MAX_BACKOFF) {
422 retransmit_time.tv_usec *= 2;
423 retransmit_time.tv_sec *= 2;
424 while (retransmit_time.tv_usec >= 1000000) {
425 retransmit_time.tv_sec++;
426 retransmit_time.tv_usec -= 1000000;
427 }
428 }
421 /* Decide how long to wait. */
422 if (timercmp(&next_sendtime, &timeout, <))
423 timersub(&next_sendtime, &time_waited, &tv);
424 else
425 timersub(&timeout, &time_waited, &tv);
426 if (tv.tv_sec < 0 || tv.tv_usec < 0)
427 tv.tv_sec = tv.tv_usec = 0;
428 TIMEVAL_TO_TIMESPEC(&tv, &ts);
429
430 if ((time_waited.tv_sec < timeout.tv_sec) ||
431 ((time_waited.tv_sec == timeout.tv_sec) &&
432 (time_waited.tv_usec < timeout.tv_usec)))
433 goto send_again;
434 release_fd_lock(cu->cu_fd, mask);
435 return (cu->cu_error.re_status = RPC_TIMEDOUT);
430 n = _kevent(cu->cu_kq, &cu->cu_kin, kin_len, &kv, 1, &ts);
431 /* We don't need to register the event again. */
432 kin_len = 0;
436
437 case -1:
438 if (errno == EBADF) {
434 if (n == 1) {
435 if (kv.flags & EV_ERROR) {
436 cu->cu_error.re_errno = kv.data;
437 cu->cu_error.re_status = RPC_CANTRECV;
438 goto out;
439 }
440 /* We have some data now */
441 do {
442 recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
443 cu->cu_recvsz, 0, NULL, NULL);
444 } while (recvlen < 0 && errno == EINTR);
445 if (recvlen < 0 && errno != EWOULDBLOCK) {
439 cu->cu_error.re_errno = errno;
440 release_fd_lock(cu->cu_fd, mask);
441 return (cu->cu_error.re_status = RPC_CANTRECV);
447 cu->cu_error.re_status = RPC_CANTRECV;
448 goto out;
442 }
443 if (errno != EINTR) {
444 errno = 0; /* reset it */
445 continue;
450 if (recvlen >= sizeof(u_int32_t) &&
451 (cu->cu_async == TRUE ||
452 *((u_int32_t *)(void *)(cu->cu_inbuf)) ==
453 *((u_int32_t *)(void *)(cu->cu_outbuf)))) {
454 /* We now assume we have the proper reply. */
455 break;
446 }
447 /* interrupted by another signal, update time_waited */
448 if (firsttimeout) {
449 /*
450 * Could have done gettimeofday before clnt_call
451 * but that means 1 more system call per each
452 * clnt_call, so do it after first time out
453 */
454 if (gettimeofday(&startime,
455 (struct timezone *) NULL) == -1) {
456 errno = 0;
457 continue;
458 }
459 firsttimeout = 0;
460 errno = 0;
461 continue;
462 };
463 if (gettimeofday(&curtime,
464 (struct timezone *) NULL) == -1) {
465 errno = 0;
466 continue;
467 };
468 time_waited.tv_sec += curtime.tv_sec - startime.tv_sec;
469 time_waited.tv_usec += curtime.tv_usec -
470 startime.tv_usec;
471 while (time_waited.tv_usec < 0) {
472 time_waited.tv_sec--;
473 time_waited.tv_usec += 1000000;
474 };
475 while (time_waited.tv_usec >= 1000000) {
476 time_waited.tv_sec++;
477 time_waited.tv_usec -= 1000000;
478 }
479 startime.tv_sec = curtime.tv_sec;
480 startime.tv_usec = curtime.tv_usec;
481 if ((time_waited.tv_sec > timeout.tv_sec) ||
482 ((time_waited.tv_sec == timeout.tv_sec) &&
483 (time_waited.tv_usec > timeout.tv_usec))) {
484 release_fd_lock(cu->cu_fd, mask);
485 return (cu->cu_error.re_status = RPC_TIMEDOUT);
486 }
487 errno = 0; /* reset it */
488 continue;
489 };
490
491 if (cu->pfdp.revents & POLLNVAL || (cu->pfdp.revents == 0)) {
457 }
458 if (n == -1 && errno != EINTR) {
459 cu->cu_error.re_errno = errno;
492 cu->cu_error.re_status = RPC_CANTRECV;
493 /*
494 * Note: we're faking errno here because we
495 * previously would have expected _poll() to
496 * return -1 with errno EBADF. Poll(BA_OS)
497 * returns 0 and sets the POLLNVAL revents flag
498 * instead.
499 */
500 cu->cu_error.re_errno = errno = EBADF;
501 release_fd_lock(cu->cu_fd, mask);
502 return (-1);
461 goto out;
503 }
463 gettimeofday(&tv, NULL);
464 timersub(&tv, &starttime, &time_waited);
504
505 /* We have some data now */
506 do {
507 if (errno == EINTR) {
508 /*
509 * Must make sure errno was not already
510 * EINTR in case _recvfrom() returns -1.
511 */
512 errno = 0;
513 }
514 recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
515 cu->cu_recvsz, 0, NULL, NULL);
516 } while (recvlen < 0 && errno == EINTR);
517 if (recvlen < 0) {
518 if (errno == EWOULDBLOCK)
519 continue;
520 cu->cu_error.re_errno = errno;
521 release_fd_lock(cu->cu_fd, mask);
522 return (cu->cu_error.re_status = RPC_CANTRECV);
466 /* Check for timeout. */
467 if (timercmp(&time_waited, &timeout, >)) {
468 cu->cu_error.re_status = RPC_TIMEDOUT;
469 goto out;
523 }
524 if (recvlen < sizeof (u_int32_t))
525 continue;
526 /* see if reply transaction id matches sent id */
527 if (cu->cu_async == FALSE &&
528 *((u_int32_t *)(void *)(cu->cu_inbuf)) !=
529 *((u_int32_t *)(void *)(cu->cu_outbuf)))
530 continue;
531 /* we now assume we have the proper reply */
532 break;
471
472 /* Retransmit if necessary. */
473 if (timercmp(&time_waited, &next_sendtime, >)) {
474 /* update retransmit_time */
475 if (retransmit_time.tv_sec < RPC_MAX_BACKOFF)
476 timeradd(&retransmit_time, &retransmit_time,
477 &retransmit_time);
478 timeradd(&next_sendtime, &retransmit_time,
479 &next_sendtime);
480 goto send_again;
481 }
533 }
534 inlen = (socklen_t)recvlen;
535
536 /*
537 * now decode and validate the response
538 */
539
540 xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int)inlen, XDR_DECODE);
541 ok = xdr_replymsg(&reply_xdrs, &reply_msg);
542 /* XDR_DESTROY(&reply_xdrs); save a few cycles on noop destroy */
543 if (ok) {
544 if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
545 (reply_msg.acpted_rply.ar_stat == SUCCESS))
546 cu->cu_error.re_status = RPC_SUCCESS;
547 else
548 _seterr_reply(&reply_msg, &(cu->cu_error));
549
550 if (cu->cu_error.re_status == RPC_SUCCESS) {
551 if (! AUTH_VALIDATE(cl->cl_auth,
552 &reply_msg.acpted_rply.ar_verf)) {
553 cu->cu_error.re_status = RPC_AUTHERROR;
554 cu->cu_error.re_why = AUTH_INVALIDRESP;
555 }
556 if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
557 xdrs->x_op = XDR_FREE;
558 (void) xdr_opaque_auth(xdrs,
559 &(reply_msg.acpted_rply.ar_verf));
560 }
561 } /* end successful completion */
562 /*
563 * If unsuccessful AND error is an authentication error
564 * then refresh credentials and try again, else break
565 */
566 else if (cu->cu_error.re_status == RPC_AUTHERROR)
567 /* maybe our credentials need to be refreshed ... */
568 if (nrefreshes > 0 &&
569 AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
570 nrefreshes--;
571 goto call_again;
572 }
573 /* end of unsuccessful completion */
574 } /* end of valid reply message */
575 else {
576 cu->cu_error.re_status = RPC_CANTDECODERES;
577
578 }
528out:
529 if (cu->cu_kq >= 0)
530 _close(cu->cu_kq);
531 cu->cu_kq = -1;
579 release_fd_lock(cu->cu_fd, mask);
580 return (cu->cu_error.re_status);
581}
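The rewritten wait loop does its time bookkeeping entirely with the <sys/time.h> timer macros: time_waited is recomputed from gettimeofday() after every wakeup, timercmp() checks it against both the overall timeout and next_sendtime, and timeradd() doubles the retransmit interval until it reaches RPC_MAX_BACKOFF. A compact, hypothetical illustration of that doubling schedule (the 120-second total is made up):

	/* Sketch: the exponential retransmit schedule, printed for illustration. */
	#include <stdio.h>
	#include <sys/time.h>

	#define MAX_BACKOFF_SEC	30		/* mirrors RPC_MAX_BACKOFF */

	int
	main(void)
	{
		struct timeval wait = { 15, 0 };	/* initial interval, as in cu_wait */
		struct timeval next = wait;
		struct timeval total = { 120, 0 };	/* pretend overall timeout */

		while (timercmp(&next, &total, <)) {
			printf("retransmit after %ld s\n", (long)next.tv_sec);
			if (wait.tv_sec < MAX_BACKOFF_SEC)
				timeradd(&wait, &wait, &wait);	/* double, capped by this test */
			timeradd(&next, &wait, &next);
		}
		return (0);
	}

With the 15-second default this prints retransmissions at 15, 45, 75 and 105 seconds, matching the doubling-up-to-30-seconds behaviour of the poll(2) loop that this revision replaces.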
582
583static void
584clnt_dg_geterr(cl, errp)
585 CLIENT *cl;
586 struct rpc_err *errp;
587{
588 struct cu_data *cu = (struct cu_data *)cl->cl_private;
589
590 *errp = cu->cu_error;
591}
592
593static bool_t
594clnt_dg_freeres(cl, xdr_res, res_ptr)
595 CLIENT *cl;
596 xdrproc_t xdr_res;
597 void *res_ptr;
598{
599 struct cu_data *cu = (struct cu_data *)cl->cl_private;
600 XDR *xdrs = &(cu->cu_outxdrs);
601 bool_t dummy;
602 sigset_t mask;
603 sigset_t newmask;
604
605 sigfillset(&newmask);
606 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
607 mutex_lock(&clnt_fd_lock);
608 while (dg_fd_locks[cu->cu_fd])
609 cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
610 xdrs->x_op = XDR_FREE;
611 dummy = (*xdr_res)(xdrs, res_ptr);
612 mutex_unlock(&clnt_fd_lock);
613 thr_sigsetmask(SIG_SETMASK, &mask, NULL);
614 cond_signal(&dg_cv[cu->cu_fd]);
615 return (dummy);
616}
617
618/*ARGSUSED*/
619static void
620clnt_dg_abort(h)
621 CLIENT *h;
622{
623}
624
625static bool_t
626clnt_dg_control(cl, request, info)
627 CLIENT *cl;
628 u_int request;
629 void *info;
630{
631 struct cu_data *cu = (struct cu_data *)cl->cl_private;
632 struct netbuf *addr;
633 sigset_t mask;
634 sigset_t newmask;
635 int rpc_lock_value;
636
637 sigfillset(&newmask);
638 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
639 mutex_lock(&clnt_fd_lock);
640 while (dg_fd_locks[cu->cu_fd])
641 cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
642 if (__isthreaded)
643 rpc_lock_value = 1;
644 else
645 rpc_lock_value = 0;
646 dg_fd_locks[cu->cu_fd] = rpc_lock_value;
647 mutex_unlock(&clnt_fd_lock);
648 switch (request) {
649 case CLSET_FD_CLOSE:
650 cu->cu_closeit = TRUE;
651 release_fd_lock(cu->cu_fd, mask);
652 return (TRUE);
653 case CLSET_FD_NCLOSE:
654 cu->cu_closeit = FALSE;
655 release_fd_lock(cu->cu_fd, mask);
656 return (TRUE);
657 }
658
659 /* for other requests which use info */
660 if (info == NULL) {
661 release_fd_lock(cu->cu_fd, mask);
662 return (FALSE);
663 }
664 switch (request) {
665 case CLSET_TIMEOUT:
666 if (time_not_ok((struct timeval *)info)) {
667 release_fd_lock(cu->cu_fd, mask);
668 return (FALSE);
669 }
670 cu->cu_total = *(struct timeval *)info;
671 break;
672 case CLGET_TIMEOUT:
673 *(struct timeval *)info = cu->cu_total;
674 break;
675 case CLGET_SERVER_ADDR: /* Give him the fd address */
676 /* Now obsolete. Only for backward compatibility */
677 (void) memcpy(info, &cu->cu_raddr, (size_t)cu->cu_rlen);
678 break;
679 case CLSET_RETRY_TIMEOUT:
680 if (time_not_ok((struct timeval *)info)) {
681 release_fd_lock(cu->cu_fd, mask);
682 return (FALSE);
683 }
684 cu->cu_wait = *(struct timeval *)info;
685 break;
686 case CLGET_RETRY_TIMEOUT:
687 *(struct timeval *)info = cu->cu_wait;
688 break;
689 case CLGET_FD:
690 *(int *)info = cu->cu_fd;
691 break;
692 case CLGET_SVC_ADDR:
693 addr = (struct netbuf *)info;
694 addr->buf = &cu->cu_raddr;
695 addr->len = cu->cu_rlen;
696 addr->maxlen = sizeof cu->cu_raddr;
697 break;
698 case CLSET_SVC_ADDR: /* set to new address */
699 addr = (struct netbuf *)info;
700 if (addr->len < sizeof cu->cu_raddr) {
701 release_fd_lock(cu->cu_fd, mask);
702 return (FALSE);
703 }
704 (void) memcpy(&cu->cu_raddr, addr->buf, addr->len);
705 cu->cu_rlen = addr->len;
706 break;
707 case CLGET_XID:
708 /*
709 * use the knowledge that xid is the
710 * first element in the call structure *.
711 * This will get the xid of the PREVIOUS call
712 */
713 *(u_int32_t *)info =
714 ntohl(*(u_int32_t *)(void *)cu->cu_outbuf);
715 break;
716
717 case CLSET_XID:
718 /* This will set the xid of the NEXT call */
719 *(u_int32_t *)(void *)cu->cu_outbuf =
720 htonl(*(u_int32_t *)info - 1);
721 /* decrement by 1 as clnt_dg_call() increments once */
722 break;
723
724 case CLGET_VERS:
725 /*
726 * This RELIES on the information that, in the call body,
727 * the version number field is the fifth field from the
728 * beginning of the RPC header. MUST be changed if the
729 * call_struct is changed
730 */
731 *(u_int32_t *)info =
732 ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
733 4 * BYTES_PER_XDR_UNIT));
734 break;
735
736 case CLSET_VERS:
737 *(u_int32_t *)(void *)(cu->cu_outbuf + 4 * BYTES_PER_XDR_UNIT)
738 = htonl(*(u_int32_t *)info);
739 break;
740
741 case CLGET_PROG:
742 /*
743 * This RELIES on the information that, in the call body,
744 * the program number field is the fourth field from the
745 * beginning of the RPC header. MUST be changed if the
746 * call_struct is changed
747 */
748 *(u_int32_t *)info =
749 ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
750 3 * BYTES_PER_XDR_UNIT));
751 break;
752
753 case CLSET_PROG:
754 *(u_int32_t *)(void *)(cu->cu_outbuf + 3 * BYTES_PER_XDR_UNIT)
755 = htonl(*(u_int32_t *)info);
756 break;
757 case CLSET_ASYNC:
758 cu->cu_async = *(int *)info;
759 break;
760 case CLSET_CONNECT:
761 cu->cu_connect = *(int *)info;
762 break;
763 default:
764 release_fd_lock(cu->cu_fd, mask);
765 return (FALSE);
766 }
767 release_fd_lock(cu->cu_fd, mask);
768 return (TRUE);
769}
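clnt_dg_control() above is the tuning interface for a handle: total and retry timeouts, server address, the CLSET_CONNECT/CLSET_ASYNC flags, and direct access to the XID, program and version words of the pre-encoded call header (hence the comments about field offsets in BYTES_PER_XDR_UNIT). A hedged usage sketch, assuming clnt came from clnt_dg_create() and tune_handle() is a hypothetical helper:

	/* Sketch: adjusting a datagram client handle through clnt_control(). */
	#include <rpc/rpc.h>

	static void
	tune_handle(CLIENT *clnt)
	{
		struct timeval retry = { 2, 0 };	/* retransmit every 2 seconds */
		struct timeval total = { 10, 0 };	/* per-call total timeout */
		int use_connect = 1;
		u_int32_t xid;

		clnt_control(clnt, CLSET_RETRY_TIMEOUT, (char *)&retry);
		clnt_control(clnt, CLSET_TIMEOUT, (char *)&total);	/* overrides clnt_call()'s timeout */
		clnt_control(clnt, CLSET_CONNECT, (char *)&use_connect); /* connect(2) the UDP socket */
		clnt_control(clnt, CLGET_XID, (char *)&xid);	/* XID of the previous call */
		(void)xid;
	}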
770
771static void
772clnt_dg_destroy(cl)
773 CLIENT *cl;
774{
775 struct cu_data *cu = (struct cu_data *)cl->cl_private;
776 int cu_fd = cu->cu_fd;
777 sigset_t mask;
778 sigset_t newmask;
779
780 sigfillset(&newmask);
781 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
782 mutex_lock(&clnt_fd_lock);
783 while (dg_fd_locks[cu_fd])
784 cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
785 if (cu->cu_closeit)
786 (void)_close(cu_fd);
740 if (cu->cu_kq >= 0)
741 _close(cu->cu_kq);
787 XDR_DESTROY(&(cu->cu_outxdrs));
788 mem_free(cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
789 if (cl->cl_netid && cl->cl_netid[0])
790 mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
791 if (cl->cl_tp && cl->cl_tp[0])
792 mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
793 mem_free(cl, sizeof (CLIENT));
794 mutex_unlock(&clnt_fd_lock);
795 thr_sigsetmask(SIG_SETMASK, &mask, NULL);
796 cond_signal(&dg_cv[cu_fd]);
797}
798
799static struct clnt_ops *
800clnt_dg_ops()
801{
802 static struct clnt_ops ops;
803 extern mutex_t ops_lock;
804 sigset_t mask;
805 sigset_t newmask;
806
807/* VARIABLES PROTECTED BY ops_lock: ops */
808
809 sigfillset(&newmask);
810 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
811 mutex_lock(&ops_lock);
812 if (ops.cl_call == NULL) {
813 ops.cl_call = clnt_dg_call;
814 ops.cl_abort = clnt_dg_abort;
815 ops.cl_geterr = clnt_dg_geterr;
816 ops.cl_freeres = clnt_dg_freeres;
817 ops.cl_destroy = clnt_dg_destroy;
818 ops.cl_control = clnt_dg_control;
819 }
820 mutex_unlock(&ops_lock);
821 thr_sigsetmask(SIG_SETMASK, &mask, NULL);
822 return (&ops);
823}
824
825/*
826 * Make sure that the time is not garbage. -1 value is allowed.
827 */
828static bool_t
829time_not_ok(t)
830 struct timeval *t;
831{
832 return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
833 t->tv_usec < -1 || t->tv_usec > 1000000);
834}
835
836
837/*
838 * Convert from timevals (used by select) to milliseconds (used by poll).
839 */
840static int
841__rpc_timeval_to_msec(t)
842 struct timeval *t;
843{
844 int t1, tmp;
845
846 /*
847 * We're really returning t->tv_sec * 1000 + (t->tv_usec / 1000)
848 * but try to do so efficiently. Note: 1000 = 1024 - 16 - 8.
849 */
850 tmp = (int)t->tv_sec << 3;
851 t1 = -tmp;
852 t1 += t1 << 1;
853 t1 += tmp << 7;
854 if (t->tv_usec)
855 t1 += (int)(t->tv_usec / 1000);
856
857 return (t1);
858}
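The shift sequence above is just t->tv_sec * 1000 computed as 1024*s - 16*s - 8*s (the "1000 = 1024 - 16 - 8" note): tmp = s << 3 is 8s, t1 = -tmp followed by t1 += t1 << 1 gives -24s, and adding tmp << 7 = 1024s leaves exactly 1000s, after which the microseconds are folded in. A small hypothetical check of the shifted form against the plain multiplication (it relies on two's-complement shifts of a negative value, as the original does):

	/* Sketch: verify the shift-based timeval-to-msec conversion. */
	#include <assert.h>
	#include <sys/time.h>

	static int
	tv_to_msec_plain(const struct timeval *t)
	{
		return ((int)t->tv_sec * 1000 + (int)(t->tv_usec / 1000));
	}

	static int
	tv_to_msec_shifted(const struct timeval *t)
	{
		int tmp = (int)t->tv_sec << 3;		/* 8 * sec */
		int t1 = -tmp;				/* -8 * sec */

		t1 += t1 << 1;				/* -8*sec + -16*sec = -24 * sec */
		t1 += tmp << 7;				/* + 1024*sec => 1000 * sec */
		return (t1 + (int)(t->tv_usec / 1000));
	}

	int
	main(void)
	{
		struct timeval t = { 15, 250000 };	/* 15.25 s => 15250 ms */

		assert(tv_to_msec_plain(&t) == 15250);
		assert(tv_to_msec_plain(&t) == tv_to_msec_shifted(&t));
		return (0);
	}

The helper itself disappears in revision 105189: with kevent() the remaining time is passed directly as a timespec, so no millisecond conversion is needed.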