/*	$NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $	*/
/* $FreeBSD: head/lib/libc/rpc/clnt_dg.c 74879 2001-03-27 21:27:33Z wpaul $ */

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part. Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California 94043
 */
/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident	"@(#)clnt_dg.c	1.23	94/04/22 SMI" */

#if 0
#if !defined(lint) && defined(SCCSIDS)
static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#endif

/*
 * Implements a connectionless client side RPC.
 */

#include "reentrant.h"
#include "namespace.h"
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <rpc/rpc.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <err.h>
#include "un-namespace.h"
#include "rpc_com.h"

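/* In clnt_dg_call(), the retransmit interval stops doubling once it reaches this bound. */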
#define	RPC_MAX_BACKOFF		30 /* seconds */


static struct clnt_ops *clnt_dg_ops __P((void));
static bool_t time_not_ok __P((struct timeval *));
static enum clnt_stat clnt_dg_call __P((CLIENT *, rpcproc_t, xdrproc_t, caddr_t,
	    xdrproc_t, caddr_t, struct timeval));
static void clnt_dg_geterr __P((CLIENT *, struct rpc_err *));
static bool_t clnt_dg_freeres __P((CLIENT *, xdrproc_t, caddr_t));
static void clnt_dg_abort __P((CLIENT *));
static bool_t clnt_dg_control __P((CLIENT *, u_int, char *));
static void clnt_dg_destroy __P((CLIENT *));
static int __rpc_timeval_to_msec __P((struct timeval *));


/*
 * This machinery implements per-fd locks for MT-safety. It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it. Therefore, we allocate an array of flags (dg_fd_locks), protected
 * by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
 * similarly protected. dg_fd_locks[fd] == 1 => a call is active on some
 * CLIENT handle created for that fd.
 * The current implementation holds locks across the entire RPC and reply,
 * including retransmissions. Yes, this is silly, and as soon as this
 * code is proven to work, this should be the first thing fixed. One step
 * at a time.
 */
static int *dg_fd_locks;
extern mutex_t clnt_fd_lock;
static cond_t *dg_cv;
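/*
 * Drop the per-fd busy flag, restore the caller's signal mask, and wake
 * one thread waiting on this fd's condition variable.
 */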
#define	release_fd_lock(fd, mask) {		\
	mutex_lock(&clnt_fd_lock);		\
	if (__isthreaded)			\
		dg_fd_locks[fd] = 0;		\
	mutex_unlock(&clnt_fd_lock);		\
	thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);	\
	cond_signal(&dg_cv[fd]);		\
}

static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";

/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */

/*
 * Private data kept per client handle
 */
struct cu_data {
	int cu_fd;			/* connection's fd */
	bool_t cu_closeit;		/* opened by library */
	struct sockaddr_storage cu_raddr; /* remote address */
	int cu_rlen;			/* length of cu_raddr */
	struct timeval cu_wait;		/* retransmit interval */
	struct timeval cu_total;	/* total time for the call */
	struct rpc_err cu_error;	/* error status of the last call */
	XDR cu_outxdrs;			/* XDR stream over cu_outbuf */
	u_int cu_xdrpos;		/* position just past the call header */
	u_int cu_sendsz;		/* send size */
	char *cu_outbuf;		/* output buffer, placed after cu_inbuf */
	u_int cu_recvsz;		/* recv size */
	struct pollfd pfdp;		/* pollfd used to wait for a reply */
	int cu_async;			/* set via CLSET_ASYNC */
	char cu_inbuf[1];		/* recv buffer; recvsz + sendsz bytes allocated here */
};

/*
 * Connectionless client creation returns a client handle with the given
 * parameters.
 * Default options are set, which the user can change using clnt_control().
 * The fd should be open and bound.
 * NB: The rpch->cl_auth is initialized to null authentication.
 *	Caller may wish to set this to something more useful.
 *
 * sendsz and recvsz are the maximum allowable packet sizes that can be
 * sent and received. Normally they are the same, but they can be
 * changed to improve the program efficiency and buffer allocation.
 * If they are 0, use the transport default.
 *
 * If svcaddr is NULL, returns NULL.
 */
CLIENT *
clnt_dg_create(fd, svcaddr, program, version, sendsz, recvsz)
	int fd;				/* open file descriptor */
	const struct netbuf *svcaddr;	/* server's address */
	rpcprog_t program;		/* program number */
	rpcvers_t version;		/* version number */
	u_int sendsz;			/* buffer send size */
	u_int recvsz;			/* buffer recv size */
{
	CLIENT *cl = NULL;		/* client handle */
	struct cu_data *cu = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	sigset_t mask;
	sigset_t newmask;
	struct __rpc_sockinfo si;
	int one = 1;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (dg_fd_locks == (int *) NULL) {
		int cv_allocsz;
		size_t fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		dg_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (dg_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else
			memset(dg_fd_locks, '\0', fd_allocsz);

		cv_allocsz = dtbsize * sizeof (cond_t);
		dg_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (dg_cv == (cond_t *) NULL) {
			mem_free(dg_fd_locks, fd_allocsz);
			dg_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&dg_cv[i], 0, (void *) 0);
		}
	}

	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);

	if (svcaddr == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
		return (NULL);
	}

	if (!__rpc_fd2sockinfo(fd, &si)) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	if ((sendsz == 0) || (recvsz == 0)) {
		rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}

	if ((cl = mem_alloc(sizeof (CLIENT))) == NULL)
		goto err1;
	/*
	 * Should be multiple of 4 for XDR.
	 */
	sendsz = ((sendsz + 3) / 4) * 4;
	recvsz = ((recvsz + 3) / 4) * 4;
	cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
	if (cu == NULL)
		goto err1;
	(void) memcpy(&cu->cu_raddr, svcaddr->buf, (size_t)svcaddr->len);
	cu->cu_rlen = svcaddr->len;
	cu->cu_outbuf = &cu->cu_inbuf[recvsz];
	/* Other values can also be set through clnt_control() */
	cu->cu_wait.tv_sec = 15; /* heuristically chosen */
	cu->cu_wait.tv_usec = 0;
	cu->cu_total.tv_sec = -1;
	cu->cu_total.tv_usec = -1;
	cu->cu_sendsz = sendsz;
	cu->cu_recvsz = recvsz;
	cu->cu_async = FALSE;
	(void) gettimeofday(&now, NULL);
	call_msg.rm_xid = __RPC_GETXID(&now);
	call_msg.rm_call.cb_prog = program;
	call_msg.rm_call.cb_vers = version;
	xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, sendsz, XDR_ENCODE);
	if (! xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
		rpc_createerr.cf_stat = RPC_CANTENCODEARGS; /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		goto err2;
	}
	cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));

	/* XXX fvdl - do we still want this? */
#if 0
	(void)bindresvport_sa(fd, (struct sockaddr *)svcaddr->buf);
#endif
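	/* Put the socket in non-blocking mode; clnt_dg_call() collects replies via poll(). */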
	_ioctl(fd, FIONBIO, (char *)(void *)&one);

	/*
	 * By default, closeit is always FALSE. It is the user's responsibility
	 * to close the fd, or the user may use clnt_control to let
	 * clnt_destroy do it.
	 */
	cu->cu_closeit = FALSE;
	cu->cu_fd = fd;
	cl->cl_ops = clnt_dg_ops();
	cl->cl_private = (caddr_t)(void *)cu;
	cl->cl_auth = authnone_create();
	cl->cl_tp = NULL;
	cl->cl_netid = NULL;
	cu->pfdp.fd = cu->cu_fd;
	cu->pfdp.events = POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND;
	return (cl);
err1:
	warnx(mem_err_clnt_dg);
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;
err2:
	if (cl) {
		mem_free(cl, sizeof (CLIENT));
		if (cu)
			mem_free(cu, sizeof (*cu) + sendsz + recvsz);
	}
	return (NULL);
}
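
/*
 * Illustrative use (the program/version numbers here are hypothetical):
 * given a bound datagram socket fd and the server's address in a
 * struct netbuf svc, a caller might do
 *
 *	CLIENT *cl = clnt_dg_create(fd, &svc, MYPROG, MYVERS, 0, 0);
 *	if (cl == NULL)
 *		clnt_pcreateerror("clnt_dg_create");
 *
 * and then issue calls through CLNT_CALL()/clnt_call(), which dispatch
 * to clnt_dg_call() below.
 */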

static enum clnt_stat
clnt_dg_call(cl, proc, xargs, argsp, xresults, resultsp, utimeout)
	CLIENT *cl;			/* client handle */
	rpcproc_t proc;			/* procedure number */
	xdrproc_t xargs;		/* xdr routine for args */
	caddr_t argsp;			/* pointer to args */
	xdrproc_t xresults;		/* xdr routine for results */
	caddr_t resultsp;		/* pointer to results */
	struct timeval utimeout;	/* seconds to wait before giving up */
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs;
	size_t outlen;
	struct rpc_msg reply_msg;
	XDR reply_xdrs;
	struct timeval time_waited;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	struct timeval retransmit_time;
	struct timeval startime, curtime;
	int firsttimeout = 1;
	int dtbsize = __rpc_dtbsize();
	sigset_t mask;
	sigset_t newmask;
	socklen_t fromlen, inlen;
	ssize_t recvlen = 0;
	int rpc_lock_value;
	u_int32_t xid;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (cu->cu_total.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = cu->cu_total;	/* use default timeout */
	}

	time_waited.tv_sec = 0;
	time_waited.tv_usec = 0;
	retransmit_time = cu->cu_wait;

call_again:
	xdrs = &(cu->cu_outxdrs);
	if (cu->cu_async == TRUE && xargs == NULL)
		goto get_reply;
	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, cu->cu_xdrpos);
	/*
	 * The transaction id is the first thing in the out buffer.
	 * XXX Yes, and it's in network byte order, so we should
	 * be careful when we increment it, shouldn't we.
	 */
	xid = ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf));
	xid++;
	*(u_int32_t *)(void *)(cu->cu_outbuf) = htonl(xid);

	if ((! XDR_PUTINT32(xdrs, &proc)) ||
	    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
	    (! (*xargs)(xdrs, argsp))) {
		release_fd_lock(cu->cu_fd, mask);
		return (cu->cu_error.re_status = RPC_CANTENCODEARGS);
	}
	outlen = (size_t)XDR_GETPOS(xdrs);

send_again:
	if (_sendto(cu->cu_fd, cu->cu_outbuf, outlen, 0,
	    (struct sockaddr *)(void *)&cu->cu_raddr, (socklen_t)cu->cu_rlen)
	    != outlen) {
		cu->cu_error.re_errno = errno;
		release_fd_lock(cu->cu_fd, mask);
		return (cu->cu_error.re_status = RPC_CANTSEND);
	}

	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(cu->cu_fd, mask);
		return (cu->cu_error.re_status = RPC_TIMEDOUT);
	}

get_reply:

	/*
	 * sub-optimal code appears here because we have
	 * some clock time to spare while the packets are in flight.
	 * (We assume that this is actually only executed once.)
	 */
	reply_msg.acpted_rply.ar_verf = _null_auth;
	reply_msg.acpted_rply.ar_results.where = resultsp;
	reply_msg.acpted_rply.ar_results.proc = xresults;

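	/*
	 * Wait for the reply, retransmitting on each poll() timeout with the
	 * interval doubling (up to RPC_MAX_BACKOFF seconds) until the total
	 * timeout is exhausted.
	 */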
	for (;;) {
		switch (_poll(&cu->pfdp, 1,
		    __rpc_timeval_to_msec(&retransmit_time))) {
		case 0:
			time_waited.tv_sec += retransmit_time.tv_sec;
			time_waited.tv_usec += retransmit_time.tv_usec;
			while (time_waited.tv_usec >= 1000000) {
				time_waited.tv_sec++;
				time_waited.tv_usec -= 1000000;
			}
			/* update retransmit_time */
			if (retransmit_time.tv_sec < RPC_MAX_BACKOFF) {
				retransmit_time.tv_usec *= 2;
				retransmit_time.tv_sec *= 2;
				while (retransmit_time.tv_usec >= 1000000) {
					retransmit_time.tv_sec++;
					retransmit_time.tv_usec -= 1000000;
				}
			}

			if ((time_waited.tv_sec < timeout.tv_sec) ||
			    ((time_waited.tv_sec == timeout.tv_sec) &&
			    (time_waited.tv_usec < timeout.tv_usec)))
				goto send_again;
			release_fd_lock(cu->cu_fd, mask);
			return (cu->cu_error.re_status = RPC_TIMEDOUT);

		case -1:
			if (errno == EBADF) {
				cu->cu_error.re_errno = errno;
				release_fd_lock(cu->cu_fd, mask);
				return (cu->cu_error.re_status = RPC_CANTRECV);
			}
			if (errno != EINTR) {
				errno = 0; /* reset it */
				continue;
			}
			/* interrupted by another signal, update time_waited */
			if (firsttimeout) {
				/*
				 * We could have done gettimeofday before
				 * clnt_call, but that would mean one more
				 * system call per clnt_call, so do it after
				 * the first timeout instead.
				 */
				if (gettimeofday(&startime,
				    (struct timezone *) NULL) == -1) {
					errno = 0;
					continue;
				}
				firsttimeout = 0;
				errno = 0;
				continue;
			}
			if (gettimeofday(&curtime,
			    (struct timezone *) NULL) == -1) {
				errno = 0;
				continue;
			}
			time_waited.tv_sec += curtime.tv_sec - startime.tv_sec;
			time_waited.tv_usec += curtime.tv_usec -
			    startime.tv_usec;
			while (time_waited.tv_usec < 0) {
				time_waited.tv_sec--;
				time_waited.tv_usec += 1000000;
			}
			while (time_waited.tv_usec >= 1000000) {
				time_waited.tv_sec++;
				time_waited.tv_usec -= 1000000;
			}
			startime.tv_sec = curtime.tv_sec;
			startime.tv_usec = curtime.tv_usec;
			if ((time_waited.tv_sec > timeout.tv_sec) ||
			    ((time_waited.tv_sec == timeout.tv_sec) &&
			    (time_waited.tv_usec > timeout.tv_usec))) {
				release_fd_lock(cu->cu_fd, mask);
				return (cu->cu_error.re_status = RPC_TIMEDOUT);
			}
			errno = 0; /* reset it */
			continue;
		}

		if (cu->pfdp.revents & POLLNVAL || (cu->pfdp.revents == 0)) {
			cu->cu_error.re_status = RPC_CANTRECV;
			/*
			 * Note: we're faking errno here because we
			 * previously would have expected _poll() to
			 * return -1 with errno EBADF. Poll(BA_OS)
			 * returns 0 and sets the POLLNVAL revents flag
			 * instead.
			 */
			cu->cu_error.re_errno = errno = EBADF;
			release_fd_lock(cu->cu_fd, mask);
			return (cu->cu_error.re_status);
		}

		/* We have some data now */
		do {
			if (errno == EINTR) {
				/*
				 * Must make sure errno was not already
				 * EINTR in case _recvfrom() returns -1.
				 */
				errno = 0;
			}
			fromlen = sizeof (struct sockaddr_storage);
			recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
			    cu->cu_recvsz, 0,
			    (struct sockaddr *)(void *)&cu->cu_raddr,
			    &fromlen);
		} while (recvlen < 0 && errno == EINTR);
		if (recvlen < 0) {
			if (errno == EWOULDBLOCK)
				continue;
			cu->cu_error.re_errno = errno;
			release_fd_lock(cu->cu_fd, mask);
			return (cu->cu_error.re_status = RPC_CANTRECV);
		}
		if (recvlen < sizeof (u_int32_t))
			continue;
		/* see if reply transaction id matches sent id */
		if (cu->cu_async == FALSE &&
		    *((u_int32_t *)(void *)(cu->cu_inbuf)) !=
		    *((u_int32_t *)(void *)(cu->cu_outbuf)))
			continue;
		/* we now assume we have the proper reply */
		break;
	}
	inlen = (socklen_t)recvlen;

	/*
	 * now decode and validate the response
	 */

	xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int)inlen, XDR_DECODE);
	ok = xdr_replymsg(&reply_xdrs, &reply_msg);
	/* XDR_DESTROY(&reply_xdrs);	save a few cycles on noop destroy */
	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
		    (reply_msg.acpted_rply.ar_stat == SUCCESS))
			cu->cu_error.re_status = RPC_SUCCESS;
		else
			_seterr_reply(&reply_msg, &(cu->cu_error));

		if (cu->cu_error.re_status == RPC_SUCCESS) {
			if (! AUTH_VALIDATE(cl->cl_auth,
			    &reply_msg.acpted_rply.ar_verf)) {
				cu->cu_error.re_status = RPC_AUTHERROR;
				cu->cu_error.re_why = AUTH_INVALIDRESP;
			}
			if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
				xdrs->x_op = XDR_FREE;
				(void) xdr_opaque_auth(xdrs,
				    &(reply_msg.acpted_rply.ar_verf));
			}
		}	/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (cu->cu_error.re_status == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
				nrefreshes--;
				goto call_again;
			}
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		cu->cu_error.re_status = RPC_CANTDECODERES;

	}
	release_fd_lock(cu->cu_fd, mask);
	return (cu->cu_error.re_status);
}

static void
clnt_dg_geterr(cl, errp)
	CLIENT *cl;
	struct rpc_err *errp;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;

	*errp = cu->cu_error;
}

static bool_t
clnt_dg_freeres(cl, xdr_res, res_ptr)
	CLIENT *cl;
	xdrproc_t xdr_res;
	caddr_t res_ptr;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs = &(cu->cu_outxdrs);
	bool_t dummy;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu->cu_fd]);
	return (dummy);
}

/*ARGSUSED*/
static void
clnt_dg_abort(h)
	CLIENT *h;
{
}
608
609static bool_t
610clnt_dg_control(cl, request, info)
611 CLIENT *cl;
612 u_int request;
613 char *info;
614{
615 struct cu_data *cu = (struct cu_data *)cl->cl_private;
616 struct netbuf *addr;
617 sigset_t mask;
618 sigset_t newmask;
619 int rpc_lock_value;
620
621 sigfillset(&newmask);
622 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
623 mutex_lock(&clnt_fd_lock);
624 while (dg_fd_locks[cu->cu_fd])
625 cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
626 if (__isthreaded)
627 rpc_lock_value = 1;
628 else
629 rpc_lock_value = 0;
630 dg_fd_locks[cu->cu_fd] = rpc_lock_value;
631 mutex_unlock(&clnt_fd_lock);
632 switch (request) {
633 case CLSET_FD_CLOSE:
634 cu->cu_closeit = TRUE;
635 release_fd_lock(cu->cu_fd, mask);
636 return (TRUE);
637 case CLSET_FD_NCLOSE:
638 cu->cu_closeit = FALSE;
639 release_fd_lock(cu->cu_fd, mask);
640 return (TRUE);
641 }

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)(void *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_total = *(struct timeval *)(void *)info;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)(void *)info = cu->cu_total;
		break;
	case CLGET_SERVER_ADDR:		/* Give him the fd address */
		/* Now obsolete. Only for backward compatibility */
		(void) memcpy(info, &cu->cu_raddr, (size_t)cu->cu_rlen);
		break;
	case CLSET_RETRY_TIMEOUT:
		if (time_not_ok((struct timeval *)(void *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_wait = *(struct timeval *)(void *)info;
		break;
	case CLGET_RETRY_TIMEOUT:
		*(struct timeval *)(void *)info = cu->cu_wait;
		break;
	case CLGET_FD:
		*(int *)(void *)info = cu->cu_fd;
		break;
	case CLGET_SVC_ADDR:
		addr = (struct netbuf *)(void *)info;
		addr->buf = &cu->cu_raddr;
		addr->len = cu->cu_rlen;
		addr->maxlen = sizeof cu->cu_raddr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		addr = (struct netbuf *)(void *)info;
		if (addr->len < sizeof cu->cu_raddr) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		(void) memcpy(&cu->cu_raddr, addr->buf, addr->len);
		cu->cu_rlen = addr->len;
		break;
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure.
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)(void *)info =
		    ntohl(*(u_int32_t *)(void *)cu->cu_outbuf);
		break;

	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		*(u_int32_t *)(void *)cu->cu_outbuf =
		    htonl(*(u_int32_t *)(void *)info - 1);
		/* decrement by 1 as clnt_dg_call() increments once */
		break;

	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)(void *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 4 * BYTES_PER_XDR_UNIT)
		    = htonl(*(u_int32_t *)(void *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)(void *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 3 * BYTES_PER_XDR_UNIT)
		    = htonl(*(u_int32_t *)(void *)info);
		break;
	case CLSET_ASYNC:
		cu->cu_async = *(int *)(void *)info;
		break;
	default:
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	release_fd_lock(cu->cu_fd, mask);
	return (TRUE);
}

static void
clnt_dg_destroy(cl)
	CLIENT *cl;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	int cu_fd = cu->cu_fd;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu_fd])
		cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
	if (cu->cu_closeit)
		(void)_close(cu_fd);
	XDR_DESTROY(&(cu->cu_outxdrs));
	mem_free(cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof (CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu_fd]);
}

static struct clnt_ops *
clnt_dg_ops()
{
	static struct clnt_ops ops;
	extern mutex_t ops_lock;
	sigset_t mask;
	sigset_t newmask;

/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_dg_call;
		ops.cl_abort = clnt_dg_abort;
		ops.cl_geterr = clnt_dg_geterr;
		ops.cl_freeres = clnt_dg_freeres;
		ops.cl_destroy = clnt_dg_destroy;
		ops.cl_control = clnt_dg_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage. -1 value is allowed.
 */
static bool_t
time_not_ok(t)
	struct timeval *t;
{
	return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
	    t->tv_usec < -1 || t->tv_usec > 1000000);
}


/*
 * Convert from timevals (used by select) to milliseconds (used by poll).
 */
static int
__rpc_timeval_to_msec(t)
	struct timeval *t;
{
	int t1, tmp;

	/*
	 * We're really returning t->tv_sec * 1000 + (t->tv_usec / 1000)
	 * but try to do so efficiently. Note: 1000 = 1024 - 16 - 8.
	 */
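	/*
	 * With s = tv_sec: tmp = s << 3 = 8s; t1 starts at -8s; t1 += t1 << 1
	 * makes it -24s; t1 += tmp << 7 adds 1024s, giving 1000s. The
	 * microseconds are then added as tv_usec / 1000.
	 */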
	tmp = (int)t->tv_sec << 3;
	t1 = -tmp;
	t1 += t1 << 1;
	t1 += tmp << 7;
	if (t->tv_usec)
		t1 += (int)(t->tv_usec / 1000);

	return (t1);
}