/*	$NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $	*/
/*	$FreeBSD: head/lib/libc/rpc/clnt_dg.c 92941 2002-03-22 09:22:15Z obrien $ */

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 */
/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident	"@(#)clnt_dg.c	1.23	94/04/22 SMI" */

#if 0
#if !defined(lint) && defined(SCCSIDS)
static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#endif

/*
 * Implements a connectionless client side RPC.
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <rpc/rpc.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <err.h>
#include "un-namespace.h"
#include "rpc_com.h"

#define	RPC_MAX_BACKOFF		30 /* seconds */

static struct clnt_ops *clnt_dg_ops(void);
static bool_t time_not_ok(struct timeval *);
static enum clnt_stat clnt_dg_call(CLIENT *, rpcproc_t, xdrproc_t, caddr_t,
	    xdrproc_t, caddr_t, struct timeval);
static void clnt_dg_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_dg_freeres(CLIENT *, xdrproc_t, caddr_t);
static void clnt_dg_abort(CLIENT *);
static bool_t clnt_dg_control(CLIENT *, u_int, char *);
static void clnt_dg_destroy(CLIENT *);
static int __rpc_timeval_to_msec(struct timeval *);

/*
 *	This machinery implements per-fd locks for MT-safety.  It is not
 *	sufficient to do per-CLIENT handle locks for MT-safety because a
 *	user may create more than one CLIENT handle with the same fd behind
 *	it.  Therefore, we allocate an array of flags (dg_fd_locks), protected
 *	by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
 *	similarly protected.  dg_fd_locks[fd] == 1 => a call is active on some
 *	CLIENT handle created for that fd.
 *	The current implementation holds locks across the entire RPC and reply,
 *	including retransmissions.  Yes, this is silly, and as soon as this
 *	code is proven to work, this should be the first thing fixed.  One step
 *	at a time.
 */
static int	*dg_fd_locks;
extern mutex_t clnt_fd_lock;
static cond_t	*dg_cv;
#define	release_fd_lock(fd, mask) {		\
	mutex_lock(&clnt_fd_lock);	\
	dg_fd_locks[fd] = 0;		\
	mutex_unlock(&clnt_fd_lock);	\
	thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);	\
	cond_signal(&dg_cv[fd]);	\
}
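
/*
 * Illustrative sketch (not compiled): the acquire side that pairs with the
 * release_fd_lock() macro above, following the pattern used by clnt_dg_call()
 * and clnt_dg_control() below.  All names are the ones defined in this file.
 */
#if 0
	sigset_t mask, newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);	/* block signals */
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[fd])		/* wait until no call is active on fd */
		cond_wait(&dg_cv[fd], &clnt_fd_lock);
	dg_fd_locks[fd] = __isthreaded ? 1 : 0;	/* mark fd as busy */
	mutex_unlock(&clnt_fd_lock);

	/* ... issue the RPC ... */

	release_fd_lock(fd, mask);	/* unlock, restore signal mask, wake waiters */
#endif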

static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";

/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */

/*
 * Private data kept per client handle
 */
struct cu_data {
	int			cu_fd;		/* connection's fd */
	bool_t			cu_closeit;	/* opened by library */
	struct sockaddr_storage	cu_raddr;	/* remote address */
	int			cu_rlen;
	struct timeval		cu_wait;	/* retransmit interval */
	struct timeval		cu_total;	/* total time for the call */
	struct rpc_err		cu_error;
	XDR			cu_outxdrs;
	u_int			cu_xdrpos;
	u_int			cu_sendsz;	/* send size */
	char			*cu_outbuf;
	u_int			cu_recvsz;	/* recv size */
	struct pollfd		pfdp;
	int			cu_async;
	int			cu_connect;	/* Use connect(). */
	int			cu_connected;	/* Have done connect(). */
	char			cu_inbuf[1];
};
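
/*
 * Layout note: cu_inbuf[1] is a variable-size trailer.  clnt_dg_create()
 * allocates sizeof (*cu) + sendsz + recvsz in a single block, uses the first
 * recvsz bytes after the struct as the receive buffer, and points cu_outbuf
 * at cu_inbuf + recvsz for the send buffer.
 */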

/*
 * Connectionless client creation returns with client handle parameters.
 * Default options are set, which the user can change using clnt_control().
 * fd should be open and bound.
 * NB: The rpch->cl_auth is initialized to null authentication.
 * 	Caller may wish to set this to something more useful.
 *
 * sendsz and recvsz are the maximum allowable packet sizes that can be
 * sent and received. Normally they are the same, but they can be
 * changed to improve the program efficiency and buffer allocation.
 * If they are 0, use the transport default.
 *
 * If svcaddr is NULL, returns NULL.
 */
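
/*
 * Illustrative sketch (not compiled) of direct use with an already-open UDP
 * socket.  Applications normally arrive here via clnt_create() or
 * clnt_tli_create(); SOME_PROG, SOME_VERS and SOME_PROC are placeholders and
 * sin is assumed to hold the server's IP address and port.
 */
#if 0
	struct sockaddr_in sin;			/* server address, filled in by caller */
	struct netbuf svcaddr;
	struct timeval retry = { 5, 0 };	/* retransmit every 5 seconds */
	struct timeval total = { 25, 0 };	/* give up after 25 seconds */
	CLIENT *cl;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);	/* caller opens (and may bind) the fd */
	svcaddr.maxlen = svcaddr.len = sizeof sin;
	svcaddr.buf = (char *)&sin;
	cl = clnt_dg_create(fd, &svcaddr, SOME_PROG, SOME_VERS, 0, 0);
	if (cl != NULL) {
		clnt_control(cl, CLSET_RETRY_TIMEOUT, (char *)&retry);
		clnt_control(cl, CLSET_FD_CLOSE, NULL);	/* clnt_destroy() closes fd */
		(void) clnt_call(cl, SOME_PROC, (xdrproc_t)xdr_void, NULL,
		    (xdrproc_t)xdr_void, NULL, total);
		clnt_destroy(cl);
	}
#endif
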
CLIENT *
clnt_dg_create(fd, svcaddr, program, version, sendsz, recvsz)
	int fd;				/* open file descriptor */
	const struct netbuf *svcaddr;	/* server's address */
	rpcprog_t program;		/* program number */
	rpcvers_t version;		/* version number */
	u_int sendsz;			/* buffer send size */
	u_int recvsz;			/* buffer recv size */
{
	CLIENT *cl = NULL;		/* client handle */
	struct cu_data *cu = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	sigset_t mask;
	sigset_t newmask;
	struct __rpc_sockinfo si;
	int one = 1;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (dg_fd_locks == (int *) NULL) {
		int cv_allocsz;
		size_t fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		dg_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (dg_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else
			memset(dg_fd_locks, '\0', fd_allocsz);

		cv_allocsz = dtbsize * sizeof (cond_t);
		dg_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (dg_cv == (cond_t *) NULL) {
			mem_free(dg_fd_locks, fd_allocsz);
			dg_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&dg_cv[i], 0, (void *) 0);
		}
	}

	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);

	if (svcaddr == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
		return (NULL);
	}

	if (!__rpc_fd2sockinfo(fd, &si)) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	if ((sendsz == 0) || (recvsz == 0)) {
		rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}

	if ((cl = mem_alloc(sizeof (CLIENT))) == NULL)
		goto err1;
	/*
	 * Should be multiple of 4 for XDR.
	 */
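	/* e.g. an 8801-byte limit rounds up to 8804; 8800 is unchanged */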
	sendsz = ((sendsz + 3) / 4) * 4;
	recvsz = ((recvsz + 3) / 4) * 4;
	cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
	if (cu == NULL)
		goto err1;
	(void) memcpy(&cu->cu_raddr, svcaddr->buf, (size_t)svcaddr->len);
	cu->cu_rlen = svcaddr->len;
	cu->cu_outbuf = &cu->cu_inbuf[recvsz];
	/* Other values can also be set through clnt_control() */
	cu->cu_wait.tv_sec = 15;	/* heuristically chosen */
	cu->cu_wait.tv_usec = 0;
	cu->cu_total.tv_sec = -1;
	cu->cu_total.tv_usec = -1;
	cu->cu_sendsz = sendsz;
	cu->cu_recvsz = recvsz;
	cu->cu_async = FALSE;
	cu->cu_connect = FALSE;
	cu->cu_connected = FALSE;
	(void) gettimeofday(&now, NULL);
	call_msg.rm_xid = __RPC_GETXID(&now);
	call_msg.rm_call.cb_prog = program;
	call_msg.rm_call.cb_vers = version;
	xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, sendsz, XDR_ENCODE);
	if (! xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
		rpc_createerr.cf_stat = RPC_CANTENCODEARGS;  /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		goto err2;
	}
	cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));

	/* XXX fvdl - do we still want this? */
#if 0
	(void)bindresvport_sa(fd, (struct sockaddr *)svcaddr->buf);
#endif
	_ioctl(fd, FIONBIO, (char *)(void *)&one);

	/*
	 * By default, closeit is always FALSE. It is the user's responsibility
	 * to close the fd, or the user may use clnt_control to let
	 * clnt_destroy do it for him/her.
	 */
	cu->cu_closeit = FALSE;
	cu->cu_fd = fd;
	cl->cl_ops = clnt_dg_ops();
	cl->cl_private = (caddr_t)(void *)cu;
	cl->cl_auth = authnone_create();
	cl->cl_tp = NULL;
	cl->cl_netid = NULL;
	cu->pfdp.fd = cu->cu_fd;
	cu->pfdp.events = POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND;
	return (cl);
err1:
	warnx(mem_err_clnt_dg);
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;
err2:
	if (cl) {
		mem_free(cl, sizeof (CLIENT));
		if (cu)
			mem_free(cu, sizeof (*cu) + sendsz + recvsz);
	}
	return (NULL);
}

static enum clnt_stat
clnt_dg_call(cl, proc, xargs, argsp, xresults, resultsp, utimeout)
	CLIENT	*cl;			/* client handle */
	rpcproc_t	proc;		/* procedure number */
	xdrproc_t	xargs;		/* xdr routine for args */
	caddr_t		argsp;		/* pointer to args */
	xdrproc_t	xresults;	/* xdr routine for results */
	caddr_t		resultsp;	/* pointer to results */
	struct timeval	utimeout;	/* seconds to wait before giving up */
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs;
	size_t outlen;
	struct rpc_msg reply_msg;
	XDR reply_xdrs;
	struct timeval time_waited;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	struct timeval retransmit_time;
	struct timeval startime, curtime;
	int firsttimeout = 1;
	struct sockaddr *sa;
	sigset_t mask;
	sigset_t newmask;
	socklen_t inlen, salen;
	ssize_t recvlen = 0;
	int rpc_lock_value;
	u_int32_t xid;

	outlen = 0;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (cu->cu_total.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = cu->cu_total;	/* use default timeout */
	}

	if (cu->cu_connect && !cu->cu_connected) {
		if (_connect(cu->cu_fd, (struct sockaddr *)&cu->cu_raddr,
		    cu->cu_rlen) < 0) {
			release_fd_lock(cu->cu_fd, mask);
			cu->cu_error.re_errno = errno;
			return (cu->cu_error.re_status = RPC_CANTSEND);
		}
		cu->cu_connected = 1;
	}
	if (cu->cu_connected) {
		sa = NULL;
		salen = 0;
	} else {
		sa = (struct sockaddr *)&cu->cu_raddr;
		salen = cu->cu_rlen;
	}
	time_waited.tv_sec = 0;
	time_waited.tv_usec = 0;
	retransmit_time = cu->cu_wait;

call_again:
	xdrs = &(cu->cu_outxdrs);
	if (cu->cu_async == TRUE && xargs == NULL)
		goto get_reply;
	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, cu->cu_xdrpos);
	/*
	 * The transaction id (xid) is the first thing in the out buffer.
	 * It is stored in network byte order, so convert it before
	 * incrementing and convert it back when storing it.
	 */
	xid = ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf));
	xid++;
	*(u_int32_t *)(void *)(cu->cu_outbuf) = htonl(xid);

	if ((! XDR_PUTINT32(xdrs, &proc)) ||
	    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
	    (! (*xargs)(xdrs, argsp))) {
		release_fd_lock(cu->cu_fd, mask);
		return (cu->cu_error.re_status = RPC_CANTENCODEARGS);
	}
	outlen = (size_t)XDR_GETPOS(xdrs);

send_again:
	if (_sendto(cu->cu_fd, cu->cu_outbuf, outlen, 0, sa, salen) != outlen) {
		cu->cu_error.re_errno = errno;
		release_fd_lock(cu->cu_fd, mask);
		return (cu->cu_error.re_status = RPC_CANTSEND);
	}

	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(cu->cu_fd, mask);
		return (cu->cu_error.re_status = RPC_TIMEDOUT);
	}
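	/*
	 * Usage note (sketch): a caller that only wants to fire off a request
	 * can pass a zero timeout, e.g.
	 *
	 *	struct timeval zero = { 0, 0 };
	 *	(void) clnt_call(cl, SOME_PROC, (xdrproc_t)xdr_args, &args,
	 *	    (xdrproc_t)xdr_void, NULL, zero);
	 *
	 * which sends the datagram and returns RPC_TIMEDOUT immediately,
	 * never polling for a reply.  SOME_PROC and xdr_args are placeholders.
	 */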

get_reply:

	/*
	 * sub-optimal code appears here because we have
	 * some clock time to spare while the packets are in flight.
	 * (We assume that this is actually only executed once.)
	 */
	reply_msg.acpted_rply.ar_verf = _null_auth;
	reply_msg.acpted_rply.ar_results.where = resultsp;
	reply_msg.acpted_rply.ar_results.proc = xresults;

	for (;;) {
		switch (_poll(&cu->pfdp, 1,
		    __rpc_timeval_to_msec(&retransmit_time))) {
		case 0:
			time_waited.tv_sec += retransmit_time.tv_sec;
			time_waited.tv_usec += retransmit_time.tv_usec;
			while (time_waited.tv_usec >= 1000000) {
				time_waited.tv_sec++;
				time_waited.tv_usec -= 1000000;
			}
			/* update retransmit_time */
			if (retransmit_time.tv_sec < RPC_MAX_BACKOFF) {
				retransmit_time.tv_usec *= 2;
				retransmit_time.tv_sec *= 2;
				while (retransmit_time.tv_usec >= 1000000) {
					retransmit_time.tv_sec++;
					retransmit_time.tv_usec -= 1000000;
				}
			}
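			/*
			 * With the default cu_wait of 15 seconds the poll
			 * interval doubles once (15s, then 30s); once tv_sec
			 * has reached RPC_MAX_BACKOFF it is no longer doubled,
			 * so subsequent retries stay at 30s.
			 */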

			if ((time_waited.tv_sec < timeout.tv_sec) ||
			    ((time_waited.tv_sec == timeout.tv_sec) &&
				(time_waited.tv_usec < timeout.tv_usec)))
				goto send_again;
			release_fd_lock(cu->cu_fd, mask);
			return (cu->cu_error.re_status = RPC_TIMEDOUT);

		case -1:
			if (errno == EBADF) {
				cu->cu_error.re_errno = errno;
				release_fd_lock(cu->cu_fd, mask);
				return (cu->cu_error.re_status = RPC_CANTRECV);
			}
			if (errno != EINTR) {
				errno = 0; /* reset it */
				continue;
			}
			/* interrupted by another signal, update time_waited */
			if (firsttimeout) {
				/*
				 * Could have done gettimeofday before clnt_call
				 * but that means 1 more system call per each
				 * clnt_call, so do it after first time out
				 */
				if (gettimeofday(&startime,
					(struct timezone *) NULL) == -1) {
					errno = 0;
					continue;
				}
				firsttimeout = 0;
				errno = 0;
				continue;
			}
			if (gettimeofday(&curtime,
				(struct timezone *) NULL) == -1) {
				errno = 0;
				continue;
			}
			time_waited.tv_sec += curtime.tv_sec - startime.tv_sec;
			time_waited.tv_usec += curtime.tv_usec -
							startime.tv_usec;
			while (time_waited.tv_usec < 0) {
				time_waited.tv_sec--;
				time_waited.tv_usec += 1000000;
			}
			while (time_waited.tv_usec >= 1000000) {
				time_waited.tv_sec++;
				time_waited.tv_usec -= 1000000;
			}
			startime.tv_sec = curtime.tv_sec;
			startime.tv_usec = curtime.tv_usec;
			if ((time_waited.tv_sec > timeout.tv_sec) ||
				((time_waited.tv_sec == timeout.tv_sec) &&
				(time_waited.tv_usec > timeout.tv_usec))) {
				release_fd_lock(cu->cu_fd, mask);
				return (cu->cu_error.re_status = RPC_TIMEDOUT);
			}
			errno = 0; /* reset it */
			continue;
		}

		if (cu->pfdp.revents & POLLNVAL || (cu->pfdp.revents == 0)) {
			cu->cu_error.re_status = RPC_CANTRECV;
			/*
			 *	Note:  we're faking errno here because we
			 *	previously would have expected _poll() to
			 *	return -1 with errno EBADF.  Poll(BA_OS)
			 *	returns 0 and sets the POLLNVAL revents flag
			 *	instead.
			 */
			cu->cu_error.re_errno = errno = EBADF;
			release_fd_lock(cu->cu_fd, mask);
			/* was "return (-1)", which is not a valid enum clnt_stat */
			return (cu->cu_error.re_status);
		}

		/* We have some data now */
		do {
			if (errno == EINTR) {
				/*
				 * Must make sure errno was not already
				 * EINTR in case _recvfrom() returns -1.
				 */
				errno = 0;
			}
			recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
			    cu->cu_recvsz, 0, NULL, NULL);
		} while (recvlen < 0 && errno == EINTR);
		if (recvlen < 0) {
			if (errno == EWOULDBLOCK)
				continue;
			cu->cu_error.re_errno = errno;
			release_fd_lock(cu->cu_fd, mask);
			return (cu->cu_error.re_status = RPC_CANTRECV);
		}
		if (recvlen < sizeof (u_int32_t))
			continue;
		/* see if reply transaction id matches sent id */
		if (cu->cu_async == FALSE &&
		    *((u_int32_t *)(void *)(cu->cu_inbuf)) !=
		    *((u_int32_t *)(void *)(cu->cu_outbuf)))
			continue;
		/* we now assume we have the proper reply */
		break;
	}
	inlen = (socklen_t)recvlen;

	/*
	 * now decode and validate the response
	 */

	xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int)inlen, XDR_DECODE);
	ok = xdr_replymsg(&reply_xdrs, &reply_msg);
	/* XDR_DESTROY(&reply_xdrs);	save a few cycles on noop destroy */
	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
			(reply_msg.acpted_rply.ar_stat == SUCCESS))
			cu->cu_error.re_status = RPC_SUCCESS;
		else
			_seterr_reply(&reply_msg, &(cu->cu_error));

		if (cu->cu_error.re_status == RPC_SUCCESS) {
			if (! AUTH_VALIDATE(cl->cl_auth,
					    &reply_msg.acpted_rply.ar_verf)) {
				cu->cu_error.re_status = RPC_AUTHERROR;
				cu->cu_error.re_why = AUTH_INVALIDRESP;
			}
			if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
				xdrs->x_op = XDR_FREE;
				(void) xdr_opaque_auth(xdrs,
					&(reply_msg.acpted_rply.ar_verf));
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (cu->cu_error.re_status == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
				nrefreshes--;
				goto call_again;
			}
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		cu->cu_error.re_status = RPC_CANTDECODERES;
	}
	release_fd_lock(cu->cu_fd, mask);
	return (cu->cu_error.re_status);
}

static void
clnt_dg_geterr(cl, errp)
	CLIENT *cl;
	struct rpc_err *errp;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;

	*errp = cu->cu_error;
}

static bool_t
clnt_dg_freeres(cl, xdr_res, res_ptr)
	CLIENT *cl;
	xdrproc_t xdr_res;
	caddr_t res_ptr;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs = &(cu->cu_outxdrs);
	bool_t dummy;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu->cu_fd]);
	return (dummy);
}

/*ARGSUSED*/
static void
clnt_dg_abort(h)
	CLIENT *h;
{
}

static bool_t
clnt_dg_control(cl, request, info)
	CLIENT *cl;
	u_int request;
	char *info;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	struct netbuf *addr;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	switch (request) {
	case CLSET_FD_CLOSE:
		cu->cu_closeit = TRUE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		cu->cu_closeit = FALSE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)(void *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_total = *(struct timeval *)(void *)info;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)(void *)info = cu->cu_total;
		break;
	case CLGET_SERVER_ADDR:		/* Give him the fd address */
		/* Now obsolete. Only for backward compatibility */
		(void) memcpy(info, &cu->cu_raddr, (size_t)cu->cu_rlen);
		break;
	case CLSET_RETRY_TIMEOUT:
		if (time_not_ok((struct timeval *)(void *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_wait = *(struct timeval *)(void *)info;
		break;
	case CLGET_RETRY_TIMEOUT:
		*(struct timeval *)(void *)info = cu->cu_wait;
		break;
	case CLGET_FD:
		*(int *)(void *)info = cu->cu_fd;
		break;
	case CLGET_SVC_ADDR:
		addr = (struct netbuf *)(void *)info;
		addr->buf = &cu->cu_raddr;
		addr->len = cu->cu_rlen;
		addr->maxlen = sizeof cu->cu_raddr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		addr = (struct netbuf *)(void *)info;
		if (addr->len < sizeof cu->cu_raddr) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		(void) memcpy(&cu->cu_raddr, addr->buf, addr->len);
		cu->cu_rlen = addr->len;
		break;
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure.
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)(void *)info =
		    ntohl(*(u_int32_t *)(void *)cu->cu_outbuf);
		break;

	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		*(u_int32_t *)(void *)cu->cu_outbuf =
		    htonl(*(u_int32_t *)(void *)info - 1);
		/* decrement by 1 as clnt_dg_call() increments once */
		break;

	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
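		/*
		 * For reference, the pre-serialized call header in cu_outbuf
		 * is laid out one XDR unit per field: xid, direction (CALL),
		 * rpcvers, prog, vers.  That is why the program number sits
		 * at 3 * BYTES_PER_XDR_UNIT and the version number at
		 * 4 * BYTES_PER_XDR_UNIT.
		 */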
		*(u_int32_t *)(void *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 4 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)(void *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)(void *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 3 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)(void *)info);
		break;
	case CLSET_ASYNC:
		cu->cu_async = *(int *)(void *)info;
		break;
	case CLSET_CONNECT:
		cu->cu_connect = *(int *)(void *)info;
		break;
	default:
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	release_fd_lock(cu->cu_fd, mask);
	return (TRUE);
}

static void
clnt_dg_destroy(cl)
	CLIENT *cl;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	int cu_fd = cu->cu_fd;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu_fd])
		cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
	if (cu->cu_closeit)
		(void)_close(cu_fd);
	XDR_DESTROY(&(cu->cu_outxdrs));
	mem_free(cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof (CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu_fd]);
}

static struct clnt_ops *
clnt_dg_ops()
{
	static struct clnt_ops ops;
	extern mutex_t	ops_lock;
	sigset_t mask;
	sigset_t newmask;

/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_dg_call;
		ops.cl_abort = clnt_dg_abort;
		ops.cl_geterr = clnt_dg_geterr;
		ops.cl_freeres = clnt_dg_freeres;
		ops.cl_destroy = clnt_dg_destroy;
		ops.cl_control = clnt_dg_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is allowed.
 */
static bool_t
time_not_ok(t)
	struct timeval *t;
{
	return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
		t->tv_usec < -1 || t->tv_usec > 1000000);
}

/*
 *	Convert from timevals (used by select) to milliseconds (used by poll).
 */
static int
__rpc_timeval_to_msec(t)
	struct timeval	*t;
{
	int	t1, tmp;

	/*
	 *	We're really returning t->tv_sec * 1000 + (t->tv_usec / 1000)
	 *	but try to do so efficiently.  Note:  1000 = 1024 - 16 - 8.
	 */
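	/*
	 * With tmp = tv_sec << 3 (8 * tv_sec), the statements below compute
	 *	t1 = -tmp + (-tmp << 1) + (tmp << 7)
	 *	   = (-8 - 16 + 1024) * tv_sec = 1000 * tv_sec,
	 * and then add tv_usec / 1000.
	 */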
	tmp = (int)t->tv_sec << 3;
	t1 = -tmp;
	t1 += t1 << 1;
	t1 += tmp << 7;
	if (t->tv_usec)
		t1 += (int)(t->tv_usec / 1000);

	return (t1);
}