/*	$NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $	*/

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 */
/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident	"@(#)clnt_dg.c	1.23	94/04/22 SMI" */

#if !defined(lint) && defined(SCCSIDS)
static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/lib/libc/rpc/clnt_dg.c 99996 2002-07-14 23:14:02Z alfred $");

/*
 * Implements a connectionless client side RPC.
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <rpc/rpc.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <err.h>
#include "un-namespace.h"
#include "rpc_com.h"


#define	RPC_MAX_BACKOFF		30 /* seconds */


static struct clnt_ops *clnt_dg_ops(void);
static bool_t time_not_ok(struct timeval *);
static enum clnt_stat clnt_dg_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
	    xdrproc_t, void *, struct timeval);
static void clnt_dg_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_dg_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_dg_abort(CLIENT *);
static bool_t clnt_dg_control(CLIENT *, u_int, void *);
static void clnt_dg_destroy(CLIENT *);
static int __rpc_timeval_to_msec(struct timeval *);



/*
 *	This machinery implements per-fd locks for MT-safety.  It is not
 *	sufficient to do per-CLIENT handle locks for MT-safety because a
 *	user may create more than one CLIENT handle with the same fd behind
 *	it.  Therefore, we allocate an array of flags (dg_fd_locks), protected
 *	by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
 *	similarly protected.  dg_fd_locks[fd] == 1 => a call is active on some
 *	CLIENT handle created for that fd.
 *	The current implementation holds locks across the entire RPC and reply,
 *	including retransmissions.  Yes, this is silly, and as soon as this
 *	code is proven to work, this should be the first thing fixed.  One step
 *	at a time.
 */
static int	*dg_fd_locks;
extern mutex_t clnt_fd_lock;
static cond_t	*dg_cv;
#define	release_fd_lock(fd, mask) {		\
	mutex_lock(&clnt_fd_lock);	\
	dg_fd_locks[fd] = 0;		\
	mutex_unlock(&clnt_fd_lock);	\
	thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);	\
	cond_signal(&dg_cv[fd]);	\
}
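
/*
 * The matching acquire sequence used by clnt_dg_call() and clnt_dg_control()
 * below is, roughly:
 *
 *	sigfillset(&newmask);
 *	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 *	mutex_lock(&clnt_fd_lock);
 *	while (dg_fd_locks[fd])
 *		cond_wait(&dg_cv[fd], &clnt_fd_lock);
 *	dg_fd_locks[fd] = __isthreaded ? 1 : 0;
 *	mutex_unlock(&clnt_fd_lock);
 *
 * release_fd_lock() undoes this: it clears the flag, restores the caller's
 * signal mask, and wakes one waiter on the fd's condition variable.
 */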

static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";

/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */

/*
 * Private data kept per client handle
 */
struct cu_data {
	int			cu_fd;		/* connection's fd */
	bool_t			cu_closeit;	/* opened by library */
	struct sockaddr_storage	cu_raddr;	/* remote address */
	int			cu_rlen;	/* length of remote address */
	struct timeval		cu_wait;	/* retransmit interval */
	struct timeval		cu_total;	/* total time for the call */
	struct rpc_err		cu_error;	/* error from the last call */
	XDR			cu_outxdrs;	/* encode stream over cu_outbuf */
	u_int			cu_xdrpos;	/* position after the call header */
	u_int			cu_sendsz;	/* send size */
	char			*cu_outbuf;	/* output buffer (follows cu_inbuf) */
	u_int			cu_recvsz;	/* recv size */
	struct pollfd		pfdp;		/* pollfd used by clnt_dg_call() */
	int			cu_async;	/* asynchronous mode (CLSET_ASYNC) */
	int			cu_connect;	/* Use connect(). */
	int			cu_connected;	/* Have done connect(). */
	char			cu_inbuf[1];	/* start of input buffer */
};
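
/*
 * The input and output buffers live directly behind the structure:
 * clnt_dg_create() allocates sizeof (*cu) + sendsz + recvsz bytes, so
 * cu_inbuf is really recvsz bytes long and cu_outbuf points at
 * &cu_inbuf[recvsz].  cu_outxdrs is an XDR_ENCODE stream over cu_outbuf;
 * the call header is pre-encoded there once, and cu_xdrpos records where
 * the per-call fields (procedure, credentials, arguments) start.
 */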

/*
 * Connectionless client creation returns with client handle parameters.
 * Default options are set, which the user can change using clnt_control().
 * fd should be open and bound.
 * NB: The rpch->cl_auth is initialized to null authentication.
 *	Caller may wish to set this to something more useful.
 *
 * sendsz and recvsz are the maximum allowable packet sizes that can be
 * sent and received. Normally they are the same, but they can be
 * changed to improve the program efficiency and buffer allocation.
 * If they are 0, use the transport default.
 *
 * If svcaddr is NULL, returns NULL.
 */
CLIENT *
clnt_dg_create(fd, svcaddr, program, version, sendsz, recvsz)
	int fd;				/* open file descriptor */
	const struct netbuf *svcaddr;	/* server's address */
	rpcprog_t program;		/* program number */
	rpcvers_t version;		/* version number */
	u_int sendsz;			/* buffer send size */
	u_int recvsz;			/* buffer recv size */
{
	CLIENT *cl = NULL;		/* client handle */
	struct cu_data *cu = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	sigset_t mask;
	sigset_t newmask;
	struct __rpc_sockinfo si;
	int one = 1;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (dg_fd_locks == (int *) NULL) {
		int cv_allocsz;
		size_t fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		dg_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (dg_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else
			memset(dg_fd_locks, '\0', fd_allocsz);

		cv_allocsz = dtbsize * sizeof (cond_t);
		dg_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (dg_cv == (cond_t *) NULL) {
			mem_free(dg_fd_locks, fd_allocsz);
			dg_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&dg_cv[i], 0, (void *) 0);
		}
	}

	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);

	if (svcaddr == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
		return (NULL);
	}

	if (!__rpc_fd2sockinfo(fd, &si)) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	if ((sendsz == 0) || (recvsz == 0)) {
		rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}

	if ((cl = mem_alloc(sizeof (CLIENT))) == NULL)
		goto err1;
	/*
	 * Should be multiple of 4 for XDR.
	 */
	sendsz = ((sendsz + 3) / 4) * 4;
	recvsz = ((recvsz + 3) / 4) * 4;
	cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
	if (cu == NULL)
		goto err1;
	(void) memcpy(&cu->cu_raddr, svcaddr->buf, (size_t)svcaddr->len);
	cu->cu_rlen = svcaddr->len;
	cu->cu_outbuf = &cu->cu_inbuf[recvsz];
	/* Other values can also be set through clnt_control() */
	cu->cu_wait.tv_sec = 15;	/* heuristically chosen */
	cu->cu_wait.tv_usec = 0;
	cu->cu_total.tv_sec = -1;	/* -1: use the timeout passed to clnt_call() */
	cu->cu_total.tv_usec = -1;
	cu->cu_sendsz = sendsz;
	cu->cu_recvsz = recvsz;
	cu->cu_async = FALSE;
	cu->cu_connect = FALSE;
	cu->cu_connected = FALSE;
	(void) gettimeofday(&now, NULL);
	call_msg.rm_xid = __RPC_GETXID(&now);
	call_msg.rm_call.cb_prog = program;
	call_msg.rm_call.cb_vers = version;
	xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, sendsz, XDR_ENCODE);
	if (! xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
		rpc_createerr.cf_stat = RPC_CANTENCODEARGS;  /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		goto err2;
	}
	cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));

	/* XXX fvdl - do we still want this? */
#if 0
	(void)bindresvport_sa(fd, (struct sockaddr *)svcaddr->buf);
#endif
	/* Make the socket non-blocking so the receive loop can poll with timeouts. */
	_ioctl(fd, FIONBIO, (char *)(void *)&one);

	/*
	 * By default, closeit is always FALSE.  It is the user's responsibility
	 * to close the fd, or the user may use clnt_control(CLSET_FD_CLOSE)
	 * to let clnt_destroy close it.
	 */
	cu->cu_closeit = FALSE;
	cu->cu_fd = fd;
	cl->cl_ops = clnt_dg_ops();
	cl->cl_private = (caddr_t)(void *)cu;
	cl->cl_auth = authnone_create();
	cl->cl_tp = NULL;
	cl->cl_netid = NULL;
	cu->pfdp.fd = cu->cu_fd;
	cu->pfdp.events = POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND;
	return (cl);
err1:
	warnx(mem_err_clnt_dg);
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;
err2:
	if (cl) {
		mem_free(cl, sizeof (CLIENT));
		if (cu)
			mem_free(cu, sizeof (*cu) + sendsz + recvsz);
	}
	return (NULL);
}
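
/*
 * Typical use, as a rough illustrative sketch only (error handling trimmed;
 * prog, vers and procnum are placeholders, and fd/svc are assumed to be a
 * bound datagram socket and the already-resolved server address):
 *
 *	struct netbuf svc;			(filled in by the caller)
 *	struct timeval tv = { 25, 0 };
 *	CLIENT *clnt;
 *
 *	clnt = clnt_dg_create(fd, &svc, prog, vers, 0, 0);
 *	if (clnt == NULL) {
 *		clnt_pcreateerror("clnt_dg_create");
 *	} else {
 *		if (clnt_call(clnt, procnum, (xdrproc_t)xdr_void, NULL,
 *		    (xdrproc_t)xdr_void, NULL, tv) != RPC_SUCCESS)
 *			clnt_perror(clnt, "clnt_call");
 *		clnt_destroy(clnt);	(fd stays open unless CLSET_FD_CLOSE)
 *	}
 */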

static enum clnt_stat
clnt_dg_call(cl, proc, xargs, argsp, xresults, resultsp, utimeout)
	CLIENT	*cl;			/* client handle */
	rpcproc_t	proc;		/* procedure number */
	xdrproc_t	xargs;		/* xdr routine for args */
	void		*argsp;		/* pointer to args */
	xdrproc_t	xresults;	/* xdr routine for results */
	void		*resultsp;	/* pointer to results */
	struct timeval	utimeout;	/* seconds to wait before giving up */
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs;
	size_t outlen;
	struct rpc_msg reply_msg;
	XDR reply_xdrs;
	struct timeval time_waited;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	struct timeval retransmit_time;
	struct timeval startime, curtime;
	int firsttimeout = 1;
	struct sockaddr *sa;
	sigset_t mask;
	sigset_t newmask;
	socklen_t inlen, salen;
	ssize_t recvlen = 0;
	int rpc_lock_value;
	u_int32_t xid;

	outlen = 0;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (cu->cu_total.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = cu->cu_total;	/* use default timeout */
	}

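	/*
	 * If the caller enabled CLSET_CONNECT and we have not connected yet,
	 * connect() the datagram socket to the remote address once; from then
	 * on no destination address is passed to _sendto() and replies are
	 * received only from that peer.
	 */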
	if (cu->cu_connect && !cu->cu_connected) {
		if (_connect(cu->cu_fd, (struct sockaddr *)&cu->cu_raddr,
		    cu->cu_rlen) < 0) {
			release_fd_lock(cu->cu_fd, mask);
			cu->cu_error.re_errno = errno;
			return (cu->cu_error.re_status = RPC_CANTSEND);
		}
		cu->cu_connected = 1;
	}
	if (cu->cu_connected) {
		sa = NULL;
		salen = 0;
	} else {
		sa = (struct sockaddr *)&cu->cu_raddr;
		salen = cu->cu_rlen;
	}
	time_waited.tv_sec = 0;
	time_waited.tv_usec = 0;
	retransmit_time = cu->cu_wait;

call_again:
	xdrs = &(cu->cu_outxdrs);
	if (cu->cu_async == TRUE && xargs == NULL)
		goto get_reply;
	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, cu->cu_xdrpos);
	/*
	 * The transaction id is the first thing in the out buffer.
	 * XXX It is stored in network byte order, so we have to be
	 * careful when we increment it.
	 */
	xid = ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf));
	xid++;
	*(u_int32_t *)(void *)(cu->cu_outbuf) = htonl(xid);

	if ((! XDR_PUTINT32(xdrs, &proc)) ||
	    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
	    (! (*xargs)(xdrs, argsp))) {
		release_fd_lock(cu->cu_fd, mask);
		return (cu->cu_error.re_status = RPC_CANTENCODEARGS);
	}
	outlen = (size_t)XDR_GETPOS(xdrs);

send_again:
	if (_sendto(cu->cu_fd, cu->cu_outbuf, outlen, 0, sa, salen) != outlen) {
		cu->cu_error.re_errno = errno;
		release_fd_lock(cu->cu_fd, mask);
		return (cu->cu_error.re_status = RPC_CANTSEND);
	}

	/*
	 * Hack to provide rpc-based message passing: a zero timeout means
	 * the caller only wants to send, so do not wait for a reply at all.
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(cu->cu_fd, mask);
		return (cu->cu_error.re_status = RPC_TIMEDOUT);
	}

get_reply:

	/*
	 * sub-optimal code appears here because we have
	 * some clock time to spare while the packets are in flight.
	 * (We assume that this is actually only executed once.)
	 */
	reply_msg.acpted_rply.ar_verf = _null_auth;
	reply_msg.acpted_rply.ar_results.where = resultsp;
	reply_msg.acpted_rply.ar_results.proc = xresults;


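	/*
	 * Receive loop: poll for up to the current retransmit interval.  On a
	 * poll timeout the interval is doubled while it is still below
	 * RPC_MAX_BACKOFF and the request is resent; on EINTR the elapsed
	 * time is charged against the caller's timeout; otherwise a datagram
	 * is read and its transaction id is compared with the one just sent.
	 */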
	for (;;) {
		switch (_poll(&cu->pfdp, 1,
		    __rpc_timeval_to_msec(&retransmit_time))) {
		case 0:
			time_waited.tv_sec += retransmit_time.tv_sec;
			time_waited.tv_usec += retransmit_time.tv_usec;
			while (time_waited.tv_usec >= 1000000) {
				time_waited.tv_sec++;
				time_waited.tv_usec -= 1000000;
			}
			/* update retransmit_time */
			if (retransmit_time.tv_sec < RPC_MAX_BACKOFF) {
				retransmit_time.tv_usec *= 2;
				retransmit_time.tv_sec *= 2;
				while (retransmit_time.tv_usec >= 1000000) {
					retransmit_time.tv_sec++;
					retransmit_time.tv_usec -= 1000000;
				}
			}

			if ((time_waited.tv_sec < timeout.tv_sec) ||
			    ((time_waited.tv_sec == timeout.tv_sec) &&
				(time_waited.tv_usec < timeout.tv_usec)))
				goto send_again;
			release_fd_lock(cu->cu_fd, mask);
			return (cu->cu_error.re_status = RPC_TIMEDOUT);

		case -1:
			if (errno == EBADF) {
				cu->cu_error.re_errno = errno;
				release_fd_lock(cu->cu_fd, mask);
				return (cu->cu_error.re_status = RPC_CANTRECV);
			}
			if (errno != EINTR) {
				errno = 0; /* reset it */
				continue;
			}
			/* interrupted by another signal, update time_waited */
			if (firsttimeout) {
				/*
				 * Could have done gettimeofday before clnt_call
				 * but that means 1 more system call per each
				 * clnt_call, so do it after first time out
				 */
				if (gettimeofday(&startime,
					(struct timezone *) NULL) == -1) {
					errno = 0;
					continue;
				}
				firsttimeout = 0;
				errno = 0;
				continue;
			}
			if (gettimeofday(&curtime,
				(struct timezone *) NULL) == -1) {
				errno = 0;
				continue;
			}
			time_waited.tv_sec += curtime.tv_sec - startime.tv_sec;
			time_waited.tv_usec += curtime.tv_usec -
							startime.tv_usec;
			while (time_waited.tv_usec < 0) {
				time_waited.tv_sec--;
				time_waited.tv_usec += 1000000;
			}
			while (time_waited.tv_usec >= 1000000) {
				time_waited.tv_sec++;
				time_waited.tv_usec -= 1000000;
			}
			startime.tv_sec = curtime.tv_sec;
			startime.tv_usec = curtime.tv_usec;
			if ((time_waited.tv_sec > timeout.tv_sec) ||
				((time_waited.tv_sec == timeout.tv_sec) &&
				(time_waited.tv_usec > timeout.tv_usec))) {
				release_fd_lock(cu->cu_fd, mask);
				return (cu->cu_error.re_status = RPC_TIMEDOUT);
			}
			errno = 0; /* reset it */
			continue;
		}

		if (cu->pfdp.revents & POLLNVAL || (cu->pfdp.revents == 0)) {
			cu->cu_error.re_status = RPC_CANTRECV;
			/*
			 *	Note:  we're faking errno here because we
			 *	previously would have expected _poll() to
			 *	return -1 with errno EBADF.  Poll(BA_OS)
			 *	returns 0 and sets the POLLNVAL revents flag
			 *	instead.
			 */
			cu->cu_error.re_errno = errno = EBADF;
			release_fd_lock(cu->cu_fd, mask);
			return (cu->cu_error.re_status);
		}

		/* We have some data now */
		do {
			if (errno == EINTR) {
				/*
				 * Must make sure errno was not already
				 * EINTR in case _recvfrom() returns -1.
				 */
				errno = 0;
			}
			recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
			    cu->cu_recvsz, 0, NULL, NULL);
		} while (recvlen < 0 && errno == EINTR);
		if (recvlen < 0) {
			if (errno == EWOULDBLOCK)
				continue;
			cu->cu_error.re_errno = errno;
			release_fd_lock(cu->cu_fd, mask);
			return (cu->cu_error.re_status = RPC_CANTRECV);
		}
		if (recvlen < sizeof (u_int32_t))
			continue;
		/* see if reply transaction id matches sent id */
		if (cu->cu_async == FALSE &&
		    *((u_int32_t *)(void *)(cu->cu_inbuf)) !=
		    *((u_int32_t *)(void *)(cu->cu_outbuf)))
			continue;
		/* we now assume we have the proper reply */
		break;
	}
	inlen = (socklen_t)recvlen;

	/*
	 * now decode and validate the response
	 */

	xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int)inlen, XDR_DECODE);
	ok = xdr_replymsg(&reply_xdrs, &reply_msg);
	/* XDR_DESTROY(&reply_xdrs);	save a few cycles on noop destroy */
	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
			(reply_msg.acpted_rply.ar_stat == SUCCESS))
			cu->cu_error.re_status = RPC_SUCCESS;
		else
			_seterr_reply(&reply_msg, &(cu->cu_error));

		if (cu->cu_error.re_status == RPC_SUCCESS) {
			if (! AUTH_VALIDATE(cl->cl_auth,
					    &reply_msg.acpted_rply.ar_verf)) {
				cu->cu_error.re_status = RPC_AUTHERROR;
				cu->cu_error.re_why = AUTH_INVALIDRESP;
			}
			if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
				xdrs->x_op = XDR_FREE;
				(void) xdr_opaque_auth(xdrs,
					&(reply_msg.acpted_rply.ar_verf));
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (cu->cu_error.re_status == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
				nrefreshes--;
				goto call_again;
			}
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		cu->cu_error.re_status = RPC_CANTDECODERES;
	}
	release_fd_lock(cu->cu_fd, mask);
	return (cu->cu_error.re_status);
}

static void
clnt_dg_geterr(cl, errp)
	CLIENT *cl;
	struct rpc_err *errp;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;

	*errp = cu->cu_error;
}

static bool_t
clnt_dg_freeres(cl, xdr_res, res_ptr)
	CLIENT *cl;
	xdrproc_t xdr_res;
	void *res_ptr;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs = &(cu->cu_outxdrs);
	bool_t dummy;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu->cu_fd]);
	return (dummy);
}

/*ARGSUSED*/
static void
clnt_dg_abort(h)
	CLIENT *h;
{
}

static bool_t
clnt_dg_control(cl, request, info)
	CLIENT *cl;
	u_int request;
	void *info;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	struct netbuf *addr;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	switch (request) {
	case CLSET_FD_CLOSE:
		cu->cu_closeit = TRUE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		cu->cu_closeit = FALSE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
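
	/*
	 * Several of the cases below read or patch the marshalled call
	 * header that xdr_callhdr() left in cu_outbuf.  Its layout, in
	 * 32-bit XDR units, is: xid, message direction, RPC version,
	 * program number (offset 3 * BYTES_PER_XDR_UNIT) and version
	 * number (offset 4 * BYTES_PER_XDR_UNIT); that is what the
	 * CLGET/CLSET_XID, _PROG and _VERS cases rely on.
	 */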
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_total = *(struct timeval *)info;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)info = cu->cu_total;
		break;
	case CLGET_SERVER_ADDR:		/* Give him the fd address */
		/* Now obsolete. Only for backward compatibility */
		(void) memcpy(info, &cu->cu_raddr, (size_t)cu->cu_rlen);
		break;
	case CLSET_RETRY_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_wait = *(struct timeval *)info;
		break;
	case CLGET_RETRY_TIMEOUT:
		*(struct timeval *)info = cu->cu_wait;
		break;
	case CLGET_FD:
		*(int *)info = cu->cu_fd;
		break;
	case CLGET_SVC_ADDR:
		addr = (struct netbuf *)info;
		addr->buf = &cu->cu_raddr;
		addr->len = cu->cu_rlen;
		addr->maxlen = sizeof cu->cu_raddr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		addr = (struct netbuf *)info;
		if (addr->len < sizeof cu->cu_raddr) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		(void) memcpy(&cu->cu_raddr, addr->buf, addr->len);
		cu->cu_rlen = addr->len;
		break;
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure.
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)cu->cu_outbuf);
		break;

	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		*(u_int32_t *)(void *)cu->cu_outbuf =
		    htonl(*(u_int32_t *)info - 1);
		/* decrement by 1 as clnt_dg_call() increments once */
		break;

	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 4 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 3 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)info);
		break;
	case CLSET_ASYNC:
		cu->cu_async = *(int *)info;
		break;
	case CLSET_CONNECT:
		cu->cu_connect = *(int *)info;
		break;
	default:
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	release_fd_lock(cu->cu_fd, mask);
	return (TRUE);
}

static void
clnt_dg_destroy(cl)
	CLIENT *cl;
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	int cu_fd = cu->cu_fd;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu_fd])
		cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
	if (cu->cu_closeit)
		(void)_close(cu_fd);
	XDR_DESTROY(&(cu->cu_outxdrs));
	mem_free(cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof (CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu_fd]);
}

static struct clnt_ops *
clnt_dg_ops()
{
	static struct clnt_ops ops;
	extern mutex_t	ops_lock;
	sigset_t mask;
	sigset_t newmask;

/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_dg_call;
		ops.cl_abort = clnt_dg_abort;
		ops.cl_geterr = clnt_dg_geterr;
		ops.cl_freeres = clnt_dg_freeres;
		ops.cl_destroy = clnt_dg_destroy;
		ops.cl_control = clnt_dg_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is allowed.
 */
static bool_t
time_not_ok(t)
	struct timeval *t;
{
	return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
		t->tv_usec < -1 || t->tv_usec > 1000000);
}


/*
 *	Convert from timevals (used by select) to milliseconds (used by poll).
 */
static int
__rpc_timeval_to_msec(t)
	struct timeval	*t;
{
	int	t1, tmp;

	/*
	 *	We're really returning t->tv_sec * 1000 + (t->tv_usec / 1000)
	 *	but try to do so efficiently.  Note:  1000 = 1024 - 16 - 8.
	 */
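	/*
	 * Worked out with the variables below: tmp = 8 * tv_sec; t1 starts
	 * at -tmp, "t1 += t1 << 1" makes it -24 * tv_sec, and
	 * "t1 += tmp << 7" adds 1024 * tv_sec, giving a net of
	 * (1024 - 24) * tv_sec = 1000 * tv_sec.
	 */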
	tmp = (int)t->tv_sec << 3;
	t1 = -tmp;
	t1 += t1 << 1;
	t1 += tmp << 7;
	if (t->tv_usec)
		t1 += (int)(t->tv_usec / 1000);

	return (t1);
}
