svc_vc.c revision 1219:f89f56c2d9ac
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
 * Portions of this source code were derived from Berkeley
 * 4.3 BSD under license from the Regents of the University of
 * California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Server side for Connection Oriented RPC.
 *
 * Actually implements two flavors of transporter -
 * a rendezvouser (a listener and connection establisher)
 * and a record stream.
 */

46#include "mt.h"
47#include "rpc_mt.h"
48#include <stdio.h>
49#include <stdlib.h>
50#include <rpc/rpc.h>
51#include <sys/types.h>
52#include <errno.h>
53#include <sys/stat.h>
54#include <sys/mkdev.h>
55#include <sys/poll.h>
56#include <syslog.h>
57#include <rpc/nettype.h>
58#include <tiuser.h>
59#include <string.h>
60#include <stropts.h>
61#include <stdlib.h>
62#include <unistd.h>
63#include <sys/timod.h>
64#include <limits.h>
65
#ifndef MIN
#define	MIN(a, b)	(((a) < (b)) ? (a) : (b))
#endif

#define	CLEANUP_SIZE	1024

/* Globals and helpers shared with other files of the RPC library. */
extern int nsvc_xdrs;		/* number of slots in the svc_xdrs[] table */
extern int __rpc_connmaxrec;	/* default max record size (see rpc_control) */
extern int __rpc_irtimeout;

extern SVCXPRT	**svc_xports;	/* per-fd transport table */
extern int	__td_setnodelay(int);
extern bool_t	__xdrrec_getbytes_nonblock(XDR *, enum xprt_stat *);
extern bool_t	__xdrrec_set_conn_nonblock(XDR *, uint32_t);
extern int	_t_do_ioctl(int, char *, int, int, int *);
extern int	__rpc_legal_connmaxrec(int);
/* Structure used to initialize SVC_XP_AUTH(xprt).svc_ah_ops. */
extern struct svc_auth_ops svc_auth_any_ops;
extern void	__xprt_unregister_private(const SVCXPRT *, bool_t);

/* Local helpers, defined below. */
static struct xp_ops 	*svc_vc_ops(void);
static struct xp_ops 	*svc_vc_rendezvous_ops(void);
static void		svc_vc_destroy(SVCXPRT *);
static bool_t		svc_vc_nonblock(SVCXPRT *, SVCXPRT *);
static int		read_vc(SVCXPRT *, caddr_t, int);
static int		write_vc(SVCXPRT *, caddr_t, int);
static SVCXPRT		*makefd_xprt(int, uint_t, uint_t, t_scalar_t, char *);
static bool_t		fd_is_dead(int);
static void		update_nonblock_timestamps(SVCXPRT *);
95
struct cf_rendezvous { /* kept in xprt->xp_p1 for rendezvouser */
	uint_t sendsize;	/* send buffer size handed to accepted conns */
	uint_t recvsize;	/* receive buffer size handed to accepted conns */
	struct t_call *t_call;	/* preallocated T_CALL for t_listen()/t_accept() */
	struct t_bind *t_bind;	/* preallocated T_BIND for binding accepted fds */
	t_scalar_t cf_tsdu;	/* transport TSDU size from t_getinfo() */
	char *cf_cache;		/* duplicate-request cache handle (NULL = none) */
	int tcp_flag;		/* TRUE if transport is TCP (enables NODELAY) */
	int tcp_keepalive;	/* TRUE to negotiate SO_KEEPALIVE on accept */
	int cf_connmaxrec;	/* nonzero => accepted conns made non-blocking */
};
107
struct cf_conn {	/* kept in xprt->xp_p1 for actual connection */
	uint_t sendsize;	/* send buffer size of the XDR record stream */
	uint_t recvsize;	/* receive buffer size of the XDR record stream */
	enum xprt_stat strm_stat;	/* stream state (XPRT_IDLE initially) */
	uint32_t x_id;		/* RPC transaction id (maintained by recv/reply paths) */
	t_scalar_t cf_tsdu;	/* transport TSDU size */
	XDR xdrs;		/* XDR record stream used for sending replies */
	char *cf_cache;		/* duplicate-request cache handle (NULL = none) */
	char verf_body[MAX_AUTH_BYTES];	/* backing store for xp_verf */
	bool_t cf_conn_nonblock;	/* TRUE once the fd is non-blocking */
	time_t cf_conn_nonblock_timestamp;	/* last activity, for timeout scan */
};
120
/* More local helpers, defined later in this file. */
static int t_rcvall(int, char *, int);
static int t_rcvnonblock(SVCXPRT *, caddr_t, int);
static void svc_timeout_nonblock_xprt_and_LRU(bool_t);

extern int __xdrrec_setfirst(XDR *);
extern int __xdrrec_resetfirst(XDR *);
extern int __is_xdrrec_first(XDR *);

void __svc_nisplus_enable_timestamps(void);
void __svc_timeout_nonblock_xprt(void);

/*
 * Error-message fragments are kept as read-only data in the text
 * segment instead of being rebuilt at each call site.
 * Format = <routine> : <error>
 */

static const char errstring[] = " %s : %s";

/* Routine names */

static const char svc_vc_create_str[] = "svc_vc_create";
static const char svc_fd_create_str[] = "svc_fd_create";
static const char makefd_xprt_str[] = "svc_vc_create: makefd_xprt ";
static const char rendezvous_request_str[] = "rendezvous_request";
static const char svc_vc_fderr[] =
		"fd > FD_SETSIZE; Use rpc_control(RPC_SVC_USE_POLLFD,...);";
static const char do_accept_str[] = "do_accept";

/* error messages */

static const char no_mem_str[] = "out of memory";
static const char no_tinfo_str[] = "could not get transport information";
static const char no_fcntl_getfl_str[] = "could not get status flags and modes";
static const char no_nonblock_str[] = "could not set transport non-blocking";

/*
 *  Records a timestamp when data comes in on a descriptor.  This is
 *  only used if timestamps are enabled with __svc_nisplus_enable_timestamps().
 */
static long *timestamps;
static int ntimestamps; /* keep track how many timestamps */
static mutex_t timestamp_lock = DEFAULTMUTEX;

/*
 * Used to determine whether the time-out logic should be executed.
 */
static bool_t check_nonblock_timestamps = FALSE;
169
170void
171svc_vc_xprtfree(SVCXPRT *xprt)
172{
173/* LINTED pointer alignment */
174	SVCXPRT_EXT		*xt = xprt ? SVCEXT(xprt) : NULL;
175	struct cf_rendezvous	*r = xprt ?
176/* LINTED pointer alignment */
177				    (struct cf_rendezvous *)xprt->xp_p1 : NULL;
178
179	if (!xprt)
180		return;
181
182	if (xprt->xp_tp)
183		free(xprt->xp_tp);
184	if (xprt->xp_netid)
185		free(xprt->xp_netid);
186	if (xt && (xt->parent == NULL)) {
187		if (xprt->xp_ltaddr.buf)
188			free(xprt->xp_ltaddr.buf);
189		if (xprt->xp_rtaddr.buf)
190			free(xprt->xp_rtaddr.buf);
191	}
192	if (r) {
193		if (r->t_call)
194			(void) t_free((char *)r->t_call, T_CALL);
195		if (r->t_bind)
196			(void) t_free((char *)r->t_bind, T_BIND);
197		free(r);
198	}
199	svc_xprt_free(xprt);
200}
201
202/*
203 * Usage:
204 *	xprt = svc_vc_create(fd, sendsize, recvsize);
205 * Since connection streams do buffered io similar to stdio, the caller
206 * can specify how big the send and receive buffers are. If recvsize
207 * or sendsize are 0, defaults will be chosen.
208 * fd should be open and bound.
209 */
/*
 * Build (but do not register) a rendezvous SVCXPRT for the bound,
 * listening endpoint fd.  Allocates the cf_rendezvous private data,
 * preallocates the TLI T_CALL/T_BIND structures used at accept time,
 * and installs the rendezvous ops vector.  Returns NULL (after
 * logging) on any failure; all partial allocations are released.
 */
SVCXPRT *
svc_vc_create_private(int fd, uint_t sendsize, uint_t recvsize)
{
	struct cf_rendezvous *r;
	SVCXPRT *xprt;
	struct t_info tinfo;

	/* fd must be usable with this process's fd-set/pollfd strategy */
	if (RPC_FD_NOTIN_FDSET(fd)) {
		errno = EBADF;
		t_errno = TBADF;
		(void) syslog(LOG_ERR, errstring, svc_vc_create_str,
		    svc_vc_fderr);
		return (NULL);
	}
	if ((xprt = svc_xprt_alloc()) == NULL) {
		(void) syslog(LOG_ERR, errstring,
		    svc_vc_create_str, no_mem_str);
		return (NULL);
	}
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_RENDEZVOUS;

	r = calloc(1, sizeof (*r));
	if (r == NULL) {
		(void) syslog(LOG_ERR, errstring,
			svc_vc_create_str, no_mem_str);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}
	if (t_getinfo(fd, &tinfo) == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr),
				t_errno, errno);
		(void) syslog(LOG_ERR, "%s : %s : %s",
			svc_vc_create_str, no_tinfo_str, errorstr);
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	r->sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
	r->recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
	if ((r->sendsize == 0) || (r->recvsize == 0)) {
		syslog(LOG_ERR,
		    "svc_vc_create:  transport does not support "
		    "data transfer");
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

	/* preallocate TLI structures reused for every t_listen()/t_accept() */
/* LINTED pointer alignment */
	r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
	if (r->t_call == NULL) {
		(void) syslog(LOG_ERR, errstring,
			svc_vc_create_str, no_mem_str);
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

/* LINTED pointer alignment */
	r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
	if (r->t_bind == NULL) {
		(void) syslog(LOG_ERR, errstring,
			svc_vc_create_str, no_mem_str);
		(void) t_free((char *)r->t_call, T_CALL);
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

	r->cf_tsdu = tinfo.tsdu;
	r->tcp_flag = FALSE;
	r->tcp_keepalive = FALSE;
	r->cf_connmaxrec = __rpc_connmaxrec;
	xprt->xp_fd = fd;
	xprt->xp_p1 = (caddr_t)r;
	xprt->xp_p2 = NULL;
	xprt->xp_verf = _null_auth;
	xprt->xp_ops = svc_vc_rendezvous_ops();
/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_ops = svc_auth_any_ops;
/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_private = NULL;

	return (xprt);
}
301
302SVCXPRT *
303svc_vc_create(const int fd, const uint_t sendsize, const uint_t recvsize)
304{
305	SVCXPRT *xprt;
306
307	if ((xprt = svc_vc_create_private(fd, sendsize, recvsize)) != NULL)
308		xprt_register(xprt);
309	return (xprt);
310}
311
312SVCXPRT *
313svc_vc_xprtcopy(SVCXPRT *parent)
314{
315	SVCXPRT			*xprt;
316	struct cf_rendezvous	*r, *pr;
317	int			fd = parent->xp_fd;
318
319	if ((xprt = svc_xprt_alloc()) == NULL)
320		return (NULL);
321
322/* LINTED pointer alignment */
323	SVCEXT(xprt)->parent = parent;
324/* LINTED pointer alignment */
325	SVCEXT(xprt)->flags = SVCEXT(parent)->flags;
326
327	xprt->xp_fd = fd;
328	xprt->xp_ops = svc_vc_rendezvous_ops();
329	if (parent->xp_tp) {
330		xprt->xp_tp = (char *)strdup(parent->xp_tp);
331		if (xprt->xp_tp == NULL) {
332			syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
333			svc_vc_xprtfree(xprt);
334			return (NULL);
335		}
336	}
337	if (parent->xp_netid) {
338		xprt->xp_netid = (char *)strdup(parent->xp_netid);
339		if (xprt->xp_netid == NULL) {
340			syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
341			if (xprt->xp_tp)
342				free(xprt->xp_tp);
343			svc_vc_xprtfree(xprt);
344			return (NULL);
345		}
346	}
347
348	/*
349	 * can share both local and remote address
350	 */
351	xprt->xp_ltaddr = parent->xp_ltaddr;
352	xprt->xp_rtaddr = parent->xp_rtaddr; /* XXX - not used for rendezvous */
353	xprt->xp_type = parent->xp_type;
354	xprt->xp_verf = parent->xp_verf;
355
356	if ((r = calloc(1, sizeof (*r))) == NULL) {
357		svc_vc_xprtfree(xprt);
358		return (NULL);
359	}
360	xprt->xp_p1 = (caddr_t)r;
361/* LINTED pointer alignment */
362	pr = (struct cf_rendezvous *)parent->xp_p1;
363	r->sendsize = pr->sendsize;
364	r->recvsize = pr->recvsize;
365	r->cf_tsdu = pr->cf_tsdu;
366	r->cf_cache = pr->cf_cache;
367	r->tcp_flag = pr->tcp_flag;
368	r->tcp_keepalive = pr->tcp_keepalive;
369	r->cf_connmaxrec = pr->cf_connmaxrec;
370/* LINTED pointer alignment */
371	r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
372	if (r->t_call == NULL) {
373		svc_vc_xprtfree(xprt);
374		return (NULL);
375	}
376/* LINTED pointer alignment */
377	r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
378	if (r->t_bind == NULL) {
379		svc_vc_xprtfree(xprt);
380		return (NULL);
381	}
382
383	return (xprt);
384}
385
386/*
387 * XXX : Used for setting flag to indicate that this is TCP
388 */
389
390/*ARGSUSED*/
391int
392__svc_vc_setflag(SVCXPRT *xprt, int flag)
393{
394	struct cf_rendezvous *r;
395
396/* LINTED pointer alignment */
397	r = (struct cf_rendezvous *)xprt->xp_p1;
398	r->tcp_flag = TRUE;
399	return (1);
400}
401
402/*
403 * used for the actual connection.
404 */
405SVCXPRT *
406svc_fd_create_private(int fd, uint_t sendsize, uint_t recvsize)
407{
408	struct t_info tinfo;
409	SVCXPRT *dummy;
410	struct netbuf tres = {0};
411
412	if (RPC_FD_NOTIN_FDSET(fd)) {
413		errno = EBADF;
414		t_errno = TBADF;
415		(void) syslog(LOG_ERR, errstring,
416		    svc_fd_create_str, svc_vc_fderr);
417		return (NULL);
418	}
419	if (t_getinfo(fd, &tinfo) == -1) {
420		char errorstr[100];
421
422		__tli_sys_strerror(errorstr, sizeof (errorstr),
423				t_errno, errno);
424		(void) syslog(LOG_ERR, "%s : %s : %s",
425			svc_fd_create_str, no_tinfo_str, errorstr);
426		return (NULL);
427	}
428	/*
429	 * Find the receive and the send size
430	 */
431	sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
432	recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
433	if ((sendsize == 0) || (recvsize == 0)) {
434		syslog(LOG_ERR, errstring, svc_fd_create_str,
435			"transport does not support data transfer");
436		return (NULL);
437	}
438	dummy = makefd_xprt(fd, sendsize, recvsize, tinfo.tsdu, NULL);
439				/* NULL signifies no dup cache */
440	/* Assign the local bind address */
441	if (t_getname(fd, &tres, LOCALNAME) == -1)
442		tres.len = 0;
443	dummy->xp_ltaddr = tres;
444	/* Fill in type of service */
445	dummy->xp_type = tinfo.servtype;
446	return (dummy);
447}
448
449SVCXPRT *
450svc_fd_create(const int fd, const uint_t sendsize, const uint_t recvsize)
451{
452	SVCXPRT *xprt;
453
454	if ((xprt = svc_fd_create_private(fd, sendsize, recvsize)) != NULL)
455		xprt_register(xprt);
456	return (xprt);
457}
458
459void
460svc_fd_xprtfree(SVCXPRT *xprt)
461{
462/* LINTED pointer alignment */
463	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
464/* LINTED pointer alignment */
465	struct cf_conn	*cd = xprt ? (struct cf_conn *)xprt->xp_p1 : NULL;
466
467	if (!xprt)
468		return;
469
470	if (xprt->xp_tp)
471		free(xprt->xp_tp);
472	if (xprt->xp_netid)
473		free(xprt->xp_netid);
474	if (xt && (xt->parent == NULL)) {
475		if (xprt->xp_ltaddr.buf)
476			free(xprt->xp_ltaddr.buf);
477		if (xprt->xp_rtaddr.buf)
478			free(xprt->xp_rtaddr.buf);
479	}
480	if (cd) {
481		XDR_DESTROY(&(cd->xdrs));
482		free(cd);
483	}
484	if (xt && (xt->parent == NULL) && xprt->xp_p2) {
485/* LINTED pointer alignment */
486		free(((struct netbuf *)xprt->xp_p2)->buf);
487		free(xprt->xp_p2);
488	}
489	svc_xprt_free(xprt);
490}
491
/*
 * Build a connection SVCXPRT for fd: allocate the per-connection
 * cf_conn, create the outbound XDR record stream (replies go out
 * through write_vc), and install a per-fd inbound XDR stream in the
 * global svc_xdrs[] table (requests come in through read_vc), growing
 * the table in FD_INCREMENT steps as needed.  Returns NULL after
 * logging on any allocation failure; partial state is released.
 */
static SVCXPRT *
makefd_xprt(int fd, uint_t sendsize, uint_t recvsize, t_scalar_t tsdu,
    char *cache)
{
	SVCXPRT *xprt;
	struct cf_conn *cd;

	xprt = svc_xprt_alloc();
	if (xprt == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		return (NULL);
	}
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_CONNECTION;

	cd = malloc(sizeof (struct cf_conn));
	if (cd == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		svc_fd_xprtfree(xprt);
		return (NULL);
	}
	cd->sendsize = sendsize;
	cd->recvsize = recvsize;
	cd->strm_stat = XPRT_IDLE;
	cd->cf_tsdu = tsdu;
	cd->cf_cache = cache;
	cd->cf_conn_nonblock = FALSE;
	cd->cf_conn_nonblock_timestamp = 0;
	cd->xdrs.x_ops = NULL;
	/* send-only stream: no read routine, write_vc emits records */
	xdrrec_create(&(cd->xdrs), sendsize, 0, (caddr_t)xprt,
			(int(*)())NULL, (int(*)(void *, char *, int))write_vc);
	if (cd->xdrs.x_ops == NULL) {
		/* xdrrec_create() signals failure via a NULL ops vector */
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		free(cd);
		svc_fd_xprtfree(xprt);
		return (NULL);
	}

	(void) rw_wrlock(&svc_fd_lock);
	if (svc_xdrs == NULL) {
		svc_xdrs = calloc(FD_INCREMENT,  sizeof (XDR *));
		if (svc_xdrs == NULL) {
			(void) syslog(LOG_ERR, errstring, makefd_xprt_str,
								no_mem_str);
			XDR_DESTROY(&(cd->xdrs));
			free(cd);
			svc_fd_xprtfree(xprt);
			(void) rw_unlock(&svc_fd_lock);
			return (NULL);
		}
		nsvc_xdrs = FD_INCREMENT;
	}

	/* grow svc_xdrs[] until fd has a slot */
	while (fd >= nsvc_xdrs) {
		XDR **tmp_xdrs = svc_xdrs;
		tmp_xdrs = realloc(svc_xdrs,
				sizeof (XDR *) * (nsvc_xdrs + FD_INCREMENT));
		if (tmp_xdrs == NULL) {
			(void) syslog(LOG_ERR, errstring, makefd_xprt_str,
								no_mem_str);
			XDR_DESTROY(&(cd->xdrs));
			free(cd);
			svc_fd_xprtfree(xprt);
			(void) rw_unlock(&svc_fd_lock);
			return (NULL);
		}

		svc_xdrs = tmp_xdrs;
		/* zero only the newly appended slots */
		(void) memset(&svc_xdrs[nsvc_xdrs], 0,
					sizeof (XDR *) * FD_INCREMENT);
		nsvc_xdrs += FD_INCREMENT;
	}

	if (svc_xdrs[fd] != NULL) {
		/* reuse the slot left by a previous connection on this fd */
		XDR_DESTROY(svc_xdrs[fd]);
	} else if ((svc_xdrs[fd] = malloc(sizeof (XDR))) == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		XDR_DESTROY(&(cd->xdrs));
		free(cd);
		svc_fd_xprtfree(xprt);
		(void) rw_unlock(&svc_fd_lock);
		return (NULL);
	}
	(void) memset(svc_xdrs[fd], 0, sizeof (XDR));
	/* receive-only stream: read_vc consumes records, no write routine */
	xdrrec_create(svc_xdrs[fd], 0, recvsize, (caddr_t)xprt,
			(int(*)(void *, char *, int))read_vc, (int(*)())NULL);
	if (svc_xdrs[fd]->x_ops == NULL) {
		free(svc_xdrs[fd]);
		svc_xdrs[fd] = NULL;
		XDR_DESTROY(&(cd->xdrs));
		free(cd);
		svc_fd_xprtfree(xprt);
		(void) rw_unlock(&svc_fd_lock);
		return (NULL);
	}
	(void) rw_unlock(&svc_fd_lock);

	xprt->xp_p1 = (caddr_t)cd;
	xprt->xp_p2 = NULL;
	xprt->xp_verf.oa_base = cd->verf_body;
	xprt->xp_ops = svc_vc_ops();	/* truly deals with calls */
	xprt->xp_fd = fd;
	return (xprt);
}
597
598SVCXPRT *
599svc_fd_xprtcopy(SVCXPRT *parent)
600{
601	SVCXPRT			*xprt;
602	struct cf_conn		*cd, *pcd;
603
604	if ((xprt = svc_xprt_alloc()) == NULL)
605		return (NULL);
606
607/* LINTED pointer alignment */
608	SVCEXT(xprt)->parent = parent;
609/* LINTED pointer alignment */
610	SVCEXT(xprt)->flags = SVCEXT(parent)->flags;
611
612	xprt->xp_fd = parent->xp_fd;
613	xprt->xp_ops = svc_vc_ops();
614	if (parent->xp_tp) {
615		xprt->xp_tp = (char *)strdup(parent->xp_tp);
616		if (xprt->xp_tp == NULL) {
617			syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
618			svc_fd_xprtfree(xprt);
619			return (NULL);
620		}
621	}
622	if (parent->xp_netid) {
623		xprt->xp_netid = (char *)strdup(parent->xp_netid);
624		if (xprt->xp_netid == NULL) {
625			syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
626			if (xprt->xp_tp)
627				free(xprt->xp_tp);
628			svc_fd_xprtfree(xprt);
629			return (NULL);
630		}
631	}
632	/*
633	 * share local and remote addresses with parent
634	 */
635	xprt->xp_ltaddr = parent->xp_ltaddr;
636	xprt->xp_rtaddr = parent->xp_rtaddr;
637	xprt->xp_type = parent->xp_type;
638
639	if ((cd = malloc(sizeof (struct cf_conn))) == NULL) {
640		svc_fd_xprtfree(xprt);
641		return (NULL);
642	}
643/* LINTED pointer alignment */
644	pcd = (struct cf_conn *)parent->xp_p1;
645	cd->sendsize = pcd->sendsize;
646	cd->recvsize = pcd->recvsize;
647	cd->strm_stat = pcd->strm_stat;
648	cd->x_id = pcd->x_id;
649	cd->cf_tsdu = pcd->cf_tsdu;
650	cd->cf_cache = pcd->cf_cache;
651	cd->cf_conn_nonblock = pcd->cf_conn_nonblock;
652	cd->cf_conn_nonblock_timestamp = pcd->cf_conn_nonblock_timestamp;
653	cd->xdrs.x_ops = NULL;
654	xdrrec_create(&(cd->xdrs), cd->sendsize, 0, (caddr_t)xprt,
655			(int(*)())NULL, (int(*)(void *, char *, int))write_vc);
656	if (cd->xdrs.x_ops == NULL) {
657		free(cd);
658		svc_fd_xprtfree(xprt);
659		return (NULL);
660	}
661	xprt->xp_verf.oa_base = cd->verf_body;
662	xprt->xp_p1 = (char *)cd;
663	xprt->xp_p2 = parent->xp_p2;	/* shared */
664
665	return (xprt);
666}
667
/*
 * This routine is called by svc_getreqset() when the listening fd
 * polls ready.  It consumes the pending event: a disconnect is
 * drained, and a connect indication is accepted via do_accept(),
 * which opens another endpoint and registers a new connection
 * transport as a side effect.  It always returns FALSE because the
 * rendezvous fd itself never carries an RPC message to dispatch —
 * the newly registered endpoint will present the request instead.
 */
/* ARGSUSED1 */
static bool_t
rendezvous_request(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct cf_rendezvous *r;
	char *tpname = NULL;
	char devbuf[256];
	static void do_accept();	/* forward declaration (defined below) */

/* LINTED pointer alignment */
	r = (struct cf_rendezvous *)xprt->xp_p1;

again:
	switch (t_look(xprt->xp_fd)) {
	case T_DISCONNECT:
		(void) t_rcvdis(xprt->xp_fd, NULL);
		return (FALSE);

	case T_LISTEN:

		if (t_listen(xprt->xp_fd, r->t_call) == -1) {
			if ((t_errno == TSYSERR) && (errno == EINTR))
				goto again;	/* interrupted; re-examine */

			if (t_errno == TLOOK) {
				/* a disconnect raced the listen; drain it */
				if (t_look(xprt->xp_fd) == T_DISCONNECT)
				    (void) t_rcvdis(xprt->xp_fd, NULL);
			}
			return (FALSE);
		}
		break;
	default:
		return (FALSE);
	}
	/*
	 * Now create another endpoint, and accept the connection
	 * on it.
	 */

	if (xprt->xp_tp) {
		tpname = xprt->xp_tp;
	} else {
		/*
		 * If xprt->xp_tp is NULL, then try to extract the
		 * transport protocol information from the transport
		 * protocol corresponding to xprt->xp_fd
		 */
		struct netconfig *nconf;
		tpname = devbuf;
		if ((nconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
				== NULL) {
			(void) syslog(LOG_ERR, errstring,
					rendezvous_request_str,
					"no suitable transport");
			goto err;
		}
		(void) strcpy(tpname, nconf->nc_device);
		freenetconfigent(nconf);
	}

	do_accept(xprt->xp_fd, tpname, xprt->xp_netid, r->t_call, r);

err:
	return (FALSE); /* there is never an rpc msg to be processed */
}
741
/*
 * Accept the connect indication pending on srcfd (described by tcp)
 * onto a newly opened endpoint of transport device tpname, then build
 * and register a connection SVCXPRT for it.  Called from
 * rendezvous_request().  On failure the indication is rejected with
 * t_snddis() and/or the new endpoint is closed; nothing is returned.
 */
static void
do_accept(int srcfd, char *tpname, char *netid, struct t_call *tcp,
    struct cf_rendezvous *r)
{
	int	destfd;
	struct t_call	t_call;
	struct t_call	*tcp2 = NULL;
	struct t_info	tinfo;
	SVCXPRT	*xprt = NULL;
	SVCXPRT	*xprt_srcfd = NULL;
	char *option, *option_ret;
	struct opthdr *opt;
	struct t_optmgmt optreq, optret;
	int *p_optval;

	destfd = t_open(tpname, O_RDWR, &tinfo);
	if (check_nonblock_timestamps) {
		if (destfd == -1 && t_errno == TSYSERR && errno == EMFILE) {
			/*
			 * Since there are nonblocking connection xprts and
			 * too many open files, the LRU connection xprt should
			 * get destroyed in case an attacker has been creating
			 * many connections.
			 */
			(void) mutex_lock(&svc_mutex);
			svc_timeout_nonblock_xprt_and_LRU(TRUE);
			(void) mutex_unlock(&svc_mutex);
			destfd = t_open(tpname, O_RDWR, &tinfo);
		} else {
			/*
			 * Destroy/timeout all nonblock connection xprts
			 * that have not had recent activity.
			 * Do not destroy LRU xprt unless there are
			 * too many open files.
			 */
			(void) mutex_lock(&svc_mutex);
			svc_timeout_nonblock_xprt_and_LRU(FALSE);
			(void) mutex_unlock(&svc_mutex);
		}
	}
	if (destfd == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
			errno);
		(void) syslog(LOG_ERR, "%s : %s : %s", do_accept_str,
				"can't open connection", errorstr);
		(void) t_snddis(srcfd, tcp);
		return;
	}
	/*
	 * Dup the connection fd above 255 — presumably to keep low
	 * descriptors free for stdio; TODO confirm rationale.
	 */
	if (destfd < 256) {
		int nfd;

		nfd = fcntl(destfd, F_DUPFD, 256);
		if (nfd != -1) {
			if (t_close(destfd) == -1) {
				char errorstr[100];

				__tli_sys_strerror(errorstr, sizeof (errorstr),
						t_errno, errno);
				(void) syslog(LOG_ERR,
		"could not t_close() old fd %d; mem & fd leak error: %s",
						destfd, errorstr);
			}
			destfd = nfd;
			if (t_sync(destfd) == -1) {
				char errorstr[100];

				__tli_sys_strerror(errorstr, sizeof (errorstr),
						t_errno, errno);
				(void) syslog(LOG_ERR,
				    "could not t_sync() duped fd %d: %s",
						destfd, errorstr);
				(void) t_snddis(srcfd, tcp);
				return;
			}
		}
	}
	if (RPC_FD_NOTIN_FDSET(destfd)) {
		(void) syslog(LOG_ERR, errstring, do_accept_str,
						svc_vc_fderr);
		(void) t_close(destfd);
		(void) t_snddis(srcfd, tcp);
		errno = EBADF;
		t_errno = TBADF;
		return;
	}
	(void) fcntl(destfd, F_SETFD, 1); /* make it "close on exec" */
	if ((tinfo.servtype != T_COTS) && (tinfo.servtype != T_COTS_ORD)) {
		/* Not a connection oriented mode */
		(void) syslog(LOG_ERR, errstring, do_accept_str,
				"do_accept:  illegal transport");
		(void) t_close(destfd);
		(void) t_snddis(srcfd, tcp);
		return;
	}


	if (t_bind(destfd, NULL, r->t_bind) == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
				errno);
		(void) syslog(LOG_ERR, " %s : %s : %s", do_accept_str,
			"t_bind failed", errorstr);
		(void) t_close(destfd);
		(void) t_snddis(srcfd, tcp);
		return;
	}

	if (r->tcp_flag)	/* if TCP, set NODELAY flag */
		(void) __td_setnodelay(destfd);

	/*
	 * This connection is not listening, hence no need to set
	 * the qlen.
	 */

	/*
	 * XXX: The local transport chokes on its own listen
	 * options so we zero them for now
	 */
	t_call = *tcp;
	t_call.opt.len = 0;
	t_call.opt.maxlen = 0;
	t_call.opt.buf = NULL;

	/*
	 * Accept the connection.  On TLOOK, drain the pending event
	 * (disconnect, orderly release, or a nested connect indication,
	 * which is accepted recursively) and retry.
	 */
	while (t_accept(srcfd, destfd, &t_call) == -1) {
		char errorstr[100];

		switch (t_errno) {
		case TLOOK:
again:
			switch (t_look(srcfd)) {
			case T_CONNECT:
			case T_DATA:
			case T_EXDATA:
				/* this should not happen */
				break;

			case T_DISCONNECT:
				(void) t_rcvdis(srcfd, NULL);
				break;

			case T_LISTEN:
				if (tcp2 == NULL)
/* LINTED pointer alignment */
					tcp2 = (struct t_call *)t_alloc(srcfd,
					    T_CALL, T_ADDR | T_OPT);
				if (tcp2 == NULL) {

					(void) t_close(destfd);
					(void) t_snddis(srcfd, tcp);
					syslog(LOG_ERR, errstring,
						do_accept_str, no_mem_str);
					return;
					/* NOTREACHED */
				}
				if (t_listen(srcfd, tcp2) == -1) {
					switch (t_errno) {
					case TSYSERR:
						if (errno == EINTR)
							goto again;
						break;

					case TLOOK:
						goto again;
					}
					(void) t_free((char *)tcp2, T_CALL);
					(void) t_close(destfd);
					(void) t_snddis(srcfd, tcp);
					return;
					/* NOTREACHED */
				}
				/* accept the nested indication first */
				do_accept(srcfd, tpname, netid, tcp2, r);
				break;

			case T_ORDREL:
				(void) t_rcvrel(srcfd);
				(void) t_sndrel(srcfd);
				break;
			}
			if (tcp2) {
				(void) t_free((char *)tcp2, T_CALL);
				tcp2 = NULL;
			}
			break;

		case TBADSEQ:
			/*
			 * This can happen if the remote side has
			 * disconnected before the connection is
			 * accepted.  In this case, a disconnect
			 * should not be sent on srcfd (important!
			 * the listening fd will be hosed otherwise!).
			 * This error is not logged since this is an
			 * operational situation that is recoverable.
			 */
			(void) t_close(destfd);
			return;
			/* NOTREACHED */

		case TOUTSTATE:
			/*
			 * This can happen if the t_rcvdis() or t_rcvrel()/
			 * t_sndrel() put srcfd into the T_IDLE state.
			 */
			if (t_getstate(srcfd) == T_IDLE) {
				(void) t_close(destfd);
				(void) t_snddis(srcfd, tcp);
				return;
			}
			/* else FALL THROUGH TO */

		default:
			__tli_sys_strerror(errorstr, sizeof (errorstr),
					t_errno, errno);
			(void) syslog(LOG_ERR,
			    "cannot accept connection:  %s (current state %d)",
			    errorstr, t_getstate(srcfd));
			(void) t_close(destfd);
			(void) t_snddis(srcfd, tcp);
			return;
			/* NOTREACHED */
		}
	}

	/* best-effort SO_KEEPALIVE negotiation; failure is ignored */
	if (r->tcp_flag && r->tcp_keepalive) {
		option = malloc(sizeof (struct opthdr) + sizeof (int));
		option_ret = malloc(sizeof (struct opthdr) + sizeof (int));
		if (option && option_ret) {
			/* LINTED pointer cast */
			opt = (struct opthdr *)option;
			opt->level = SOL_SOCKET;
			opt->name  = SO_KEEPALIVE;
			opt->len  = sizeof (int);
			p_optval = (int *)(opt + 1);
			*p_optval = SO_KEEPALIVE;
			optreq.opt.maxlen = optreq.opt.len =
				sizeof (struct opthdr) + sizeof (int);
			optreq.opt.buf = (char *)option;
			optreq.flags = T_NEGOTIATE;
			optret.opt.maxlen = sizeof (struct opthdr)
					+ sizeof (int);
			optret.opt.buf = (char *)option_ret;
			(void) t_optmgmt(destfd, &optreq, &optret);
			free(option);
			free(option_ret);
		} else {
			if (option)
				free(option);
			if (option_ret)
				free(option_ret);
		}
	}


	/*
	 * make a new transporter
	 */
	xprt = makefd_xprt(destfd, r->sendsize, r->recvsize, r->cf_tsdu,
				r->cf_cache);
	if (xprt == NULL) {
		/*
		 * makefd_xprt() returns a NULL xprt only when
		 * it's out of memory.
		 */
		goto memerr;
	}

	/*
	 * Copy the new local and remote bind information
	 */

	xprt->xp_rtaddr.len = tcp->addr.len;
	xprt->xp_rtaddr.maxlen = tcp->addr.len;
	if ((xprt->xp_rtaddr.buf = malloc(tcp->addr.len)) == NULL)
		goto memerr;
	(void) memcpy(xprt->xp_rtaddr.buf, tcp->addr.buf, tcp->addr.len);

	if (strcmp(netid, "tcp") == 0) {
		xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in);
		if ((xprt->xp_ltaddr.buf =
			malloc(xprt->xp_ltaddr.maxlen)) == NULL)
			goto memerr;
		if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
		    (void) syslog(LOG_ERR,
				"do_accept: t_getname for tcp failed!");
			goto xprt_err;
		}
	} else if (strcmp(netid, "tcp6") == 0) {
		xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in6);
		if ((xprt->xp_ltaddr.buf =
			malloc(xprt->xp_ltaddr.maxlen)) == NULL)
			goto memerr;
		if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
			(void) syslog(LOG_ERR,
				"do_accept: t_getname for tcp6 failed!");
			goto xprt_err;
		}
	}

	xprt->xp_tp = strdup(tpname);
	xprt->xp_netid = strdup(netid);
	if ((xprt->xp_tp == NULL) ||
	    (xprt->xp_netid == NULL)) {
		goto memerr;
	}
	if (tcp->opt.len > 0) {
		/* preserve the connect-time options in xp_p2 */
		struct netbuf *netptr;

		xprt->xp_p2 = malloc(sizeof (struct netbuf));

		if (xprt->xp_p2 != NULL) {
/* LINTED pointer alignment */
			netptr = (struct netbuf *)xprt->xp_p2;

			netptr->len = tcp->opt.len;
			netptr->maxlen = tcp->opt.len;
			if ((netptr->buf = malloc(tcp->opt.len)) == NULL)
				goto memerr;
			(void) memcpy(netptr->buf, tcp->opt.buf, tcp->opt.len);
		} else
			goto memerr;
	}
/*	(void) ioctl(destfd, I_POP, NULL);    */

	/*
	 * If a nonblocked connection fd has been requested,
	 * perform the necessary operations.
	 */
	xprt_srcfd = svc_xports[srcfd];
	/* LINTED pointer cast */
	if (((struct cf_rendezvous *)(xprt_srcfd->xp_p1))->cf_connmaxrec) {
		if (!svc_vc_nonblock(xprt_srcfd, xprt))
			goto xprt_err;
	}

	/*
	 * Copy the call back declared for the service to the current
	 * connection
	 */
	xprt->xp_closeclnt = xprt_srcfd->xp_closeclnt;
	xprt_register(xprt);

	return;

memerr:
	(void) syslog(LOG_ERR, errstring, do_accept_str, no_mem_str);
xprt_err:
	if (xprt)
		svc_vc_destroy(xprt);
	(void) t_close(destfd);
}
1096
1097/*
1098 * This routine performs the necessary fcntl() operations to create
1099 * a nonblocked connection fd.
1100 * It also adjusts the sizes and allocates the buffer
1101 * for the nonblocked operations, and updates the associated
1102 * timestamp field in struct cf_conn for timeout bookkeeping.
1103 */
1104static bool_t
1105svc_vc_nonblock(SVCXPRT *xprt_rendezvous, SVCXPRT *xprt_conn)
1106{
1107	int nn;
1108	int fdconn = xprt_conn->xp_fd;
1109	struct cf_rendezvous *r =
1110		/* LINTED pointer cast */
1111		(struct cf_rendezvous *)xprt_rendezvous->xp_p1;
1112	/* LINTED pointer cast */
1113	struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;
1114	uint32_t maxrecsz;
1115
1116	if ((nn = fcntl(fdconn, F_GETFL, 0)) < 0) {
1117		(void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
1118			    no_fcntl_getfl_str);
1119		return (FALSE);
1120	}
1121
1122	if (fcntl(fdconn, F_SETFL, nn|O_NONBLOCK) != 0) {
1123		(void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
1124				no_nonblock_str);
1125		return (FALSE);
1126	}
1127
1128	cd->cf_conn_nonblock = TRUE;
1129	/*
1130	 * If the max fragment size has not been set via
1131	 * rpc_control(), use the default.
1132	 */
1133	if ((maxrecsz = r->cf_connmaxrec) == 0)
1134		maxrecsz = r->recvsize;
1135	/* Set XDR stream to use non-blocking semantics. */
1136	if (__xdrrec_set_conn_nonblock(svc_xdrs[fdconn], maxrecsz)) {
1137		check_nonblock_timestamps = TRUE;
1138		update_nonblock_timestamps(xprt_conn);
1139		return (TRUE);
1140	}
1141	return (FALSE);
1142}
1143
/* ARGSUSED */
static enum xprt_stat
rendezvous_stat(SVCXPRT *xprt)
{
	/* A rendezvous (listener) handle never has buffered requests. */
	return (XPRT_IDLE);
}
1150
static void
svc_vc_destroy(SVCXPRT *xprt)
{
	/* Destroy under svc_mutex; then sweep timed-out nonblocked fds. */
	(void) mutex_lock(&svc_mutex);
	_svc_vc_destroy_private(xprt, TRUE);
	/* FALSE: do not also kill the least-recently-used connection. */
	(void) svc_timeout_nonblock_xprt_and_LRU(FALSE);
	(void) mutex_unlock(&svc_mutex);
}
1159
/*
 * Tear down a connection or rendezvous transport handle.
 * In MT modes destruction is redirected to the parent handle (if any),
 * the handle is marked SVC_DEFUNCT, and actual teardown is skipped
 * while references remain outstanding.
 * lock_not_held tells __xprt_unregister_private() whether the caller
 * already holds the fd lock.
 */
void
_svc_vc_destroy_private(SVCXPRT *xprt, bool_t lock_not_held)
{
	if (svc_mt_mode != RPC_SVC_MT_NONE) {
/* LINTED pointer alignment */
		if (SVCEXT(xprt)->parent)
/* LINTED pointer alignment */
			xprt = SVCEXT(xprt)->parent;
/* LINTED pointer alignment */
		svc_flags(xprt) |= SVC_DEFUNCT;
/* LINTED pointer alignment */
		if (SVCEXT(xprt)->refcnt > 0)
			return;	/* still referenced; not destroyed now */
	}

	/* Notify the registered close callback, if any, exactly once. */
	if (xprt->xp_closeclnt != NULL) {
		svc_errorhandler_t cb = xprt->xp_closeclnt;

		/*
		 * Reset the pointer here to avoid reentrance on the same
		 * SVCXPRT handle.
		 */
		xprt->xp_closeclnt = NULL;
		/* Second argument: TRUE when a remote address is recorded. */
		cb(xprt, (xprt->xp_rtaddr.len != 0));
	}

	__xprt_unregister_private(xprt, lock_not_held);
	(void) t_close(xprt->xp_fd);

	/* Clear any activity timestamp recorded for this fd. */
	(void) mutex_lock(&timestamp_lock);
	if (timestamps && xprt->xp_fd < ntimestamps) {
		timestamps[xprt->xp_fd] = 0;
	}
	(void) mutex_unlock(&timestamp_lock);

	/* Release the handle itself, by transport type in non-MT mode. */
	if (svc_mt_mode != RPC_SVC_MT_NONE) {
		svc_xprt_destroy(xprt);
	} else {
/* LINTED pointer alignment */
		if (svc_type(xprt) == SVC_RENDEZVOUS)
			svc_vc_xprtfree(xprt);
		else
			svc_fd_xprtfree(xprt);
	}
}
1205
1206/*ARGSUSED*/
1207static bool_t
1208svc_vc_control(SVCXPRT *xprt, const uint_t rq, void *in)
1209{
1210	switch (rq) {
1211	case SVCSET_RECVERRHANDLER:
1212		xprt->xp_closeclnt = (svc_errorhandler_t)in;
1213		return (TRUE);
1214	case SVCGET_RECVERRHANDLER:
1215		*(svc_errorhandler_t *)in = xprt->xp_closeclnt;
1216		return (TRUE);
1217	case SVCGET_XID:
1218		if (xprt->xp_p1 == NULL)
1219			return (FALSE);
1220		/* LINTED pointer alignment */
1221		*(uint32_t *)in = ((struct cf_conn *)(xprt->xp_p1))->x_id;
1222		return (TRUE);
1223	default:
1224		return (FALSE);
1225	}
1226}
1227
1228static bool_t
1229rendezvous_control(SVCXPRT *xprt, const uint_t rq, void *in)
1230{
1231	struct cf_rendezvous *r;
1232	int tmp;
1233
1234	switch (rq) {
1235	case SVCSET_RECVERRHANDLER:
1236		xprt->xp_closeclnt = (svc_errorhandler_t)in;
1237		return (TRUE);
1238	case SVCGET_RECVERRHANDLER:
1239		*(svc_errorhandler_t *)in = xprt->xp_closeclnt;
1240		return (TRUE);
1241	case SVCSET_KEEPALIVE:
1242		/* LINTED pointer cast */
1243		r = (struct cf_rendezvous *)xprt->xp_p1;
1244		if (r->tcp_flag) {
1245			r->tcp_keepalive = (int)(intptr_t)in;
1246			return (TRUE);
1247		}
1248		return (FALSE);
1249	case SVCSET_CONNMAXREC:
1250		/*
1251		 * Override the default maximum record size, set via
1252		 * rpc_control(), for this connection. Only appropriate
1253		 * for connection oriented transports, but is ignored for
1254		 * the connectionless case, so no need to check the
1255		 * connection type here.
1256		 */
1257		/* LINTED pointer cast */
1258		r = (struct cf_rendezvous *)xprt->xp_p1;
1259		tmp = __rpc_legal_connmaxrec(*(int *)in);
1260		if (r != 0 && tmp >= 0) {
1261			r->cf_connmaxrec = tmp;
1262			return (TRUE);
1263		}
1264		return (FALSE);
1265	case SVCGET_CONNMAXREC:
1266		/* LINTED pointer cast */
1267		r = (struct cf_rendezvous *)xprt->xp_p1;
1268		if (r != 0) {
1269			*(int *)in = r->cf_connmaxrec;
1270			return (TRUE);
1271		}
1272		return (FALSE);
1273	case SVCGET_XID:	/* fall through for now */
1274	default:
1275		return (FALSE);
1276	}
1277}
1278
1279/*
1280 * All read operations timeout after 35 seconds.
1281 * A timeout is fatal for the connection.
1282 * update_timestamps() is used by nisplus operations,
1283 * update_nonblock_timestamps() is used for nonblocked
1284 * connection fds.
1285 */
1286#define	WAIT_PER_TRY	35000	/* milliseconds */
1287
1288static void
1289update_timestamps(int fd)
1290{
1291	(void) mutex_lock(&timestamp_lock);
1292	if (timestamps) {
1293		struct timeval tv;
1294
1295		(void) gettimeofday(&tv, NULL);
1296		while (fd >= ntimestamps) {
1297			long *tmp_timestamps = timestamps;
1298
1299			/* allocate more timestamps */
1300			tmp_timestamps = realloc(timestamps,
1301				sizeof (long) *
1302				(ntimestamps + FD_INCREMENT));
1303			if (tmp_timestamps == NULL) {
1304				(void) mutex_unlock(&timestamp_lock);
1305				syslog(LOG_ERR,
1306					"update_timestamps: out of memory");
1307				return;
1308			}
1309
1310			timestamps = tmp_timestamps;
1311			(void) memset(&timestamps[ntimestamps], 0,
1312				sizeof (long) * FD_INCREMENT);
1313			ntimestamps += FD_INCREMENT;
1314		}
1315		timestamps[fd] = tv.tv_sec;
1316	}
1317	(void) mutex_unlock(&timestamp_lock);
1318}
1319
static  void
update_nonblock_timestamps(SVCXPRT *xprt_conn)
{
	struct timeval tv;
	/* LINTED pointer cast */
	struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;

	/* Stamp the connection with the current time (seconds). */
	(void) gettimeofday(&tv, NULL);
	cd->cf_conn_nonblock_timestamp = tv.tv_sec;
}
1330
1331/*
1332 * reads data from the vc conection.
1333 * any error is fatal and the connection is closed.
1334 * (And a read of zero bytes is a half closed stream => error.)
1335 */
static int
read_vc(SVCXPRT *xprt, caddr_t buf, int len)
{
	int fd = xprt->xp_fd;
	XDR *xdrs = svc_xdrs[fd];
	struct pollfd pfd;
	int ret;

	/*
	 * Make sure the connection is not already dead.
	 */
/* LINTED pointer alignment */
	if (svc_failed(xprt))
		return (-1);

	/* LINTED pointer cast */
	if (((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock) {
		/*
		 * For nonblocked reads, only update the
		 * timestamps to record the activity so the
		 * connection will not be timedout.
		 * Up to "len" bytes are requested.
		 * If fewer than "len" bytes are received, the
		 * connection is poll()ed again.
		 * The poll() for the connection fd is performed
		 * in the main poll() so that all outstanding fds
		 * are polled rather than just the vc connection.
		 * Polling on only the vc connection until the entire
		 * fragment has been read can be exploited in
		 * a Denial of Service Attack such as telnet <host> 111.
		 */
		if ((len = t_rcvnonblock(xprt, buf, len)) >= 0) {
			if (len > 0) {
				update_timestamps(fd);
				update_nonblock_timestamps(xprt);
			}
			return (len);
		}
		/* t_rcvnonblock() failure is fatal for the connection. */
		goto fatal_err;
	}

	/*
	 * Blocking path: unless this is the first read of a record,
	 * wait (up to WAIT_PER_TRY ms per poll) for inbound data.
	 */
	if (!__is_xdrrec_first(xdrs)) {

		pfd.fd = fd;
		pfd.events = MASKVAL;

		do {
			if ((ret = poll(&pfd, 1, WAIT_PER_TRY)) <= 0) {
				/*
				 * If errno is EINTR, ERESTART, or EAGAIN
				 * ignore error and repeat poll
				 */
				if (ret < 0 && (errno == EINTR ||
				    errno == ERESTART || errno == EAGAIN))
					continue;
				/* Timeout or hard poll error: give up. */
				goto fatal_err;
			}
		} while (pfd.revents == 0);
		if (pfd.revents & POLLNVAL)
			goto fatal_err;
	}
	(void) __xdrrec_resetfirst(xdrs);
	if ((len = t_rcvall(fd, buf, len)) > 0) {
		update_timestamps(fd);
		return (len);
	}

fatal_err:
	/* Mark the stream dead; svc_vc_stat() will report XPRT_DIED. */
/* LINTED pointer alignment */
	((struct cf_conn *)(xprt->xp_p1))->strm_stat = XPRT_DIED;
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_FAILED;
	return (-1);
}
1410
1411/*
1412 * Requests up to "len" bytes of data.
1413 * Returns number of bytes actually received, or error indication.
1414 */
1415static int
1416t_rcvnonblock(SVCXPRT *xprt, caddr_t buf, int len)
1417{
1418	int fd = xprt->xp_fd;
1419	int flag;
1420	int res;
1421
1422	res = t_rcv(fd, buf, (unsigned)len, &flag);
1423	if (res == -1) {
1424		switch (t_errno) {
1425		case TLOOK:
1426			switch (t_look(fd)) {
1427			case T_DISCONNECT:
1428				(void) t_rcvdis(fd, NULL);
1429				break;
1430			case T_ORDREL:
1431				(void) t_rcvrel(fd);
1432				(void) t_sndrel(fd);
1433				break;
1434			default:
1435				break;
1436			}
1437			break;
1438		case TNODATA:
1439			/*
1440			 * Either poll() lied, or the xprt/fd was closed and
1441			 * re-opened under our feet. Return 0, so that we go
1442			 * back to waiting for data.
1443			 */
1444			res = 0;
1445			break;
1446		/* Should handle TBUFOVFLW TSYSERR ? */
1447		default:
1448			break;
1449		}
1450	}
1451	return (res);
1452}
1453
1454/*
1455 * Timeout out nonblocked connection fds
1456 * If there has been no activity on the fd for __rpc_irtimeout
1457 * seconds, timeout the fd  by destroying its xprt.
1458 * If the caller gets an EMFILE error, the caller may also request
1459 * that the least busy xprt gets destroyed as well.
1460 * svc_thr_mutex is held when this is called.
1461 * svc_mutex is held when this is called.
1462 */
static void
svc_timeout_nonblock_xprt_and_LRU(bool_t destroy_lru)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	SVCXPRT *candidate_xprt = NULL;
	struct cf_conn *cd;
	int i, fd_idx = 0, dead_idx = 0;
	struct timeval now;
	time_t lasttime, maxctime = 0;
	extern rwlock_t svc_fd_lock;

	/* Nothing to do until some connection enables timestamping. */
	if (!check_nonblock_timestamps)
		return;

	(void) gettimeofday(&now, NULL);
	if (svc_xports == NULL)
		return;
	/*
	 * Hold svc_fd_lock to protect
	 * svc_xports, svc_maxpollfd, svc_max_pollfd
	 */
	(void) rw_wrlock(&svc_fd_lock);
	for (;;) {
		/*
		 * Timeout upto CLEANUP_SIZE connection fds per
		 * iteration for the while(1) loop
		 */
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL) {
				continue;
			}
			/* Only look at connection fds */
			/* LINTED pointer cast */
			if (svc_type(xprt) != SVC_CONNECTION) {
				continue;
			}
			/* LINTED pointer cast */
			cd = (struct cf_conn *)xprt->xp_p1;
			if (!cd->cf_conn_nonblock)
				continue;
			/* Idle time since the last recorded activity. */
			lasttime = now.tv_sec - cd->cf_conn_nonblock_timestamp;
			if (lasttime >= __rpc_irtimeout &&
			    __rpc_irtimeout != 0) {
				/* Enter in timedout/dead array */
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			} else
			if (lasttime > maxctime) {
				/* Possible LRU xprt */
				candidate_xprt = xprt;
				maxctime = lasttime;
			}
		}

		/* Destroy this batch before resuming the scan. */
		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}

		/*
		 * If all the nonblocked fds have been checked, we're done.
		 */
		if (fd_idx++ >= svc_max_pollfd)
			break;
	}
	/* Caller may also ask for the least-recently-active xprt to go. */
	if ((destroy_lru) && (candidate_xprt != NULL)) {
		_svc_vc_destroy_private(candidate_xprt, FALSE);
	}
	(void) rw_unlock(&svc_fd_lock);
}
1535/*
1536 * Receive the required bytes of data, even if it is fragmented.
1537 */
static int
t_rcvall(int fd, char *buf, int len)
{
	int flag;
	int final = 0;
	int res;

	/*
	 * Keep reading while the provider indicates more data for the
	 * same TSDU (T_MORE) and bytes are still wanted.  Note: "flag"
	 * is examined only after a successful t_rcv(), which sets it.
	 */
	do {
		res = t_rcv(fd, buf, (unsigned)len, &flag);
		if (res == -1) {
			if (t_errno == TLOOK) {
				/* Consume the pending event on the fd. */
				switch (t_look(fd)) {
				case T_DISCONNECT:
					(void) t_rcvdis(fd, NULL);
					break;
				case T_ORDREL:
					/* Acknowledge release both ways. */
					(void) t_rcvrel(fd);
					(void) t_sndrel(fd);
					break;
				default:
					break;
				}
			}
			break;
		}
		final += res;
		buf += res;
		len -= res;
	} while (len && (flag & T_MORE));
	/* -1 if the last t_rcv() failed, else total bytes received. */
	return (res == -1 ? -1 : final);
}
1569
1570/*
1571 * writes data to the vc connection.
1572 * Any error is fatal and the connection is closed.
1573 */
static int
write_vc(SVCXPRT *xprt, caddr_t buf, int len)
{
	int i, cnt;
	int flag;
	int maxsz;
	int nonblock;
	struct pollfd pfd;

/* LINTED pointer alignment */
	maxsz = ((struct cf_conn *)(xprt->xp_p1))->cf_tsdu;
	/* LINTED pointer cast */
	nonblock = ((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock;
	/* Nonblocked connection with no TSDU limit: send in one piece. */
	if (nonblock && maxsz <= 0)
		maxsz = len;
	if ((maxsz == 0) || (maxsz == -1)) {
		/*
		 * No TSDU size limit: one t_snd() of the whole buffer.
		 * Any failure is fatal for the connection.
		 */
		if ((len = t_snd(xprt->xp_fd, buf, (unsigned)len,
				(int)0)) == -1) {
			if (t_errno == TLOOK) {
				switch (t_look(xprt->xp_fd)) {
				case T_DISCONNECT:
					(void) t_rcvdis(xprt->xp_fd, NULL);
					break;
				case T_ORDREL:
					(void) t_rcvrel(xprt->xp_fd);
					(void) t_sndrel(xprt->xp_fd);
					break;
				default:
					break;
				}
			}
/* LINTED pointer alignment */
			((struct cf_conn *)(xprt->xp_p1))->strm_stat
					= XPRT_DIED;
/* LINTED pointer alignment */
			svc_flags(xprt) |= SVC_FAILED;
		}
		return (len);
	}

	/*
	 * Setup for polling. We want to be able to write normal
	 * data to the transport
	 */
	pfd.fd = xprt->xp_fd;
	pfd.events = POLLWRNORM;

	/*
	 * This for those transports which have a max size for data,
	 * and for the non-blocking case, where t_snd() may send less
	 * than requested.
	 */
	for (cnt = len, i = 0; cnt > 0; cnt -= i, buf += i) {
		/* T_MORE marks all but the final fragment of the TSDU. */
		flag = cnt > maxsz ? T_MORE : 0;
		if ((i = t_snd(xprt->xp_fd, buf,
			(unsigned)MIN(cnt, maxsz), flag)) == -1) {
			if (t_errno == TLOOK) {
				switch (t_look(xprt->xp_fd)) {
				case T_DISCONNECT:
					(void) t_rcvdis(xprt->xp_fd, NULL);
					break;
				case T_ORDREL:
					(void) t_rcvrel(xprt->xp_fd);
					break;
				default:
					break;
				}
				/* NB: control falls through to fatal_err. */
			} else if (t_errno == TFLOW) {
				/* Try again */
				i = 0;
				/* Wait till we can write to the transport */
				do {
				    if (poll(&pfd, 1, WAIT_PER_TRY) < 0) {
					/*
					 * If errno is ERESTART, or
					 * EAGAIN ignore error and repeat poll
					 */
					if (errno == ERESTART ||
					    errno == EAGAIN)
						continue;
					else
						goto fatal_err;
				    }
				} while (pfd.revents == 0);
				if (pfd.revents & (POLLNVAL | POLLERR |
						    POLLHUP))
					goto fatal_err;
				/* Writable again; retry the t_snd(). */
				continue;
			}
fatal_err:
/* LINTED pointer alignment */
			((struct cf_conn *)(xprt->xp_p1))->strm_stat
					= XPRT_DIED;
/* LINTED pointer alignment */
			svc_flags(xprt) |= SVC_FAILED;
			return (-1);
		}
	}
	return (len);
}
1674
/*
 * Report transport status: DIED if this handle or its parent has been
 * marked failed, MOREREQS if buffered record data remains, else IDLE.
 */
static enum xprt_stat
svc_vc_stat(SVCXPRT *xprt)
{
/* LINTED pointer alignment */
	SVCXPRT *parent = SVCEXT(xprt)->parent ? SVCEXT(xprt)->parent : xprt;

	/* Failure may be flagged on the child or the parent handle. */
/* LINTED pointer alignment */
	if (svc_failed(parent) || svc_failed(xprt))
		return (XPRT_DIED);
	if (!xdrrec_eof(svc_xdrs[xprt->xp_fd]))
		return (XPRT_MOREREQS);
	/*
	 * xdrrec_eof could have noticed that the connection is dead, so
	 * check status again.
	 */
/* LINTED pointer alignment */
	if (svc_failed(parent) || svc_failed(xprt))
		return (XPRT_DIED);
	return (XPRT_IDLE);
}
1695
1696
1697
/*
 * Receive and decode the next RPC call message from the connection.
 * Returns TRUE when a complete call header has been decoded.
 */
static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
/* LINTED pointer alignment */
	struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
	XDR *xdrs = svc_xdrs[xprt->xp_fd];

	xdrs->x_op = XDR_DECODE;

	if (cd->cf_conn_nonblock) {
		/* Get the next input */
		if (!__xdrrec_getbytes_nonblock(xdrs, &cd->strm_stat)) {
			/*
			 * The entire record has not been received.
			 * If the xprt has died, pass it along in svc_flags.
			 * Return FALSE; For nonblocked vc connection,
			 * xdr_callmsg() is called only after the entire
			 * record has been received.  For blocked vc
			 * connection, the data is received on the fly as it
			 * is being processed through the xdr routines.
			 */
			if (cd->strm_stat == XPRT_DIED)
				/* LINTED pointer cast */
				svc_flags(xprt) |= SVC_FAILED;
			return (FALSE);
		}
	} else {
		/* Blocking path: position the stream at the next record. */
		if (!xdrrec_skiprecord(xdrs))
			return (FALSE);
		(void) __xdrrec_setfirst(xdrs);
	}

	if (xdr_callmsg(xdrs, msg)) {
		/* Remember the xid so svc_vc_reply() can echo it back. */
		cd->x_id = msg->rm_xid;
		return (TRUE);
	}

	/*
	 * If a non-blocking connection, drop it when message decode fails.
	 * We are either under attack, or we're talking to a broken client.
	 */
	if (cd->cf_conn_nonblock) {
		/* LINTED pointer cast */
		svc_flags(xprt) |= SVC_FAILED;
	}

	return (FALSE);
}
1746
/*
 * Decode the call arguments from the connection's XDR stream,
 * unwrapping through the authentication layer.
 */
static bool_t
svc_vc_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
	bool_t dummy;

/* LINTED pointer alignment */
	dummy = SVCAUTH_UNWRAP(&SVC_XP_AUTH(xprt), svc_xdrs[xprt->xp_fd],
							xdr_args, args_ptr);
	/* In MT modes, tell the dispatcher the args have been consumed. */
	if (svc_mt_mode != RPC_SVC_MT_NONE)
		svc_args_done(xprt);
	return (dummy);
}
1759
/*
 * Release storage allocated while decoding the call arguments by
 * running the argument xdrproc in XDR_FREE mode.
 */
static bool_t
svc_vc_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
/* LINTED pointer alignment */
	XDR *xdrs = &(((struct cf_conn *)(xprt->xp_p1))->xdrs);

	xdrs->x_op = XDR_FREE;
	return ((*xdr_args)(xdrs, args_ptr));
}
1769
/*
 * Encode and send a reply message on the connection.  Results are
 * auth-wrapped; in MT modes the send is serialized on the parent's
 * send mutex.  Returns TRUE if the reply was encoded successfully.
 */
static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg)
{
/* LINTED pointer alignment */
	struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
	XDR *xdrs = &(cd->xdrs);
	bool_t stat = FALSE;
	xdrproc_t xdr_results;
	caddr_t xdr_location;
	bool_t has_args;

#ifdef __lock_lint
	(void) mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
#else
	if (svc_mt_mode != RPC_SVC_MT_NONE)
/* LINTED pointer alignment */
		(void) mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
#endif

	/*
	 * For a successful accepted call, detach the results proc so
	 * the header is encoded first and the (wrapped) results after.
	 */
	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
				msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		has_args = TRUE;
		xdr_results = msg->acpted_rply.ar_results.proc;
		xdr_location = msg->acpted_rply.ar_results.where;
		msg->acpted_rply.ar_results.proc = xdr_void;
		msg->acpted_rply.ar_results.where = NULL;
	} else
		has_args = FALSE;

	xdrs->x_op = XDR_ENCODE;
	/* Echo back the xid saved by svc_vc_recv(). */
	msg->rm_xid = cd->x_id;
/* LINTED pointer alignment */
	if (xdr_replymsg(xdrs, msg) && (!has_args || SVCAUTH_WRAP(
			&SVC_XP_AUTH(xprt), xdrs, xdr_results, xdr_location))) {
		stat = TRUE;
	}
	/* Flush the record even if encoding failed. */
	(void) xdrrec_endofrecord(xdrs, TRUE);

#ifdef __lock_lint
	(void) mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
#else
	if (svc_mt_mode != RPC_SVC_MT_NONE)
/* LINTED pointer alignment */
		(void) mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
#endif

	return (stat);
}
1818
1819static struct xp_ops *
1820svc_vc_ops(void)
1821{
1822	static struct xp_ops ops;
1823	extern mutex_t ops_lock;
1824
1825/* VARIABLES PROTECTED BY ops_lock: ops */
1826
1827	(void) mutex_lock(&ops_lock);
1828	if (ops.xp_recv == NULL) {
1829		ops.xp_recv = svc_vc_recv;
1830		ops.xp_stat = svc_vc_stat;
1831		ops.xp_getargs = svc_vc_getargs;
1832		ops.xp_reply = svc_vc_reply;
1833		ops.xp_freeargs = svc_vc_freeargs;
1834		ops.xp_destroy = svc_vc_destroy;
1835		ops.xp_control = svc_vc_control;
1836	}
1837	(void) mutex_unlock(&ops_lock);
1838	return (&ops);
1839}
1840
1841static struct xp_ops *
1842svc_vc_rendezvous_ops(void)
1843{
1844	static struct xp_ops ops;
1845	extern mutex_t ops_lock;
1846
1847	(void) mutex_lock(&ops_lock);
1848	if (ops.xp_recv == NULL) {
1849		ops.xp_recv = rendezvous_request;
1850		ops.xp_stat = rendezvous_stat;
1851		ops.xp_getargs = (bool_t (*)())abort;
1852		ops.xp_reply = (bool_t (*)())abort;
1853		ops.xp_freeargs = (bool_t (*)())abort,
1854		ops.xp_destroy = svc_vc_destroy;
1855		ops.xp_control = rendezvous_control;
1856	}
1857	(void) mutex_unlock(&ops_lock);
1858	return (&ops);
1859}
1860
1861/*
1862 * PRIVATE RPC INTERFACE
1863 *
1864 * This is a hack to let NIS+ clean up connections that have already been
1865 * closed.  This problem arises because rpc.nisd forks a child to handle
1866 * existing connections when it does checkpointing.  The child may close
1867 * some of these connections.  But the descriptors still stay open in the
1868 * parent, and because TLI descriptors don't support persistent EOF
1869 * condition (like sockets do), the parent will never detect that these
1870 * descriptors are dead.
1871 *
1872 * The following internal procedure __svc_nisplus_fdcleanup_hack() - should
1873 * be removed as soon as rpc.nisd is rearchitected to do the right thing.
1874 * This procedure should not find its way into any header files.
1875 *
1876 * This procedure should be called only when rpc.nisd knows that there
1877 * are no children servicing clients.
1878 */
1879
/*
 * Decide whether a TLI descriptor no longer carries a live connection.
 * Issues a TI_GETINFO ioctl; a failed or short reply, or an endpoint
 * in the unbound (TS_UNBND) or idle (TS_IDLE) state, counts as dead.
 * Note: the request primitive occupies only sizeof (struct T_info_req)
 * bytes of the larger T_info_ack buffer, hence the mixed sizeofs.
 */
static bool_t
fd_is_dead(int fd)
{
	struct T_info_ack inforeq;
	int retval;

	inforeq.PRIM_type = T_INFO_REQ;
	if (!_t_do_ioctl(fd, (caddr_t)&inforeq, sizeof (struct T_info_req),
						TI_GETINFO, &retval))
		return (TRUE);
	if (retval != (int)sizeof (struct T_info_ack))
		return (TRUE);

	switch (inforeq.CURRENT_state) {
	case TS_UNBND:
	case TS_IDLE:
		return (TRUE);
	default:
		break;
	}
	return (FALSE);
}
1902
/*
 * Scan all registered connection transports and destroy those whose
 * underlying TLI descriptor is dead (see fd_is_dead()).  Works in
 * batches of CLEANUP_SIZE per acquisition of svc_fd_lock.
 */
void
__svc_nisplus_fdcleanup_hack(void)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	int i, fd_idx = 0, dead_idx = 0;

	if (svc_xports == NULL)
		return;
	for (;;) {
		(void) rw_wrlock(&svc_fd_lock);
		/* Collect up to CLEANUP_SIZE dead fds this pass. */
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL)
				continue;
/* LINTED pointer alignment */
			if (svc_type(xprt) != SVC_CONNECTION)
				continue;
			if (fd_is_dead(fd_idx)) {
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			}
		}

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}
		(void) rw_unlock(&svc_fd_lock);
		/* Done once the whole poll range has been covered. */
		if (fd_idx++ >= svc_max_pollfd)
			return;
	}
}
1936
1937void
1938__svc_nisplus_enable_timestamps(void)
1939{
1940	(void) mutex_lock(&timestamp_lock);
1941	if (!timestamps) {
1942		timestamps = calloc(FD_INCREMENT, sizeof (long));
1943		if (timestamps != NULL)
1944			ntimestamps = FD_INCREMENT;
1945		else {
1946			(void) mutex_unlock(&timestamp_lock);
1947			syslog(LOG_ERR,
1948				"__svc_nisplus_enable_timestamps: "
1949				"out of memory");
1950			return;
1951		}
1952	}
1953	(void) mutex_unlock(&timestamp_lock);
1954}
1955
/*
 * Destroy every connection transport whose recorded activity
 * timestamp is non-zero and older than "since".  Works in batches of
 * CLEANUP_SIZE per acquisition of svc_fd_lock.
 */
void
__svc_nisplus_purge_since(long since)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	int i, fd_idx = 0, dead_idx = 0;

	if (svc_xports == NULL)
		return;
	for (;;) {
		(void) rw_wrlock(&svc_fd_lock);
		(void) mutex_lock(&timestamp_lock);
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL) {
				continue;
			}
			/* LINTED pointer cast */
			if (svc_type(xprt) != SVC_CONNECTION) {
				continue;
			}
			/* fds beyond the timestamp table have no record. */
			if (fd_idx >= ntimestamps) {
				break;
			}
			if (timestamps[fd_idx] &&
			    timestamps[fd_idx] < since) {
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			}
		}
		(void) mutex_unlock(&timestamp_lock);

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}
		(void) rw_unlock(&svc_fd_lock);
		/* Done once the whole poll range has been covered. */
		if (fd_idx++ >= svc_max_pollfd)
			return;
	}
}
1997
1998/*
1999 * dup cache wrapper functions for vc requests. The set of dup
2000 * functions were written with the view that they may be expanded
2001 * during creation of a generic svc_vc_enablecache routine
2002 * which would have a size based cache, rather than a time based cache.
2003 * The real work is done in generic svc.c
2004 */
2005bool_t
2006__svc_vc_dupcache_init(SVCXPRT *xprt, void *condition, int basis)
2007{
2008	return (__svc_dupcache_init(condition, basis,
2009		/* LINTED pointer alignment */
2010		&(((struct cf_rendezvous *)xprt->xp_p1)->cf_cache)));
2011}
2012
2013int
2014__svc_vc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz)
2015{
2016	return (__svc_dup(req, resp_buf, resp_bufsz,
2017		/* LINTED pointer alignment */
2018		((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
2019}
2020
2021int
2022__svc_vc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2023				int status)
2024{
2025	return (__svc_dupdone(req, resp_buf, resp_bufsz, status,
2026		/* LINTED pointer alignment */
2027		((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
2028}
2029