/*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/rpc/svc.c 291384 2015-11-27 14:38:36Z mav $");

/*
 * svc.c, Server-side remote procedure call interface.
 *
 * There are two sets of procedures here.  The xprt routines are
 * for handling transport handles.  The svc routines handle the
 * list of service routines.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/ucred.h>

#include <rpc/rpc.h>
#include <rpc/rpcb_clnt.h>
#include <rpc/replay.h>

#include <rpc/rpc_com.h>

#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
#define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)

static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
    char *);
static void svc_new_thread(SVCGROUP *grp);
static void xprt_unregister_locked(SVCXPRT *xprt);
static void svc_change_space_used(SVCPOOL *pool, long delta);
static bool_t svc_request_space_available(SVCPOOL *pool);

/* ***************  SVCXPRT related stuff **************** */

static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS);

SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;
	SVCGROUP *grp;
	int g;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_callouts);
	TAILQ_INIT(&pool->sp_lcallouts);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_groupcount = 1;
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
		grp->sg_pool = pool;
		grp->sg_state = SVCPOOL_ACTIVE;
		TAILQ_INIT(&grp->sg_xlist);
		TAILQ_INIT(&grp->sg_active);
		LIST_INIT(&grp->sg_idlethreads);
		grp->sg_minthreads = 1;
		grp->sg_maxthreads = 1;
	}

	/*
	 * Don't use more than a quarter of mbuf clusters.  Nota bene:
	 * nmbclusters is an int, but nmbclusters*MCLBYTES may overflow
	 * on LP64 architectures, so cast to u_long to avoid undefined
	 * behavior.  (ILP32 architectures cannot have nmbclusters
	 * large enough to overflow for other reasons.)
	 */
	pool->sp_space_high = (u_long)nmbclusters * MCLBYTES / 4;
	pool->sp_space_low = (pool->sp_space_high / 3) * 2;
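	/*
	 * Worked example (hypothetical values): with nmbclusters = 65536
	 * and MCLBYTES = 2048 the cluster pool is 128MB, giving
	 * sp_space_high = 32MB and sp_space_low of roughly 21.3MB, so
	 * request parsing throttles once 32MB of requests are queued
	 * and resumes when usage falls below the low-water mark.
	 */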

	sysctl_ctx_init(&pool->sp_sysctl);
	if (sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_minthread_sysctl, "I",
		    "Minimum number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_maxthread_sysctl, "I",
		    "Maximum number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLTYPE_INT | CTLFLAG_RD,
		    pool, 0, svcpool_threads_sysctl, "I",
		    "Current number of threads");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
		    "Number of thread groups");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest,
		    "Highest space used since reboot.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low,
		    "Low water mark for request space.");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return pool;
}
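
/*
 * Example (sketch, hypothetical names): a typical consumer creates a
 * pool once, optionally attaching its sysctl tree, and destroys it on
 * unload.  Passing a NULL sysctl_base skips sysctl registration:
 *
 *	SVCPOOL *mypool;
 *
 *	mypool = svcpool_create("myrpc",
 *	    SYSCTL_STATIC_CHILDREN(_vfs_myrpc));
 *	...
 *	svcpool_destroy(mypool);
 */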

void
svcpool_destroy(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svc_loss_callout *sl;
	struct svcxprt_list cleanup;
	int g;

	TAILQ_INIT(&cleanup);

	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
		mtx_unlock(&grp->sg_lock);
	}
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}

	mtx_lock(&pool->sp_lock);
	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_loss_unreg(pool, sl->slc_dispatch);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);

	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_destroy(&grp->sg_lock);
	}
	mtx_destroy(&pool->sp_lock);

	if (pool->sp_rcache)
		replay_freecache(pool->sp_rcache);

	sysctl_ctx_free(&pool->sp_sysctl);
	free(pool, M_RPC);
}

/*
 * Sysctl handler to get the present thread count on a pool
 */
static int
svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int threads, error, g;

	pool = oidp->oid_arg1;
	threads = 0;
	mtx_lock(&pool->sp_lock);
	for (g = 0; g < pool->sp_groupcount; g++)
		threads += pool->sp_groups[g].sg_threadcount;
	mtx_unlock(&pool->sp_lock);
	error = sysctl_handle_int(oidp, &threads, 0, req);
	return (error);
}

/*
 * Sysctl handler to set the minimum thread count on a pool
 */
static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newminthreads, error, g;

	pool = oidp->oid_arg1;
	newminthreads = pool->sp_minthreads;
	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
	if (error == 0 && newminthreads != pool->sp_minthreads) {
		if (newminthreads > pool->sp_maxthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		pool->sp_minthreads = newminthreads;
		for (g = 0; g < pool->sp_groupcount; g++) {
			pool->sp_groups[g].sg_minthreads = max(1,
			    pool->sp_minthreads / pool->sp_groupcount);
		}
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Sysctl handler to set the maximum thread count on a pool
 */
static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newmaxthreads, error, g;

	pool = oidp->oid_arg1;
	newmaxthreads = pool->sp_maxthreads;
	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
		if (newmaxthreads < pool->sp_minthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		pool->sp_maxthreads = newmaxthreads;
		for (g = 0; g < pool->sp_groupcount; g++) {
			pool->sp_groups[g].sg_maxthreads = max(1,
			    pool->sp_maxthreads / pool->sp_groupcount);
		}
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}
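
/*
 * For example, the NFS server attaches its pool's sysctls under
 * vfs.nfsd (OID names here assume that consumer), so an administrator
 * can resize the pool at runtime:
 *
 *	sysctl vfs.nfsd.minthreads=4
 *	sysctl vfs.nfsd.maxthreads=64
 *
 * The handlers above return EINVAL for settings that would invert the
 * minthreads <= maxthreads invariant.
 */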

/*
 * Activate a transport handle.
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCGROUP *grp;
	int g;

	SVC_ACQUIRE(xprt);
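	/* Distribute transports across the pool's thread groups round-robin. */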
	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
	xprt->xp_group = grp = &pool->sp_groups[g];
	mtx_lock(&grp->sg_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
	mtx_unlock(&grp->sg_lock);
}

/*
 * De-activate a transport handle. Note: the locked version doesn't
 * release the transport - the caller must do that after dropping the
 * group lock.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	xprt_inactive_locked(xprt);
	TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}

void
xprt_unregister(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	if (xprt->xp_registered == FALSE) {
		/* Already unregistered by another thread */
		mtx_unlock(&grp->sg_lock);
		return;
	}
	xprt_unregister_locked(xprt);
	mtx_unlock(&grp->sg_lock);

	SVC_RELEASE(xprt);
}

/*
 * Attempt to assign a service thread to this transport.
 */
static int
xprt_assignthread(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;
	SVCTHREAD *st;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	st = LIST_FIRST(&grp->sg_idlethreads);
	if (st) {
		LIST_REMOVE(st, st_ilink);
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
		return (TRUE);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (grp->sg_state == SVCPOOL_ACTIVE
		    && grp->sg_lastcreatetime < time_uptime
		    && grp->sg_threadcount < grp->sg_maxthreads) {
			grp->sg_state = SVCPOOL_THREADWANTED;
		}
	}
	return (FALSE);
}

void
xprt_active(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&grp->sg_lock);
		return;
	}

	if (!xprt->xp_active) {
		xprt->xp_active = TRUE;
		if (xprt->xp_thread == NULL) {
			if (!svc_request_space_available(xprt->xp_pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
				    xp_alink);
		}
	}

	mtx_unlock(&grp->sg_lock);
}

void
xprt_inactive_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	if (xprt->xp_active) {
		if (xprt->xp_thread == NULL)
			TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
}

void
xprt_inactive(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&grp->sg_lock);
}

/*
 * Variant of xprt_inactive() for use only when it is certain that the
 * transport is assigned to a thread, for example within receive
 * handlers.
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

	KASSERT(xprt->xp_thread != NULL,
	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
	xprt->xp_active = FALSE;
}

/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when an rpc request for this
 * program number comes in.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_callout *s;
	char *netid = NULL;
	int flag = 0;

/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid, M_RPC);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid, M_RPC);
		flag = 1;
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1)) {
		return (FALSE);
	}

	mtx_lock(&pool->sp_lock);
	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
		if (netid)
			free(netid, M_RPC);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* it is registering another xprt */
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		if (netid)
			free(netid, M_RPC);
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}

	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
	mtx_unlock(&pool->sp_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		bool_t dummy;
		struct netconfig tnc;
		struct netbuf nb;
		tnc = *nconf;
		nb.buf = &xprt->xp_ltaddr;
		nb.len = xprt->xp_ltaddr.ss_len;
		dummy = rpcb_set(prog, vers, &tnc, &nb);
		return (dummy);
	}
	return (TRUE);
}
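
/*
 * Example (sketch, hypothetical program number and dispatcher): after
 * creating a transport, a service registers each supported version so
 * that svc_executereq() can route requests to it.  A NULL nconf skips
 * the rpcbind registration step at the end of svc_reg():
 *
 *	#define MYPROG	0x20000001
 *	#define MYVERS	1
 *
 *	static void myprog_dispatch(struct svc_req *, SVCXPRT *);
 *
 *	if (!svc_reg(xprt, MYPROG, MYVERS, myprog_dispatch, nconf))
 *		return (ENXIO);
 */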

/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
{
	struct svc_callout *s;

	/* unregister the information anyway */
	(void) rpcb_unset(prog, vers, NULL);
	mtx_lock(&pool->sp_lock);
	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
		if (s->sc_netid)
			mem_free(s->sc_netid, strlen(s->sc_netid) + 1);
		mem_free(s, sizeof (struct svc_callout));
	}
	mtx_unlock(&pool->sp_lock);
}

/*
 * Add a service connection loss program to the callout list.
 * The dispatch routine will be called when some port in this pool dies.
 */
bool_t
svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch)
			break;
	}
	if (s != NULL) {
		mtx_unlock(&pool->sp_lock);
		return (TRUE);
	}
	s = malloc(sizeof (struct svc_loss_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s->slc_dispatch = dispatch;
	TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
	mtx_unlock(&pool->sp_lock);
	return (TRUE);
}
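
/*
 * Example (sketch, hypothetical handler): a service keeping
 * per-connection state can register a loss callout to tear that state
 * down when a transport dies.  svc_getreq() invokes every registered
 * loss callout once SVC_STAT() reports XPRT_DIED:
 *
 *	static void
 *	myprog_loss(SVCXPRT *xprt)
 *	{
 *		... drop any state keyed on this transport ...
 *	}
 *
 *	(void) svc_loss_reg(xprt, myprog_loss);
 */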

/*
 * Remove a service connection loss program from the callout list.
 */
void
svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
{
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch) {
			TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
			free(s, M_RPC);
			break;
		}
	}
	mtx_unlock(&pool->sp_lock);
}

/* ********************** CALLOUT list related stuff ************* */

/*
 * Search the callout list for a program number, return the callout
 * struct.
 */
static struct svc_callout *
svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
{
	struct svc_callout *s;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == prog && s->sc_vers == vers
		    && (netid == NULL || s->sc_netid == NULL ||
			strcmp(netid, s->sc_netid) == 0))
			break;
	}

	return (s);
}

/* ******************* REPLY GENERATION ROUTINES  ************ */

static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}

/*
 * Send a reply to an rpc request
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
{
	struct rpc_msg rply;
	struct mbuf *m;
	XDR xdrs;
	bool_t ok;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	m = m_getcl(M_WAITOK, MT_DATA, 0);
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	ok = xdr_results(&xdrs, xdr_location);
	XDR_DESTROY(&xdrs);

	if (ok) {
		return (svc_sendreply_common(rqstp, &rply, m));
	} else {
		m_freem(m);
		return (FALSE);
	}
}
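
/*
 * Example (sketch, hypothetical XDR routines and handler): a dispatch
 * function registered with svc_reg() typically decodes the arguments,
 * performs the call, replies and releases the request:
 *
 *	static void
 *	myprog_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
 *	{
 *		my_args args;
 *		my_res res;
 *
 *		memset(&args, 0, sizeof(args));
 *		if (!svc_getargs(rqstp, (xdrproc_t) xdr_my_args, &args)) {
 *			svcerr_decode(rqstp);
 *			svc_freereq(rqstp);
 *			return;
 *		}
 *		res = do_my_call(&args);
 *		(void) svc_sendreply(rqstp, (xdrproc_t) xdr_my_res, &res);
 *		(void) svc_freeargs(rqstp, (xdrproc_t) xdr_my_args, &args);
 *		svc_freereq(rqstp);
 *	}
 */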

bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (svc_sendreply_common(rqstp, &rply, m));
}

/*
 * No procedure error reply
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Some system error
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Authentication error reply
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	svcerr_auth(rqstp, AUTH_TOOWEAK);
}

/*
 * Program unavailable error reply
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Program version mismatch error reply
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Allocate a new server transport structure. All fields are
 * initialized to zero and xp_p3 is initialized to point at an
 * extension structure to hold various flags and authentication
 * parameters.
 */
SVCXPRT *
svc_xprt_alloc(void)
{
	SVCXPRT *xprt;
	SVCXPRT_EXT *ext;

	xprt = mem_alloc(sizeof(SVCXPRT));
	memset(xprt, 0, sizeof(SVCXPRT));
	ext = mem_alloc(sizeof(SVCXPRT_EXT));
	memset(ext, 0, sizeof(SVCXPRT_EXT));
	xprt->xp_p3 = ext;
	refcount_init(&xprt->xp_refs, 1);

	return (xprt);
}

/*
 * Free a server transport structure.
 */
void
svc_xprt_free(SVCXPRT *xprt)
{

	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
	mem_free(xprt, sizeof(SVCXPRT));
}
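
/*
 * Example (sketch): a transport backend allocates the handle, fills in
 * its fields and method table, and then activates it; the exact fields
 * set here follow the TCP backend but are illustrative only:
 *
 *	SVCXPRT *xprt;
 *
 *	xprt = svc_xprt_alloc();
 *	sx_init(&xprt->xp_lock, "xprt->xp_lock");
 *	xprt->xp_pool = pool;
 *	xprt->xp_socket = so;
 *	...
 *	xprt_register(xprt);
 *
 * On teardown, SVC_RELEASE() drops xp_refs and the last reference
 * frees the handle via svc_xprt_free().
 */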

/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Read RPC requests from a transport and queue them to be
 * executed. We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred until svc_executereq.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	struct svc_loss_callout *s;
	enum xprt_stat stat;

	/* now receive msgs from xprt (support batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		SVC_ACQUIRE(xprt);
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody, &r->rq_reply_seq);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				m_freem(args);
				goto call_done;

			default:
				m_freem(args);
				goto call_done;
			}
		}

		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
			(*s->slc_dispatch)(xprt);
		xprt_unregister(xprt);
	}

	return (stat);
}

static void
svc_executereq(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	SVCPOOL *pool = xprt->xp_pool;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	struct svc_callout *s;

	/* now match message with a registered service */
	prog_found = FALSE;
	low_vers = (rpcvers_t) -1L;
	high_vers = (rpcvers_t) 0L;
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == rqstp->rq_prog) {
			if (s->sc_vers == rqstp->rq_vers) {
				/*
				 * We hand ownership of r to the
				 * dispatch method - they must call
				 * svc_freereq.
				 */
				(*s->sc_dispatch)(rqstp, xprt);
				return;
			}  /* found correct version */
			prog_found = TRUE;
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		}   /* found correct program */
	}

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found)
		svcerr_progvers(rqstp, low_vers, high_vers);
	else
		svcerr_noprog(rqstp);

	svc_freereq(rqstp);
}

static void
svc_checkidle(SVCGROUP *grp)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers. Don't time
		 * something out which is just waking up.
		 */
		if (!xprt->xp_idletimeout || xprt->xp_thread)
			continue;

		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
		if (time_uptime > timo) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
	}

	mtx_unlock(&grp->sg_lock);
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}
	mtx_lock(&grp->sg_lock);
}

static void
svc_assign_waiting_sockets(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCXPRT *xprt;
	int g;

	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
			if (xprt_assignthread(xprt))
				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
			else
				break;
		}
		mtx_unlock(&grp->sg_lock);
	}
}

static void
svc_change_space_used(SVCPOOL *pool, long delta)
{
	unsigned long value;

	value = atomic_fetchadd_long(&pool->sp_space_used, delta) + delta;
	if (delta > 0) {
		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
			pool->sp_space_throttled = TRUE;
			pool->sp_space_throttle_count++;
		}
		if (value > pool->sp_space_used_highest)
			pool->sp_space_used_highest = value;
	} else {
		if (value < pool->sp_space_low && pool->sp_space_throttled) {
			pool->sp_space_throttled = FALSE;
			svc_assign_waiting_sockets(pool);
		}
	}
}

static bool_t
svc_request_space_available(SVCPOOL *pool)
{

	if (pool->sp_space_throttled)
		return (FALSE);
	return (TRUE);
}

static void
svc_run_internal(SVCGROUP *grp, bool_t ismaster)
{
	SVCPOOL *pool = grp->sg_pool;
	SVCTHREAD *st, *stpref;
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct svc_req *rqstp;
	struct proc *p;
	long sz;
	int error;

	st = mem_alloc(sizeof(*st));
	mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF);
	st->st_pool = pool;
	st->st_xprt = NULL;
	STAILQ_INIT(&st->st_reqs);
	cv_init(&st->st_cond, "rpcsvc");

	mtx_lock(&grp->sg_lock);

	/*
	 * If we are a new thread which was spawned to cope with
	 * increased load, set the state back to SVCPOOL_ACTIVE.
	 */
	if (grp->sg_state == SVCPOOL_THREADSTARTING)
		grp->sg_state = SVCPOOL_ACTIVE;

	while (grp->sg_state != SVCPOOL_CLOSING) {
		/*
		 * Create new thread if requested.
		 */
		if (grp->sg_state == SVCPOOL_THREADWANTED) {
			grp->sg_state = SVCPOOL_THREADSTARTING;
			grp->sg_lastcreatetime = time_uptime;
			mtx_unlock(&grp->sg_lock);
			svc_new_thread(grp);
			mtx_lock(&grp->sg_lock);
			continue;
		}

		/*
		 * Check for idle transports once per second.
		 */
		if (time_uptime > grp->sg_lastidlecheck) {
			grp->sg_lastidlecheck = time_uptime;
			svc_checkidle(grp);
		}

		xprt = st->st_xprt;
		if (!xprt) {
			/*
			 * Enforce maxthreads count.
			 */
			if (grp->sg_threadcount > grp->sg_maxthreads)
				break;

			/*
			 * Before sleeping, see if we can find an
			 * active transport which isn't being serviced
			 * by a thread.
			 */
			if (svc_request_space_available(pool) &&
			    (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
				SVC_ACQUIRE(xprt);
				xprt->xp_thread = st;
				st->st_xprt = xprt;
				continue;
			}

			LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink);
			if (ismaster ||
			    grp->sg_threadcount > grp->sg_minthreads)
				error = cv_timedwait_sig(&st->st_cond,
				    &grp->sg_lock, 5 * hz);
			else
				error = cv_wait_sig(&st->st_cond,
				    &grp->sg_lock);
			if (st->st_xprt == NULL)
				LIST_REMOVE(st, st_ilink);

			/*
			 * Reduce worker thread count when idle.
			 */
			if (error == EWOULDBLOCK) {
				if (!ismaster
				    && (grp->sg_threadcount
					> grp->sg_minthreads)
					&& !st->st_xprt)
					break;
			} else if (error != 0) {
				KASSERT(error == EINTR || error == ERESTART,
				    ("non-signal error %d", error));
				mtx_unlock(&grp->sg_lock);
				p = curproc;
				PROC_LOCK(p);
				if (P_SHOULDSTOP(p) ||
				    (p->p_flag & P_TOTAL_STOP) != 0) {
					thread_suspend_check(0);
					PROC_UNLOCK(p);
					mtx_lock(&grp->sg_lock);
				} else {
					PROC_UNLOCK(p);
					svc_exit(pool);
					mtx_lock(&grp->sg_lock);
					break;
				}
			}
			continue;
		}
		mtx_unlock(&grp->sg_lock);

		/*
		 * Drain the transport socket and queue up any RPCs.
		 */
		xprt->xp_lastactive = time_uptime;
		do {
			if (!svc_request_space_available(pool))
				break;
			rqstp = NULL;
			stat = svc_getreq(xprt, &rqstp);
			if (rqstp) {
				svc_change_space_used(pool, rqstp->rq_size);
				/*
				 * See if the application has a preference
				 * for some other thread.
				 */
				if (pool->sp_assign) {
					stpref = pool->sp_assign(st, rqstp);
					rqstp->rq_thread = stpref;
					STAILQ_INSERT_TAIL(&stpref->st_reqs,
					    rqstp, rq_link);
					mtx_unlock(&stpref->st_lock);
					if (stpref != st)
						rqstp = NULL;
				} else {
					rqstp->rq_thread = st;
					STAILQ_INSERT_TAIL(&st->st_reqs,
					    rqstp, rq_link);
				}
			}
		} while (rqstp == NULL && stat == XPRT_MOREREQS
		    && grp->sg_state != SVCPOOL_CLOSING);

		/*
		 * Move this transport to the end of the active list to
		 * ensure fairness when multiple transports are active.
		 * If this was the last queued request, svc_getreq will end
		 * up calling xprt_inactive to remove from the active list.
		 */
		mtx_lock(&grp->sg_lock);
		xprt->xp_thread = NULL;
		st->st_xprt = NULL;
		if (xprt->xp_active) {
			if (!svc_request_space_available(pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active,
				    xprt, xp_alink);
		}
		mtx_unlock(&grp->sg_lock);
		SVC_RELEASE(xprt);

		/*
		 * Execute what we have queued.
		 */
		mtx_lock(&st->st_lock);
		while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
			STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
			mtx_unlock(&st->st_lock);
			sz = (long)rqstp->rq_size;
			svc_executereq(rqstp);
			svc_change_space_used(pool, -sz);
			mtx_lock(&st->st_lock);
		}
		mtx_unlock(&st->st_lock);
		mtx_lock(&grp->sg_lock);
	}

	if (st->st_xprt) {
		xprt = st->st_xprt;
		st->st_xprt = NULL;
		SVC_RELEASE(xprt);
	}
	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
	mtx_destroy(&st->st_lock);
	cv_destroy(&st->st_cond);
	mem_free(st, sizeof(*st));

	grp->sg_threadcount--;
	if (!ismaster)
		wakeup(grp);
	mtx_unlock(&grp->sg_lock);
}

static void
svc_thread_start(void *arg)
{

	svc_run_internal((SVCGROUP *) arg, FALSE);
	kthread_exit();
}

static void
svc_new_thread(SVCGROUP *grp)
{
	SVCPOOL *pool = grp->sg_pool;
	struct thread *td;

	mtx_lock(&grp->sg_lock);
	grp->sg_threadcount++;
	mtx_unlock(&grp->sg_lock);
	kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
	    "%s: service", pool->sp_name);
}

void
svc_run(SVCPOOL *pool)
{
	int g, i;
	struct proc *p;
	struct thread *td;
	SVCGROUP *grp;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;

	/* Choose group count based on number of threads and CPUs. */
	pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
	    min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		grp->sg_minthreads = max(1,
		    pool->sp_minthreads / pool->sp_groupcount);
		grp->sg_maxthreads = max(1,
		    pool->sp_maxthreads / pool->sp_groupcount);
		grp->sg_lastcreatetime = time_uptime;
	}

	/* Start the threads. */
	pool->sp_groups[0].sg_threadcount++;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
			svc_new_thread(grp);
	}
	svc_run_internal(&pool->sp_groups[0], TRUE);

	/* Wait for the remaining threads to stop. */
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while (grp->sg_threadcount > 0)
			msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
		mtx_unlock(&grp->sg_lock);
	}
}
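
/*
 * Example (sketch, hypothetical consumer): the usual lifecycle ties
 * the pieces above together.  The calling thread becomes the pool's
 * master and does not return from svc_run() until svc_exit() is
 * called, e.g. from a shutdown path:
 *
 *	pool = svcpool_create("myrpc", NULL);
 *	xprt = svc_vc_create(pool, so, 0, 0);
 *	svc_reg(xprt, MYPROG, MYVERS, myprog_dispatch, nconf);
 *	svc_run(pool);
 *	svcpool_destroy(pool);
 */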

void
svc_exit(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCTHREAD *st;
	int g;

	pool->sp_state = SVCPOOL_CLOSING;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		if (grp->sg_state != SVCPOOL_CLOSING) {
			grp->sg_state = SVCPOOL_CLOSING;
			LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
				cv_signal(&st->st_cond);
		}
		mtx_unlock(&grp->sg_lock);
	}
}

bool_t
svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	struct mbuf *m;
	XDR xdrs;
	bool_t stat;

	m = rqstp->rq_args;
	rqstp->rq_args = NULL;

	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	stat = xargs(&xdrs, args);
	XDR_DESTROY(&xdrs);

	return (stat);
}

bool_t
svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	XDR xdrs;

	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	xdrs.x_op = XDR_FREE;
	return (xargs(&xdrs, args));
}
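
/*
 * Note on pairing: svc_getargs() consumes rq_args and decodes into
 * caller-provided storage, while svc_freeargs() reruns the same XDR
 * routine with x_op = XDR_FREE so that any memory the decode
 * allocated (strings, variable-length arrays) is released, as in the
 * dispatch sketch after svc_sendreply() above.
 */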

void
svc_freereq(struct svc_req *rqstp)
{
	SVCTHREAD *st;
	SVCPOOL *pool;

	st = rqstp->rq_thread;
	if (st) {
		pool = st->st_pool;
		if (pool->sp_done)
			pool->sp_done(st, rqstp);
	}

	if (rqstp->rq_auth.svc_ah_ops)
		SVCAUTH_RELEASE(&rqstp->rq_auth);

	if (rqstp->rq_xprt) {
		SVC_RELEASE(rqstp->rq_xprt);
	}

	if (rqstp->rq_addr)
		free(rqstp->rq_addr, M_SONAME);

	if (rqstp->rq_args)
		m_freem(rqstp->rq_args);

	free(rqstp, M_RPC);
}