1177633Sdfr/*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/
2177633Sdfr
3261046Smav/*-
4261046Smav * Copyright (c) 2009, Sun Microsystems, Inc.
5261046Smav * All rights reserved.
6177633Sdfr *
7261046Smav * Redistribution and use in source and binary forms, with or without
8261046Smav * modification, are permitted provided that the following conditions are met:
9261046Smav * - Redistributions of source code must retain the above copyright notice,
10261046Smav *   this list of conditions and the following disclaimer.
11261046Smav * - Redistributions in binary form must reproduce the above copyright notice,
12261046Smav *   this list of conditions and the following disclaimer in the documentation
13261046Smav *   and/or other materials provided with the distribution.
14261046Smav * - Neither the name of Sun Microsystems, Inc. nor the names of its
15261046Smav *   contributors may be used to endorse or promote products derived
16261046Smav *   from this software without specific prior written permission.
17261046Smav *
18261046Smav * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19261046Smav * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20261046Smav * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21261046Smav * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22261046Smav * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23261046Smav * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24261046Smav * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25261046Smav * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26261046Smav * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27261046Smav * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28261046Smav * POSSIBILITY OF SUCH DAMAGE.
29177633Sdfr */
30177633Sdfr
31177633Sdfr#if defined(LIBC_SCCS) && !defined(lint)
32177633Sdfrstatic char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
33177633Sdfrstatic char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
34177633Sdfr#endif
35177633Sdfr#include <sys/cdefs.h>
36177633Sdfr__FBSDID("$FreeBSD: stable/10/sys/rpc/svc.c 336928 2018-07-30 19:29:31Z rmacklem $");
37177633Sdfr
38177633Sdfr/*
39177633Sdfr * svc.c, Server-side remote procedure call interface.
40177633Sdfr *
41177633Sdfr * There are two sets of procedures here.  The xprt routines are
42177633Sdfr * for handling transport handles.  The svc routines handle the
43177633Sdfr * list of service routines.
44177633Sdfr *
45177633Sdfr * Copyright (C) 1984, Sun Microsystems, Inc.
46177633Sdfr */
47177633Sdfr
48177633Sdfr#include <sys/param.h>
49177633Sdfr#include <sys/lock.h>
50177633Sdfr#include <sys/kernel.h>
51184588Sdfr#include <sys/kthread.h>
52177633Sdfr#include <sys/malloc.h>
53184588Sdfr#include <sys/mbuf.h>
54177633Sdfr#include <sys/mutex.h>
55184588Sdfr#include <sys/proc.h>
56177633Sdfr#include <sys/queue.h>
57184588Sdfr#include <sys/socketvar.h>
58177633Sdfr#include <sys/systm.h>
59267742Smav#include <sys/smp.h>
60261055Smav#include <sys/sx.h>
61177633Sdfr#include <sys/ucred.h>
62177633Sdfr
63177633Sdfr#include <rpc/rpc.h>
64177633Sdfr#include <rpc/rpcb_clnt.h>
65184588Sdfr#include <rpc/replay.h>
66177633Sdfr
67177685Sdfr#include <rpc/rpc_com.h>
68177633Sdfr
69177633Sdfr#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
70184588Sdfr#define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)
71177633Sdfr
72177633Sdfrstatic struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
73177633Sdfr    char *);
74267742Smavstatic void svc_new_thread(SVCGROUP *grp);
75184588Sdfrstatic void xprt_unregister_locked(SVCXPRT *xprt);
76290203Swollmanstatic void svc_change_space_used(SVCPOOL *pool, long delta);
77261054Smavstatic bool_t svc_request_space_available(SVCPOOL *pool);
78314034Savgstatic void svcpool_cleanup(SVCPOOL *pool);
79177633Sdfr
80177633Sdfr/* ***************  SVCXPRT related stuff **************** */
81177633Sdfr
82184588Sdfrstatic int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
83184588Sdfrstatic int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);
84267742Smavstatic int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS);
85184588Sdfr
86177633SdfrSVCPOOL*
87184588Sdfrsvcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
88177633Sdfr{
89177633Sdfr	SVCPOOL *pool;
90267742Smav	SVCGROUP *grp;
91267742Smav	int g;
92177633Sdfr
93177633Sdfr	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);
94177633Sdfr
95177633Sdfr	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
96184588Sdfr	pool->sp_name = name;
97184588Sdfr	pool->sp_state = SVCPOOL_INIT;
98184588Sdfr	pool->sp_proc = NULL;
99177633Sdfr	TAILQ_INIT(&pool->sp_callouts);
100261055Smav	TAILQ_INIT(&pool->sp_lcallouts);
101184588Sdfr	pool->sp_minthreads = 1;
102184588Sdfr	pool->sp_maxthreads = 1;
103267742Smav	pool->sp_groupcount = 1;
104267742Smav	for (g = 0; g < SVC_MAXGROUPS; g++) {
105267742Smav		grp = &pool->sp_groups[g];
106267742Smav		mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
107267742Smav		grp->sg_pool = pool;
108267742Smav		grp->sg_state = SVCPOOL_ACTIVE;
109267742Smav		TAILQ_INIT(&grp->sg_xlist);
110267742Smav		TAILQ_INIT(&grp->sg_active);
111267742Smav		LIST_INIT(&grp->sg_idlethreads);
112267742Smav		grp->sg_minthreads = 1;
113267742Smav		grp->sg_maxthreads = 1;
114267742Smav	}
115177633Sdfr
116184588Sdfr	/*
117290203Swollman	 * Don't use more than a quarter of mbuf clusters.  Nota bene:
118290203Swollman	 * nmbclusters is an int, but nmbclusters*MCLBYTES may overflow
119290203Swollman	 * on LP64 architectures, so cast to u_long to avoid undefined
120290203Swollman	 * behavior.  (ILP32 architectures cannot have nmbclusters
121290203Swollman	 * large enough to overflow for other reasons.)
122184588Sdfr	 */
123290203Swollman	pool->sp_space_high = (u_long)nmbclusters * MCLBYTES / 4;
124290203Swollman	pool->sp_space_low = (pool->sp_space_high / 3) * 2;
125184588Sdfr
126184588Sdfr	sysctl_ctx_init(&pool->sp_sysctl);
127184588Sdfr	if (sysctl_base) {
128184588Sdfr		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
129184588Sdfr		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
130267742Smav		    pool, 0, svcpool_minthread_sysctl, "I",
131267742Smav		    "Minimal number of threads");
132184588Sdfr		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
133184588Sdfr		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
134267742Smav		    pool, 0, svcpool_maxthread_sysctl, "I",
135267742Smav		    "Maximal number of threads");
136267742Smav		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
137267742Smav		    "threads", CTLTYPE_INT | CTLFLAG_RD,
138267742Smav		    pool, 0, svcpool_threads_sysctl, "I",
139267742Smav		    "Current number of threads");
140184588Sdfr		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
141267742Smav		    "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
142267742Smav		    "Number of thread groups");
143184588Sdfr
144290203Swollman		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
145184588Sdfr		    "request_space_used", CTLFLAG_RD,
146290203Swollman		    &pool->sp_space_used,
147184588Sdfr		    "Space in parsed but not handled requests.");
148184588Sdfr
149290203Swollman		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
150184588Sdfr		    "request_space_used_highest", CTLFLAG_RD,
151290203Swollman		    &pool->sp_space_used_highest,
152184588Sdfr		    "Highest space used since reboot.");
153184588Sdfr
154290203Swollman		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
155184588Sdfr		    "request_space_high", CTLFLAG_RW,
156290203Swollman		    &pool->sp_space_high,
157184588Sdfr		    "Maximum space in parsed but not handled requests.");
158184588Sdfr
159290203Swollman		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
160184588Sdfr		    "request_space_low", CTLFLAG_RW,
161290203Swollman		    &pool->sp_space_low,
162184588Sdfr		    "Low water mark for request space.");
163184588Sdfr
164217326Smdf		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
165184588Sdfr		    "request_space_throttled", CTLFLAG_RD,
166184588Sdfr		    &pool->sp_space_throttled, 0,
167184588Sdfr		    "Whether nfs requests are currently throttled");
168184588Sdfr
169217326Smdf		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
170184588Sdfr		    "request_space_throttle_count", CTLFLAG_RD,
171184588Sdfr		    &pool->sp_space_throttle_count, 0,
172184588Sdfr		    "Count of times throttling based on request space has occurred");
173184588Sdfr	}
174184588Sdfr
175177633Sdfr	return pool;
176177633Sdfr}
177177633Sdfr
178314034Savg/*
179314034Savg * Code common to svcpool_destroy() and svcpool_close(), which cleans up
180314034Savg * the pool data structures.
181314034Savg */
182314034Savgstatic void
183314034Savgsvcpool_cleanup(SVCPOOL *pool)
184177633Sdfr{
185267742Smav	SVCGROUP *grp;
186184588Sdfr	SVCXPRT *xprt, *nxprt;
187177633Sdfr	struct svc_callout *s;
188261055Smav	struct svc_loss_callout *sl;
189184588Sdfr	struct svcxprt_list cleanup;
190267742Smav	int g;
191177633Sdfr
192184588Sdfr	TAILQ_INIT(&cleanup);
193177633Sdfr
194267742Smav	for (g = 0; g < SVC_MAXGROUPS; g++) {
195267742Smav		grp = &pool->sp_groups[g];
196267742Smav		mtx_lock(&grp->sg_lock);
197267742Smav		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
198267742Smav			xprt_unregister_locked(xprt);
199267742Smav			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
200267742Smav		}
201267742Smav		mtx_unlock(&grp->sg_lock);
202177633Sdfr	}
203267742Smav	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
204267742Smav		SVC_RELEASE(xprt);
205267742Smav	}
206177633Sdfr
207267742Smav	mtx_lock(&pool->sp_lock);
208261055Smav	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
209177633Sdfr		mtx_unlock(&pool->sp_lock);
210177633Sdfr		svc_unreg(pool, s->sc_prog, s->sc_vers);
211177633Sdfr		mtx_lock(&pool->sp_lock);
212177633Sdfr	}
213261055Smav	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
214261055Smav		mtx_unlock(&pool->sp_lock);
215261055Smav		svc_loss_unreg(pool, sl->slc_dispatch);
216261055Smav		mtx_lock(&pool->sp_lock);
217261055Smav	}
218193603Srmacklem	mtx_unlock(&pool->sp_lock);
219314034Savg}
220177633Sdfr
221314034Savgvoid
222314034Savgsvcpool_destroy(SVCPOOL *pool)
223314034Savg{
224314034Savg	SVCGROUP *grp;
225314034Savg	int g;
226314034Savg
227314034Savg	svcpool_cleanup(pool);
228314034Savg
229267742Smav	for (g = 0; g < SVC_MAXGROUPS; g++) {
230267742Smav		grp = &pool->sp_groups[g];
231267742Smav		mtx_destroy(&grp->sg_lock);
232184588Sdfr	}
233193436Srmacklem	mtx_destroy(&pool->sp_lock);
234193436Srmacklem
235184588Sdfr	if (pool->sp_rcache)
236184588Sdfr		replay_freecache(pool->sp_rcache);
237184588Sdfr
238184588Sdfr	sysctl_ctx_free(&pool->sp_sysctl);
239177633Sdfr	free(pool, M_RPC);
240177633Sdfr}
241177633Sdfr
242267742Smav/*
243314034Savg * Similar to svcpool_destroy(), except that it does not destroy the actual
244314034Savg * data structures.  As such, "pool" may be used again.
245314034Savg */
246314034Savgvoid
247314034Savgsvcpool_close(SVCPOOL *pool)
248314034Savg{
249314034Savg	SVCGROUP *grp;
250314034Savg	int g;
251314034Savg
252314034Savg	svcpool_cleanup(pool);
253314034Savg
254314034Savg	/* Now, initialize the pool's state for a fresh svc_run() call. */
255314034Savg	mtx_lock(&pool->sp_lock);
256314034Savg	pool->sp_state = SVCPOOL_INIT;
257314034Savg	mtx_unlock(&pool->sp_lock);
258314034Savg	for (g = 0; g < SVC_MAXGROUPS; g++) {
259314034Savg		grp = &pool->sp_groups[g];
260314034Savg		mtx_lock(&grp->sg_lock);
261314034Savg		grp->sg_state = SVCPOOL_ACTIVE;
262314034Savg		mtx_unlock(&grp->sg_lock);
263314034Savg	}
264314034Savg}
265314034Savg
266314034Savg/*
267267742Smav * Sysctl handler to get the present thread count on a pool
268267742Smav */
269267742Smavstatic int
270267742Smavsvcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
271184588Sdfr{
272267742Smav	SVCPOOL *pool;
273267742Smav	int threads, error, g;
274184588Sdfr
275267742Smav	pool = oidp->oid_arg1;
276267742Smav	threads = 0;
277267742Smav	mtx_lock(&pool->sp_lock);
278267742Smav	for (g = 0; g < pool->sp_groupcount; g++)
279267742Smav		threads += pool->sp_groups[g].sg_threadcount;
280267742Smav	mtx_unlock(&pool->sp_lock);
281267742Smav	error = sysctl_handle_int(oidp, &threads, 0, req);
282267742Smav	return (error);
283184588Sdfr}
284184588Sdfr
285177633Sdfr/*
286184588Sdfr * Sysctl handler to set the minimum thread count on a pool
287184588Sdfr */
288184588Sdfrstatic int
289184588Sdfrsvcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
290184588Sdfr{
291184588Sdfr	SVCPOOL *pool;
292267742Smav	int newminthreads, error, g;
293184588Sdfr
294184588Sdfr	pool = oidp->oid_arg1;
295184588Sdfr	newminthreads = pool->sp_minthreads;
296184588Sdfr	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
297184588Sdfr	if (error == 0 && newminthreads != pool->sp_minthreads) {
298184588Sdfr		if (newminthreads > pool->sp_maxthreads)
299184588Sdfr			return (EINVAL);
300184588Sdfr		mtx_lock(&pool->sp_lock);
301267742Smav		pool->sp_minthreads = newminthreads;
302267742Smav		for (g = 0; g < pool->sp_groupcount; g++) {
303267742Smav			pool->sp_groups[g].sg_minthreads = max(1,
304267742Smav			    pool->sp_minthreads / pool->sp_groupcount);
305184588Sdfr		}
306184588Sdfr		mtx_unlock(&pool->sp_lock);
307184588Sdfr	}
308184588Sdfr	return (error);
309184588Sdfr}
310184588Sdfr
311184588Sdfr/*
312184588Sdfr * Sysctl handler to set the maximum thread count on a pool
313184588Sdfr */
314184588Sdfrstatic int
315184588Sdfrsvcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
316184588Sdfr{
317184588Sdfr	SVCPOOL *pool;
318267742Smav	int newmaxthreads, error, g;
319184588Sdfr
320184588Sdfr	pool = oidp->oid_arg1;
321184588Sdfr	newmaxthreads = pool->sp_maxthreads;
322184588Sdfr	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
323184588Sdfr	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
324184588Sdfr		if (newmaxthreads < pool->sp_minthreads)
325184588Sdfr			return (EINVAL);
326184588Sdfr		mtx_lock(&pool->sp_lock);
327267742Smav		pool->sp_maxthreads = newmaxthreads;
328267742Smav		for (g = 0; g < pool->sp_groupcount; g++) {
329267742Smav			pool->sp_groups[g].sg_maxthreads = max(1,
330267742Smav			    pool->sp_maxthreads / pool->sp_groupcount);
331184588Sdfr		}
332184588Sdfr		mtx_unlock(&pool->sp_lock);
333184588Sdfr	}
334184588Sdfr	return (error);
335184588Sdfr}
336184588Sdfr
337184588Sdfr/*
338177633Sdfr * Activate a transport handle.
339177633Sdfr */
340177633Sdfrvoid
341177633Sdfrxprt_register(SVCXPRT *xprt)
342177633Sdfr{
343177633Sdfr	SVCPOOL *pool = xprt->xp_pool;
344267742Smav	SVCGROUP *grp;
345267742Smav	int g;
346177633Sdfr
347194407Srmacklem	SVC_ACQUIRE(xprt);
348267742Smav	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
349267742Smav	xprt->xp_group = grp = &pool->sp_groups[g];
350267742Smav	mtx_lock(&grp->sg_lock);
351177633Sdfr	xprt->xp_registered = TRUE;
352177633Sdfr	xprt->xp_active = FALSE;
353267742Smav	TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
354267742Smav	mtx_unlock(&grp->sg_lock);
355177633Sdfr}
356177633Sdfr
357177633Sdfr/*
358184588Sdfr * De-activate a transport handle. Note: the locked version doesn't
359184588Sdfr * release the transport - caller must do that after dropping the pool
360184588Sdfr * lock.
361177633Sdfr */
362177633Sdfrstatic void
363184588Sdfrxprt_unregister_locked(SVCXPRT *xprt)
364177633Sdfr{
365267742Smav	SVCGROUP *grp = xprt->xp_group;
366177633Sdfr
367267742Smav	mtx_assert(&grp->sg_lock, MA_OWNED);
368193649Srmacklem	KASSERT(xprt->xp_registered == TRUE,
369193649Srmacklem	    ("xprt_unregister_locked: not registered"));
370261048Smav	xprt_inactive_locked(xprt);
371267742Smav	TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
372177633Sdfr	xprt->xp_registered = FALSE;
373184588Sdfr}
374177633Sdfr
375184588Sdfrvoid
376184588Sdfrxprt_unregister(SVCXPRT *xprt)
377184588Sdfr{
378267742Smav	SVCGROUP *grp = xprt->xp_group;
379184588Sdfr
380267742Smav	mtx_lock(&grp->sg_lock);
381193649Srmacklem	if (xprt->xp_registered == FALSE) {
382193649Srmacklem		/* Already unregistered by another thread */
383267742Smav		mtx_unlock(&grp->sg_lock);
384193649Srmacklem		return;
385193649Srmacklem	}
386184588Sdfr	xprt_unregister_locked(xprt);
387267742Smav	mtx_unlock(&grp->sg_lock);
388184588Sdfr
389184588Sdfr	SVC_RELEASE(xprt);
390177633Sdfr}
391177633Sdfr
392261048Smav/*
393261048Smav * Attempt to assign a service thread to this transport.
394261048Smav */
395261048Smavstatic int
396184588Sdfrxprt_assignthread(SVCXPRT *xprt)
397184588Sdfr{
398267742Smav	SVCGROUP *grp = xprt->xp_group;
399184588Sdfr	SVCTHREAD *st;
400184588Sdfr
401267742Smav	mtx_assert(&grp->sg_lock, MA_OWNED);
402267742Smav	st = LIST_FIRST(&grp->sg_idlethreads);
403184588Sdfr	if (st) {
404261048Smav		LIST_REMOVE(st, st_ilink);
405184588Sdfr		SVC_ACQUIRE(xprt);
406184588Sdfr		xprt->xp_thread = st;
407184588Sdfr		st->st_xprt = xprt;
408184588Sdfr		cv_signal(&st->st_cond);
409261048Smav		return (TRUE);
410184588Sdfr	} else {
411184588Sdfr		/*
412184588Sdfr		 * See if we can create a new thread. The
413184588Sdfr		 * actual thread creation happens in
414184588Sdfr		 * svc_run_internal because our locking state
415184588Sdfr		 * is poorly defined (we are typically called
416184588Sdfr		 * from a socket upcall). Don't create more
417184588Sdfr		 * than one thread per second.
418184588Sdfr		 */
419267742Smav		if (grp->sg_state == SVCPOOL_ACTIVE
420267742Smav		    && grp->sg_lastcreatetime < time_uptime
421267742Smav		    && grp->sg_threadcount < grp->sg_maxthreads) {
422267742Smav			grp->sg_state = SVCPOOL_THREADWANTED;
423184588Sdfr		}
424184588Sdfr	}
425261048Smav	return (FALSE);
426184588Sdfr}
427184588Sdfr
428177633Sdfrvoid
429177633Sdfrxprt_active(SVCXPRT *xprt)
430177633Sdfr{
431267742Smav	SVCGROUP *grp = xprt->xp_group;
432177633Sdfr
433267742Smav	mtx_lock(&grp->sg_lock);
434193436Srmacklem
435184588Sdfr	if (!xprt->xp_registered) {
436184588Sdfr		/*
437184588Sdfr		 * Race with xprt_unregister - we lose.
438184588Sdfr		 */
439267742Smav		mtx_unlock(&grp->sg_lock);
440184588Sdfr		return;
441184588Sdfr	}
442184588Sdfr
443177633Sdfr	if (!xprt->xp_active) {
444177633Sdfr		xprt->xp_active = TRUE;
445261048Smav		if (xprt->xp_thread == NULL) {
446267742Smav			if (!svc_request_space_available(xprt->xp_pool) ||
447261054Smav			    !xprt_assignthread(xprt))
448267742Smav				TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
449261048Smav				    xp_alink);
450261048Smav		}
451177633Sdfr	}
452177633Sdfr
453267742Smav	mtx_unlock(&grp->sg_lock);
454177633Sdfr}
455177633Sdfr
456177633Sdfrvoid
457184588Sdfrxprt_inactive_locked(SVCXPRT *xprt)
458177633Sdfr{
459267742Smav	SVCGROUP *grp = xprt->xp_group;
460177633Sdfr
461267742Smav	mtx_assert(&grp->sg_lock, MA_OWNED);
462177633Sdfr	if (xprt->xp_active) {
463261048Smav		if (xprt->xp_thread == NULL)
464267742Smav			TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
465177633Sdfr		xprt->xp_active = FALSE;
466177633Sdfr	}
467184588Sdfr}
468177633Sdfr
469184588Sdfrvoid
470184588Sdfrxprt_inactive(SVCXPRT *xprt)
471184588Sdfr{
472267742Smav	SVCGROUP *grp = xprt->xp_group;
473184588Sdfr
474267742Smav	mtx_lock(&grp->sg_lock);
475184588Sdfr	xprt_inactive_locked(xprt);
476267742Smav	mtx_unlock(&grp->sg_lock);
477177633Sdfr}
478177633Sdfr
479177633Sdfr/*
480261053Smav * Variant of xprt_inactive() for use only when sure that port is
481261053Smav * assigned to thread. For example, withing receive handlers.
482261053Smav */
483261053Smavvoid
484261053Smavxprt_inactive_self(SVCXPRT *xprt)
485261053Smav{
486261053Smav
487261053Smav	KASSERT(xprt->xp_thread != NULL,
488261053Smav	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
489261053Smav	xprt->xp_active = FALSE;
490261053Smav}
491261053Smav
492261053Smav/*
493177633Sdfr * Add a service program to the callout list.
494177633Sdfr * The dispatch routine will be called when a rpc request for this
495177633Sdfr * program number comes in.
496177633Sdfr */
497177633Sdfrbool_t
498177633Sdfrsvc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
499177633Sdfr    void (*dispatch)(struct svc_req *, SVCXPRT *),
500177633Sdfr    const struct netconfig *nconf)
501177633Sdfr{
502177633Sdfr	SVCPOOL *pool = xprt->xp_pool;
503177633Sdfr	struct svc_callout *s;
504177633Sdfr	char *netid = NULL;
505177633Sdfr	int flag = 0;
506177633Sdfr
507177633Sdfr/* VARIABLES PROTECTED BY svc_lock: s, svc_head */
508177633Sdfr
509177633Sdfr	if (xprt->xp_netid) {
510177633Sdfr		netid = strdup(xprt->xp_netid, M_RPC);
511177633Sdfr		flag = 1;
512177633Sdfr	} else if (nconf && nconf->nc_netid) {
513177633Sdfr		netid = strdup(nconf->nc_netid, M_RPC);
514177633Sdfr		flag = 1;
515177633Sdfr	} /* must have been created with svc_raw_create */
516177633Sdfr	if ((netid == NULL) && (flag == 1)) {
517177633Sdfr		return (FALSE);
518177633Sdfr	}
519177633Sdfr
520177633Sdfr	mtx_lock(&pool->sp_lock);
521177633Sdfr	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
522177633Sdfr		if (netid)
523177633Sdfr			free(netid, M_RPC);
524177633Sdfr		if (s->sc_dispatch == dispatch)
525177633Sdfr			goto rpcb_it; /* he is registering another xptr */
526177633Sdfr		mtx_unlock(&pool->sp_lock);
527177633Sdfr		return (FALSE);
528177633Sdfr	}
529177633Sdfr	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
530177633Sdfr	if (s == NULL) {
531177633Sdfr		if (netid)
532177633Sdfr			free(netid, M_RPC);
533177633Sdfr		mtx_unlock(&pool->sp_lock);
534177633Sdfr		return (FALSE);
535177633Sdfr	}
536177633Sdfr
537177633Sdfr	s->sc_prog = prog;
538177633Sdfr	s->sc_vers = vers;
539177633Sdfr	s->sc_dispatch = dispatch;
540177633Sdfr	s->sc_netid = netid;
541177633Sdfr	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);
542177633Sdfr
543177633Sdfr	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
544177633Sdfr		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);
545177633Sdfr
546177633Sdfrrpcb_it:
547177633Sdfr	mtx_unlock(&pool->sp_lock);
548177633Sdfr	/* now register the information with the local binder service */
549177633Sdfr	if (nconf) {
550177633Sdfr		bool_t dummy;
551177633Sdfr		struct netconfig tnc;
552184588Sdfr		struct netbuf nb;
553177633Sdfr		tnc = *nconf;
554184588Sdfr		nb.buf = &xprt->xp_ltaddr;
555184588Sdfr		nb.len = xprt->xp_ltaddr.ss_len;
556184588Sdfr		dummy = rpcb_set(prog, vers, &tnc, &nb);
557177633Sdfr		return (dummy);
558177633Sdfr	}
559177633Sdfr	return (TRUE);
560177633Sdfr}
561177633Sdfr
562177633Sdfr/*
563177633Sdfr * Remove a service program from the callout list.
564177633Sdfr */
565177633Sdfrvoid
566177633Sdfrsvc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
567177633Sdfr{
568177633Sdfr	struct svc_callout *s;
569177633Sdfr
570177633Sdfr	/* unregister the information anyway */
571177633Sdfr	(void) rpcb_unset(prog, vers, NULL);
572177633Sdfr	mtx_lock(&pool->sp_lock);
573177633Sdfr	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
574177633Sdfr		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
575177633Sdfr		if (s->sc_netid)
576177633Sdfr			mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
577177633Sdfr		mem_free(s, sizeof (struct svc_callout));
578177633Sdfr	}
579177633Sdfr	mtx_unlock(&pool->sp_lock);
580177633Sdfr}
581177633Sdfr
582261055Smav/*
583261055Smav * Add a service connection loss program to the callout list.
584261055Smav * The dispatch routine will be called when some port in ths pool die.
585261055Smav */
586261055Smavbool_t
587261055Smavsvc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
588261055Smav{
589261055Smav	SVCPOOL *pool = xprt->xp_pool;
590261055Smav	struct svc_loss_callout *s;
591261055Smav
592261055Smav	mtx_lock(&pool->sp_lock);
593261055Smav	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
594261055Smav		if (s->slc_dispatch == dispatch)
595261055Smav			break;
596261055Smav	}
597261055Smav	if (s != NULL) {
598261055Smav		mtx_unlock(&pool->sp_lock);
599261055Smav		return (TRUE);
600261055Smav	}
601297342Smav	s = malloc(sizeof(struct svc_loss_callout), M_RPC, M_NOWAIT);
602261055Smav	if (s == NULL) {
603261055Smav		mtx_unlock(&pool->sp_lock);
604261055Smav		return (FALSE);
605261055Smav	}
606261055Smav	s->slc_dispatch = dispatch;
607261055Smav	TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
608261055Smav	mtx_unlock(&pool->sp_lock);
609261055Smav	return (TRUE);
610261055Smav}
611261055Smav
612261055Smav/*
613261055Smav * Remove a service connection loss program from the callout list.
614261055Smav */
615261055Smavvoid
616261055Smavsvc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
617261055Smav{
618261055Smav	struct svc_loss_callout *s;
619261055Smav
620261055Smav	mtx_lock(&pool->sp_lock);
621261055Smav	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
622261055Smav		if (s->slc_dispatch == dispatch) {
623261055Smav			TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
624261055Smav			free(s, M_RPC);
625261055Smav			break;
626261055Smav		}
627261055Smav	}
628261055Smav	mtx_unlock(&pool->sp_lock);
629261055Smav}
630261055Smav
631177633Sdfr/* ********************** CALLOUT list related stuff ************* */
632177633Sdfr
633177633Sdfr/*
634177633Sdfr * Search the callout list for a program number, return the callout
635177633Sdfr * struct.
636177633Sdfr */
637177633Sdfrstatic struct svc_callout *
638177633Sdfrsvc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
639177633Sdfr{
640177633Sdfr	struct svc_callout *s;
641177633Sdfr
642177633Sdfr	mtx_assert(&pool->sp_lock, MA_OWNED);
643177633Sdfr	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
644177633Sdfr		if (s->sc_prog == prog && s->sc_vers == vers
645177633Sdfr		    && (netid == NULL || s->sc_netid == NULL ||
646177633Sdfr			strcmp(netid, s->sc_netid) == 0))
647177633Sdfr			break;
648177633Sdfr	}
649177633Sdfr
650177633Sdfr	return (s);
651177633Sdfr}
652177633Sdfr
653177633Sdfr/* ******************* REPLY GENERATION ROUTINES  ************ */
654177633Sdfr
655184588Sdfrstatic bool_t
656184588Sdfrsvc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
657184588Sdfr    struct mbuf *body)
658184588Sdfr{
659184588Sdfr	SVCXPRT *xprt = rqstp->rq_xprt;
660184588Sdfr	bool_t ok;
661184588Sdfr
662184588Sdfr	if (rqstp->rq_args) {
663184588Sdfr		m_freem(rqstp->rq_args);
664184588Sdfr		rqstp->rq_args = NULL;
665184588Sdfr	}
666184588Sdfr
667184588Sdfr	if (xprt->xp_pool->sp_rcache)
668184588Sdfr		replay_setreply(xprt->xp_pool->sp_rcache,
669184588Sdfr		    rply, svc_getrpccaller(rqstp), body);
670184588Sdfr
671184588Sdfr	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
672184588Sdfr		return (FALSE);
673184588Sdfr
674261055Smav	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
675184588Sdfr	if (rqstp->rq_addr) {
676184588Sdfr		free(rqstp->rq_addr, M_SONAME);
677184588Sdfr		rqstp->rq_addr = NULL;
678184588Sdfr	}
679184588Sdfr
680184588Sdfr	return (ok);
681184588Sdfr}
682184588Sdfr
683177633Sdfr/*
684177633Sdfr * Send a reply to an rpc request
685177633Sdfr */
686177633Sdfrbool_t
687184588Sdfrsvc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
688177633Sdfr{
689177633Sdfr	struct rpc_msg rply;
690184588Sdfr	struct mbuf *m;
691184588Sdfr	XDR xdrs;
692184588Sdfr	bool_t ok;
693177633Sdfr
694184588Sdfr	rply.rm_xid = rqstp->rq_xid;
695177633Sdfr	rply.rm_direction = REPLY;
696177633Sdfr	rply.rm_reply.rp_stat = MSG_ACCEPTED;
697184588Sdfr	rply.acpted_rply.ar_verf = rqstp->rq_verf;
698177633Sdfr	rply.acpted_rply.ar_stat = SUCCESS;
699184588Sdfr	rply.acpted_rply.ar_results.where = NULL;
700184588Sdfr	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
701177633Sdfr
702248195Sglebius	m = m_getcl(M_WAITOK, MT_DATA, 0);
703184588Sdfr	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
704184588Sdfr	ok = xdr_results(&xdrs, xdr_location);
705184588Sdfr	XDR_DESTROY(&xdrs);
706184588Sdfr
707184588Sdfr	if (ok) {
708184588Sdfr		return (svc_sendreply_common(rqstp, &rply, m));
709184588Sdfr	} else {
710184588Sdfr		m_freem(m);
711184588Sdfr		return (FALSE);
712184588Sdfr	}
713177633Sdfr}
714177633Sdfr
715184588Sdfrbool_t
716184588Sdfrsvc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
717184588Sdfr{
718184588Sdfr	struct rpc_msg rply;
719184588Sdfr
720184588Sdfr	rply.rm_xid = rqstp->rq_xid;
721184588Sdfr	rply.rm_direction = REPLY;
722184588Sdfr	rply.rm_reply.rp_stat = MSG_ACCEPTED;
723184588Sdfr	rply.acpted_rply.ar_verf = rqstp->rq_verf;
724184588Sdfr	rply.acpted_rply.ar_stat = SUCCESS;
725184588Sdfr	rply.acpted_rply.ar_results.where = NULL;
726184588Sdfr	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
727184588Sdfr
728184588Sdfr	return (svc_sendreply_common(rqstp, &rply, m));
729184588Sdfr}
730184588Sdfr
731177633Sdfr/*
732177633Sdfr * No procedure error reply
733177633Sdfr */
734177633Sdfrvoid
735184588Sdfrsvcerr_noproc(struct svc_req *rqstp)
736177633Sdfr{
737184588Sdfr	SVCXPRT *xprt = rqstp->rq_xprt;
738177633Sdfr	struct rpc_msg rply;
739177633Sdfr
740184588Sdfr	rply.rm_xid = rqstp->rq_xid;
741177633Sdfr	rply.rm_direction = REPLY;
742177633Sdfr	rply.rm_reply.rp_stat = MSG_ACCEPTED;
743184588Sdfr	rply.acpted_rply.ar_verf = rqstp->rq_verf;
744177633Sdfr	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
745177633Sdfr
746184588Sdfr	if (xprt->xp_pool->sp_rcache)
747184588Sdfr		replay_setreply(xprt->xp_pool->sp_rcache,
748184588Sdfr		    &rply, svc_getrpccaller(rqstp), NULL);
749184588Sdfr
750184588Sdfr	svc_sendreply_common(rqstp, &rply, NULL);
751177633Sdfr}
752177633Sdfr
753177633Sdfr/*
754177633Sdfr * Can't decode args error reply
755177633Sdfr */
756177633Sdfrvoid
757184588Sdfrsvcerr_decode(struct svc_req *rqstp)
758177633Sdfr{
759184588Sdfr	SVCXPRT *xprt = rqstp->rq_xprt;
760177633Sdfr	struct rpc_msg rply;
761177633Sdfr
762184588Sdfr	rply.rm_xid = rqstp->rq_xid;
763177633Sdfr	rply.rm_direction = REPLY;
764177633Sdfr	rply.rm_reply.rp_stat = MSG_ACCEPTED;
765184588Sdfr	rply.acpted_rply.ar_verf = rqstp->rq_verf;
766177633Sdfr	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
767177633Sdfr
768184588Sdfr	if (xprt->xp_pool->sp_rcache)
769184588Sdfr		replay_setreply(xprt->xp_pool->sp_rcache,
770184588Sdfr		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);
771184588Sdfr
772184588Sdfr	svc_sendreply_common(rqstp, &rply, NULL);
773177633Sdfr}
774177633Sdfr
775177633Sdfr/*
776177633Sdfr * Some system error
777177633Sdfr */
778177633Sdfrvoid
779184588Sdfrsvcerr_systemerr(struct svc_req *rqstp)
780177633Sdfr{
781184588Sdfr	SVCXPRT *xprt = rqstp->rq_xprt;
782177633Sdfr	struct rpc_msg rply;
783177633Sdfr
784184588Sdfr	rply.rm_xid = rqstp->rq_xid;
785177633Sdfr	rply.rm_direction = REPLY;
786177633Sdfr	rply.rm_reply.rp_stat = MSG_ACCEPTED;
787184588Sdfr	rply.acpted_rply.ar_verf = rqstp->rq_verf;
788177633Sdfr	rply.acpted_rply.ar_stat = SYSTEM_ERR;
789177633Sdfr
790184588Sdfr	if (xprt->xp_pool->sp_rcache)
791184588Sdfr		replay_setreply(xprt->xp_pool->sp_rcache,
792184588Sdfr		    &rply, svc_getrpccaller(rqstp), NULL);
793184588Sdfr
794184588Sdfr	svc_sendreply_common(rqstp, &rply, NULL);
795177633Sdfr}
796177633Sdfr
797177633Sdfr/*
798177633Sdfr * Authentication error reply
799177633Sdfr */
800177633Sdfrvoid
801184588Sdfrsvcerr_auth(struct svc_req *rqstp, enum auth_stat why)
802177633Sdfr{
803184588Sdfr	SVCXPRT *xprt = rqstp->rq_xprt;
804177633Sdfr	struct rpc_msg rply;
805177633Sdfr
806184588Sdfr	rply.rm_xid = rqstp->rq_xid;
807177633Sdfr	rply.rm_direction = REPLY;
808177633Sdfr	rply.rm_reply.rp_stat = MSG_DENIED;
809177633Sdfr	rply.rjcted_rply.rj_stat = AUTH_ERROR;
810177633Sdfr	rply.rjcted_rply.rj_why = why;
811177633Sdfr
812184588Sdfr	if (xprt->xp_pool->sp_rcache)
813184588Sdfr		replay_setreply(xprt->xp_pool->sp_rcache,
814184588Sdfr		    &rply, svc_getrpccaller(rqstp), NULL);
815184588Sdfr
816184588Sdfr	svc_sendreply_common(rqstp, &rply, NULL);
817177633Sdfr}
818177633Sdfr
/*
 * Auth too weak error reply
 *
 * Convenience wrapper: reject the request with AUTH_ERROR and the
 * specific reason AUTH_TOOWEAK.
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	svcerr_auth(rqstp, AUTH_TOOWEAK);
}
828177633Sdfr
829177633Sdfr/*
830177633Sdfr * Program unavailable error reply
831177633Sdfr */
832177633Sdfrvoid
833184588Sdfrsvcerr_noprog(struct svc_req *rqstp)
834177633Sdfr{
835184588Sdfr	SVCXPRT *xprt = rqstp->rq_xprt;
836177633Sdfr	struct rpc_msg rply;
837177633Sdfr
838184588Sdfr	rply.rm_xid = rqstp->rq_xid;
839177633Sdfr	rply.rm_direction = REPLY;
840177633Sdfr	rply.rm_reply.rp_stat = MSG_ACCEPTED;
841184588Sdfr	rply.acpted_rply.ar_verf = rqstp->rq_verf;
842177633Sdfr	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
843177633Sdfr
844184588Sdfr	if (xprt->xp_pool->sp_rcache)
845184588Sdfr		replay_setreply(xprt->xp_pool->sp_rcache,
846184588Sdfr		    &rply, svc_getrpccaller(rqstp), NULL);
847184588Sdfr
848184588Sdfr	svc_sendreply_common(rqstp, &rply, NULL);
849177633Sdfr}
850177633Sdfr
851177633Sdfr/*
852177633Sdfr * Program version mismatch error reply
853177633Sdfr */
854177633Sdfrvoid
855184588Sdfrsvcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
856177633Sdfr{
857184588Sdfr	SVCXPRT *xprt = rqstp->rq_xprt;
858177633Sdfr	struct rpc_msg rply;
859177633Sdfr
860184588Sdfr	rply.rm_xid = rqstp->rq_xid;
861177633Sdfr	rply.rm_direction = REPLY;
862177633Sdfr	rply.rm_reply.rp_stat = MSG_ACCEPTED;
863184588Sdfr	rply.acpted_rply.ar_verf = rqstp->rq_verf;
864177633Sdfr	rply.acpted_rply.ar_stat = PROG_MISMATCH;
865177633Sdfr	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
866177633Sdfr	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;
867177633Sdfr
868184588Sdfr	if (xprt->xp_pool->sp_rcache)
869184588Sdfr		replay_setreply(xprt->xp_pool->sp_rcache,
870184588Sdfr		    &rply, svc_getrpccaller(rqstp), NULL);
871184588Sdfr
872184588Sdfr	svc_sendreply_common(rqstp, &rply, NULL);
873177633Sdfr}
874177633Sdfr
875184588Sdfr/*
876184588Sdfr * Allocate a new server transport structure. All fields are
877184588Sdfr * initialized to zero and xp_p3 is initialized to point at an
878184588Sdfr * extension structure to hold various flags and authentication
879184588Sdfr * parameters.
880184588Sdfr */
881184588SdfrSVCXPRT *
882303692Sngiesvc_xprt_alloc(void)
883184588Sdfr{
884184588Sdfr	SVCXPRT *xprt;
885184588Sdfr	SVCXPRT_EXT *ext;
886184588Sdfr
887184588Sdfr	xprt = mem_alloc(sizeof(SVCXPRT));
888184588Sdfr	ext = mem_alloc(sizeof(SVCXPRT_EXT));
889184588Sdfr	xprt->xp_p3 = ext;
890184588Sdfr	refcount_init(&xprt->xp_refs, 1);
891184588Sdfr
892184588Sdfr	return (xprt);
893184588Sdfr}
894184588Sdfr
895184588Sdfr/*
896184588Sdfr * Free a server transport structure.
897184588Sdfr */
898184588Sdfrvoid
899303692Sngiesvc_xprt_free(SVCXPRT *xprt)
900184588Sdfr{
901184588Sdfr
902184588Sdfr	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
903184588Sdfr	mem_free(xprt, sizeof(SVCXPRT));
904184588Sdfr}
905184588Sdfr
/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Read RPC requests from a transport and queue them to be
 * executed. We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 *
 * On success *rqstp_ret receives a fully authenticated request whose
 * ownership passes to the caller (who must eventually svc_freereq it).
 * Returns the transport status from SVC_STAT; if the transport has
 * died, loss callouts are run and the transport is unregistered.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	struct svc_loss_callout *s;
	enum xprt_stat stat;

	/* now receive msgs from xprtprt (support batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	/*
	 * Carve the request's preallocated credential area into three
	 * MAX_AUTH_BYTES slices: wire credential, wire verifier, and
	 * the decoded client credential.
	 */
	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		SVC_ACQUIRE(xprt);
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				/*
				 * Duplicate of a completed request:
				 * resend the cached reply and drop the
				 * incoming arguments.
				 */
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody, &r->rq_reply_seq);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				m_freem(args);
				goto call_done;

			default:
				/*
				 * Any other replay state (presumably a
				 * request still in progress): drop the
				 * duplicate silently.
				 */
				m_freem(args);
				goto call_done;
			}
		}

		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		/* rq_size feeds the pool's request-space accounting. */
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			/* Argument unwrap (e.g. decryption) failed. */
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	/* Free the request unless ownership passed to the caller above. */
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		/* Notify loss callouts, then retire the dead transport. */
		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
			(*s->slc_dispatch)(xprt);
		xprt_unregister(xprt);
	}

	return (stat);
}
1006184588Sdfr
1007184588Sdfrstatic void
1008184588Sdfrsvc_executereq(struct svc_req *rqstp)
1009184588Sdfr{
1010184588Sdfr	SVCXPRT *xprt = rqstp->rq_xprt;
1011184588Sdfr	SVCPOOL *pool = xprt->xp_pool;
1012184588Sdfr	int prog_found;
1013184588Sdfr	rpcvers_t low_vers;
1014184588Sdfr	rpcvers_t high_vers;
1015184588Sdfr	struct svc_callout *s;
1016184588Sdfr
1017184588Sdfr	/* now match message with a registered service*/
1018184588Sdfr	prog_found = FALSE;
1019184588Sdfr	low_vers = (rpcvers_t) -1L;
1020184588Sdfr	high_vers = (rpcvers_t) 0L;
1021184588Sdfr	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
1022184588Sdfr		if (s->sc_prog == rqstp->rq_prog) {
1023184588Sdfr			if (s->sc_vers == rqstp->rq_vers) {
1024184588Sdfr				/*
1025184588Sdfr				 * We hand ownership of r to the
1026184588Sdfr				 * dispatch method - they must call
1027184588Sdfr				 * svc_freereq.
1028184588Sdfr				 */
1029184588Sdfr				(*s->sc_dispatch)(rqstp, xprt);
1030184588Sdfr				return;
1031184588Sdfr			}  /* found correct version */
1032184588Sdfr			prog_found = TRUE;
1033184588Sdfr			if (s->sc_vers < low_vers)
1034184588Sdfr				low_vers = s->sc_vers;
1035184588Sdfr			if (s->sc_vers > high_vers)
1036184588Sdfr				high_vers = s->sc_vers;
1037184588Sdfr		}   /* found correct program */
1038184588Sdfr	}
1039184588Sdfr
1040184588Sdfr	/*
1041184588Sdfr	 * if we got here, the program or version
1042184588Sdfr	 * is not served ...
1043184588Sdfr	 */
1044184588Sdfr	if (prog_found)
1045184588Sdfr		svcerr_progvers(rqstp, low_vers, high_vers);
1046184588Sdfr	else
1047184588Sdfr		svcerr_noprog(rqstp);
1048184588Sdfr
1049184588Sdfr	svc_freereq(rqstp);
1050184588Sdfr}
1051184588Sdfr
/*
 * Unregister transports in this group that have exceeded their idle
 * timeout.  Called with sg_lock held; the lock is dropped while the
 * final transport references are released and reacquired before
 * returning.
 */
static void
svc_checkidle(SVCGROUP *grp)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers. Don't time
		 * something out which is just waking up.
		 */
		if (!xprt->xp_idletimeout || xprt->xp_thread)
			continue;

		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
		if (time_uptime > timo) {
			/* Collect expired transports; release them unlocked. */
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
	}

	mtx_unlock(&grp->sg_lock);
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}
	mtx_lock(&grp->sg_lock);
}
1081177633Sdfr
1082184588Sdfrstatic void
1083184588Sdfrsvc_assign_waiting_sockets(SVCPOOL *pool)
1084177633Sdfr{
1085267742Smav	SVCGROUP *grp;
1086177633Sdfr	SVCXPRT *xprt;
1087267742Smav	int g;
1088184588Sdfr
1089267742Smav	for (g = 0; g < pool->sp_groupcount; g++) {
1090267742Smav		grp = &pool->sp_groups[g];
1091267742Smav		mtx_lock(&grp->sg_lock);
1092267742Smav		while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1093267742Smav			if (xprt_assignthread(xprt))
1094267742Smav				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1095267742Smav			else
1096267742Smav				break;
1097267742Smav		}
1098267742Smav		mtx_unlock(&grp->sg_lock);
1099184588Sdfr	}
1100184588Sdfr}
1101184588Sdfr
1102261054Smavstatic void
1103290203Swollmansvc_change_space_used(SVCPOOL *pool, long delta)
1104184588Sdfr{
1105290203Swollman	unsigned long value;
1106184588Sdfr
1107290203Swollman	value = atomic_fetchadd_long(&pool->sp_space_used, delta) + delta;
1108261054Smav	if (delta > 0) {
1109261054Smav		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
1110261054Smav			pool->sp_space_throttled = TRUE;
1111261054Smav			pool->sp_space_throttle_count++;
1112261054Smav		}
1113261054Smav		if (value > pool->sp_space_used_highest)
1114261054Smav			pool->sp_space_used_highest = value;
1115261054Smav	} else {
1116261054Smav		if (value < pool->sp_space_low && pool->sp_space_throttled) {
1117184588Sdfr			pool->sp_space_throttled = FALSE;
1118184588Sdfr			svc_assign_waiting_sockets(pool);
1119184588Sdfr		}
1120184588Sdfr	}
1121184588Sdfr}
1122184588Sdfr
1123261054Smavstatic bool_t
1124261054Smavsvc_request_space_available(SVCPOOL *pool)
1125261054Smav{
1126261054Smav
1127261054Smav	if (pool->sp_space_throttled)
1128261054Smav		return (FALSE);
1129261054Smav	return (TRUE);
1130261054Smav}
1131261054Smav
/*
 * Main loop for a service thread (worker or master).  The thread
 * alternates between sleeping for work, draining RPC requests from an
 * assigned transport, and executing the requests queued to it.  Worker
 * threads exit when idle beyond the group's minimum thread count or
 * when the group is closing; the master (ismaster) thread only exits
 * on close.
 */
static void
svc_run_internal(SVCGROUP *grp, bool_t ismaster)
{
	SVCPOOL *pool = grp->sg_pool;
	SVCTHREAD *st, *stpref;
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct svc_req *rqstp;
	struct proc *p;
	long sz;
	int error;

	/* Per-thread state: current transport, queued requests, sleep CV. */
	st = mem_alloc(sizeof(*st));
	mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF);
	st->st_pool = pool;
	st->st_xprt = NULL;
	STAILQ_INIT(&st->st_reqs);
	cv_init(&st->st_cond, "rpcsvc");

	mtx_lock(&grp->sg_lock);

	/*
	 * If we are a new thread which was spawned to cope with
	 * increased load, set the state back to SVCPOOL_ACTIVE.
	 */
	if (grp->sg_state == SVCPOOL_THREADSTARTING)
		grp->sg_state = SVCPOOL_ACTIVE;

	while (grp->sg_state != SVCPOOL_CLOSING) {
		/*
		 * Create new thread if requested.
		 */
		if (grp->sg_state == SVCPOOL_THREADWANTED) {
			grp->sg_state = SVCPOOL_THREADSTARTING;
			grp->sg_lastcreatetime = time_uptime;
			mtx_unlock(&grp->sg_lock);
			svc_new_thread(grp);
			mtx_lock(&grp->sg_lock);
			continue;
		}

		/*
		 * Check for idle transports once per second.
		 */
		if (time_uptime > grp->sg_lastidlecheck) {
			grp->sg_lastidlecheck = time_uptime;
			svc_checkidle(grp);
		}

		xprt = st->st_xprt;
		if (!xprt) {
			/*
			 * Enforce maxthreads count.
			 */
			if (!ismaster && grp->sg_threadcount >
			    grp->sg_maxthreads)
				break;

			/*
			 * Before sleeping, see if we can find an
			 * active transport which isn't being serviced
			 * by a thread.
			 */
			if (svc_request_space_available(pool) &&
			    (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
				SVC_ACQUIRE(xprt);
				xprt->xp_thread = st;
				st->st_xprt = xprt;
				continue;
			}

			/*
			 * Sleep on the idle list.  Threads above the
			 * group minimum (and the master) use a timed
			 * wait so surplus workers can retire.
			 */
			LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink);
			if (ismaster || (!ismaster &&
			    grp->sg_threadcount > grp->sg_minthreads))
				error = cv_timedwait_sig(&st->st_cond,
				    &grp->sg_lock, 5 * hz);
			else
				error = cv_wait_sig(&st->st_cond,
				    &grp->sg_lock);
			/* A waker that assigned us a transport also delists us. */
			if (st->st_xprt == NULL)
				LIST_REMOVE(st, st_ilink);

			/*
			 * Reduce worker thread count when idle.
			 */
			if (error == EWOULDBLOCK) {
				if (!ismaster
				    && (grp->sg_threadcount
					> grp->sg_minthreads)
					&& !st->st_xprt)
					break;
			} else if (error != 0) {
				KASSERT(error == EINTR || error == ERESTART,
				    ("non-signal error %d", error));
				mtx_unlock(&grp->sg_lock);
				/*
				 * Interrupted by a signal: if the
				 * process is stopping, cooperate with
				 * suspension; otherwise shut the whole
				 * pool down.
				 */
				p = curproc;
				PROC_LOCK(p);
				if (P_SHOULDSTOP(p) ||
				    (p->p_flag & P_TOTAL_STOP) != 0) {
					thread_suspend_check(0);
					PROC_UNLOCK(p);
					mtx_lock(&grp->sg_lock);
				} else {
					PROC_UNLOCK(p);
					svc_exit(pool);
					mtx_lock(&grp->sg_lock);
					break;
				}
			}
			continue;
		}
		mtx_unlock(&grp->sg_lock);

		/*
		 * Drain the transport socket and queue up any RPCs.
		 */
		xprt->xp_lastactive = time_uptime;
		do {
			if (!svc_request_space_available(pool))
				break;
			rqstp = NULL;
			stat = svc_getreq(xprt, &rqstp);
			if (rqstp) {
				svc_change_space_used(pool, rqstp->rq_size);
				/*
				 * See if the application has a preference
				 * for some other thread.
				 */
				if (pool->sp_assign) {
					stpref = pool->sp_assign(st, rqstp);
					rqstp->rq_thread = stpref;
					STAILQ_INSERT_TAIL(&stpref->st_reqs,
					    rqstp, rq_link);
					mtx_unlock(&stpref->st_lock);
					/* Handed to another thread: keep draining. */
					if (stpref != st)
						rqstp = NULL;
				} else {
					rqstp->rq_thread = st;
					STAILQ_INSERT_TAIL(&st->st_reqs,
					    rqstp, rq_link);
				}
			}
		} while (rqstp == NULL && stat == XPRT_MOREREQS
		    && grp->sg_state != SVCPOOL_CLOSING);

		/*
		 * Move this transport to the end of the active list to
		 * ensure fairness when multiple transports are active.
		 * If this was the last queued request, svc_getreq will end
		 * up calling xprt_inactive to remove from the active list.
		 */
		mtx_lock(&grp->sg_lock);
		xprt->xp_thread = NULL;
		st->st_xprt = NULL;
		if (xprt->xp_active) {
			if (!svc_request_space_available(pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active,
				    xprt, xp_alink);
		}
		mtx_unlock(&grp->sg_lock);
		SVC_RELEASE(xprt);

		/*
		 * Execute what we have queued.
		 */
		mtx_lock(&st->st_lock);
		while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
			STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
			mtx_unlock(&st->st_lock);
			/* Save the size: the dispatch may free rqstp. */
			sz = (long)rqstp->rq_size;
			svc_executereq(rqstp);
			svc_change_space_used(pool, -sz);
			mtx_lock(&st->st_lock);
		}
		mtx_unlock(&st->st_lock);
		mtx_lock(&grp->sg_lock);
	}

	/* Tear down per-thread state before exiting. */
	if (st->st_xprt) {
		xprt = st->st_xprt;
		st->st_xprt = NULL;
		SVC_RELEASE(xprt);
	}
	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
	mtx_destroy(&st->st_lock);
	cv_destroy(&st->st_cond);
	mem_free(st, sizeof(*st));

	/* Let svc_run's shutdown wait know a worker has retired. */
	grp->sg_threadcount--;
	if (!ismaster)
		wakeup(grp);
	mtx_unlock(&grp->sg_lock);
}
1327177633Sdfr
1328184588Sdfrstatic void
1329184588Sdfrsvc_thread_start(void *arg)
1330184588Sdfr{
1331184588Sdfr
1332267742Smav	svc_run_internal((SVCGROUP *) arg, FALSE);
1333184588Sdfr	kthread_exit();
1334184588Sdfr}
1335184588Sdfr
/*
 * Spawn a new worker kernel thread for a thread group.  The group's
 * thread count is incremented before kthread_add so the count covers
 * the thread even before it starts running.
 */
static void
svc_new_thread(SVCGROUP *grp)
{
	SVCPOOL *pool = grp->sg_pool;
	struct thread *td;

	mtx_lock(&grp->sg_lock);
	grp->sg_threadcount++;
	mtx_unlock(&grp->sg_lock);
	kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
	    "%s: service", pool->sp_name);
}
1348184588Sdfr
/*
 * Run an RPC service pool.  The calling thread becomes the pool's
 * master service thread; this function returns only after svc_exit
 * has been called and every worker thread has retired.
 */
void
svc_run(SVCPOOL *pool)
{
	int g, i;
	struct proc *p;
	struct thread *td;
	SVCGROUP *grp;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;

	/* Choose group count based on number of threads and CPUs. */
	pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
	    min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		/* Split the pool's thread budget evenly across groups. */
		grp->sg_minthreads = max(1,
		    pool->sp_minthreads / pool->sp_groupcount);
		grp->sg_maxthreads = max(1,
		    pool->sp_maxthreads / pool->sp_groupcount);
		grp->sg_lastcreatetime = time_uptime;
	}

	/* Starting threads */
	/* The master thread itself counts as group 0's first thread. */
	pool->sp_groups[0].sg_threadcount++;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
			svc_new_thread(grp);
	}
	svc_run_internal(&pool->sp_groups[0], TRUE);

	/* Waiting for threads to stop. */
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while (grp->sg_threadcount > 0)
			msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
		mtx_unlock(&grp->sg_lock);
	}
}
1394184588Sdfr
1395184588Sdfrvoid
1396177633Sdfrsvc_exit(SVCPOOL *pool)
1397177633Sdfr{
1398267742Smav	SVCGROUP *grp;
1399184588Sdfr	SVCTHREAD *st;
1400267742Smav	int g;
1401184588Sdfr
1402267742Smav	pool->sp_state = SVCPOOL_CLOSING;
1403267742Smav	for (g = 0; g < pool->sp_groupcount; g++) {
1404267742Smav		grp = &pool->sp_groups[g];
1405267742Smav		mtx_lock(&grp->sg_lock);
1406267742Smav		if (grp->sg_state != SVCPOOL_CLOSING) {
1407267742Smav			grp->sg_state = SVCPOOL_CLOSING;
1408267742Smav			LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
1409267742Smav				cv_signal(&st->st_cond);
1410267742Smav		}
1411267742Smav		mtx_unlock(&grp->sg_lock);
1412261045Smav	}
1413177633Sdfr}
1414184588Sdfr
1415184588Sdfrbool_t
1416184588Sdfrsvc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1417184588Sdfr{
1418184588Sdfr	struct mbuf *m;
1419184588Sdfr	XDR xdrs;
1420184588Sdfr	bool_t stat;
1421184588Sdfr
1422184588Sdfr	m = rqstp->rq_args;
1423184588Sdfr	rqstp->rq_args = NULL;
1424184588Sdfr
1425184588Sdfr	xdrmbuf_create(&xdrs, m, XDR_DECODE);
1426184588Sdfr	stat = xargs(&xdrs, args);
1427184588Sdfr	XDR_DESTROY(&xdrs);
1428184588Sdfr
1429184588Sdfr	return (stat);
1430184588Sdfr}
1431184588Sdfr
1432184588Sdfrbool_t
1433184588Sdfrsvc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1434184588Sdfr{
1435184588Sdfr	XDR xdrs;
1436184588Sdfr
1437184588Sdfr	if (rqstp->rq_addr) {
1438184588Sdfr		free(rqstp->rq_addr, M_SONAME);
1439184588Sdfr		rqstp->rq_addr = NULL;
1440184588Sdfr	}
1441184588Sdfr
1442184588Sdfr	xdrs.x_op = XDR_FREE;
1443184588Sdfr	return (xargs(&xdrs, args));
1444184588Sdfr}
1445184588Sdfr
1446184588Sdfrvoid
1447184588Sdfrsvc_freereq(struct svc_req *rqstp)
1448184588Sdfr{
1449184588Sdfr	SVCTHREAD *st;
1450184588Sdfr	SVCPOOL *pool;
1451184588Sdfr
1452184588Sdfr	st = rqstp->rq_thread;
1453184588Sdfr	if (st) {
1454261054Smav		pool = st->st_pool;
1455184588Sdfr		if (pool->sp_done)
1456184588Sdfr			pool->sp_done(st, rqstp);
1457184588Sdfr	}
1458184588Sdfr
1459184588Sdfr	if (rqstp->rq_auth.svc_ah_ops)
1460184588Sdfr		SVCAUTH_RELEASE(&rqstp->rq_auth);
1461184588Sdfr
1462184588Sdfr	if (rqstp->rq_xprt) {
1463184588Sdfr		SVC_RELEASE(rqstp->rq_xprt);
1464184588Sdfr	}
1465184588Sdfr
1466184588Sdfr	if (rqstp->rq_addr)
1467184588Sdfr		free(rqstp->rq_addr, M_SONAME);
1468184588Sdfr
1469184588Sdfr	if (rqstp->rq_args)
1470184588Sdfr		m_freem(rqstp->rq_args);
1471184588Sdfr
1472184588Sdfr	free(rqstp, M_RPC);
1473184588Sdfr}
1474