nfs_nfsiod.c revision 210455
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_syscalls.c	8.5 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_nfsiod.c 210455 2010-07-24 22:11:11Z rmacklem $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/namei.h>
#include <sys/unistd.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/mutex.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <nfs/xdr_subs.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsm_subs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>
#include <nfs/nfs_lock.h>

static MALLOC_DEFINE(M_NFSSVC, "nfsclient_srvsock", "Nfs server structure");

static void	nfssvc_iod(void *);

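/* Per-slot "in use" flags for the nfsiod kthreads. */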
static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];

SYSCTL_DECL(_vfs_nfs);

/* Maximum number of seconds a nfsiod kthread will sleep before exiting */
static unsigned int nfs_iodmaxidle = 120;
SYSCTL_UINT(_vfs_nfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
    "Max number of seconds an nfsiod kthread will sleep before exiting");

/* Maximum number of nfsiod kthreads */
unsigned int nfs_iodmax = 20;

/* Minimum number of nfsiod kthreads to keep as spares */
static unsigned int nfs_iodmin = 0;

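/*
 * Sysctl handler for vfs.nfs.iodmin (e.g. `sysctl vfs.nfs.iodmin=8`).
 * Values above the current nfs_iodmax are rejected; raising the minimum
 * above the number of running nfsiods creates new kthreads to meet it.
 */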
static int
sysctl_iodmin(SYSCTL_HANDLER_ARGS)
{
	int error, i;
	int newmin;

	newmin = nfs_iodmin;
	error = sysctl_handle_int(oidp, &newmin, 0, req);
	if (error || (req->newptr == NULL))
		return (error);
	mtx_lock(&nfs_iod_mtx);
	if (newmin > nfs_iodmax) {
		error = EINVAL;
		goto out;
	}
	nfs_iodmin = newmin;
	if (nfs_numasync >= nfs_iodmin)
		goto out;
	/*
	 * If the current number of nfsiods is lower
	 * than the new minimum, create some more.
	 */
	for (i = nfs_iodmin - nfs_numasync; i > 0; i--)
		nfs_nfsiodnew(0);
out:
	mtx_unlock(&nfs_iod_mtx);
	return (error);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
    sizeof (nfs_iodmin), sysctl_iodmin, "IU",
    "Min number of nfsiod kthreads to keep as spares");


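/*
 * Sysctl handler for vfs.nfs.iodmax (e.g. `sysctl vfs.nfs.iodmax=32`).
 * Values above NFS_MAXASYNCDAEMON are rejected; when the limit drops below
 * the number of running nfsiods, idle ones are woken so they notice the new
 * limit and exit, and busy ones exit once they finish their I/O.
 */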
static int
sysctl_iodmax(SYSCTL_HANDLER_ARGS)
{
	int error, i;
	int iod, newmax;

	newmax = nfs_iodmax;
	error = sysctl_handle_int(oidp, &newmax, 0, req);
	if (error || (req->newptr == NULL))
		return (error);
	if (newmax > NFS_MAXASYNCDAEMON)
		return (EINVAL);
	mtx_lock(&nfs_iod_mtx);
	nfs_iodmax = newmax;
	if (nfs_numasync <= nfs_iodmax)
		goto out;
	/*
	 * If there are some asleep nfsiods that should
	 * exit, wakeup() them so that they check nfs_iodmax
	 * and exit.  Those who are active will exit as
	 * soon as they finish I/O.
	 */
	iod = nfs_numasync - 1;
	for (i = 0; i < nfs_numasync - nfs_iodmax; i++) {
		if (nfs_iodwant[iod] == NFSIOD_AVAILABLE)
			wakeup(&nfs_iodwant[iod]);
		iod--;
	}
out:
	mtx_unlock(&nfs_iod_mtx);
	return (0);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
    sizeof (nfs_iodmax), sysctl_iodmax, "IU",
    "Max number of nfsiod kthreads");

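/*
 * Create a new nfsiod kthread in the first free slot of nfs_asyncdaemon[].
 * Returns the slot number on success, or -1 if the limit has been reached
 * or the kthread could not be created.  Called with nfs_iod_mtx held; the
 * mutex is dropped around kproc_create().
 */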
int
nfs_nfsiodnew(int set_iodwant)
{
	int error, i;
	int newiod;

	if (nfs_numasync >= nfs_iodmax)
		return (-1);
	newiod = -1;
	for (i = 0; i < nfs_iodmax; i++)
		if (nfs_asyncdaemon[i] == 0) {
			nfs_asyncdaemon[i]++;
			newiod = i;
			break;
		}
	if (newiod == -1)
		return (-1);
	if (set_iodwant > 0)
		nfs_iodwant[i] = NFSIOD_CREATED_FOR_NFS_ASYNCIO;
	mtx_unlock(&nfs_iod_mtx);
	error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL, RFHIGHPID,
	    0, "nfsiod %d", newiod);
	mtx_lock(&nfs_iod_mtx);
	if (error) {
		if (set_iodwant > 0)
			nfs_iodwant[i] = NFSIOD_NOT_AVAILABLE;
		return (-1);
	}
	nfs_numasync++;
	return (newiod);
}

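/*
 * Boot-time setup, run from SYSINIT: fetch the vfs.nfs.iodmin tunable
 * (silently capped at NFS_MAXASYNCDAEMON) and start that many nfsiods.
 */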
static void
nfsiod_setup(void *dummy)
{
	int i;
	int error;

	TUNABLE_INT_FETCH("vfs.nfs.iodmin", &nfs_iodmin);
	mtx_lock(&nfs_iod_mtx);
	/* Silently limit the start number of nfsiod's */
	if (nfs_iodmin > NFS_MAXASYNCDAEMON)
		nfs_iodmin = NFS_MAXASYNCDAEMON;

	for (i = 0; i < nfs_iodmin; i++) {
		error = nfs_nfsiodnew(0);
		if (error == -1)
			panic("nfsiod_setup: nfs_nfsiodnew failed");
	}
	mtx_unlock(&nfs_iod_mtx);
}
SYSINIT(nfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL);

static int nfs_defect = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
    "Allow nfsiods to migrate serving different mounts");

/*
 * Asynchronous I/O daemons for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Returns if we hit the timeout defined by the iodmaxidle sysctl.
 */
static void
nfssvc_iod(void *instance)
{
	struct buf *bp;
	struct nfsmount *nmp;
	int myiod, timo;
	int error = 0;

	mtx_lock(&nfs_iod_mtx);
	myiod = (int *)instance - nfs_asyncdaemon;
	/*
	 * Main loop
	 */
	for (;;) {
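	    /*
	     * Idle phase: wait for a mount to queue work for this iod.
	     * An iod whose slot is at or above nfs_iodmax exits as soon as
	     * it goes idle; iods at or above nfs_iodmin give up and exit
	     * after sleeping nfs_iodmaxidle seconds without work.
	     */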
	    while (((nmp = nfs_iodmount[myiod]) == NULL)
		   || !TAILQ_FIRST(&nmp->nm_bufq)) {
		if (myiod >= nfs_iodmax)
			goto finish;
		if (nmp)
			nmp->nm_bufqiods--;
		if (nfs_iodwant[myiod] == NFSIOD_NOT_AVAILABLE)
			nfs_iodwant[myiod] = NFSIOD_AVAILABLE;
		nfs_iodmount[myiod] = NULL;
		/*
		 * Always keep at least nfs_iodmin kthreads.
		 */
		timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz;
		error = msleep(&nfs_iodwant[myiod], &nfs_iod_mtx, PWAIT | PCATCH,
		    "-", timo);
		if (error) {
			nmp = nfs_iodmount[myiod];
			/*
			 * Rechecking the nm_bufq closes a rare race where the
			 * nfsiod is woken up at the exact time the idle timeout
			 * fires
			 */
			if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
				error = 0;
			break;
		}
	    }
	    if (error)
		    break;
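	    /*
	     * Work phase: take buffers off the front of the mount's queue
	     * and hand each one to nfs_doio() or nfs_doio_directwrite().
	     */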
	    while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
	        int giant_locked = 0;

		/* Take one off the front of the list */
		TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen--;
		if (nmp->nm_bufqwant && nmp->nm_bufqlen <= nfs_numasync) {
		    nmp->nm_bufqwant = 0;
		    wakeup(&nmp->nm_bufq);
		}
		mtx_unlock(&nfs_iod_mtx);
		if (NFS_ISV4(bp->b_vp)) {
			giant_locked = 1;
			mtx_lock(&Giant);
		}
		if (bp->b_flags & B_DIRECT) {
			KASSERT((bp->b_iocmd == BIO_WRITE), ("nfssvc_iod: BIO_WRITE not set"));
			(void)nfs_doio_directwrite(bp);
		} else {
			if (bp->b_iocmd == BIO_READ)
				(void) nfs_doio(bp->b_vp, bp, bp->b_rcred, NULL);
			else
				(void) nfs_doio(bp->b_vp, bp, bp->b_wcred, NULL);
		}
		if (giant_locked)
			mtx_unlock(&Giant);
		mtx_lock(&nfs_iod_mtx);
		/*
		 * If there is more than one iod on this mount, then defect
		 * so that the iods can be shared out fairly between the mounts.
		 */
		if (nfs_defect && nmp->nm_bufqiods > 1) {
		    NFS_DPF(ASYNCIO,
			    ("nfssvc_iod: iod %d defecting from mount %p\n",
			     myiod, nmp));
		    nfs_iodmount[myiod] = NULL;
		    nmp->nm_bufqiods--;
		    break;
		}
	    }
	}
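/*
 * Shut this nfsiod down: release its slot, detach from any mount, and
 * wake anyone waiting for the last nfsiod to terminate.
 */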
finish:
	nfs_asyncdaemon[myiod] = 0;
	if (nmp)
	    nmp->nm_bufqiods--;
	nfs_iodwant[myiod] = NFSIOD_NOT_AVAILABLE;
	nfs_iodmount[myiod] = NULL;
	/* Someone may be waiting for the last nfsiod to terminate. */
	if (--nfs_numasync == 0)
		wakeup(&nfs_numasync);
	mtx_unlock(&nfs_iod_mtx);
	if ((error == 0) || (error == EWOULDBLOCK))
		kproc_exit(0);
	/* Abnormal termination */
	kproc_exit(1);
}