/*	$NetBSD$	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/kmem.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern int nfs_commitsize;
extern struct nfsstats nfsstats;

static int nfs_doio_read(struct buf *, struct uio *);
static int nfs_doio_write(struct buf *, struct uio *);
static int nfs_doio_phys(struct buf *, struct uio *);

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag,
	    kauth_cred_t cred, int cflag)
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = NULL, *rabp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	void *baddr;
	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp, *edp, *ep;
	off_t curoff = 0;
	int advice;
	struct lwp *l = curlwp;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * nfs_attrtimeo seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */

	if (vp->v_type != VLNK) {
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return error;
	}

	do {
	    /*
	     * Don't cache symlinks.
	     */
	    if ((vp->v_vflag & VV_ROOT) && vp->v_type == VLNK) {
		return (nfs_readlinkrpc(vp, uio, cred));
	    }
	    baddr = (void *)0;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;

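		/*
		 * For regular files the read is satisfied directly from
		 * the page cache via UBC; pages that are not cached yet
		 * are fetched from the server through VOP_GETPAGES(),
		 * not by this routine.
		 */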
		advice = IO_ADV_DECODE(ioflag);
		error = 0;
		while (uio->uio_resid > 0) {
			vsize_t bytelen;

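			/*
			 * Apply any truncation that was deferred while
			 * the pages were busy before checking for EOF.
			 */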
			nfs_delayedtruncate(vp);
			if (np->n_size <= uio->uio_offset) {
				break;
			}
			bytelen =
			    MIN(np->n_size - uio->uio_offset, uio->uio_resid);
			error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
			    UBC_READ | UBC_PARTIALOK | UBC_UNMAP_FLAG(vp));
			if (error) {
				/*
				 * XXXkludge
				 * the file has been truncated on the server.
				 * there isn't much we can do.
				 */
				if (uio->uio_offset >= np->n_size) {
					/* end of file */
					error = 0;
				} else {
					break;
				}
			}
		}
		break;

	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, MAXPATHLEN, l);
		if (!bp)
			return (EINTR);
		if ((bp->b_oflags & BO_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp);
			if (error) {
				brelse(bp, 0);
				return (error);
			}
		}
		n = MIN(uio->uio_resid, MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    case VDIR:
diragain:
		nfsstats.biocache_readdirs++;
		ndp = nfs_searchdircache(vp, uio->uio_offset,
			(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
		if (!ndp) {
			/*
			 * We've been handed a cookie that is not
			 * in the cache. If we're not translating
			 * 32 <-> 64, it may be a value that was
			 * flushed out of the cache because it grew
			 * too big. Let the server judge if it's
			 * valid or not. In the translation case,
			 * we have no way of validating this value,
			 * so punt.
			 */
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
				return (EINVAL);
			ndp = nfs_enterdircache(vp, uio->uio_offset,
				uio->uio_offset, 0, 0);
		}

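		/*
		 * If the directory's EOF cookie is known and this is
		 * it, there is nothing left to read.
		 */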
		if (NFS_EOFVALID(np) &&
		    ndp->dc_cookie == np->n_direofoffset) {
			nfs_putdircache(np, ndp);
			nfsstats.direofcache_hits++;
			return (0);
		}

		bp = nfs_getcacheblk(vp, NFSDC_BLKNO(ndp), NFS_DIRBLKSIZ, l);
		if (!bp)
		    return (EINTR);
		if ((bp->b_oflags & BO_DONE) == 0) {
		    bp->b_flags |= B_READ;
		    bp->b_dcookie = ndp->dc_blkcookie;
		    error = nfs_doio(bp);
		    if (error) {
			/*
			 * Yuck! The directory has been modified on the
			 * server. Punt and let the userland code
			 * deal with it.
			 */
			nfs_putdircache(np, ndp);
			brelse(bp, 0);
			/*
			 * nfs_request maps NFSERR_BAD_COOKIE to EINVAL.
			 */
			if (error == EINVAL) { /* NFSERR_BAD_COOKIE */
			    nfs_invaldircache(vp, 0);
			    nfs_vinvalbuf(vp, 0, cred, l, 1);
			}
			return (error);
		    }
		}

		/*
		 * Just return if we hit EOF right away with this
		 * block. Always check here, because direofoffset
		 * may have been set by an nfsiod since the last
		 * check.
		 *
		 * Also, an empty block implies EOF.
		 */

		if (bp->b_bcount == bp->b_resid ||
		    (NFS_EOFVALID(np) &&
		    ndp->dc_blkcookie == np->n_direofoffset)) {
			KASSERT(bp->b_bcount != bp->b_resid ||
			    ndp->dc_blkcookie == bp->b_dcookie);
			nfs_putdircache(np, ndp);
			brelse(bp, BC_NOCACHE);
			return 0;
		}

		/*
		 * Find the entry we were looking for in the block.
		 */

		en = ndp->dc_entry;

		pdp = dp = (struct dirent *)bp->b_data;
		edp = (struct dirent *)(void *)((char *)bp->b_data + bp->b_bcount -
		    bp->b_resid);
		enn = 0;
		while (enn < en && dp < edp) {
			pdp = dp;
			dp = _DIRENT_NEXT(dp);
			enn++;
		}

		/*
		 * If the entry number was bigger than the number of
		 * entries in the block, or the cookie of the previous
		 * entry doesn't match, the directory cache is
		 * stale. Flush it and try again (i.e. go to
		 * the server).
		 */
		if (dp >= edp || (struct dirent *)_DIRENT_NEXT(dp) > edp ||
		    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
			printf("invalid cache: %p %p %p off %jx %jx\n",
				pdp, dp, edp,
				(uintmax_t)uio->uio_offset,
				(uintmax_t)NFS_GETCOOKIE(pdp));
#endif
			nfs_putdircache(np, ndp);
			brelse(bp, 0);
			nfs_invaldircache(vp, 0);
			nfs_vinvalbuf(vp, 0, cred, l, 0);
			goto diragain;
		}

		on = (char *)dp - (char *)bp->b_data;

		/*
		 * Cache all entries that may be exported to the
		 * user, as they may be thrown back at us. The
		 * NFSBIO_CACHECOOKIES flag indicates that all
		 * entries are being 'exported', so cache them all.
		 */

		if (en == 0 && pdp == dp) {
			dp = _DIRENT_NEXT(dp);
			enn++;
		}

		if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
			n = uio->uio_resid;
			enough = 1;
		} else
			n = bp->b_bcount - bp->b_resid - on;

		ep = (struct dirent *)(void *)((char *)bp->b_data + on + n);

		/*
		 * Find last complete entry to copy, caching entries
		 * (if requested) as we go.
		 */

		while (dp < ep && (struct dirent *)_DIRENT_NEXT(dp) <= ep) {
			if (cflag & NFSBIO_CACHECOOKIES) {
				nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
				    ndp->dc_blkcookie, enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp,
					    nndp->dc_cookie32);
				}
				nfs_putdircache(np, nndp);
			}
			pdp = dp;
			dp = _DIRENT_NEXT(dp);
			enn++;
		}
		nfs_putdircache(np, ndp);

		/*
		 * If the last requested entry was not the last in the
		 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
		 * cache the cookie of the last requested one, and
		 * set the offset to it.
		 */

		if ((on + n) < bp->b_bcount - bp->b_resid) {
			curoff = NFS_GETCOOKIE(pdp);
			nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
			    enn, bp->b_lblkno);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
			nfs_putdircache(np, nndp);
		} else
			curoff = bp->b_dcookie;

		/*
		 * Always cache the entry for the next block,
		 * so that readaheads can use it.
		 */
		nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0, 0);
		if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
			if (curoff == bp->b_dcookie) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		}

		n = (char *)_DIRENT_NEXT(pdp) - ((char *)bp->b_data + on);

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    !NFS_EOFVALID(np)) {
			rabp = nfs_getcacheblk(vp, NFSDC_BLKNO(nndp),
						NFS_DIRBLKSIZ, l);
			if (rabp) {
			    if ((rabp->b_oflags & (BO_DONE | BO_DELWRI)) == 0) {
				rabp->b_dcookie = nndp->dc_cookie;
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp)) {
				    brelse(rabp, BC_INVAL);
				}
			    } else
				brelse(rabp, 0);
			}
		}
		nfs_putdircache(np, nndp);
		got_buf = 1;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove((char *)baddr + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		uio->uio_offset = curoff;
		if (enough)
			n = 0;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
	    }
	    if (got_buf)
		brelse(bp, 0);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	struct lwp *l = curlwp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	kauth_cred_t cred = ap->a_cred;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	voff_t oldoff, origoff;
	vsize_t bytelen;
	int error = 0;
	int ioflag = ap->a_ioflag;
	int extended = 0, wrotedata = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (ioflag & IO_APPEND) {
		NFS_INVALIDATE_ATTRCACHE(np);
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;

		/*
		 * This is already checked above VOP_WRITE, but recheck
		 * the append case here to make sure our idea of the
		 * file size is as fresh as possible.
		 */
		if (uio->uio_offset + uio->uio_resid >
		      l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
			mutex_enter(proc_lock);
			psignal(l->l_proc, SIGXFSZ);
			mutex_exit(proc_lock);
			return (EFBIG);
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	origoff = uio->uio_offset;
	do {
		bool overwrite; /* if we are overwriting whole pages */
		u_quad_t oldsize;
		oldoff = uio->uio_offset;
		bytelen = uio->uio_resid;

		nfsstats.biocache_writes++;

		oldsize = np->n_size;
		np->n_flag |= NMODIFIED;
		if (np->n_size < uio->uio_offset + bytelen) {
			np->n_size = uio->uio_offset + bytelen;
		}
		overwrite = false;
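		/*
		 * Check whether this write overwrites whole, page-aligned
		 * pages; if so, UBC_FAULTBUSY below lets us skip reading
		 * the old contents from the server just to overwrite them.
		 */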
		if ((uio->uio_offset & PAGE_MASK) == 0) {
			if ((vp->v_vflag & VV_MAPPED) == 0 &&
			    bytelen > PAGE_SIZE) {
				bytelen = trunc_page(bytelen);
				overwrite = true;
			} else if ((bytelen & PAGE_MASK) == 0 &&
			    uio->uio_offset >= vp->v_size) {
				overwrite = true;
			}
		}
		if (vp->v_size < uio->uio_offset + bytelen) {
			uvm_vnp_setwritesize(vp, uio->uio_offset + bytelen);
		}
		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
		    UVM_ADV_RANDOM, UBC_WRITE | UBC_PARTIALOK |
		    (overwrite ? UBC_FAULTBUSY : 0) |
		    UBC_UNMAP_FLAG(vp));
		if (error) {
			uvm_vnp_setwritesize(vp, vp->v_size);
			if (overwrite && np->n_size != oldsize) {
				/*
				 * backout size and free pages past eof.
				 */
				np->n_size = oldsize;
				mutex_enter(vp->v_interlock);
				(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
				    0, PGO_SYNCIO | PGO_FREE);
			}
			break;
		}
		wrotedata = 1;

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 */

		if (vp->v_size < uio->uio_offset) {
			uvm_vnp_setsize(vp, uio->uio_offset);
			extended = 1;
		}

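		/*
		 * If this iteration crossed into a new nm_wsize-aligned
		 * block, start an asynchronous flush of the block(s) we
		 * just finished, so dirty pages head to the server in
		 * write-size chunks.
		 */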
		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
			mutex_enter(vp->v_interlock);
			error = VOP_PUTPAGES(vp,
			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
				       ~(nmp->nm_wsize - 1)), PGO_CLEANIT);
		}
	} while (uio->uio_resid > 0);
	if (wrotedata)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (error == 0 && (ioflag & IO_SYNC) != 0) {
		mutex_enter(vp->v_interlock);
		error = VOP_PUTPAGES(vp,
		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
			       ~(nmp->nm_wsize - 1)),
		    PGO_CLEANIT | PGO_SYNCIO);
	}
	return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct lwp *l)
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

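	/*
	 * On interruptible mounts, getblk() is given PCATCH so that a
	 * signal makes it return NULL; check whether we were interrupted
	 * and otherwise keep retrying with a timeout.
	 */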
	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, l))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, kauth_cred_t cred,
		struct lwp *l, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, allerror = 0, slptimeo;
	bool catch;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		catch = true;
		slptimeo = 2 * hz;
	} else {
		catch = false;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	mutex_enter(vp->v_interlock);
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = mtsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo, vp->v_interlock);
		if (error && intrflg && nfs_sigintr(nmp, NULL, l)) {
			mutex_exit(vp->v_interlock);
			return EINTR;
		}
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	mutex_exit(vp->v_interlock);
	error = vinvalbuf(vp, flags, cred, l, catch, 0);
	while (error) {
		if (allerror == 0)
			allerror = error;
		if (intrflg && nfs_sigintr(nmp, NULL, l)) {
			error = EINTR;
			break;
		}
		error = vinvalbuf(vp, flags, cred, l, 0, slptimeo);
	}
	mutex_enter(vp->v_interlock);
	if (allerror != 0) {
		/*
		 * Keep error from vinvalbuf so fsync/close will know.
		 */
		np->n_error = allerror;
		np->n_flag |= NWRITEERR;
	}
	if (error == 0)
		np->n_flag &= ~NMODIFIED;
	np->n_flag &= ~NFLUSHINPROG;
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	mutex_exit(vp->v_interlock);
	return error;
}

/*
 * nfs_flushstalebuf: flush cache if it's stale.
 *
 * => caller shouldn't own any pages or buffers which belong to the vnode.
 */

int
nfs_flushstalebuf(struct vnode *vp, kauth_cred_t cred, struct lwp *l,
    int flags)
{
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;

	if (np->n_flag & NMODIFIED) {
		if ((flags & NFS_FLUSHSTALEBUF_MYWRITE) == 0
		    || vp->v_type != VREG) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
		} else {
			/*
			 * XXX assuming writes are ours.
			 */
		}
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			np->n_mtime = vattr.va_mtime;
		}
	}

	return error;
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */

int
nfs_asyncio(struct buf *bp)
{
	struct nfs_iod *iod;
	struct nfsmount *nmp;
	int slptimeo = 0, error;
	bool catch = false;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		catch = true;

	/*
	 * Find a free iod to process this request.
	 */

	mutex_enter(&nfs_iodlist_lock);
	iod = LIST_FIRST(&nfs_iodlist_idle);
	if (iod) {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		LIST_REMOVE(iod, nid_idle);
		mutex_enter(&iod->nid_lock);
		mutex_exit(&nfs_iodlist_lock);
		KASSERT(iod->nid_mount == NULL);
		iod->nid_mount = nmp;
		cv_signal(&iod->nid_cv);
		mutex_enter(&nmp->nm_lock);
		mutex_exit(&iod->nid_lock);
		nmp->nm_bufqiods++;
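		/*
		 * One more iod now serves this mount, so the queue limit
		 * (two buffers per iod, enforced below) has grown; wake
		 * up anyone waiting for queue space.
		 */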
		if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
			cv_broadcast(&nmp->nm_aiocv);
		}
	} else {
		mutex_exit(&nfs_iodlist_lock);
		mutex_enter(&nmp->nm_lock);
	}

	KASSERT(mutex_owned(&nmp->nm_lock));

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.  However, even if we have an iod, do not initiate
	 * queue cleaning if curproc is the pageout daemon.  If the NFS
	 * mount is via local loopback, we may put curproc (the pagedaemon)
	 * to sleep waiting for the writes to complete.  But the server
	 * (ourselves) may block the write, waiting for its (i.e., our)
	 * pagedaemon to produce clean pages to handle the write: deadlock.
	 * XXX: start non-loopback mounts straight away?  If "lots free",
	 * let pagedaemon start loopback writes anyway?
	 */
	if (nmp->nm_bufqiods > 0) {

		/*
		 * Ensure that the queue never grows too large.
		 */
		if (curlwp == uvm.pagedaemon_lwp) {
			/* Enqueue for later, to avoid free-page deadlock */
		} else while (nmp->nm_bufqlen >= 2 * nmp->nm_bufqiods) {
			if (catch) {
				error = cv_timedwait_sig(&nmp->nm_aiocv,
				    &nmp->nm_lock, slptimeo);
			} else {
				error = cv_timedwait(&nmp->nm_aiocv,
				    &nmp->nm_lock, slptimeo);
			}
			if (error) {
				if (nfs_sigintr(nmp, NULL, curlwp)) {
					mutex_exit(&nmp->nm_lock);
					return (EINTR);
				}
				if (catch) {
					catch = false;
					slptimeo = 2 * hz;
				}
			}

			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */

			if (nmp->nm_bufqiods == 0) {
				mutex_exit(&nmp->nm_lock);
				goto again;
			}
		}
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		mutex_exit(&nmp->nm_lock);
		return (0);
	}
	mutex_exit(&nmp->nm_lock);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */

	return (EIO);
}

/*
 * nfs_doio for read.
 */
static int
nfs_doio_read(struct buf *bp, struct uio *uiop)
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0;

	uiop->uio_rw = UIO_READ;
	switch (vp->v_type) {
	case VREG:
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop);
		if (!error && uiop->uio_resid) {
			int diff, len;

			/*
			 * If uio_resid > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet or the file has been truncated
			 * on the server.
			 * Just zero fill the rest of the valid area.
			 */

			KASSERT(vp->v_size >=
			    uiop->uio_offset + uiop->uio_resid);
			diff = bp->b_bcount - uiop->uio_resid;
			len = uiop->uio_resid;
			memset((char *)bp->b_data + diff, 0, len);
			uiop->uio_resid = 0;
		}
#if 0
		if (uiop->uio_lwp && (vp->v_iflag & VI_TEXT) &&
		    timespeccmp(&np->n_mtime, &np->n_vattr->va_mtime, !=)) {
			mutex_enter(proc_lock);
			killproc(uiop->uio_lwp->l_proc, "process text file was modified");
			mutex_exit(proc_lock);
#if 0 /* XXX NJWLWP */
			uiop->uio_lwp->l_proc->p_holdcnt++;
#endif
		}
#endif
		break;
	case VLNK:
		KASSERT(uiop->uio_offset == (off_t)0);
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, np->n_rcred);
		break;
	case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bp->b_dcookie;
#ifndef NFS_V2_ONLY
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop,
			    curlwp->l_cred);
			/*
			 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
			 */
			if (error == ENOTSUP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
#else
		nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
#endif
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop,
			    curlwp->l_cred);
		if (!error) {
			bp->b_dcookie = uiop->uio_offset;
		}
		break;
	default:
		printf("nfs_doio:  type %x unexpected\n", vp->v_type);
		break;
	}
	bp->b_error = error;
	return error;
}

/*
 * nfs_doio for write.
 */
static int
nfs_doio_write(struct buf *bp, struct uio *uiop)
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int iomode;
	bool stalewriteverf = false;
	int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct vm_page **pgs, *spgs[UBC_MAX_PAGES];
#ifndef NFS_V2_ONLY
	bool needcommit = true; /* need only COMMIT RPC */
#else
	bool needcommit = false; /* need only COMMIT RPC */
#endif
	bool pageprotected;
	struct uvm_object *uobj = &vp->v_uobj;
	int error;
	off_t off, cnt;

	if (npages < __arraycount(spgs))
		pgs = spgs;
	else {
		if ((pgs = kmem_alloc(sizeof(*pgs) * npages, KM_NOSLEEP)) ==
		    NULL)
			return ENOMEM;
	}

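	/*
	 * Asynchronous NFSv3 writes may go out UNSTABLE and be committed
	 * later; everything else is written FILESYNC so that the data is
	 * on stable storage when the rpc completes.
	 */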
	if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
		iomode = NFSV3WRITE_UNSTABLE;
	} else {
		iomode = NFSV3WRITE_FILESYNC;
	}

#ifndef NFS_V2_ONLY
again:
#endif
	rw_enter(&nmp->nm_writeverflock, RW_READER);

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		if (pgs[i]->uobject == uobj &&
		    pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
			KASSERT(pgs[i]->flags & PG_BUSY);
			/*
			 * this page belongs to our object.
			 */
			mutex_enter(uobj->vmobjlock);
			/*
			 * Write the page out stably if it's about to be
			 * released, because we can't resend it if the
			 * server crashes.
			 *
			 * XXX assuming PG_RELEASED|PG_PAGEOUT won't be
			 * changed until the page is unbusied.
			 */
			if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
				iomode = NFSV3WRITE_FILESYNC;
			/*
			 * If we find a page which hasn't been sent yet,
			 * we need to do a WRITE RPC.
			 */
			if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
				needcommit = false;
			mutex_exit(uobj->vmobjlock);
		} else {
			iomode = NFSV3WRITE_FILESYNC;
			needcommit = false;
		}
	}
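	/*
	 * For an UNSTABLE write, mark the pages PG_NEEDCOMMIT and
	 * write-protect them so they cannot change while the data sits
	 * uncommitted on the server; a later modification will fault
	 * and clear PG_NEEDCOMMIT via nfs_getpages().
	 */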
	if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
			pmap_page_protect(pgs[i], VM_PROT_READ);
		}
		mutex_exit(uobj->vmobjlock);
		pageprotected = true; /* pages can't be modified during i/o. */
	} else
		pageprotected = false;

	/*
	 * Send the data to the server if necessary,
	 * otherwise just send a commit rpc.
	 */
#ifndef NFS_V2_ONLY
	if (needcommit) {

		/*
		 * If the buffer is in the range that we already committed,
		 * there's nothing to do.
		 *
		 * If it's in the range that we need to commit, push the
		 * whole range at once, otherwise only push the buffer.
		 * In both these cases, acquire the commit lock to avoid
		 * other processes modifying the range.
		 */

		off = uiop->uio_offset;
		cnt = bp->b_bcount;
		mutex_enter(&np->n_commitlock);
		if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
			bool pushedrange;
			if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
				pushedrange = true;
				off = np->n_pushlo;
				cnt = np->n_pushhi - np->n_pushlo;
			} else {
				pushedrange = false;
			}
			error = nfs_commit(vp, off, cnt, curlwp);
			if (error == 0) {
				if (pushedrange) {
					nfs_merge_commit_ranges(vp);
				} else {
					nfs_add_committed_range(vp, off, cnt);
				}
			}
		} else {
			error = 0;
		}
		mutex_exit(&np->n_commitlock);
		rw_exit(&nmp->nm_writeverflock);
		if (!error) {
			/*
			 * pages are now on stable storage.
			 */
			uiop->uio_resid = 0;
			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
			}
			mutex_exit(uobj->vmobjlock);
			goto out;
		} else if (error == NFSERR_STALEWRITEVERF) {
			nfs_clearcommit(vp->v_mount);
			goto again;
		}
		if (error) {
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		}
		goto out;
	}
#endif
	off = uiop->uio_offset;
	cnt = bp->b_bcount;
	uiop->uio_rw = UIO_WRITE;
	nfsstats.write_bios++;
	error = nfs_writerpc(vp, uiop, &iomode, pageprotected, &stalewriteverf);
#ifndef NFS_V2_ONLY
	if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		/*
		 * we need to commit pages later.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_add_tobecommitted_range(vp, off, cnt);
		/*
		 * If too many pages are uncommitted, commit some of them now.
		 */
		if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
			off = np->n_pushlo;
			cnt = nfs_commitsize >> 1;
			error = nfs_commit(vp, off, cnt, curlwp);
			if (!error) {
				nfs_add_committed_range(vp, off, cnt);
				nfs_del_tobecommitted_range(vp, off, cnt);
			}
			if (error == NFSERR_STALEWRITEVERF) {
				stalewriteverf = true;
				error = 0; /* it isn't a real error */
			}
		} else {
			/*
			 * re-dirty pages so that they will be passed
			 * to us later again.
			 */
			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~PG_CLEAN;
			}
			mutex_exit(uobj->vmobjlock);
		}
		mutex_exit(&np->n_commitlock);
	} else
#endif
	if (!error) {
		/*
		 * pages are now on stable storage.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_del_committed_range(vp, off, cnt);
		mutex_exit(&np->n_commitlock);
		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
		}
		mutex_exit(uobj->vmobjlock);
	} else {
		/*
		 * we got an error.
		 */
		bp->b_error = np->n_error = error;
		np->n_flag |= NWRITEERR;
	}

	rw_exit(&nmp->nm_writeverflock);

	if (stalewriteverf) {
		nfs_clearcommit(vp->v_mount);
	}
#ifndef NFS_V2_ONLY
out:
#endif
	if (pgs != spgs)
		kmem_free(pgs, sizeof(*pgs) * npages);
	return error;
}

/*
 * nfs_doio for B_PHYS.
 */
static int
nfs_doio_phys(struct buf *bp, struct uio *uiop)
{
	struct vnode *vp = bp->b_vp;
	int error;

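	/*
	 * B_PHYS buffers (swap i/o via /dev/drum) carry their offset in
	 * b_blkno, counted in DEV_BSIZE units.
	 */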
	uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	} else {
		int iomode = NFSV3WRITE_DATASYNC;
		bool stalewriteverf;
		struct nfsmount *nmp = VFSTONFS(vp->v_mount);

		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		rw_enter(&nmp->nm_writeverflock, RW_READER);
		error = nfs_writerpc(vp, uiop, &iomode, false, &stalewriteverf);
		rw_exit(&nmp->nm_writeverflock);
		if (stalewriteverf) {
			nfs_clearcommit(bp->b_vp->v_mount);
		}
	}
	bp->b_error = error;
	return error;
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp)
{
	int error;
	struct uio uio;
	struct uio *uiop = &uio;
	struct iovec io;
	UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
	UIO_SETUP_SYSSPACE(uiop);
	io.iov_base = bp->b_data;
	io.iov_len = uiop->uio_resid = bp->b_bcount;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		error = nfs_doio_phys(bp, uiop);
	} else if (bp->b_flags & B_READ) {
		error = nfs_doio_read(bp, uiop);
	} else {
		error = nfs_doio_write(bp, uiop);
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bp);
	return (error);
}

/*
 * Vnode op for VM getpages.
 */

int
nfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct nfsnode *np = VTONFS(vp);
	const int npages = *ap->a_count;
	struct vm_page *pg, **pgs, **opgs, *spgs[UBC_MAX_PAGES];
	off_t origoffset, len;
	int i, error;
	bool v3 = NFS_ISV3(vp);
	bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	bool locked = (ap->a_flags & PGO_LOCKED) != 0;

	/*
	 * If we are not locked we are not really using opgs,
	 * so just initialize it.
	 */
	if (!locked || npages < __arraycount(spgs))
		opgs = spgs;
	else {
		if ((opgs = kmem_alloc(npages * sizeof(*opgs), KM_NOSLEEP)) ==
		    NULL)
			return ENOMEM;
	}

	/*
	 * call the genfs code to get the pages.  `pgs' may be NULL
	 * when doing read-ahead.
	 */
	pgs = ap->a_m;
	if (write && locked && v3) {
		KASSERT(pgs != NULL);
#ifdef DEBUG

		/*
		 * If PGO_LOCKED is set, real pages shouldn't exist
		 * in the array.
		 */

		for (i = 0; i < npages; i++)
			KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
#endif
		memcpy(opgs, pgs, npages * sizeof(*pgs));
	}
	error = genfs_getpages(v);
	if (error)
		goto out;

	/*
	 * for read faults where the nfs node is not yet marked NMODIFIED,
	 * set PG_RDONLY on the pages so that we come back here if someone
	 * tries to modify later via the mapping that will be entered for
	 * this fault.
	 */

	if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
		if (!locked) {
			mutex_enter(uobj->vmobjlock);
		}
		for (i = 0; i < npages; i++) {
			pg = pgs[i];
			if (pg == NULL || pg == PGO_DONTCARE) {
				continue;
			}
			pg->flags |= PG_RDONLY;
		}
		if (!locked) {
			mutex_exit(uobj->vmobjlock);
		}
	}
	if (!write)
		goto out;

	/*
	 * this is a write fault, update the commit info.
	 */

	origoffset = ap->a_offset;
	len = npages << PAGE_SHIFT;

	if (v3) {
		if (!locked) {
			mutex_enter(&np->n_commitlock);
		} else {
			if (!mutex_tryenter(&np->n_commitlock)) {

				/*
				 * Since PGO_LOCKED is set, we need to unbusy
				 * all pages fetched by genfs_getpages() above,
				 * tell the caller that there are no pages
				 * available and put back original pgs array.
				 */

				mutex_enter(&uvm_pageqlock);
				uvm_page_unbusy(pgs, npages);
				mutex_exit(&uvm_pageqlock);
				*ap->a_count = 0;
				memcpy(pgs, opgs,
				    npages * sizeof(*pgs));
				error = EBUSY;
				goto out;
			}
		}
		nfs_del_committed_range(vp, origoffset, len);
		nfs_del_tobecommitted_range(vp, origoffset, len);
	}
	np->n_flag |= NMODIFIED;
	if (!locked) {
		mutex_enter(uobj->vmobjlock);
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
	}
	if (!locked) {
		mutex_exit(uobj->vmobjlock);
	}
	if (v3) {
		mutex_exit(&np->n_commitlock);
	}
out:
	if (opgs != spgs)
		kmem_free(opgs, sizeof(*opgs) * npages);
	return error;
}
