/*-
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/fs/smbfs/smbfs_io.c 239065 2012-08-05 14:11:42Z kib $
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");
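/*
 * The knob above is writable at run time; for example,
 * "sysctl vfs.smbfs.fastlookup=0" disables the name cache priming
 * performed during directory reads (see smbfs_readvdir() below).
 */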

#define DE_SIZE	(sizeof(struct dirent))

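/*
 * Read directory entries from the share and translate them into struct
 * dirent records.  The first two slots ("." and "..") are synthesized
 * locally; the rest come from a server-side search context
 * (smbfs_findopen()/smbfs_findnext()) that is consumed sequentially.
 */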
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
		return EINVAL;
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
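	/*
	 * Synthesize the "." and ".." entries, which the server does not
	 * return.  Slot 0 is this directory, slot 1 is the parent; if no
	 * usable inode number is available, fabricate one.
	 */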
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			return error;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0)
		return 0;
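	/*
	 * The server-side search is consumed sequentially.  If the caller's
	 * offset does not match our current position, or no search is open,
	 * restart the search from the beginning and step forward to the
	 * requested slot.
	 */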
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, &scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
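		/*
		 * Optionally prime the name cache with a vnode for this
		 * entry while its attributes are fresh, saving a round trip
		 * on a later lookup.  Tunable via vfs.smbfs.fastlookup.
		 */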
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
	return error;
}

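/*
 * Read from a regular file or directory.  Directory reads are handed to
 * smbfs_readvdir(); for regular files the attributes are revalidated
 * first, so cached buffers get invalidated when the server-side mtime
 * has changed, and the data is then read with a plain smb_read().
 */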
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
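	/*
	 * Keep cached data coherent: after local modifications, just refresh
	 * the cached mtime; otherwise compare the server's mtime with ours
	 * and throw away every cached buffer if the file changed remotely.
	 */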
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

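/*
 * Write to a regular file.  For append or synchronous writes any locally
 * modified buffers are flushed first; on success the cached file size and
 * the pager's notion of it are extended to cover the new end of file.
 */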
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * The file size can be changed by another client.
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error)
				return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
		if (error)
			break;
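		/*
		 * A short read means we hit EOF; zero-fill the rest of
		 * the buffer so no stale data is exposed to the caller.
		 */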
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
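	    /*
	     * Never write past the current end of file: clip the dirty
	     * region to np->n_size.
	     */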
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR; instead, report the
		 * interruption by setting B_EINTR.  For the B_ASYNC case,
		 * B_EINTR is not relevant, so the RPC attempt is
		 * essentially a noop.  For the case of a V3 write RPC not
		 * being committed to stable storage, the block is still
		 * dirty and requires either a commit RPC or another
		 * write RPC with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused.  This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			splx(s);
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count, reqpage;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_object_t object;
	vm_page_t *pages, m;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	reqpage = ap->a_reqpage;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	m = pages[reqpage];

	VM_OBJECT_LOCK(object);
	if (m->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return 0;
	}
	VM_OBJECT_UNLOCK(object);

	smb_makescred(&scred, td, cred);

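	/*
	 * Borrow a pager buffer and map the pages into its KVA window so
	 * the whole run can be read from the server as one contiguous
	 * system-space buffer.
	 */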
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		for (i = 0; i < npages; i++) {
			if (reqpage != i) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page.
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}

		if (i != reqpage)
			vm_page_readahead_finish(m, error);
	}
	VM_OBJECT_UNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done synchronously.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, npages);	/* pages, not bytes */

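	/*
	 * Push the whole mapped run to the server in a single synchronous
	 * smb_write() through the pbuf's KVA window.
	 */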
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error)
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		(void)tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
	}

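	/*
	 * Keep retrying until the flush succeeds; a signal aborts the
	 * flush, clears NFLUSHINPROG and wakes up any waiter.
	 */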
	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}