/*-
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/fs/smbfs/smbfs_io.c 292373 2015-12-16 21:30:45Z glebius $
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

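/*
 * Directory reads are returned as a stream of fixed-size dirent
 * structures, so directory offsets are always multiples of DE_SIZE.
 */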
#define DE_SIZE	(sizeof(struct dirent))

static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
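	/*
	 * The first two directory entries, "." and "..", are
	 * synthesized locally rather than taken from the server.
	 */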
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
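	/*
	 * An SMB search handle can only move forward.  If the caller
	 * seeks backwards, or if no search is open yet, restart the
	 * server-side FIND and skip ahead to the wanted offset below.
	 */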
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
	error = 0;
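	/*
	 * Fetch the remaining entries from the server one at a time,
	 * copying each out to the caller; optionally prime the
	 * namecache with the vnodes found along the way.
	 */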
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against access methods that are not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

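	/*
	 * Keep cached file data coherent: if we modified the file
	 * locally, refresh the cached attributes; if the server-side
	 * modification time changed behind our back, invalidate the
	 * local buffers before reading.
	 */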
/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
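	/*
	 * For append or synchronous writes, push out any locally
	 * modified buffers first so data reaches the server in order;
	 * for append, start the write at the cached end of file.
	 */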
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

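	/*
	 * Set up a single-segment kernel uio for the transfer and an
	 * smb_cred that charges the I/O to the caller's credentials.
	 */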
	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
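		/* b_blkno counts DEV_BSIZE blocks; convert to a byte offset. */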
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
		if (error)
			break;
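		/*
		 * A short read means we hit the end of file; zero the
		 * rest of the buffer so stale data is not exposed.
		 */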
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
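	    /*
	     * Clamp the dirty region so that we never push bytes past
	     * the current end of file out to the server.
	     */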
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused.  This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 * (This logic and comment are inherited from the NFS
		 * client code.)
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			splx(s);
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		free(uiop, M_SMBFSDATA);
		smbfs_free_scred(scred);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish we could get rid of the multiple I/O routines.
 */
int
smbfs_getpages(
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;				/* XXX */
	cred = td->td_ucred;			/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for SMB filesystem?
	 */
	VM_OBJECT_WLOCK(object);
	if (pages[npages - 1]->valid != 0) {
		if (--npages == 0) {
			VM_OBJECT_WUNLOCK(object);
			return (VM_PAGER_OK);
		}
		count = npages << PAGE_SHIFT;
	}
	VM_OBJECT_WUNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

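	/*
	 * The pages are now mapped contiguously at kva, so the whole
	 * request can be issued to the server as one read.
	 */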
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
	}
	VM_OBJECT_WUNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, npages);

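	/*
	 * As in getpages, the pages are mapped contiguously at kva and
	 * pushed to the server with a single synchronous write.
	 */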
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error)
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;
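	/*
	 * We now own the flush; other threads entering the loop above
	 * will sleep until NFLUSHINPROG is cleared.
	 */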

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}