/*-
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/fs/smbfs/smbfs_io.c 176559 2008-02-25 18:45:57Z attilio $
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>	/* defines plimit structure in proc struct */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");


#define DE_SIZE	(sizeof(struct dirent))

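/*
 * Read directory entries: synthesize "." and "..", then pull the remaining
 * entries from the server-side search context cached on the smbnode.
 */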
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	np = VTOSMB(vp);
	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
		return EINVAL;
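	/* Synthesize the "." and ".." entries, which the server does not return. */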
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			return error;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0)
		return 0;
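	/*
	 * (Re)open the server-side search if the cached context is missing or
	 * does not match the requested offset, then step it forward as needed.
	 */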
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, &scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
	return error;
}

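/*
 * Read from a regular file or directory. The attribute cache is consulted
 * first so stale buffers are invalidated before data is read from the server.
 */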
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Protect against I/O methods that are not supported for now
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported !\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

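/*
 * Write to a regular file. IO_APPEND and IO_SYNC force cached data out
 * first; the write itself is sent synchronously to the server.
 */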
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	struct proc *p;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG unsupported !\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	p = td->td_proc;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred, td);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
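	/* Enforce the process file size limit, posting SIGXFSZ on overflow. */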
	if (p != NULL) {
		PROC_LOCK(p);
		if (uiop->uio_offset + uiop->uio_resid >
		    lim_cur(p, RLIMIT_FSIZE)) {
			psignal(p, SIGXFSZ);
			PROC_UNLOCK(p);
			return EFBIG;
		}
		PROC_UNLOCK(p);
	}
	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
		if (error)
			break;
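		/* Short read: zero the unfilled tail of the buffer. */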
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio:  type %x unexpected\n",vp->v_type);
		break;
	    };
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count, reqpage;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_object_t object;
	vm_page_t *pages, m;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;				/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	reqpage = ap->a_reqpage;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	m = pages[reqpage];

	VM_OBJECT_LOCK(object);
	if (m->valid != 0) {
		/* handled by vm_fault now	  */
		/* vm_page_zero_invalid(m, TRUE); */
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return 0;
	}
	VM_OBJECT_UNLOCK(object);

	smb_makescred(&scred, td, cred);

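	/*
	 * Map the pages into a pager buffer's KVA and read them from the
	 * server in a single synchronous request.
	 */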
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n",error);
		vm_page_lock_queues();
		for (i = 0; i < npages; i++) {
			if (reqpage != i)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}

		if (i != reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

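	/*
	 * Map the dirty pages into a pager buffer's KVA and write them to
	 * the server in a single synchronous request.
	 */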
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		vm_page_lock_queues();
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		vm_page_unlock_queues();
	}
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
	}

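	/* Flush and invalidate buffers, retrying on failure; bail out if interrupted. */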
	error = vinvalbuf(vp, V_SAVE, td, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, td, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}