smbfs_io.c (107821) → smbfs_io.c (111741): deleted line shown first, added line second at each change
1/*
2 * Copyright (c) 2000-2001, Boris Popov
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $FreeBSD: head/sys/fs/smbfs/smbfs_io.c 107821 2002-12-13 10:15:01Z tjr $
32 * $FreeBSD: head/sys/fs/smbfs/smbfs_io.c 111741 2003-03-02 15:50:23Z des $
33 *
34 */
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/resourcevar.h> /* defines plimit structure in proc struct */
38#include <sys/kernel.h>
39#include <sys/proc.h>
40#include <sys/fcntl.h>
41#include <sys/bio.h>
42#include <sys/buf.h>
43#include <sys/mount.h>
44#include <sys/namei.h>
45#include <sys/vnode.h>
46#include <sys/dirent.h>
47#include <sys/signalvar.h>
48#include <sys/sysctl.h>
49#include <sys/vmmeter.h>
50
51#include <vm/vm.h>
52#include <vm/vm_page.h>
53#include <vm/vm_extern.h>
54#include <vm/vm_object.h>
55#include <vm/vm_pager.h>
56#include <vm/vnode_pager.h>
57/*
58#include <sys/ioccom.h>
59*/
60#include <netsmb/smb.h>
61#include <netsmb/smb_conn.h>
62#include <netsmb/smb_subr.h>
63
64#include <fs/smbfs/smbfs.h>
65#include <fs/smbfs/smbfs_node.h>
66#include <fs/smbfs/smbfs_subr.h>
67
68/*#define SMBFS_RWGENERIC*/
69
70extern int smbfs_pbuf_freecnt;
71
72static int smbfs_fastlookup = 1;
73
74SYSCTL_DECL(_vfs_smbfs);
75SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");
76
77
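
The fastlookup knob declared above is an ordinary read-write sysctl under vfs.smbfs; it gates the name-cache priming done in smbfs_readvdir() below. The OID string comes straight from the SYSCTL_INT() line; the userland harness around it is only a sketch, but sysctlbyname(3) is the standard way to read such a knob:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int val;
        size_t len = sizeof(val);

        /* Read vfs.smbfs.fastlookup; pass a new value via the last
           two arguments to change it (needs privilege). */
        if (sysctlbyname("vfs.smbfs.fastlookup", &val, &len, NULL, 0) == 0)
                printf("vfs.smbfs.fastlookup = %d\n", val);
        else
                perror("sysctlbyname");
        return (0);
}
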
78#define DE_SIZE (sizeof(struct dirent))
79
80static int
81smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
82{
83 struct dirent de;
84 struct componentname cn;
85 struct smb_cred scred;
86 struct smbfs_fctx *ctx;
87 struct vnode *newvp;
88 struct smbnode *np = VTOSMB(vp);
89 int error/*, *eofflag = ap->a_eofflag*/;
90 long offset, limit;
91
92 np = VTOSMB(vp);
93 SMBVDEBUG("dirname='%s'\n", np->n_name);
94 smb_makescred(&scred, uio->uio_td, cred);
95 offset = uio->uio_offset / DE_SIZE; /* offset in the directory */
96 limit = uio->uio_resid / DE_SIZE;
97 if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
98 return EINVAL;
99 while (limit && offset < 2) {
100 limit--;
101 bzero((caddr_t)&de, DE_SIZE);
102 de.d_reclen = DE_SIZE;
103 de.d_fileno = (offset == 0) ? np->n_ino :
104 (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
105 if (de.d_fileno == 0)
106 de.d_fileno = 0x7ffffffd + offset;
107 de.d_namlen = offset + 1;
108 de.d_name[0] = '.';
109 de.d_name[1] = '.';
110 de.d_name[offset + 1] = '\0';
111 de.d_type = DT_DIR;
112 error = uiomove((caddr_t)&de, DE_SIZE, uio);
112 error = uiomove(&de, DE_SIZE, uio);
113 if (error)
114 return error;
115 offset++;
116 uio->uio_offset += DE_SIZE;
117 }
118 if (limit == 0)
119 return 0;
120 if (offset != np->n_dirofs || np->n_dirseq == NULL) {
121 SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
122 if (np->n_dirseq) {
123 smbfs_findclose(np->n_dirseq, &scred);
124 np->n_dirseq = NULL;
125 }
126 np->n_dirofs = 2;
127 error = smbfs_findopen(np, "*", 1,
128 SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
129 &scred, &ctx);
130 if (error) {
131 SMBVDEBUG("can not open search, error = %d", error);
132 return error;
133 }
134 np->n_dirseq = ctx;
135 } else
136 ctx = np->n_dirseq;
137 while (np->n_dirofs < offset) {
138 error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
139 if (error) {
140 smbfs_findclose(np->n_dirseq, &scred);
141 np->n_dirseq = NULL;
142 return error == ENOENT ? 0 : error;
143 }
144 }
145 error = 0;
146 for (; limit; limit--, offset++) {
147 error = smbfs_findnext(ctx, limit, &scred);
148 if (error)
149 break;
150 np->n_dirofs++;
151 bzero((caddr_t)&de, DE_SIZE);
152 de.d_reclen = DE_SIZE;
153 de.d_fileno = ctx->f_attr.fa_ino;
154 de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
155 de.d_namlen = ctx->f_nmlen;
156 bcopy(ctx->f_name, de.d_name, de.d_namlen);
157 de.d_name[de.d_namlen] = '\0';
158 if (smbfs_fastlookup) {
159 error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
160 ctx->f_nmlen, &ctx->f_attr, &newvp);
161 if (!error) {
162 cn.cn_nameptr = de.d_name;
163 cn.cn_namelen = de.d_namlen;
164 cache_enter(vp, newvp, &cn);
165 vput(newvp);
166 }
167 }
168 error = uiomove((caddr_t)&de, DE_SIZE, uio);
168 error = uiomove(&de, DE_SIZE, uio);
169 if (error)
170 break;
171 }
172 if (error == ENOENT)
173 error = 0;
174 uio->uio_offset = offset * DE_SIZE;
175 return error;
176}
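
Aside from the $FreeBSD$ ID bump at line 32, the only change r111741 makes to this file is in the two uiomove() calls above (file lines 112 and 168): the (caddr_t) cast on &de is dropped, presumably as part of the kernel-wide caddr_t cleanup of that period. Once uiomove(9) takes a void * buffer, any object pointer converts implicitly and the cast is pure noise. A minimal userland sketch of the same point; the copyout_like() helper and harness are hypothetical, not kernel API:

#include <stdio.h>
#include <string.h>

struct dirent_like {            /* stand-in for struct dirent */
        unsigned long d_fileno;
        char          d_name[16];
};

/* An old-style prototype would have read: int copyout_like(caddr_t, size_t);
   forcing callers to cast.  With void * the cast disappears. */
static int
copyout_like(void *buf, size_t len)
{
        /* pretend to move "len" bytes toward the caller */
        printf("moved %zu bytes, first byte 0x%02x\n",
            len, *(unsigned char *)buf);
        return (0);
}

int
main(void)
{
        struct dirent_like de;

        memset(&de, 0, sizeof(de));
        de.d_fileno = 2;
        strcpy(de.d_name, ".");

        /* No (caddr_t) cast needed: &de converts to void * implicitly. */
        return (copyout_like(&de, sizeof(de)));
}
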
177
178int
179smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
180{
181 struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
182 struct smbnode *np = VTOSMB(vp);
183 struct thread *td;
184 struct vattr vattr;
185 struct smb_cred scred;
186 int error, lks;
187
188 /*
189 * Protect against method which is not supported for now
190 */
191 if (uiop->uio_segflg == UIO_NOCOPY)
192 return EOPNOTSUPP;
193
194 if (vp->v_type != VREG && vp->v_type != VDIR) {
195 SMBFSERR("vn types other than VREG or VDIR are unsupported !\n");
196 return EIO;
197 }
198 if (uiop->uio_resid == 0)
199 return 0;
200 if (uiop->uio_offset < 0)
201 return EINVAL;
202/* if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
203 return EFBIG;*/
204 td = uiop->uio_td;
205 if (vp->v_type == VDIR) {
206 lks = LK_EXCLUSIVE;/*lockstatus(vp->v_vnlock, td);*/
207 if (lks == LK_SHARED)
208 vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
209 error = smbfs_readvdir(vp, uiop, cred);
210 if (lks == LK_SHARED)
211 vn_lock(vp, LK_DOWNGRADE | LK_RETRY, td);
212 return error;
213 }
214
215/* biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
216 if (np->n_flag & NMODIFIED) {
217 smbfs_attr_cacheremove(vp);
218 error = VOP_GETATTR(vp, &vattr, cred, td);
219 if (error)
220 return error;
221 np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
222 } else {
223 error = VOP_GETATTR(vp, &vattr, cred, td);
224 if (error)
225 return error;
226 if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
227 error = smbfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
228 if (error)
229 return error;
230 np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
231 }
232 }
233 smb_makescred(&scred, td, cred);
234 return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
235}
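
smbfs_readvnode() keeps cached file data honest by comparing the cached modification time against freshly fetched attributes and invalidating buffers on a mismatch. A rough userland analogue of that revalidation step, assuming a hypothetical filecache structure and invalidate() helper:

#include <sys/types.h>
#include <sys/stat.h>
#include <stdio.h>

struct filecache {
        time_t cached_mtime;
        int    dirty;           /* analogue of NMODIFIED */
};

static void
invalidate(struct filecache *fc)
{
        /* a real cache would drop its buffered blocks here */
        fc->dirty = 0;
}

/* Return 0 if cached data may be used (possibly after invalidation). */
static int
revalidate(const char *path, struct filecache *fc)
{
        struct stat st;

        if (stat(path, &st) != 0)
                return (-1);
        if (fc->dirty || st.st_mtime != fc->cached_mtime) {
                invalidate(fc);
                fc->cached_mtime = st.st_mtime;
        }
        return (0);
}

int
main(void)
{
        struct filecache fc = { 0, 0 };

        printf("revalidate: %d\n", revalidate("/etc/hosts", &fc));
        return (0);
}
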
236
237int
238smbfs_writevnode(struct vnode *vp, struct uio *uiop,
239 struct ucred *cred, int ioflag)
240{
241 struct smbmount *smp = VTOSMBFS(vp);
242 struct smbnode *np = VTOSMB(vp);
243 struct smb_cred scred;
244 struct proc *p;
245 struct thread *td;
246 int error = 0;
247
248 if (vp->v_type != VREG) {
249 SMBERROR("vn types other than VREG unsupported !\n");
250 return EIO;
251 }
252 SMBVDEBUG("ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
253 if (uiop->uio_offset < 0)
254 return EINVAL;
255/* if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
256 return (EFBIG);*/
257 td = uiop->uio_td;
258 p = td->td_proc;
259 if (ioflag & (IO_APPEND | IO_SYNC)) {
260 if (np->n_flag & NMODIFIED) {
261 smbfs_attr_cacheremove(vp);
262 error = smbfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
263 if (error)
264 return error;
265 }
266 if (ioflag & IO_APPEND) {
267#if notyet
268 /*
269 * File size can be changed by another client
270 */
271 smbfs_attr_cacheremove(vp);
272 error = VOP_GETATTR(vp, &vattr, cred, td);
273 if (error) return (error);
274#endif
275 uiop->uio_offset = np->n_size;
276 }
277 }
278 if (uiop->uio_resid == 0)
279 return 0;
280 if (p && uiop->uio_offset + uiop->uio_resid > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
281 PROC_LOCK(td->td_proc);
282 psignal(td->td_proc, SIGXFSZ);
283 PROC_UNLOCK(td->td_proc);
284 return EFBIG;
285 }
286 smb_makescred(&scred, td, cred);
287 error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
288 SMBVDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
289 if (!error) {
290 if (uiop->uio_offset > np->n_size) {
291 np->n_size = uiop->uio_offset;
292 vnode_pager_setsize(vp, np->n_size);
293 }
294 }
295 return error;
296}
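
The RLIMIT_FSIZE test above refuses any write that would push the file past the process's soft file-size limit, posting SIGXFSZ and returning EFBIG. The same comparison in a self-contained userland sketch; check_fsize() is an illustrative helper, and only the kernel actually delivers the signal at this point:

#include <sys/types.h>
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

static int
check_fsize(off_t offset, size_t resid)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_FSIZE, &rl) != 0)
                return (errno);
        if (rl.rlim_cur != RLIM_INFINITY &&
            (unsigned long long)offset + resid > rl.rlim_cur)
                return (EFBIG);         /* kernel would also raise SIGXFSZ */
        return (0);
}

int
main(void)
{
        printf("write at 2^40: %s\n",
            check_fsize((off_t)1 << 40, 4096) == EFBIG ?
            "EFBIG" : "allowed");
        return (0);
}
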
297
298/*
299 * Do an I/O operation to/from a cache block.
300 */
301int
302smbfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
303{
304 struct vnode *vp = bp->b_vp;
305 struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
306 struct smbnode *np = VTOSMB(vp);
307 struct uio uio, *uiop = &uio;
308 struct iovec io;
309 struct smb_cred scred;
310 int error = 0;
311
312 uiop->uio_iov = &io;
313 uiop->uio_iovcnt = 1;
314 uiop->uio_segflg = UIO_SYSSPACE;
315 uiop->uio_td = td;
316
317 smb_makescred(&scred, td, cr);
318
319 if (bp->b_iocmd == BIO_READ) {
320 io.iov_len = uiop->uio_resid = bp->b_bcount;
321 io.iov_base = bp->b_data;
322 uiop->uio_rw = UIO_READ;
323 switch (vp->v_type) {
324 case VREG:
325 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
326 error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
327 if (error)
328 break;
329 if (uiop->uio_resid) {
330 int left = uiop->uio_resid;
331 int nread = bp->b_bcount - left;
332 if (left > 0)
333 bzero((char *)bp->b_data + nread, left);
334 }
335 break;
336 default:
337 printf("smbfs_doio: type %x unexpected\n",vp->v_type);
338 break;
339 };
340 if (error) {
341 bp->b_error = error;
342 bp->b_ioflags |= BIO_ERROR;
343 }
344 } else { /* write */
345 if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
346 bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
347
348 if (bp->b_dirtyend > bp->b_dirtyoff) {
349 io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
350 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
351 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
352 uiop->uio_rw = UIO_WRITE;
353 bp->b_flags |= B_WRITEINPROG;
354 error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
355 bp->b_flags &= ~B_WRITEINPROG;
356
357 /*
358 * For an interrupted write, the buffer is still valid
359 * and the write hasn't been pushed to the server yet,
360 * so we can't set BIO_ERROR and report the interruption
361 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
362 * is not relevant, so the rpc attempt is essentially
363 * a noop. For the case of a V3 write rpc not being
364 * committed to stable storage, the block is still
365 * dirty and requires either a commit rpc or another
366 * write rpc with iomode == NFSV3WRITE_FILESYNC before
367 * the block is reused. This is indicated by setting
368 * the B_DELWRI and B_NEEDCOMMIT flags.
369 */
370 if (error == EINTR
371 || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
372 int s;
373
374 s = splbio();
375 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
376 if ((bp->b_flags & B_ASYNC) == 0)
377 bp->b_flags |= B_EINTR;
378 if ((bp->b_flags & B_PAGING) == 0) {
379 bdirty(bp);
380 bp->b_flags &= ~B_DONE;
381 }
382 if ((bp->b_flags & B_ASYNC) == 0)
383 bp->b_flags |= B_EINTR;
384 splx(s);
385 } else {
386 if (error) {
387 bp->b_ioflags |= BIO_ERROR;
388 bp->b_error = error;
389 }
390 bp->b_dirtyoff = bp->b_dirtyend = 0;
391 }
392 } else {
393 bp->b_resid = 0;
394 bufdone(bp);
395 return 0;
396 }
397 }
398 bp->b_resid = uiop->uio_resid;
399 bufdone(bp);
400 return error;
401}
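
Two details of smbfs_doio() are worth flagging. In the interrupted-write path, B_EINTR is set twice (file lines 377 and 383); the second assignment is redundant but harmless in this revision. In the read path, a short read leaves uio_resid nonzero, and the unread tail of the buffer is zeroed so stale contents never reach the caller. That tail-clearing logic in isolation, with illustrative names:

#include <stdio.h>
#include <string.h>

static void
zero_tail(char *buf, size_t bcount, size_t resid)
{
        size_t nread = bcount - resid;  /* bytes actually read */

        if (resid > 0)
                memset(buf + nread, 0, resid);
}

int
main(void)
{
        char buf[8] = "AAAAAAA";        /* pretend only 3 bytes arrived */

        zero_tail(buf, sizeof(buf), sizeof(buf) - 3);
        printf("%d\n", buf[3]);         /* prints 0: tail was cleared */
        return (0);
}
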
402
403/*
404 * Vnode op for VM getpages.
 405 * Wish wish .... get rid of multiple IO routines
406 */
407int
408smbfs_getpages(ap)
409 struct vop_getpages_args /* {
410 struct vnode *a_vp;
411 vm_page_t *a_m;
412 int a_count;
413 int a_reqpage;
414 vm_ooffset_t a_offset;
415 } */ *ap;
416{
417#ifdef SMBFS_RWGENERIC
418 return vop_stdgetpages(ap);
419#else
420 int i, error, nextoff, size, toff, npages, count, reqpage;
421 struct uio uio;
422 struct iovec iov;
423 vm_offset_t kva;
424 struct buf *bp;
425 struct vnode *vp;
426 struct thread *td;
427 struct ucred *cred;
428 struct smbmount *smp;
429 struct smbnode *np;
430 struct smb_cred scred;
431 vm_page_t *pages, m;
432
433 vp = ap->a_vp;
434 if (vp->v_object == NULL) {
435 printf("smbfs_getpages: called with non-merged cache vnode??\n");
436 return VM_PAGER_ERROR;
437 }
438
439 td = curthread; /* XXX */
440 cred = td->td_ucred; /* XXX */
441 np = VTOSMB(vp);
442 smp = VFSTOSMBFS(vp->v_mount);
443 pages = ap->a_m;
444 count = ap->a_count;
445 npages = btoc(count);
446 reqpage = ap->a_reqpage;
447
448 /*
449 * If the requested page is partially valid, just return it and
450 * allow the pager to zero-out the blanks. Partially valid pages
451 * can only occur at the file EOF.
452 */
453 m = pages[reqpage];
454
455 if (m->valid != 0) {
456 /* handled by vm_fault now */
457 /* vm_page_zero_invalid(m, TRUE); */
458 vm_page_lock_queues();
459 for (i = 0; i < npages; ++i) {
460 if (i != reqpage)
461 vm_page_free(pages[i]);
462 }
463 vm_page_unlock_queues();
464 return 0;
465 }
466
467 smb_makescred(&scred, td, cred);
468
469 bp = getpbuf(&smbfs_pbuf_freecnt);
470
471 kva = (vm_offset_t) bp->b_data;
472 pmap_qenter(kva, pages, npages);
473 cnt.v_vnodein++;
474 cnt.v_vnodepgsin += npages;
475
476 iov.iov_base = (caddr_t) kva;
477 iov.iov_len = count;
478 uio.uio_iov = &iov;
479 uio.uio_iovcnt = 1;
480 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
481 uio.uio_resid = count;
482 uio.uio_segflg = UIO_SYSSPACE;
483 uio.uio_rw = UIO_READ;
484 uio.uio_td = td;
485
486 error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
487 pmap_qremove(kva, npages);
488
489 relpbuf(bp, &smbfs_pbuf_freecnt);
490
491 if (error && (uio.uio_resid == count)) {
492 printf("smbfs_getpages: error %d\n",error);
493 vm_page_lock_queues();
494 for (i = 0; i < npages; i++) {
495 if (reqpage != i)
496 vm_page_free(pages[i]);
497 }
498 vm_page_unlock_queues();
499 return VM_PAGER_ERROR;
500 }
501
502 size = count - uio.uio_resid;
503
504 vm_page_lock_queues();
505 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
506 vm_page_t m;
507 nextoff = toff + PAGE_SIZE;
508 m = pages[i];
509
510 m->flags &= ~PG_ZERO;
511
512 if (nextoff <= size) {
513 /*
514 * Read operation filled an entire page
515 */
516 m->valid = VM_PAGE_BITS_ALL;
517 vm_page_undirty(m);
518 } else if (size > toff) {
519 /*
520 * Read operation filled a partial page.
521 */
522 m->valid = 0;
523 vm_page_set_validclean(m, 0, size - toff);
524 /* handled by vm_fault now */
525 /* vm_page_zero_invalid(m, TRUE); */
526 } else {
527 /*
 528 * Read operation was short. If no error occurred
529 * we may have hit a zero-fill section. We simply
530 * leave valid set to 0.
531 */
532 ;
533 }
534
535 if (i != reqpage) {
536 /*
537 * Whether or not to leave the page activated is up in
538 * the air, but we should put the page on a page queue
539 * somewhere (it already is in the object). Result:
 540 * It appears that empirical results show that
541 * deactivating pages is best.
542 */
543
544 /*
545 * Just in case someone was asking for this page we
546 * now tell them that it is ok to use.
547 */
548 if (!error) {
549 if (m->flags & PG_WANTED)
550 vm_page_activate(m);
551 else
552 vm_page_deactivate(m);
553 vm_page_wakeup(m);
554 } else {
555 vm_page_free(m);
556 }
557 }
558 }
559 vm_page_unlock_queues();
560 return 0;
561#endif /* SMBFS_RWGENERIC */
562}
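
After the smb_read(), smbfs_getpages() walks the page array and derives each page's validity from the byte count actually transferred: a page wholly below size is fully valid, a page straddling size is partially valid, and pages beyond it stay invalid. The arithmetic in isolation, with PAGE_SZ and the enum standing in for the kernel's types:

#include <stdio.h>

#define PAGE_SZ 4096

enum pstate { P_FULL, P_PARTIAL, P_EMPTY };

static enum pstate
classify(int toff, int size)
{
        int nextoff = toff + PAGE_SZ;

        if (nextoff <= size)
                return (P_FULL);        /* read filled the whole page */
        if (size > toff)
                return (P_PARTIAL);     /* valid bytes: size - toff */
        return (P_EMPTY);               /* short read: leave invalid */
}

int
main(void)
{
        int size = PAGE_SZ + 100;       /* e.g. 4196 bytes were read */
        int toff;

        for (toff = 0; toff < 3 * PAGE_SZ; toff += PAGE_SZ)
                printf("page@%d: state %d\n", toff, classify(toff, size));
        return (0);
}
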
563
564/*
565 * Vnode op for VM putpages.
566 * possible bug: all IO done in sync mode
 567 * Note that vop_close always invalidates pages before close, so it's
568 * not necessary to open vnode.
569 */
570int
571smbfs_putpages(ap)
572 struct vop_putpages_args /* {
573 struct vnode *a_vp;
574 vm_page_t *a_m;
575 int a_count;
576 int a_sync;
577 int *a_rtvals;
578 vm_ooffset_t a_offset;
579 } */ *ap;
580{
581 int error;
582 struct vnode *vp = ap->a_vp;
583 struct thread *td;
584 struct ucred *cred;
585
586#ifdef SMBFS_RWGENERIC
587 td = curthread; /* XXX */
588 cred = td->td_ucred; /* XXX */
589 VOP_OPEN(vp, FWRITE, cred, td);
590 error = vop_stdputpages(ap);
591 VOP_CLOSE(vp, FWRITE, cred, td);
592 return error;
593#else
594 struct uio uio;
595 struct iovec iov;
596 vm_offset_t kva;
597 struct buf *bp;
598 int i, npages, count;
599 int *rtvals;
600 struct smbmount *smp;
601 struct smbnode *np;
602 struct smb_cred scred;
603 vm_page_t *pages;
604
605 td = curthread; /* XXX */
606 cred = td->td_ucred; /* XXX */
607/* VOP_OPEN(vp, FWRITE, cred, td);*/
608 np = VTOSMB(vp);
609 smp = VFSTOSMBFS(vp->v_mount);
610 pages = ap->a_m;
611 count = ap->a_count;
612 rtvals = ap->a_rtvals;
613 npages = btoc(count);
614
615 for (i = 0; i < npages; i++) {
616 rtvals[i] = VM_PAGER_AGAIN;
617 }
618
619 bp = getpbuf(&smbfs_pbuf_freecnt);
620
621 kva = (vm_offset_t) bp->b_data;
622 pmap_qenter(kva, pages, npages);
623 cnt.v_vnodeout++;
624 cnt.v_vnodepgsout += count;
625
626 iov.iov_base = (caddr_t) kva;
627 iov.iov_len = count;
628 uio.uio_iov = &iov;
629 uio.uio_iovcnt = 1;
630 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
631 uio.uio_resid = count;
632 uio.uio_segflg = UIO_SYSSPACE;
633 uio.uio_rw = UIO_WRITE;
634 uio.uio_td = td;
635 SMBVDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);
636
637 smb_makescred(&scred, td, cred);
638 error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
639/* VOP_CLOSE(vp, FWRITE, cred, td);*/
640 SMBVDEBUG("paged write done: %d\n", error);
641
642 pmap_qremove(kva, npages);
643
644 relpbuf(bp, &smbfs_pbuf_freecnt);
645
646 if (!error) {
647 int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
648 vm_page_lock_queues();
649 for (i = 0; i < nwritten; i++) {
650 rtvals[i] = VM_PAGER_OK;
651 vm_page_undirty(pages[i]);
652 }
653 vm_page_unlock_queues();
654 }
655 return rtvals[0];
656#endif /* SMBFS_RWGENERIC */
657}
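
On a successful write, smbfs_putpages() rounds the bytes written up to whole pages and marks that many rtvals[] entries VM_PAGER_OK, undirtying the pages. A standalone sketch of that accounting; the constants mirror the kernel's values but are local to the example:

#include <stdio.h>

#define PAGE_SZ         4096
#define VM_PAGER_OK     0
#define VM_PAGER_AGAIN  4

static int
mark_written(int count, int resid, int *rtvals, int npages)
{
        /* round_page() analogue: round the written byte count up */
        int nwritten = (count - resid + PAGE_SZ - 1) / PAGE_SZ;
        int i;

        for (i = 0; i < nwritten && i < npages; i++)
                rtvals[i] = VM_PAGER_OK;
        return (nwritten);
}

int
main(void)
{
        int rtvals[3] = { VM_PAGER_AGAIN, VM_PAGER_AGAIN, VM_PAGER_AGAIN };

        /* 2.5 pages requested, everything written (resid == 0) */
        printf("pages ok: %d\n",
            mark_written(2 * PAGE_SZ + PAGE_SZ / 2, 0, rtvals, 3));
        return (0);
}
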
658
659/*
660 * Flush and invalidate all dirty buffers. If another process is already
661 * doing the flush, just wait for completion.
662 */
663int
664smbfs_vinvalbuf(vp, flags, cred, td, intrflg)
665 struct vnode *vp;
666 int flags;
667 struct ucred *cred;
668 struct thread *td;
669 int intrflg;
670{
671 struct smbnode *np = VTOSMB(vp);
672 int error = 0, slpflag, slptimeo;
673
674 VI_LOCK(vp);
675 if (vp->v_iflag & VI_XLOCK) {
676 VI_UNLOCK(vp);
677 return 0;
678 }
679 VI_UNLOCK(vp);
680
681 if (intrflg) {
682 slpflag = PCATCH;
683 slptimeo = 2 * hz;
684 } else {
685 slpflag = 0;
686 slptimeo = 0;
687 }
688 while (np->n_flag & NFLUSHINPROG) {
689 np->n_flag |= NFLUSHWANT;
690 error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "smfsvinv", slptimeo);
691 error = smb_proc_intr(td->td_proc);
692 if (error == EINTR && intrflg)
693 return EINTR;
694 }
695 np->n_flag |= NFLUSHINPROG;
696 error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
697 while (error) {
698 if (intrflg && (error == ERESTART || error == EINTR)) {
699 np->n_flag &= ~NFLUSHINPROG;
700 if (np->n_flag & NFLUSHWANT) {
701 np->n_flag &= ~NFLUSHWANT;
702 wakeup((caddr_t)&np->n_flag);
703 }
704 return EINTR;
705 }
706 error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
707 }
708 np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
709 if (np->n_flag & NFLUSHWANT) {
710 np->n_flag &= ~NFLUSHWANT;
711 wakeup((caddr_t)&np->n_flag);
712 }
713 return (error);
714}
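
smbfs_vinvalbuf() serializes flushers with the NFLUSHINPROG/NFLUSHWANT bits: latecomers set NFLUSHWANT and tsleep() on &np->n_flag until the owner wakes them. (Note that in this revision the tsleep() return value on line 690 is immediately overwritten by smb_proc_intr() on line 691.) A userland analogue using a condition variable in place of tsleep()/wakeup(); the flag values are illustrative and this is a sketch of the protocol, not a transcription:

#include <pthread.h>

#define NFLUSHINPROG    0x1     /* values illustrative, not smbfs's */
#define NFLUSHWANT      0x2

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static int n_flag;

static void
flush_buffers(void)
{
        /* a real implementation would invalidate dirty buffers here */
}

static void
vinvalbuf_like(void)
{
        pthread_mutex_lock(&lk);
        while (n_flag & NFLUSHINPROG) {         /* another thread is flushing */
                n_flag |= NFLUSHWANT;
                pthread_cond_wait(&cv, &lk);    /* tsleep() analogue */
        }
        n_flag |= NFLUSHINPROG;
        pthread_mutex_unlock(&lk);

        flush_buffers();

        pthread_mutex_lock(&lk);
        n_flag &= ~NFLUSHINPROG;
        if (n_flag & NFLUSHWANT) {
                n_flag &= ~NFLUSHWANT;
                pthread_cond_broadcast(&cv);    /* wakeup() analogue */
        }
        pthread_mutex_unlock(&lk);
}

int
main(void)
{
        vinvalbuf_like();
        return (0);
}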