nfs_clbio.c: full compact diff of sys/fs/nfsclient/nfs_clbio.c, revision 203119 to revision 207082. At each change, the deleted (203119) line is shown first and the added (207082) line immediately after it.
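
The substantive change repeats mechanically throughout the hunks below: besides the $FreeBSD$ id bump, every call to ncl_writerpc() and ncl_doio() gains a trailing argument of 0. The updated prototypes are not part of this diff, so the purpose of the new parameter is not visible here; the stand-alone C sketch that follows (hypothetical names, not the NFS client API) only illustrates the general call-site pattern of appending a flag for which existing callers pass 0 to keep the old behaviour.

/*
 * Illustrative sketch only; write_rpc() and extra_flag are hypothetical
 * stand-ins, not the real ncl_writerpc()/ncl_doio() interfaces.
 */
#include <stdio.h>

/* New-style signature: one extra trailing int flag. */
static int
write_rpc(int fd, const char *buf, int len, int extra_flag)
{

	/* Passing 0 keeps the pre-change behaviour. */
	if (extra_flag != 0)
		return (-1);
	return (printf("fd %d: wrote %d bytes of \"%.*s\"\n", fd, len, len, buf));
}

int
main(void)
{

	/* Old call sites are updated mechanically by appending ", 0". */
	return (write_rpc(1, "hello", 5, 0) > 0 ? 0 : 1);
}
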
1/*-
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/fs/nfsclient/nfs_clbio.c 203119 2010-01-28 16:17:24Z rmacklem $");
36__FBSDID("$FreeBSD: head/sys/fs/nfsclient/nfs_clbio.c 207082 2010-04-22 23:51:01Z rmacklem $");
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/bio.h>
41#include <sys/buf.h>
42#include <sys/kernel.h>
43#include <sys/mount.h>
44#include <sys/proc.h>
45#include <sys/resourcevar.h>
46#include <sys/signalvar.h>
47#include <sys/vmmeter.h>
48#include <sys/vnode.h>
49
50#include <vm/vm.h>
51#include <vm/vm_extern.h>
52#include <vm/vm_page.h>
53#include <vm/vm_object.h>
54#include <vm/vm_pager.h>
55#include <vm/vnode_pager.h>
56
57#include <fs/nfs/nfsport.h>
58#include <fs/nfsclient/nfsmount.h>
59#include <fs/nfsclient/nfs.h>
60#include <fs/nfsclient/nfsnode.h>
61
62extern int newnfs_directio_allow_mmap;
63extern struct nfsstats newnfsstats;
64extern struct mtx ncl_iod_mutex;
65extern int ncl_numasync;
66extern enum nfsiod_state ncl_iodwant[NFS_MAXRAHEAD];
67extern struct nfsmount *ncl_iodmount[NFS_MAXRAHEAD];
68extern int newnfs_directio_enable;
69
70int ncl_pbuf_freecnt = -1; /* start out unlimited */
71
72static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
73 struct thread *td);
74static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
75 struct ucred *cred, int ioflag);
76
77/*
78 * Vnode op for VM getpages.
79 */
80int
81ncl_getpages(struct vop_getpages_args *ap)
82{
83 int i, error, nextoff, size, toff, count, npages;
84 struct uio uio;
85 struct iovec iov;
86 vm_offset_t kva;
87 struct buf *bp;
88 struct vnode *vp;
89 struct thread *td;
90 struct ucred *cred;
91 struct nfsmount *nmp;
92 vm_object_t object;
93 vm_page_t *pages;
94 struct nfsnode *np;
95
96 vp = ap->a_vp;
97 np = VTONFS(vp);
98 td = curthread; /* XXX */
99 cred = curthread->td_ucred; /* XXX */
100 nmp = VFSTONFS(vp->v_mount);
101 pages = ap->a_m;
102 count = ap->a_count;
103
104 if ((object = vp->v_object) == NULL) {
105 ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
106 return (VM_PAGER_ERROR);
107 }
108
109 if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
110 mtx_lock(&np->n_mtx);
111 if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
112 mtx_unlock(&np->n_mtx);
113 ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
114 return (VM_PAGER_ERROR);
115 } else
116 mtx_unlock(&np->n_mtx);
117 }
118
119 mtx_lock(&nmp->nm_mtx);
120 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
121 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
122 mtx_unlock(&nmp->nm_mtx);
123 /* We'll never get here for v4, because we always have fsinfo */
124 (void)ncl_fsinfo(nmp, vp, cred, td);
125 } else
126 mtx_unlock(&nmp->nm_mtx);
127
128 npages = btoc(count);
129
130 /*
131 * If the requested page is partially valid, just return it and
132 * allow the pager to zero-out the blanks. Partially valid pages
133 * can only occur at the file EOF.
134 */
135 VM_OBJECT_LOCK(object);
136 if (pages[ap->a_reqpage]->valid != 0) {
137 vm_page_lock_queues();
138 for (i = 0; i < npages; ++i) {
139 if (i != ap->a_reqpage)
140 vm_page_free(pages[i]);
141 }
142 vm_page_unlock_queues();
143 VM_OBJECT_UNLOCK(object);
144 return (0);
145 }
146 VM_OBJECT_UNLOCK(object);
147
148 /*
149 * We use only the kva address for the buffer, but this is extremely
150 * convienient and fast.
151 */
152 bp = getpbuf(&ncl_pbuf_freecnt);
153
154 kva = (vm_offset_t) bp->b_data;
155 pmap_qenter(kva, pages, npages);
156 PCPU_INC(cnt.v_vnodein);
157 PCPU_ADD(cnt.v_vnodepgsin, npages);
158
159 iov.iov_base = (caddr_t) kva;
160 iov.iov_len = count;
161 uio.uio_iov = &iov;
162 uio.uio_iovcnt = 1;
163 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
164 uio.uio_resid = count;
165 uio.uio_segflg = UIO_SYSSPACE;
166 uio.uio_rw = UIO_READ;
167 uio.uio_td = td;
168
169 error = ncl_readrpc(vp, &uio, cred);
170 pmap_qremove(kva, npages);
171
172 relpbuf(bp, &ncl_pbuf_freecnt);
173
174 if (error && (uio.uio_resid == count)) {
175 ncl_printf("nfs_getpages: error %d\n", error);
176 VM_OBJECT_LOCK(object);
177 vm_page_lock_queues();
178 for (i = 0; i < npages; ++i) {
179 if (i != ap->a_reqpage)
180 vm_page_free(pages[i]);
181 }
182 vm_page_unlock_queues();
183 VM_OBJECT_UNLOCK(object);
184 return (VM_PAGER_ERROR);
185 }
186
187 /*
188 * Calculate the number of bytes read and validate only that number
189 * of bytes. Note that due to pending writes, size may be 0. This
190 * does not mean that the remaining data is invalid!
191 */
192
193 size = count - uio.uio_resid;
194 VM_OBJECT_LOCK(object);
195 vm_page_lock_queues();
196 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
197 vm_page_t m;
198 nextoff = toff + PAGE_SIZE;
199 m = pages[i];
200
201 if (nextoff <= size) {
202 /*
203 * Read operation filled an entire page
204 */
205 m->valid = VM_PAGE_BITS_ALL;
206 KASSERT(m->dirty == 0,
207 ("nfs_getpages: page %p is dirty", m));
208 } else if (size > toff) {
209 /*
210 * Read operation filled a partial page.
211 */
212 m->valid = 0;
213 vm_page_set_valid(m, 0, size - toff);
214 KASSERT(m->dirty == 0,
215 ("nfs_getpages: page %p is dirty", m));
216 } else {
217 /*
218 * Read operation was short. If no error occured
219 * we may have hit a zero-fill section. We simply
220 * leave valid set to 0.
221 */
222 ;
223 }
224 if (i != ap->a_reqpage) {
225 /*
226 * Whether or not to leave the page activated is up in
227 * the air, but we should put the page on a page queue
228 * somewhere (it already is in the object). Result:
229 * It appears that emperical results show that
230 * deactivating pages is best.
231 */
232
233 /*
234 * Just in case someone was asking for this page we
235 * now tell them that it is ok to use.
236 */
237 if (!error) {
238 if (m->oflags & VPO_WANTED)
239 vm_page_activate(m);
240 else
241 vm_page_deactivate(m);
242 vm_page_wakeup(m);
243 } else {
244 vm_page_free(m);
245 }
246 }
247 }
248 vm_page_unlock_queues();
249 VM_OBJECT_UNLOCK(object);
250 return (0);
251}
252
253/*
254 * Vnode op for VM putpages.
255 */
256int
257ncl_putpages(struct vop_putpages_args *ap)
258{
259 struct uio uio;
260 struct iovec iov;
261 vm_offset_t kva;
262 struct buf *bp;
263 int iomode, must_commit, i, error, npages, count;
264 off_t offset;
265 int *rtvals;
266 struct vnode *vp;
267 struct thread *td;
268 struct ucred *cred;
269 struct nfsmount *nmp;
270 struct nfsnode *np;
271 vm_page_t *pages;
272
273 vp = ap->a_vp;
274 np = VTONFS(vp);
275 td = curthread; /* XXX */
276 cred = curthread->td_ucred; /* XXX */
277 nmp = VFSTONFS(vp->v_mount);
278 pages = ap->a_m;
279 count = ap->a_count;
280 rtvals = ap->a_rtvals;
281 npages = btoc(count);
282 offset = IDX_TO_OFF(pages[0]->pindex);
283
284 mtx_lock(&nmp->nm_mtx);
285 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
286 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
287 mtx_unlock(&nmp->nm_mtx);
288 (void)ncl_fsinfo(nmp, vp, cred, td);
289 } else
290 mtx_unlock(&nmp->nm_mtx);
291
292 mtx_lock(&np->n_mtx);
293 if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
294 (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
295 mtx_unlock(&np->n_mtx);
296 ncl_printf("ncl_putpages: called on noncache-able vnode??\n");
297 mtx_lock(&np->n_mtx);
298 }
299
300 for (i = 0; i < npages; i++)
301 rtvals[i] = VM_PAGER_AGAIN;
302
303 /*
304 * When putting pages, do not extend file past EOF.
305 */
306 if (offset + count > np->n_size) {
307 count = np->n_size - offset;
308 if (count < 0)
309 count = 0;
310 }
311 mtx_unlock(&np->n_mtx);
312
313 /*
314 * We use only the kva address for the buffer, but this is extremely
315 * convienient and fast.
316 */
317 bp = getpbuf(&ncl_pbuf_freecnt);
318
319 kva = (vm_offset_t) bp->b_data;
320 pmap_qenter(kva, pages, npages);
321 PCPU_INC(cnt.v_vnodeout);
322 PCPU_ADD(cnt.v_vnodepgsout, count);
323
324 iov.iov_base = (caddr_t) kva;
325 iov.iov_len = count;
326 uio.uio_iov = &iov;
327 uio.uio_iovcnt = 1;
328 uio.uio_offset = offset;
329 uio.uio_resid = count;
330 uio.uio_segflg = UIO_SYSSPACE;
331 uio.uio_rw = UIO_WRITE;
332 uio.uio_td = td;
333
334 if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
335 iomode = NFSWRITE_UNSTABLE;
336 else
337 iomode = NFSWRITE_FILESYNC;
338
339 error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit);
339 error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
340
341 pmap_qremove(kva, npages);
342 relpbuf(bp, &ncl_pbuf_freecnt);
343
344 if (!error) {
345 int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
346 for (i = 0; i < nwritten; i++) {
347 rtvals[i] = VM_PAGER_OK;
348 vm_page_undirty(pages[i]);
349 }
350 if (must_commit) {
351 ncl_clearcommit(vp->v_mount);
352 }
353 }
354 return rtvals[0];
355}
356
357/*
358 * For nfs, cache consistency can only be maintained approximately.
359 * Although RFC1094 does not specify the criteria, the following is
360 * believed to be compatible with the reference port.
361 * For nfs:
362 * If the file's modify time on the server has changed since the
363 * last read rpc or you have written to the file,
364 * you may have lost data cache consistency with the
365 * server, so flush all of the file's data out of the cache.
366 * Then force a getattr rpc to ensure that you have up to date
367 * attributes.
368 * NB: This implies that cache data can be read when up to
369 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
370 * attributes this could be forced by setting n_attrstamp to 0 before
371 * the VOP_GETATTR() call.
372 */
373static inline int
374nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
375{
376 int error = 0;
377 struct vattr vattr;
378 struct nfsnode *np = VTONFS(vp);
379 int old_lock;
380
381 /*
382 * Grab the exclusive lock before checking whether the cache is
383 * consistent.
384 * XXX - We can make this cheaper later (by acquiring cheaper locks).
385 * But for now, this suffices.
386 */
387 old_lock = ncl_upgrade_vnlock(vp);
388 if (vp->v_iflag & VI_DOOMED) {
389 ncl_downgrade_vnlock(vp, old_lock);
390 return (EBADF);
391 }
392
393 mtx_lock(&np->n_mtx);
394 if (np->n_flag & NMODIFIED) {
395 mtx_unlock(&np->n_mtx);
396 if (vp->v_type != VREG) {
397 if (vp->v_type != VDIR)
398 panic("nfs: bioread, not dir");
399 ncl_invaldir(vp);
400 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
401 if (error)
402 goto out;
403 }
404 np->n_attrstamp = 0;
405 error = VOP_GETATTR(vp, &vattr, cred);
406 if (error)
407 goto out;
408 mtx_lock(&np->n_mtx);
409 np->n_mtime = vattr.va_mtime;
410 mtx_unlock(&np->n_mtx);
411 } else {
412 mtx_unlock(&np->n_mtx);
413 error = VOP_GETATTR(vp, &vattr, cred);
414 if (error)
415 return (error);
416 mtx_lock(&np->n_mtx);
417 if ((np->n_flag & NSIZECHANGED)
418 || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
419 mtx_unlock(&np->n_mtx);
420 if (vp->v_type == VDIR)
421 ncl_invaldir(vp);
422 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
423 if (error)
424 goto out;
425 mtx_lock(&np->n_mtx);
426 np->n_mtime = vattr.va_mtime;
427 np->n_flag &= ~NSIZECHANGED;
428 }
429 mtx_unlock(&np->n_mtx);
430 }
431out:
432 ncl_downgrade_vnlock(vp, old_lock);
433 return error;
434}
435
436/*
437 * Vnode op for read using bio
438 */
439int
440ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
441{
442 struct nfsnode *np = VTONFS(vp);
443 int biosize, i;
444 struct buf *bp, *rabp;
445 struct thread *td;
446 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
447 daddr_t lbn, rabn;
448 int bcount;
449 int seqcount;
450 int nra, error = 0, n = 0, on = 0;
451
452#ifdef DIAGNOSTIC
453 if (uio->uio_rw != UIO_READ)
454 panic("ncl_read mode");
455#endif
456 if (uio->uio_resid == 0)
457 return (0);
458 if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */
459 return (EINVAL);
460 td = uio->uio_td;
461
462 mtx_lock(&nmp->nm_mtx);
463 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
464 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
465 mtx_unlock(&nmp->nm_mtx);
466 (void)ncl_fsinfo(nmp, vp, cred, td);
467 mtx_lock(&nmp->nm_mtx);
468 }
469 if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
470 (void) newnfs_iosize(nmp);
471 mtx_unlock(&nmp->nm_mtx);
472
473 if (vp->v_type != VDIR &&
474 (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
475 return (EFBIG);
476
477 if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
478 /* No caching/ no readaheads. Just read data into the user buffer */
479 return ncl_readrpc(vp, uio, cred);
480
481 biosize = vp->v_mount->mnt_stat.f_iosize;
482 seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
483
484 error = nfs_bioread_check_cons(vp, td, cred);
485 if (error)
486 return error;
487
488 do {
489 u_quad_t nsize;
490
491 mtx_lock(&np->n_mtx);
492 nsize = np->n_size;
493 mtx_unlock(&np->n_mtx);
494
495 switch (vp->v_type) {
496 case VREG:
497 NFSINCRGLOBAL(newnfsstats.biocache_reads);
498 lbn = uio->uio_offset / biosize;
499 on = uio->uio_offset & (biosize - 1);
500
501 /*
502 * Start the read ahead(s), as required.
503 */
504 if (nmp->nm_readahead > 0) {
505 for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
506 (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
507 rabn = lbn + 1 + nra;
508 if (incore(&vp->v_bufobj, rabn) == NULL) {
509 rabp = nfs_getcacheblk(vp, rabn, biosize, td);
510 if (!rabp) {
511 error = newnfs_sigintr(nmp, td);
512 if (error)
513 return (error);
514 else
515 break;
516 }
517 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
518 rabp->b_flags |= B_ASYNC;
519 rabp->b_iocmd = BIO_READ;
520 vfs_busy_pages(rabp, 0);
521 if (ncl_asyncio(nmp, rabp, cred, td)) {
522 rabp->b_flags |= B_INVAL;
523 rabp->b_ioflags |= BIO_ERROR;
524 vfs_unbusy_pages(rabp);
525 brelse(rabp);
526 break;
527 }
528 } else {
529 brelse(rabp);
530 }
531 }
532 }
533 }
534
535 /* Note that bcount is *not* DEV_BSIZE aligned. */
536 bcount = biosize;
537 if ((off_t)lbn * biosize >= nsize) {
538 bcount = 0;
539 } else if ((off_t)(lbn + 1) * biosize > nsize) {
540 bcount = nsize - (off_t)lbn * biosize;
541 }
542 bp = nfs_getcacheblk(vp, lbn, bcount, td);
543
544 if (!bp) {
545 error = newnfs_sigintr(nmp, td);
546 return (error ? error : EINTR);
547 }
548
549 /*
550 * If B_CACHE is not set, we must issue the read. If this
551 * fails, we return an error.
552 */
553
554 if ((bp->b_flags & B_CACHE) == 0) {
555 bp->b_iocmd = BIO_READ;
556 vfs_busy_pages(bp, 0);
557 error = ncl_doio(vp, bp, cred, td);
557 error = ncl_doio(vp, bp, cred, td, 0);
558 if (error) {
559 brelse(bp);
560 return (error);
561 }
562 }
563
564 /*
565 * on is the offset into the current bp. Figure out how many
566 * bytes we can copy out of the bp. Note that bcount is
567 * NOT DEV_BSIZE aligned.
568 *
569 * Then figure out how many bytes we can copy into the uio.
570 */
571
572 n = 0;
573 if (on < bcount)
574 n = min((unsigned)(bcount - on), uio->uio_resid);
575 break;
576 case VLNK:
577 NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
578 bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
579 if (!bp) {
580 error = newnfs_sigintr(nmp, td);
581 return (error ? error : EINTR);
582 }
583 if ((bp->b_flags & B_CACHE) == 0) {
584 bp->b_iocmd = BIO_READ;
585 vfs_busy_pages(bp, 0);
586 error = ncl_doio(vp, bp, cred, td);
586 error = ncl_doio(vp, bp, cred, td, 0);
587 if (error) {
588 bp->b_ioflags |= BIO_ERROR;
589 brelse(bp);
590 return (error);
591 }
592 }
593 n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
594 on = 0;
595 break;
596 case VDIR:
597 NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
598 if (np->n_direofoffset
599 && uio->uio_offset >= np->n_direofoffset) {
600 return (0);
601 }
602 lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
603 on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
604 bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
605 if (!bp) {
606 error = newnfs_sigintr(nmp, td);
607 return (error ? error : EINTR);
608 }
609 if ((bp->b_flags & B_CACHE) == 0) {
610 bp->b_iocmd = BIO_READ;
611 vfs_busy_pages(bp, 0);
612 error = ncl_doio(vp, bp, cred, td);
612 error = ncl_doio(vp, bp, cred, td, 0);
613 if (error) {
614 brelse(bp);
615 }
616 while (error == NFSERR_BAD_COOKIE) {
617 ncl_invaldir(vp);
618 error = ncl_vinvalbuf(vp, 0, td, 1);
619 /*
620 * Yuck! The directory has been modified on the
621 * server. The only way to get the block is by
622 * reading from the beginning to get all the
623 * offset cookies.
624 *
625 * Leave the last bp intact unless there is an error.
626 * Loop back up to the while if the error is another
627 * NFSERR_BAD_COOKIE (double yuch!).
628 */
629 for (i = 0; i <= lbn && !error; i++) {
630 if (np->n_direofoffset
631 && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
632 return (0);
633 bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
634 if (!bp) {
635 error = newnfs_sigintr(nmp, td);
636 return (error ? error : EINTR);
637 }
638 if ((bp->b_flags & B_CACHE) == 0) {
639 bp->b_iocmd = BIO_READ;
640 vfs_busy_pages(bp, 0);
641 error = ncl_doio(vp, bp, cred, td);
641 error = ncl_doio(vp, bp, cred, td, 0);
642 /*
643 * no error + B_INVAL == directory EOF,
644 * use the block.
645 */
646 if (error == 0 && (bp->b_flags & B_INVAL))
647 break;
648 }
649 /*
650 * An error will throw away the block and the
651 * for loop will break out. If no error and this
652 * is not the block we want, we throw away the
653 * block and go for the next one via the for loop.
654 */
655 if (error || i < lbn)
656 brelse(bp);
657 }
658 }
659 /*
660 * The above while is repeated if we hit another cookie
661 * error. If we hit an error and it wasn't a cookie error,
662 * we give up.
663 */
664 if (error)
665 return (error);
666 }
667
668 /*
669 * If not eof and read aheads are enabled, start one.
670 * (You need the current block first, so that you have the
671 * directory offset cookie of the next block.)
672 */
673 if (nmp->nm_readahead > 0 &&
674 (bp->b_flags & B_INVAL) == 0 &&
675 (np->n_direofoffset == 0 ||
676 (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
677 incore(&vp->v_bufobj, lbn + 1) == NULL) {
678 rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
679 if (rabp) {
680 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
681 rabp->b_flags |= B_ASYNC;
682 rabp->b_iocmd = BIO_READ;
683 vfs_busy_pages(rabp, 0);
684 if (ncl_asyncio(nmp, rabp, cred, td)) {
685 rabp->b_flags |= B_INVAL;
686 rabp->b_ioflags |= BIO_ERROR;
687 vfs_unbusy_pages(rabp);
688 brelse(rabp);
689 }
690 } else {
691 brelse(rabp);
692 }
693 }
694 }
695 /*
696 * Unlike VREG files, whos buffer size ( bp->b_bcount ) is
697 * chopped for the EOF condition, we cannot tell how large
698 * NFS directories are going to be until we hit EOF. So
699 * an NFS directory buffer is *not* chopped to its EOF. Now,
700 * it just so happens that b_resid will effectively chop it
701 * to EOF. *BUT* this information is lost if the buffer goes
702 * away and is reconstituted into a B_CACHE state ( due to
703 * being VMIO ) later. So we keep track of the directory eof
704 * in np->n_direofoffset and chop it off as an extra step
705 * right here.
706 */
707 n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
708 if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
709 n = np->n_direofoffset - uio->uio_offset;
710 break;
711 default:
712 ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
713 bp = NULL;
714 break;
715 };
716
717 if (n > 0) {
718 error = uiomove(bp->b_data + on, (int)n, uio);
719 }
720 if (vp->v_type == VLNK)
721 n = 0;
722 if (bp != NULL)
723 brelse(bp);
724 } while (error == 0 && uio->uio_resid > 0 && n > 0);
725 return (error);
726}
727
728/*
729 * The NFS write path cannot handle iovecs with len > 1. So we need to
730 * break up iovecs accordingly (restricting them to wsize).
731 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
732 * For the ASYNC case, 2 copies are needed. The first a copy from the
733 * user buffer to a staging buffer and then a second copy from the staging
734 * buffer to mbufs. This can be optimized by copying from the user buffer
735 * directly into mbufs and passing the chain down, but that requires a
736 * fair amount of re-working of the relevant codepaths (and can be done
737 * later).
738 */
739static int
740nfs_directio_write(vp, uiop, cred, ioflag)
741 struct vnode *vp;
742 struct uio *uiop;
743 struct ucred *cred;
744 int ioflag;
745{
746 int error;
747 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
748 struct thread *td = uiop->uio_td;
749 int size;
750 int wsize;
751
752 mtx_lock(&nmp->nm_mtx);
753 wsize = nmp->nm_wsize;
754 mtx_unlock(&nmp->nm_mtx);
755 if (ioflag & IO_SYNC) {
756 int iomode, must_commit;
757 struct uio uio;
758 struct iovec iov;
759do_sync:
760 while (uiop->uio_resid > 0) {
761 size = min(uiop->uio_resid, wsize);
762 size = min(uiop->uio_iov->iov_len, size);
763 iov.iov_base = uiop->uio_iov->iov_base;
764 iov.iov_len = size;
765 uio.uio_iov = &iov;
766 uio.uio_iovcnt = 1;
767 uio.uio_offset = uiop->uio_offset;
768 uio.uio_resid = size;
769 uio.uio_segflg = UIO_USERSPACE;
770 uio.uio_rw = UIO_WRITE;
771 uio.uio_td = td;
772 iomode = NFSWRITE_FILESYNC;
773 error = ncl_writerpc(vp, &uio, cred, &iomode,
774 &must_commit);
774 &must_commit, 0);
775 KASSERT((must_commit == 0),
776 ("ncl_directio_write: Did not commit write"));
777 if (error)
778 return (error);
779 uiop->uio_offset += size;
780 uiop->uio_resid -= size;
781 if (uiop->uio_iov->iov_len <= size) {
782 uiop->uio_iovcnt--;
783 uiop->uio_iov++;
784 } else {
785 uiop->uio_iov->iov_base =
786 (char *)uiop->uio_iov->iov_base + size;
787 uiop->uio_iov->iov_len -= size;
788 }
789 }
790 } else {
791 struct uio *t_uio;
792 struct iovec *t_iov;
793 struct buf *bp;
794
795 /*
796 * Break up the write into blocksize chunks and hand these
797 * over to nfsiod's for write back.
798 * Unfortunately, this incurs a copy of the data. Since
799 * the user could modify the buffer before the write is
800 * initiated.
801 *
802 * The obvious optimization here is that one of the 2 copies
803 * in the async write path can be eliminated by copying the
804 * data here directly into mbufs and passing the mbuf chain
805 * down. But that will require a fair amount of re-working
806 * of the code and can be done if there's enough interest
807 * in NFS directio access.
808 */
809 while (uiop->uio_resid > 0) {
810 size = min(uiop->uio_resid, wsize);
811 size = min(uiop->uio_iov->iov_len, size);
812 bp = getpbuf(&ncl_pbuf_freecnt);
813 t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
814 t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
815 t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
816 t_iov->iov_len = size;
817 t_uio->uio_iov = t_iov;
818 t_uio->uio_iovcnt = 1;
819 t_uio->uio_offset = uiop->uio_offset;
820 t_uio->uio_resid = size;
821 t_uio->uio_segflg = UIO_SYSSPACE;
822 t_uio->uio_rw = UIO_WRITE;
823 t_uio->uio_td = td;
824 bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
825 bp->b_flags |= B_DIRECT;
826 bp->b_iocmd = BIO_WRITE;
827 if (cred != NOCRED) {
828 crhold(cred);
829 bp->b_wcred = cred;
830 } else
831 bp->b_wcred = NOCRED;
832 bp->b_caller1 = (void *)t_uio;
833 bp->b_vp = vp;
834 error = ncl_asyncio(nmp, bp, NOCRED, td);
835 if (error) {
836 free(t_iov->iov_base, M_NFSDIRECTIO);
837 free(t_iov, M_NFSDIRECTIO);
838 free(t_uio, M_NFSDIRECTIO);
839 bp->b_vp = NULL;
840 relpbuf(bp, &ncl_pbuf_freecnt);
841 if (error == EINTR)
842 return (error);
843 goto do_sync;
844 }
845 uiop->uio_offset += size;
846 uiop->uio_resid -= size;
847 if (uiop->uio_iov->iov_len <= size) {
848 uiop->uio_iovcnt--;
849 uiop->uio_iov++;
850 } else {
851 uiop->uio_iov->iov_base =
852 (char *)uiop->uio_iov->iov_base + size;
853 uiop->uio_iov->iov_len -= size;
854 }
855 }
856 }
857 return (0);
858}
859
860/*
861 * Vnode op for write using bio
862 */
863int
864ncl_write(struct vop_write_args *ap)
865{
866 int biosize;
867 struct uio *uio = ap->a_uio;
868 struct thread *td = uio->uio_td;
869 struct vnode *vp = ap->a_vp;
870 struct nfsnode *np = VTONFS(vp);
871 struct ucred *cred = ap->a_cred;
872 int ioflag = ap->a_ioflag;
873 struct buf *bp;
874 struct vattr vattr;
875 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
876 daddr_t lbn;
877 int bcount;
878 int n, on, error = 0;
879 struct proc *p = td?td->td_proc:NULL;
880
881#ifdef DIAGNOSTIC
882 if (uio->uio_rw != UIO_WRITE)
883 panic("ncl_write mode");
884 if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
885 panic("ncl_write proc");
886#endif
887 if (vp->v_type != VREG)
888 return (EIO);
889 mtx_lock(&np->n_mtx);
890 if (np->n_flag & NWRITEERR) {
891 np->n_flag &= ~NWRITEERR;
892 mtx_unlock(&np->n_mtx);
893 return (np->n_error);
894 } else
895 mtx_unlock(&np->n_mtx);
896 mtx_lock(&nmp->nm_mtx);
897 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
898 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
899 mtx_unlock(&nmp->nm_mtx);
900 (void)ncl_fsinfo(nmp, vp, cred, td);
901 mtx_lock(&nmp->nm_mtx);
902 }
903 if (nmp->nm_wsize == 0)
904 (void) newnfs_iosize(nmp);
905 mtx_unlock(&nmp->nm_mtx);
906
907 /*
908 * Synchronously flush pending buffers if we are in synchronous
909 * mode or if we are appending.
910 */
911 if (ioflag & (IO_APPEND | IO_SYNC)) {
912 mtx_lock(&np->n_mtx);
913 if (np->n_flag & NMODIFIED) {
914 mtx_unlock(&np->n_mtx);
915#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
916 /*
917 * Require non-blocking, synchronous writes to
918 * dirty files to inform the program it needs
919 * to fsync(2) explicitly.
920 */
921 if (ioflag & IO_NDELAY)
922 return (EAGAIN);
923#endif
924flush_and_restart:
925 np->n_attrstamp = 0;
926 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
927 if (error)
928 return (error);
929 } else
930 mtx_unlock(&np->n_mtx);
931 }
932
933 /*
934 * If IO_APPEND then load uio_offset. We restart here if we cannot
935 * get the append lock.
936 */
937 if (ioflag & IO_APPEND) {
938 np->n_attrstamp = 0;
939 error = VOP_GETATTR(vp, &vattr, cred);
940 if (error)
941 return (error);
942 mtx_lock(&np->n_mtx);
943 uio->uio_offset = np->n_size;
944 mtx_unlock(&np->n_mtx);
945 }
946
947 if (uio->uio_offset < 0)
948 return (EINVAL);
949 if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
950 return (EFBIG);
951 if (uio->uio_resid == 0)
952 return (0);
953
954 if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
955 return nfs_directio_write(vp, uio, cred, ioflag);
956
957 /*
958 * Maybe this should be above the vnode op call, but so long as
959 * file servers have no limits, i don't think it matters
960 */
961 if (p != NULL) {
962 PROC_LOCK(p);
963 if (uio->uio_offset + uio->uio_resid >
964 lim_cur(p, RLIMIT_FSIZE)) {
965 psignal(p, SIGXFSZ);
966 PROC_UNLOCK(p);
967 return (EFBIG);
968 }
969 PROC_UNLOCK(p);
970 }
971
972 biosize = vp->v_mount->mnt_stat.f_iosize;
973 /*
974 * Find all of this file's B_NEEDCOMMIT buffers. If our writes
975 * would exceed the local maximum per-file write commit size when
976 * combined with those, we must decide whether to flush,
977 * go synchronous, or return error. We don't bother checking
978 * IO_UNIT -- we just make all writes atomic anyway, as there's
979 * no point optimizing for something that really won't ever happen.
980 */
981 if (!(ioflag & IO_SYNC)) {
982 int nflag;
983
984 mtx_lock(&np->n_mtx);
985 nflag = np->n_flag;
986 mtx_unlock(&np->n_mtx);
987 int needrestart = 0;
988 if (nmp->nm_wcommitsize < uio->uio_resid) {
989 /*
990 * If this request could not possibly be completed
991 * without exceeding the maximum outstanding write
992 * commit size, see if we can convert it into a
993 * synchronous write operation.
994 */
995 if (ioflag & IO_NDELAY)
996 return (EAGAIN);
997 ioflag |= IO_SYNC;
998 if (nflag & NMODIFIED)
999 needrestart = 1;
1000 } else if (nflag & NMODIFIED) {
1001 int wouldcommit = 0;
1002 BO_LOCK(&vp->v_bufobj);
1003 if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
1004 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
1005 b_bobufs) {
1006 if (bp->b_flags & B_NEEDCOMMIT)
1007 wouldcommit += bp->b_bcount;
1008 }
1009 }
1010 BO_UNLOCK(&vp->v_bufobj);
1011 /*
1012 * Since we're not operating synchronously and
1013 * bypassing the buffer cache, we are in a commit
1014 * and holding all of these buffers whether
1015 * transmitted or not. If not limited, this
1016 * will lead to the buffer cache deadlocking,
1017 * as no one else can flush our uncommitted buffers.
1018 */
1019 wouldcommit += uio->uio_resid;
1020 /*
1021 * If we would initially exceed the maximum
1022 * outstanding write commit size, flush and restart.
1023 */
1024 if (wouldcommit > nmp->nm_wcommitsize)
1025 needrestart = 1;
1026 }
1027 if (needrestart)
1028 goto flush_and_restart;
1029 }
1030
1031 do {
1032 NFSINCRGLOBAL(newnfsstats.biocache_writes);
1033 lbn = uio->uio_offset / biosize;
1034 on = uio->uio_offset & (biosize-1);
1035 n = min((unsigned)(biosize - on), uio->uio_resid);
1036again:
1037 /*
1038 * Handle direct append and file extension cases, calculate
1039 * unaligned buffer size.
1040 */
1041 mtx_lock(&np->n_mtx);
1042 if (uio->uio_offset == np->n_size && n) {
1043 mtx_unlock(&np->n_mtx);
1044 /*
1045 * Get the buffer (in its pre-append state to maintain
1046 * B_CACHE if it was previously set). Resize the
1047 * nfsnode after we have locked the buffer to prevent
1048 * readers from reading garbage.
1049 */
1050 bcount = on;
1051 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1052
1053 if (bp != NULL) {
1054 long save;
1055
1056 mtx_lock(&np->n_mtx);
1057 np->n_size = uio->uio_offset + n;
1058 np->n_flag |= NMODIFIED;
1059 vnode_pager_setsize(vp, np->n_size);
1060 mtx_unlock(&np->n_mtx);
1061
1062 save = bp->b_flags & B_CACHE;
1063 bcount += n;
1064 allocbuf(bp, bcount);
1065 bp->b_flags |= save;
1066 }
1067 } else {
1068 /*
1069 * Obtain the locked cache block first, and then
1070 * adjust the file's size as appropriate.
1071 */
1072 bcount = on + n;
1073 if ((off_t)lbn * biosize + bcount < np->n_size) {
1074 if ((off_t)(lbn + 1) * biosize < np->n_size)
1075 bcount = biosize;
1076 else
1077 bcount = np->n_size - (off_t)lbn * biosize;
1078 }
1079 mtx_unlock(&np->n_mtx);
1080 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1081 mtx_lock(&np->n_mtx);
1082 if (uio->uio_offset + n > np->n_size) {
1083 np->n_size = uio->uio_offset + n;
1084 np->n_flag |= NMODIFIED;
1085 vnode_pager_setsize(vp, np->n_size);
1086 }
1087 mtx_unlock(&np->n_mtx);
1088 }
1089
1090 if (!bp) {
1091 error = newnfs_sigintr(nmp, td);
1092 if (!error)
1093 error = EINTR;
1094 break;
1095 }
1096
1097 /*
1098 * Issue a READ if B_CACHE is not set. In special-append
1099 * mode, B_CACHE is based on the buffer prior to the write
1100 * op and is typically set, avoiding the read. If a read
1101 * is required in special append mode, the server will
1102 * probably send us a short-read since we extended the file
1103 * on our end, resulting in b_resid == 0 and, thusly,
1104 * B_CACHE getting set.
1105 *
1106 * We can also avoid issuing the read if the write covers
1107 * the entire buffer. We have to make sure the buffer state
1108 * is reasonable in this case since we will not be initiating
1109 * I/O. See the comments in kern/vfs_bio.c's getblk() for
1110 * more information.
1111 *
1112 * B_CACHE may also be set due to the buffer being cached
1113 * normally.
1114 */
1115
1116 if (on == 0 && n == bcount) {
1117 bp->b_flags |= B_CACHE;
1118 bp->b_flags &= ~B_INVAL;
1119 bp->b_ioflags &= ~BIO_ERROR;
1120 }
1121
1122 if ((bp->b_flags & B_CACHE) == 0) {
1123 bp->b_iocmd = BIO_READ;
1124 vfs_busy_pages(bp, 0);
952 return (0);
953
954 if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
955 return nfs_directio_write(vp, uio, cred, ioflag);
956
957 /*
958 * Maybe this should be above the vnode op call, but so long as
959	 * file servers have no limits, I don't think it matters
960 */
961 if (p != NULL) {
962 PROC_LOCK(p);
963 if (uio->uio_offset + uio->uio_resid >
964 lim_cur(p, RLIMIT_FSIZE)) {
965 psignal(p, SIGXFSZ);
966 PROC_UNLOCK(p);
967 return (EFBIG);
968 }
969 PROC_UNLOCK(p);
970 }
971
972 biosize = vp->v_mount->mnt_stat.f_iosize;
973 /*
974 * Find all of this file's B_NEEDCOMMIT buffers. If our writes
975 * would exceed the local maximum per-file write commit size when
976 * combined with those, we must decide whether to flush,
977 * go synchronous, or return error. We don't bother checking
978 * IO_UNIT -- we just make all writes atomic anyway, as there's
979 * no point optimizing for something that really won't ever happen.
980 */
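	/*
	 * A concrete reading of that rule (hypothetical numbers; the real
	 * limit is the per-mount nm_wcommitsize): with nm_wcommitsize ==
	 * 1 MB, a 2 MB non-IO_SYNC write is converted to a synchronous
	 * write (or returns EAGAIN under IO_NDELAY), while a 256 KB write
	 * to a file already holding 900 KB of B_NEEDCOMMIT buffers takes
	 * the flush_and_restart path below instead.
	 */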
981 if (!(ioflag & IO_SYNC)) {
982 int nflag;
983
984 mtx_lock(&np->n_mtx);
985 nflag = np->n_flag;
986 mtx_unlock(&np->n_mtx);
987 int needrestart = 0;
988 if (nmp->nm_wcommitsize < uio->uio_resid) {
989 /*
990 * If this request could not possibly be completed
991 * without exceeding the maximum outstanding write
992 * commit size, see if we can convert it into a
993 * synchronous write operation.
994 */
995 if (ioflag & IO_NDELAY)
996 return (EAGAIN);
997 ioflag |= IO_SYNC;
998 if (nflag & NMODIFIED)
999 needrestart = 1;
1000 } else if (nflag & NMODIFIED) {
1001 int wouldcommit = 0;
1002 BO_LOCK(&vp->v_bufobj);
1003 if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
1004 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
1005 b_bobufs) {
1006 if (bp->b_flags & B_NEEDCOMMIT)
1007 wouldcommit += bp->b_bcount;
1008 }
1009 }
1010 BO_UNLOCK(&vp->v_bufobj);
1011 /*
1012 * Since we're not operating synchronously and
1013 * bypassing the buffer cache, we are in a commit
1014 * and holding all of these buffers whether
1015 * transmitted or not. If not limited, this
1016 * will lead to the buffer cache deadlocking,
1017 * as no one else can flush our uncommitted buffers.
1018 */
1019 wouldcommit += uio->uio_resid;
1020 /*
1021 * If we would initially exceed the maximum
1022 * outstanding write commit size, flush and restart.
1023 */
1024 if (wouldcommit > nmp->nm_wcommitsize)
1025 needrestart = 1;
1026 }
1027 if (needrestart)
1028 goto flush_and_restart;
1029 }
1030
1031 do {
1032 NFSINCRGLOBAL(newnfsstats.biocache_writes);
1033 lbn = uio->uio_offset / biosize;
1034 on = uio->uio_offset & (biosize-1);
1035 n = min((unsigned)(biosize - on), uio->uio_resid);
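		/*
		 * Worked example (illustrative values): with biosize == 8192,
		 * uio_offset == 20000 and uio_resid == 10000 this gives
		 * lbn == 2, on == 20000 & 8191 == 3616 and
		 * n == min(8192 - 3616, 10000) == 4576, i.e. this pass fills
		 * the tail of logical block 2.  The mask form of "on" relies
		 * on biosize being a power of 2.
		 */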
1036again:
1037 /*
1038 * Handle direct append and file extension cases, calculate
1039 * unaligned buffer size.
1040 */
1041 mtx_lock(&np->n_mtx);
1042 if (uio->uio_offset == np->n_size && n) {
1043 mtx_unlock(&np->n_mtx);
1044 /*
1045 * Get the buffer (in its pre-append state to maintain
1046 * B_CACHE if it was previously set). Resize the
1047 * nfsnode after we have locked the buffer to prevent
1048 * readers from reading garbage.
1049 */
1050 bcount = on;
1051 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1052
1053 if (bp != NULL) {
1054 long save;
1055
1056 mtx_lock(&np->n_mtx);
1057 np->n_size = uio->uio_offset + n;
1058 np->n_flag |= NMODIFIED;
1059 vnode_pager_setsize(vp, np->n_size);
1060 mtx_unlock(&np->n_mtx);
1061
1062 save = bp->b_flags & B_CACHE;
1063 bcount += n;
1064 allocbuf(bp, bcount);
1065 bp->b_flags |= save;
1066 }
1067 } else {
1068 /*
1069 * Obtain the locked cache block first, and then
1070 * adjust the file's size as appropriate.
1071 */
1072 bcount = on + n;
1073 if ((off_t)lbn * biosize + bcount < np->n_size) {
1074 if ((off_t)(lbn + 1) * biosize < np->n_size)
1075 bcount = biosize;
1076 else
1077 bcount = np->n_size - (off_t)lbn * biosize;
1078 }
1079 mtx_unlock(&np->n_mtx);
1080 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1081 mtx_lock(&np->n_mtx);
1082 if (uio->uio_offset + n > np->n_size) {
1083 np->n_size = uio->uio_offset + n;
1084 np->n_flag |= NMODIFIED;
1085 vnode_pager_setsize(vp, np->n_size);
1086 }
1087 mtx_unlock(&np->n_mtx);
1088 }
1089
1090 if (!bp) {
1091 error = newnfs_sigintr(nmp, td);
1092 if (!error)
1093 error = EINTR;
1094 break;
1095 }
1096
1097 /*
1098 * Issue a READ if B_CACHE is not set. In special-append
1099 * mode, B_CACHE is based on the buffer prior to the write
1100 * op and is typically set, avoiding the read. If a read
1101 * is required in special append mode, the server will
1102 * probably send us a short-read since we extended the file
1103	 * on our end, resulting in b_resid == 0 and, thus,
1104 * B_CACHE getting set.
1105 *
1106 * We can also avoid issuing the read if the write covers
1107 * the entire buffer. We have to make sure the buffer state
1108 * is reasonable in this case since we will not be initiating
1109 * I/O. See the comments in kern/vfs_bio.c's getblk() for
1110 * more information.
1111 *
1112 * B_CACHE may also be set due to the buffer being cached
1113 * normally.
1114 */
1115
1116 if (on == 0 && n == bcount) {
1117 bp->b_flags |= B_CACHE;
1118 bp->b_flags &= ~B_INVAL;
1119 bp->b_ioflags &= ~BIO_ERROR;
1120 }
1121
1122 if ((bp->b_flags & B_CACHE) == 0) {
1123 bp->b_iocmd = BIO_READ;
1124 vfs_busy_pages(bp, 0);
1125 error = ncl_doio(vp, bp, cred, td);
1125 error = ncl_doio(vp, bp, cred, td, 0);
1126 if (error) {
1127 brelse(bp);
1128 break;
1129 }
1130 }
1131 if (bp->b_wcred == NOCRED)
1132 bp->b_wcred = crhold(cred);
1133 mtx_lock(&np->n_mtx);
1134 np->n_flag |= NMODIFIED;
1135 mtx_unlock(&np->n_mtx);
1136
1137 /*
1138 * If dirtyend exceeds file size, chop it down. This should
1139 * not normally occur but there is an append race where it
1140 * might occur XXX, so we log it.
1141 *
1142 * If the chopping creates a reverse-indexed or degenerate
1143 * situation with dirtyoff/end, we 0 both of them.
1144 */
1145
1146 if (bp->b_dirtyend > bcount) {
1147 ncl_printf("NFS append race @%lx:%d\n",
1148 (long)bp->b_blkno * DEV_BSIZE,
1149 bp->b_dirtyend - bcount);
1150 bp->b_dirtyend = bcount;
1151 }
1152
1153 if (bp->b_dirtyoff >= bp->b_dirtyend)
1154 bp->b_dirtyoff = bp->b_dirtyend = 0;
1155
1156 /*
1157 * If the new write will leave a contiguous dirty
1158 * area, just update the b_dirtyoff and b_dirtyend,
1159 * otherwise force a write rpc of the old dirty area.
1160 *
1161 * While it is possible to merge discontiguous writes due to
1162 * our having a B_CACHE buffer ( and thus valid read data
1163 * for the hole), we don't because it could lead to
1164 * significant cache coherency problems with multiple clients,
1165 * especially if locking is implemented later on.
1166 *
1167	 * As an optimization we could theoretically maintain
1168 * a linked list of discontinuous areas, but we would still
1169 * have to commit them separately so there isn't much
1170 * advantage to it except perhaps a bit of asynchronization.
1171 */
1172
1173 if (bp->b_dirtyend > 0 &&
1174 (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1175 if (bwrite(bp) == EINTR) {
1176 error = EINTR;
1177 break;
1178 }
1179 goto again;
1180 }
1181
1182 error = uiomove((char *)bp->b_data + on, n, uio);
1183
1184 /*
1185 * Since this block is being modified, it must be written
1186 * again and not just committed. Since write clustering does
1187 * not work for the stage 1 data write, only the stage 2
1188 * commit rpc, we have to clear B_CLUSTEROK as well.
1189 */
1190 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1191
1192 if (error) {
1193 bp->b_ioflags |= BIO_ERROR;
1194 brelse(bp);
1195 break;
1196 }
1197
1198 /*
1199 * Only update dirtyoff/dirtyend if not a degenerate
1200 * condition.
1201 */
1202 if (n) {
1203 if (bp->b_dirtyend > 0) {
1204 bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1205 bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1206 } else {
1207 bp->b_dirtyoff = on;
1208 bp->b_dirtyend = on + n;
1209 }
1210 vfs_bio_set_valid(bp, on, n);
1211 }
1212
1213 /*
1214 * If IO_SYNC do bwrite().
1215 *
1216 * IO_INVAL appears to be unused. The idea appears to be
1217 * to turn off caching in this case. Very odd. XXX
1218 */
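		/*
		 * Taken together, the policy implemented below is: IO_SYNC
		 * writes are pushed immediately with bwrite(); a write that
		 * exactly fills the block (n + on == biosize) is queued
		 * asynchronously through ncl_writebp(); and a partial block
		 * is left as a delayed write via bdwrite().
		 */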
1219 if ((ioflag & IO_SYNC)) {
1220 if (ioflag & IO_INVAL)
1221 bp->b_flags |= B_NOCACHE;
1222 error = bwrite(bp);
1223 if (error)
1224 break;
1225 } else if ((n + on) == biosize) {
1226 bp->b_flags |= B_ASYNC;
1227 (void) ncl_writebp(bp, 0, NULL);
1228 } else {
1229 bdwrite(bp);
1230 }
1231 } while (uio->uio_resid > 0 && n > 0);
1232
1233 return (error);
1234}
1235
1236/*
1237 * Get an nfs cache block.
1238 *
1239 * Allocate a new one if the block isn't currently in the cache
1240 * and return the block marked busy. If the calling process is
1241 * interrupted by a signal for an interruptible mount point, return
1242 * NULL.
1243 *
1244 * The caller must carefully deal with the possible B_INVAL state of
1245 * the buffer. ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
1246 * indirectly), so synchronous reads can be issued without worrying about
1247 * the B_INVAL state. We have to be a little more careful when dealing
1248 * with writes (see comments in nfs_write()) when extending a file past
1249 * its EOF.
1250 */
1251static struct buf *
1252nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1253{
1254 struct buf *bp;
1255 struct mount *mp;
1256 struct nfsmount *nmp;
1257
1258 mp = vp->v_mount;
1259 nmp = VFSTONFS(mp);
1260
1261 if (nmp->nm_flag & NFSMNT_INT) {
1262 sigset_t oldset;
1263
1264 newnfs_set_sigmask(td, &oldset);
1265 bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
1266 newnfs_restore_sigmask(td, &oldset);
1267 while (bp == NULL) {
1268 if (newnfs_sigintr(nmp, td))
1269 return (NULL);
1270 bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1271 }
1272 } else {
1273 bp = getblk(vp, bn, size, 0, 0, 0);
1274 }
1275
1276 if (vp->v_type == VREG) {
1277 int biosize;
1278
1279 biosize = mp->mnt_stat.f_iosize;
1280 bp->b_blkno = bn * (biosize / DEV_BSIZE);
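		/*
		 * b_blkno is kept in DEV_BSIZE (512-byte) units.  For example,
		 * with a biosize of 8192 (whatever f_iosize reports for the
		 * mount), the scale factor is 16, so logical block 3 becomes
		 * b_blkno == 48.
		 */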
1281 }
1282 return (bp);
1283}
1284
1285/*
1286 * Flush and invalidate all dirty buffers. If another process is already
1287 * doing the flush, just wait for completion.
1288 */
1289int
1290ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1291{
1292 struct nfsnode *np = VTONFS(vp);
1293 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1294 int error = 0, slpflag, slptimeo;
1295 int old_lock = 0;
1296
1297 ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1298
1299 if ((nmp->nm_flag & NFSMNT_INT) == 0)
1300 intrflg = 0;
1301 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1302 intrflg = 1;
1303 if (intrflg) {
1304 slpflag = NFS_PCATCH;
1305 slptimeo = 2 * hz;
1306 } else {
1307 slpflag = 0;
1308 slptimeo = 0;
1309 }
1310
1311 old_lock = ncl_upgrade_vnlock(vp);
1312 if (vp->v_iflag & VI_DOOMED) {
1313 /*
1314 * Since vgonel() uses the generic vinvalbuf() to flush
1315 * dirty buffers and it does not call this function, it
1316 * is safe to just return OK when VI_DOOMED is set.
1317 */
1318 ncl_downgrade_vnlock(vp, old_lock);
1319 return (0);
1320 }
1321
1322 /*
1323 * Now, flush as required.
1324 */
1325 if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1326 VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
1327 vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1328 VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
1329 /*
1330 * If the page clean was interrupted, fail the invalidation.
1331 * Not doing so, we run the risk of losing dirty pages in the
1332 * vinvalbuf() call below.
1333 */
1334 if (intrflg && (error = newnfs_sigintr(nmp, td)))
1335 goto out;
1336 }
1337
1338 error = vinvalbuf(vp, flags, slpflag, 0);
1339 while (error) {
1340 if (intrflg && (error = newnfs_sigintr(nmp, td)))
1341 goto out;
1342 error = vinvalbuf(vp, flags, 0, slptimeo);
1343 }
1344 mtx_lock(&np->n_mtx);
1345 if (np->n_directio_asyncwr == 0)
1346 np->n_flag &= ~NMODIFIED;
1347 mtx_unlock(&np->n_mtx);
1348out:
1349 ncl_downgrade_vnlock(vp, old_lock);
1350 return error;
1351}
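/*
 * Minimal usage sketch for ncl_vinvalbuf() (it mirrors the call made from
 * the flush_and_restart path in ncl_write() above; a nonzero return means
 * the flush was interrupted or failed and is simply propagated here):
 *
 *	error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
 *	if (error)
 *		return (error);
 */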
1352
1353/*
1354 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1355 * This is mainly to avoid queueing async I/O requests when the nfsiods
1356 * are all hung on a dead server.
1357 *
1358 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1359 * is eventually dequeued by the async daemon, ncl_doio() *will*.
1360 */
1361int
1362ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1363{
1364 int iod;
1365 int gotiod;
1366 int slpflag = 0;
1367 int slptimeo = 0;
1368 int error, error2;
1369
1370 /*
1371 * Unless iothreadcnt is set > 0, don't bother with async I/O
1372 * threads. For LAN environments, they don't buy any significant
1373 * performance improvement that you can't get with large block
1374 * sizes.
1375 */
1376 if (nmp->nm_readahead == 0)
1377 return (EPERM);
1378
1379 /*
1380	 * Commits are usually short and sweet, so let's save some CPU and
1381 * leave the async daemons for more important rpc's (such as reads
1382 * and writes).
1383 */
1384 mtx_lock(&ncl_iod_mutex);
1385 if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1386 (nmp->nm_bufqiods > ncl_numasync / 2)) {
1387 mtx_unlock(&ncl_iod_mutex);
1388 return(EIO);
1389 }
1390again:
1391 if (nmp->nm_flag & NFSMNT_INT)
1392 slpflag = NFS_PCATCH;
1393 gotiod = FALSE;
1394
1395 /*
1396 * Find a free iod to process this request.
1397 */
1398 for (iod = 0; iod < ncl_numasync; iod++)
1399 if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
1400 gotiod = TRUE;
1401 break;
1402 }
1403
1404 /*
1405 * Try to create one if none are free.
1406 */
1407 if (!gotiod) {
1408 iod = ncl_nfsiodnew(1);
1409 if (iod != -1)
1410 gotiod = TRUE;
1411 }
1412
1413 if (gotiod) {
1414 /*
1415 * Found one, so wake it up and tell it which
1416 * mount to process.
1417 */
1418 NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1419 iod, nmp));
1420 ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
1421 ncl_iodmount[iod] = nmp;
1422 nmp->nm_bufqiods++;
1423 wakeup(&ncl_iodwant[iod]);
1424 }
1425
1426 /*
1427 * If none are free, we may already have an iod working on this mount
1428 * point. If so, it will process our request.
1429 */
1430 if (!gotiod) {
1431 if (nmp->nm_bufqiods > 0) {
1432 NFS_DPF(ASYNCIO,
1433 ("ncl_asyncio: %d iods are already processing mount %p\n",
1434 nmp->nm_bufqiods, nmp));
1435 gotiod = TRUE;
1436 }
1437 }
1438
1439 /*
1440 * If we have an iod which can process the request, then queue
1441 * the buffer.
1442 */
1443 if (gotiod) {
1444 /*
1445 * Ensure that the queue never grows too large. We still want
1446		 * to asynchronize, so we block rather than return EIO.
1447 */
1448 while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1449 NFS_DPF(ASYNCIO,
1450 ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1451 nmp->nm_bufqwant = TRUE;
1452 error = newnfs_msleep(td, &nmp->nm_bufq,
1453 &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
1454 slptimeo);
1455 if (error) {
1456 error2 = newnfs_sigintr(nmp, td);
1457 if (error2) {
1458 mtx_unlock(&ncl_iod_mutex);
1459 return (error2);
1460 }
1461 if (slpflag == NFS_PCATCH) {
1462 slpflag = 0;
1463 slptimeo = 2 * hz;
1464 }
1465 }
1466 /*
1467 * We might have lost our iod while sleeping,
1468			 * so check and loop if necessary.
1469 */
1470 if (nmp->nm_bufqiods == 0) {
1471 NFS_DPF(ASYNCIO,
1472 ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1473 goto again;
1474 }
1475 }
1476
1477 /* We might have lost our nfsiod */
1478 if (nmp->nm_bufqiods == 0) {
1479 NFS_DPF(ASYNCIO,
1480 ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1481 goto again;
1482 }
1483
1484 if (bp->b_iocmd == BIO_READ) {
1485 if (bp->b_rcred == NOCRED && cred != NOCRED)
1486 bp->b_rcred = crhold(cred);
1487 } else {
1488 if (bp->b_wcred == NOCRED && cred != NOCRED)
1489 bp->b_wcred = crhold(cred);
1490 }
1491
1492 if (bp->b_flags & B_REMFREE)
1493 bremfreef(bp);
1494 BUF_KERNPROC(bp);
1495 TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1496 nmp->nm_bufqlen++;
1497 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1498 mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1499 VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1500 VTONFS(bp->b_vp)->n_directio_asyncwr++;
1501 mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1502 }
1503 mtx_unlock(&ncl_iod_mutex);
1504 return (0);
1505 }
1506
1507 mtx_unlock(&ncl_iod_mutex);
1508
1509 /*
1510 * All the iods are busy on other mounts, so return EIO to
1511 * force the caller to process the i/o synchronously.
1512 */
1513 NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1514 return (EIO);
1515}
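/*
 * Calling-convention sketch (hypothetical caller): when ncl_asyncio()
 * returns an error such as EIO, the buffer was not queued and the caller
 * is expected to fall back to synchronous I/O on it, e.g.
 *
 *	if (ncl_asyncio(nmp, bp, NOCRED, td) != 0)
 *		(void) ncl_doio(vp, bp, cred, td, 0);
 *
 * A caller in the buffer strategy path would instead pass a non-zero
 * called_from_strategy to ncl_doio() (see the comment in ncl_doio()).
 */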
1516
1517void
1518ncl_doio_directwrite(struct buf *bp)
1519{
1520 int iomode, must_commit;
1521 struct uio *uiop = (struct uio *)bp->b_caller1;
1522 char *iov_base = uiop->uio_iov->iov_base;
1523
1524 iomode = NFSWRITE_FILESYNC;
1525 uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1526 ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
1526 ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
1527 KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1528 free(iov_base, M_NFSDIRECTIO);
1529 free(uiop->uio_iov, M_NFSDIRECTIO);
1530 free(uiop, M_NFSDIRECTIO);
1531 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1532 struct nfsnode *np = VTONFS(bp->b_vp);
1533 mtx_lock(&np->n_mtx);
1534 np->n_directio_asyncwr--;
1535 if (np->n_directio_asyncwr == 0) {
1536 np->n_flag &= ~NMODIFIED;
1537 if ((np->n_flag & NFSYNCWAIT)) {
1538 np->n_flag &= ~NFSYNCWAIT;
1539 wakeup((caddr_t)&np->n_directio_asyncwr);
1540 }
1541 }
1542 mtx_unlock(&np->n_mtx);
1543 }
1544 bp->b_vp = NULL;
1545 relpbuf(bp, &ncl_pbuf_freecnt);
1546}
1547
1548/*
1549 * Do an I/O operation to/from a cache block. This may be called
1550 * synchronously or from an nfsiod.
1551 */
1552int
1553ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
1553ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
1554 int called_from_strategy)
1554{
1555 struct uio *uiop;
1556 struct nfsnode *np;
1557 struct nfsmount *nmp;
1558 int error = 0, iomode, must_commit = 0;
1559 struct uio uio;
1560 struct iovec io;
1561 struct proc *p = td ? td->td_proc : NULL;
1562 uint8_t iocmd;
1563
1564 np = VTONFS(vp);
1565 nmp = VFSTONFS(vp->v_mount);
1566 uiop = &uio;
1567 uiop->uio_iov = &io;
1568 uiop->uio_iovcnt = 1;
1569 uiop->uio_segflg = UIO_SYSSPACE;
1570 uiop->uio_td = td;
1571
1572 /*
1573 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
1574 * do this here so we do not have to do it in all the code that
1575 * calls us.
1576 */
1577 bp->b_flags &= ~B_INVAL;
1578 bp->b_ioflags &= ~BIO_ERROR;
1579
1580 KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1581 iocmd = bp->b_iocmd;
1582 if (iocmd == BIO_READ) {
1583 io.iov_len = uiop->uio_resid = bp->b_bcount;
1584 io.iov_base = bp->b_data;
1585 uiop->uio_rw = UIO_READ;
1586
1587 switch (vp->v_type) {
1588 case VREG:
1589 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1590 NFSINCRGLOBAL(newnfsstats.read_bios);
1591 error = ncl_readrpc(vp, uiop, cr);
1592
1593 if (!error) {
1594 if (uiop->uio_resid) {
1595 /*
1596 * If we had a short read with no error, we must have
1597 * hit a file hole. We should zero-fill the remainder.
1598 * This can also occur if the server hits the file EOF.
1599 *
1600 * Holes used to be able to occur due to pending
1601 * writes, but that is not possible any longer.
1602 */
1603 int nread = bp->b_bcount - uiop->uio_resid;
1604 int left = uiop->uio_resid;
1605
1606 if (left > 0)
1607 bzero((char *)bp->b_data + nread, left);
1608 uiop->uio_resid = 0;
1609 }
1610 }
1611 /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1612 if (p && (vp->v_vflag & VV_TEXT)) {
1613 mtx_lock(&np->n_mtx);
1614 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1615 mtx_unlock(&np->n_mtx);
1616 PROC_LOCK(p);
1617 killproc(p, "text file modification");
1618 PROC_UNLOCK(p);
1619 } else
1620 mtx_unlock(&np->n_mtx);
1621 }
1622 break;
1623 case VLNK:
1624 uiop->uio_offset = (off_t)0;
1625 NFSINCRGLOBAL(newnfsstats.readlink_bios);
1626 error = ncl_readlinkrpc(vp, uiop, cr);
1627 break;
1628 case VDIR:
1629 NFSINCRGLOBAL(newnfsstats.readdir_bios);
1630 uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1631 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1632 error = ncl_readdirplusrpc(vp, uiop, cr, td);
1633 if (error == NFSERR_NOTSUPP)
1634 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1635 }
1636 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1637 error = ncl_readdirrpc(vp, uiop, cr, td);
1638 /*
1639 * end-of-directory sets B_INVAL but does not generate an
1640 * error.
1641 */
1642 if (error == 0 && uiop->uio_resid == bp->b_bcount)
1643 bp->b_flags |= B_INVAL;
1644 break;
1645 default:
1646 ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
1647 break;
1648 };
1649 if (error) {
1650 bp->b_ioflags |= BIO_ERROR;
1651 bp->b_error = error;
1652 }
1653 } else {
1654 /*
1655 * If we only need to commit, try to commit
1656 */
1657 if (bp->b_flags & B_NEEDCOMMIT) {
1658 int retv;
1659 off_t off;
1660
1661 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1662 retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1663 bp->b_wcred, td);
1664 if (retv == 0) {
1665 bp->b_dirtyoff = bp->b_dirtyend = 0;
1666 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1667 bp->b_resid = 0;
1668 bufdone(bp);
1669 return (0);
1670 }
1671 if (retv == NFSERR_STALEWRITEVERF) {
1672 ncl_clearcommit(vp->v_mount);
1673 }
1674 }
1675
1676 /*
1677 * Setup for actual write
1678 */
1679 mtx_lock(&np->n_mtx);
1680 if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1681 bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1682 mtx_unlock(&np->n_mtx);
1683
1684 if (bp->b_dirtyend > bp->b_dirtyoff) {
1685 io.iov_len = uiop->uio_resid = bp->b_dirtyend
1686 - bp->b_dirtyoff;
1687 uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1688 + bp->b_dirtyoff;
1689 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1690 uiop->uio_rw = UIO_WRITE;
1691 NFSINCRGLOBAL(newnfsstats.write_bios);
1692
1693 if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1694 iomode = NFSWRITE_UNSTABLE;
1695 else
1696 iomode = NFSWRITE_FILESYNC;
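			/*
			 * That is: an async write with none of B_NEEDCOMMIT,
			 * B_NOCACHE or B_CLUSTER set goes out with iomode
			 * NFSWRITE_UNSTABLE, letting the server merely cache the
			 * data; it is made durable later by a commit RPC (see the
			 * B_NEEDCOMMIT handling below).  Everything else is sent
			 * FILESYNC, so the server must commit before replying.
			 */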
1697
1555{
1556 struct uio *uiop;
1557 struct nfsnode *np;
1558 struct nfsmount *nmp;
1559 int error = 0, iomode, must_commit = 0;
1560 struct uio uio;
1561 struct iovec io;
1562 struct proc *p = td ? td->td_proc : NULL;
1563 uint8_t iocmd;
1564
1565 np = VTONFS(vp);
1566 nmp = VFSTONFS(vp->v_mount);
1567 uiop = &uio;
1568 uiop->uio_iov = &io;
1569 uiop->uio_iovcnt = 1;
1570 uiop->uio_segflg = UIO_SYSSPACE;
1571 uiop->uio_td = td;
1572
1573 /*
1574 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
1575 * do this here so we do not have to do it in all the code that
1576 * calls us.
1577 */
1578 bp->b_flags &= ~B_INVAL;
1579 bp->b_ioflags &= ~BIO_ERROR;
1580
1581 KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1582 iocmd = bp->b_iocmd;
1583 if (iocmd == BIO_READ) {
1584 io.iov_len = uiop->uio_resid = bp->b_bcount;
1585 io.iov_base = bp->b_data;
1586 uiop->uio_rw = UIO_READ;
1587
1588 switch (vp->v_type) {
1589 case VREG:
1590 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1591 NFSINCRGLOBAL(newnfsstats.read_bios);
1592 error = ncl_readrpc(vp, uiop, cr);
1593
1594 if (!error) {
1595 if (uiop->uio_resid) {
1596 /*
1597 * If we had a short read with no error, we must have
1598 * hit a file hole. We should zero-fill the remainder.
1599 * This can also occur if the server hits the file EOF.
1600 *
1601 * Holes used to be able to occur due to pending
1602 * writes, but that is not possible any longer.
1603 */
1604 int nread = bp->b_bcount - uiop->uio_resid;
1605 int left = uiop->uio_resid;
1606
1607 if (left > 0)
1608 bzero((char *)bp->b_data + nread, left);
1609 uiop->uio_resid = 0;
1610 }
1611 }
1612 /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1613 if (p && (vp->v_vflag & VV_TEXT)) {
1614 mtx_lock(&np->n_mtx);
1615 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1616 mtx_unlock(&np->n_mtx);
1617 PROC_LOCK(p);
1618 killproc(p, "text file modification");
1619 PROC_UNLOCK(p);
1620 } else
1621 mtx_unlock(&np->n_mtx);
1622 }
1623 break;
1624 case VLNK:
1625 uiop->uio_offset = (off_t)0;
1626 NFSINCRGLOBAL(newnfsstats.readlink_bios);
1627 error = ncl_readlinkrpc(vp, uiop, cr);
1628 break;
1629 case VDIR:
1630 NFSINCRGLOBAL(newnfsstats.readdir_bios);
1631 uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1632 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1633 error = ncl_readdirplusrpc(vp, uiop, cr, td);
1634 if (error == NFSERR_NOTSUPP)
1635 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1636 }
1637 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1638 error = ncl_readdirrpc(vp, uiop, cr, td);
1639 /*
1640 * end-of-directory sets B_INVAL but does not generate an
1641 * error.
1642 */
1643 if (error == 0 && uiop->uio_resid == bp->b_bcount)
1644 bp->b_flags |= B_INVAL;
1645 break;
1646 default:
1647 ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
1648 break;
1649 };
1650 if (error) {
1651 bp->b_ioflags |= BIO_ERROR;
1652 bp->b_error = error;
1653 }
1654 } else {
1655 /*
1656 * If we only need to commit, try to commit
1657 */
1658 if (bp->b_flags & B_NEEDCOMMIT) {
1659 int retv;
1660 off_t off;
1661
1662 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1663 retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1664 bp->b_wcred, td);
1665 if (retv == 0) {
1666 bp->b_dirtyoff = bp->b_dirtyend = 0;
1667 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1668 bp->b_resid = 0;
1669 bufdone(bp);
1670 return (0);
1671 }
1672 if (retv == NFSERR_STALEWRITEVERF) {
1673 ncl_clearcommit(vp->v_mount);
1674 }
1675 }
1676
1677 /*
1678 * Setup for actual write
1679 */
1680 mtx_lock(&np->n_mtx);
1681 if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1682 bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1683 mtx_unlock(&np->n_mtx);
1684
1685 if (bp->b_dirtyend > bp->b_dirtyoff) {
1686 io.iov_len = uiop->uio_resid = bp->b_dirtyend
1687 - bp->b_dirtyoff;
1688 uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1689 + bp->b_dirtyoff;
1690 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1691 uiop->uio_rw = UIO_WRITE;
1692 NFSINCRGLOBAL(newnfsstats.write_bios);
1693
1694 if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1695 iomode = NFSWRITE_UNSTABLE;
1696 else
1697 iomode = NFSWRITE_FILESYNC;
1698
1698 error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit);
1699 error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
1700 called_from_strategy);
1699
1700 /*
1701 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1702 * to cluster the buffers needing commit. This will allow
1703 * the system to submit a single commit rpc for the whole
1704 * cluster. We can do this even if the buffer is not 100%
1705 * dirty (relative to the NFS blocksize), so we optimize the
1706 * append-to-file-case.
1707 *
1708 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1709 * cleared because write clustering only works for commit
1710 * rpc's, not for the data portion of the write).
1711 */
1712
1713 if (!error && iomode == NFSWRITE_UNSTABLE) {
1714 bp->b_flags |= B_NEEDCOMMIT;
1715 if (bp->b_dirtyoff == 0
1716 && bp->b_dirtyend == bp->b_bcount)
1717 bp->b_flags |= B_CLUSTEROK;
1718 } else {
1719 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1720 }
1721
1722 /*
1723 * For an interrupted write, the buffer is still valid
1724 * and the write hasn't been pushed to the server yet,
1725 * so we can't set BIO_ERROR and report the interruption
1726 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1727 * is not relevant, so the rpc attempt is essentially
1728 * a noop. For the case of a V3 write rpc not being
1729 * committed to stable storage, the block is still
1730 * dirty and requires either a commit rpc or another
1731 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1732 * the block is reused. This is indicated by setting
1733 * the B_DELWRI and B_NEEDCOMMIT flags.
1734 *
1737 * EIO is returned by ncl_writerpc() to indicate a recoverable
1738 * write error and is handled as above, except that
1739 * B_EINTR isn't set. One cause of this is a stale stateid
1740 * error for the RPC that indicates recovery is required,
1741 * when called with called_from_strategy != 0.
1742 *
1735 * If the buffer is marked B_PAGING, it does not reside on
1736 * the vp's paging queues so we cannot call bdirty(). The
1737 * bp in this case is not an NFS cache block so we should
1738 * be safe. XXX
1739 *
1740 * The logic below breaks up errors into recoverable and
1741 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1742 * and keep the buffer around for potential write retries.
1743 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
1744 * and save the error in the nfsnode. This is less than ideal
1745 * but necessary. Keeping such buffers around could potentially
1746 * cause buffer exhaustion eventually (they can never be written
1747		 * out, so they will constantly be re-dirtied). It also causes
1748 * all sorts of vfs panics. For non-recoverable write errors,
1749 * also invalidate the attrcache, so we'll be forced to go over
1750 * the wire for this object, returning an error to user on next
1751 * call (most of the time).
1752 */
1753 if (error == EINTR || error == EIO || error == ETIMEDOUT
1754 || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1755 int s;
1756
1757 s = splbio();
1758 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1759 if ((bp->b_flags & B_PAGING) == 0) {
1760 bdirty(bp);
1761 bp->b_flags &= ~B_DONE;
1762 }
1763 if (error && (bp->b_flags & B_ASYNC) == 0)
1771 if ((error == EINTR || error == ETIMEDOUT) &&
1772 (bp->b_flags & B_ASYNC) == 0)
1764 bp->b_flags |= B_EINTR;
1765 splx(s);
1766 } else {
1767 if (error) {
1768 bp->b_ioflags |= BIO_ERROR;
1769 bp->b_flags |= B_INVAL;
1770 bp->b_error = np->n_error = error;
1771 mtx_lock(&np->n_mtx);
1772 np->n_flag |= NWRITEERR;
1773 np->n_attrstamp = 0;
1774 mtx_unlock(&np->n_mtx);
1775 }
1776 bp->b_dirtyoff = bp->b_dirtyend = 0;
1777 }
1778 } else {
1779 bp->b_resid = 0;
1780 bufdone(bp);
1781 return (0);
1782 }
1783 }
1784 bp->b_resid = uiop->uio_resid;
1785 if (must_commit)
1786 ncl_clearcommit(vp->v_mount);
1787 bufdone(bp);
1788 return (error);
1789}
1790
1791/*
1792 * Used to aid in handling ftruncate() operations on the NFS client side.
1793 * Truncation creates a number of special problems for NFS. We have to
1794 * throw away VM pages and buffer cache buffers that are beyond EOF, and
1795 * we have to properly handle VM pages or (potentially dirty) buffers
1796 * that straddle the truncation point.
1797 */
1798
1799int
1800ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1801{
1802 struct nfsnode *np = VTONFS(vp);
1803 u_quad_t tsize;
1804 int biosize = vp->v_mount->mnt_stat.f_iosize;
1805 int error = 0;
1806
1807 mtx_lock(&np->n_mtx);
1808 tsize = np->n_size;
1809 np->n_size = nsize;
1810 mtx_unlock(&np->n_mtx);
1811
1812 if (nsize < tsize) {
1813 struct buf *bp;
1814 daddr_t lbn;
1815 int bufsize;
1816
1817 /*
1818 * vtruncbuf() doesn't get the buffer overlapping the
1819 * truncation point. We may have a B_DELWRI and/or B_CACHE
1820 * buffer that now needs to be truncated.
1821 */
1822 error = vtruncbuf(vp, cred, td, nsize, biosize);
1823 lbn = nsize / biosize;
1824 bufsize = nsize & (biosize - 1);
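		/*
		 * Worked example (illustrative numbers): truncating to
		 * nsize == 20000 with biosize == 8192 gives lbn == 2 and
		 * bufsize == 20000 & 8191 == 3616, so the buffer straddling
		 * the new EOF is grabbed at its new 3616-byte size.  As in
		 * ncl_write(), the mask assumes biosize is a power of 2.
		 */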
1825 bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1826 if (!bp)
1827 return EINTR;
1828 if (bp->b_dirtyoff > bp->b_bcount)
1829 bp->b_dirtyoff = bp->b_bcount;
1830 if (bp->b_dirtyend > bp->b_bcount)
1831 bp->b_dirtyend = bp->b_bcount;
1832 bp->b_flags |= B_RELBUF; /* don't leave garbage around */
1833 brelse(bp);
1834 } else {
1835 vnode_pager_setsize(vp, nsize);
1836 }
1837 return(error);
1838}
1839