1/*-
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * from nfs_vnops.c 8.16 (Berkeley) 5/27/95
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/fs/nfsclient/nfs_clvnops.c 235332 2012-05-12 12:02:51Z rmacklem $");
37
38/*
39 * vnode op calls for Sun NFS version 2, 3 and 4
40 */
41
42#include "opt_kdtrace.h"
43#include "opt_inet.h"
44
45#include <sys/param.h>
46#include <sys/kernel.h>
47#include <sys/systm.h>
48#include <sys/resourcevar.h>
49#include <sys/proc.h>
50#include <sys/mount.h>
51#include <sys/bio.h>
52#include <sys/buf.h>
53#include <sys/jail.h>
54#include <sys/malloc.h>
55#include <sys/mbuf.h>
56#include <sys/namei.h>
57#include <sys/socket.h>
58#include <sys/vnode.h>
59#include <sys/dirent.h>
60#include <sys/fcntl.h>
61#include <sys/lockf.h>
62#include <sys/stat.h>
63#include <sys/sysctl.h>
64#include <sys/signalvar.h>
65
66#include <vm/vm.h>
67#include <vm/vm_extern.h>
68#include <vm/vm_object.h>
69
70#include <fs/nfs/nfsport.h>
71#include <fs/nfsclient/nfsnode.h>
72#include <fs/nfsclient/nfsmount.h>
73#include <fs/nfsclient/nfs.h>
74#include <fs/nfsclient/nfs_kdtrace.h>
75
76#include <net/if.h>
77#include <netinet/in.h>
78#include <netinet/in_var.h>
79
80#include <nfs/nfs_lock.h>
81
82#ifdef KDTRACE_HOOKS
83#include <sys/dtrace_bsd.h>
84
85dtrace_nfsclient_accesscache_flush_probe_func_t
86 dtrace_nfscl_accesscache_flush_done_probe;
87uint32_t nfscl_accesscache_flush_done_id;
88
89dtrace_nfsclient_accesscache_get_probe_func_t
90 dtrace_nfscl_accesscache_get_hit_probe,
91 dtrace_nfscl_accesscache_get_miss_probe;
92uint32_t nfscl_accesscache_get_hit_id;
93uint32_t nfscl_accesscache_get_miss_id;
94
95dtrace_nfsclient_accesscache_load_probe_func_t
96 dtrace_nfscl_accesscache_load_done_probe;
97uint32_t nfscl_accesscache_load_done_id;
98#endif /* !KDTRACE_HOOKS */
99
100/* Defs */
101#define TRUE 1
102#define FALSE 0
103
104extern struct nfsstats newnfsstats;
105extern int nfsrv_useacl;
106MALLOC_DECLARE(M_NEWNFSREQ);
107
108/*
109 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
110 * calls are not in getblk() and brelse() so that they would not be necessary
111 * here.
112 */
113#ifndef B_VMIO
114#define vfs_busy_pages(bp, f)
115#endif
116
117static vop_read_t nfsfifo_read;
118static vop_write_t nfsfifo_write;
119static vop_close_t nfsfifo_close;
120static int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
121 struct thread *);
122static vop_lookup_t nfs_lookup;
123static vop_create_t nfs_create;
124static vop_mknod_t nfs_mknod;
125static vop_open_t nfs_open;
126static vop_pathconf_t nfs_pathconf;
127static vop_close_t nfs_close;
128static vop_access_t nfs_access;
129static vop_getattr_t nfs_getattr;
130static vop_setattr_t nfs_setattr;
131static vop_read_t nfs_read;
132static vop_fsync_t nfs_fsync;
133static vop_remove_t nfs_remove;
134static vop_link_t nfs_link;
135static vop_rename_t nfs_rename;
136static vop_mkdir_t nfs_mkdir;
137static vop_rmdir_t nfs_rmdir;
138static vop_symlink_t nfs_symlink;
139static vop_readdir_t nfs_readdir;
140static vop_strategy_t nfs_strategy;
141static vop_lock1_t nfs_lock1;
142static int nfs_lookitup(struct vnode *, char *, int,
143 struct ucred *, struct thread *, struct nfsnode **);
144static int nfs_sillyrename(struct vnode *, struct vnode *,
145 struct componentname *);
146static vop_access_t nfsspec_access;
147static vop_readlink_t nfs_readlink;
148static vop_print_t nfs_print;
149static vop_advlock_t nfs_advlock;
150static vop_advlockasync_t nfs_advlockasync;
151static vop_getacl_t nfs_getacl;
152static vop_setacl_t nfs_setacl;
153
154/*
155 * Global vfs data structures for nfs
156 */
157struct vop_vector newnfs_vnodeops = {
158 .vop_default = &default_vnodeops,
159 .vop_access = nfs_access,
160 .vop_advlock = nfs_advlock,
161 .vop_advlockasync = nfs_advlockasync,
162 .vop_close = nfs_close,
163 .vop_create = nfs_create,
164 .vop_fsync = nfs_fsync,
165 .vop_getattr = nfs_getattr,
166 .vop_getpages = ncl_getpages,
167 .vop_putpages = ncl_putpages,
168 .vop_inactive = ncl_inactive,
169 .vop_link = nfs_link,
170 .vop_lock1 = nfs_lock1,
171 .vop_lookup = nfs_lookup,
172 .vop_mkdir = nfs_mkdir,
173 .vop_mknod = nfs_mknod,
174 .vop_open = nfs_open,
175 .vop_pathconf = nfs_pathconf,
176 .vop_print = nfs_print,
177 .vop_read = nfs_read,
178 .vop_readdir = nfs_readdir,
179 .vop_readlink = nfs_readlink,
180 .vop_reclaim = ncl_reclaim,
181 .vop_remove = nfs_remove,
182 .vop_rename = nfs_rename,
183 .vop_rmdir = nfs_rmdir,
184 .vop_setattr = nfs_setattr,
185 .vop_strategy = nfs_strategy,
186 .vop_symlink = nfs_symlink,
187 .vop_write = ncl_write,
188 .vop_getacl = nfs_getacl,
189 .vop_setacl = nfs_setacl,
190};
191
192struct vop_vector newnfs_fifoops = {
193 .vop_default = &fifo_specops,
194 .vop_access = nfsspec_access,
195 .vop_close = nfsfifo_close,
196 .vop_fsync = nfs_fsync,
197 .vop_getattr = nfs_getattr,
198 .vop_inactive = ncl_inactive,
199 .vop_print = nfs_print,
200 .vop_read = nfsfifo_read,
201 .vop_reclaim = ncl_reclaim,
202 .vop_setattr = nfs_setattr,
203 .vop_write = nfsfifo_write,
204};
205
206static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
207 struct componentname *cnp, struct vattr *vap);
208static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
209 int namelen, struct ucred *cred, struct thread *td);
210static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp,
211 char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp,
212 char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td);
213static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
214 struct componentname *scnp, struct sillyrename *sp);
215
216/*
217 * Global variables
218 */
219#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
220
221SYSCTL_DECL(_vfs_nfs);
222
223static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
224SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
225 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
226
227static int nfs_prime_access_cache = 0;
228SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
229 &nfs_prime_access_cache, 0,
230 "Prime NFS ACCESS cache when fetching attributes");
231
232static int newnfs_commit_on_close = 0;
233SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
234 &newnfs_commit_on_close, 0, "write+commit on close, else only write");
235
236static int nfs_clean_pages_on_close = 1;
237SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
238 &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
239
240int newnfs_directio_enable = 0;
241SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
242 &newnfs_directio_enable, 0, "Enable NFS directio");
243
244int nfs_keep_dirty_on_error;
245SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_keep_dirty_on_error, CTLFLAG_RW,
246 &nfs_keep_dirty_on_error, 0, "Retry pageout if error returned");
247
248/*
249 * This sysctl allows other processes to mmap a file that has been opened
250 * O_DIRECT by a process. In general, having processes mmap the file while
251 * Direct IO is in progress can lead to data inconsistencies. But we allow
252 * it by default to prevent a denial of service: without it, a malicious
253 * user could open files O_DIRECT and thereby block other users from
254 * mmap'ing them. "Protected" environments where stricter consistency
255 * guarantees are required can disable this knob. The process that opened
256 * the file O_DIRECT cannot mmap() the file, because mmap'ed IO on an
257 * O_DIRECT open() is not meaningful.
258 */
259int newnfs_directio_allow_mmap = 1;
260SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
261 &newnfs_directio_allow_mmap, 0, "Enable mmap'ed IO on files with O_DIRECT opens");
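
/*
 * For example, a stricter environment could turn this knob off at
 * runtime (sysctl name derived from the SYSCTL_INT declaration just
 * above):
 *
 *	sysctl vfs.nfs.nfs_directio_allow_mmap=0
 */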
262
263#if 0
264SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
265 &newnfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
266
267SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
268 &newnfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
269#endif
270
271#define NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY \
272 | NFSACCESS_EXTEND | NFSACCESS_EXECUTE \
273 | NFSACCESS_DELETE | NFSACCESS_LOOKUP)
274
275/*
276 * SMP Locking Note:
277 * The list of locks after the description of the lock is the ordering
278 * of other locks acquired with the lock held.
279 * np->n_mtx : Protects the fields in the nfsnode.
280 VM Object Lock
281 VI_MTX (acquired indirectly)
282 * nmp->nm_mtx : Protects the fields in the nfsmount.
283 rep->r_mtx
284 * ncl_iod_mutex : Global lock, protects shared nfsiod state.
285 * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
286 nmp->nm_mtx
287 rep->r_mtx
288 * rep->r_mtx : Protects the fields in an nfsreq.
289 */
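
/*
 * Illustrative sketch only (kept out of the build): the "n_mtx before
 * VM object lock" ordering documented above, written out as code.
 * nfs_lock_order_example() is a hypothetical helper, not part of this
 * file's interface.
 */
#if 0
static void
nfs_lock_order_example(struct vnode *vp)
{
	struct nfsnode *np = VTONFS(vp);

	mtx_lock(&np->n_mtx);			/* nfsnode fields */
	VM_OBJECT_LOCK(vp->v_object);		/* ordered after n_mtx */
	/* ... examine size-related state here ... */
	VM_OBJECT_UNLOCK(vp->v_object);
	mtx_unlock(&np->n_mtx);
}
#endif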
290
291static int
292nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
293 struct ucred *cred, u_int32_t *retmode)
294{
295 int error = 0, attrflag, i, lrupos;
296 u_int32_t rmode;
297 struct nfsnode *np = VTONFS(vp);
298 struct nfsvattr nfsva;
299
300 error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag,
301 &rmode, NULL);
302 if (attrflag)
303 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
304 if (!error) {
305 lrupos = 0;
306 mtx_lock(&np->n_mtx);
307 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
308 if (np->n_accesscache[i].uid == cred->cr_uid) {
309 np->n_accesscache[i].mode = rmode;
310 np->n_accesscache[i].stamp = time_second;
311 break;
312 }
313 if (i > 0 && np->n_accesscache[i].stamp <
314 np->n_accesscache[lrupos].stamp)
315 lrupos = i;
316 }
317 if (i == NFS_ACCESSCACHESIZE) {
318 np->n_accesscache[lrupos].uid = cred->cr_uid;
319 np->n_accesscache[lrupos].mode = rmode;
320 np->n_accesscache[lrupos].stamp = time_second;
321 }
322 mtx_unlock(&np->n_mtx);
323 if (retmode != NULL)
324 *retmode = rmode;
325 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
326 } else if (NFS_ISV4(vp)) {
327 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
328 }
329#ifdef KDTRACE_HOOKS
330 if (error != 0)
331 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
332 error);
333#endif
334 return (error);
335}
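
/*
 * The cache update above uses a small fixed-size array with LRU
 * replacement: scan for the uid, track the slot with the oldest
 * stamp, and overwrite that slot on a miss.  A minimal stand-alone
 * sketch of the same policy (hypothetical names, kept out of the
 * build):
 */
#if 0
struct acc_ent { uid_t uid; u_int32_t mode; time_t stamp; };

static void
acc_update(struct acc_ent *cache, int n, uid_t uid, u_int32_t mode,
    time_t now)
{
	int i, lrupos = 0;

	for (i = 0; i < n; i++) {
		if (cache[i].uid == uid) {
			cache[i].mode = mode;	/* refresh existing entry */
			cache[i].stamp = now;
			return;
		}
		if (i > 0 && cache[i].stamp < cache[lrupos].stamp)
			lrupos = i;
	}
	cache[lrupos].uid = uid;		/* miss: evict the LRU slot */
	cache[lrupos].mode = mode;
	cache[lrupos].stamp = now;
}
#endif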
336
337/*
338 * nfs access vnode op.
339 * For nfs version 2, just return ok. File accesses may fail later.
340 * For nfs version 3 and 4, use the access rpc to check accessibility. If
341 * file modes are changed on the server, accesses might still fail later.
342 */
343static int
344nfs_access(struct vop_access_args *ap)
345{
346 struct vnode *vp = ap->a_vp;
347 int error = 0, i, gotahit;
348 u_int32_t mode, wmode, rmode;
349 int v34 = NFS_ISV34(vp);
350 struct nfsnode *np = VTONFS(vp);
351
352 /*
353 * Disallow write attempts on filesystems mounted read-only;
354 * unless the file is a socket, fifo, or a block or character
355 * device resident on the filesystem.
356 */
357 if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS |
358 VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL |
359 VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
360 switch (vp->v_type) {
361 case VREG:
362 case VDIR:
363 case VLNK:
364 return (EROFS);
365 default:
366 break;
367 }
368 }
369 /*
370 * For nfs v3 or v4, check to see if we have done this recently, and if
371 * so return our cached result instead of making an ACCESS call.
372 * If not, do an access rpc; for nfs v2 you are stuck emulating
373 * ufs_access() locally using the vattr. This may not be correct,
374 * since the server may apply other access criteria such as
375 * client uid-->server uid mapping that we do not know about.
376 */
377 if (v34) {
378 if (ap->a_accmode & VREAD)
379 mode = NFSACCESS_READ;
380 else
381 mode = 0;
382 if (vp->v_type != VDIR) {
383 if (ap->a_accmode & VWRITE)
384 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
385 if (ap->a_accmode & VAPPEND)
386 mode |= NFSACCESS_EXTEND;
387 if (ap->a_accmode & VEXEC)
388 mode |= NFSACCESS_EXECUTE;
389 if (ap->a_accmode & VDELETE)
390 mode |= NFSACCESS_DELETE;
391 } else {
392 if (ap->a_accmode & VWRITE)
393 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
394 if (ap->a_accmode & VAPPEND)
395 mode |= NFSACCESS_EXTEND;
396 if (ap->a_accmode & VEXEC)
397 mode |= NFSACCESS_LOOKUP;
398 if (ap->a_accmode & VDELETE)
399 mode |= NFSACCESS_DELETE;
400 if (ap->a_accmode & VDELETE_CHILD)
401 mode |= NFSACCESS_MODIFY;
402 }
403 /* XXX safety belt, only make blanket request if caching */
404 if (nfsaccess_cache_timeout > 0) {
405 wmode = NFSACCESS_READ | NFSACCESS_MODIFY |
406 NFSACCESS_EXTEND | NFSACCESS_EXECUTE |
407 NFSACCESS_DELETE | NFSACCESS_LOOKUP;
408 } else {
409 wmode = mode;
410 }
411
412 /*
413 * Does our cached result allow us to give a definite yes to
414 * this request?
415 */
416 gotahit = 0;
417 mtx_lock(&np->n_mtx);
418 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
419 if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
420 if (time_second < (np->n_accesscache[i].stamp
421 + nfsaccess_cache_timeout) &&
422 (np->n_accesscache[i].mode & mode) == mode) {
423 NFSINCRGLOBAL(newnfsstats.accesscache_hits);
424 gotahit = 1;
425 }
426 break;
427 }
428 }
429 mtx_unlock(&np->n_mtx);
430#ifdef KDTRACE_HOOKS
431 if (gotahit != 0)
432 KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
433 ap->a_cred->cr_uid, mode);
434 else
435 KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
436 ap->a_cred->cr_uid, mode);
437#endif
438 if (gotahit == 0) {
439 /*
440 * Either a no, or a don't know. Go to the wire.
441 */
442 NFSINCRGLOBAL(newnfsstats.accesscache_misses);
443 error = nfs34_access_otw(vp, wmode, ap->a_td,
444 ap->a_cred, &rmode);
445 if (!error &&
446 (rmode & mode) != mode)
447 error = EACCES;
448 }
449 return (error);
450 } else {
451 if ((error = nfsspec_access(ap)) != 0) {
452 return (error);
453 }
454 /*
455 * Attempt to prevent a mapped root from accessing a file
456 * which it shouldn't. We try to read a byte from the file
457 * if the user is root and the file is not zero length.
458 * After calling nfsspec_access, we should have the correct
459 * file size cached.
460 */
461 mtx_lock(&np->n_mtx);
462 if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
463 && VTONFS(vp)->n_size > 0) {
464 struct iovec aiov;
465 struct uio auio;
466 char buf[1];
467
468 mtx_unlock(&np->n_mtx);
469 aiov.iov_base = buf;
470 aiov.iov_len = 1;
471 auio.uio_iov = &aiov;
472 auio.uio_iovcnt = 1;
473 auio.uio_offset = 0;
474 auio.uio_resid = 1;
475 auio.uio_segflg = UIO_SYSSPACE;
476 auio.uio_rw = UIO_READ;
477 auio.uio_td = ap->a_td;
478
479 if (vp->v_type == VREG)
480 error = ncl_readrpc(vp, &auio, ap->a_cred);
481 else if (vp->v_type == VDIR) {
482 char* bp;
483 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
484 aiov.iov_base = bp;
485 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
486 error = ncl_readdirrpc(vp, &auio, ap->a_cred,
487 ap->a_td);
488 free(bp, M_TEMP);
489 } else if (vp->v_type == VLNK)
490 error = ncl_readlinkrpc(vp, &auio, ap->a_cred);
491 else
492 error = EACCES;
493 } else
494 mtx_unlock(&np->n_mtx);
495 return (error);
496 }
497}
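
/*
 * The cached-result test above answers a definite "yes" only when the
 * entry is still within nfsaccess_cache_timeout and every requested
 * bit is present in the cached rights mask.  A hedged restatement of
 * that predicate (hypothetical helper, kept out of the build):
 */
#if 0
static int
access_cache_says_yes(u_int32_t cached_mode, u_int32_t wanted_mode,
    time_t stamp, time_t now, int timeout)
{
	/* Fresh entry that covers every bit we asked for? */
	return (now < stamp + timeout &&
	    (cached_mode & wanted_mode) == wanted_mode);
}
#endif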
498
499
500/*
501 * nfs open vnode op
502 * Check to see if the type is ok
503 * and that deletion is not in progress.
504 * For paged in text files, you will need to flush the page cache
505 * if consistency is lost.
506 */
507/* ARGSUSED */
508static int
509nfs_open(struct vop_open_args *ap)
510{
511 struct vnode *vp = ap->a_vp;
512 struct nfsnode *np = VTONFS(vp);
513 struct vattr vattr;
514 int error;
515 int fmode = ap->a_mode;
516 struct ucred *cred;
517
518 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
519 return (EOPNOTSUPP);
520
521 /*
522 * For NFSv4, we need to do the Open Op before cache validation,
523 * so that we conform to RFC3530 Sec. 9.3.1.
524 */
525 if (NFS_ISV4(vp)) {
526 error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td);
527 if (error) {
528 error = nfscl_maperr(ap->a_td, error, (uid_t)0,
529 (gid_t)0);
530 return (error);
531 }
532 }
533
534 /*
535 * Now, if this Open will be doing reading, re-validate/flush the
536 * cache, so that Close/Open coherency is maintained.
537 */
538 mtx_lock(&np->n_mtx);
539 if (np->n_flag & NMODIFIED) {
540 mtx_unlock(&np->n_mtx);
541 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
542 if (error == EINTR || error == EIO) {
543 if (NFS_ISV4(vp))
544 (void) nfsrpc_close(vp, 0, ap->a_td);
545 return (error);
546 }
547 mtx_lock(&np->n_mtx);
548 np->n_attrstamp = 0;
549 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
550 if (vp->v_type == VDIR)
551 np->n_direofoffset = 0;
552 mtx_unlock(&np->n_mtx);
553 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
554 if (error) {
555 if (NFS_ISV4(vp))
556 (void) nfsrpc_close(vp, 0, ap->a_td);
557 return (error);
558 }
559 mtx_lock(&np->n_mtx);
560 np->n_mtime = vattr.va_mtime;
561 if (NFS_ISV4(vp))
562 np->n_change = vattr.va_filerev;
563 } else {
564 mtx_unlock(&np->n_mtx);
565 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
566 if (error) {
567 if (NFS_ISV4(vp))
568 (void) nfsrpc_close(vp, 0, ap->a_td);
569 return (error);
570 }
571 mtx_lock(&np->n_mtx);
572 if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
573 NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
574 if (vp->v_type == VDIR)
575 np->n_direofoffset = 0;
576 mtx_unlock(&np->n_mtx);
577 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
578 if (error == EINTR || error == EIO) {
579 if (NFS_ISV4(vp))
580 (void) nfsrpc_close(vp, 0, ap->a_td);
581 return (error);
582 }
583 mtx_lock(&np->n_mtx);
584 np->n_mtime = vattr.va_mtime;
585 if (NFS_ISV4(vp))
586 np->n_change = vattr.va_filerev;
587 }
588 }
589
590 /*
591 * If the object has >= 1 O_DIRECT active opens, we disable caching.
592 */
593 if (newnfs_directio_enable && (fmode & O_DIRECT) &&
594 (vp->v_type == VREG)) {
595 if (np->n_directio_opens == 0) {
596 mtx_unlock(&np->n_mtx);
597 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
598 if (error) {
599 if (NFS_ISV4(vp))
600 (void) nfsrpc_close(vp, 0, ap->a_td);
601 return (error);
602 }
603 mtx_lock(&np->n_mtx);
604 np->n_flag |= NNONCACHE;
605 }
606 np->n_directio_opens++;
607 }
608
609 /*
610 * If this is an open for writing, capture a reference to the
611 * credentials, so they can be used by ncl_putpages(). Using
612 * these write credentials is preferable to the credentials of
613 * whatever thread happens to be doing the VOP_PUTPAGES() since
614 * the write RPCs are less likely to fail with EACCES.
615 */
616 if ((fmode & FWRITE) != 0) {
617 cred = np->n_writecred;
618 np->n_writecred = crhold(ap->a_cred);
619 } else
620 cred = NULL;
621 mtx_unlock(&np->n_mtx);
622 if (cred != NULL)
623 crfree(cred);
624 vnode_create_vobject(vp, vattr.va_size, ap->a_td);
625 return (0);
626}
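
/*
 * The close/open coherency logic above reduces to a single staleness
 * test: if the server's change attribute (v4) or mtime (v2/3) no
 * longer matches what we cached, the buffer cache must be flushed.
 * A sketch of that predicate under the same assumptions (hypothetical
 * helper, kept out of the build):
 */
#if 0
static int
nfs_cache_is_stale(struct nfsnode *np, struct vattr *vap, int isv4)
{
	if (isv4 && np->n_change != vap->va_filerev)
		return (1);
	return (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vap->va_mtime));
}
#endif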
627
628/*
629 * nfs close vnode op
630 * What an NFS client should do upon close after writing is a debatable issue.
631 * Most NFS clients push delayed writes to the server upon close, basically for
632 * two reasons:
633 * 1 - So that any write errors may be reported back to the client process
634 * doing the close system call. By far the two most likely errors are
635 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
636 * 2 - To put a worst case upper bound on cache inconsistency between
637 * multiple clients for the file.
638 * There is also a consistency problem for Version 2 of the protocol w.r.t.
639 * not being able to tell if other clients are writing a file concurrently,
640 * since there is no way of knowing if the changed modify time in the reply
641 * is only due to the write for this client.
642 * (NFS Version 3 provides weak cache consistency data in the reply that
643 * should be sufficient to detect and handle this case.)
644 *
645 * The current code does the following:
646 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
647 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
648 * or commit them (this satisfies 1 and 2 except for the
649 * case where the server crashes after this close but
650 * before the commit RPC, which is felt to be "good
651 *                     enough"). Changing the last argument to ncl_flush() to
652 * a 1 would force a commit operation, if it is felt a
653 * commit is necessary now.
654 * for NFS Version 4 - flush the dirty buffers and commit them, if
655 * nfscl_mustflush() says this is necessary.
656 * It is necessary if there is no write delegation held,
657 * in order to satisfy open/close coherency.
658 * If the file isn't cached on local stable storage,
659 * it may be necessary in order to detect "out of space"
660 * errors from the server, if the write delegation
661 * issued by the server doesn't allow the file to grow.
662 */
663/* ARGSUSED */
664static int
665nfs_close(struct vop_close_args *ap)
666{
667 struct vnode *vp = ap->a_vp;
668 struct nfsnode *np = VTONFS(vp);
669 struct nfsvattr nfsva;
670 struct ucred *cred;
671 int error = 0, ret, localcred = 0;
672 int fmode = ap->a_fflag;
673
674 if ((vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF))
675 return (0);
676 /*
677 * During shutdown, a_cred isn't valid, so just use root.
678 */
679 if (ap->a_cred == NOCRED) {
680 cred = newnfs_getcred();
681 localcred = 1;
682 } else {
683 cred = ap->a_cred;
684 }
685 if (vp->v_type == VREG) {
686 /*
687 * Examine and clean dirty pages, regardless of NMODIFIED.
688 * This closes a major hole in close-to-open consistency.
689 * We want to push out all dirty pages (and buffers) on
690 * close, regardless of whether they were dirtied by
691 * mmap'ed writes or via write().
692 */
693 if (nfs_clean_pages_on_close && vp->v_object) {
694 VM_OBJECT_LOCK(vp->v_object);
695 vm_object_page_clean(vp->v_object, 0, 0, 0);
696 VM_OBJECT_UNLOCK(vp->v_object);
697 }
698 mtx_lock(&np->n_mtx);
699 if (np->n_flag & NMODIFIED) {
700 mtx_unlock(&np->n_mtx);
701 if (NFS_ISV3(vp)) {
702 /*
703 * Under NFSv3 we have dirty buffers to dispose of. We
704 * must flush them to the NFS server. We have the option
705 * of waiting all the way through the commit rpc or just
706 * waiting for the initial write. The default is to only
707 * wait through the initial write so the data is in the
708 * server's cache, which is roughly similar to the state
709 * a standard disk subsystem leaves the file in on close().
710 *
711 * We cannot clear the NMODIFIED bit in np->n_flag due to
712 * potential races with other processes, and certainly
713 * cannot clear it if we don't commit.
714 * These races occur when there is no longer the old
715 * traditional vnode locking implemented for Vnode Ops.
716 */
717 int cm = newnfs_commit_on_close ? 1 : 0;
718 error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td, cm, 0);
719 /* np->n_flag &= ~NMODIFIED; */
720 } else if (NFS_ISV4(vp)) {
721 if (nfscl_mustflush(vp) != 0) {
722 int cm = newnfs_commit_on_close ? 1 : 0;
723 error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td,
724 cm, 0);
725 /*
726 * as above w.r.t races when clearing
727 * NMODIFIED.
728 * np->n_flag &= ~NMODIFIED;
729 */
730 }
731 } else
732 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
733 mtx_lock(&np->n_mtx);
734 }
735 /*
736 * Invalidate the attribute cache in all cases.
737 * An open is going to fetch fresh attrs anyway; other procs
738 * on this node that have file open will be forced to do an
739 * otw attr fetch, but this is safe.
740 * --> A user found that their RPC count dropped by 20% when
741 * this was commented out and I can't see any requirement
742 * for it, so I've disabled it when negative lookups are
743 * enabled. (What does this have to do with negative lookup
744 * caching? Well nothing, except it was reported by the
745 * same user that needed negative lookup caching and I wanted
746 * there to be a way to disable it to see if it
747 * is the cause of some caching/coherency issue that might
748 * crop up.)
749 */
750 if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0) {
751 np->n_attrstamp = 0;
752 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
753 }
754 if (np->n_flag & NWRITEERR) {
755 np->n_flag &= ~NWRITEERR;
756 error = np->n_error;
757 }
758 mtx_unlock(&np->n_mtx);
759 }
760
761 if (NFS_ISV4(vp)) {
762 /*
763 * Get attributes so "change" is up to date.
764 */
765 if (error == 0 && nfscl_mustflush(vp) != 0) {
766 ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva,
767 NULL);
768 if (!ret) {
769 np->n_change = nfsva.na_filerev;
770 (void) nfscl_loadattrcache(&vp, &nfsva, NULL,
771 NULL, 0, 0);
772 }
773 }
774
775 /*
776 * and do the close.
777 */
778 ret = nfsrpc_close(vp, 0, ap->a_td);
779 if (!error && ret)
780 error = ret;
781 if (error)
782 error = nfscl_maperr(ap->a_td, error, (uid_t)0,
783 (gid_t)0);
784 }
785 if (newnfs_directio_enable)
786 KASSERT((np->n_directio_asyncwr == 0),
787 ("nfs_close: dirty unflushed (%d) directio buffers\n",
788 np->n_directio_asyncwr));
789 if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
790 mtx_lock(&np->n_mtx);
791 KASSERT((np->n_directio_opens > 0),
792 ("nfs_close: unexpected value (0) of n_directio_opens\n"));
793 np->n_directio_opens--;
794 if (np->n_directio_opens == 0)
795 np->n_flag &= ~NNONCACHE;
796 mtx_unlock(&np->n_mtx);
797 }
798 if (localcred)
799 NFSFREECRED(cred);
800 return (error);
801}
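
/*
 * The per-version flush policy described in the block comment above
 * nfs_close(), reduced to one dispatch routine.  Sketch only
 * (hypothetical helper, kept out of the build); "cm" mirrors the
 * newnfs_commit_on_close knob.
 */
#if 0
static int
nfs_close_flush_policy(struct vnode *vp, struct ucred *cred,
    struct thread *td, int cm)
{
	if (NFS_ISV3(vp))
		return (ncl_flush(vp, MNT_WAIT, cred, td, cm, 0));
	if (NFS_ISV4(vp))
		return (nfscl_mustflush(vp) != 0 ?
		    ncl_flush(vp, MNT_WAIT, cred, td, cm, 0) : 0);
	/* NFSv2: play it safe, flush and invalidate everything. */
	return (ncl_vinvalbuf(vp, V_SAVE, td, 1));
}
#endif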
802
803/*
804 * nfs getattr call from vfs.
805 */
806static int
807nfs_getattr(struct vop_getattr_args *ap)
808{
809 struct vnode *vp = ap->a_vp;
810 struct thread *td = curthread; /* XXX */
811 struct nfsnode *np = VTONFS(vp);
812 int error = 0;
813 struct nfsvattr nfsva;
814 struct vattr *vap = ap->a_vap;
815 struct vattr vattr;
816
817 /*
818 * Update local times for special files.
819 */
820 mtx_lock(&np->n_mtx);
821 if (np->n_flag & (NACC | NUPD))
822 np->n_flag |= NCHG;
823 mtx_unlock(&np->n_mtx);
824 /*
825 * First look in the cache.
826 */
827 if (ncl_getattrcache(vp, &vattr) == 0) {
828 vap->va_type = vattr.va_type;
829 vap->va_mode = vattr.va_mode;
830 vap->va_nlink = vattr.va_nlink;
831 vap->va_uid = vattr.va_uid;
832 vap->va_gid = vattr.va_gid;
833 vap->va_fsid = vattr.va_fsid;
834 vap->va_fileid = vattr.va_fileid;
835 vap->va_size = vattr.va_size;
836 vap->va_blocksize = vattr.va_blocksize;
837 vap->va_atime = vattr.va_atime;
838 vap->va_mtime = vattr.va_mtime;
839 vap->va_ctime = vattr.va_ctime;
840 vap->va_gen = vattr.va_gen;
841 vap->va_flags = vattr.va_flags;
842 vap->va_rdev = vattr.va_rdev;
843 vap->va_bytes = vattr.va_bytes;
844 vap->va_filerev = vattr.va_filerev;
845 /*
846 * Get the local modify time for the case of a write
847 * delegation.
848 */
849 nfscl_deleggetmodtime(vp, &vap->va_mtime);
850 return (0);
851 }
852
853 if (NFS_ISV34(vp) && nfs_prime_access_cache &&
854 nfsaccess_cache_timeout > 0) {
855 NFSINCRGLOBAL(newnfsstats.accesscache_misses);
856 nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
857 if (ncl_getattrcache(vp, ap->a_vap) == 0) {
858 nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
859 return (0);
860 }
861 }
862 error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL);
863 if (!error)
864 error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0);
865 if (!error) {
866 /*
867 * Get the local modify time for the case of a write
868 * delegation.
869 */
870 nfscl_deleggetmodtime(vp, &vap->va_mtime);
871 } else if (NFS_ISV4(vp)) {
872 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
873 }
874 return (error);
875}
876
877/*
878 * nfs setattr call.
879 */
880static int
881nfs_setattr(struct vop_setattr_args *ap)
882{
883 struct vnode *vp = ap->a_vp;
884 struct nfsnode *np = VTONFS(vp);
885 struct thread *td = curthread; /* XXX */
886 struct vattr *vap = ap->a_vap;
887 int error = 0;
888 u_quad_t tsize;
889
890#ifndef nolint
891 tsize = (u_quad_t)0;
892#endif
893
894 /*
895 * Setting of flags and marking of atimes are not supported.
896 */
897 if (vap->va_flags != VNOVAL)
898 return (EOPNOTSUPP);
899
900 /*
901 * Disallow write attempts if the filesystem is mounted read-only.
902 */
903 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
904 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
905 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
906 (vp->v_mount->mnt_flag & MNT_RDONLY))
907 return (EROFS);
908 if (vap->va_size != VNOVAL) {
909 switch (vp->v_type) {
910 case VDIR:
911 return (EISDIR);
912 case VCHR:
913 case VBLK:
914 case VSOCK:
915 case VFIFO:
916 if (vap->va_mtime.tv_sec == VNOVAL &&
917 vap->va_atime.tv_sec == VNOVAL &&
918 vap->va_mode == (mode_t)VNOVAL &&
919 vap->va_uid == (uid_t)VNOVAL &&
920 vap->va_gid == (gid_t)VNOVAL)
921 return (0);
922 vap->va_size = VNOVAL;
923 break;
924 default:
925 /*
926 * Disallow write attempts if the filesystem is
927 * mounted read-only.
928 */
929 if (vp->v_mount->mnt_flag & MNT_RDONLY)
930 return (EROFS);
931 /*
932 * We run vnode_pager_setsize() early (why?), so
933 * we must set np->n_size now to avoid vinvalbuf
934 * V_SAVE races that might set the size to a
935 * lower value.
936 */
937 mtx_lock(&np->n_mtx);
938 tsize = np->n_size;
939 mtx_unlock(&np->n_mtx);
940 error = ncl_meta_setsize(vp, ap->a_cred, td,
941 vap->va_size);
942 mtx_lock(&np->n_mtx);
943 if (np->n_flag & NMODIFIED) {
944 tsize = np->n_size;
945 mtx_unlock(&np->n_mtx);
946 if (vap->va_size == 0)
947 error = ncl_vinvalbuf(vp, 0, td, 1);
948 else
949 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
950 if (error) {
951 vnode_pager_setsize(vp, tsize);
952 return (error);
953 }
954 /*
955 * Call nfscl_delegmodtime() to set the modify time
956 * locally, as required.
957 */
958 nfscl_delegmodtime(vp);
959 } else
960 mtx_unlock(&np->n_mtx);
961 /*
962 * np->n_size has already been set to vap->va_size
963 * in ncl_meta_setsize(). We must set it again since
964 * nfs_loadattrcache() could be called through
965 * ncl_meta_setsize() and could modify np->n_size.
966 */
967 mtx_lock(&np->n_mtx);
968 np->n_vattr.na_size = np->n_size = vap->va_size;
969 mtx_unlock(&np->n_mtx);
970 }
971 } else {
972 mtx_lock(&np->n_mtx);
973 if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) &&
974 (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
975 mtx_unlock(&np->n_mtx);
976 if ((error = ncl_vinvalbuf(vp, V_SAVE, td, 1)) != 0 &&
977 (error == EINTR || error == EIO))
978 return (error);
979 } else
980 mtx_unlock(&np->n_mtx);
981 }
982 error = nfs_setattrrpc(vp, vap, ap->a_cred, td);
983 if (error && vap->va_size != VNOVAL) {
984 mtx_lock(&np->n_mtx);
985 np->n_size = np->n_vattr.na_size = tsize;
986 vnode_pager_setsize(vp, tsize);
987 mtx_unlock(&np->n_mtx);
988 }
989 return (error);
990}
991
992/*
993 * Do an nfs setattr rpc.
994 */
995static int
996nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
997 struct thread *td)
998{
999 struct nfsnode *np = VTONFS(vp);
1000 int error, ret, attrflag, i;
1001 struct nfsvattr nfsva;
1002
1003 if (NFS_ISV34(vp)) {
1004 mtx_lock(&np->n_mtx);
1005 for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
1006 np->n_accesscache[i].stamp = 0;
1007 np->n_flag |= NDELEGMOD;
1008 mtx_unlock(&np->n_mtx);
1009 KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
1010 }
1011 error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag,
1012 NULL);
1013 if (attrflag) {
1014 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
1015 if (ret && !error)
1016 error = ret;
1017 }
1018 if (error && NFS_ISV4(vp))
1019 error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid);
1020 return (error);
1021}
1022
1023/*
1024 * nfs lookup call, one step at a time...
1025 * First look in cache
1026 * If not found, unlock the directory nfsnode and do the rpc
1027 */
1028static int
1029nfs_lookup(struct vop_lookup_args *ap)
1030{
1031 struct componentname *cnp = ap->a_cnp;
1032 struct vnode *dvp = ap->a_dvp;
1033 struct vnode **vpp = ap->a_vpp;
1034 struct mount *mp = dvp->v_mount;
1035 int flags = cnp->cn_flags;
1036 struct vnode *newvp;
1037 struct nfsmount *nmp;
1038 struct nfsnode *np, *newnp;
1039 int error = 0, attrflag, dattrflag, ltype, ncticks;
1040 struct thread *td = cnp->cn_thread;
1041 struct nfsfh *nfhp;
1042 struct nfsvattr dnfsva, nfsva;
1043 struct vattr vattr;
1044 struct timespec nctime;
1045
1046 *vpp = NULLVP;
1047 if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
1048 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
1049 return (EROFS);
1050 if (dvp->v_type != VDIR)
1051 return (ENOTDIR);
1052 nmp = VFSTONFS(mp);
1053 np = VTONFS(dvp);
1054
1055 /* For NFSv4, wait until any remove is done. */
1056 mtx_lock(&np->n_mtx);
1057 while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
1058 np->n_flag |= NREMOVEWANT;
1059 (void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
1060 }
1061 mtx_unlock(&np->n_mtx);
1062
1063 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0)
1064 return (error);
1065 error = cache_lookup(dvp, vpp, cnp, &nctime, &ncticks);
1066 if (error > 0 && error != ENOENT)
1067 return (error);
1068 if (error == -1) {
1069 /*
1070 * Lookups of "." are special and always return the
1071 * current directory. cache_lookup() already handles
1072 * associated locking bookkeeping, etc.
1073 */
1074 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
1075 /* XXX: Is this really correct? */
1076 if (cnp->cn_nameiop != LOOKUP &&
1077 (flags & ISLASTCN))
1078 cnp->cn_flags |= SAVENAME;
1079 return (0);
1080 }
1081
1082 /*
1083 * We only accept a positive hit in the cache if the
1084 * change time of the file matches our cached copy.
1085 * Otherwise, we discard the cache entry and fallback
1086 * to doing a lookup RPC. We also only trust cache
1087 * entries for less than nm_nametimeo seconds.
1088 *
1089 * To better handle stale file handles and attributes,
1090 * clear the attribute cache of this node if it is a
1091 * leaf component, part of an open() call, and not
1092 * locally modified before fetching the attributes.
1093 * This should allow stale file handles to be detected
1094 * here where we can fall back to a LOOKUP RPC to
1095 * recover rather than having nfs_open() detect the
1096 * stale file handle and failing open(2) with ESTALE.
1097 */
1098 newvp = *vpp;
1099 newnp = VTONFS(newvp);
1100 if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
1101 (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
1102 !(newnp->n_flag & NMODIFIED)) {
1103 mtx_lock(&newnp->n_mtx);
1104 newnp->n_attrstamp = 0;
1105 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
1106 mtx_unlock(&newnp->n_mtx);
1107 }
1108 if (nfscl_nodeleg(newvp, 0) == 0 ||
1109 ((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
1110 VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
1111 timespeccmp(&vattr.va_ctime, &nctime, ==))) {
1112 NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
1113 if (cnp->cn_nameiop != LOOKUP &&
1114 (flags & ISLASTCN))
1115 cnp->cn_flags |= SAVENAME;
1116 return (0);
1117 }
1118 cache_purge(newvp);
1119 if (dvp != newvp)
1120 vput(newvp);
1121 else
1122 vrele(newvp);
1123 *vpp = NULLVP;
1124 } else if (error == ENOENT) {
1125 if (dvp->v_iflag & VI_DOOMED)
1126 return (ENOENT);
1127 /*
1128 * We only accept a negative hit in the cache if the
1129 * modification time of the parent directory matches
1130 * the cached copy in the name cache entry.
1131 * Otherwise, we discard all of the negative cache
1132 * entries for this directory. We also only trust
1133 * negative cache entries for up to nm_negnametimeo
1134 * seconds.
1135 */
1136 if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) &&
1137 VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
1138 timespeccmp(&vattr.va_mtime, &nctime, ==)) {
1139 NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
1140 return (ENOENT);
1141 }
1142 cache_purge_negative(dvp);
1143 }
1144
1145 error = 0;
1146 newvp = NULLVP;
1147 NFSINCRGLOBAL(newnfsstats.lookupcache_misses);
1148 error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1149 cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
1150 NULL);
1151 if (dattrflag)
1152 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
1153 if (error) {
1154 if (newvp != NULLVP) {
1155 vput(newvp);
1156 *vpp = NULLVP;
1157 }
1158
1159 if (error != ENOENT) {
1160 if (NFS_ISV4(dvp))
1161 error = nfscl_maperr(td, error, (uid_t)0,
1162 (gid_t)0);
1163 return (error);
1164 }
1165
1166 /* The requested file was not found. */
1167 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1168 (flags & ISLASTCN)) {
1169 /*
1170 * XXX: UFS does a full VOP_ACCESS(dvp,
1171 * VWRITE) here instead of just checking
1172 * MNT_RDONLY.
1173 */
1174 if (mp->mnt_flag & MNT_RDONLY)
1175 return (EROFS);
1176 cnp->cn_flags |= SAVENAME;
1177 return (EJUSTRETURN);
1178 }
1179
1180 if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE &&
1181 dattrflag) {
1182 /*
1183 * Cache the modification time of the parent
1184 * directory from the post-op attributes in
1185 * the name cache entry. The negative cache
1186 * entry will be ignored once the directory
1187 * has changed. Don't bother adding the entry
1188 * if the directory has already changed.
1189 */
1190 mtx_lock(&np->n_mtx);
1191 if (timespeccmp(&np->n_vattr.na_mtime,
1192 &dnfsva.na_mtime, ==)) {
1193 mtx_unlock(&np->n_mtx);
1194 cache_enter_time(dvp, NULL, cnp,
1195 &dnfsva.na_mtime, NULL);
1196 } else
1197 mtx_unlock(&np->n_mtx);
1198 }
1199 return (ENOENT);
1200 }
1201
1202 /*
1203 * Handle RENAME case...
1204 */
1205 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
1206 if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
1207 FREE((caddr_t)nfhp, M_NFSFH);
1208 return (EISDIR);
1209 }
1210 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
1211 LK_EXCLUSIVE);
1212 if (error)
1213 return (error);
1214 newvp = NFSTOV(np);
1215 if (attrflag)
1216 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1217 0, 1);
1218 *vpp = newvp;
1219 cnp->cn_flags |= SAVENAME;
1220 return (0);
1221 }
1222
1223 if (flags & ISDOTDOT) {
1224 ltype = NFSVOPISLOCKED(dvp);
1225 error = vfs_busy(mp, MBF_NOWAIT);
1226 if (error != 0) {
1227 vfs_ref(mp);
1228 NFSVOPUNLOCK(dvp, 0);
1229 error = vfs_busy(mp, 0);
1230 NFSVOPLOCK(dvp, ltype | LK_RETRY);
1231 vfs_rel(mp);
1232 if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
1233 vfs_unbusy(mp);
1234 error = ENOENT;
1235 }
1236 if (error != 0)
1237 return (error);
1238 }
1239 NFSVOPUNLOCK(dvp, 0);
1240 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
1241 cnp->cn_lkflags);
1242 if (error == 0)
1243 newvp = NFSTOV(np);
1244 vfs_unbusy(mp);
1245 if (newvp != dvp)
1246 NFSVOPLOCK(dvp, ltype | LK_RETRY);
1247 if (dvp->v_iflag & VI_DOOMED) {
1248 if (error == 0) {
1249 if (newvp == dvp)
1250 vrele(newvp);
1251 else
1252 vput(newvp);
1253 }
1254 error = ENOENT;
1255 }
1256 if (error != 0)
1257 return (error);
1258 if (attrflag)
1259 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1260 0, 1);
1261 } else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
1262 FREE((caddr_t)nfhp, M_NFSFH);
1263 VREF(dvp);
1264 newvp = dvp;
1265 if (attrflag)
1266 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1267 0, 1);
1268 } else {
1269 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
1270 cnp->cn_lkflags);
1271 if (error)
1272 return (error);
1273 newvp = NFSTOV(np);
1274 if (attrflag)
1275 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1276 0, 1);
1277 else if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
1278 !(np->n_flag & NMODIFIED)) {
1279 /*
1280 * Flush the attribute cache when opening a
1281 * leaf node to ensure that fresh attributes
1282 * are fetched in nfs_open() since we did not
1283 * fetch attributes from the LOOKUP reply.
1284 */
1285 mtx_lock(&np->n_mtx);
1286 np->n_attrstamp = 0;
1287 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
1288 mtx_unlock(&np->n_mtx);
1289 }
1290 }
1291 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
1292 cnp->cn_flags |= SAVENAME;
1293 if ((cnp->cn_flags & MAKEENTRY) &&
1294 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) &&
1295 attrflag != 0 && (newvp->v_type != VDIR || dattrflag != 0))
1296 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
1297 newvp->v_type != VDIR ? NULL : &dnfsva.na_ctime);
1298 *vpp = newvp;
1299 return (0);
1300}
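
/*
 * A positive name-cache hit in nfs_lookup() is trusted only while the
 * entry is younger than nm_nametimeo and the file's ctime still
 * matches the timestamp recorded by cache_enter_time().  That test,
 * restated as a stand-alone predicate (hypothetical helper, kept out
 * of the build):
 */
#if 0
static int
nfs_ncache_hit_valid(struct nfsmount *nmp, int ncticks,
    struct timespec *nctime, struct vattr *vap)
{
	return ((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
	    timespeccmp(&vap->va_ctime, nctime, ==));
}
#endif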
1301
1302/*
1303 * nfs read call.
1304 * Just call ncl_bioread() to do the work.
1305 */
1306static int
1307nfs_read(struct vop_read_args *ap)
1308{
1309 struct vnode *vp = ap->a_vp;
1310
1311 switch (vp->v_type) {
1312 case VREG:
1313 return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
1314 case VDIR:
1315 return (EISDIR);
1316 default:
1317 return (EOPNOTSUPP);
1318 }
1319}
1320
1321/*
1322 * nfs readlink call
1323 */
1324static int
1325nfs_readlink(struct vop_readlink_args *ap)
1326{
1327 struct vnode *vp = ap->a_vp;
1328
1329 if (vp->v_type != VLNK)
1330 return (EINVAL);
1331 return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred));
1332}
1333
1334/*
1335 * Do a readlink rpc.
1336 * Called by ncl_doio() from below the buffer cache.
1337 */
1338int
1339ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1340{
1341 int error, ret, attrflag;
1342 struct nfsvattr nfsva;
1343
1344 error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva,
1345 &attrflag, NULL);
1346 if (attrflag) {
1347 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
1348 if (ret && !error)
1349 error = ret;
1350 }
1351 if (error && NFS_ISV4(vp))
1352 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
1353 return (error);
1354}
1355
1356/*
1357 * nfs read rpc call
1358 * Ditto above
1359 */
1360int
1361ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1362{
1363 int error, ret, attrflag;
1364 struct nfsvattr nfsva;
1365
1366 error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva, &attrflag,
1367 NULL);
1368 if (attrflag) {
1369 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
1370 if (ret && !error)
1371 error = ret;
1372 }
1373 if (error && NFS_ISV4(vp))
1374 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
1375 return (error);
1376}
1377
1378/*
1379 * nfs write call
1380 */
1381int
1382ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
1383 int *iomode, int *must_commit, int called_from_strategy)
1384{
1385 struct nfsvattr nfsva;
1386 int error = 0, attrflag, ret;
1387
1388 error = nfsrpc_write(vp, uiop, iomode, must_commit, cred,
1389 uiop->uio_td, &nfsva, &attrflag, NULL, called_from_strategy);
1390 if (attrflag) {
1391 if (VTONFS(vp)->n_flag & ND_NFSV4)
1392 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1,
1393 1);
1394 else
1395 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
1396 1);
1397 if (ret && !error)
1398 error = ret;
1399 }
1400 if (DOINGASYNC(vp))
1401 *iomode = NFSWRITE_FILESYNC;
1402 if (error && NFS_ISV4(vp))
1403 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
1404 return (error);
1405}
1406
1407/*
1408 * nfs mknod rpc
1409 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1410 * mode set to specify the file type and the size field used for rdev.
1411 */
1412static int
1413nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1414 struct vattr *vap)
1415{
1416 struct nfsvattr nfsva, dnfsva;
1417 struct vnode *newvp = NULL;
1418 struct nfsnode *np = NULL, *dnp;
1419 struct nfsfh *nfhp;
1420 struct vattr vattr;
1421 int error = 0, attrflag, dattrflag;
1422 u_int32_t rdev;
1423
1424 if (vap->va_type == VCHR || vap->va_type == VBLK)
1425 rdev = vap->va_rdev;
1426 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1427 rdev = 0xffffffff;
1428 else
1429 return (EOPNOTSUPP);
1430 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
1431 return (error);
1432 error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap,
1433 rdev, vap->va_type, cnp->cn_cred, cnp->cn_thread, &dnfsva,
1434 &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
1435 if (!error) {
1436 if (!nfhp)
1437 (void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
1438 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
1439 &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
1440 NULL);
1441 if (nfhp)
1442 error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
1443 cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
1444 }
1445 if (dattrflag)
1446 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
1447 if (!error) {
1448 newvp = NFSTOV(np);
1449 if (attrflag != 0) {
1450 error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1451 0, 1);
1452 if (error != 0)
1453 vput(newvp);
1454 }
1455 }
1456 if (!error) {
1457 *vpp = newvp;
1458 } else if (NFS_ISV4(dvp)) {
1459 error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
1460 vap->va_gid);
1461 }
1462 dnp = VTONFS(dvp);
1463 mtx_lock(&dnp->n_mtx);
1464 dnp->n_flag |= NMODIFIED;
1465 if (!dattrflag) {
1466 dnp->n_attrstamp = 0;
1467 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
1468 }
1469 mtx_unlock(&dnp->n_mtx);
1470 return (error);
1471}
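
/*
 * For the NFSv2 kludge described above nfs_mknodrpc(), the only
 * device-specific input is the rdev value; fifos and sockets use the
 * magic 0xffffffff chosen at the top of the function.  That selection,
 * isolated as a sketch (hypothetical helper, kept out of the build):
 */
#if 0
static u_int32_t
nfs_mknod_rdev(struct vattr *vap)
{
	if (vap->va_type == VCHR || vap->va_type == VBLK)
		return (vap->va_rdev);
	return (0xffffffff);	/* VFIFO and VSOCK */
}
#endif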
1472
1473/*
1474 * nfs mknod vop
1475 * just call nfs_mknodrpc() to do the work.
1476 */
1477/* ARGSUSED */
1478static int
1479nfs_mknod(struct vop_mknod_args *ap)
1480{
1481 return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
1482}
1483
1484static struct mtx nfs_cverf_mtx;
1485MTX_SYSINIT(nfs_cverf_mtx, &nfs_cverf_mtx, "NFS create verifier mutex",
1486 MTX_DEF);
1487
1488static nfsquad_t
1489nfs_get_cverf(void)
1490{
1491 static nfsquad_t cverf;
1492 nfsquad_t ret;
1493 static int cverf_initialized = 0;
1494
1495 mtx_lock(&nfs_cverf_mtx);
1496 if (cverf_initialized == 0) {
1497 cverf.lval[0] = arc4random();
1498 cverf.lval[1] = arc4random();
1499 cverf_initialized = 1;
1500 } else
1501 cverf.qval++;
1502 ret = cverf;
1503 mtx_unlock(&nfs_cverf_mtx);
1504
1505 return (ret);
1506}
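
/*
 * nfs_get_cverf() hands out 64-bit create verifiers: randomly seeded
 * once, then incremented under nfs_cverf_mtx, so no two calls return
 * the same value within a boot.  Exclusive CREATE relies on this to
 * distinguish a fresh request from a retransmission.  Hedged usage
 * sketch (kept out of the build):
 */
#if 0
static void
nfs_cverf_example(void)
{
	nfsquad_t a, b;

	a = nfs_get_cverf();
	b = nfs_get_cverf();
	KASSERT(a.qval != b.qval, ("create verifiers must differ"));
}
#endif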
1507
1508/*
1509 * nfs file create call
1510 */
1511static int
1512nfs_create(struct vop_create_args *ap)
1513{
1514 struct vnode *dvp = ap->a_dvp;
1515 struct vattr *vap = ap->a_vap;
1516 struct componentname *cnp = ap->a_cnp;
1517 struct nfsnode *np = NULL, *dnp;
1518 struct vnode *newvp = NULL;
1519 struct nfsmount *nmp;
1520 struct nfsvattr dnfsva, nfsva;
1521 struct nfsfh *nfhp;
1522 nfsquad_t cverf;
1523 int error = 0, attrflag, dattrflag, fmode = 0;
1524 struct vattr vattr;
1525
1526 /*
1527 * Oops, not for me..
1528 */
1529 if (vap->va_type == VSOCK)
1530 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1531
1532 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
1533 return (error);
1534 if (vap->va_vaflags & VA_EXCLUSIVE)
1535 fmode |= O_EXCL;
1536 dnp = VTONFS(dvp);
1537 nmp = VFSTONFS(vnode_mount(dvp));
1538again:
1539 /* For NFSv4, wait until any remove is done. */
1540 mtx_lock(&dnp->n_mtx);
1541 while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) {
1542 dnp->n_flag |= NREMOVEWANT;
1543 (void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0);
1544 }
1545 mtx_unlock(&dnp->n_mtx);
1546
1547 cverf = nfs_get_cverf();
1548 error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1549 vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva,
1550 &nfhp, &attrflag, &dattrflag, NULL);
1551 if (!error) {
1552 if (nfhp == NULL)
1553 (void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
1554 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
1555 &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
1556 NULL);
1557 if (nfhp != NULL)
1558 error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
1559 cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
1560 }
1561 if (dattrflag)
1562 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
1563 if (!error) {
1564 newvp = NFSTOV(np);
1565 if (attrflag == 0)
1566 error = nfsrpc_getattr(newvp, cnp->cn_cred,
1567 cnp->cn_thread, &nfsva, NULL);
1568 if (error == 0)
1569 error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1570 0, 1);
1571 }
1572 if (error) {
1573 if (newvp != NULL) {
1574 vput(newvp);
1575 newvp = NULL;
1576 }
1577 if (NFS_ISV34(dvp) && (fmode & O_EXCL) &&
1578 error == NFSERR_NOTSUPP) {
1579 fmode &= ~O_EXCL;
1580 goto again;
1581 }
1582 } else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) {
1583 if (nfscl_checksattr(vap, &nfsva)) {
1584 /*
1585 * We are normally called with only a partially
1586 * initialized VAP. Since the NFSv3 spec says that
1587 * the server may use the file attributes to
1588 * store the verifier, the spec requires us to do a
1589 * SETATTR RPC. FreeBSD servers store the verifier in
1590 * atime, but we can't really assume that all servers
1591 * will so we ensure that our SETATTR sets both atime
1592 * and mtime.
1593 */
1594 if (vap->va_mtime.tv_sec == VNOVAL)
1595 vfs_timestamp(&vap->va_mtime);
1596 if (vap->va_atime.tv_sec == VNOVAL)
1597 vap->va_atime = vap->va_mtime;
1598 error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred,
1599 cnp->cn_thread, &nfsva, &attrflag, NULL);
1600 if (error && (vap->va_uid != (uid_t)VNOVAL ||
1601 vap->va_gid != (gid_t)VNOVAL)) {
1602 /* try again without setting uid/gid */
1603 vap->va_uid = (uid_t)VNOVAL;
1604 vap->va_gid = (uid_t)VNOVAL;
1605 error = nfsrpc_setattr(newvp, vap, NULL,
1606 cnp->cn_cred, cnp->cn_thread, &nfsva,
1607 &attrflag, NULL);
1608 }
1609 if (attrflag)
1610 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
1611 NULL, 0, 1);
1612 if (error != 0)
1613 vput(newvp);
1614 }
1615 }
1616 if (!error) {
1617 if ((cnp->cn_flags & MAKEENTRY) && attrflag)
1618 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
1619 NULL);
1620 *ap->a_vpp = newvp;
1621 } else if (NFS_ISV4(dvp)) {
1622 error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
1623 vap->va_gid);
1624 }
1625 mtx_lock(&dnp->n_mtx);
1626 dnp->n_flag |= NMODIFIED;
1627 if (!dattrflag) {
1628 dnp->n_attrstamp = 0;
1629 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
1630 }
1631 mtx_unlock(&dnp->n_mtx);
1632 return (error);
1633}
1634
1635/*
1636 * nfs file remove call
1637 * To try to make nfs semantics closer to ufs semantics, a file that has
1638 * other processes using the vnode is renamed instead of removed and then
1639 * removed later on the last close.
1640 * - If v_usecount > 1
1641 * If a rename is not already in the works
1642 * call nfs_sillyrename() to set it up
1643 * else
1644 * do the remove rpc
1645 */
1646static int
1647nfs_remove(struct vop_remove_args *ap)
1648{
1649 struct vnode *vp = ap->a_vp;
1650 struct vnode *dvp = ap->a_dvp;
1651 struct componentname *cnp = ap->a_cnp;
1652 struct nfsnode *np = VTONFS(vp);
1653 int error = 0;
1654 struct vattr vattr;
1655
1656 KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name"));
1657 KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
1658 if (vp->v_type == VDIR)
1659 error = EPERM;
1660 else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
1661 VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
1662 vattr.va_nlink > 1)) {
1663 /*
1664 * Purge the name cache so that the chance of a lookup for
1665 * the name succeeding while the remove is in progress is
1666 * minimized. Without node locking it can still happen, such
1667 * that an I/O op returns ESTALE, but you can get that anyway
1668 * when another host removes the file, so it is tolerated.
1669 */
1670 cache_purge(vp);
1671 /*
1672 * throw away biocache buffers, mainly to avoid
1673 * unnecessary delayed writes later.
1674 */
1675 error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1);
1676 /* Do the rpc */
1677 if (error != EINTR && error != EIO)
1678 error = nfs_removerpc(dvp, vp, cnp->cn_nameptr,
1679 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
1680 /*
1681 * Kludge City: if the first reply to the remove rpc is lost,
1682 * the reply to the retransmitted request will be ENOENT
1683 * since the file was in fact removed.
1684 * Therefore, we cheat and return success.
1685 */
1686 if (error == ENOENT)
1687 error = 0;
1688 } else if (!np->n_sillyrename)
1689 error = nfs_sillyrename(dvp, vp, cnp);
1690 mtx_lock(&np->n_mtx);
1691 np->n_attrstamp = 0;
1692 mtx_unlock(&np->n_mtx);
1693 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
1694 return (error);
1695}
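
/*
 * The decision in nfs_remove() above: remove on the wire only when we
 * hold the last reference (or an existing sillyrename makes it safe);
 * otherwise rename the file to a hidden name and remove it on last
 * close.  The trigger condition, restated (hypothetical helper, kept
 * out of the build):
 */
#if 0
static int
nfs_should_sillyrename(struct vnode *vp, struct nfsnode *np)
{
	/* Still in use by others and not yet sillyrenamed? */
	return (vrefcnt(vp) > 1 && np->n_sillyrename == NULL);
}
#endif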
1696
1697/*
1698 * nfs file remove rpc called from nfs_inactive
1699 */
1700int
1701ncl_removeit(struct sillyrename *sp, struct vnode *vp)
1702{
1703 /*
1704 * Make sure that the directory vnode is still valid.
1705 * XXX we should lock sp->s_dvp here.
1706 */
1707 if (sp->s_dvp->v_type == VBAD)
1708 return (0);
1709 return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen,
1710 sp->s_cred, NULL));
1711}
1712
1713/*
1714 * Nfs remove rpc, called from nfs_remove() and ncl_removeit().
1715 */
1716static int
1717nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
1718 int namelen, struct ucred *cred, struct thread *td)
1719{
1720 struct nfsvattr dnfsva;
1721 struct nfsnode *dnp = VTONFS(dvp);
1722 int error = 0, dattrflag;
1723
1724 mtx_lock(&dnp->n_mtx);
1725 dnp->n_flag |= NREMOVEINPROG;
1726 mtx_unlock(&dnp->n_mtx);
1727 error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva,
1728 &dattrflag, NULL);
1729 mtx_lock(&dnp->n_mtx);
1730 if ((dnp->n_flag & NREMOVEWANT)) {
1731 dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG);
1732 mtx_unlock(&dnp->n_mtx);
1733 wakeup((caddr_t)dnp);
1734 } else {
1735 dnp->n_flag &= ~NREMOVEINPROG;
1736 mtx_unlock(&dnp->n_mtx);
1737 }
1738 if (dattrflag)
1739 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
1740 mtx_lock(&dnp->n_mtx);
1741 dnp->n_flag |= NMODIFIED;
1742 if (!dattrflag) {
1743 dnp->n_attrstamp = 0;
1744 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
1745 }
1746 mtx_unlock(&dnp->n_mtx);
1747 if (error && NFS_ISV4(dvp))
1748 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
1749 return (error);
1750}
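
/*
 * NREMOVEINPROG/NREMOVEWANT form a small sleep/wakeup handshake:
 * nfs_removerpc() sets NREMOVEINPROG for the duration of the RPC, and
 * NFSv4 lookups/creates that find it set NREMOVEWANT and sleep on the
 * nfsnode until the wakeup above.  The waiting side, as it appears in
 * nfs_lookup():
 */
#if 0
	mtx_lock(&np->n_mtx);
	while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
		np->n_flag |= NREMOVEWANT;
		(void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
	}
	mtx_unlock(&np->n_mtx);
#endif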
1751
1752/*
1753 * nfs file rename call
1754 */
1755static int
1756nfs_rename(struct vop_rename_args *ap)
1757{
1758 struct vnode *fvp = ap->a_fvp;
1759 struct vnode *tvp = ap->a_tvp;
1760 struct vnode *fdvp = ap->a_fdvp;
1761 struct vnode *tdvp = ap->a_tdvp;
1762 struct componentname *tcnp = ap->a_tcnp;
1763 struct componentname *fcnp = ap->a_fcnp;
1764 struct nfsnode *fnp = VTONFS(ap->a_fvp);
1765 struct nfsnode *tdnp = VTONFS(ap->a_tdvp);
1766 struct nfsv4node *newv4 = NULL;
1767 int error;
1768
1769 KASSERT((tcnp->cn_flags & HASBUF) != 0 &&
1770 (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name"));
1771 /* Check for cross-device rename */
1772 if ((fvp->v_mount != tdvp->v_mount) ||
1773 (tvp && (fvp->v_mount != tvp->v_mount))) {
1774 error = EXDEV;
1775 goto out;
1776 }
1777
1778 if (fvp == tvp) {
1779 ncl_printf("nfs_rename: fvp == tvp (can't happen)\n");
1780 error = 0;
1781 goto out;
1782 }
1783 if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0)
1784 goto out;
1785
1786 /*
1787 * We have to flush B_DELWRI data prior to renaming
1788 * the file. If we don't, the delayed-write buffers
1789 * can be flushed out later after the file has gone stale
1790 * under NFSV3. NFSV2 does not have this problem because
1791 * (as far as I can tell) it flushes dirty buffers more
1792 * often.
1793 *
1794 * Skip the rename operation if the fsync fails, this can happen
1795 * due to the server's volume being full, when we pushed out data
1796 * that was written back to our cache earlier. Not checking for
1797 * this condition can result in potential (silent) data loss.
1798 */
1799 error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
1800 NFSVOPUNLOCK(fvp, 0);
1801 if (!error && tvp)
1802 error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
1803 if (error)
1804 goto out;
1805
1806 /*
1807 * If the tvp exists and is in use, sillyrename it before doing the
1808 * rename of the new file over it.
1809 * XXX Can't sillyrename a directory.
1810 */
1811 if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
1812 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1813 vput(tvp);
1814 tvp = NULL;
1815 }
1816
1817 error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1818 tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1819 tcnp->cn_thread);
1820
1821 if (error == 0 && NFS_ISV4(tdvp)) {
1822 /*
1823 * For NFSv4, check to see if it is the same name and
1824		 * replace the name if it is different.
1825 */
1826 MALLOC(newv4, struct nfsv4node *,
1827 sizeof (struct nfsv4node) +
1828 tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1,
1829 M_NFSV4NODE, M_WAITOK);
1830 mtx_lock(&tdnp->n_mtx);
1831 mtx_lock(&fnp->n_mtx);
1832 if (fnp->n_v4 != NULL && fvp->v_type == VREG &&
1833 (fnp->n_v4->n4_namelen != tcnp->cn_namelen ||
1834 NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4),
1835 tcnp->cn_namelen) ||
1836 tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen ||
1837 NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
1838 tdnp->n_fhp->nfh_len))) {
1839#ifdef notdef
1840{ char nnn[100]; int nnnl;
1841nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99;
1842bcopy(tcnp->cn_nameptr, nnn, nnnl);
1843nnn[nnnl] = '\0';
1844printf("ren replace=%s\n",nnn);
1845}
1846#endif
1847 FREE((caddr_t)fnp->n_v4, M_NFSV4NODE);
1848 fnp->n_v4 = newv4;
1849 newv4 = NULL;
1850 fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len;
1851 fnp->n_v4->n4_namelen = tcnp->cn_namelen;
1852 NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
1853 tdnp->n_fhp->nfh_len);
1854 NFSBCOPY(tcnp->cn_nameptr,
1855 NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen);
1856 }
1857 mtx_unlock(&tdnp->n_mtx);
1858 mtx_unlock(&fnp->n_mtx);
1859 if (newv4 != NULL)
1860 FREE((caddr_t)newv4, M_NFSV4NODE);
1861 }
1862
1863 if (fvp->v_type == VDIR) {
1864 if (tvp != NULL && tvp->v_type == VDIR)
1865 cache_purge(tdvp);
1866 cache_purge(fdvp);
1867 }
1868
1869out:
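	/*
	 * tdvp and tvp can reference the same vnode; in that case only one
	 * vnode lock is held, so drop the extra reference on tdvp with
	 * vrele() and let the vput() of tvp below unlock and release it.
	 */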
1870 if (tdvp == tvp)
1871 vrele(tdvp);
1872 else
1873 vput(tdvp);
1874 if (tvp)
1875 vput(tvp);
1876 vrele(fdvp);
1877 vrele(fvp);
1878 /*
1879 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1880 */
1881 if (error == ENOENT)
1882 error = 0;
1883 return (error);
1884}
1885
1886/*
1887 * nfs file rename rpc called from nfs_sillyrename() below
1888 */
1889static int
1890nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp,
1891 struct sillyrename *sp)
1892{
1893
1894 return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen,
1895 sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred,
1896 scnp->cn_thread));
1897}
1898
1899/*
1900 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1901 */
1902static int
1903nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr,
1904 int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr,
1905 int tnamelen, struct ucred *cred, struct thread *td)
1906{
1907 struct nfsvattr fnfsva, tnfsva;
1908 struct nfsnode *fdnp = VTONFS(fdvp);
1909 struct nfsnode *tdnp = VTONFS(tdvp);
1910 int error = 0, fattrflag, tattrflag;
1911
1912 error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp,
1913 tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag,
1914 &tattrflag, NULL, NULL);
1915 mtx_lock(&fdnp->n_mtx);
1916 fdnp->n_flag |= NMODIFIED;
1917 if (fattrflag != 0) {
1918 mtx_unlock(&fdnp->n_mtx);
1919 (void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1);
1920 } else {
1921 fdnp->n_attrstamp = 0;
1922 mtx_unlock(&fdnp->n_mtx);
1923 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
1924 }
1925 mtx_lock(&tdnp->n_mtx);
1926 tdnp->n_flag |= NMODIFIED;
1927 if (tattrflag != 0) {
1928 mtx_unlock(&tdnp->n_mtx);
1929 (void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1);
1930 } else {
1931 tdnp->n_attrstamp = 0;
1932 mtx_unlock(&tdnp->n_mtx);
1933 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
1934 }
1935 if (error && NFS_ISV4(fdvp))
1936 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
1937 return (error);
1938}
1939
1940/*
1941 * nfs hard link create call
1942 */
1943static int
1944nfs_link(struct vop_link_args *ap)
1945{
1946 struct vnode *vp = ap->a_vp;
1947 struct vnode *tdvp = ap->a_tdvp;
1948 struct componentname *cnp = ap->a_cnp;
1949 struct nfsnode *np, *tdnp;
1950 struct nfsvattr nfsva, dnfsva;
1951 int error = 0, attrflag, dattrflag;
1952
1953 if (vp->v_mount != tdvp->v_mount) {
1954 return (EXDEV);
1955 }
1956
1957 /*
1958 * Push all writes to the server, so that the attribute cache
1959 * doesn't get "out of sync" with the server.
1960 * XXX There should be a better way!
1961 */
1962 VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);
1963
1964 error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
1965 cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag,
1966 &dattrflag, NULL);
1967 tdnp = VTONFS(tdvp);
1968 mtx_lock(&tdnp->n_mtx);
1969 tdnp->n_flag |= NMODIFIED;
1970 if (dattrflag != 0) {
1971 mtx_unlock(&tdnp->n_mtx);
1972 (void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1);
1973 } else {
1974 tdnp->n_attrstamp = 0;
1975 mtx_unlock(&tdnp->n_mtx);
1976 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
1977 }
1978 if (attrflag)
1979 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
1980 else {
1981 np = VTONFS(vp);
1982 mtx_lock(&np->n_mtx);
1983 np->n_attrstamp = 0;
1984 mtx_unlock(&np->n_mtx);
1985 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
1986 }
1987 /*
1988 * If negative lookup caching is enabled, I might as well
1989 * add an entry for this node. Not necessary for correctness,
1990 * but if negative caching is enabled, then the system
1991 * must care about lookup caching hit rate, so...
1992 */
1993 if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 &&
1994 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
1995 cache_enter_time(tdvp, vp, cnp, &nfsva.na_ctime, NULL);
1996 }
1997 if (error && NFS_ISV4(vp))
1998 error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
1999 (gid_t)0);
2000 return (error);
2001}
2002
2003/*
2004 * nfs symbolic link create call
2005 */
2006static int
2007nfs_symlink(struct vop_symlink_args *ap)
2008{
2009 struct vnode *dvp = ap->a_dvp;
2010 struct vattr *vap = ap->a_vap;
2011 struct componentname *cnp = ap->a_cnp;
2012 struct nfsvattr nfsva, dnfsva;
2013 struct nfsfh *nfhp;
2014 struct nfsnode *np = NULL, *dnp;
2015 struct vnode *newvp = NULL;
2016 int error = 0, attrflag, dattrflag, ret;
2017
2018 vap->va_type = VLNK;
2019 error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2020 ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva,
2021 &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
2022 if (nfhp) {
2023 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
2024 &np, NULL, LK_EXCLUSIVE);
2025 if (!ret)
2026 newvp = NFSTOV(np);
2027 else if (!error)
2028 error = ret;
2029 }
2030 if (newvp != NULL) {
2031 if (attrflag)
2032 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
2033 0, 1);
2034 } else if (!error) {
2035 /*
2036 * If we do not have an error and we could not extract the
2037 * newvp from the response due to the request being NFSv2, we
2038 * have to do a lookup in order to obtain a newvp to return.
2039 */
2040 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2041 cnp->cn_cred, cnp->cn_thread, &np);
2042 if (!error)
2043 newvp = NFSTOV(np);
2044 }
2045 if (error) {
2046 if (newvp)
2047 vput(newvp);
2048 if (NFS_ISV4(dvp))
2049 error = nfscl_maperr(cnp->cn_thread, error,
2050 vap->va_uid, vap->va_gid);
2051 } else {
2052 *ap->a_vpp = newvp;
2053 }
2054
2055 dnp = VTONFS(dvp);
2056 mtx_lock(&dnp->n_mtx);
2057 dnp->n_flag |= NMODIFIED;
2058 if (dattrflag != 0) {
2059 mtx_unlock(&dnp->n_mtx);
2060 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
2061 } else {
2062 dnp->n_attrstamp = 0;
2063 mtx_unlock(&dnp->n_mtx);
2064 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
2065 }
2066 /*
2067 * If negative lookup caching is enabled, I might as well
2068 * add an entry for this node. Not necessary for correctness,
2069 * but if negative caching is enabled, then the system
2070 * must care about lookup caching hit rate, so...
2071 */
2072 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
2073 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
2074 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, NULL);
2075 }
2076 return (error);
2077}
2078
2079/*
2080 * nfs make dir call
2081 */
2082static int
2083nfs_mkdir(struct vop_mkdir_args *ap)
2084{
2085 struct vnode *dvp = ap->a_dvp;
2086 struct vattr *vap = ap->a_vap;
2087 struct componentname *cnp = ap->a_cnp;
2088 struct nfsnode *np = NULL, *dnp;
2089 struct vnode *newvp = NULL;
2090 struct vattr vattr;
2091 struct nfsfh *nfhp;
2092 struct nfsvattr nfsva, dnfsva;
2093 int error = 0, attrflag, dattrflag, ret;
2094
2095 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
2096 return (error);
2097 vap->va_type = VDIR;
2098 error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2099 vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp,
2100 &attrflag, &dattrflag, NULL);
2101 dnp = VTONFS(dvp);
2102 mtx_lock(&dnp->n_mtx);
2103 dnp->n_flag |= NMODIFIED;
2104 if (dattrflag != 0) {
2105 mtx_unlock(&dnp->n_mtx);
2106 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
2107 } else {
2108 dnp->n_attrstamp = 0;
2109 mtx_unlock(&dnp->n_mtx);
2110 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
2111 }
2112 if (nfhp) {
2113 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
2114 &np, NULL, LK_EXCLUSIVE);
2115 if (!ret) {
2116 newvp = NFSTOV(np);
2117 if (attrflag)
2118 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
2119 NULL, 0, 1);
2120 } else if (!error)
2121 error = ret;
2122 }
2123 if (!error && newvp == NULL) {
2124 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2125 cnp->cn_cred, cnp->cn_thread, &np);
2126 if (!error) {
2127 newvp = NFSTOV(np);
2128 if (newvp->v_type != VDIR)
2129 error = EEXIST;
2130 }
2131 }
2132 if (error) {
2133 if (newvp)
2134 vput(newvp);
2135 if (NFS_ISV4(dvp))
2136 error = nfscl_maperr(cnp->cn_thread, error,
2137 vap->va_uid, vap->va_gid);
2138 } else {
2139 /*
2140 * If negative lookup caching is enabled, I might as well
2141 * add an entry for this node. Not necessary for correctness,
2142 * but if negative caching is enabled, then the system
2143 * must care about lookup caching hit rate, so...
2144 */
2145 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
2146 (cnp->cn_flags & MAKEENTRY) &&
2147 attrflag != 0 && dattrflag != 0)
2148 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
2149 &dnfsva.na_ctime);
2150 *ap->a_vpp = newvp;
2151 }
2152 return (error);
2153}
2154
2155/*
2156 * nfs remove directory call
2157 */
2158static int
2159nfs_rmdir(struct vop_rmdir_args *ap)
2160{
2161 struct vnode *vp = ap->a_vp;
2162 struct vnode *dvp = ap->a_dvp;
2163 struct componentname *cnp = ap->a_cnp;
2164 struct nfsnode *dnp;
2165 struct nfsvattr dnfsva;
2166 int error, dattrflag;
2167
2168 if (dvp == vp)
2169 return (EINVAL);
2170 error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2171 cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL);
2172 dnp = VTONFS(dvp);
2173 mtx_lock(&dnp->n_mtx);
2174 dnp->n_flag |= NMODIFIED;
2175 if (dattrflag != 0) {
2176 mtx_unlock(&dnp->n_mtx);
2177 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
2178 } else {
2179 dnp->n_attrstamp = 0;
2180 mtx_unlock(&dnp->n_mtx);
2181 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
2182 }
2183
2184 cache_purge(dvp);
2185 cache_purge(vp);
2186 if (error && NFS_ISV4(dvp))
2187 error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
2188 (gid_t)0);
2189 /*
2190	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2191 */
2192 if (error == ENOENT)
2193 error = 0;
2194 return (error);
2195}
2196
2197/*
2198 * nfs readdir call
2199 */
2200static int
2201nfs_readdir(struct vop_readdir_args *ap)
2202{
2203 struct vnode *vp = ap->a_vp;
2204 struct nfsnode *np = VTONFS(vp);
2205 struct uio *uio = ap->a_uio;
2206 ssize_t tresid;
2207 int error = 0;
2208 struct vattr vattr;
2209
2210 if (vp->v_type != VDIR)
2211 return(EPERM);
2212
2213 /*
2214	 * First, check for a hit on the EOF offset cache.
2215 */
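	/*
	 * The cached EOF offset is only trusted when the directory is
	 * unchanged, i.e. when the change attribute (NFSv4) or the
	 * modification time (NFSv2/3) still matches the cached value.
	 */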
2216 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
2217 (np->n_flag & NMODIFIED) == 0) {
2218 if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
2219 mtx_lock(&np->n_mtx);
2220 if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
2221 !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
2222 mtx_unlock(&np->n_mtx);
2223 NFSINCRGLOBAL(newnfsstats.direofcache_hits);
2224 return (0);
2225 } else
2226 mtx_unlock(&np->n_mtx);
2227 }
2228 }
2229
2230 /*
2231 * Call ncl_bioread() to do the real work.
2232 */
2233 tresid = uio->uio_resid;
2234 error = ncl_bioread(vp, uio, 0, ap->a_cred);
2235
2236 if (!error && uio->uio_resid == tresid)
2237 NFSINCRGLOBAL(newnfsstats.direofcache_misses);
2238 return (error);
2239}
2240
2241/*
2242 * Readdir rpc call.
2243 * Called from below the buffer cache by ncl_doio().
2244 */
2245int
2246ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
2247 struct thread *td)
2248{
2249 struct nfsvattr nfsva;
2250 nfsuint64 *cookiep, cookie;
2251 struct nfsnode *dnp = VTONFS(vp);
2252 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2253 int error = 0, eof, attrflag;
2254
2255 KASSERT(uiop->uio_iovcnt == 1 &&
2256 (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
2257 (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
2258 ("nfs readdirrpc bad uio"));
2259
2260 /*
2261	 * If there is no cookie, assume the directory was stale.
2262 */
2263 ncl_dircookie_lock(dnp);
2264 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
2265 if (cookiep) {
2266 cookie = *cookiep;
2267 ncl_dircookie_unlock(dnp);
2268 } else {
2269 ncl_dircookie_unlock(dnp);
2270 return (NFSERR_BAD_COOKIE);
2271 }
2272
2273 if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
2274 (void)ncl_fsinfo(nmp, vp, cred, td);
2275
2276 error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva,
2277 &attrflag, &eof, NULL);
2278 if (attrflag)
2279 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
2280
2281 if (!error) {
2282 /*
2283 * We are now either at the end of the directory or have filled
2284 * the block.
2285 */
2286 if (eof)
2287 dnp->n_direofoffset = uiop->uio_offset;
2288 else {
2289 if (uiop->uio_resid > 0)
2290 ncl_printf("EEK! readdirrpc resid > 0\n");
2291 ncl_dircookie_lock(dnp);
2292 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
2293 *cookiep = cookie;
2294 ncl_dircookie_unlock(dnp);
2295 }
2296 } else if (NFS_ISV4(vp)) {
2297 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2298 }
2299 return (error);
2300}
2301
2302/*
2303 * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc().
2304 */
2305int
2306ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
2307 struct thread *td)
2308{
2309 struct nfsvattr nfsva;
2310 nfsuint64 *cookiep, cookie;
2311 struct nfsnode *dnp = VTONFS(vp);
2312 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2313 int error = 0, attrflag, eof;
2314
2315 KASSERT(uiop->uio_iovcnt == 1 &&
2316 (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
2317 (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
2318 ("nfs readdirplusrpc bad uio"));
2319
2320 /*
2321	 * If there is no cookie, assume the directory was stale.
2322 */
2323 ncl_dircookie_lock(dnp);
2324 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
2325 if (cookiep) {
2326 cookie = *cookiep;
2327 ncl_dircookie_unlock(dnp);
2328 } else {
2329 ncl_dircookie_unlock(dnp);
2330 return (NFSERR_BAD_COOKIE);
2331 }
2332
2333 if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
2334 (void)ncl_fsinfo(nmp, vp, cred, td);
2335 error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
2336 &attrflag, &eof, NULL);
2337 if (attrflag)
2338 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
2339
2340 if (!error) {
2341 /*
2342		 * We are now either at the end of the directory or have filled
2343		 * the block.
2344 */
2345 if (eof)
2346 dnp->n_direofoffset = uiop->uio_offset;
2347 else {
2348 if (uiop->uio_resid > 0)
2349 ncl_printf("EEK! readdirplusrpc resid > 0\n");
2350 ncl_dircookie_lock(dnp);
2351 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
2352 *cookiep = cookie;
2353 ncl_dircookie_unlock(dnp);
2354 }
2355 } else if (NFS_ISV4(vp)) {
2356 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2357 }
2358 return (error);
2359}
2360
2361/*
2362 * Silly rename. To make the stateless NFS filesystem look a little
2363 * more like "ufs", a remove of an active vnode is translated to a rename
2364 * to a funny looking filename that is removed by nfs_inactive on the
2365 * nfsnode. There is the potential for another process on a different client
2366 * to create the same funny name between the time nfs_lookitup() fails and
2367 * nfs_rename() completes, but...
2368 */
2369static int
2370nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2371{
2372 struct sillyrename *sp;
2373 struct nfsnode *np;
2374 int error;
2375 short pid;
2376 unsigned int lticks;
2377
2378 cache_purge(dvp);
2379 np = VTONFS(vp);
2380 KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
2381 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2382 M_NEWNFSREQ, M_WAITOK);
2383 sp->s_cred = crhold(cnp->cn_cred);
2384 sp->s_dvp = dvp;
2385 VREF(dvp);
2386
2387 /*
2388 * Fudge together a funny name.
2389	 * Changing the format of the funny name to accommodate more
2390 * sillynames per directory.
2391 * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is
2392 * CPU ticks since boot.
2393 */
2394 pid = cnp->cn_thread->td_proc->p_pid;
2395 lticks = (unsigned int)ticks;
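	/*
	 * Loop, bumping the ticks value, until nfs_lookitup() fails and,
	 * therefore, a candidate name that does not already exist in the
	 * directory has been found.
	 */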
2396 for ( ; ; ) {
2397 sp->s_namlen = sprintf(sp->s_name,
2398		    ".nfs.%08x.%04x.4", lticks,
2399 pid);
2400 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2401 cnp->cn_thread, NULL))
2402 break;
2403 lticks++;
2404 }
2405 error = nfs_renameit(dvp, vp, cnp, sp);
2406 if (error)
2407 goto bad;
2408 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2409 cnp->cn_thread, &np);
2410 np->n_sillyrename = sp;
2411 return (0);
2412bad:
2413 vrele(sp->s_dvp);
2414 crfree(sp->s_cred);
2415 free((caddr_t)sp, M_NEWNFSREQ);
2416 return (error);
2417}
2418
2419/*
2420 * Look up a file name and optionally either update the file handle or
2421 * allocate an nfsnode, depending on the value of npp.
2422 * npp == NULL --> just do the lookup
2423 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2424 * handled too
2425 * *npp != NULL --> update the file handle in the vnode
2426 */
2427static int
2428nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
2429 struct thread *td, struct nfsnode **npp)
2430{
2431 struct vnode *newvp = NULL, *vp;
2432 struct nfsnode *np, *dnp = VTONFS(dvp);
2433 struct nfsfh *nfhp, *onfhp;
2434 struct nfsvattr nfsva, dnfsva;
2435 struct componentname cn;
2436 int error = 0, attrflag, dattrflag;
2437 u_int hash;
2438
2439 error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva,
2440 &nfhp, &attrflag, &dattrflag, NULL);
2441 if (dattrflag)
2442 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
2443 if (npp && !error) {
2444 if (*npp != NULL) {
2445 np = *npp;
2446 vp = NFSTOV(np);
2447 /*
2448 * For NFSv4, check to see if it is the same name and
2449			 * replace the name if it is different.
2450 */
2451 if (np->n_v4 != NULL && nfsva.na_type == VREG &&
2452 (np->n_v4->n4_namelen != len ||
2453 NFSBCMP(name, NFS4NODENAME(np->n_v4), len) ||
2454 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
2455 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
2456 dnp->n_fhp->nfh_len))) {
2457#ifdef notdef
2458{ char nnn[100]; int nnnl;
2459nnnl = (len < 100) ? len : 99;
2460bcopy(name, nnn, nnnl);
2461nnn[nnnl] = '\0';
2462printf("replace=%s\n",nnn);
2463}
2464#endif
2465 FREE((caddr_t)np->n_v4, M_NFSV4NODE);
2466 MALLOC(np->n_v4, struct nfsv4node *,
2467 sizeof (struct nfsv4node) +
2468 dnp->n_fhp->nfh_len + len - 1,
2469 M_NFSV4NODE, M_WAITOK);
2470 np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
2471 np->n_v4->n4_namelen = len;
2472 NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
2473 dnp->n_fhp->nfh_len);
2474 NFSBCOPY(name, NFS4NODENAME(np->n_v4), len);
2475 }
2476 hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len,
2477 FNV1_32_INIT);
2478 onfhp = np->n_fhp;
2479 /*
2480 * Rehash node for new file handle.
2481 */
2482 vfs_hash_rehash(vp, hash);
2483 np->n_fhp = nfhp;
2484 if (onfhp != NULL)
2485 FREE((caddr_t)onfhp, M_NFSFH);
2486 newvp = NFSTOV(np);
2487 } else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
2488 FREE((caddr_t)nfhp, M_NFSFH);
2489 VREF(dvp);
2490 newvp = dvp;
2491 } else {
2492 cn.cn_nameptr = name;
2493 cn.cn_namelen = len;
2494 error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td,
2495 &np, NULL, LK_EXCLUSIVE);
2496 if (error)
2497 return (error);
2498 newvp = NFSTOV(np);
2499 }
2500 if (!attrflag && *npp == NULL) {
2501 if (newvp == dvp)
2502 vrele(newvp);
2503 else
2504 vput(newvp);
2505 return (ENOENT);
2506 }
2507 if (attrflag)
2508 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
2509 0, 1);
2510 }
2511 if (npp && *npp == NULL) {
2512 if (error) {
2513 if (newvp) {
2514 if (newvp == dvp)
2515 vrele(newvp);
2516 else
2517 vput(newvp);
2518 }
2519 } else
2520 *npp = np;
2521 }
2522 if (error && NFS_ISV4(dvp))
2523 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2524 return (error);
2525}
2526
2527/*
2528 * Nfs Version 3 and 4 commit rpc
2529 */
2530int
2531ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
2532 struct thread *td)
2533{
2534 struct nfsvattr nfsva;
2535 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2536 int error, attrflag;
2537 u_char verf[NFSX_VERF];
2538
2539 mtx_lock(&nmp->nm_mtx);
2540 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
2541 mtx_unlock(&nmp->nm_mtx);
2542 return (0);
2543 }
2544 mtx_unlock(&nmp->nm_mtx);
2545 error = nfsrpc_commit(vp, offset, cnt, cred, td, verf, &nfsva,
2546 &attrflag, NULL);
2547 if (!error) {
2548 mtx_lock(&nmp->nm_mtx);
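		/*
		 * A write verifier that differs from the one saved at write
		 * time indicates that the server has rebooted and may have
		 * lost uncommitted data, so return NFSERR_STALEWRITEVERF to
		 * make the caller write the buffers again.
		 */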
2549 if (NFSBCMP((caddr_t)nmp->nm_verf, verf, NFSX_VERF)) {
2550 NFSBCOPY(verf, (caddr_t)nmp->nm_verf, NFSX_VERF);
2551 error = NFSERR_STALEWRITEVERF;
2552 }
2553 mtx_unlock(&nmp->nm_mtx);
2554 if (!error && attrflag)
2555 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL,
2556 0, 1);
2557 } else if (NFS_ISV4(vp)) {
2558 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2559 }
2560 return (error);
2561}
2562
2563/*
2564 * Strategy routine.
2565 * For async requests when nfsiod(s) are running, queue the request by
2566 * calling ncl_asyncio(), otherwise just call ncl_doio() to do the
2567 * request.
2568 */
2569static int
2570nfs_strategy(struct vop_strategy_args *ap)
2571{
2572 struct buf *bp = ap->a_bp;
2573 struct ucred *cr;
2574
2575 KASSERT(!(bp->b_flags & B_DONE),
2576 ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2577 BUF_ASSERT_HELD(bp);
2578
2579 if (bp->b_iocmd == BIO_READ)
2580 cr = bp->b_rcred;
2581 else
2582 cr = bp->b_wcred;
2583
2584 /*
2585	 * If the op is asynchronous and an i/o daemon is waiting,
2586	 * queue the request, wake it up and wait for completion;
2587	 * otherwise just do it ourselves.
2588 */
2589 if ((bp->b_flags & B_ASYNC) == 0 ||
2590 ncl_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
2591 (void) ncl_doio(ap->a_vp, bp, cr, curthread, 1);
2592 return (0);
2593}
2594
2595/*
2596 * fsync vnode op. Just call ncl_flush() with commit == 1.
2597 */
2598/* ARGSUSED */
2599static int
2600nfs_fsync(struct vop_fsync_args *ap)
2601{
2602
2603 if (ap->a_vp->v_type != VREG) {
2604 /*
2605 * For NFS, metadata is changed synchronously on the server,
2606 * so there is nothing to flush. Also, ncl_flush() clears
2607 * the NMODIFIED flag and that shouldn't be done here for
2608 * directories.
2609 */
2610 return (0);
2611 }
2612 return (ncl_flush(ap->a_vp, ap->a_waitfor, NULL, ap->a_td, 1, 0));
2613}
2614
2615/*
2616 * Flush all the blocks associated with a vnode.
2617 * Walk through the buffer pool and push any dirty pages
2618 * associated with the vnode.
2619 * If the called_from_renewthread argument is TRUE, it has been called
2620 * from the NFSv4 renew thread and, as such, cannot block indefinitely
2621 * waiting for a buffer write to complete.
2622 */
2623int
2624ncl_flush(struct vnode *vp, int waitfor, struct ucred *cred, struct thread *td,
2625 int commit, int called_from_renewthread)
2626{
2627 struct nfsnode *np = VTONFS(vp);
2628 struct buf *bp;
2629 int i;
2630 struct buf *nbp;
2631 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2632 int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2633 int passone = 1, trycnt = 0;
2634 u_quad_t off, endoff, toff;
2635 struct ucred* wcred = NULL;
2636 struct buf **bvec = NULL;
2637 struct bufobj *bo;
2638#ifndef NFS_COMMITBVECSIZ
2639#define NFS_COMMITBVECSIZ 20
2640#endif
2641 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2642 int bvecsize = 0, bveccount;
2643
2644 if (called_from_renewthread != 0)
2645 slptimeo = hz;
2646 if (nmp->nm_flag & NFSMNT_INT)
2647 slpflag = NFS_PCATCH;
2648 if (!commit)
2649 passone = 0;
2650 bo = &vp->v_bufobj;
2651 /*
2652 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2653 * server, but has not been committed to stable storage on the server
2654 * yet. On the first pass, the byte range is worked out and the commit
2655 * rpc is done. On the second pass, ncl_writebp() is called to do the
2656 * job.
2657 */
2658again:
2659 off = (u_quad_t)-1;
2660 endoff = 0;
2661 bvecpos = 0;
2662 if (NFS_ISV34(vp) && commit) {
2663 if (bvec != NULL && bvec != bvec_on_stack)
2664 free(bvec, M_TEMP);
2665 /*
2666		 * Count up how many buffers are waiting for a commit.
2667 */
2668 bveccount = 0;
2669 BO_LOCK(bo);
2670 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2671 if (!BUF_ISLOCKED(bp) &&
2672 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2673 == (B_DELWRI | B_NEEDCOMMIT))
2674 bveccount++;
2675 }
2676 /*
2677 * Allocate space to remember the list of bufs to commit. It is
2678 * important to use M_NOWAIT here to avoid a race with nfs_write.
2679 * If we can't get memory (for whatever reason), we will end up
2680 * committing the buffers one-by-one in the loop below.
2681 */
2682 if (bveccount > NFS_COMMITBVECSIZ) {
2683 /*
2684 * Release the vnode interlock to avoid a lock
2685 * order reversal.
2686 */
2687 BO_UNLOCK(bo);
2688 bvec = (struct buf **)
2689 malloc(bveccount * sizeof(struct buf *),
2690 M_TEMP, M_NOWAIT);
2691 BO_LOCK(bo);
2692 if (bvec == NULL) {
2693 bvec = bvec_on_stack;
2694 bvecsize = NFS_COMMITBVECSIZ;
2695 } else
2696 bvecsize = bveccount;
2697 } else {
2698 bvec = bvec_on_stack;
2699 bvecsize = NFS_COMMITBVECSIZ;
2700 }
2701 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2702 if (bvecpos >= bvecsize)
2703 break;
2704 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
2705 nbp = TAILQ_NEXT(bp, b_bobufs);
2706 continue;
2707 }
2708 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2709 (B_DELWRI | B_NEEDCOMMIT)) {
2710 BUF_UNLOCK(bp);
2711 nbp = TAILQ_NEXT(bp, b_bobufs);
2712 continue;
2713 }
2714 BO_UNLOCK(bo);
2715 bremfree(bp);
2716 /*
2717 * Work out if all buffers are using the same cred
2718 * so we can deal with them all with one commit.
2719 *
2720 * NOTE: we are not clearing B_DONE here, so we have
2721 * to do it later on in this routine if we intend to
2722 * initiate I/O on the bp.
2723 *
2724 * Note: to avoid loopback deadlocks, we do not
2725 * assign b_runningbufspace.
2726 */
2727 if (wcred == NULL)
2728 wcred = bp->b_wcred;
2729 else if (wcred != bp->b_wcred)
2730 wcred = NOCRED;
2731 vfs_busy_pages(bp, 1);
2732
2733 BO_LOCK(bo);
2734 /*
2735 * bp is protected by being locked, but nbp is not
2736 * and vfs_busy_pages() may sleep. We have to
2737 * recalculate nbp.
2738 */
2739 nbp = TAILQ_NEXT(bp, b_bobufs);
2740
2741 /*
2742 * A list of these buffers is kept so that the
2743 * second loop knows which buffers have actually
2744 * been committed. This is necessary, since there
2745 * may be a race between the commit rpc and new
2746 * uncommitted writes on the file.
2747 */
2748 bvec[bvecpos++] = bp;
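			/*
			 * Widen the commit range to cover this buffer's
			 * dirty byte range.
			 */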
2749 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2750 bp->b_dirtyoff;
2751 if (toff < off)
2752 off = toff;
2753 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2754 if (toff > endoff)
2755 endoff = toff;
2756 }
2757 BO_UNLOCK(bo);
2758 }
2759 if (bvecpos > 0) {
2760 /*
2761 * Commit data on the server, as required.
2762 * If all bufs are using the same wcred, then use that with
2763 * one call for all of them, otherwise commit each one
2764 * separately.
2765 */
2766 if (wcred != NOCRED)
2767 retv = ncl_commit(vp, off, (int)(endoff - off),
2768 wcred, td);
2769 else {
2770 retv = 0;
2771 for (i = 0; i < bvecpos; i++) {
2772 off_t off, size;
2773 bp = bvec[i];
2774 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2775 bp->b_dirtyoff;
2776 size = (u_quad_t)(bp->b_dirtyend
2777 - bp->b_dirtyoff);
2778 retv = ncl_commit(vp, off, (int)size,
2779 bp->b_wcred, td);
2780 if (retv) break;
2781 }
2782 }
2783
2784 if (retv == NFSERR_STALEWRITEVERF)
2785 ncl_clearcommit(vp->v_mount);
2786
2787 /*
2788 * Now, either mark the blocks I/O done or mark the
2789 * blocks dirty, depending on whether the commit
2790 * succeeded.
2791 */
2792 for (i = 0; i < bvecpos; i++) {
2793 bp = bvec[i];
2794 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
2795 if (retv) {
2796 /*
2797 * Error, leave B_DELWRI intact
2798 */
2799 vfs_unbusy_pages(bp);
2800 brelse(bp);
2801 } else {
2802 /*
2803 * Success, remove B_DELWRI ( bundirty() ).
2804 *
2805 * b_dirtyoff/b_dirtyend seem to be NFS
2806 * specific. We should probably move that
2807 * into bundirty(). XXX
2808 */
2809 bufobj_wref(bo);
2810 bp->b_flags |= B_ASYNC;
2811 bundirty(bp);
2812 bp->b_flags &= ~B_DONE;
2813 bp->b_ioflags &= ~BIO_ERROR;
2814 bp->b_dirtyoff = bp->b_dirtyend = 0;
2815 bufdone(bp);
2816 }
2817 }
2818 }
2819
2820 /*
2821 * Start/do any write(s) that are required.
2822 */
2823loop:
2824 BO_LOCK(bo);
2825 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2826 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
2827 if (waitfor != MNT_WAIT || passone)
2828 continue;
2829
2830 error = BUF_TIMELOCK(bp,
2831 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2832 BO_MTX(bo), "nfsfsync", slpflag, slptimeo);
2833 if (error == 0) {
2834 BUF_UNLOCK(bp);
2835 goto loop;
2836 }
2837 if (error == ENOLCK) {
2838 error = 0;
2839 goto loop;
2840 }
2841 if (called_from_renewthread != 0) {
2842 /*
2843 * Return EIO so the flush will be retried
2844 * later.
2845 */
2846 error = EIO;
2847 goto done;
2848 }
2849 if (newnfs_sigintr(nmp, td)) {
2850 error = EINTR;
2851 goto done;
2852 }
2853 if (slpflag & PCATCH) {
2854 slpflag = 0;
2855 slptimeo = 2 * hz;
2856 }
2857 goto loop;
2858 }
2859 if ((bp->b_flags & B_DELWRI) == 0)
2860 panic("nfs_fsync: not dirty");
2861 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
2862 BUF_UNLOCK(bp);
2863 continue;
2864 }
2865 BO_UNLOCK(bo);
2866 bremfree(bp);
2867		bp->b_flags |= B_ASYNC;
2871 bwrite(bp);
2872 if (newnfs_sigintr(nmp, td)) {
2873 error = EINTR;
2874 goto done;
2875 }
2876 goto loop;
2877 }
2878 if (passone) {
2879 passone = 0;
2880 BO_UNLOCK(bo);
2881 goto again;
2882 }
2883 if (waitfor == MNT_WAIT) {
2884 while (bo->bo_numoutput) {
2885 error = bufobj_wwait(bo, slpflag, slptimeo);
2886 if (error) {
2887 BO_UNLOCK(bo);
2888 if (called_from_renewthread != 0) {
2889 /*
2890 * Return EIO so that the flush will be
2891 * retried later.
2892 */
2893 error = EIO;
2894 goto done;
2895 }
2896 error = newnfs_sigintr(nmp, td);
2897 if (error)
2898 goto done;
2899 if (slpflag & PCATCH) {
2900 slpflag = 0;
2901 slptimeo = 2 * hz;
2902 }
2903 BO_LOCK(bo);
2904 }
2905 }
2906 if (bo->bo_dirty.bv_cnt != 0 && commit) {
2907 BO_UNLOCK(bo);
2908 goto loop;
2909 }
2910 /*
2911 * Wait for all the async IO requests to drain
2912 */
2913 BO_UNLOCK(bo);
2914 mtx_lock(&np->n_mtx);
2915 while (np->n_directio_asyncwr > 0) {
2916 np->n_flag |= NFSYNCWAIT;
2917 error = newnfs_msleep(td, &np->n_directio_asyncwr,
2918 &np->n_mtx, slpflag | (PRIBIO + 1),
2919 "nfsfsync", 0);
2920 if (error) {
2921 if (newnfs_sigintr(nmp, td)) {
2922 mtx_unlock(&np->n_mtx);
2923 error = EINTR;
2924 goto done;
2925 }
2926 }
2927 }
2928 mtx_unlock(&np->n_mtx);
2929 } else
2930 BO_UNLOCK(bo);
2931 mtx_lock(&np->n_mtx);
2932 if (np->n_flag & NWRITEERR) {
2933 error = np->n_error;
2934 np->n_flag &= ~NWRITEERR;
2935 }
2936 if (commit && bo->bo_dirty.bv_cnt == 0 &&
2937 bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
2938 np->n_flag &= ~NMODIFIED;
2939 mtx_unlock(&np->n_mtx);
2940done:
2941 if (bvec != NULL && bvec != bvec_on_stack)
2942 free(bvec, M_TEMP);
2943 if (error == 0 && commit != 0 && waitfor == MNT_WAIT &&
2944 (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 ||
2945 np->n_directio_asyncwr != 0) && trycnt++ < 5) {
2946 /* try, try again... */
2947 passone = 1;
2948 wcred = NULL;
2949 bvec = NULL;
2950 bvecsize = 0;
2951	printf("try%d\n", trycnt);
2952 goto again;
2953 }
2954 return (error);
2955}
2956
2957/*
2958 * NFS advisory byte-level locks.
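 * For NFSv4, the lock ops are done against the server. Otherwise, either
 * local locking is used (NFSMNT_NOLOCKD) or the request is handed to
 * nfs_advlock_p() (the NLM client hook) when one is registered.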
2959 */
2960static int
2961nfs_advlock(struct vop_advlock_args *ap)
2962{
2963 struct vnode *vp = ap->a_vp;
2964 struct ucred *cred;
2965 struct nfsnode *np = VTONFS(ap->a_vp);
2966 struct proc *p = (struct proc *)ap->a_id;
2967 struct thread *td = curthread; /* XXX */
2968 struct vattr va;
2969 int ret, error = EOPNOTSUPP;
2970 u_quad_t size;
2971
2972 if (NFS_ISV4(vp) && (ap->a_flags & (F_POSIX | F_FLOCK)) != 0) {
2973 if (vp->v_type != VREG)
2974 return (EINVAL);
2975 if ((ap->a_flags & F_POSIX) != 0)
2976 cred = p->p_ucred;
2977 else
2978 cred = td->td_ucred;
2979 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
2980 if (vp->v_iflag & VI_DOOMED) {
2981 NFSVOPUNLOCK(vp, 0);
2982 return (EBADF);
2983 }
2984
2985 /*
2986 * If this is unlocking a write locked region, flush and
2987		 * commit the dirty buffers before unlocking. This is required by
2988 * RFC3530 Sec. 9.3.2.
2989 */
2990 if (ap->a_op == F_UNLCK &&
2991 nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id,
2992 ap->a_flags))
2993 (void) ncl_flush(vp, MNT_WAIT, cred, td, 1, 0);
2994
2995 /*
2996		 * Loop around doing the lock op, since a blocking lock
2997 * must wait for the lock op to succeed.
2998 */
2999 do {
3000 ret = nfsrpc_advlock(vp, np->n_size, ap->a_op,
3001 ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags);
3002 if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
3003 ap->a_op == F_SETLK) {
3004 NFSVOPUNLOCK(vp, 0);
3005 error = nfs_catnap(PZERO | PCATCH, ret,
3006 "ncladvl");
3007 if (error)
3008 return (EINTR);
3009 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
3010 if (vp->v_iflag & VI_DOOMED) {
3011 NFSVOPUNLOCK(vp, 0);
3012 return (EBADF);
3013 }
3014 }
3015 } while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
3016 ap->a_op == F_SETLK);
3017 if (ret == NFSERR_DENIED) {
3018 NFSVOPUNLOCK(vp, 0);
3019 return (EAGAIN);
3020 } else if (ret == EINVAL || ret == EBADF || ret == EINTR) {
3021 NFSVOPUNLOCK(vp, 0);
3022 return (ret);
3023 } else if (ret != 0) {
3024 NFSVOPUNLOCK(vp, 0);
3025 return (EACCES);
3026 }
3027
3028 /*
3029 * Now, if we just got a lock, invalidate data in the buffer
3030 * cache, as required, so that the coherency conforms with
3031 * RFC3530 Sec. 9.3.2.
3032 */
3033 if (ap->a_op == F_SETLK) {
3034 if ((np->n_flag & NMODIFIED) == 0) {
3035 np->n_attrstamp = 0;
3036 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
3037 ret = VOP_GETATTR(vp, &va, cred);
3038 }
3039 if ((np->n_flag & NMODIFIED) || ret ||
3040 np->n_change != va.va_filerev) {
3041 (void) ncl_vinvalbuf(vp, V_SAVE, td, 1);
3042 np->n_attrstamp = 0;
3043 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
3044 ret = VOP_GETATTR(vp, &va, cred);
3045 if (!ret) {
3046 np->n_mtime = va.va_mtime;
3047 np->n_change = va.va_filerev;
3048 }
3049 }
3050 }
3051 NFSVOPUNLOCK(vp, 0);
3052 return (0);
3053 } else if (!NFS_ISV4(vp)) {
3054 error = NFSVOPLOCK(vp, LK_SHARED);
3055 if (error)
3056 return (error);
3057 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
3058 size = VTONFS(vp)->n_size;
3059 NFSVOPUNLOCK(vp, 0);
3060 error = lf_advlock(ap, &(vp->v_lockf), size);
3061 } else {
3062 if (nfs_advlock_p != NULL)
3063 error = nfs_advlock_p(ap);
3064 else {
3065 NFSVOPUNLOCK(vp, 0);
3066 error = ENOLCK;
3067 }
3068 }
3069 }
3070 return (error);
3071}
3072
3073/*
3074 * NFS advisory byte-level locks.
3075 * Asynchronous NFS advisory byte-level locks.
3076static int
3077nfs_advlockasync(struct vop_advlockasync_args *ap)
3078{
3079 struct vnode *vp = ap->a_vp;
3080 u_quad_t size;
3081 int error;
3082
3083 if (NFS_ISV4(vp))
3084 return (EOPNOTSUPP);
3085 error = NFSVOPLOCK(vp, LK_SHARED);
3086 if (error)
3087 return (error);
3088 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
3089 size = VTONFS(vp)->n_size;
3090 NFSVOPUNLOCK(vp, 0);
3091 error = lf_advlockasync(ap, &(vp->v_lockf), size);
3092 } else {
3093 NFSVOPUNLOCK(vp, 0);
3094 error = EOPNOTSUPP;
3095 }
3096 return (error);
3097}
3098
3099/*
3100 * Print out the contents of an nfsnode.
3101 */
3102static int
3103nfs_print(struct vop_print_args *ap)
3104{
3105 struct vnode *vp = ap->a_vp;
3106 struct nfsnode *np = VTONFS(vp);
3107
3108 ncl_printf("\tfileid %ld fsid 0x%x",
3109 np->n_vattr.na_fileid, np->n_vattr.na_fsid);
3110 if (vp->v_type == VFIFO)
3111 fifo_printinfo(vp);
3112 printf("\n");
3113 return (0);
3114}
3115
3116/*
3117 * This is the "real" nfs::bwrite(struct buf*).
3118 * We set B_CACHE if this is a VMIO buffer.
3119 */
3120int
3121ncl_writebp(struct buf *bp, int force __unused, struct thread *td)
3122{
3123 int s;
3124 int oldflags = bp->b_flags;
3125#if 0
3126 int retv = 1;
3127 off_t off;
3128#endif
3129
3130 BUF_ASSERT_HELD(bp);
3131
3132 if (bp->b_flags & B_INVAL) {
3133 brelse(bp);
3134 return(0);
3135 }
3136
3137 bp->b_flags |= B_CACHE;
3138
3139 /*
3140 * Undirty the bp. We will redirty it later if the I/O fails.
3141 */
3142
3143 s = splbio();
3144 bundirty(bp);
3145 bp->b_flags &= ~B_DONE;
3146 bp->b_ioflags &= ~BIO_ERROR;
3147 bp->b_iocmd = BIO_WRITE;
3148
3149 bufobj_wref(bp->b_bufobj);
3150 curthread->td_ru.ru_oublock++;
3151 splx(s);
3152
3153 /*
3154 * Note: to avoid loopback deadlocks, we do not
3155 * assign b_runningbufspace.
3156 */
3157 vfs_busy_pages(bp, 1);
3158
3159 BUF_KERNPROC(bp);
3160 bp->b_iooffset = dbtob(bp->b_blkno);
3161 bstrategy(bp);
3162
3163	if ((oldflags & B_ASYNC) == 0) {
3164 int rtval = bufwait(bp);
3165
3166 if (oldflags & B_DELWRI) {
3167 s = splbio();
3168 reassignbuf(bp);
3169 splx(s);
3170 }
3171 brelse(bp);
3172 return (rtval);
3173 }
3174
3175 return (0);
3176}
3177
3178/*
3179 * nfs special file access vnode op.
3180 * Essentially just get vattr and then imitate vaccess() since the device is
3181 * local to the client.
3182 */
3183static int
3184nfsspec_access(struct vop_access_args *ap)
3185{
3186 struct vattr *vap;
3187 struct ucred *cred = ap->a_cred;
3188 struct vnode *vp = ap->a_vp;
3189 accmode_t accmode = ap->a_accmode;
3190 struct vattr vattr;
3191 int error;
3192
3193 /*
3194	 * Disallow write attempts on filesystems mounted read-only,
3195 * unless the file is a socket, fifo, or a block or character
3196 * device resident on the filesystem.
3197 */
3198 if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3199 switch (vp->v_type) {
3200 case VREG:
3201 case VDIR:
3202 case VLNK:
3203 return (EROFS);
3204 default:
3205 break;
3206 }
3207 }
3208 vap = &vattr;
3209 error = VOP_GETATTR(vp, vap, cred);
3210 if (error)
3211 goto out;
3212 error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
3213 accmode, cred, NULL);
3214out:
3215	return (error);
3216}
3217
3218/*
3219 * Read wrapper for fifos.
3220 */
3221static int
3222nfsfifo_read(struct vop_read_args *ap)
3223{
3224 struct nfsnode *np = VTONFS(ap->a_vp);
3225 int error;
3226
3227 /*
3228 * Set access flag.
3229 */
3230 mtx_lock(&np->n_mtx);
3231 np->n_flag |= NACC;
3232 getnanotime(&np->n_atim);
3233 mtx_unlock(&np->n_mtx);
3234 error = fifo_specops.vop_read(ap);
3235	return (error);
3236}
3237
3238/*
3239 * Write wrapper for fifos.
3240 */
3241static int
3242nfsfifo_write(struct vop_write_args *ap)
3243{
3244 struct nfsnode *np = VTONFS(ap->a_vp);
3245
3246 /*
3247 * Set update flag.
3248 */
3249 mtx_lock(&np->n_mtx);
3250 np->n_flag |= NUPD;
3251 getnanotime(&np->n_mtim);
3252 mtx_unlock(&np->n_mtx);
3253 return(fifo_specops.vop_write(ap));
3254}
3255
3256/*
3257 * Close wrapper for fifos.
3258 *
3259 * Update the times on the nfsnode and then do the fifo close.
3260 */
3261static int
3262nfsfifo_close(struct vop_close_args *ap)
3263{
3264 struct vnode *vp = ap->a_vp;
3265 struct nfsnode *np = VTONFS(vp);
3266 struct vattr vattr;
3267 struct timespec ts;
3268
3269 mtx_lock(&np->n_mtx);
3270 if (np->n_flag & (NACC | NUPD)) {
3271 getnanotime(&ts);
3272 if (np->n_flag & NACC)
3273 np->n_atim = ts;
3274 if (np->n_flag & NUPD)
3275 np->n_mtim = ts;
3276 np->n_flag |= NCHG;
3277 if (vrefcnt(vp) == 1 &&
3278 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3279 VATTR_NULL(&vattr);
3280 if (np->n_flag & NACC)
3281 vattr.va_atime = np->n_atim;
3282 if (np->n_flag & NUPD)
3283 vattr.va_mtime = np->n_mtim;
3284 mtx_unlock(&np->n_mtx);
3285 (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3286 goto out;
3287 }
3288 }
3289 mtx_unlock(&np->n_mtx);
3290out:
3291 return (fifo_specops.vop_close(ap));
3292}
3293
3294/*
3295 * Just call ncl_writebp() with the force argument set to 1.
3296 *
3297 * NOTE: B_DONE may or may not be set in a_bp on call.
3298 */
3299static int
3300nfs_bwrite(struct buf *bp)
3301{
3302
3303 return (ncl_writebp(bp, 1, curthread));
3304}
3305
3306struct buf_ops buf_ops_newnfs = {
3307 .bop_name = "buf_ops_nfs",
3308 .bop_write = nfs_bwrite,
3309 .bop_strategy = bufstrategy,
3310 .bop_sync = bufsync,
3311 .bop_bdflush = bufbdflush,
3312};
3313
3314/*
3315 * Cloned from vop_stdlock(), and then the ugly hack added.
3316 */
3317static int
3318nfs_lock1(struct vop_lock1_args *ap)
3319{
3320 struct vnode *vp = ap->a_vp;
3321 int error = 0;
3322
3323 /*
3324 * Since vfs_hash_get() calls vget() and it will no longer work
3325 * for FreeBSD8 with flags == 0, I can only think of this horrible
3326 * hack to work around it. I call vfs_hash_get() with LK_EXCLOTHER
3327 * and then handle it here. All I want for this case is a v_usecount
3328 * on the vnode to use for recovery, while another thread might
3329 * hold a lock on the vnode. I have the other threads blocked, so
3330 * there isn't any race problem.
3331 */
3332 if ((ap->a_flags & LK_TYPE_MASK) == LK_EXCLOTHER) {
3333 if ((ap->a_flags & LK_INTERLOCK) == 0)
3334 panic("ncllock1");
3335 if ((vp->v_iflag & VI_DOOMED))
3336 error = ENOENT;
3337 VI_UNLOCK(vp);
3338 return (error);
3339 }
3340 return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
3341 LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
3342 ap->a_line));
3343}
3344
3345static int
3346nfs_getacl(struct vop_getacl_args *ap)
3347{
3348 int error;
3349
3350 if (ap->a_type != ACL_TYPE_NFS4)
3351 return (EOPNOTSUPP);
3352 error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
3353 NULL);
3354 if (error > NFSERR_STALE) {
3355 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
3356 error = EPERM;
3357 }
3358 return (error);
3359}
3360
3361static int
3362nfs_setacl(struct vop_setacl_args *ap)
3363{
3364 int error;
3365
3366 if (ap->a_type != ACL_TYPE_NFS4)
3367 return (EOPNOTSUPP);
3368 error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
3369 NULL);
3370 if (error > NFSERR_STALE) {
3371 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
3372 error = EPERM;
3373 }
3374 return (error);
3375}
3376
3377/*
3378 * Return POSIX pathconf information applicable to nfs filesystems.
3379 */
3380static int
3381nfs_pathconf(struct vop_pathconf_args *ap)
3382{
3383 struct nfsv3_pathconf pc;
3384 struct nfsvattr nfsva;
3385 struct vnode *vp = ap->a_vp;
3386 struct thread *td = curthread;
3387 int attrflag, error;
3388
3389 if (NFS_ISV4(vp) || (NFS_ISV3(vp) && (ap->a_name == _PC_LINK_MAX ||
3390 ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED ||
3391 ap->a_name == _PC_NO_TRUNC))) {
3392 /*
3393 * Since only the above 4 a_names are returned by the NFSv3
3394 * Pathconf RPC, there is no point in doing it for others.
3395 */
3396 error = nfsrpc_pathconf(vp, &pc, td->td_ucred, td, &nfsva,
3397 &attrflag, NULL);
3398 if (attrflag != 0)
3399 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
3400 1);
3401 if (error != 0)
3402 return (error);
3403 } else {
3404 /*
3405 * For NFSv2 (or NFSv3 when not one of the above 4 a_names),
3406 * just fake them.
3407 */
3408 pc.pc_linkmax = LINK_MAX;
3409 pc.pc_namemax = NFS_MAXNAMLEN;
3410 pc.pc_notrunc = 1;
3411 pc.pc_chownrestricted = 1;
3412 pc.pc_caseinsensitive = 0;
3413 pc.pc_casepreserving = 1;
3414 error = 0;
3415 }
3416 switch (ap->a_name) {
3417 case _PC_LINK_MAX:
3418 *ap->a_retval = pc.pc_linkmax;
3419 break;
3420 case _PC_NAME_MAX:
3421 *ap->a_retval = pc.pc_namemax;
3422 break;
3423 case _PC_PATH_MAX:
3424 *ap->a_retval = PATH_MAX;
3425 break;
3426 case _PC_PIPE_BUF:
3427 *ap->a_retval = PIPE_BUF;
3428 break;
3429 case _PC_CHOWN_RESTRICTED:
3430 *ap->a_retval = pc.pc_chownrestricted;
3431 break;
3432 case _PC_NO_TRUNC:
3433 *ap->a_retval = pc.pc_notrunc;
3434 break;
3435 case _PC_ACL_EXTENDED:
3436 *ap->a_retval = 0;
3437 break;
3438 case _PC_ACL_NFS4:
3439 if (NFS_ISV4(vp) && nfsrv_useacl != 0 && attrflag != 0 &&
3440 NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL))
3441 *ap->a_retval = 1;
3442 else
3443 *ap->a_retval = 0;
3444 break;
3445 case _PC_ACL_PATH_MAX:
3446 if (NFS_ISV4(vp))
3447 *ap->a_retval = ACL_MAX_ENTRIES;
3448 else
3449 *ap->a_retval = 3;
3450 break;
3451 case _PC_MAC_PRESENT:
3452 *ap->a_retval = 0;
3453 break;
3454 case _PC_ASYNC_IO:
3455 /* _PC_ASYNC_IO should have been handled by upper layers. */
3456 KASSERT(0, ("_PC_ASYNC_IO should not get here"));
3457 error = EINVAL;
3458 break;
3459 case _PC_PRIO_IO:
3460 *ap->a_retval = 0;
3461 break;
3462 case _PC_SYNC_IO:
3463 *ap->a_retval = 0;
3464 break;
3465 case _PC_ALLOC_SIZE_MIN:
3466 *ap->a_retval = vp->v_mount->mnt_stat.f_bsize;
3467 break;
3468 case _PC_FILESIZEBITS:
3469 if (NFS_ISV34(vp))
3470 *ap->a_retval = 64;
3471 else
3472 *ap->a_retval = 32;
3473 break;
3474 case _PC_REC_INCR_XFER_SIZE:
3475 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
3476 break;
3477 case _PC_REC_MAX_XFER_SIZE:
3478 *ap->a_retval = -1; /* means ``unlimited'' */
3479 break;
3480 case _PC_REC_MIN_XFER_SIZE:
3481 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
3482 break;
3483 case _PC_REC_XFER_ALIGN:
3484 *ap->a_retval = PAGE_SIZE;
3485 break;
3486 case _PC_SYMLINK_MAX:
3487 *ap->a_retval = NFS_MAXPATHLEN;
3488 break;
3489
3490 default:
3491 error = EINVAL;
3492 break;
3493 }
3494 return (error);
3495}
3496