nfs_clnode.c revision 302210
1/*- 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 *
 * from nfs_node.c	8.6 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/fs/nfsclient/nfs_clnode.c 302210 2016-06-26 14:18:28Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfs_kdtrace.h>

#include <nfs/nfs_lock.h>

extern struct vop_vector newnfs_vnodeops;
extern struct buf_ops buf_ops_newnfs;
MALLOC_DECLARE(M_NEWNFSREQ);

/* UMA zone from which all nfsnode structures are allocated. */
uma_zone_t newnfsnode_zone;

/* Tag passed to getnewvnode() for NFS client vnodes. */
const char nfs_vnode_tag[] = "nfs";

static void nfs_freesillyrename(void *arg, __unused int pending);

/*
 * Create the UMA zone used for nfsnode allocation.  Called once when the
 * NFS client is initialized.
 */
void
ncl_nhinit(void)
{

	newnfsnode_zone = uma_zcreate("NCLNODE", sizeof(struct nfsnode), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

/*
 * Destroy the nfsnode zone.  Called when the NFS client is torn down.
 */
void
ncl_nhuninit(void)
{
	uma_zdestroy(newnfsnode_zone);
}

/*
 * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
 * function is going to be used to get Regular Files, code must be added
 * to fill in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	/* Hash the file handle bytes to pick the vfs_hash bucket. */
	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	/*
	 * Build a temporary file handle key and probe the vfs hash for an
	 * existing vnode with the same handle; the key is freed again
	 * regardless of the outcome.
	 */
	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	FREE(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		/* Found an existing, locked vnode for this handle. */
		*npp = VTONFS(nvp);
		return (0);
	}
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode(nfs_vnode_tag, mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert
	 * happened to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/*
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct
	 */
	if ((fhsize == nmp->nm_fhsize) &&
	    !bcmp(fhp, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}

	/* Attach a private copy of the file handle to the new nfsnode. */
	MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		/*
		 * NOTE(review): only the nfsnode-side state is released
		 * here; insmntque() is presumed to dispose of the vnode
		 * itself on failure -- confirm against vfs_subr.c.
		 */
		*npp = NULL;
		FREE((caddr_t)np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		/* Lost a create race: return the winner's nfsnode. */
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}

/*
 * Do the vrele(sp->s_dvp) as a separate task in order to avoid a
 * deadlock because of a LOR when vrele() locks the directory vnode.
 */
static void
nfs_freesillyrename(void *arg, __unused int pending)
{
	struct sillyrename *sp;

	sp = arg;
	vrele(sp->s_dvp);
	free(sp, M_NEWNFSREQ);
}

/*
 * If this vnode (a non-directory) has a sillyrename pending, remove the
 * silly file on the server and hand the final vrele() of the directory
 * vnode to the thread taskqueue (see nfs_freesillyrename() above).
 * Called with the vnode exclusively locked and n_mtx held; n_mtx is
 * dropped and reacquired around the buffer flush and remove RPC.
 */
static void
ncl_releasesillyrename(struct vnode *vp, struct thread *td)
{
	struct nfsnode *np;
	struct sillyrename *sp;

	ASSERT_VOP_ELOCKED(vp, "releasesillyrename");
	np = VTONFS(vp);
	mtx_assert(&np->n_mtx, MA_OWNED);
	/* Directories never carry a sillyrename. */
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp != NULL) {
		mtx_unlock(&np->n_mtx);
		(void) ncl_vinvalbuf(vp, 0, td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		ncl_removeit(sp, vp);
		crfree(sp->s_cred);
		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
		mtx_lock(&np->n_mtx);
	}
}

/*
 * VOP_INACTIVE for the NFS client: perform any delayed NFSv4 Close,
 * dispose of a pending sillyrename, and clear per-use node flags.
 */
int
ncl_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np;
	boolean_t retv;

	if (NFS_ISV4(vp) && vp->v_type == VREG) {
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until now. Any dirty
		 * buffers/pages must be flushed before the close, so that the
		 * stateid is available for the writes.
		 */
		if (vp->v_object != NULL) {
			VM_OBJECT_WLOCK(vp->v_object);
			retv = vm_object_page_clean(vp->v_object, 0, 0,
			    OBJPC_SYNC);
			VM_OBJECT_WUNLOCK(vp->v_object);
		} else
			retv = TRUE;
		/* Only close once the page clean fully succeeded. */
		if (retv == TRUE) {
			(void)ncl_flush(vp, MNT_WAIT, NULL, ap->a_td, 1, 0);
			(void)nfsrpc_close(vp, 1, ap->a_td);
		}
	}

	np = VTONFS(vp);
	mtx_lock(&np->n_mtx);
	ncl_releasesillyrename(vp, ap->a_td);

	/*
	 * NMODIFIED means that there might be dirty/stale buffers
	 * associated with the NFS vnode. None of the other flags are
	 * meaningful after the vnode is unused.
	 */
	np->n_flag &= NMODIFIED;
	mtx_unlock(&np->n_mtx);
	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
ncl_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p != NULL)
		nfs_reclaim_p(ap);

	mtx_lock(&np->n_mtx);
	ncl_releasesillyrename(vp, ap->a_td);
	mtx_unlock(&np->n_mtx);

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);

	if (NFS_ISV4(vp) && vp->v_type == VREG)
		/*
		 * We can now safely close any remaining NFSv4 Opens for
		 * this file. Most opens will have already been closed by
		 * ncl_inactive(), but there are cases where it is not
		 * called, so we need to do it again here.
		 */
		(void) nfsrpc_close(vp, 1, ap->a_td);

	vfs_hash_remove(vp);

	/*
	 * Call nfscl_reclaimnode() to save attributes in the delegation,
	 * as required.
	 */
	if (vp->v_type == VREG)
		nfscl_reclaimnode(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			FREE((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	FREE((caddr_t)np->n_fhp, M_NFSFH);
	if (np->n_v4 != NULL)
		FREE((caddr_t)np->n_v4, M_NFSV4NODE);
	/* n_mtx was initialized unconditionally in ncl_nget()/nfscl_nget(). */
	mtx_destroy(&np->n_mtx);
	uma_zfree(newnfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}

/*
 * Invalidate both the access and attribute caches for this vnode.
340 */ 341void 342ncl_invalcaches(struct vnode *vp) 343{ 344 struct nfsnode *np = VTONFS(vp); 345 int i; 346 347 mtx_lock(&np->n_mtx); 348 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) 349 np->n_accesscache[i].stamp = 0; 350 KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp); 351 np->n_attrstamp = 0; 352 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 353 mtx_unlock(&np->n_mtx); 354} 355