/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: releng/10.3/sys/fs/nullfs/null_vnops.c 295970 2016-02-24 13:48:40Z kib $
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
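 *
 * As a rough illustration of the two techniques (a sketch only; the
 * "example" names below, including example_bypass() and
 * EXAMPLETOLOWERVP(), stand for a new layer's renamed copies of
 * null_bypass() and NULLVPTOLOWERVP()):
 *
 * The first technique, reusing the layer's bypass routine to do the
 * argument mapping and then post-processing the result:
 *
 *	static int
 *	example_getattr(struct vop_getattr_args *ap)
 *	{
 *		int error;
 *
 *		error = example_bypass((struct vop_generic_args *)ap);
 *		if (error != 0)
 *			return (error);
 *		ap->a_vap->va_fsid =
 *		    ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
 *		return (0);
 *	}
 *
 * The second technique, mapping the vnode argument manually and
 * invoking the operation directly on the lower layer via the
 * VOP_*() interface:
 *
 *	static int
 *	example_fsync(struct vop_fsync_args *ap)
 *	{
 *
 *		return (VOP_FSYNC(EXAMPLETOLOWERVP(ap->a_vp),
 *		    ap->a_waitfor, ap->a_td));
 *	}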
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
	}

out:
	return (error);
}

static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	KASSERT(vp->v_writecount + ap->a_inc >= 0, ("wrong writecount inc"));
	if (vp->v_writecount > 0 && vp->v_writecount + ap->a_inc == 0)
		error = VOP_ADD_WRITECOUNT(lvp, -1);
	else if (vp->v_writecount == 0 && vp->v_writecount + ap->a_inc > 0)
		error = VOP_ADD_WRITECOUNT(lvp, 1);
	else
		error = 0;
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	struct mount *mp;
	int error;

	mp = dvp->v_mount;
	if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we make
	 * a direct call here to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	KASSERT((ldvp->v_vflag & VV_ROOT) == 0 ||
	    ((dvp->v_vflag & VV_ROOT) != 0 && (flags & ISDOTDOT) == 0),
	    ("ldvp %p fl %#x dvp %p fl %#x flags %#x", ldvp, ldvp->v_vflag,
	    dvp, dvp->v_vflag, flags));

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation, and we need ldvp to move our lock
	 * from ldvp to dvp.
	 */
	vhold(ldvp);

	error = VOP_LOOKUP(ldvp, &lvp, cnp);

	/*
	 * VOP_LOOKUP() on the lower vnode may unlock ldvp, which allows
	 * dvp to be reclaimed due to the shared v_vnlock.  Check for the
	 * doomed state and return an error.
	 */
	if ((error == 0 || error == EJUSTRETURN) &&
	    (dvp->v_iflag & VI_DOOMED) != 0) {
		error = ENOENT;
		if (lvp != NULL)
			vput(lvp);

		/*
		 * If vgone() reclaimed dvp before curthread
		 * relocked ldvp, the locks of dvp and ldvp are no
		 * longer shared.
		 * In this case, the relock of ldvp in the
		 * lower fs VOP_LOOKUP() does not restore the locking
		 * state of dvp.  Compensate for this by unlocking
		 * ldvp and locking dvp, which is also correct if the
		 * locks are still shared.
		 */
		VOP_UNLOCK(ldvp, 0);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	}
	vdrop(ldvp);

	if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 &&
	    (mp->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(mp, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0)
		vp->v_object = ldvp->v_object;
	return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handle access checks locally to disallow write access if mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Increasing the refcount of the lower vnode is needed at least for the
 * case when the lower FS is NFS, to do a sillyrename if the file is in
 * use.  Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp, *vp;

	vp = ap->a_vp;
	if (vrefcnt(vp) > 1) {
		lvp = NULLVPTOLOWERVP(vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	VTONULL(vp)->null_flags |= NULLV_DROP;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to prevent moving files from the null FS to the
 * lower FS.  It is not clear why this is disallowed; possibly it
 * should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;
	struct null_node *tnn;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	if (tvp != NULL) {
		tnn = VTONULL(tvp);
		tnn->null_flags |= NULLV_DROP;
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_rmdir(struct vop_rmdir_args *ap)
{

	VTONULL(ap->a_vp)->null_flags |= NULLV_DROP;
	return (null_bypass(&ap->a_gen));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;


	if ((flags & LK_INTERLOCK) == 0) {
		VI_LOCK(vp);
		ap->a_flags = flags |= LK_INTERLOCK;
	}
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		VI_UNLOCK(vp);
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdl(lvp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode
		 * lock from the one in lowervp to v_lock in our own
		 * vnode structure.  Handle this case by reacquiring the
		 * correct lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp, 0);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else
		error = vop_stdlock(ap);

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	int mtxlkflag = 0;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) != 0)
		mtxlkflag = 1;
	else if (mtx_owned(VI_MTX(vp)) == 0) {
		VI_LOCK(vp);
		mtxlkflag = 2;
	}
	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		flags |= LK_INTERLOCK;
		vholdl(lvp);
		VI_UNLOCK(vp);
		error = VOP_UNLOCK(lvp, flags);
		vdrop(lvp);
		if (mtxlkflag == 0)
			VI_LOCK(vp);
	} else {
		if (mtxlkflag == 2)
			VI_UNLOCK(vp);
		error = vop_stdunlock(ap);
	}

	return (error);
}

/*
 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
 * since the reference count on the lower vnode is not related to
 * ours.
 */
static int
null_inactive(struct vop_inactive_args *ap __unused)
{
	struct vnode *vp, *lvp;
	struct null_node *xp;
	struct mount *mp;
	struct null_mount *xmp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lvp = NULLVPTOLOWERVP(vp);
	mp = vp->v_mount;
	xmp = MOUNTTONULLMOUNT(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
	    (xp->null_flags & NULLV_DROP) != 0 ||
	    (lvp->v_vflag & VV_NOSYNC) != 0) {
		/*
		 * If this is the last reference and caching of the
		 * nullfs vnodes is not enabled, or the lower vnode is
		 * deleted, then free up the vnode so as not to tie up
		 * the lower vnodes.
		 */
		vp->v_object = NULL;
		vrecycle(vp);
	}
	return (0);
}

/*
 * Now, the nullfs vnode and, due to the sharing lock, the lower
 * vnode, are exclusively locked, and we shall destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;
	VI_UNLOCK(vp);

	/*
	 * If we were opened for write, we leased one write reference
	 * to the lower vnode.  If this is a reclamation due to the
	 * forced unmount, undo the reference now.
	 */
	if (vp->v_writecount > 0)
		VOP_ADD_WRITECOUNT(lowervp, -1);
	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}

/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
		VI_UNLOCK(vp);
		vholdl(lowervp);
		VI_UNLOCK(lowervp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return VOP_VPTOFH(lvp, ap->a_fhp);
}

static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct ucred *cred = ap->a_cred;
	int error, locked;

	if (vp->v_type == VDIR)
		return (vop_stdvptocnp(ap));

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	vhold(lvp);
	VOP_UNLOCK(vp, 0); /* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	vref(lvp);
	error = vn_vptocnp(&ldvp, cred, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		return (ENOENT);
	}

	/*
	 * Exclusive lock is required by insmntque1 call in
	 * null_nodeget()
	 */
	error = vn_lock(ldvp, LK_EXCLUSIVE);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		return (ENOENT);
	}
	vref(ldvp);
	error = null_nodeget(vp->v_mount, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp, 0); /* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_getattr =		null_getattr,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_rmdir =		null_rmdir,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
	.vop_add_writecount =	null_add_writecount,
};
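
/*
 * As a rough sketch of how a layer derived from this one would wire
 * itself up (the "example" names are hypothetical; a real layer would
 * be a renamed copy of the whole nullfs source, including its mount
 * and node-management code):
 *
 *	struct vop_vector example_vnodeops = {
 *		.vop_bypass =		example_bypass,
 *		.vop_getattr =		example_getattr,
 *		.vop_lock1 =		example_lock,
 *		.vop_unlock =		example_unlock,
 *		.vop_inactive =		example_inactive,
 *		.vop_reclaim =		example_reclaim,
 *		.vop_print =		example_print,
 *	};
 *
 * Only the handful of operations with layer-specific behavior need
 * their own implementations; everything else falls through to the
 * bypass routine and thus to the lower layer.
 */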