null_vnops.c revision 143630
/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: head/sys/fs/nullfs/null_vnops.c 143630 2005-03-15 11:28:45Z jeff $
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its "null-nodes" stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.
 * Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this man page examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
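 *
 * As a rough sketch of the second technique (assuming the null-node
 * macros used in this file and the VOP_GETATTR argument list of this
 * era; lvp, vap, cred and td are placeholder locals):
 *
 *	lvp = NULLVPTOLOWERVP(vp);
 *	error = VOP_GETATTR(lvp, vap, cred, td);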
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), LK_THISLAYER, curthread);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct thread *td = cnp->cn_thread;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	int error;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we'll do
	 * a direct call to reduce overhead
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	error = VOP_LOOKUP(ldvp, &lvp, cnp);
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/*
	 * Rely only on the PDIRUNLOCK flag which should be carefully
	 * tracked by underlying filesystem.
	 */
	if ((cnp->cn_flags & PDIRUNLOCK) && dvp->v_vnlock != ldvp->v_vnlock)
		VOP_UNLOCK(dvp, LK_THISLAYER, td);
	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(dvp->v_mount, lvp, &vp);
			if (error) {
				/* XXX Cleanup needed... */
				panic("null_nodeget failed");
			}
			*ap->a_vpp = vp;
		}
	}
	return (error);
}

static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0)
		vp->v_object = ldvp->v_object;
	return (retval);
}

/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handle to disallow write access if mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle this to eliminate null FS to lower FS
 * file moving. Don't know why we don't allow this,
 * possibly we should.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct thread *td = ap->a_td;
	struct vnode *lvp;
	int error;
	struct null_node *nn;

	if (flags & LK_THISLAYER) {
		if (vp->v_vnlock != NULL) {
			/* lock is shared across layers */
			if (flags & LK_INTERLOCK)
				mtx_unlock(&vp->v_interlock);
			return 0;
		}
		error = lockmgr(&vp->v_lock, flags & ~LK_THISLAYER,
		    &vp->v_interlock, td);
		return (error);
	}

	if ((flags & LK_INTERLOCK) == 0) {
		VI_LOCK(vp);
		flags |= LK_INTERLOCK;
	}
	if (vp->v_vnlock != NULL) {
		/*
		 * The lower level has exported a struct lock to us. Use
		 * it so that all vnodes in the stack lock and unlock
		 * simultaneously. Note: we don't DRAIN the lock as DRAIN
		 * decommissions the lock - just because our vnode is
		 * going away doesn't mean the struct lock below us is.
		 * LK_EXCLUSIVE is fine.
		 */
		nn = VTONULL(vp);
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
			/*
			 * Emulate lock draining by waiting for all other
			 * pending locks to complete.  Afterwards the
			 * lockmgr call might block, but no other threads
			 * will attempt to use this nullfs vnode due to the
			 * VI_DOOMED flag.
			 */
			while (nn->null_pending_locks > 0) {
				nn->null_drain_wakeup = 1;
				msleep(&nn->null_pending_locks,
				       VI_MTX(vp),
				       PVFS,
				       "nuldr", 0);
			}
			error = lockmgr(vp->v_vnlock,
					(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
					VI_MTX(vp), td);
			return error;
		}
		nn->null_pending_locks++;
		error = lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td);
		VI_LOCK(vp);
		/*
		 * If we're called from vrele then v_usecount can have been 0
		 * and another process might have initiated a recycle
		 * operation.  When that happens, just back out.
		 */
		if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0 &&
		    td != vp->v_vxthread) {
			lockmgr(vp->v_vnlock,
				(flags & ~LK_TYPE_MASK) | LK_RELEASE,
				VI_MTX(vp), td);
			VI_LOCK(vp);
			error = ENOENT;
		}
		nn->null_pending_locks--;
		/*
		 * Wake up the process draining the vnode after all
		 * pending lock attempts have failed.
		 */
		if (nn->null_pending_locks == 0 &&
		    nn->null_drain_wakeup != 0) {
			nn->null_drain_wakeup = 0;
			wakeup(&nn->null_pending_locks);
		}
		VI_UNLOCK(vp);
		return error;
	} else {
		/*
		 * To prevent race conditions involving doing a lookup
		 * on "..", we have to lock the lower node, then lock our
		 * node. Most of the time it won't matter that we lock our
		 * node (as any locking would need the lower one locked
		 * first). But we can LK_DRAIN the upper lock as a step
		 * towards decommissioning it.
		 */
		lvp = NULLVPTOLOWERVP(vp);
		if (lvp == NULL)
			return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, td));
		if (flags & LK_INTERLOCK) {
			mtx_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			error = VOP_LOCK(lvp,
				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE, td);
		} else
			error = VOP_LOCK(lvp, flags, td);
		if (error)
			return (error);
		error = lockmgr(&vp->v_lock, flags, &vp->v_interlock, td);
		if (error)
			VOP_UNLOCK(lvp, 0, td);
		return (error);
	}
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct thread *td = ap->a_td;
	struct vnode *lvp;

	if (vp->v_vnlock != NULL) {
		if (flags & LK_THISLAYER)
			return 0;	/* the lock is shared across layers */
		flags &= ~LK_THISLAYER;
		return (lockmgr(vp->v_vnlock, flags | LK_RELEASE,
			&vp->v_interlock, td));
	}
	lvp = NULLVPTOLOWERVP(vp);
	if (lvp == NULL)
		return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
	if ((flags & LK_THISLAYER) == 0) {
		if (flags & LK_INTERLOCK) {
			mtx_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, td);
	} else
		flags &= ~LK_THISLAYER;
	return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
}

static int
null_islocked(struct vop_islocked_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	if (vp->v_vnlock != NULL)
		return (lockstatus(vp->v_vnlock, td));
	return (lockstatus(&vp->v_lock, td));
}

/*
 * There is no way to tell that someone issued a remove/rmdir operation
 * on the underlying filesystem.  For now we just have to release lowervp
 * as soon as possible.
 *
 * Note, we can't release any resources nor remove the vnode from the hash
 * before the appropriate VXLOCK stuff is done, because another process can
 * find this vnode in the hash during inactivation and may be sitting in
 * vget() waiting for null_inactive to unlock the vnode.  Thus we will do
 * all of that in VOP_RECLAIM.
 */
static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	vp->v_object = NULL;

	/*
	 * If this is the last reference, then free up the vnode
	 * so as not to tie up the lower vnodes.
	 */
	vrecycle(vp, td);

	return (0);
}

/*
 * Now, the VXLOCK is in force and we're free to destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;
	struct lock *vnlock;

	if (lowervp) {
		null_hashrem(xp);

		vrele(lowervp);
		vrele(lowervp);
	}

	vp->v_data = NULL;
	vp->v_object = NULL;
	vnlock = vp->v_vnlock;
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL, curthread);
	vp->v_vnlock = &vp->v_lock;
	transferlockers(vnlock, vp->v_vnlock);
	lockmgr(vnlock, LK_RELEASE, NULL, curthread);
	FREE(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,

	.vop_access =		null_access,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_getattr =		null_getattr,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		null_inactive,
	.vop_islocked =		null_islocked,
	.vop_lock =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_reclaim =		null_reclaim,
	.vop_rename =		null_rename,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
};
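
/*
 * A rough sketch (not compiled; the example_* names are hypothetical) of
 * the vop_vector a new layer derived from nullfs, as described under
 * "CREATING OTHER FILE SYSTEM LAYERS" above, might export: its own bypass
 * routine as the default plus explicit entries for the few operations the
 * layer treats specially.
 */
#if 0
struct vop_vector example_vnodeops = {
	.vop_bypass =		example_bypass,		/* pass everything else down */
	.vop_getattr =		example_getattr,	/* e.g. rewrite va_fsid */
	.vop_inactive =		example_inactive,	/* release per-layer data */
	.vop_reclaim =		example_reclaim,
	.vop_lock =		example_lock,		/* layer-aware locking */
	.vop_unlock =		example_unlock,
};
#endif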