Lines matching refs:vp

108 static void	delmntque(struct vnode *vp);
112 static int vtryrecycle(struct vnode *vp, bool isvnlru);
115 static void vn_seqc_write_end_free(struct vnode *vp);
118 static void vdropl_recycle(struct vnode *vp);
119 static void vdrop_recycle(struct vnode *vp);
124 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
319 static void vdbatch_dequeue(struct vnode *vp);
428 struct vnode *vp;
450 vp = nd.ni_vp;
452 if (VN_IS_DOOMED(vp)) {
463 vgone(vp);
465 vput(vp);
476 struct vnode *vp;
490 vp = fp->f_vnode;
492 error = vn_lock(vp, LK_EXCLUSIVE);
496 vgone(vp);
497 VOP_UNLOCK(vp);
511 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
552 struct vnode *vp;
554 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
555 vp->v_type = VMARKER;
556 vp->v_mount = mp;
558 return (vp);
562 vn_free_marker(struct vnode *vp)
565 MPASS(vp->v_type == VMARKER);
566 free(vp, M_VNODE_MARKER);
623 struct vnode *vp;
625 vp = mem;
626 bzero(vp, size);
630 vp->v_vnlock = &vp->v_lock;
631 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
635 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
640 bufobj_init(&vp->v_bufobj, vp);
644 cache_vnode_init(vp);
648 rangelock_init(&vp->v_rl);
650 vp->v_dbatchcpu = NOCPU;
652 vp->v_state = VSTATE_DEAD;
657 vp->v_holdcnt = VHOLD_NO_SMR;
658 vp->v_type = VNON;
660 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
671 struct vnode *vp;
674 vp = mem;
675 vdbatch_dequeue(vp);
677 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
679 rangelock_destroy(&vp->v_rl);
680 lockdestroy(vp->v_vnlock);
681 mtx_destroy(&vp->v_interlock);
682 bo = &vp->v_bufobj;
1200 struct vnode *vp, *mvp;
1213 vp = mvp;
1215 vp = TAILQ_NEXT(vp, v_vnodelist);
1216 if (__predict_false(vp == NULL))
1219 if (__predict_false(vp->v_type == VMARKER))
1228 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
1229 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
1232 if (vp->v_type == VBAD || vp->v_type == VNON)
1235 object = atomic_load_ptr(&vp->v_object);
1247 if (!VI_TRYLOCK(vp))
1249 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
1250 VI_UNLOCK(vp);
1253 if (vp->v_mount == NULL) {
1254 VI_UNLOCK(vp);
1257 vholdl(vp);
1258 VI_UNLOCK(vp);
1260 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1263 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1264 vdrop_recycle(vp);
1267 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
1268 vdrop_recycle(vp);
1273 VI_LOCK(vp);
1274 if (vp->v_usecount > 0 ||
1275 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
1276 (vp->v_object != NULL && vp->v_object->handle == vp &&
1277 vp->v_object->resident_page_count > trigger)) {
1278 VOP_UNLOCK(vp);
1279 vdropl_recycle(vp);
1284 vgonel(vp);
1285 VOP_UNLOCK(vp);
1286 vdropl_recycle(vp);
1294 MPASS(vp->v_type != VMARKER);
1298 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1326 struct vnode *vp;
1340 vp = mvp;
1342 vp = TAILQ_NEXT(vp, v_vnodelist);
1343 if (__predict_false(vp == NULL)) {
1354 vp = mvp;
1367 if (__predict_false(vp->v_type == VMARKER))
1369 if (vp->v_holdcnt > 0)
1377 if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
1381 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
1384 if (!vhold_recycle_free(vp))
1387 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1410 vtryrecycle(vp, isvnlru);
1416 vp = mvp;
1863 vtryrecycle(struct vnode *vp, bool isvnlru)
1867 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1868 VNPASS(vp->v_holdcnt > 0, vp);
1873 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1875 "%s: impossible to recycle, vp %p lock is already held",
1876 __func__, vp);
1877 vdrop_recycle(vp);
1883 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
1884 VOP_UNLOCK(vp);
1887 __func__, vp);
1888 vdrop_recycle(vp);
1897 VI_LOCK(vp);
1898 if (vp->v_usecount) {
1899 VOP_UNLOCK(vp);
1900 vdropl_recycle(vp);
1904 __func__, vp);
1907 if (!VN_IS_DOOMED(vp)) {
1912 vgonel(vp);
1914 VOP_UNLOCK(vp);
1915 vdropl_recycle(vp);
2025 vn_free(struct vnode *vp)
2029 uma_zfree_smr(vnode_zone, vp);
2039 struct vnode *vp;
2051 vp = td->td_vp_reserved;
2054 vp = vn_alloc(mp);
2058 vn_set_state(vp, VSTATE_UNINITIALIZED);
2074 lo = &vp->v_vnlock->lock_object;
2087 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
2091 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
2092 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
2093 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
2094 vp->v_type = VNON;
2095 vp->v_op = vops;
2096 vp->v_irflag = 0;
2097 v_init_counters(vp);
2098 vn_seqc_init(vp);
2099 vp->v_bufobj.bo_ops = &buf_ops_bio;
2105 mac_vnode_init(vp);
2107 mac_vnode_associate_singlelabel(mp, vp);
2110 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
2119 vp->v_hash = (uintptr_t)vp >> vnsz2log;
2121 *vpp = vp;
2148 freevnode(struct vnode *vp)
2161 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
2165 vn_seqc_write_end_free(vp);
2167 bo = &vp->v_bufobj;
2168 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
2169 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp);
2170 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
2171 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
2172 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
2173 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
2174 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
2176 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
2177 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
2179 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
2181 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp,
2183 VI_UNLOCK(vp);
2184 cache_assert_no_entries(vp);
2187 mac_vnode_destroy(vp);
2189 if (vp->v_pollinfo != NULL) {
2195 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT);
2196 destroy_vpollinfo(vp->v_pollinfo);
2197 VOP_UNLOCK(vp);
2198 vp->v_pollinfo = NULL;
2200 vp->v_mountedhere = NULL;
2201 vp->v_unpcb = NULL;
2202 vp->v_rdev = NULL;
2203 vp->v_fifoinfo = NULL;
2204 vp->v_iflag = 0;
2205 vp->v_vflag = 0;
2207 vn_free(vp);
2214 delmntque(struct vnode *vp)
2218 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
2220 mp = vp->v_mount;
2222 VI_LOCK(vp);
2223 vp->v_mount = NULL;
2224 VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
2226 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2233 ASSERT_VI_LOCKED(vp, __func__);
2237 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr)
2240 KASSERT(vp->v_mount == NULL,
2242 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
2244 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
2261 VI_LOCK(vp);
2265 (vp->v_vflag & VV_FORCEINSMQ) == 0) {
2266 VI_UNLOCK(vp);
2269 vp->v_data = NULL;
2270 vp->v_op = &dead_vnodeops;
2271 vgone(vp);
2272 vput(vp);
2276 vp->v_mount = mp;
2278 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2279 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
2282 VI_UNLOCK(vp);
2293 insmntque(struct vnode *vp, struct mount *mp)
2295 return (insmntque1_int(vp, mp, true));
2299 insmntque1(struct vnode *vp, struct mount *mp)
2301 return (insmntque1_int(vp, mp, false));
2396 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
2399 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2400 ASSERT_VOP_LOCKED(vp, "vinvalbuf");
2401 if (vp->v_object != NULL && vp->v_object->handle != vp)
2403 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
2529 vtruncbuf(struct vnode *vp, off_t length, int blksize)
2535 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__,
2536 vp, blksize, (uintmax_t)length);
2543 ASSERT_VOP_LOCKED(vp, "vtruncbuf");
2545 bo = &vp->v_bufobj;
2549 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN)
2569 VNASSERT((bp->b_flags & B_DELWRI), vp,
2581 vnode_pager_setsize(vp, length);
2591 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
2597 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range");
2602 bo = &vp->v_bufobj;
2606 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN)
2610 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1));
2614 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
2620 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked");
2644 nbp->b_vp != vp ||
2667 (nbp->b_vp != vp) ||
2818 bgetvp(struct vnode *vp, struct buf *bp)
2823 bo = &vp->v_bufobj;
2827 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
2828 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
2835 bp->b_vp = vp;
2844 vhold(vp);
2862 struct vnode *vp;
2864 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2870 vp = bp->b_vp; /* XXX */
2884 vdrop(vp);
2941 struct vnode *vp;
2947 vp = bo2vnode(*bo);
2948 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
2956 vholdl(vp);
2958 VI_UNLOCK(vp);
2959 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2960 vdrop(vp);
2966 ("suspended mp syncing vp %p", vp));
2967 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2968 (void) VOP_FSYNC(vp, MNT_LAZY, td);
2969 VOP_UNLOCK(vp);
2982 vdrop(vp);
3193 struct vnode *vp;
3200 vp = bp->b_vp;
3206 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
3228 switch (vp->v_type) {
3272 v_init_counters(struct vnode *vp)
3275 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0,
3276 vp, ("%s called for an initialized vnode", __FUNCTION__));
3277 ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
3279 refcount_init(&vp->v_holdcnt, 1);
3280 refcount_init(&vp->v_usecount, 1);
3294 vget_prep_smr(struct vnode *vp)
3300 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
3303 if (vhold_smr(vp))
3312 vget_prep(struct vnode *vp)
3316 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
3319 vhold(vp);
3326 vget_abort(struct vnode *vp, enum vgetstate vs)
3331 vrele(vp);
3334 vdrop(vp);
3342 vget(struct vnode *vp, int flags)
3346 vs = vget_prep(vp);
3347 return (vget_finish(vp, flags, vs));
3351 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
3356 ASSERT_VI_LOCKED(vp, __func__);
3358 ASSERT_VI_UNLOCKED(vp, __func__);
3359 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3360 VNPASS(vp->v_holdcnt > 0, vp);
3361 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3363 error = vn_lock(vp, flags);
3365 vget_abort(vp, vs);
3367 vp);
3371 vget_finish_ref(vp, vs);
3376 vget_finish_ref(struct vnode *vp, enum vgetstate vs)
3380 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3381 VNPASS(vp->v_holdcnt > 0, vp);
3382 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3392 old = atomic_fetchadd_int(&vp->v_usecount, 1);
3393 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old));
3396 old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
3397 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
3399 refcount_release(&vp->v_holdcnt);
3405 vref(struct vnode *vp)
3409 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3410 vs = vget_prep(vp);
3411 vget_finish_ref(vp, vs);
3415 vrefact(struct vnode *vp)
3419 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3420 old = refcount_acquire(&vp->v_usecount);
3421 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old));
3425 vlazy(struct vnode *vp)
3429 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
3431 if ((vp->v_mflag & VMP_LAZYLIST) != 0)
3436 if (VN_IS_DOOMED(vp))
3438 mp = vp->v_mount;
3440 if ((vp->v_mflag & VMP_LAZYLIST) == 0) {
3441 vp->v_mflag |= VMP_LAZYLIST;
3442 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3449 vunlazy(struct vnode *vp)
3453 ASSERT_VI_LOCKED(vp, __func__);
3454 VNPASS(!VN_IS_DOOMED(vp), vp);
3456 mp = vp->v_mount;
3458 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
3465 if (vp->v_holdcnt == 0) {
3466 vp->v_mflag &= ~VMP_LAZYLIST;
3467 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3478 vunlazy_gone(struct vnode *vp)
3482 ASSERT_VOP_ELOCKED(vp, __func__);
3483 ASSERT_VI_LOCKED(vp, __func__);
3484 VNPASS(!VN_IS_DOOMED(vp), vp);
3486 if (vp->v_mflag & VMP_LAZYLIST) {
3487 mp = vp->v_mount;
3489 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
3490 vp->v_mflag &= ~VMP_LAZYLIST;
3491 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3498 vdefer_inactive(struct vnode *vp)
3501 ASSERT_VI_LOCKED(vp, __func__);
3502 VNPASS(vp->v_holdcnt > 0, vp);
3503 if (VN_IS_DOOMED(vp)) {
3504 vdropl(vp);
3507 if (vp->v_iflag & VI_DEFINACT) {
3508 VNPASS(vp->v_holdcnt > 1, vp);
3509 vdropl(vp);
3512 if (vp->v_usecount > 0) {
3513 vp->v_iflag &= ~VI_OWEINACT;
3514 vdropl(vp);
3517 vlazy(vp);
3518 vp->v_iflag |= VI_DEFINACT;
3519 VI_UNLOCK(vp);
3524 vdefer_inactive_unlocked(struct vnode *vp)
3527 VI_LOCK(vp);
3528 if ((vp->v_iflag & VI_OWEINACT) == 0) {
3529 vdropl(vp);
3532 vdefer_inactive(vp);
3554 vput_final(struct vnode *vp, enum vput_op func)
3559 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3560 VNPASS(vp->v_holdcnt > 0, vp);
3562 VI_LOCK(vp);
3568 if (vp->v_usecount > 0)
3575 if (VN_IS_DOOMED(vp))
3578 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0))
3581 if (vp->v_iflag & VI_DOINGINACT)
3590 vp->v_iflag |= VI_OWEINACT;
3595 switch (VOP_ISLOCKED(vp)) {
3601 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
3602 VI_LOCK(vp);
3616 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3617 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
3619 VI_LOCK(vp);
3623 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3624 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
3625 VI_LOCK(vp);
3631 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp,
3633 vp->v_vflag |= VV_UNREF;
3636 error = vinactive(vp);
3638 VOP_UNLOCK(vp);
3641 VOP_LOCK(vp, LK_EXCLUSIVE);
3644 vp->v_vflag &= ~VV_UNREF;
3645 vdropl(vp);
3647 vdefer_inactive(vp);
3652 VOP_UNLOCK(vp);
3653 vdropl(vp);
3670 vrele(struct vnode *vp)
3673 ASSERT_VI_UNLOCKED(vp, __func__);
3674 if (!refcount_release(&vp->v_usecount))
3676 vput_final(vp, VRELE);
3684 vput(struct vnode *vp)
3687 ASSERT_VOP_LOCKED(vp, __func__);
3688 ASSERT_VI_UNLOCKED(vp, __func__);
3689 if (!refcount_release(&vp->v_usecount)) {
3690 VOP_UNLOCK(vp);
3693 vput_final(vp, VPUT);
3701 vunref(struct vnode *vp)
3704 ASSERT_VOP_LOCKED(vp, __func__);
3705 ASSERT_VI_UNLOCKED(vp, __func__);
3706 if (!refcount_release(&vp->v_usecount))
3708 vput_final(vp, VUNREF);
3712 vhold(struct vnode *vp)
3716 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3717 old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3718 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3725 vholdnz(struct vnode *vp)
3728 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3730 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3731 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3734 atomic_add_int(&vp->v_holdcnt, 1);
3749 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3758 vhold_smr(struct vnode *vp)
3764 count = atomic_load_int(&vp->v_holdcnt);
3767 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3771 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3772 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3797 vhold_recycle_free(struct vnode *vp)
3803 count = atomic_load_int(&vp->v_holdcnt);
3806 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3810 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3814 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3824 struct vnode *vp;
3845 vp = vd->tab[i];
3847 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3848 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3849 MPASS(vp->v_dbatchcpu != NOCPU);
3850 vp->v_dbatchcpu = NOCPU;
3857 vp = vd->tab[i];
3859 MPASS(vp->v_dbatchcpu != NOCPU);
3860 vp->v_dbatchcpu = NOCPU;
3868 vdbatch_enqueue(struct vnode *vp)
3872 ASSERT_VI_LOCKED(vp, __func__);
3873 VNPASS(!VN_IS_DOOMED(vp), vp);
3875 if (vp->v_dbatchcpu != NOCPU) {
3876 VI_UNLOCK(vp);
3889 vp->v_dbatchcpu = curcpu;
3890 vd->tab[vd->index] = vp;
3892 VI_UNLOCK(vp);
3905 vdbatch_dequeue(struct vnode *vp)
3911 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp);
3913 cpu = vp->v_dbatchcpu;
3920 if (vd->tab[i] != vp)
3922 vp->v_dbatchcpu = NOCPU;
3932 MPASS(vp->v_dbatchcpu == NOCPU);
3945 vdropl_final(struct vnode *vp)
3948 ASSERT_VI_LOCKED(vp, __func__);
3949 VNPASS(VN_IS_DOOMED(vp), vp);
3956 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) {
3958 VI_UNLOCK(vp);
3968 freevnode(vp);
3972 vdrop(struct vnode *vp)
3975 ASSERT_VI_UNLOCKED(vp, __func__);
3976 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3977 if (refcount_release_if_not_last(&vp->v_holdcnt))
3979 VI_LOCK(vp);
3980 vdropl(vp);
3984 vdropl_impl(struct vnode *vp, bool enqueue)
3987 ASSERT_VI_LOCKED(vp, __func__);
3988 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3989 if (!refcount_release(&vp->v_holdcnt)) {
3990 VI_UNLOCK(vp);
3993 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp);
3994 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
3995 if (VN_IS_DOOMED(vp)) {
3996 vdropl_final(vp);
4001 if (vp->v_mflag & VMP_LAZYLIST) {
4002 vunlazy(vp);
4006 VI_UNLOCK(vp);
4015 vdbatch_enqueue(vp);
4019 vdropl(struct vnode *vp)
4022 vdropl_impl(vp, true);
4037 vdropl_recycle(struct vnode *vp)
4040 vdropl_impl(vp, false);
4044 vdrop_recycle(struct vnode *vp)
4047 VI_LOCK(vp);
4048 vdropl_recycle(vp);
4056 vinactivef(struct vnode *vp)
4060 ASSERT_VOP_ELOCKED(vp, "vinactive");
4061 ASSERT_VI_LOCKED(vp, "vinactive");
4062 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp);
4063 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4064 vp->v_iflag |= VI_DOINGINACT;
4065 vp->v_iflag &= ~VI_OWEINACT;
4066 VI_UNLOCK(vp);
4078 if ((vp->v_vflag & VV_NOSYNC) == 0)
4079 vnode_pager_clean_async(vp);
4081 error = VOP_INACTIVE(vp);
4082 VI_LOCK(vp);
4083 VNPASS(vp->v_iflag & VI_DOINGINACT, vp);
4084 vp->v_iflag &= ~VI_DOINGINACT;
4089 vinactive(struct vnode *vp)
4092 ASSERT_VOP_ELOCKED(vp, "vinactive");
4093 ASSERT_VI_LOCKED(vp, "vinactive");
4094 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4096 if ((vp->v_iflag & VI_OWEINACT) == 0)
4098 if (vp->v_iflag & VI_DOINGINACT)
4100 if (vp->v_usecount > 0) {
4101 vp->v_iflag &= ~VI_OWEINACT;
4104 return (vinactivef(vp));
4135 struct vnode *vp, *mvp, *rootvp = NULL;
4156 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
4157 vholdl(vp);
4158 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
4160 vdrop(vp);
4167 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
4168 VOP_UNLOCK(vp);
4169 vdrop(vp);
4178 vnode_pager_clean_async(vp);
4180 error = VOP_FSYNC(vp, MNT_WAIT, td);
4183 VOP_UNLOCK(vp);
4184 vdrop(vp);
4188 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
4189 VI_LOCK(vp);
4191 if ((vp->v_type == VNON ||
4193 (vp->v_writecount <= 0 || vp->v_type != VREG)) {
4194 VOP_UNLOCK(vp);
4195 vdropl(vp);
4199 VI_LOCK(vp);
4206 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
4207 vgonel(vp);
4212 vn_printf(vp, "vflush: busy vnode ");
4215 VOP_UNLOCK(vp);
4216 vdropl(vp);
4250 vrecycle(struct vnode *vp)
4254 VI_LOCK(vp);
4255 recycled = vrecyclel(vp);
4256 VI_UNLOCK(vp);
4261 * vrecycle, with the vp interlock held.
4264 vrecyclel(struct vnode *vp)
4268 ASSERT_VOP_ELOCKED(vp, __func__);
4269 ASSERT_VI_LOCKED(vp, __func__);
4270 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4272 if (vp->v_usecount == 0) {
4274 vgonel(vp);
4284 vgone(struct vnode *vp)
4286 VI_LOCK(vp);
4287 vgonel(vp);
4288 VI_UNLOCK(vp);
4295 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event)
4300 mp = atomic_load_ptr(&vp->v_mount);
4314 VFS_RECLAIM_LOWERVP(ump->mp, vp);
4317 VFS_UNLINK_LOWERVP(ump->mp, vp);
4332 * vgone, with the vp interlock held.
4335 vgonel(struct vnode *vp)
4342 ASSERT_VOP_ELOCKED(vp, "vgonel");
4343 ASSERT_VI_LOCKED(vp, "vgonel");
4344 VNASSERT(vp->v_holdcnt, vp,
4345 ("vgonel: vp %p has no reference.", vp));
4346 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4352 if (VN_IS_DOOMED(vp)) {
4353 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \
4354 vn_get_state(vp) == VSTATE_DEAD, vp);
4360 vn_seqc_write_begin_locked(vp);
4361 vunlazy_gone(vp);
4362 vn_irflag_set_locked(vp, VIRF_DOOMED);
4363 vn_set_state(vp, VSTATE_DESTROYING);
4373 active = vp->v_usecount > 0;
4374 oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4375 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0;
4380 if (vp->v_iflag & VI_DEFINACT) {
4381 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count"));
4382 vp->v_iflag &= ~VI_DEFINACT;
4383 vdropl(vp);
4385 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count"));
4386 VI_UNLOCK(vp);
4388 cache_purge_vgone(vp);
4389 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM);
4396 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
4400 VI_LOCK(vp);
4401 vinactivef(vp);
4402 oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4403 VI_UNLOCK(vp);
4407 if (vp->v_type == VSOCK)
4408 vfs_unp_reclaim(vp);
4415 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
4416 (void) vn_start_secondary_write(vp, &mp, V_WAIT);
4417 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
4418 while (vinvalbuf(vp, 0, 0, 0) != 0)
4422 BO_LOCK(&vp->v_bufobj);
4423 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
4424 vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
4425 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
4426 vp->v_bufobj.bo_clean.bv_cnt == 0,
4427 ("vp %p bufobj not invalidated", vp));
4434 object = vp->v_bufobj.bo_object;
4436 vp->v_bufobj.bo_flag |= BO_DEAD;
4437 BO_UNLOCK(&vp->v_bufobj);
4446 object->handle == vp)
4447 vnode_destroy_vobject(vp);
4452 if (VOP_RECLAIM(vp))
4456 VNASSERT(vp->v_object == NULL, vp,
4457 ("vop_reclaim left v_object vp=%p", vp));
4461 if (vp->v_lockf != NULL) {
4462 (void)VOP_ADVLOCKPURGE(vp);
4463 vp->v_lockf = NULL;
4468 if (vp->v_mount == NULL) {
4469 VI_LOCK(vp);
4471 delmntque(vp);
4472 ASSERT_VI_LOCKED(vp, "vgonel 2");
4478 vp->v_vnlock = &vp->v_lock;
4479 vp->v_op = &dead_vnodeops;
4480 vp->v_type = VBAD;
4481 vn_set_state(vp, VSTATE_DEAD);
4515 vn_printf(struct vnode *vp, const char *fmt, ...)
4526 printf("%p: ", (void *)vp);
4527 printf("type %s state %s op %p\n", vtypename[vp->v_type],
4528 vstatename[vp->v_state], vp->v_op);
4529 holdcnt = atomic_load_int(&vp->v_holdcnt);
4531 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS,
4532 vp->v_seqc_users);
4533 switch (vp->v_type) {
4535 printf(" mountedhere %p\n", vp->v_mountedhere);
4538 printf(" rdev %p\n", vp->v_rdev);
4541 printf(" socket %p\n", vp->v_unpcb);
4544 printf(" fifoinfo %p\n", vp->v_fifoinfo);
4558 irflag = vn_irflag_read(vp);
4572 if (vp->v_vflag & VV_ROOT)
4574 if (vp->v_vflag & VV_ISTTY)
4576 if (vp->v_vflag & VV_NOSYNC)
4578 if (vp->v_vflag & VV_ETERNALDEV)
4580 if (vp->v_vflag & VV_CACHEDLABEL)
4582 if (vp->v_vflag & VV_VMSIZEVNLOCK)
4584 if (vp->v_vflag & VV_COPYONWRITE)
4586 if (vp->v_vflag & VV_SYSTEM)
4588 if (vp->v_vflag & VV_PROCDEP)
4590 if (vp->v_vflag & VV_DELETED)
4592 if (vp->v_vflag & VV_MD)
4594 if (vp->v_vflag & VV_FORCEINSMQ)
4596 if (vp->v_vflag & VV_READLINK)
4598 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
4605 if (vp->v_iflag & VI_MOUNT)
4607 if (vp->v_iflag & VI_DOINGINACT)
4609 if (vp->v_iflag & VI_OWEINACT)
4611 if (vp->v_iflag & VI_DEFINACT)
4613 if (vp->v_iflag & VI_FOPENING)
4615 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT |
4621 if (vp->v_mflag & VMP_LAZYLIST)
4623 flags = vp->v_mflag & ~(VMP_LAZYLIST);
4629 if (mtx_owned(VI_MTX(vp)))
4632 if (vp->v_object != NULL)
4635 vp->v_object, vp->v_object->ref_count,
4636 vp->v_object->resident_page_count,
4637 vp->v_bufobj.bo_clean.bv_cnt,
4638 vp->v_bufobj.bo_dirty.bv_cnt);
4640 lockmgr_printinfo(vp->v_vnlock);
4641 if (vp->v_data != NULL)
4642 VOP_PRINT(vp);
4653 struct vnode *vp;
4663 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4664 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp))
4665 vn_printf(vp, "vnode ");
4675 struct vnode *vp;
4679 vp = (struct vnode *)addr;
4680 vn_printf(vp, "vnode ");
4691 struct vnode *vp;
4861 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4862 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) {
4863 vn_printf(vp, "vnode ");
4869 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4870 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) {
4871 vn_printf(vp, "vnode ");
5080 vfs_deferred_inactive(struct vnode *vp, int lkflags)
5083 ASSERT_VI_LOCKED(vp, __func__);
5084 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
5085 if ((vp->v_iflag & VI_OWEINACT) == 0) {
5086 vdropl(vp);
5089 if (vn_lock(vp, lkflags) == 0) {
5090 VI_LOCK(vp);
5091 vinactive(vp);
5092 VOP_UNLOCK(vp);
5093 vdropl(vp);
5096 vdefer_inactive_unlocked(vp);
5100 vfs_periodic_inactive_filter(struct vnode *vp, void *arg)
5103 return (vp->v_iflag & VI_DEFINACT);
5109 struct vnode *vp, *mvp;
5116 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) {
5117 if ((vp->v_iflag & VI_DEFINACT) == 0) {
5118 VI_UNLOCK(vp);
5121 vp->v_iflag &= ~VI_DEFINACT;
5122 vfs_deferred_inactive(vp, lkflags);
5127 vfs_want_msync(struct vnode *vp)
5135 if (vp->v_vflag & VV_NOSYNC)
5137 obj = vp->v_object;
5142 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused)
5145 if (vp->v_vflag & VV_NOSYNC)
5147 if (vp->v_iflag & VI_DEFINACT)
5149 return (vfs_want_msync(vp));
5155 struct vnode *vp, *mvp;
5163 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) {
5165 if (vp->v_iflag & VI_DEFINACT) {
5166 vp->v_iflag &= ~VI_DEFINACT;
5169 if (!vfs_want_msync(vp)) {
5171 vfs_deferred_inactive(vp, lkflags);
5173 VI_UNLOCK(vp);
5176 if (vget(vp, lkflags) == 0) {
5177 if ((vp->v_vflag & VV_NOSYNC) == 0) {
5179 vnode_pager_clean_sync(vp);
5181 vnode_pager_clean_async(vp);
5183 vput(vp);
5185 vdrop(vp);
5188 vdefer_inactive_unlocked(vp);
5227 v_addpollinfo(struct vnode *vp)
5231 if (vp->v_pollinfo != NULL)
5235 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
5237 VI_LOCK(vp);
5238 if (vp->v_pollinfo != NULL) {
5239 VI_UNLOCK(vp);
5243 vp->v_pollinfo = vi;
5244 VI_UNLOCK(vp);
5256 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5259 v_addpollinfo(vp);
5260 mtx_lock(&vp->v_pollinfo->vpi_lock);
5261 if (vp->v_pollinfo->vpi_revents & events) {
5269 events &= vp->v_pollinfo->vpi_revents;
5270 vp->v_pollinfo->vpi_revents &= ~events;
5272 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5275 vp->v_pollinfo->vpi_events |= events;
5276 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5277 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5311 struct vnode *vp;
5317 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5320 vp->v_type = VNON;
5321 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5322 vp->v_vflag |= VV_FORCEINSMQ;
5323 error = insmntque1(vp, mp);
5326 vp->v_vflag &= ~VV_FORCEINSMQ;
5327 vn_set_state(vp, VSTATE_CONSTRUCTED);
5328 VOP_UNLOCK(vp);
5345 bo = &vp->v_bufobj;
5352 mp->mnt_syncer = vp;
5353 vp = NULL;
5357 if (vp != NULL) {
5358 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5359 vgone(vp);
5360 vput(vp);
5367 struct vnode *vp;
5370 vp = mp->mnt_syncer;
5371 if (vp != NULL)
5374 if (vp != NULL)
5375 vrele(vp);
5442 struct vnode *vp = ap->a_vp;
5445 bo = &vp->v_bufobj;
5448 if (vp->v_mount->mnt_syncer == vp)
5449 vp->v_mount->mnt_syncer = NULL;
5463 vn_need_pageq_flush(struct vnode *vp)
5467 obj = vp->v_object;
5468 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5476 vn_isdisk_error(struct vnode *vp, int *errp)
5480 if (vp->v_type != VCHR) {
5486 if (vp->v_rdev == NULL)
5488 else if (vp->v_rdev->si_devsw == NULL)
5490 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5499 vn_isdisk(struct vnode *vp)
5503 return (vn_isdisk_error(vp, &error));
5680 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
5699 return (VOP_ACCESS(vp, accmode, cred, td));
5729 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
5737 vn_printf(vp, "vnode ");
5739 printf("%s: %p %s\n", str, (void *)vp, msg);
5745 assert_vi_locked(struct vnode *vp, const char *str)
5748 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
5749 vfs_badlock("interlock is not locked but should be", str, vp);
5753 assert_vi_unlocked(struct vnode *vp, const char *str)
5756 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
5757 vfs_badlock("interlock is locked but should not be", str, vp);
5761 assert_vop_locked(struct vnode *vp, const char *str)
5763 if (KERNEL_PANICKED() || vp == NULL)
5767 if ((vp->v_irflag & VIRF_CROSSMP) == 0 &&
5768 witness_is_owned(&vp->v_vnlock->lock_object) == -1)
5770 int locked = VOP_ISLOCKED(vp);
5773 vfs_badlock("is not locked but should be", str, vp);
5777 assert_vop_unlocked(struct vnode *vp, const char *str)
5779 if (KERNEL_PANICKED() || vp == NULL)
5783 if ((vp->v_irflag & VIRF_CROSSMP) == 0 &&
5784 witness_is_owned(&vp->v_vnlock->lock_object) == 1)
5786 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
5788 vfs_badlock("is locked but should not be", str, vp);
5792 assert_vop_elocked(struct vnode *vp, const char *str)
5794 if (KERNEL_PANICKED() || vp == NULL)
5797 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
5798 vfs_badlock("is not exclusive locked but should be", str, vp);
5869 struct vnode *vp;
5872 vp = a->a_vp;
5876 VNPASS(VN_IS_DOOMED(vp), vp);
5894 vop_fsync_debugprepost(struct vnode *vp, const char *name)
5896 if (vp->v_type == VCHR)
5900 * is actually determined by vp's write mount as indicated
5902 * may not be the same as vp->v_mount. However, if the
5913 else if (MNT_SHARED_WRITES(vp->v_mount))
5914 ASSERT_VOP_LOCKED(vp, name);
5916 ASSERT_VOP_ELOCKED(vp, name);
6004 struct vnode *vp = a->a_vp;
6006 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp);
6007 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK");
6077 struct vnode *vp;
6080 vp = a->a_vp;
6081 vn_seqc_write_begin(vp);
6088 struct vnode *vp;
6091 vp = a->a_vp;
6092 vn_seqc_write_end(vp);
6101 struct vnode *vp, *tdvp;
6104 vp = a->a_vp;
6106 vn_seqc_write_begin(vp);
6114 struct vnode *vp, *tdvp;
6117 vp = a->a_vp;
6119 vn_seqc_write_end(vp);
6122 VFS_KNOTE_LOCKED(vp, NOTE_LINK);
6191 struct vnode *vp;
6194 vp = a->a_vp;
6195 ASSERT_VOP_IN_SEQC(vp);
6197 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE);
6204 struct vnode *dvp, *vp;
6208 vp = a->a_vp;
6210 vn_seqc_write_begin(vp);
6217 struct vnode *dvp, *vp;
6221 vp = a->a_vp;
6223 vn_seqc_write_end(vp);
6226 VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
6272 struct vnode *dvp, *vp;
6276 vp = a->a_vp;
6278 vn_seqc_write_begin(vp);
6285 struct vnode *dvp, *vp;
6289 vp = a->a_vp;
6291 vn_seqc_write_end(vp);
6293 vp->v_vflag |= VV_UNLINKED;
6295 VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
6303 struct vnode *vp;
6306 vp = a->a_vp;
6307 vn_seqc_write_begin(vp);
6314 struct vnode *vp;
6317 vp = a->a_vp;
6318 vn_seqc_write_end(vp);
6320 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
6327 struct vnode *vp;
6330 vp = a->a_vp;
6331 vn_seqc_write_begin(vp);
6338 struct vnode *vp;
6341 vp = a->a_vp;
6342 vn_seqc_write_end(vp);
6349 struct vnode *vp;
6352 vp = a->a_vp;
6353 vn_seqc_write_begin(vp);
6360 struct vnode *vp;
6363 vp = a->a_vp;
6364 vn_seqc_write_end(vp);
6366 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
6561 struct vnode *vp = arg;
6563 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
6569 struct vnode *vp = arg;
6571 VOP_UNLOCK(vp);
6578 struct vnode *vp = arg;
6581 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
6583 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
6590 struct vnode *vp = ap->a_vp;
6594 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ &&
6611 kn->kn_hook = (caddr_t)vp;
6613 v_addpollinfo(vp);
6614 if (vp->v_pollinfo == NULL)
6616 knl = &vp->v_pollinfo->vpi_selinfo.si_note;
6617 vhold(vp);
6629 struct vnode *vp = (struct vnode *)kn->kn_hook;
6631 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
6632 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
6633 vdrop(vp);
6640 struct vnode *vp = (struct vnode *)kn->kn_hook;
6648 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
6649 VI_LOCK(vp);
6651 VI_UNLOCK(vp);
6655 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0)
6658 VI_LOCK(vp);
6661 VI_UNLOCK(vp);
6669 struct vnode *vp = (struct vnode *)kn->kn_hook;
6671 VI_LOCK(vp);
6677 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
6681 VI_UNLOCK(vp);
6688 struct vnode *vp = (struct vnode *)kn->kn_hook;
6691 VI_LOCK(vp);
6694 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
6696 VI_UNLOCK(vp);
6700 VI_UNLOCK(vp);
6788 struct vnode *vp;
6794 vp = mp->mnt_rootvnode;
6795 if (vp != NULL) {
6796 if (!VN_IS_DOOMED(vp)) {
6797 vrefact(vp);
6799 error = vn_lock(vp, flags);
6801 *vpp = vp;
6804 vrele(vp);
6813 if (vp != NULL) {
6815 vrele(vp);
6849 struct vnode *vp;
6854 vp = atomic_load_ptr(&mp->mnt_rootvnode);
6855 if (vp == NULL || VN_IS_DOOMED(vp)) {
6859 vrefact(vp);
6861 error = vn_lock(vp, flags);
6863 vrele(vp);
6866 *vpp = vp;
6873 struct vnode *vp;
6879 vp = mp->mnt_rootvnode;
6880 if (vp != NULL)
6881 vn_seqc_write_begin(vp);
6883 return (vp);
6887 vfs_cache_root_set(struct mount *mp, struct vnode *vp)
6891 vrefact(vp);
6892 mp->mnt_rootvnode = vp;
6905 struct vnode *vp;
6910 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
6911 vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
6913 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
6915 VI_LOCK(vp);
6916 if (VN_IS_DOOMED(vp)) {
6917 VI_UNLOCK(vp);
6922 if (vp == NULL) {
6929 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
6931 return (vp);
6937 struct vnode *vp;
6943 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
6945 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
6947 VI_LOCK(vp);
6948 if (VN_IS_DOOMED(vp)) {
6949 VI_UNLOCK(vp);
6954 if (vp == NULL) {
6961 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
6963 return (vp);
7003 * Relock the mp mount vnode list lock with the vp vnode interlock in the
7013 struct vnode *vp)
7019 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
7021 ASSERT_VI_UNLOCKED(vp, __func__);
7025 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);
7033 vhold(vp);
7035 VI_LOCK(vp);
7036 if (VN_IS_DOOMED(vp)) {
7037 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
7040 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
7044 if (!refcount_release_if_not_last(&vp->v_holdcnt))
7049 vdropl(vp);
7059 struct vnode *vp;
7064 vp = TAILQ_NEXT(*mvp, v_lazylist);
7065 while (vp != NULL) {
7066 if (vp->v_type == VMARKER) {
7067 vp = TAILQ_NEXT(vp, v_lazylist);
7075 VNPASS(!VN_IS_DOOMED(vp), vp);
7076 if (!cb(vp, cbarg)) {
7078 vp = TAILQ_NEXT(vp, v_lazylist);
7083 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
7093 if (!VI_TRYLOCK(vp) &&
7094 !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
7096 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
7097 KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
7098 ("alien vnode on the lazy list %p %p", vp, mp));
7099 VNPASS(vp->v_mount == mp, vp);
7100 VNPASS(!VN_IS_DOOMED(vp), vp);
7106 if (vp == NULL) {
7111 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
7113 ASSERT_VI_LOCKED(vp, "lazy iter");
7114 return (vp);
7131 struct vnode *vp;
7142 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
7143 if (vp == NULL) {
7148 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
7166 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
7174 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
7182 vn_seqc_write_begin_locked(struct vnode *vp)
7185 ASSERT_VI_LOCKED(vp, __func__);
7186 VNPASS(vp->v_holdcnt > 0, vp);
7187 VNPASS(vp->v_seqc_users >= 0, vp);
7188 vp->v_seqc_users++;
7189 if (vp->v_seqc_users == 1)
7190 seqc_sleepable_write_begin(&vp->v_seqc);
7194 vn_seqc_write_begin(struct vnode *vp)
7197 VI_LOCK(vp);
7198 vn_seqc_write_begin_locked(vp);
7199 VI_UNLOCK(vp);
7203 vn_seqc_write_end_locked(struct vnode *vp)
7206 ASSERT_VI_LOCKED(vp, __func__);
7207 VNPASS(vp->v_seqc_users > 0, vp);
7208 vp->v_seqc_users--;
7209 if (vp->v_seqc_users == 0)
7210 seqc_sleepable_write_end(&vp->v_seqc);
7214 vn_seqc_write_end(struct vnode *vp)
7217 VI_LOCK(vp);
7218 vn_seqc_write_end_locked(vp);
7219 VI_UNLOCK(vp);
7229 vn_seqc_init(struct vnode *vp)
7232 vp->v_seqc = 0;
7233 vp->v_seqc_users = 0;
7237 vn_seqc_write_end_free(struct vnode *vp)
7240 VNPASS(seqc_in_modify(vp->v_seqc), vp);
7241 VNPASS(vp->v_seqc_users == 1, vp);
7245 vn_irflag_set_locked(struct vnode *vp, short toset)
7249 ASSERT_VI_LOCKED(vp, __func__);
7250 flags = vn_irflag_read(vp);
7251 VNASSERT((flags & toset) == 0, vp,
7254 atomic_store_short(&vp->v_irflag, flags | toset);
7258 vn_irflag_set(struct vnode *vp, short toset)
7261 VI_LOCK(vp);
7262 vn_irflag_set_locked(vp, toset);
7263 VI_UNLOCK(vp);
7267 vn_irflag_set_cond_locked(struct vnode *vp, short toset)
7271 ASSERT_VI_LOCKED(vp, __func__);
7272 flags = vn_irflag_read(vp);
7273 atomic_store_short(&vp->v_irflag, flags | toset);
7277 vn_irflag_set_cond(struct vnode *vp, short toset)
7280 VI_LOCK(vp);
7281 vn_irflag_set_cond_locked(vp, toset);
7282 VI_UNLOCK(vp);
7286 vn_irflag_unset_locked(struct vnode *vp, short tounset)
7290 ASSERT_VI_LOCKED(vp, __func__);
7291 flags = vn_irflag_read(vp);
7292 VNASSERT((flags & tounset) == tounset, vp,
7295 atomic_store_short(&vp->v_irflag, flags & ~tounset);
7299 vn_irflag_unset(struct vnode *vp, short tounset)
7302 VI_LOCK(vp);
7303 vn_irflag_unset_locked(vp, tounset);
7304 VI_UNLOCK(vp);
7308 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
7313 ASSERT_VOP_LOCKED(vp, __func__);
7314 error = VOP_GETATTR(vp, &vattr, cred);
7325 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
7329 VOP_LOCK(vp, LK_SHARED);
7330 error = vn_getsize_locked(vp, size, cred);
7331 VOP_UNLOCK(vp);
7337 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)
7340 switch (vp->v_state) {
7351 ASSERT_VOP_ELOCKED(vp, __func__);
7360 ASSERT_VOP_ELOCKED(vp, __func__);
7378 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state);
7379 panic("invalid state transition %d -> %d\n", vp->v_state, state);
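The matches above cluster around the vnode reference-counting and locking primitives (vref/vrele, vget/vput, vhold/vdrop, vn_lock/VOP_UNLOCK). The following is a minimal usage sketch, not taken from the file itself; example_touch() and its error handling are illustrative only, assuming standard FreeBSD kernel headers and a vnode already obtained from a lookup.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/vnode.h>

/*
 * Illustrative only: take a use reference, lock the vnode, do some work,
 * then release.  vput() both unlocks and drops the reference, matching the
 * vget()/vput() pairing seen throughout the listing above.
 */
static int
example_touch(struct vnode *vp)
{
	int error;

	vref(vp);				/* acquire a use reference */
	error = vn_lock(vp, LK_EXCLUSIVE);	/* may fail if the vnode is doomed */
	if (error != 0) {
		vrele(vp);			/* drop the reference on failure */
		return (error);
	}
	/* ... operate on the exclusively locked vnode ... */
	vput(vp);				/* unlock and drop the reference */
	return (0);
}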