Lines matching defs:vp (xnu bsd/vfs/vfs_subr.c; the number leading each line is its source line number)

202 static void vclean(vnode_t vp, int flag);
207 static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
213 static void insmntque(vnode_t vp, mount_t mp);
229 static void record_vp(vnode_t vp, int count);
254 #define VLISTCHECK(fun, vp, list) \
255 if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
258 #define VLISTCHECK(fun, vp, list)
261 #define VLISTNONE(vp) \
263 (vp)->v_freelist.tqe_next = (struct vnode *)0; \
264 (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \
267 #define VONLIST(vp) \
268 ((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
271 #define VREMFREE(fun, vp) \
273 VLISTCHECK((fun), (vp), "free"); \
274 TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \
275 VLISTNONE((vp)); \
281 #define VREMDEAD(fun, vp) \
283 VLISTCHECK((fun), (vp), "dead"); \
284 TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \
285 VLISTNONE((vp)); \
286 vp->v_listflag &= ~VLIST_DEAD; \
292 #define VREMASYNC_WORK(fun, vp) \
294 VLISTCHECK((fun), (vp), "async_work"); \
295 TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \
296 VLISTNONE((vp)); \
297 vp->v_listflag &= ~VLIST_ASYNC_WORK; \
303 #define VREMRAGE(fun, vp) \
305 if ( !(vp->v_listflag & VLIST_RAGE)) \
306 panic("VREMRAGE: vp not on rage list"); \
307 VLISTCHECK((fun), (vp), "rage"); \
308 TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \
309 VLISTNONE((vp)); \
310 vp->v_listflag &= ~VLIST_RAGE; \
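
The VLIST_* macros above encode one small but load-bearing convention: a vnode that is on none of the free/dead/rage lists has its tqe_prev aimed at the poison value 0xdeadb, so "is this vnode on a list?" (VONLIST) is a single pointer compare, and every removal re-poisons the entry (VLISTNONE). A minimal user-space sketch of the same trick with the stock <sys/queue.h> TAILQ macros (all names here are illustrative, not from xnu):

    #include <stdio.h>
    #include <sys/queue.h>

    struct node {
            TAILQ_ENTRY(node) freelist;
            int id;
    };
    TAILQ_HEAD(nodelist, node);

    #define DEADB        ((struct node **)0xdeadb)
    #define LISTNONE(n)  ((n)->freelist.tqe_next = NULL, \
                          (n)->freelist.tqe_prev = DEADB)
    #define ONLIST(n)    ((n)->freelist.tqe_prev != DEADB)

    int
    main(void)
    {
            struct nodelist freeq = TAILQ_HEAD_INITIALIZER(freeq);
            struct node a = { .id = 1 };

            LISTNONE(&a);
            printf("on list: %d\n", ONLIST(&a));    /* 0 */
            TAILQ_INSERT_TAIL(&freeq, &a, freelist);
            printf("on list: %d\n", ONLIST(&a));    /* 1 */
            TAILQ_REMOVE(&freeq, &a, freelist);
            LISTNONE(&a);                           /* re-poison, as VREMFREE does */
            return 0;
    }

A side benefit of the poison value: a double removal writes through 0xdeadb inside TAILQ_REMOVE and faults immediately, instead of silently corrupting the list.
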
393 vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) {
397 KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);
399 if (vp->v_numoutput > output_target) {
403 vnode_lock_spin(vp);
405 while ((vp->v_numoutput > output_target) && error == 0) {
407 vp->v_flag |= VTHROTTLED;
409 vp->v_flag |= VBWAIT;
413 error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
415 vnode_lock_spin(vp);
417 vnode_unlock(vp);
419 KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);
426 vnode_startwrite(vnode_t vp) {
428 OSAddAtomic(1, &vp->v_numoutput);
433 vnode_writedone(vnode_t vp)
435 if (vp) {
438 OSAddAtomic(-1, &vp->v_numoutput);
440 vnode_lock_spin(vp);
442 if (vp->v_numoutput < 0)
445 if ((vp->v_flag & VTHROTTLED)) {
446 vp->v_flag &= ~VTHROTTLED;
449 if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
450 vp->v_flag &= ~VBWAIT;
453 vnode_unlock(vp);
456 wakeup((caddr_t)&vp->v_numoutput);
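
vnode_startwrite, vnode_writedone, and vnode_waitforwrites (above) form an in-flight-write counter: starts atomically bump v_numoutput, completions decrement it and wake sleepers on &vp->v_numoutput, and throttled writers or the flusher sleep until the count falls to their target. A user-space analogue of the protocol using pthreads (a sketch only; the kernel adds the VTHROTTLED/VBWAIT flags so it can skip wakeups nobody is waiting for):

    #include <pthread.h>

    struct outq {
            pthread_mutex_t lock;
            pthread_cond_t  done;
            int             numoutput;          /* writes in flight */
    };

    static void
    startwrite(struct outq *q)                  /* cf. vnode_startwrite */
    {
            pthread_mutex_lock(&q->lock);
            q->numoutput++;
            pthread_mutex_unlock(&q->lock);
    }

    static void
    writedone(struct outq *q)                   /* cf. vnode_writedone */
    {
            pthread_mutex_lock(&q->lock);
            q->numoutput--;
            pthread_cond_broadcast(&q->done);
            pthread_mutex_unlock(&q->lock);
    }

    static void
    waitforwrites(struct outq *q, int target)   /* cf. vnode_waitforwrites */
    {
            pthread_mutex_lock(&q->lock);
            while (q->numoutput > target)
                    pthread_cond_wait(&q->done, &q->lock);
            pthread_mutex_unlock(&q->lock);
    }
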
463 vnode_hasdirtyblks(vnode_t vp)
474 if (vp->v_dirtyblkhd.lh_first)
477 if (!UBCINFOEXISTS(vp))
480 wbp = vp->v_ubcinfo->cl_wbehind;
489 vnode_hascleanblks(vnode_t vp)
498 if (vp->v_cleanblkhd.lh_first)
518 vnode_t vp;
520 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
521 if (vp->v_type == VDIR)
523 if (vp == skipvp)
525 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH)))
527 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP))
529 if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG))
533 if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
536 } else if (vp->v_iocount > 0) {
538 tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
539 if (vp->v_iocount > 0)
555 vnode_t vp;
562 vp = TAILQ_FIRST(&mp->mnt_vnodelist);
563 vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
633 struct vnode *vp;
651 if (panic_phys_range_before(hook->vp, &phys, &range)) {
652 kdb_log("vp = %p, phys = %p, prev (%p: %p-%p)\n",
656 kdb_log("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
665 struct vnode *vp;
684 hook.vp = NULL;
688 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
689 hook.vp = vp;
690 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
691 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
692 vid = vp->v_id;
693 if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
698 if ( vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
707 if (vnode_reload(vp)) {
709 vnode_put(vp);
715 retval = callout(vp, arg);
720 vnode_put(vp);
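
The vnode_iterate fragments above show the safe-iteration dance: each vnode is moved from mnt_workerqueue back to mnt_vnodelist, its v_id is captured, and vget_internal(... VNODE_WITHID ...) revalidates that the vnode was not reclaimed in the window. Filesystems never reimplement this; they hand a callout to the KPI. A hypothetical callout that counts regular files (the callback name and counter are mine):

    static int
    count_regular(vnode_t vp, void *arg)
    {
            int *count = arg;

            if (vnode_vtype(vp) == VREG)
                    (*count)++;
            return VNODE_RETURNED;      /* keep going; iterate drops the iocount */
    }

    /* ... */
    int count = 0;
    vnode_iterate(mp, 0, count_regular, &count);
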
1062 struct vnode *vp;
1177 error = VFS_ROOT(mp, &vp, ctx);
1184 error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
1188 vnode_put(vp);
1297 insmntque(vnode_t vp, mount_t mp)
1303 if ( (lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
1304 if ((vp->v_lflag & VNAMED_MOUNT) == 0)
1305 panic("insmntque: vp not in mount vnode list");
1306 vp->v_lflag &= ~VNAMED_MOUNT;
1312 if (vp->v_mntvnodes.tqe_next == NULL) {
1313 if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp)
1314 TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
1315 else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp)
1316 TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
1317 else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp)
1318 TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
1320 vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
1321 *vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
1323 vp->v_mntvnodes.tqe_next = NULL;
1324 vp->v_mntvnodes.tqe_prev = NULL;
1332 if ((vp->v_mount = mp) != NULL) {
1334 if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0))
1335 panic("vp already in mount list");
1337 TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
1339 TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
1340 if (vp->v_lflag & VNAMED_MOUNT)
1341 panic("insmntque: vp already in mount vnode list");
1342 vp->v_lflag |= VNAMED_MOUNT;
1434 struct vnode *vp;
1443 for (vp = *vpp; vp; vp = vp->v_specnext) {
1444 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1445 vid = vp->v_id;
1451 if (vp) {
1453 if (vnode_getwithvid(vp,vid)) {
1459 vnode_lock(vp);
1464 if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
1465 vnode_reclaim_internal(vp, 1, 1, 0);
1466 vnode_put_locked(vp);
1467 vnode_unlock(vp);
1472 if (vp == NULL || vp->v_tag != VT_NON) {
1490 if (vp == NULLVP) {
1491 for (vp = *vpp; vp; vp = vp->v_specnext) {
1492 if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
1493 vid = vp->v_id;
1504 if (vp != NULLVP) {
1506 vp->v_specflags |= SI_ALIASED;
1508 vnode_put_locked(vp);
1509 vnode_unlock(vp);
1521 if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0)
1522 return(vp);
1524 panic("checkalias with VT_NON vp that shouldn't: %p", vp);
1526 return (vp);
1541 vget_internal(vnode_t vp, int vid, int vflags)
1545 vnode_lock_spin(vp);
1547 if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0))
1553 error = vnode_getiocount(vp, vid, vflags);
1555 vnode_unlock(vp);
1565 vnode_ref(vnode_t vp)
1568 return (vnode_ref_ext(vp, 0, 0));
1576 vnode_ref_ext(vnode_t vp, int fmode, int flags)
1580 vnode_lock_spin(vp);
1587 if (vp->v_iocount <= 0 && vp->v_usecount <= 0)
1588 panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
1594 if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
1595 if (vp->v_owner != current_thread()) {
1601 vp->v_usecount++;
1604 if (++vp->v_writecount <= 0)
1608 if (++vp->v_kusecount <= 0)
1611 if (vp->v_flag & VRAGE) {
1626 vp->v_flag &= ~VRAGE;
1627 vp->v_references = 0;
1628 vnode_list_remove(vp);
1631 if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
1633 if (vp->v_ubcinfo) {
1634 vnode_lock_convert(vp);
1635 memory_object_mark_used(vp->v_ubcinfo->ui_control);
1639 vnode_unlock(vp);
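
vnode_ref/vnode_ref_ext take a long-lived usecount, but only on a vnode the caller already holds by some other means (hence the panic at 1587-1588), and they refuse once the vnode is draining or dead unless the caller owns the drain. The usual promotion pattern, sketched for a hypothetical caller:

    errno_t err;

    if ((err = vnode_getwithref(vp)) == 0) {    /* transient iocount */
            err = vnode_ref(vp);                /* long-lived usecount; can fail */
            vnode_put(vp);                      /* drop the iocount either way */
    }
    if (err == 0) {
            /* vp stays off the free lists until ... */
            vnode_rele(vp);
    }
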
1646 vnode_on_reliable_media(vnode_t vp)
1648 if ( !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) && (vp->v_mount->mnt_flag & MNT_LOCAL) )
1654 vnode_async_list_add(vnode_t vp)
1658 if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE|VL_DEAD)))
1659 panic("vnode_async_list_add: %p is in wrong state", vp);
1661 TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist);
1662 vp->v_listflag |= VLIST_ASYNC_WORK;
1678 vnode_list_add(vnode_t vp)
1683 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1688 if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE))
1693 if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
1697 if ((vp->v_flag & VAGE))
1698 TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
1700 TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
1702 vp->v_listflag |= VLIST_RAGE;
1706 * reset the timestamp for the last inserted vp on the RAGE
1719 if ( (vp->v_lflag & VL_DEAD)) {
1720 TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
1721 vp->v_listflag |= VLIST_DEAD;
1729 } else if ( (vp->v_flag & VAGE) ) {
1730 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1731 vp->v_flag &= ~VAGE;
1734 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1751 vnode_list_remove_locked(vnode_t vp)
1753 if (VONLIST(vp)) {
1758 if (vp->v_listflag & VLIST_RAGE)
1759 VREMRAGE("vnode_list_remove", vp);
1760 else if (vp->v_listflag & VLIST_DEAD)
1761 VREMDEAD("vnode_list_remove", vp);
1762 else if (vp->v_listflag & VLIST_ASYNC_WORK)
1763 VREMASYNC_WORK("vnode_list_remove", vp);
1765 VREMFREE("vnode_list_remove", vp);
1775 vnode_list_remove(vnode_t vp)
1778 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1794 if (VONLIST(vp)) {
1807 vnode_list_remove_locked(vp);
1815 vnode_rele(vnode_t vp)
1817 vnode_rele_internal(vp, 0, 0, 0);
1822 vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
1824 vnode_rele_internal(vp, fmode, dont_reenter, 0);
1829 vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
1833 vnode_lock_spin(vp);
1836 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
1838 if (--vp->v_usecount < 0)
1839 panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1842 if (--vp->v_writecount < 0)
1843 panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
1846 if (--vp->v_kusecount < 0)
1847 panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
1849 if (vp->v_kusecount > vp->v_usecount)
1850 panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
1852 if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
1858 if (vp->v_usecount == 0) {
1859 vp->v_lflag |= VL_NEEDINACTIVE;
1860 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1864 vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
1866 if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) {
1874 if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) ) {
1875 vp->v_lflag |= VL_NEEDINACTIVE;
1877 if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) {
1878 vnode_async_list_add(vp);
1882 vp->v_flag |= VAGE;
1884 vnode_list_add(vp);
1894 vp->v_iocount++;
1896 record_vp(vp, 1);
1898 vp->v_lflag &= ~VL_NEEDINACTIVE;
1899 vnode_unlock(vp);
1901 VNOP_INACTIVE(vp, vfs_context_current());
1903 vnode_lock_spin(vp);
1913 if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) &&
1914 ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
1920 vp->v_defer_reclaimlist = ut->uu_vreclaims;
1921 ut->uu_vreclaims = vp;
1924 vnode_lock_convert(vp);
1925 vnode_reclaim_internal(vp, 1, 1, 0);
1927 vnode_dropiocount(vp);
1928 vnode_list_add(vp);
1930 if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
1932 if (vp->v_ubcinfo) {
1933 vnode_lock_convert(vp);
1934 memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE);
1938 vnode_unlock(vp);
1957 struct vnode *vp;
1992 vp = TAILQ_FIRST(&mp->mnt_workerqueue);
1993 TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
1994 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
1996 if ( (vp->v_mount != mp) || (vp == skipvp)) {
1999 vid = vp->v_id;
2002 vnode_lock_spin(vp);
2004 if ((vp->v_id != vid) || ((vp->v_lflag & (VL_DEAD | VL_TERMINATE)))) {
2005 vnode_unlock(vp);
2014 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
2015 (vp->v_flag & VNOFLUSH))) {
2016 vnode_unlock(vp);
2023 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
2024 vnode_unlock(vp);
2031 if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
2032 vnode_unlock(vp);
2041 (vp->v_writecount == 0 || vp->v_type != VREG)) {
2042 vnode_unlock(vp);
2050 if (((vp->v_usecount == 0) ||
2051 ((vp->v_usecount - vp->v_kusecount) == 0))) {
2053 vnode_lock_convert(vp);
2054 vp->v_iocount++; /* so that drain waits for other iocounts */
2056 record_vp(vp, 1);
2058 vnode_reclaim_internal(vp, 1, 1, 0);
2059 vnode_dropiocount(vp);
2060 vnode_list_add(vp);
2061 vnode_unlock(vp);
2073 vnode_lock_convert(vp);
2075 if (vp->v_type != VBLK && vp->v_type != VCHR) {
2076 vp->v_iocount++; /* so that drain waits for other iocounts */
2078 record_vp(vp, 1);
2080 vnode_abort_advlocks(vp);
2081 vnode_reclaim_internal(vp, 1, 1, 0);
2082 vnode_dropiocount(vp);
2083 vnode_list_add(vp);
2084 vnode_unlock(vp);
2086 vclean(vp, 0);
2087 vp->v_lflag &= ~VL_DEAD;
2088 vp->v_op = spec_vnodeop_p;
2089 vp->v_flag |= VDEVFLUSH;
2090 vnode_unlock(vp);
2097 vprint("vflush: busy vnode", vp);
2099 vnode_unlock(vp);
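
The long run above (1957-2099) is vflush(), which unmount uses to evict every vnode on a mount; the SKIP* flags select survivors, and FORCECLOSE reclaims even busy vnodes. A sketch of an unmount-side call (the flag choice is illustrative):

    int flags = forced ? FORCECLOSE : 0;

    if (vflush(mp, NULLVP, flags | SKIPSWAP | SKIPSYSTEM) != 0)
            return EBUSY;               /* busy vnodes remain, unmount fails */
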
2132 vclean(vnode_t vp, int flags)
2149 active = vp->v_usecount;
2155 need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);
2157 vp->v_lflag &= ~VL_NEEDINACTIVE;
2163 already_terminating = (vp->v_lflag & VL_TERMINATE);
2165 vp->v_lflag |= VL_TERMINATE;
2171 insmntque(vp, (struct mount *)0);
2174 is_namedstream = vnode_isnamedstream(vp);
2177 vnode_unlock(vp);
2187 VNOP_CLOSE(vp, clflags, ctx);
2194 if (vp->v_tag == VT_NFS)
2195 nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
2199 VNOP_FSYNC(vp, MNT_WAIT, ctx);
2200 buf_invalidateblks(vp, BUF_WRITE_DATA | BUF_INVALIDATE_LOCKED, 0, 0);
2202 if (UBCINFOEXISTS(vp))
2206 (void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
2209 VNOP_INACTIVE(vp, ctx);
2212 if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
2213 vnode_t pvp = vp->v_parent;
2216 if (vnode_isshadow(vp)) {
2217 vnode_relenamedstream(pvp, vp);
2241 ubc_destroy_named(vp);
2247 if (vp->v_resolve)
2248 vnode_resolver_detach(vp);
2254 if (VNOP_RECLAIM(vp, ctx))
2258 vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE);
2260 vnode_lock(vp);
2262 vp->v_mount = dead_mountp;
2263 vp->v_op = dead_vnodeop_p;
2264 vp->v_tag = VT_NON;
2265 vp->v_data = NULL;
2267 vp->v_lflag |= VL_DEAD;
2268 vp->v_flag &= ~VISDIRTY;
2271 vp->v_lflag &= ~VL_TERMINATE;
2275 if (vp->v_lflag & VL_TERMWANT) {
2276 vp->v_lflag &= ~VL_TERMWANT;
2277 wakeup(&vp->v_lflag);
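
The vclean lines (2132-2277) are the core of reclaim, and their order matters: close if DOCLOSE was requested, fsync and invalidate the buffer cache, push and invalidate UBC pages, run the deferred VNOP_INACTIVE, detach named-stream and trigger-resolver state, then let the filesystem free its per-node data via VNOP_RECLAIM, after which the vnode is pointed at the dead ops. Condensed into a sketch (the real code interleaves locking, the NFS special case, and MAC label work):

    static void
    vclean_sketch(vnode_t vp, int flags, vfs_context_t ctx)
    {
            if (flags & DOCLOSE)
                    VNOP_CLOSE(vp, IO_NDELAY, ctx);     /* force the close */
            VNOP_FSYNC(vp, MNT_WAIT, ctx);              /* flush dirty buffers */
            buf_invalidateblks(vp, BUF_WRITE_DATA | BUF_INVALIDATE_LOCKED, 0, 0);
            if (UBCINFOEXISTS(vp))
                    (void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL,
                        UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
            VNOP_INACTIVE(vp, ctx);                     /* the deferred inactive */
            if (VNOP_RECLAIM(vp, ctx))                  /* FS must free v_data */
                    panic("vclean: cannot reclaim");
            /* then: v_op = dead_vnodeop_p, v_mount = dead_mountp, VL_DEAD set,
               and any VL_TERMWANT waiters are woken */
    }
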
2288 vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
2290 vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
2301 if (vnode_isaliased(vp)) {
2306 if (vp->v_lflag & VL_TERMINATE)
2310 * Ensure that vp will not be vgone'd while we
2314 while ((vp->v_specflags & SI_ALIASED)) {
2315 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2316 if (vq->v_rdev != vp->v_rdev ||
2317 vq->v_type != vp->v_type || vp == vq)
2337 vnode_lock(vp);
2338 if (vp->v_lflag & VL_TERMINATE) {
2339 vnode_unlock(vp);
2342 vnode_reclaim_internal(vp, 1, 0, REVOKEALL);
2343 vnode_unlock(vp);
2353 vnode_recycle(struct vnode *vp)
2355 vnode_lock_spin(vp);
2357 if (vp->v_iocount || vp->v_usecount) {
2358 vp->v_lflag |= VL_MARKTERM;
2359 vnode_unlock(vp);
2362 vnode_lock_convert(vp);
2363 vnode_reclaim_internal(vp, 1, 0, 0);
2365 vnode_unlock(vp);
2371 vnode_reload(vnode_t vp)
2373 vnode_lock_spin(vp);
2375 if ((vp->v_iocount > 1) || vp->v_usecount) {
2376 vnode_unlock(vp);
2379 if (vp->v_iocount <= 0)
2380 panic("vnode_reload with no iocount %d", vp->v_iocount);
2383 vp->v_lflag |= VL_MARKTERM;
2384 vnode_unlock(vp);
2391 vgone(vnode_t vp, int flags)
2401 vclean(vp, flags | DOCLOSE);
2407 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
2409 if (*vp->v_hashchain == vp) {
2410 *vp->v_hashchain = vp->v_specnext;
2412 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2413 if (vq->v_specnext != vp)
2415 vq->v_specnext = vp->v_specnext;
2421 if (vp->v_specflags & SI_ALIASED) {
2423 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2424 if (vq->v_rdev != vp->v_rdev ||
2425 vq->v_type != vp->v_type)
2435 vp->v_specflags &= ~SI_ALIASED;
2439 struct specinfo *tmp = vp->v_specinfo;
2440 vp->v_specinfo = NULL;
2452 vnode_t vp;
2458 for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
2459 if (dev != vp->v_rdev || type != vp->v_type)
2461 vid = vp->v_id;
2463 if (vnode_getwithvid(vp,vid))
2465 vnode_lock_spin(vp);
2466 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
2467 vnode_unlock(vp);
2468 if ((*errorp = vfs_mountedon(vp)) != 0)
2471 vnode_unlock(vp);
2472 vnode_put(vp);
2483 vcount(vnode_t vp)
2490 if (!vnode_isaliased(vp))
2491 return (vp->v_specinfo->si_opencount);
2498 vq = *vp->v_hashchain;
2512 if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
2513 if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
2554 vprint(const char *label, struct vnode *vp)
2561 typename[vp->v_type], vp->v_usecount, vp->v_writecount);
2563 if (vp->v_flag & VROOT)
2565 if (vp->v_flag & VTEXT)
2567 if (vp->v_flag & VSYSTEM)
2569 if (vp->v_flag & VNOFLUSH)
2571 if (vp->v_flag & VBWAIT)
2573 if (vnode_isaliased(vp))
2581 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
2583 return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
2587 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
2589 return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
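
vn_getpath takes its length argument by reference: on entry *len is the buffer size, on success it is rewritten to the path length including the terminating NUL. A hypothetical caller:

    char *path;
    int len = MAXPATHLEN;

    MALLOC(path, char *, MAXPATHLEN, M_TEMP, M_WAITOK);
    if (vn_getpath(vp, path, &len) == 0)
            printf("vp %p resolves to %s (%d bytes)\n", vp, path, len);
    FREE(path, M_TEMP);
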
2593 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
2595 return ubc_cs_getcdhash(vp, offset, cdhash);
2713 vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
2918 vfs_mountedon(struct vnode *vp)
2924 if (vp->v_specflags & SI_MOUNTEDON) {
2928 if (vp->v_specflags & SI_ALIASED) {
2929 for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
2930 if (vq->v_rdev != vp->v_rdev ||
2931 vq->v_type != vp->v_type)
3012 vnode_pager_vrele(vnode_t vp)
3016 vnode_lock_spin(vp);
3018 vp->v_lflag &= ~VNAMED_UBC;
3019 if (vp->v_usecount != 0) {
3032 vnode_unlock(vp);
3033 ubc_unmap(vp);
3034 vnode_lock_spin(vp);
3037 uip = vp->v_ubcinfo;
3038 vp->v_ubcinfo = UBC_INFO_NULL;
3040 vnode_unlock(vp);
3757 process_vp(vnode_t vp, int want_vp, int *deferred)
3763 vpid = vp->v_id;
3765 vnode_list_remove_locked(vp);
3769 vnode_lock_spin(vp);
3772 * We could wait for the vnode_lock after removing the vp from the freelist
3776 if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
3777 VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
3783 vnode_unlock(vp);
3787 if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) {
3803 vp->v_iocount++;
3805 record_vp(vp, 1);
3807 vnode_put_locked(vp);
3808 vnode_unlock(vp);
3815 if (vp->v_type != VBAD) {
3816 if (want_vp && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) {
3817 vnode_async_list_add(vp);
3818 vnode_unlock(vp);
3824 if (vp->v_lflag & VL_DEAD)
3825 panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
3827 vnode_lock_convert(vp);
3828 (void)vnode_reclaim_internal(vp, 1, want_vp, 0);
3831 if ((VONLIST(vp)))
3832 panic("new_vnode(%p): vp on list", vp);
3833 if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
3834 (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH)))
3835 panic("new_vnode(%p): free vnode still referenced", vp);
3836 if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0))
3837 panic("new_vnode(%p): vnode seems to be on mount list", vp);
3838 if ( !LIST_EMPTY(&vp->v_nclinks) || !LIST_EMPTY(&vp->v_ncchildren))
3839 panic("new_vnode(%p): vnode still hooked into the name cache", vp);
3841 vnode_unlock(vp);
3842 vp = NULLVP;
3845 return (vp);
3855 vnode_t vp;
3874 vp = TAILQ_FIRST(q);
3876 vp = process_vp(vp, 0, &deferred);
3878 if (vp != NULLVP)
3879 panic("found VBAD vp (%p) on async queue", vp);
3887 vnode_t vp;
3898 vp = NULLVP;
3912 vp = TAILQ_FIRST(&vnode_dead_list);
3922 MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
3923 bzero((char *)vp, sizeof(*vp));
3924 VLISTNONE(vp); /* avoid double queue removal */
3925 lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
3927 klist_init(&vp->v_knotes);
3929 vp->v_id = ts.tv_nsec;
3930 vp->v_flag = VSTANDARD;
3933 if (mac_vnode_label_init_needed(vp))
3934 mac_vnode_label_init(vp);
3937 vp->v_iocount = 1;
3948 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
3949 if ( !(vp->v_listflag & VLIST_RAGE))
3950 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
3960 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
3961 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
3969 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE)
3977 vp = NULL;
3983 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
3985 * Pick the first vp for possible reuse
3988 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
3998 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
3999 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
4007 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE)
4015 vp = NULL;
4030 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
4036 if (vp == NULL) {
4082 if ((vp = process_vp(vp, 1, &deferred)) == NULLVP) {
4141 assert ((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
4142 assert ((vp->v_lflag & VL_LABEL) != VL_LABEL);
4143 if (vp->v_lflag & VL_LABELED) {
4144 vnode_lock_convert(vp);
4145 mac_vnode_label_recycle(vp);
4146 } else if (mac_vnode_label_init_needed(vp)) {
4147 vnode_lock_convert(vp);
4148 mac_vnode_label_init(vp);
4153 vp->v_iocount = 1;
4154 vp->v_lflag = 0;
4155 vp->v_writecount = 0;
4156 vp->v_references = 0;
4157 vp->v_iterblkflags = 0;
4158 vp->v_flag = VSTANDARD;
4160 vp->v_mount = NULL;
4161 vp->v_defer_reclaimlist = (vnode_t)0;
4163 vnode_unlock(vp);
4166 *vpp = vp;
4172 vnode_lock(vnode_t vp)
4174 lck_mtx_lock(&vp->v_lock);
4178 vnode_lock_spin(vnode_t vp)
4180 lck_mtx_lock_spin(&vp->v_lock);
4184 vnode_unlock(vnode_t vp)
4186 lck_mtx_unlock(&vp->v_lock);
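
vnode_lock_spin takes v_lock as a spinlock, which is why the hot paths above pair it with vnode_lock_convert before anything that can block: the convert upgrades the hold to a full mutex without dropping it. The idiom, in sketch form:

    vnode_lock_spin(vp);                /* cheap: most callers bail out early */
    if (vp->v_usecount == 0) {
            vnode_lock_convert(vp);     /* about to do work that may sleep */
            /* ... blocking work under the mutex ... */
    }
    vnode_unlock(vp);
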
4192 vnode_get(struct vnode *vp)
4196 vnode_lock_spin(vp);
4197 retval = vnode_get_locked(vp);
4198 vnode_unlock(vp);
4204 vnode_get_locked(struct vnode *vp)
4207 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
4209 if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
4212 vp->v_iocount++;
4214 record_vp(vp, 1);
4226 vnode_getwithvid(vnode_t vp, uint32_t vid)
4228 return(vget_internal(vp, vid, ( VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO )));
4237 vnode_getwithvid_drainok(vnode_t vp, uint32_t vid)
4239 return(vget_internal(vp, vid, ( VNODE_NODEAD | VNODE_WITHID )));
4243 vnode_getwithref(vnode_t vp)
4245 return(vget_internal(vp, 0, 0));
4250 vnode_getalways(vnode_t vp)
4252 return(vget_internal(vp, 0, VNODE_ALWAYS));
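
The vid-bearing getters exist so a caller can drop every reference and still detect recycling later: capture the identity while the vnode is known-good, and vnode_getwithvid fails, rather than handing back a reincarnated vnode, if v_id has moved on. Sketched usage (vnode_vid is the public accessor for v_id):

    uint32_t vid = vnode_vid(vp);       /* capture while holding a reference */

    /* ... all references dropped; vp may be reclaimed and reused ... */

    if (vnode_getwithvid(vp, vid) == 0) {
            /* same identity: safe to use vp */
            vnode_put(vp);
    } else {
            /* recycled or dead: redo the lookup */
    }
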
4256 vnode_put(vnode_t vp)
4260 vnode_lock_spin(vp);
4261 retval = vnode_put_locked(vp);
4262 vnode_unlock(vp);
4268 vnode_put_locked(vnode_t vp)
4273 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
4276 if (vp->v_iocount < 1)
4277 panic("vnode_put(%p): iocount < 1", vp);
4279 if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
4280 vnode_dropiocount(vp);
4283 if ((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) {
4285 vp->v_lflag &= ~VL_NEEDINACTIVE;
4286 vnode_unlock(vp);
4288 VNOP_INACTIVE(vp, ctx);
4290 vnode_lock_spin(vp);
4302 vp->v_lflag &= ~VL_NEEDINACTIVE;
4304 if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
4305 vnode_lock_convert(vp);
4306 vnode_reclaim_internal(vp, 1, 1, 0);
4308 vnode_dropiocount(vp);
4309 vnode_list_add(vp);
4316 vnode_isinuse(vnode_t vp, int refcnt)
4318 return(vnode_isinuse_locked(vp, refcnt, 0));
4323 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
4328 vnode_lock_spin(vp);
4329 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
4333 if (vp->v_type == VREG) {
4334 retval = ubc_isinuse_locked(vp, refcnt, 1);
4339 vnode_unlock(vp);
4346 vnode_resume(vnode_t vp)
4348 if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
4350 vnode_lock_spin(vp);
4351 vp->v_lflag &= ~VL_SUSPENDED;
4352 vp->v_owner = NULL;
4353 vnode_unlock(vp);
4355 wakeup(&vp->v_iocount);
4367 vnode_suspend(vnode_t vp)
4369 if (vp->v_lflag & VL_SUSPENDED) {
4373 vnode_lock_spin(vp);
4380 if (vp->v_owner == NULL) {
4381 vp->v_lflag |= VL_SUSPENDED;
4382 vp->v_owner = current_thread();
4384 vnode_unlock(vp);
4396 vnode_abort_advlocks(vnode_t vp)
4398 if (vp->v_flag & VLOCKLOCAL)
4399 lf_abort_advlocks(vp);
4404 vnode_drain(vnode_t vp)
4407 if (vp->v_lflag & VL_DRAIN) {
4411 vp->v_lflag |= VL_DRAIN;
4412 vp->v_owner = current_thread();
4414 while (vp->v_iocount > 1)
4415 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
4417 vp->v_lflag &= ~VL_DRAIN;
4437 vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
4449 if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
4455 if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) {
4461 if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
4468 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
4469 (vp->v_owner == current_thread())) {
4480 if (vp->v_lflag & VL_DRAIN) {
4499 if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount))
4503 vnode_lock_convert(vp);
4505 if (vp->v_lflag & VL_TERMINATE) {
4506 vp->v_lflag |= VL_TERMWANT;
4508 msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vnode getiocount", NULL);
4510 msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
4512 if (withvid && vid != vp->v_id) {
4515 if (++vp->v_references >= UNAGE_THRESHHOLD ||
4516 (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD)) {
4517 vp->v_references = 0;
4518 vnode_list_remove(vp);
4520 vp->v_iocount++;
4522 record_vp(vp, 1);
4528 vnode_dropiocount (vnode_t vp)
4530 if (vp->v_iocount < 1)
4531 panic("vnode_dropiocount(%p): v_iocount < 1", vp);
4533 vp->v_iocount--;
4535 record_vp(vp, -1);
4537 if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1))
4538 wakeup(&vp->v_iocount);
4543 vnode_reclaim(struct vnode * vp)
4545 vnode_reclaim_internal(vp, 0, 0, 0);
4550 vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
4555 vnode_lock(vp);
4557 if (vp->v_lflag & VL_TERMINATE) {
4560 vp->v_lflag |= VL_TERMINATE;
4562 vn_clearunionwait(vp, 1);
4564 vnode_drain(vp);
4566 isfifo = (vp->v_type == VFIFO);
4568 if (vp->v_type != VBAD)
4569 vgone(vp, flags); /* clean and reclaim the vnode */
4589 vnode_list_remove_locked(vp);
4590 vp->v_id++;
4597 fip = vp->v_fifoinfo;
4598 vp->v_fifoinfo = NULL;
4601 vp->v_type = VBAD;
4603 if (vp->v_data)
4605 if (vp->v_numoutput)
4607 if (UBCINFOEXISTS(vp))
4609 if (vp->v_parent)
4611 if (vp->v_name)
4614 vp->v_socket = NULL;
4616 vp->v_lflag &= ~VL_TERMINATE;
4617 vp->v_owner = NULL;
4619 KNOTE(&vp->v_knotes, NOTE_REVOKE);
4622 klist_init(&vp->v_knotes);
4624 if (vp->v_lflag & VL_TERMWANT) {
4625 vp->v_lflag &= ~VL_TERMWANT;
4626 wakeup(&vp->v_lflag);
4633 vnode_list_add(vp);
4636 vnode_unlock(vp);
4649 vnode_t vp;
4684 if ( (error = new_vnode(&vp)) )
4690 vp->v_op = param->vnfs_vops;
4691 vp->v_type = param->vnfs_vtype;
4692 vp->v_data = param->vnfs_fsnode;
4695 vp->v_flag |= VROOT;
4697 vp->v_flag |= VSYSTEM;
4698 if (vp->v_type == VREG) {
4699 error = ubc_info_init_withsize(vp, param->vnfs_filesize);
4702 record_vp(vp, 1);
4704 vp->v_mount = NULL;
4705 vp->v_op = dead_vnodeop_p;
4706 vp->v_tag = VT_NON;
4707 vp->v_data = NULL;
4708 vp->v_type = VBAD;
4709 vp->v_lflag |= VL_DEAD;
4711 vnode_put(vp);
4715 memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
4718 record_vp(vp, 1);
4725 if ((vp->v_type == VDIR) && (tinfo != NULL)) {
4732 record_vp(vp, -1);
4734 error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
4737 vp->v_mount = NULL;
4738 vp->v_op = dead_vnodeop_p;
4739 vp->v_tag = VT_NON;
4740 vp->v_data = NULL;
4741 vp->v_type = VBAD;
4742 vp->v_lflag |= VL_DEAD;
4744 record_vp(vp, 1);
4746 vnode_put(vp);
4751 if (vp->v_type == VCHR || vp->v_type == VBLK) {
4753 vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */
4755 if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) {
4761 vp->v_data = NULL;
4762 vp->v_op = spec_vnodeop_p;
4763 vp->v_type = VBAD;
4764 vp->v_lflag = VL_DEAD;
4765 vp->v_data = NULL;
4766 vp->v_tag = VT_NON;
4767 vnode_put(vp);
4773 vp = nvp;
4775 vclean(vp, 0);
4776 vp->v_op = param->vnfs_vops;
4777 vp->v_type = param->vnfs_vtype;
4778 vp->v_data = param->vnfs_fsnode;
4779 vp->v_lflag = 0;
4780 vp->v_mount = NULL;
4781 insmntque(vp, param->vnfs_mp);
4783 vnode_unlock(vp);
4786 if (VCHR == vp->v_type) {
4787 u_int maj = major(vp->v_rdev);
4790 vp->v_flag |= VISTTY;
4794 if (vp->v_type == VFIFO) {
4800 vp->v_fifoinfo = fip;
4807 *vpp = vp;
4811 vp->v_lflag |= VNAMED_FSHASH;
4815 vp->v_flag |= VLOCKLOCAL;
4817 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4818 panic("insmntque: vp on the free list\n");
4823 insmntque(vp, param->vnfs_mp);
4827 vp->v_parent = dvp;
4837 vp->v_name = cache_enter_create(dvp, vp, cnp);
4839 vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
4842 vp->v_flag |= VISUNION;
4849 vp->v_flag |= VNCACHEABLE;
4861 vp->v_flag |= VRAGE;
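
Lines 4649-4861 are vnode_create(), the only way a filesystem mints a vnode; the caller fills in a vnode_fsparam and receives a vnode holding an iocount of one. A minimal hypothetical caller (the myfs_* names and node pointer are invented; the struct fields and VNFS_ADDFSREF are from sys/vnode.h):

    struct vnode_fsparam vfsp;
    vnode_t vp;
    int error;

    bzero(&vfsp, sizeof(vfsp));
    vfsp.vnfs_mp = mp;
    vfsp.vnfs_vtype = VREG;
    vfsp.vnfs_str = "myfs";
    vfsp.vnfs_dvp = dvp;                /* parent, for the name cache */
    vfsp.vnfs_fsnode = node;            /* becomes vp->v_data */
    vfsp.vnfs_vops = myfs_vnodeop_p;    /* the FS's vnode op vector */
    vfsp.vnfs_filesize = node->size;    /* seeds the UBC for VREG */
    vfsp.vnfs_cnp = cnp;
    vfsp.vnfs_flags = VNFS_ADDFSREF;    /* take the VNAMED_FSHASH reference */

    error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
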
4867 vnode_addfsref(vnode_t vp)
4869 vnode_lock_spin(vp);
4870 if (vp->v_lflag & VNAMED_FSHASH)
4871 panic("add_fsref: vp already has named reference");
4872 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb))
4873 panic("addfsref: vp on the free list\n");
4874 vp->v_lflag |= VNAMED_FSHASH;
4875 vnode_unlock(vp);
4880 vnode_removefsref(vnode_t vp)
4882 vnode_lock_spin(vp);
4883 if ((vp->v_lflag & VNAMED_FSHASH) == 0)
4885 vp->v_lflag &= ~VNAMED_FSHASH;
4886 vnode_unlock(vp);
5217 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
5225 error = vn_close(vp, flags, ctx);
5226 vnode_put(vp);
5231 vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
5238 error = vnode_getattr(vp, &va, ctx);
5245 vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
5252 error = vnode_getattr(vp, &va, ctx);
5263 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
5270 error = vnode_getattr(vp, &va, ctx);
5277 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
5284 return(vnode_setattr(vp, &va, ctx));
5288 vnode_setdirty(vnode_t vp)
5290 vnode_lock_spin(vp);
5291 vp->v_flag |= VISDIRTY;
5292 vnode_unlock(vp);
5297 vnode_cleardirty(vnode_t vp)
5299 vnode_lock_spin(vp);
5300 vp->v_flag &= ~VISDIRTY;
5301 vnode_unlock(vp);
5306 vnode_isdirty(vnode_t vp)
5310 vnode_lock_spin(vp);
5311 dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
5312 vnode_unlock(vp);
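
vnode_mtime, vnode_flags, vnode_size, and vnode_setsize above are thin wrappers over the vnode_attr machinery: initialize a vnode_attr, mark one field wanted (or set it, for the setters), and call vnode_getattr/vnode_setattr. The same pattern from a hypothetical caller fetching a file's size:

    struct vnode_attr va;
    off_t size = 0;
    int error;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_data_size);
    error = vnode_getattr(vp, &va, ctx);
    if (error == 0 && VATTR_IS_SUPPORTED(&va, va_data_size))
            size = va.va_data_size;
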
5374 vnode_t vp = (vnode_t)0;
5438 vp = *vpp;
5443 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
5460 if ((error != 0) && (vp != (vnode_t)0)) {
5464 VNOP_CLOSE(vp, fmode, ctx);
5470 vnode_put(vp);
5487 vnode_t vp;
5595 vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
5606 if ((vp->v_type == VDIR) &&
5607 !(vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSDIRLINKS)) {
5614 error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
5617 error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
5623 vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
5637 if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
5638 error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
5644 if ( (fmode & O_DIRECTORY) && vp->v_type != VDIR ) {
5648 if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
5652 if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
5657 if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
5662 if (vp->v_type != VDIR) {
5673 if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
5674 error = mac_vnode_check_open(ctx, vp, fmode);
5699 error = vnode_authorize(vp, NULL, action, ctx);
5708 if (vnode_isshadow(vp) && vnode_isnamedstream (vp)) {
5709 error = vnode_verifynamedstream(vp);
5917 vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
5928 if (vp->v_type != VDIR) {
5935 if (dvp == vp) {
5944 vp, cnp);
5949 return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
5971 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
5979 if (vp->v_type == VBAD)
5984 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
6382 vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
6403 * Authorize the deletion of the node vp from the directory dvp.
6464 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
6473 KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
6478 KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
6510 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
6519 KAUTH_DEBUG("%p ALLOWED - granted by file ACL", vcp->vp);
6524 KAUTH_DEBUG("%p DEFERRED%s - by file ACL", vcp->vp, delete_denied ? "(DENY)" : "");
6531 KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
6538 vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
6544 KAUTH_DEBUG("%p DENIED - denied by posix permissions", vcp->vp);
6586 KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
6616 KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
6622 KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
6625 KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
6630 KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
6654 KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
6665 KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
6669 KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
6702 vcp->vp,
6704 ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
6706 ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
6708 ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
6712 ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
6743 vnode_authorize_checkimmutable(vnode_t vp, struct vnode_attr *vap, int rights, int ignore)
6754 switch(vp->v_type) {
6773 mp = vp->v_mount;
6779 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vp);
6790 if (vp->v_type == VDIR) {
6798 KAUTH_DEBUG("%p DENIED - file is immutable", vp);
6822 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
6827 * If the vp is a device node, socket or FIFO it actually represents a local
6830 switch(vp->v_type) {
6844 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount))
6851 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
6853 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
6861 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
6863 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
6865 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
6868 VNOP_CLOSE(vp, FREAD, ctx);
6876 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
6906 vnode_t vp, dvp;
6912 vp = (vnode_t)arg1;
6927 if (dvp && vp)
6948 if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
6949 cvp = vnode_getparent(vp);
6969 cvp = vp;
7001 vnode_t vp, dvp;
7014 vp = vcp->vp = (vnode_t)arg1;
7031 if ((ctx == NULL) || (vp == NULL) || (cred == NULL))
7032 panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred);
7036 vp, vfs_context_proc(ctx)->p_comm,
7038 (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
7039 (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
7040 (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
7042 (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
7052 vnode_isdir(vp) ? "directory" : "file",
7053 vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);
7083 (vp->v_mount->mnt_flag & MNT_RDONLY) &&
7084 ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
7085 (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
7094 if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
7105 if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx))
7117 if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
7118 KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
7128 KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
7137 if (vnode_isnamedstream(vp)) {
7149 * Point 'vp' to the resource fork's parent for ACL checking
7151 if (vnode_isnamedstream(vp) &&
7152 (vp->v_parent != NULL) &&
7153 (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) {
7155 vcp->vp = vp = vp->v_parent;
7164 if ((result = vnode_getattr(vp, &va, ctx)) != 0)
7174 if ((result = vnode_authorize_checkimmutable(vp, &va, rights, noimmutable)) != 0)
7211 (vp->v_type == VREG) &&
7215 KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vp, va.va_mode);
7219 KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp);
7229 vnode_put(vp);
7231 KAUTH_DEBUG("%p DENIED - auth denied", vp);
7234 if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
7245 vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
7265 vnode_put(vp);
7270 KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
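
All of the vn_authorize_* helpers funnel into vnode_authorize(), which runs the kauth vnode scope (vnode_authorize_callback and the worker above) over ACLs, POSIX bits, and immutability. A caller only builds the action mask; dvp may be NULL for everything except delete, as in the call at 5699. Sketch:

    kauth_action_t action;
    int error;

    action = vnode_isdir(vp) ? KAUTH_VNODE_LIST_DIRECTORY
                             : KAUTH_VNODE_READ_DATA;
    error = vnode_authorize(vp, NULL, action, ctx);
    if (error)
            return error;               /* typically EACCES or EPERM */
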
7506 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
7525 if (vfs_authopaque(vp->v_mount))
7602 if (vnode_isdir(vp)) {
7613 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
8026 setlocklocal_callback(struct vnode *vp, __unused void *cargs)
8028 vnode_lock_spin(vp);
8029 vp->v_flag |= VLOCKLOCAL;
8030 vnode_unlock(vp);
8059 vnode_setswapmount(vnode_t vp)
8061 mount_lock(vp->v_mount);
8062 vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
8063 mount_unlock(vp->v_mount);
8068 vn_setunionwait(vnode_t vp)
8070 vnode_lock_spin(vp);
8071 vp->v_flag |= VISUNION;
8072 vnode_unlock(vp);
8077 vn_checkunionwait(vnode_t vp)
8079 vnode_lock_spin(vp);
8080 while ((vp->v_flag & VISUNION) == VISUNION)
8081 msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
8082 vnode_unlock(vp);
8086 vn_clearunionwait(vnode_t vp, int locked)
8089 vnode_lock_spin(vp);
8090 if((vp->v_flag & VISUNION) == VISUNION) {
8091 vp->v_flag &= ~VISUNION;
8092 wakeup((caddr_t)&vp->v_flag);
8095 vnode_unlock(vp);
8110 errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * restart_flag)
8125 error = vnode_suspend(vp);
8151 if ((error = VNOP_OPEN(vp, FREAD, ctx)))
8165 if((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx)))
8206 if (vp->v_tag == VT_HFS && nentries > 2)
8209 if (vp->v_tag == VT_NFS) {
8235 error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
8265 nd_temp.ni_dvp = vp;
8280 if (vp->v_tag == VT_HFS && nentries > 2)
8283 if (vp->v_tag == VT_NFS) {
8300 VNOP_CLOSE(vp, FREAD, ctx);
8305 vnode_resume(vp);
8314 lock_vnode_and_post(vnode_t vp, int kevent_num)
8317 if (vp->v_knotes.slh_first != NULL) {
8318 vnode_lock(vp);
8319 KNOTE(&vp->v_knotes, kevent_num);
8320 vnode_unlock(vp);
8361 static char *__vpath(vnode_t vp, char *str, int len, int depth)
8369 if (!vp->v_name)
8373 if ((vp->v_flag & VROOT)) {
8374 if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) {
8377 return __vpath(vp->v_mount->mnt_vnodecovered,
8383 src = (char *)vp->v_name;
8402 if (vp->v_parent && len > 1) {
8406 return __vpath(vp->v_parent, str, len, depth + 1);
8418 vnode_t vp;
8434 TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) {
8437 type = __vtype(vp->v_type);
8438 nm = __vpath(vp, vname, sizeof(vname)-1, 0);
8440 type, vp->v_usecount, vp->v_iocount, nm);
8454 static void record_vp(vnode_t vp, int count) {
8458 if (vp->v_resolve)
8461 if ((vp->v_flag & VSYSTEM))
8471 ut->uu_vps[ut->uu_vpindex] = vp;
8531 vnode_trigger_update(vnode_t vp, resolver_result_t result)
8537 if (vp->v_resolve == NULL) {
8548 rp = vp->v_resolve;
8566 vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
8570 vnode_lock_spin(vp);
8571 if (vp->v_resolve != NULL) {
8572 vnode_unlock(vp);
8575 vp->v_resolve = rp;
8577 vnode_unlock(vp);
8580 error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
8596 vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
8624 result = vnode_resolver_attach(vp, rp, external);
8657 vnode_resolver_detach(vnode_t vp)
8662 mp = vnode_mount(vp);
8664 vnode_lock(vp);
8665 rp = vp->v_resolve;
8666 vp->v_resolve = NULL;
8667 vnode_unlock(vp);
8670 vnode_rele_ext(vp, O_EVTONLY, 1);
8681 vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
8688 if ((vp->v_resolve == NULL) ||
8689 (vp->v_resolve->vr_rearm_func == NULL) ||
8690 (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
8694 rp = vp->v_resolve;
8713 result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
8728 vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
8737 if ((vp->v_resolve == NULL) ||
8738 (vp->v_resolve->vr_resolve_func == NULL) ||
8739 (vp->v_mountedhere != NULL)) {
8743 rp = vp->v_resolve;
8767 result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
8784 vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
8791 if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
8795 rp = vp->v_resolve;
8817 result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
8845 vnode_t vp;
8851 vp = mp->mnt_vnodecovered;
8852 if (vp == NULLVP)
8855 mp = vp->v_mount;
8911 vnode_t vp = infop->trigger_vp;
8916 if (mp == vp->v_mountedhere) {
8917 vnode_put(vp);
8922 if (infop->trigger_mp != vp->v_mountedhere) {
8923 vnode_put(vp);
8925 infop->trigger_mp, vp->v_mountedhere);
8929 error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
8930 vnode_put(vp);
8933 vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
8941 * ref on mp so save its covered vp for later processing
9004 vnode_t vp = info.trigger_vp;
9006 if (info.trigger_mp == vp->v_mountedhere) {
9007 (void) vnode_trigger_unresolve(vp, flags, ctx);
9009 vnode_put(vp);
9018 vnode_t rvp, vp;
9049 vp = nd.ni_vp;
9070 res = vnode_resolver_create(mp, vp, &vtp, TRUE);
9071 vnode_put(vp);
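
The trigger lines (8531-9071) implement on-demand ("trigger") mounts: a resolver attached to a directory vnode fires during path traversal (vnode_trigger_resolve), is torn down at unmount time (vnode_trigger_unresolve), and can auto-rearm. Registration mirrors the vnode_resolver_create(mp, vp, &vtp, TRUE) call at 9070. A sketch, assuming the KERNEL_PRIVATE vnode_trigger_param layout; myfs_resolve is hypothetical:

    static resolver_result_t
    myfs_resolve(vnode_t vp, const struct componentname *cnp,
        enum path_operation op, int flags, void *data, vfs_context_t ctx)
    {
            /* ... kick off the mount that covers vp ... */
            return vfs_resolver_result(1 /* sequence */, RESOLVER_RESOLVED, 0);
    }

    struct vnode_trigger_param vtp;
    int error;

    bzero(&vtp, sizeof(vtp));
    vtp.vnt_resolve_func = myfs_resolve;
    vtp.vnt_data = NULL;
    vtp.vnt_flags = VNT_AUTO_REARM;     /* re-resolve after an unmount */

    error = vnode_resolver_create(mp, vp, &vtp, TRUE);
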