Lines matching refs: ip (struct xfs_inode *) — fs/xfs/xfs_icache.c

The hits below cover the inode cache paths end to end: allocation and freeing (xfs_inode_alloc, xfs_inode_free), the iget lookup with its cache-hit, recycle, and miss legs, inode reclaim, speculative-preallocation (blockgc) tagging, and deferred inactivation (inodegc). The number at the start of each hit is the source line.

76 	struct xfs_inode	*ip;
82 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
84 if (inode_init_always(mp->m_super, VFS_I(ip))) {
85 kmem_cache_free(xfs_inode_cache, ip);
90 VFS_I(ip)->i_mode = 0;
91 VFS_I(ip)->i_state = 0;
92 mapping_set_large_folios(VFS_I(ip)->i_mapping);
95 ASSERT(atomic_read(&ip->i_pincount) == 0);
96 ASSERT(ip->i_ino == 0);
99 ip->i_ino = ino;
100 ip->i_mount = mp;
101 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
102 ip->i_cowfp = NULL;
103 memset(&ip->i_af, 0, sizeof(ip->i_af));
104 ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
105 memset(&ip->i_df, 0, sizeof(ip->i_df));
106 ip->i_flags = 0;
107 ip->i_delayed_blks = 0;
108 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
109 ip->i_nblocks = 0;
110 ip->i_forkoff = 0;
111 ip->i_sick = 0;
112 ip->i_checked = 0;
113 INIT_WORK(&ip->i_ioend_work, xfs_end_io);
114 INIT_LIST_HEAD(&ip->i_ioend_list);
115 spin_lock_init(&ip->i_ioend_lock);
116 ip->i_next_unlinked = NULLAGINO;
117 ip->i_prev_unlinked = 0;
119 return ip;
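
The hits for lines 76-119 are xfs_inode_alloc(): the slab allocation uses __GFP_NOFAIL so it cannot fail, but inode_init_always() can, hence the kmem_cache_free() error leg; and because the slab recycles objects, every identity-carrying field is re-initialized explicitly. A minimal userspace sketch of that shape — my_inode_alloc() and vfs_init_stub() are illustrative stand-ins, not kernel APIs:

#include <stdlib.h>

struct my_inode {
	unsigned long	ino;
	unsigned int	flags;
	long		nblocks;
};

/* stand-in for inode_init_always(); the real call can fail */
static int
vfs_init_stub(struct my_inode *ip)
{
	(void)ip;
	return 0;
}

static struct my_inode *
my_inode_alloc(unsigned long ino)
{
	struct my_inode *ip = malloc(sizeof(*ip));

	if (!ip)
		return NULL;
	if (vfs_init_stub(ip)) {
		free(ip);	/* mirrors the kmem_cache_free() error leg */
		return NULL;
	}
	/* a recycling cache may return used memory: reset every field */
	ip->ino = ino;
	ip->flags = 0;
	ip->nblocks = 0;
	return ip;
}
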
127 struct xfs_inode *ip = XFS_I(inode);
129 switch (VFS_I(ip)->i_mode & S_IFMT) {
133 xfs_idestroy_fork(&ip->i_df);
137 xfs_ifork_zap_attr(ip);
139 if (ip->i_cowfp) {
140 xfs_idestroy_fork(ip->i_cowfp);
141 kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
143 if (ip->i_itemp) {
145 &ip->i_itemp->ili_item.li_flags));
146 xfs_inode_item_destroy(ip);
147 ip->i_itemp = NULL;
150 kmem_cache_free(xfs_inode_cache, ip);
155 struct xfs_inode *ip)
158 ASSERT(atomic_read(&ip->i_pincount) == 0);
159 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
160 XFS_STATS_DEC(ip->i_mount, vn_active);
162 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
167 struct xfs_inode *ip)
169 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
174 * free state. The ip->i_flags_lock provides the barrier against lookup
177 spin_lock(&ip->i_flags_lock);
178 ip->i_flags = XFS_IRECLAIM;
179 ip->i_ino = 0;
180 spin_unlock(&ip->i_flags_lock);
182 __xfs_inode_free(ip);
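
Lines 155-182 (__xfs_inode_free/xfs_inode_free) show the teardown ordering: the inode's identity is destroyed under i_flags_lock — i_flags set to XFS_IRECLAIM and i_ino zeroed — before the memory is handed to RCU, so a racing lockless lookup that re-checks identity under the same lock bails out instead of touching a dead inode. A hedged pthreads sketch of that ordering (obj and I_RECLAIM are stand-ins):

#include <pthread.h>
#include <stdbool.h>

#define I_RECLAIM	0x1	/* stand-in for XFS_IRECLAIM */

struct obj {
	pthread_spinlock_t	flags_lock;
	unsigned int		flags;
	unsigned long		ino;	/* 0 means "identity destroyed" */
};

static void
obj_free_prepare(struct obj *ip)
{
	pthread_spin_lock(&ip->flags_lock);
	ip->flags = I_RECLAIM;	/* lookups now treat this object as dead */
	ip->ino = 0;		/* no inode-number compare can match */
	pthread_spin_unlock(&ip->flags_lock);
	/* the actual free is deferred (call_rcu() in the kernel) */
}

static bool
obj_lookup_validate(struct obj *ip, unsigned long ino)
{
	bool ok;

	pthread_spin_lock(&ip->flags_lock);
	ok = ip->ino == ino && !(ip->flags & I_RECLAIM);
	pthread_spin_unlock(&ip->flags_lock);
	return ok;
}
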
338 struct xfs_inode *ip) __releases(&ip->i_flags_lock)
340 struct xfs_mount *mp = ip->i_mount;
341 struct inode *inode = VFS_I(ip);
344 trace_xfs_iget_recycle(ip);
346 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
355 ip->i_flags |= XFS_IRECLAIM;
357 spin_unlock(&ip->i_flags_lock);
362 xfs_iunlock(ip, XFS_ILOCK_EXCL);
369 spin_lock(&ip->i_flags_lock);
370 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
371 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
372 spin_unlock(&ip->i_flags_lock);
375 trace_xfs_iget_recycle_fail(ip);
380 spin_lock(&ip->i_flags_lock);
387 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
388 ip->i_flags |= XFS_INEW;
389 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
392 spin_unlock(&ip->i_flags_lock);
410 struct xfs_inode *ip,
415 if (VFS_I(ip)->i_mode != 0) {
416 xfs_warn(ip->i_mount,
418 ip->i_ino, VFS_I(ip)->i_mode);
419 xfs_agno_mark_sick(ip->i_mount,
420 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
425 if (ip->i_nblocks != 0) {
426 xfs_warn(ip->i_mount,
428 ip->i_ino);
429 xfs_agno_mark_sick(ip->i_mount,
430 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
438 if (VFS_I(ip)->i_mode == 0)
491 struct xfs_inode *ip,
496 struct inode *inode = VFS_I(ip);
497 struct xfs_mount *mp = ip->i_mount;
507 spin_lock(&ip->i_flags_lock);
508 if (ip->i_ino != ino)
529 if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
532 if (ip->i_flags & XFS_NEED_INACTIVE) {
534 if (VFS_I(ip)->i_nlink == 0) {
545 error = xfs_iget_check_free_state(ip, flags);
551 (ip->i_flags & XFS_IRECLAIMABLE))
555 if (ip->i_flags & XFS_IRECLAIMABLE) {
557 error = xfs_iget_recycle(pag, ip);
568 spin_unlock(&ip->i_flags_lock);
570 trace_xfs_iget_hit(ip);
574 xfs_ilock(ip, lock_flags);
577 xfs_iflags_clear(ip, XFS_ISTALE);
583 trace_xfs_iget_skip(ip);
587 spin_unlock(&ip->i_flags_lock);
592 spin_unlock(&ip->i_flags_lock);
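
The xfs_iget_cache_hit() hits (lines 491-592) boil down to a classification made under i_flags_lock: inodes in flux (XFS_INEW, XFS_IRECLAIM, XFS_INACTIVATING) make the caller drop everything and retry, XFS_IRECLAIMABLE inodes get recycled, and anything else is a plain hit. A compact sketch of that decision, assuming the kernel's -EAGAIN retry convention:

#include <errno.h>

#define I_NEW		0x01	/* stand-in for XFS_INEW */
#define I_RECLAIM	0x02	/* stand-in for XFS_IRECLAIM */
#define I_RECLAIMABLE	0x04	/* stand-in for XFS_IRECLAIMABLE */

/*
 * Classify a cache hit; caller holds the object's flags lock.
 * 0: usable hit; 1: recycle from the reclaim list; -EAGAIN: the
 * object is in flux, drop locks and retry the whole lookup.
 */
static int
cache_hit_classify(unsigned int flags)
{
	if (flags & (I_NEW | I_RECLAIM))
		return -EAGAIN;
	if (flags & I_RECLAIMABLE)
		return 1;
	return 0;
}
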
613 struct xfs_inode *ip;
618 ip = xfs_inode_alloc(mp, ino);
619 if (!ip)
622 error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
638 VFS_I(ip)->i_generation = get_random_u32();
642 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
646 error = xfs_inode_from_disk(ip,
647 xfs_buf_offset(bp, ip->i_imap.im_boffset));
651 xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
658 trace_xfs_iget_miss(ip);
664 error = xfs_iget_check_free_state(ip, flags);
683 if (!xfs_ilock_nowait(ip, lock_flags))
692 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
698 d_mark_dontcache(VFS_I(ip));
699 ip->i_udquot = NULL;
700 ip->i_gdquot = NULL;
701 ip->i_pdquot = NULL;
702 xfs_iflags_set(ip, iflags);
706 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
716 *ipp = ip;
723 xfs_iunlock(ip, lock_flags);
725 __destroy_inode(VFS_I(ip));
726 xfs_inode_free(ip);
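
On the miss side (lines 613-726), the new inode is built outside any lock and only then published with radix_tree_insert(); losing that race (-EEXIST) means freeing the local copy and retrying the lookup, which will now hit. A sketch of the insert-or-retry convention, with insert_fn as a hypothetical stand-in for the radix-tree insert:

#include <errno.h>
#include <stdlib.h>

struct obj {
	unsigned long	ino;
};

/*
 * insert_fn stands in for radix_tree_insert(): it returns -EEXIST
 * when another thread already published an object at this index.
 */
static int
cache_miss_insert(struct obj *ip, int (*insert_fn)(struct obj *))
{
	int error = insert_fn(ip);

	if (error == -EEXIST) {
		free(ip);	/* lost the publish race; ours is redundant */
		return -EAGAIN;	/* caller reruns the lookup and hits */
	}
	return error;
}
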
751 struct xfs_inode *ip;
771 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
773 if (ip) {
774 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
785 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
792 *ipp = ip;
799 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
800 xfs_setup_existing_inode(ip);
832 struct xfs_inode *ip,
837 spin_lock(&ip->i_flags_lock);
838 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
839 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
841 spin_unlock(&ip->i_flags_lock);
846 if (ip->i_sick &&
848 spin_unlock(&ip->i_flags_lock);
852 __xfs_iflags_set(ip, XFS_IRECLAIM);
853 spin_unlock(&ip->i_flags_lock);
871 struct xfs_inode *ip,
874 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
876 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
878 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
888 if (xlog_is_shutdown(ip->i_mount->m_log)) {
889 xfs_iunpin_wait(ip);
890 xfs_iflush_shutdown_abort(ip);
893 if (xfs_ipincount(ip))
895 if (!xfs_inode_clean(ip))
898 xfs_iflags_clear(ip, XFS_IFLUSHING);
900 trace_xfs_inode_reclaiming(ip);
912 spin_lock(&ip->i_flags_lock);
913 ip->i_flags = XFS_IRECLAIM;
914 ip->i_ino = 0;
915 ip->i_sick = 0;
916 ip->i_checked = 0;
917 spin_unlock(&ip->i_flags_lock);
919 ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
920 xfs_iunlock(ip, XFS_ILOCK_EXCL);
922 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
932 XFS_INO_TO_AGINO(ip->i_mount, ino)))
945 xfs_ilock(ip, XFS_ILOCK_EXCL);
946 ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
947 xfs_iunlock(ip, XFS_ILOCK_EXCL);
948 ASSERT(xfs_inode_clean(ip));
950 __xfs_inode_free(ip);
954 xfs_iflags_clear(ip, XFS_IFLUSHING);
956 xfs_iunlock(ip, XFS_ILOCK_EXCL);
958 xfs_iflags_clear(ip, XFS_IRECLAIM);
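
xfs_reclaim_inode() (lines 871-958) is built entirely from trylocks — an ILOCK nowait, an XFS_IFLUSHING test-and-set — with error labels that back the state out in reverse order when the inode turns out to be pinned or dirty. A small sketch of that trylock-with-backout shape (lock and flag helpers illustrative):

#include <pthread.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t	ilock;		/* stand-in for the XFS ILOCK */
	bool		flushing;	/* stand-in for XFS_IFLUSHING */
};

/* is_clean stands in for the pin-count and xfs_inode_clean() checks */
static bool
try_reclaim(struct obj *ip, bool (*is_clean)(struct obj *))
{
	if (pthread_mutex_trylock(&ip->ilock) != 0)
		return false;		/* contended: retry on a later pass */
	if (ip->flushing)
		goto out_unlock;	/* someone else is flushing it */
	ip->flushing = true;
	if (!is_clean(ip))
		goto out_clear_flush;	/* dirty or pinned: not yet */
	/* ...destroy identity and free the object here... */
	ip->flushing = false;
	pthread_mutex_unlock(&ip->ilock);
	return true;

out_clear_flush:
	ip->flushing = false;
out_unlock:
	pthread_mutex_unlock(&ip->ilock);
	return false;
}
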
1037 struct xfs_inode *ip,
1041 !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1045 !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1049 ip->i_projid != icw->icw_prid)
1061 struct xfs_inode *ip,
1065 uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1069 gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1073 ip->i_projid == icw->icw_prid)
1080 * Is this inode @ip eligible for eof/cow block reclamation, given some
1086 struct xfs_inode *ip,
1095 match = xfs_icwalk_match_id_union(ip, icw);
1097 match = xfs_icwalk_match_id(ip, icw);
1103 XFS_ISIZE(ip) < icw->icw_min_file_size)
1128 struct xfs_inode *ip,
1136 if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1143 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1146 if (!xfs_icwalk_match(ip, icw))
1153 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1160 if (xfs_can_free_eofblocks(ip, false))
1161 return xfs_free_eofblocks(ip);
1164 trace_xfs_inode_free_eofblocks_invalid(ip);
1165 xfs_inode_clear_eofblocks_tag(ip);
1171 struct xfs_inode *ip,
1174 struct xfs_mount *mp = ip->i_mount;
1183 if (ip->i_flags & iflag)
1185 spin_lock(&ip->i_flags_lock);
1186 ip->i_flags |= iflag;
1187 spin_unlock(&ip->i_flags_lock);
1189 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1192 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1201 xfs_inode_t *ip)
1203 trace_xfs_inode_set_eofblocks_tag(ip);
1204 return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
1209 struct xfs_inode *ip,
1212 struct xfs_mount *mp = ip->i_mount;
1218 spin_lock(&ip->i_flags_lock);
1219 ip->i_flags &= ~iflag;
1220 clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
1221 spin_unlock(&ip->i_flags_lock);
1226 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1229 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1238 xfs_inode_t *ip)
1240 trace_xfs_inode_clear_eofblocks_tag(ip);
1241 return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
1251 struct xfs_inode *ip)
1257 if (!xfs_inode_has_cow_data(ip)) {
1258 trace_xfs_inode_free_cowblocks_invalid(ip);
1259 xfs_inode_clear_cowblocks_tag(ip);
1267 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1268 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1269 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1270 atomic_read(&VFS_I(ip)->i_dio_count))
1290 struct xfs_inode *ip,
1299 if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1302 if (!xfs_prep_free_cowblocks(ip))
1305 if (!xfs_icwalk_match(ip, icw))
1313 !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1320 if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1331 if (xfs_prep_free_cowblocks(ip))
1332 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1338 xfs_inode_t *ip)
1340 trace_xfs_inode_set_cowblocks_tag(ip);
1341 return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
1346 xfs_inode_t *ip)
1348 trace_xfs_inode_clear_cowblocks_tag(ip);
1349 return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
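
The blockgc hits (lines 1171-1241 and 1338-1349) keep two things in sync: a per-inode i_flags bit naming why the inode needs GC, and a per-AG radix-tree tag that lets a walker find tagged inodes without touching every inode. Note line 1220: the tag is cleared only once both XFS_IEOFBLOCKS and XFS_ICOWBLOCKS are gone. A userspace sketch of that pairing, with tag/untag standing in for xfs_perag_set/clear_inode_tag():

#include <pthread.h>

#define F_EOFBLOCKS	0x1	/* stand-in for XFS_IEOFBLOCKS */
#define F_COWBLOCKS	0x2	/* stand-in for XFS_ICOWBLOCKS */

struct obj {
	pthread_spinlock_t	flags_lock;
	unsigned int		flags;
};

static void
set_gc_flag(struct obj *ip, unsigned int flag,
	    void (*tag)(struct obj *))
{
	pthread_spin_lock(&ip->flags_lock);
	ip->flags |= flag;
	pthread_spin_unlock(&ip->flags_lock);
	tag(ip);		/* make the object findable by the walker */
}

static void
clear_gc_flag(struct obj *ip, unsigned int flag,
	      void (*untag)(struct obj *))
{
	int clear_tag;

	pthread_spin_lock(&ip->flags_lock);
	ip->flags &= ~flag;
	/* only drop the index tag once *both* GC reasons are gone */
	clear_tag = (ip->flags & (F_EOFBLOCKS | F_COWBLOCKS)) == 0;
	pthread_spin_unlock(&ip->flags_lock);
	if (clear_tag)
		untag(ip);
}
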
1391 * Decide if the given @ip is eligible for garbage collection of speculative
1397 struct xfs_inode *ip)
1399 struct inode *inode = VFS_I(ip);
1404 spin_lock(&ip->i_flags_lock);
1405 if (!ip->i_ino)
1408 if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1410 spin_unlock(&ip->i_flags_lock);
1413 if (xfs_is_shutdown(ip->i_mount))
1424 spin_unlock(&ip->i_flags_lock);
1431 struct xfs_inode *ip,
1437 error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
1441 error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
1444 xfs_iunlock(ip, lockflags);
1445 xfs_irele(ip);
1573 struct xfs_inode *ip,
1576 return xfs_blockgc_free_dquots(ip->i_mount,
1577 xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1578 xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
1579 xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
1600 struct xfs_inode *ip,
1605 return xfs_blockgc_igrab(ip);
1607 return xfs_reclaim_igrab(ip, icw);
1620 struct xfs_inode *ip,
1628 error = xfs_blockgc_scan_inode(ip, icw);
1631 xfs_reclaim_inode(ip, pag);
1683 struct xfs_inode *ip = batch[i];
1685 if (done || !xfs_icwalk_igrab(goal, ip, icw))
1700 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1702 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1703 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1778 struct xfs_inode *ip,
1781 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1785 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1789 xfs_warn(ip->i_mount,
1791 ip->i_ino,
1798 #define xfs_check_delalloc(ip, whichfork) do { } while (0)
1804 struct xfs_inode *ip)
1806 struct xfs_mount *mp = ip->i_mount;
1809 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1810 xfs_check_delalloc(ip, XFS_DATA_FORK);
1811 xfs_check_delalloc(ip, XFS_COW_FORK);
1815 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1817 spin_lock(&ip->i_flags_lock);
1819 trace_xfs_inode_set_reclaimable(ip);
1820 ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1821 ip->i_flags |= XFS_IRECLAIMABLE;
1822 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1825 spin_unlock(&ip->i_flags_lock);
1837 struct xfs_inode *ip)
1841 trace_xfs_inode_inactivating(ip);
1842 error = xfs_inactive(ip);
1843 xfs_inodegc_set_reclaimable(ip);
1855 struct xfs_inode *ip, *n;
1880 ip = llist_entry(node, struct xfs_inode, i_gclist);
1884 llist_for_each_entry_safe(ip, n, node, i_gclist) {
1887 xfs_iflags_set(ip, XFS_INACTIVATING);
1888 error = xfs_inodegc_inactivate(ip);
1975 struct xfs_inode *ip)
1977 struct xfs_mount *mp = ip->i_mount;
1979 if (!XFS_IS_REALTIME_INODE(ip))
1990 # define xfs_inodegc_want_queue_rt_file(ip) (false)
2002 struct xfs_inode *ip,
2005 struct xfs_mount *mp = ip->i_mount;
2015 if (xfs_inodegc_want_queue_rt_file(ip))
2018 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
2021 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
2024 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
2049 struct xfs_inode *ip,
2072 struct xfs_inode *ip)
2074 struct xfs_mount *mp = ip->i_mount;
2081 trace_xfs_inode_set_need_inactive(ip);
2082 spin_lock(&ip->i_flags_lock);
2083 ip->i_flags |= XFS_NEED_INACTIVE;
2084 spin_unlock(&ip->i_flags_lock);
2088 llist_add(&ip->i_gclist, &gc->list);
2111 if (xfs_inodegc_want_queue_work(ip, items))
2119 if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2137 struct xfs_inode *ip)
2139 struct xfs_mount *mp = ip->i_mount;
2147 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2149 need_inactive = xfs_inode_needs_inactive(ip);
2151 xfs_inodegc_queue(ip);
2156 xfs_qm_dqdetach(ip);
2157 xfs_inodegc_set_reclaimable(ip);
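
The inodegc hits close out the listing: xfs_inodegc_queue() (lines 2072 onward) marks the inode XFS_NEED_INACTIVE under i_flags_lock, pushes it onto a per-CPU llist, and wakes the worker based on queue depth plus the quota and realtime pressure heuristics matched above. A minimal C11 sketch of the lock-free push plus depth check — the 32-item threshold is illustrative, not the kernel's tunable:

#include <stdatomic.h>
#include <stdbool.h>

struct gc_item {
	struct gc_item	*next;
};

struct gc_queue {
	_Atomic(struct gc_item *)	head;
	atomic_uint			items;
};

/* Treiber-stack push, standing in for the kernel's llist_add() */
static bool
gc_queue_push(struct gc_queue *q, struct gc_item *it)
{
	struct gc_item *old = atomic_load(&q->head);

	do {
		it->next = old;
	} while (!atomic_compare_exchange_weak(&q->head, &old, it));

	/* returns true when the caller should wake the worker */
	return atomic_fetch_add(&q->items, 1) + 1 >= 32;
}
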