// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_COMMON_H__
#define __XFS_SCRUB_COMMON_H__

/*
 * We /could/ terminate a scrub/repair operation early.  If we're not
 * in a good place to continue (fatal signal, etc.) then bail out.
 * Note that we're careful not to make any judgements about *error.
 */
static inline bool
xchk_should_terminate(
	struct xfs_scrub	*sc,
	int			*error)
{
	/*
	 * If preemption is disabled, we need to yield to the scheduler every
	 * few seconds so that we don't run afoul of the soft lockup watchdog
	 * or RCU stall detector.
	 */
	cond_resched();

	if (fatal_signal_pending(current)) {
		/* Only overwrite *error if the caller didn't already fail. */
		if (*error == 0)
			*error = -EINTR;
		return true;
	}
	return false;
}

/* Transaction management for a scrub context. */
int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
int xchk_trans_alloc_empty(struct xfs_scrub *sc);
void xchk_trans_cancel(struct xfs_scrub *sc);

/*
 * Decide if an operational error should abort the scrub; the _fblock_
 * variants attribute the error to a specific fork offset of sc->ip.
 */
bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
		xfs_agblock_t bno, int *error);
bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset, int *error);

/* Same as above, but for errors hit during cross-reference checks. */
bool xchk_xref_process_error(struct xfs_scrub *sc,
		xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset, int *error);

/* Record that metadata could be optimized ("preened") but is not corrupt. */
void xchk_block_set_preen(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);

/* Record corruption observed in various metadata objects. */
void xchk_set_corrupt(struct xfs_scrub *sc);
void xchk_block_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);
#ifdef CONFIG_XFS_QUOTA
void xchk_qcheck_set_corrupt(struct xfs_scrub *sc, unsigned int dqtype,
		xfs_dqid_t id);
#endif

/* Record cross-referencing discrepancies (XCORRUPT, not CORRUPT). */
void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
		xfs_ino_t ino);
void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset);

/* Record warnings about questionable (but not corrupt) metadata. */
void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_set_incomplete(struct xfs_scrub *sc);
int xchk_checkpoint_log(struct xfs_mount *mp);

/* Are we set up for a cross-referencing check? */
bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
		struct xfs_btree_cur **curpp);

/* Setup functions */
int xchk_setup_agheader(struct xfs_scrub *sc);
int xchk_setup_fs(struct xfs_scrub *sc);
int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
int xchk_setup_ag_rmapbt(struct xfs_scrub *sc);
int xchk_setup_ag_refcountbt(struct xfs_scrub *sc);
int xchk_setup_inode(struct xfs_scrub *sc);
int xchk_setup_inode_bmap(struct xfs_scrub *sc);
int xchk_setup_inode_bmap_data(struct xfs_scrub *sc);
int xchk_setup_directory(struct xfs_scrub *sc);
int xchk_setup_xattr(struct xfs_scrub *sc);
int xchk_setup_symlink(struct xfs_scrub *sc);
int xchk_setup_parent(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
int xchk_setup_rtbitmap(struct xfs_scrub *sc);
int xchk_setup_rtsummary(struct xfs_scrub *sc);
#else
/* Realtime scrubbers compile out when CONFIG_XFS_RT is disabled. */
static inline int
xchk_setup_rtbitmap(struct xfs_scrub *sc)
{
	return -ENOENT;
}
static inline int
xchk_setup_rtsummary(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_ino_dqattach(struct xfs_scrub *sc);
int xchk_setup_quota(struct xfs_scrub *sc);
int xchk_setup_quotacheck(struct xfs_scrub *sc);
#else
/*
 * Quota scrubbers compile out when CONFIG_XFS_QUOTA is disabled; dquot
 * attachment becomes a harmless no-op.
 */
static inline int
xchk_ino_dqattach(struct xfs_scrub *sc)
{
	return 0;
}
static inline int
xchk_setup_quota(struct xfs_scrub *sc)
{
	return -ENOENT;
}
static inline int
xchk_setup_quotacheck(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
int xchk_setup_fscounters(struct xfs_scrub *sc);
int xchk_setup_nlinks(struct xfs_scrub *sc);

/* AG (allocation group) resource management for scrubbers. */
void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);
int xchk_perag_drain_and_lock(struct xfs_scrub *sc);

/*
 * Grab all AG resources, treating the inability to grab the perag structure as
 * a fs corruption.  This is intended for callers checking an ondisk reference
 * to a given AG, which means that the AG must still exist.
 */
static inline int
xchk_ag_init_existing(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error = xchk_ag_init(sc, agno, sa);

	return error == -ENOENT ? -EFSCORRUPTED : error;
}

int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);
void xchk_ag_btcur_free(struct xchk_ag *sa);
void xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
		const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);

int xchk_setup_ag_btree(struct xfs_scrub *sc, bool force_log);
int xchk_iget_for_scrubbing(struct xfs_scrub *sc);
int xchk_setup_inode_contents(struct xfs_scrub *sc, unsigned int resblks);
int xchk_install_live_inode(struct xfs_scrub *sc, struct xfs_inode *ip);

/* Inode lock helpers that record lock state in the scrub context. */
void xchk_ilock(struct xfs_scrub *sc, unsigned int ilock_flags);
bool xchk_ilock_nowait(struct xfs_scrub *sc, unsigned int ilock_flags);
void xchk_iunlock(struct xfs_scrub *sc, unsigned int ilock_flags);

void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);

/*
 * Grab the inode at @inum.  The caller must have created a scrub transaction
 * so that we can confirm the inumber by walking the inobt and not deadlock on
 * a loop in the inobt.
 */
int xchk_iget(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp);
int xchk_iget_agi(struct xfs_scrub *sc, xfs_ino_t inum,
		struct xfs_buf **agi_bpp, struct xfs_inode **ipp);
void xchk_irele(struct xfs_scrub *sc, struct xfs_inode *ip);
int xchk_install_handle_inode(struct xfs_scrub *sc, struct xfs_inode *ip);

/*
 * Safe version of (untrusted) xchk_iget that uses an empty transaction to
 * avoid deadlocking on loops in the inobt.  This should only be used in a
 * scrub or repair setup routine, and only prior to grabbing a transaction.
 */
static inline int
xchk_iget_safe(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp)
{
	int	error;

	/* Caller must not already hold a transaction. */
	ASSERT(sc->tp == NULL);

	error = xchk_trans_alloc(sc, 0);
	if (error)
		return error;
	error = xchk_iget(sc, inum, ipp);
	/* Drop the transaction whether or not the iget succeeded. */
	xchk_trans_cancel(sc);
	return error;
}

/*
 * Don't bother cross-referencing if we already found corruption or cross
 * referencing discrepancies.
 */
static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{
	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
			       XFS_SCRUB_OFLAG_XCORRUPT);
}

bool xchk_dir_looks_zapped(struct xfs_inode *dp);

#ifdef CONFIG_XFS_ONLINE_REPAIR
/* Decide if a repair is required. */
static inline bool xchk_needs_repair(const struct xfs_scrub_metadata *sm)
{
	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
			       XFS_SCRUB_OFLAG_XCORRUPT |
			       XFS_SCRUB_OFLAG_PREEN);
}

/*
 * "Should we prepare for a repair?"
 *
 * Return true if the caller permits us to repair metadata and we're not
 * setting up for a post-repair evaluation.
 */
static inline bool xchk_could_repair(const struct xfs_scrub *sc)
{
	return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) &&
		!(sc->flags & XREP_ALREADY_FIXED);
}
#else
/* Repair is compiled out; never request or perform one. */
# define xchk_needs_repair(sc)		(false)
# define xchk_could_repair(sc)		(false)
#endif /* CONFIG_XFS_ONLINE_REPAIR */

int xchk_metadata_inode_forks(struct xfs_scrub *sc);

/*
 * Helper macros to allocate and format xfile description strings.
 * Callers must kfree the pointer returned.
 */
#define xchk_xfile_descr(sc, fmt, ...) \
	kasprintf(XCHK_GFP_FLAGS, "XFS (%s): " fmt, \
			(sc)->mp->m_super->s_id, ##__VA_ARGS__)
#define xchk_xfile_ag_descr(sc, fmt, ...) \
	kasprintf(XCHK_GFP_FLAGS, "XFS (%s): AG 0x%x " fmt, \
			(sc)->mp->m_super->s_id, \
			(sc)->sa.pag ? (sc)->sa.pag->pag_agno : (sc)->sm->sm_agno, \
			##__VA_ARGS__)
#define xchk_xfile_ino_descr(sc, fmt, ...) \
	kasprintf(XCHK_GFP_FLAGS, "XFS (%s): inode 0x%llx " fmt, \
			(sc)->mp->m_super->s_id, \
			(sc)->ip ? (sc)->ip->i_ino : (sc)->sm->sm_ino, \
			##__VA_ARGS__)

/*
 * Setting up a hook to wait for intents to drain is costly -- we have to take
 * the CPU hotplug lock and force an i-cache flush on all CPUs once to set it
 * up, and again to tear it down.  These costs add up quickly, so we only want
 * to enable the drain waiter if the drain actually detected a conflict with
 * running intent chains.
 */
static inline bool xchk_need_intent_drain(struct xfs_scrub *sc)
{
	return sc->flags & XCHK_NEED_DRAIN;
}

void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks);

int xchk_inode_is_allocated(struct xfs_scrub *sc, xfs_agino_t agino,
		bool *inuse);

#endif /* __XFS_SCRUB_COMMON_H__ */