Lines Matching defs:sc
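(These are the identifier matches for the scrub-context pointer sc. Judging from the names in the fragments, xchk_iallocbt, XFS_SCRUB_TYPE_INOBT/FINOBT, M_IGEO, this is the inode btree scrubber, fs/xfs/scrub/ialloc.c; the number leading each match is its line number in that file.)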

33 struct xfs_scrub *sc)
35 if (xchk_need_intent_drain(sc))
36 xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
37 return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
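Lines 33-37 are the whole body of the iallocbt scrub setup hook. A minimal reconstruction, assuming the enclosing function is the usual xchk_setup_ag_iallocbt:

int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc)
{
	/* Let scrub wait out chains of deferred log intents. */
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	/*
	 * On a XCHK_TRY_HARDER rerun, also force the log so the ondisk
	 * metadata is current before we start reading it.
	 */
	return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
}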
66 struct xfs_scrub *sc,
73 struct xfs_btree_cur *cur = sc->sa.fino_cur;
101 xchk_btree_xref_set_corrupt(sc, cur, 0);
103 xchk_btree_xref_set_corrupt(sc, cur, 0);
123 xchk_btree_xref_set_corrupt(sc, cur, 0);
133 struct xfs_scrub *sc,
142 ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT);
144 if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm))
157 error = xchk_inobt_xref_finobt(sc, irec, i, free, hole);
158 if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
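Lines 133-158 are the per-chunk wrapper for the inobt side: it runs only when a finobt cursor exists and cross-referencing is still enabled, then checks every inode in the chunk against the finobt. A sketch of the loop; the gate and the two calls are verbatim from the fragments, while the bounds and the free/hole bit extraction are assumed from the usual inobt record layout (64-bit ir_free, holemask at XFS_INODES_PER_HOLEMASK_BIT granularity):

	if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm))
		return;

	for (i = agino, rec_idx = agino - irec->ir_startino;
	     i < agino + nr_inodes;
	     i++, rec_idx++) {
		bool	free, hole;

		free = irec->ir_free & (1ULL << rec_idx);
		hole = irec->ir_holemask &
				(1U << (rec_idx / XFS_INODES_PER_HOLEMASK_BIT));

		error = xchk_inobt_xref_finobt(sc, irec, i, free, hole);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
	}

Note the xchk_should_check_xref idiom seen throughout this file: on error it records a cross-referencing failure and disposes of the offending cursor rather than failing the whole scrub.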
169 struct xfs_scrub *sc,
176 struct xfs_btree_cur *cur = sc->sa.ino_cur;
204 xchk_btree_xref_set_corrupt(sc, cur, 0);
206 xchk_btree_xref_set_corrupt(sc, cur, 0);
211 xchk_btree_xref_set_corrupt(sc, cur, 0);
221 struct xfs_scrub *sc,
230 ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT);
232 if (!sc->sa.ino_cur || xchk_skip_xref(sc->sm))
245 error = xchk_finobt_xref_inobt(sc, frec, i, ffree, fhole);
246 if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
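Lines 169-246 are the mirror image for the finobt side: the same gate and per-inode loop with the cursor roles swapped (sc->sa.ino_cur gating, XFS_SCRUB_TYPE_FINOBT assert), checking that every free inode the finobt advertises is also present, and marked free, in the inobt.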
259 struct xfs_scrub *sc = bs->sc;
269 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
271 if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
274 xchk_xref_is_used_space(sc, agbno, len);
275 if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
276 xchk_inobt_chunk_xref_finobt(sc, irec, agino, nr_inodes);
278 xchk_finobt_chunk_xref_inobt(sc, irec, agino, nr_inodes);
279 xchk_xref_is_only_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
280 xchk_xref_is_not_shared(sc, agbno, len);
281 xchk_xref_is_not_cow_staging(sc, agbno, len);
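Lines 259-281 are the cross-reference fan-out run once per inode chunk; line 271 shows it bails early if the record itself was already marked corrupt. Each call checks the chunk's extent [agbno, agbno + len) against a different AG btree; reconstructed in order from the fragments:

	/* The space must be allocated according to the free-space btrees. */
	xchk_xref_is_used_space(sc, agbno, len);

	/* The two inode btrees must agree about this chunk. */
	if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
		xchk_inobt_chunk_xref_finobt(sc, irec, agino, nr_inodes);
	else
		xchk_finobt_chunk_xref_inobt(sc, irec, agino, nr_inodes);

	/* The rmapbt must say inodes, and only inodes, own these blocks. */
	xchk_xref_is_only_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);

	/*
	 * The refcountbt must show the blocks as neither shared nor
	 * CoW staging space.
	 */
	xchk_xref_is_not_shared(sc, agbno, len);
	xchk_xref_is_not_cow_staging(sc, agbno, len);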
314 if (xchk_should_terminate(bs->sc, &error))
327 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
331 error = xchk_inode_is_allocated(bs->sc, agino, &ino_inuse);
335 if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
349 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
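Lines 314-349 compare each inode's actual in-use state with the ir_free bit in the btree record. A compressed sketch; the dinode-buffer fallback (dip) and the variable declarations are assumed, and the error < 0 bailout path is elided. freemask_ok means exactly one of "record says free" and "inode is in use" holds:

	bool	freemask_ok = true;

	error = xchk_inode_is_allocated(bs->sc, agino, &ino_inuse);
	if (error == -ENODATA) {
		/* Not in cache; judge by the ondisk dinode's mode. */
		freemask_ok = irec_free ^ !!(dip->di_mode);
		if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
			return -EDEADLOCK;	/* rerun with TRY_HARDER */
	} else if (!error) {
		freemask_ok = irec_free ^ ino_inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

The -EDEADLOCK return (fed by line 335's test) is how scrub asks to be rerun: on the second pass XCHK_TRY_HARDER is set, so a persisting mismatch is reported as corruption instead of retried.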
406 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
418 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
424 xchk_xref_is_not_owned_by(bs->sc, agbno,
430 xchk_xref_is_only_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
435 if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
443 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
481 cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
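Line 481 is the loop step of the per-record cluster walk: each inobt record covers XFS_INODES_PER_CHUNK inodes, which are checked one inode cluster buffer at a time. The loop shape, with the helper name and the irec argument assumed (the helper's body is what lines 406-443 above belong to):

	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}
	return error;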
501 struct xfs_mount *mp = bs->sc->mp;
523 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
534 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
550 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
555 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
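Lines 501-555 are the startino alignment checks: finobt records need only be aligned to the inode cluster, while inobt records must tile the chunk alignment exactly, with sparse-chunk records tracked across successive calls. A conceptual model of the finobt case only (the mask arithmetic is assumed from the usual geometry helpers; the inobt sparse bookkeeping is elided):

	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	unsigned int		imask;

	imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			igeo->cluster_align_inodes) - 1;
	if (irec->ir_startino & imask)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);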
589 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
596 if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
604 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
618 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
632 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
635 if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
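Lines 589-635 are the per-record verifier; the flag tests at 596 and 635 implement the short-circuit pattern used throughout: once XFS_SCRUB_OFLAG_CORRUPT is set for this record, the remaining (and more expensive) cluster and cross-reference checks are skipped:

	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;	/* record already bad; stop checking it */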
652 struct xfs_scrub *sc)
659 if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
660 (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
661 xchk_skip_xref(sc->sm))
665 error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
666 if (!xchk_process_error(sc, 0, 0, &error))
669 if (sc->sa.fino_cur) {
670 error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
671 if (!xchk_process_error(sc, 0, 0, &error))
675 error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
677 if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
680 xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
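Lines 652-680 check that the inode btrees are as large as the rmapbt thinks they are: count the blocks in both btrees and compare the sum against the blocks the rmapbt attributes to owner XFS_RMAP_OINFO_INOBT. Filled out from the fragments; the declarations and the final comparison (the line feeding 680) are assumed:

	xfs_filblks_t	blocks;
	xfs_extlen_t	inobt_blocks = 0;
	xfs_extlen_t	finobt_blocks = 0;
	int		error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* How big do the inode btrees say they are? */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	/* ...and how many blocks does the rmapbt attribute to them? */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);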
689 struct xfs_scrub *sc,
696 if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
700 error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
702 if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
704 inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
706 xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
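Lines 689-706 make the matching check for inode space: the inode count tallied during the btree walk is converted to filesystem blocks and compared with the rmapbt total for owner XFS_RMAP_OINFO_INODES. Concretely, with 512-byte inodes on a 4096-byte-block filesystem, 64 counted inodes must correspond to XFS_B_TO_FSB(mp, 64 * 512) = 8 rmap-owned blocks. The tail of the function, with the comparison (line 705, not shown) assumed:

	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);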
712 struct xfs_scrub *sc)
722 switch (sc->sm->sm_type) {
724 cur = sc->sa.ino_cur;
727 cur = sc->sa.fino_cur;
734 error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
739 xchk_iallocbt_xref_rmap_btreeblks(sc);
748 if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
749 xchk_iallocbt_xref_rmap_inodes(sc, iabt.inodes);
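Lines 712-749 are the top-level scrubber: pick the cursor matching the requested scrub type, walk it with xchk_iallocbt_rec, then run the deferred rmap cross-references. Reconstructed, assuming the usual xchk_iallocbt shape and a struct xchk_iallocbt accumulator passed as the walk's private data:

	struct xchk_iallocbt	iabt = { .inodes = 0 };
	struct xfs_btree_cur	*cur;
	int			error;

	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_INOBT:
		cur = sc->sa.ino_cur;
		break;
	case XFS_SCRUB_TYPE_FINOBT:
		cur = sc->sa.fino_cur;
		break;
	default:
		ASSERT(0);
		return -EIO;
	}

	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc);

	/*
	 * Only the inobt sees every chunk, so only an inobt walk can
	 * check the grand total of inodes against the rmap; the finobt
	 * covers just the chunks with free inodes.
	 */
	if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
		xchk_iallocbt_xref_rmap_inodes(sc, iabt.inodes);

	return error;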
756 struct xfs_scrub *sc,
765 if (!(*icur) || xchk_skip_xref(sc->sm))
769 if (!xchk_should_check_xref(sc, &error, icur))
772 xchk_btree_xref_set_corrupt(sc, *icur, 0);
778 struct xfs_scrub *sc,
782 xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur,
784 xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur,
791 struct xfs_scrub *sc,
795 xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur,
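Lines 756-795 close the file with the helpers other scrubbers use to ask whether an AG extent overlaps an inode chunk. xchk_xref_is_not_inode_chunk probes both the inobt and the finobt expecting no records; xchk_xref_is_inode_chunk probes only the inobt (a fully allocated chunk is legitimately absent from the finobt). Both funnel into the shared checker; a sketch, with the probe call (likely xfs_ialloc_has_inodes_at_extent) and the outcome/expected types assumed:

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	/* Does [agbno, agbno + len) contain inobt/finobt records? */
	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (outcome != expected)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);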