/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2021-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_ISCAN_H__
#define __XFS_SCRUB_ISCAN_H__

struct xchk_iscan {
	struct xfs_scrub	*sc;

	/* Lock to protect the scan cursor. */
	struct mutex		lock;

	/*
	 * This is the first inode in the inumber address space that we
	 * examined.  When the scan wraps around back to here, the scan is
	 * finished.
	 */
	xfs_ino_t		scan_start_ino;

	/* This is the inode that will be examined next. */
	xfs_ino_t		cursor_ino;

	/* If nonzero, skip this inode when scanning. */
	xfs_ino_t		skip_ino;

	/*
	 * This is the last inode that we've successfully scanned, either
	 * because the caller scanned it or because we moved the cursor past
	 * an empty part of the inode address space.  Scan callers should only
	 * use the xchk_iscan_mark_visited function to modify this.
	 */
	xfs_ino_t		__visited_ino;

	/* Operational state of the livescan. */
	unsigned long		__opstate;

	/* Give up on iterating @cursor_ino if we can't iget it by this time. */
	unsigned long		__iget_deadline;

	/* Amount of time (in ms) that we will try to iget an inode. */
	unsigned int		iget_timeout;

	/* Wait this many ms to retry an iget. */
	unsigned int		iget_retry_delay;

	/*
	 * The scan grabs batches of inodes and stashes them here before
	 * handing them out with _iter.  Unallocated inodes are set in the
	 * mask so that all updates to those inodes are selected for live
	 * update propagation (a sketch follows the struct definition).
	 */
	xfs_ino_t		__batch_ino;
	xfs_inofree_t		__skipped_inomask;
	struct xfs_inode	*__inodes[XFS_INODES_PER_CHUNK];
};
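
/*
 * Illustrative sketch (not part of this header's contract): when a batch is
 * filled, an inode that cannot be grabbed because it is unallocated might be
 * recorded roughly like this, assuming chunk offset "i" and the usual
 * one-bit-per-inode layout of xfs_inofree_t:
 *
 *	iscan->__batch_ino = first inode number of the chunk;
 *	iscan->__skipped_inomask |= XFS_INOBT_MASK(i);
 *	iscan->__inodes[i] = NULL;
 *
 * Setting the bit lets xchk_iscan_want_live_update() treat such inodes as
 * always interesting for live update propagation, per the comment above.
 */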

/* Set if the scan has been aborted due to some event in the fs. */
#define XCHK_ISCAN_OPSTATE_ABORTED	(1)

static inline bool
xchk_iscan_aborted(const struct xchk_iscan *iscan)
{
	return test_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);
}

static inline void
xchk_iscan_abort(struct xchk_iscan *iscan)
{
	set_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);
}

void xchk_iscan_start(struct xfs_scrub *sc, unsigned int iget_timeout,
		unsigned int iget_retry_delay, struct xchk_iscan *iscan);
void xchk_iscan_teardown(struct xchk_iscan *iscan);

int xchk_iscan_iter(struct xchk_iscan *iscan, struct xfs_inode **ipp);
void xchk_iscan_iter_finish(struct xchk_iscan *iscan);

void xchk_iscan_mark_visited(struct xchk_iscan *iscan, struct xfs_inode *ip);
bool xchk_iscan_want_live_update(struct xchk_iscan *iscan, xfs_ino_t ino);
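
/*
 * Example of how a scrubber might drive this iterator.  This is an
 * illustrative sketch, not a contract: error handling and locking are
 * elided, the timeout values are arbitrary, handle_inode() is a
 * hypothetical caller function, and it assumes _iter returns 1 when it
 * hands back an inode, 0 when the scan is complete, and a negative errno
 * on error:
 *
 *	struct xchk_iscan	iscan;
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	xchk_iscan_start(sc, 30000, 100, &iscan);
 *	while ((error = xchk_iscan_iter(&iscan, &ip)) == 1) {
 *		error = handle_inode(sc, ip);
 *		xchk_iscan_mark_visited(&iscan, ip);
 *		xfs_irele(ip);
 *		if (error)
 *			break;
 *	}
 *	xchk_iscan_iter_finish(&iscan);
 *
 *	(... compare the scan's observations against ondisk metadata ...)
 *
 *	xchk_iscan_teardown(&iscan);
 *
 * A live update hook can combine xchk_iscan_aborted() and
 * xchk_iscan_want_live_update() to decide whether a concurrent change to
 * an inode should be folded into the scan's in-memory observations.
 */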

#endif /* __XFS_SCRUB_ISCAN_H__ */