// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_gc.h"
#include "btree_node_scan.h"
#include "ec.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "lru.h"
#include "logged_ops.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"

const char * const bch2_recovery_passes[] = {
#define x(_fn, ...)	#_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

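/*
 * bch2_gc() doesn't match the recovery pass calling convention
 * (int (*fn)(struct bch_fs *)), so wrap it: this is the check_allocations
 * pass.
 */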
static int bch2_check_allocations(struct bch_fs *c)
{
	return bch2_gc(c, true, false);
}

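/*
 * This pass marks the point in recovery after which it's safe to go
 * read-write; if there's work to do (journal keys to replay, fsck requested,
 * an unclean shutdown, or explicitly requested passes), go RW early so the
 * remaining passes can write.
 */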
static int bch2_set_may_go_rw(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	/*
	 * After we go RW, the journal keys buffer can't be modified (except for
	 * setting journal_key->overwritten), because it will be accessed by
	 * multiple threads:
	 */
	move_gap(keys, keys->nr);

	set_bit(BCH_FS_may_go_rw, &c->flags);

	if (keys->nr || c->opts.fsck || !c->sb.clean || c->recovery_passes_explicit)
		return bch2_fs_read_write_early(c);
	return 0;
}

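/*
 * Table of all recovery passes, generated from BCH_RECOVERY_PASSES(); the
 * PASS_* flags in .when control when each pass runs.
 */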
struct recovery_pass_fn {
	int		(*fn)(struct bch_fs *);
	unsigned	when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _id, _when)	{ .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};

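/*
 * Recovery pass numbers stored in the superblock use a separate, stable
 * numbering; the helpers below map between the in-memory enum (which may be
 * reordered between versions) and the stable on-disk values.
 */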
static const u8 passes_to_stable_map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
	BCH_RECOVERY_PASSES()
#undef x
};

static enum bch_recovery_pass_stable bch2_recovery_pass_to_stable(enum bch_recovery_pass pass)
{
	return passes_to_stable_map[pass];
}

u64 bch2_recovery_passes_to_stable(u64 v)
{
	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(passes_to_stable_map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(passes_to_stable_map[i]);
	return ret;
}

u64 bch2_recovery_passes_from_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

/*
 * For when we need to rewind recovery passes and run a pass we skipped:
 * rewinding clears the completion bits for the target pass and everything
 * after it, and returns -BCH_ERR_restart_recovery so the caller restarts the
 * recovery loop from there.
 */
int bch2_run_explicit_recovery_pass(struct bch_fs *c,
				    enum bch_recovery_pass pass)
{
	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return 0;

	bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
		 bch2_recovery_passes[pass], pass,
		 bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);

	c->recovery_passes_explicit |= BIT_ULL(pass);

	if (c->curr_recovery_pass >= pass) {
		c->curr_recovery_pass = pass;
		c->recovery_passes_complete &= ~(~0ULL << pass);
		return -BCH_ERR_restart_recovery;
	} else {
		return 0;
	}
}

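/*
 * Like bch2_run_explicit_recovery_pass(), but also records the pass in the
 * superblock's recovery_passes_required field, so it still gets run if
 * recovery is interrupted and restarted.
 */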
int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c,
					       enum bch_recovery_pass pass)
{
	enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	if (!test_bit_le64(s, ext->recovery_passes_required)) {
		__set_bit_le64(s, ext->recovery_passes_required);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	return bch2_run_explicit_recovery_pass(c, pass);
}

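/*
 * Drop the persistent "this pass is required" bit from the superblock; called
 * once a pass has run without the filesystem being flagged as errored.
 */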
static void bch2_clear_recovery_pass_required(struct bch_fs *c,
					      enum bch_recovery_pass pass)
{
	enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	if (test_bit_le64(s, ext->recovery_passes_required)) {
		__clear_bit_le64(s, ext->recovery_passes_required);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);
}

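/* Bitmask (in-memory numbering) of all passes that run as part of fsck: */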
u64 bch2_fsck_recovery_passes(void)
{
	u64 ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
		if (recovery_pass_fns[i].when & PASS_FSCK)
			ret |= BIT_ULL(i);
	return ret;
}

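/*
 * A pass runs if it was explicitly requested, if it's a fsck pass and fsck
 * was requested, if it's an unclean-shutdown pass and the filesystem wasn't
 * shut down cleanly, or if it's flagged to always run.
 */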
static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;

	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return true;
	if ((p->when & PASS_FSCK) && c->opts.fsck)
		return true;
	if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
		return true;
	if (p->when & PASS_ALWAYS)
		return true;
	return false;
}

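/*
 * Run a single pass, logging "<pass>... done" around it unless it's marked
 * PASS_SILENT:
 */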
static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;
	int ret;

	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
			   bch2_recovery_passes[pass]);
	ret = p->fn(c);
	if (ret)
		return ret;
	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_CONT " done\n");

	return 0;
}

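/*
 * Run the passes flagged PASS_ONLINE against an already-RW filesystem; a pass
 * may rewind c->curr_recovery_pass and return BCH_ERR_restart_recovery, in
 * which case the loop reloads its position from curr_recovery_pass.
 */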
int bch2_run_online_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
		struct recovery_pass_fn *p = recovery_pass_fns + i;

		if (!(p->when & PASS_ONLINE))
			continue;

		ret = bch2_run_recovery_pass(c, i);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
			i = c->curr_recovery_pass;
			continue;
		}
		if (ret)
			break;
	}

	return ret;
}

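/*
 * Main (offline) recovery loop: walk the pass table in order, stopping after
 * opts.recovery_pass_last if set.  A pass that rewinds c->curr_recovery_pass
 * (returning BCH_ERR_restart_recovery, or failing after rewinding) causes the
 * loop to continue from the rewound position; when a pass completes and no
 * errors have been flagged on the filesystem, its persistent "required" bit
 * is cleared.
 */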
int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
		if (c->opts.recovery_pass_last &&
		    c->curr_recovery_pass > c->opts.recovery_pass_last)
			break;

		if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
			unsigned pass = c->curr_recovery_pass;

			ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
			if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
			    (ret && c->curr_recovery_pass < pass))
				continue;
			if (ret)
				break;

			c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
		}

		c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);

		if (!test_bit(BCH_FS_error, &c->flags))
			bch2_clear_recovery_pass_required(c, c->curr_recovery_pass);

		c->curr_recovery_pass++;
	}

	return ret;
}