// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"
#include "btree_node_scan.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-downgrade.h"
#include "snapshot.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

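/*
 * Record that a btree has lost data: set the corresponding bit in the
 * superblock ext section so that later recovery/fsck runs know about it.
 */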
void bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
{
	if (btree >= BTREE_ID_NR_MAX)
		return;

	u64 b = BIT_ULL(btree);

	if (!(c->sb.btrees_lost_data & b)) {
		bch_err(c, "flagging btree %s lost data", bch2_btree_id_str(btree));

		mutex_lock(&c->sb_lock);
		bch2_sb_field_get(c->disk_sb.sb, ext)->btrees_lost_data |= cpu_to_le64(b);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}
}

/* for -o reconstruct_alloc: */
static void bch2_reconstruct_alloc(struct bch_fs *c)
{
	bch2_journal_log_msg(c, "dropping alloc info");
	bch_info(c, "dropping and reconstructing all alloc info");

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, ext->recovery_passes_required);
	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_info, ext->recovery_passes_required);
	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_lrus, ext->recovery_passes_required);
	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_extents_to_backpointers, ext->recovery_passes_required);
	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_to_lru_refs, ext->recovery_passes_required);

	__set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_alloc_key, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_stale_dirty_ptr, ext->errors_silent);

	__set_bit_le64(BCH_FSCK_ERR_dev_usage_buckets_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_dev_usage_sectors_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_dev_usage_fragmented_wrong, ext->errors_silent);

	__set_bit_le64(BCH_FSCK_ERR_fs_usage_btree_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_fs_usage_cached_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_fs_usage_replicas_wrong, ext->errors_silent);

	__set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_need_discard_key_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_freespace_key_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_bucket_gens_key_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_freespace_hole_missing, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_backpointer, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_lru_entry_bad, ext->errors_silent);
	c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));

	bch2_shoot_down_journal_keys(c, BTREE_ID_alloc,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
	bch2_shoot_down_journal_keys(c, BTREE_ID_backpointers,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
	bch2_shoot_down_journal_keys(c, BTREE_ID_need_discard,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
	bch2_shoot_down_journal_keys(c, BTREE_ID_freespace,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
	bch2_shoot_down_journal_keys(c, BTREE_ID_bucket_gens,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
}

/*
 * Btree node pointers have a field to stash a pointer to the in-memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	darray_for_each(*keys, i)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* journal replay: */

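/*
 * Advance the journal's replay position to @seq, dropping the journal pin for
 * each sequence number we replay past so those entries can be reclaimed:
 */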
static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

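/*
 * Replay a single journal key: traverse to its position and issue a btree
 * update at the level the key was written at; keys already marked overwritten
 * are skipped.
 */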
static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_intent|
		BTREE_ITER_not_extents;
	unsigned update_flags = BTREE_TRIGGER_norun;
	int ret;

	if (k->overwritten)
		return 0;

	trans->journal_res.seq = k->journal_seq;

	/*
	 * BTREE_UPDATE_key_cache_reclaim disables the key cache lookup/update
	 * that normally keeps the key cache coherent with the underlying
	 * btree. Nothing besides the allocator is doing updates yet so we
	 * don't need key cache coherency for non-alloc btrees, and key cache
	 * fills for snapshots btrees use BTREE_ITER_filter_snapshots, which
	 * isn't available until the snapshots recovery pass runs.
	 */
	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_cached;
	else
		update_flags |= BTREE_UPDATE_key_cache_reclaim;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	struct btree_path *path = btree_iter_path(trans, &iter);
	if (unlikely(!btree_path_node(path, k->level))) {
		bch2_trans_iter_exit(trans, &iter);
		bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
					  BTREE_MAX_DEPTH, 0, iter_flags);
		ret =   bch2_btree_iter_traverse(&iter) ?:
			bch2_btree_increase_depth(trans, iter.path, 0) ?:
			-BCH_ERR_transaction_restart_nested;
		goto out;
	}

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

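/* Sort keys that couldn't be replayed in btree order by journal sequence number: */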
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}

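/*
 * Replay keys from the journal into the btrees: first in sorted (btree) order
 * for better locality, then any keys that couldn't be committed that way are
 * replayed in journal order, unpinning journal entries as we go.
 */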
int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	DARRAY(struct journal_key *) keys_sorted = { 0 };
	struct journal *j = &c->journal;
	u64 start_seq	= c->journal_replay_seq_start;
	u64 end_seq	= c->journal_replay_seq_end;
	struct btree_trans *trans = NULL;
	bool immediate_flush = false;
	int ret = 0;

	if (keys->nr) {
		ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
					   keys->nr, start_seq, end_seq);
		if (ret)
			goto err;
	}

	BUG_ON(!atomic_read(&keys->ref));

	move_gap(keys, keys->nr);
	trans = bch2_trans_get(c);

	/*
	 * First, attempt to replay keys in sorted order. This is more
	 * efficient - better locality of btree access - but some might fail if
	 * that would cause a journal deadlock.
	 */
	darray_for_each(*keys, k) {
		cond_resched();

		/*
		 * k->allocated means the key wasn't read in from the journal,
		 * rather it was from early repair code
		 */
		if (k->allocated)
			immediate_flush = true;

		/* Skip fastpath if we're low on space in the journal */
		ret = c->journal.watermark ? -1 :
			commit_do(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_journal_reclaim|
				  (!k->allocated ? BCH_TRANS_COMMIT_no_journal_res : 0),
			     bch2_journal_replay_key(trans, k));
		BUG_ON(!ret && !k->overwritten);
		if (ret) {
			ret = darray_push(&keys_sorted, k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now, replay any remaining keys in the order in which they appear in
	 * the journal, unpinning those journal entries as we go:
	 */
	sort(keys_sorted.data, keys_sorted.nr,
	     sizeof(keys_sorted.data[0]),
	     journal_sort_seq_cmp, NULL);

	darray_for_each(keys_sorted, kp) {
		cond_resched();

		struct journal_key *k = *kp;

		if (k->journal_seq)
			replay_now_at(j, k->journal_seq);
		else
			replay_now_at(j, j->replay_journal_seq_end);

		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc|
				(!k->allocated
				 ? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim
				 : 0),
			     bch2_journal_replay_key(trans, k));
		bch_err_msg(c, ret, "while replaying key at btree %s level %u:",
			    bch2_btree_id_str(k->btree_id), k->level);
		if (ret)
			goto err;

		BUG_ON(!k->overwritten);
	}

	/*
	 * We need to put our btree_trans before calling flush_all_pins(), since
	 * that will use a btree_trans internally
	 */
	bch2_trans_put(trans);
	trans = NULL;

	if (!c->opts.retain_recovery_info &&
	    c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay)
		bch2_journal_keys_put_initial(c);

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);

	/* if we did any repair, flush it immediately */
	if (immediate_flush) {
		bch2_journal_flush_all_pins(&c->journal);
		ret = bch2_journal_meta(&c->journal);
	}

	if (keys->nr)
		bch2_journal_log_msg(c, "journal replay finished");
err:
	if (trans)
		bch2_trans_put(trans);
	darray_exit(&keys_sorted);
	bch_err_fn(c, ret);
	return ret;
}

/* journal replay early: */

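/*
 * Apply a single jset entry that has to be processed before journal replay
 * proper: btree roots, fs/device usage, journal seq blacklists and IO clocks.
 */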
static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		if (fsck_err_on(entry->btree_id >= BTREE_ID_NR_MAX,
				c, invalid_btree_id,
				"invalid btree id %u (max %u)",
				entry->btree_id, BTREE_ID_NR_MAX))
			return 0;

		while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
			ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
			if (ret)
				return ret;
		}

		r = bch2_btree_id_root(c, entry->btree_id);

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, (struct bkey_i *) entry->start);
			r->error = 0;
		} else {
			r->error = -BCH_ERR_btree_node_read_error;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		unsigned nr_types = jset_entry_dev_usage_nr_types(u);

		rcu_read_lock();
		struct bch_dev *ca = bch2_dev_rcu(c, le32_to_cpu(u->dev));
		if (ca)
			for (unsigned i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
				ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
				ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
				ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
			}
		rcu_read_unlock();

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}
fsck_err:
	return ret;
}

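/*
 * Walk every entry in either the clean superblock section (clean shutdown) or
 * the journal entries we read, feeding each one to journal_replay_entry_early():
 */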
static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	if (clean) {
		for (struct jset_entry *entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			int ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;

			if (journal_replay_ignore(i))
				continue;

			vstruct_for_each(&i->j, entry) {
				int ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

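/*
 * Read in the btree roots recorded in the journal/superblock; if a root can't
 * be read, schedule the repair passes that can reconstruct it:
 */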
static int read_btree_roots(struct bch_fs *c)
{
	int ret = 0;

	for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->alive)
			continue;

		if (btree_id_is_alloc(i) && c->opts.reconstruct_alloc)
			continue;

		if (mustfix_fsck_err_on((ret = r->error),
					c, btree_root_bkey_invalid,
					"invalid btree root %s",
					bch2_btree_id_str(i)) ||
		    mustfix_fsck_err_on((ret = r->error = bch2_btree_root_read(c, i, &r->key, r->level)),
					c, btree_root_read_error,
					"error reading btree root %s l=%u: %s",
					bch2_btree_id_str(i), r->level, bch2_err_str(ret))) {
			if (btree_id_is_alloc(i)) {
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
				r->error = 0;
			} else if (!(c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
				bch_info(c, "will run btree node scan");
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
			}

			ret = 0;
			bch2_btree_lost_data(c, i);
		}
	}

	for (unsigned i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->b && !r->error) {
			r->alive = false;
			r->level = 0;
			bch2_btree_root_alloc_fake(c, i, 0);
		}
	}
fsck_err:
	return ret;
}

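/*
 * Decide whether the on-disk format should be upgraded, based on the
 * version_upgrade option and the last version this filesystem was used with;
 * returns true if an upgrade was chosen and the superblock needs writing:
 */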
static bool check_version_upgrade(struct bch_fs *c)
{
	unsigned latest_version	= bcachefs_metadata_version_current;
	unsigned latest_compatible = min(latest_version,
					 bch2_latest_compatible_version(c->sb.version));
	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
	unsigned new_version = 0;

	if (old_version < bcachefs_metadata_required_upgrade_below) {
		if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
		    latest_compatible < bcachefs_metadata_required_upgrade_below)
			new_version = latest_version;
		else
			new_version = latest_compatible;
	} else {
		switch (c->opts.version_upgrade) {
		case BCH_VERSION_UPGRADE_compatible:
			new_version = latest_compatible;
			break;
		case BCH_VERSION_UPGRADE_incompatible:
			new_version = latest_version;
			break;
		case BCH_VERSION_UPGRADE_none:
			new_version = min(old_version, latest_version);
			break;
		}
	}

	if (new_version > old_version) {
		struct printbuf buf = PRINTBUF;

		if (old_version < bcachefs_metadata_required_upgrade_below)
			prt_str(&buf, "Version upgrade required:\n");

		if (old_version != c->sb.version) {
			prt_str(&buf, "Version upgrade from ");
			bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
			prt_str(&buf, " to ");
			bch2_version_to_text(&buf, c->sb.version);
			prt_str(&buf, " incomplete\n");
		}

		prt_printf(&buf, "Doing %s version upgrade from ",
			   BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
			   ? "incompatible" : "compatible");
		bch2_version_to_text(&buf, old_version);
		prt_str(&buf, " to ");
		bch2_version_to_text(&buf, new_version);
		prt_newline(&buf);

		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
		__le64 passes = ext->recovery_passes_required[0];
		bch2_sb_set_upgrade(c, old_version, new_version);
		passes = ext->recovery_passes_required[0] & ~passes;

		if (passes) {
			prt_str(&buf, "  running recovery passes: ");
			prt_bitflags(&buf, bch2_recovery_passes,
				     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
		}

		bch_info(c, "%s", buf.buf);

		bch2_sb_upgrade(c, new_version);

		printbuf_exit(&buf);
		return true;
	}

	return false;
}

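/*
 * Main recovery path: read the journal (or the clean superblock section),
 * replay it, run whatever recovery/fsck passes are required, then update the
 * superblock to reflect what was done:
 */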
int bch2_fs_recovery(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 last_seq = 0, blacklist_seq, journal_seq;
	int ret = 0;

	if (c->sb.clean) {
		clean = bch2_read_superblock_clean(c);
		ret = PTR_ERR_OR_ZERO(clean);
		if (ret)
			goto err;

		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));
	} else {
		bch_info(c, "recovering from unclean shutdown");
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (c->opts.norecovery)
		c->opts.recovery_pass_last = BCH_RECOVERY_PASS_journal_replay - 1;

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
	bool write_sb = false;

	if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
		ext->recovery_passes_required[0] |=
			cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
		write_sb = true;
	}

	u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
	if (sb_passes) {
		struct printbuf buf = PRINTBUF;
		prt_str(&buf, "superblock requires following recovery passes to be run:\n  ");
		prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
		bch_info(c, "%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (bch2_check_version_downgrade(c)) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "Version downgrade required:");

		__le64 passes = ext->recovery_passes_required[0];
		bch2_sb_set_downgrade(c,
				      BCH_VERSION_MINOR(bcachefs_metadata_version_current),
				      BCH_VERSION_MINOR(c->sb.version));
		passes = ext->recovery_passes_required[0] & ~passes;
		if (passes) {
			prt_str(&buf, "\n  running recovery passes: ");
			prt_bitflags(&buf, bch2_recovery_passes,
				     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
		}

		bch_info(c, "%s", buf.buf);
		printbuf_exit(&buf);
		write_sb = true;
	}

	if (check_version_upgrade(c))
		write_sb = true;

	c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);

	if (c->opts.fsck)
		set_bit(BCH_FS_fsck_running, &c->flags);

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	bch2_journal_pos_from_member_info_resume(c);

	if (!c->sb.clean || c->opts.retain_recovery_info) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		/*
		 * note: cmd_list_journal needs the blacklist table fully up to date so
		 * it can asterisk ignored journal entries:
		 */
		if (c->opts.read_journal_only)
			goto out;

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (!journal_replay_ignore(*i)) {
				last_journal_entry = &(*i)->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				clean_but_journal_not_empty,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c,
				    dirty_but_no_journal_entries,
				    "no journal entries found");
			if (clean)
				goto use_clean;

			genradix_for_each_reverse(&c->journal_entries, iter, i)
				if (*i) {
					last_journal_entry = &(*i)->j;
					(*i)->ignore_blacklisted = false;
					(*i)->ignore_not_dirty = false;
					/*
					 * This was probably a NO_FLUSH entry,
					 * so last_seq was garbage - but we know
					 * we're only using a single journal
					 * entry, set it here:
					 */
					(*i)->j.last_seq = (*i)->j.seq;
					break;
				}
		}

		ret = bch2_journal_keys_sort(c);
		if (ret)
			goto err;

		if (c->sb.clean && last_journal_entry) {
			ret = bch2_verify_superblock_clean(c, &clean,
						      last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = -BCH_ERR_fsck_repair_impossible;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	c->journal_replay_seq_start	= last_seq;
	c->journal_replay_seq_end	= blacklist_seq - 1;

	if (c->opts.reconstruct_alloc)
		bch2_reconstruct_alloc(c);

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret =   bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
					     blacklist_seq, journal_seq) ?:
			bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err_msg(c, ret, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret =   bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
				     journal_seq, last_seq, blacklist_seq - 1) ?:
		bch2_fs_journal_start(&c->journal, journal_seq);
	if (ret)
		goto err;

	/*
	 * Skip past versions that might have been used (as nonces), but hadn't
	 * had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	ret = bch2_run_recovery_passes(c);
	if (ret)
		goto err;

	clear_bit(BCH_FS_fsck_running, &c->flags);

	/* fsync if we fixed errors */
	if (test_bit(BCH_FS_errors_fixed, &c->flags) &&
	    bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_meta(&c->journal);
		bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
	}

	/* If we fixed errors, verify that fs is actually clean now: */
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    test_bit(BCH_FS_errors_fixed, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags) &&
	    !test_bit(BCH_FS_error, &c->flags)) {
		bch2_flush_fsck_errs(c);

		bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
		clear_bit(BCH_FS_errors_fixed, &c->flags);

		c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;

		ret = bch2_run_recovery_passes(c);
		if (ret)
			goto err;

		if (test_bit(BCH_FS_errors_fixed, &c->flags) ||
		    test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
			bch_err(c, "Second fsck run was not clean");
			set_bit(BCH_FS_errors_not_fixed, &c->flags);
		}

		set_bit(BCH_FS_errors_fixed, &c->flags);
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	ext = bch2_sb_field_get(c->disk_sb.sb, ext);
	write_sb = false;

	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version));
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags) &&
	    !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags) &&
	    !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent))) {
		memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    c->recovery_pass_done == BCH_RECOVERY_PASS_NR - 1 &&
	    ext->btrees_lost_data) {
		ext->btrees_lost_data = 0;
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (bch2_blacklist_entries_gc(c))
		write_sb = true;

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch2_move_stats_init(&stats, "recovery");

		struct printbuf buf = PRINTBUF;
		bch2_version_to_text(&buf, c->sb.version_min);
		bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf);
		printbuf_exit(&buf);

		ret =   bch2_fs_read_write_early(c) ?:
			bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	ret = 0;
out:
	bch2_flush_fsck_errs(c);

	if (!c->opts.retain_recovery_info) {
		bch2_journal_keys_put_initial(c);
		bch2_find_btree_nodes_exit(&c->found_btree_nodes);
	}
	if (!IS_ERR(clean))
		kfree(clean);

	if (!ret &&
	    test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) &&
	    !c->opts.nochanges) {
		bch2_fs_read_write_early(c);
		bch2_delete_dead_snapshots_async(c);
	}

	bch_err_fn(c, ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}

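/*
 * Format-time initialization of a new filesystem: set up fake (empty) btree
 * roots, allocate and start the journal, mark superblocks and journal buckets,
 * and create the root and lost+found directories:
 */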
int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	int ret;

	bch_notice(c, "initializing new filesystem");
	set_bit(BCH_FS_new_fs, &c->flags);

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	bch2_check_version_downgrade(c);

	if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
		bch2_sb_upgrade(c, bcachefs_metadata_version_current);
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	c->curr_recovery_pass = BCH_RECOVERY_PASS_NR;
	set_bit(BCH_FS_may_go_rw, &c->flags);

	for (unsigned i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc_fake(c, i, 0);

	for_each_member_device(c, ca)
		bch2_dev_usage_init(ca);

	ret = bch2_fs_journal_alloc(c);
	if (ret)
		goto err;

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	ret = bch2_trans_mark_dev_sbs(c);
	bch_err_msg(c, ret, "marking superblocks");
	if (ret)
		goto err;

	for_each_online_member(c, ca)
		ca->new_fs_bucket_idx = 0;

	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	ret = bch2_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	ret = bch2_snapshots_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
	root_inode.bi_inum	= BCACHEFS_ROOT_INO;
	root_inode.bi_subvol	= BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(&packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
	bch_err_msg(c, ret, "creating root directory");
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	bch_err_msg(c, ret, "creating lost+found");
	if (ret)
		goto err;

	c->recovery_pass_done = BCH_RECOVERY_PASS_NR - 1;

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	ret = bch2_journal_flush(&c->journal);
	bch_err_msg(c, ret, "writing first journal entry");
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	bch_err_fn(c, ret);
	return ret;
}