// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"

#define SYSFS_OPS(type)							\
const struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}
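
/*
 * Illustrative sketch, not a literal preprocessor dump: SYSFS_OPS(bch2_fs)
 * expands to roughly
 *
 *	const struct sysfs_ops bch2_fs_sysfs_ops = {
 *		.show	= bch2_fs_show,
 *		.store	= bch2_fs_store,
 *	};
 *
 * tying a kobject type to the show/store handlers that the SHOW() and
 * STORE() macros below generate.
 */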

#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (out.pos && out.buf[out.pos - 1] != '\n')			\
		prt_newline(&out);					\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return bch2_err_class(ret);					\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)

#define STORE(fn)							\
static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
				  const char *, size_t);		\
									\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)		\
{									\
	return bch2_err_class(fn##_store_inner(kobj, attr, buf, size));	\
}									\
									\
static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
				  const char *buf, size_t size)
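
/*
 * Usage sketch: SHOW(fn) declares fn_show() as the sysfs ->show hook and
 * leaves the brace block following the macro invocation as the body of
 * fn_to_text(), which renders into a printbuf:
 *
 *	SHOW(bch2_fs)
 *	{
 *		struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
 *		...
 *		return 0;
 *	}
 *
 * STORE() works the same way for the ->store hook, wrapping the inner
 * function so that private error codes are mapped to standard errnos via
 * bch2_err_class().
 */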

#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, 0200)
#define read_attribute(n)	__sysfs_attribute(n, 0444)
#define rw_attribute(n)		__sysfs_attribute(n, 0644)
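
/*
 * For example, rw_attribute(discard) expands to
 *
 *	static struct attribute sysfs_discard =
 *		{ .name = "discard", .mode = 0644 };
 *
 * i.e. a sysfs file named "discard", matched in the show/store handlers
 * with "attr == &sysfs_discard".
 */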

#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_printf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		snprint(out, var);					\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_human_readable_s64(out, val);			\
} while (0)

#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)
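
/*
 * These helpers are meant to be dropped into the _to_text()/_store_inner()
 * bodies generated by SHOW()/STORE(), where "attr", "out", "buf" and "size"
 * are in scope. A minimal sketch (the "example" kobject type and "foo"
 * field are hypothetical):
 *
 *	SHOW(example)
 *	{
 *		sysfs_print(foo, s->foo);	// emit foo, newline-terminated
 *		return 0;
 *	}
 *
 *	STORE(example)
 *	{
 *		sysfs_strtoul(foo, s->foo);	// parse + assign, returns size
 *		return size;
 *	}
 */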

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})
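
/*
 * Note: strtoul_or_return() is a GNU statement expression; the "return _r"
 * returns from the *enclosing* store handler on a parse error, so e.g.
 *
 *	bool v = strtoul_or_return(buf);
 *
 * either yields the parsed value or bails out of the handler with whatever
 * error kstrtoul() returned.
 */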

write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(trigger_journal_flush);
write_attribute(prune_cache);
write_attribute(btree_wakeup);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(flags);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
rw_attribute(durability);
read_attribute(io_done);
read_attribute(io_errors);
write_attribute(io_errors_reset);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_write_stats);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(stripes_heap);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
read_attribute(write_points);
read_attribute(nocow_lock_table);

#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);

static const char * const bch2_write_refs[] = {
#define x(n)	#n,
	BCH_WRITE_REFS()
#undef x
	NULL
};
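
/*
 * The x-macro above stringifies each entry of BCH_WRITE_REFS(); e.g.
 * (sketch) if the list contained x(trans) x(sysfs), the array would become
 * { "trans", "sysfs", NULL }, indexed in the same order as c->writes[].
 */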

static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_printbuf_tabstop_push(out, 24);

	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
		prt_str(out, bch2_write_refs[i]);
		prt_tab(out);
		prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
		prt_newline(out);
	}
}
#endif

read_attribute(internal_uuid);
read_attribute(disk_groups);

read_attribute(has_data);
read_attribute(alloc_debug);

#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(discard);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_status);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(moving_ctxts);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define x(_name)							\
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = 0444 };
	BCH_TIME_STATS()
#undef x

static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = 0444,
};

static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_buf_bytes(b);
	mutex_unlock(&c->btree_cache.lock);
	return ret;
}

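/*
 * Walks every extent-bearing btree and tallies, per compression type, the
 * extent count and compressed/uncompressed sector totals, then renders
 * them as a four-column table: type, compressed, uncompressed, average
 * extent size.
 */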
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans *trans;
	enum btree_id id;
	struct compression_type_stats {
		u64		nr_extents;
		u64		sectors_compressed;
		u64		sectors_uncompressed;
	} s[BCH_COMPRESSION_TYPE_NR];
	u64 compressed_incompressible = 0;
	int ret = 0;

	memset(s, 0, sizeof(s));

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EPERM;

	trans = bch2_trans_get(c);

	for (id = 0; id < BTREE_ID_NR; id++) {
		if (!btree_type_has_ptrs(id))
			continue;

		ret = for_each_btree_key(trans, iter, id, POS_MIN,
					 BTREE_ITER_ALL_SNAPSHOTS, k, ({
			struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
			struct bch_extent_crc_unpacked crc;
			const union bch_extent_entry *entry;
			bool compressed = false, incompressible = false;

			bkey_for_each_crc(k.k, ptrs, crc, entry) {
				incompressible	|= crc.compression_type == BCH_COMPRESSION_TYPE_incompressible;
				compressed	|= crc_is_compressed(crc);

				if (crc_is_compressed(crc)) {
					s[crc.compression_type].nr_extents++;
					s[crc.compression_type].sectors_compressed += crc.compressed_size;
					s[crc.compression_type].sectors_uncompressed += crc.uncompressed_size;
				}
			}

			compressed_incompressible += compressed && incompressible;

			if (!compressed) {
				unsigned t = incompressible ? BCH_COMPRESSION_TYPE_incompressible : 0;

				s[t].nr_extents++;
				s[t].sectors_compressed += k.k->size;
				s[t].sectors_uncompressed += k.k->size;
			}
			0;	/* loop body is a statement expression; 0 continues iteration */
		}));
	}

	bch2_trans_put(trans);

	if (ret)
		return ret;

	prt_str(out, "type");
	printbuf_tabstop_push(out, 12);
	prt_tab(out);

	prt_str(out, "compressed");
	printbuf_tabstop_push(out, 16);
	prt_tab_rjust(out);

	prt_str(out, "uncompressed");
	printbuf_tabstop_push(out, 16);
	prt_tab_rjust(out);

	prt_str(out, "average extent size");
	printbuf_tabstop_push(out, 24);
	prt_tab_rjust(out);
	prt_newline(out);

	for (unsigned i = 0; i < ARRAY_SIZE(s); i++) {
		bch2_prt_compression_type(out, i);
		prt_tab(out);

		prt_human_readable_u64(out, s[i].sectors_compressed << 9);
		prt_tab_rjust(out);

		prt_human_readable_u64(out, s[i].sectors_uncompressed << 9);
		prt_tab_rjust(out);

		prt_human_readable_u64(out, s[i].nr_extents
				       ? div_u64(s[i].sectors_uncompressed << 9, s[i].nr_extents)
				       : 0);
		prt_tab_rjust(out);
		prt_newline(out);
	}

	if (compressed_incompressible) {
		prt_printf(out, "%llu compressed & incompressible extents", compressed_incompressible);
		prt_newline(out);
	}

	return 0;
}

static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree));
	bch2_bpos_to_text(out, c->gc_gens_pos);
	prt_printf(out, "\n");
}

static void bch2_btree_wakeup_all(struct bch_fs *c)
{
	struct btree_trans *trans;

	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);

		if (b)
			six_lock_wakeup_all(&b->lock);
	}
	seqmutex_unlock(&c->btree_trans_lock);
}

SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor,			c->minor);
	sysfs_printf(internal_uuid, "%pU",	c->sb.uuid.b);

	if (attr == &sysfs_flags)
		prt_bitflags(out, bch2_fs_flag_strs, c->flags);

	sysfs_hprint(btree_cache_size,		bch2_btree_cache_size(c));

	if (attr == &sysfs_btree_write_stats)
		bch2_btree_write_stats_to_text(out, c);

	sysfs_printf(btree_gc_periodic, "%u",	(int) c->btree_gc_periodic);

	if (attr == &sysfs_gc_gens_pos)
		bch2_gc_gens_pos_to_text(out, c);

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_printf(rebalance_enabled,		"%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance,	&c->rebalance.pd); /* XXX */

	if (attr == &sysfs_copy_gc_wait)
		bch2_copygc_wait_to_text(out, c);

	if (attr == &sysfs_rebalance_status)
		bch2_rebalance_status_to_text(out, c);

	sysfs_print(promote_whole_extents,	c->promote_whole_extents);

	/* Debugging: */

	if (attr == &sysfs_journal_debug)
		bch2_journal_debug_to_text(out, &c->journal);

	if (attr == &sysfs_btree_cache)
		bch2_btree_cache_to_text(out, c);

	if (attr == &sysfs_btree_key_cache)
		bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

	if (attr == &sysfs_stripes_heap)
		bch2_stripes_heap_to_text(out, c);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c);

	if (attr == &sysfs_open_buckets_partial)
		bch2_open_buckets_partial_to_text(out, c);

	if (attr == &sysfs_write_points)
		bch2_write_points_to_text(out, c);

	if (attr == &sysfs_compression_stats)
		bch2_compression_stats_to_text(out, c);

	if (attr == &sysfs_new_stripes)
		bch2_new_stripes_to_text(out, c);

	if (attr == &sysfs_io_timers_read)
		bch2_io_timers_to_text(out, &c->io_clock[READ]);

	if (attr == &sysfs_io_timers_write)
		bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

	if (attr == &sysfs_moving_ctxts)
		bch2_fs_moving_ctxts_to_text(out, c);

#ifdef BCH_WRITE_REF_DEBUG
	if (attr == &sysfs_write_refs)
		bch2_write_refs_to_text(out, c);
#endif

	if (attr == &sysfs_nocow_lock_table)
		bch2_nocow_locks_to_text(out, &c->nocow_locks);

	if (attr == &sysfs_disk_groups)
		bch2_disk_groups_to_text(out, c);

	return 0;
}

STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		if (c->copygc_thread)
			wake_up_process(c->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_pd_controller_store(rebalance,	&c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents,	c->promote_whole_extents);

	/* Debugging: */

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EPERM;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
		return -EROFS;

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
	}

	if (attr == &sysfs_btree_wakeup)
		bch2_btree_wakeup_all(c);

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
#else
		bch2_gc_gens(c);
#endif
	}

	if (attr == &sysfs_trigger_discards)
		bch2_do_discards(c);

	if (attr == &sysfs_trigger_invalidates)
		bch2_do_invalidates(c);

	if (attr == &sysfs_trigger_journal_flush) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_meta(&c->journal);
	}

#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test		= strsep(&p, " \t\n");
		char *nr_str		= strsep(&p, " \t\n");
		char *threads_str	= strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);

		if (ret)
			size = ret;
	}
#endif
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
	return size;
}
SYSFS_OPS(bch2_fs);
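
/*
 * These attributes surface under the filesystem's sysfs directory
 * (/sys/fs/bcachefs/<uuid>/ on a typical setup; path layout is a sketch,
 * not a guarantee). E.g. writing to internal/trigger_gc runs
 * bch2_gc_gens() via the handler above.
 */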

struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_write_stats,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};

/* counters dir */

SHOW(bch2_fs_counters)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
	u64 counter = 0;
	u64 counter_since_mount = 0;

	printbuf_tabstop_push(out, 32);

#define x(t, ...) \
	if (attr == &sysfs_##t) {					\
		counter             = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
		counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
		prt_printf(out, "since mount:");			\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter_since_mount);	\
		prt_newline(out);					\
									\
		prt_printf(out, "since filesystem creation:");		\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter);			\
		prt_newline(out);					\
	}
	BCH_PERSISTENT_COUNTERS()
#undef x
	return 0;
}

STORE(bch2_fs_counters)
{
	return 0;
}

SYSFS_OPS(bch2_fs_counters);

struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...) \
	&sysfs_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
	&sysfs_flags,
	&sysfs_journal_debug,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_new_stripes,
	&sysfs_stripes_heap,
	&sysfs_open_buckets,
	&sysfs_open_buckets_partial,
	&sysfs_write_points,
#ifdef BCH_WRITE_REF_DEBUG
	&sysfs_write_refs,
#endif
	&sysfs_nocow_lock_table,
	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_gc,
	&sysfs_trigger_discards,
	&sysfs_trigger_invalidates,
	&sysfs_trigger_journal_flush,
	&sysfs_prune_cache,
	&sysfs_btree_wakeup,

	&sysfs_gc_gens_pos,

	&sysfs_copy_gc_enabled,
	&sysfs_copy_gc_wait,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_status,
	sysfs_pd_controller_files(rebalance),

	&sysfs_moving_ctxts,

	&sysfs_internal_uuid,

	&sysfs_disk_groups,
	NULL
};

/* options */

SHOW(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
	prt_char(out, '\n');

	return 0;
}

STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	/*
	 * We don't need to take c->writes for correctness, but it eliminates an
	 * unsightly error message in the dmesg log when we're RO:
	 */
	if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
		return -EROFS;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto err;
	}

	ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
	kfree(tmp);

	if (ret < 0)
		goto err;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		goto err;

	bch2_opt_set_sb(c, opt, v);
	bch2_opt_set_by_id(&c->opts, id, v);

	if (v &&
	    (id == Opt_background_target ||
	     id == Opt_background_compression ||
	     (id == Opt_compression && !c->opts.background_compression)))
		bch2_set_rebalance_needs_scan(c, 0);

	ret = size;
err:
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
	return ret;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };
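
/*
 * The options directory has no static attribute list: each file is created
 * from bch2_opt_table by bch2_opts_create_sysfs_files() below, so every
 * filesystem-scope option (OPT_FS) shows up as its own read/write file
 * under the opts directory.
 */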

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->flags & OPT_FS))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}

/* time stats */

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)						\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};

static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstop_push(out, 8);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);

	bch2_dev_usage_to_text(out, &stats);

	prt_newline(out);

	prt_printf(out, "reserves:");
	prt_newline(out);
	for (i = 0; i < BCH_WATERMARK_NR; i++) {
		prt_str(out, bch2_watermarks[i]);
		prt_tab(out);
		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
		prt_tab_rjust(out);
		prt_newline(out);
	}

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_str(out, "freelist_wait");
	prt_tab(out);
	prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open buckets allocated");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_newline(out);

	prt_str(out, "open buckets this dev");
	prt_tab(out);
	prt_u64(out, ca->nr_open_buckets);
	prt_newline(out);

	prt_str(out, "open buckets total");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT);
	prt_newline(out);

	prt_str(out, "open_buckets_wait");
	prt_tab(out);
	prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open_buckets_btree");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_btree]);
	prt_newline(out);

	prt_str(out, "open_buckets_user");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_user]);
	prt_newline(out);

	prt_str(out, "buckets_to_invalidate");
	prt_tab(out);
	prt_u64(out, should_invalidate_buckets(ca, stats));
	prt_newline(out);

	prt_str(out, "btree reserve cache");
	prt_tab(out);
	prt_u64(out, c->btree_reserve_cache_nr);
	prt_newline(out);
}

static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		prt_printf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			prt_printf(out, "%-12s:%12llu\n",
				   bch2_data_type_str(i),
				   percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}

SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	sysfs_printf(uuid,		"%pU\n", ca->uuid.b);

	sysfs_print(bucket_size,	bucket_bytes(ca));
	sysfs_print(first_bucket,	ca->mi.first_bucket);
	sysfs_print(nbuckets,		ca->mi.nbuckets);
	sysfs_print(durability,		ca->mi.durability);
	sysfs_print(discard,		ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group)
			bch2_disk_path_to_text(out, c, ca->mi.group - 1);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_has_data) {
		prt_bitflags(out, __bch2_data_types, bch2_dev_has_data(c, ca));
		prt_char(out, '\n');
	}

	if (attr == &sysfs_state_rw) {
		prt_string_option(out, bch2_member_states, ca->mi.state);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_io_done)
		dev_io_done_to_text(out, ca);

	if (attr == &sysfs_io_errors)
		bch2_dev_io_errors_to_text(out, ca);

	sysfs_print(io_latency_read,		atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write,		atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		bch2_time_stats_to_text(out, &ca->io_latency[READ].stats);

	if (attr == &sysfs_io_latency_stats_write)
		bch2_time_stats_to_text(out, &ca->io_latency[WRITE].stats);

	sysfs_printf(congested,			"%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_alloc_debug)
		dev_alloc_debug_to_text(out, ca);

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_durability) {
		u64 v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		/* durability is stored off-by-one in the superblock (0 = unset): */
		if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
			SET_BCH_MEMBER_DURABILITY(mi, v + 1);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_io_errors_reset)
		bch2_dev_errors_reset(ca);

	return size;
}
SYSFS_OPS(bch2_dev);
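
/*
 * Per-device attributes appear under each member device's kobject,
 * e.g. (sketch) /sys/fs/bcachefs/<uuid>/dev-<idx>/durability; writes to
 * durability and discard update the superblock member fields above.
 */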

struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_io_done,
	&sysfs_io_errors,
	&sysfs_io_errors_reset,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */