// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_sysfs.h"
#include "xfs_btree.h"
#include "xfs_super.h"
#include "scrub/scrub.h"
#include "scrub/stats.h"
#include "scrub/trace.h"

struct xchk_scrub_stats {
	/* all 32-bit counters here */

	/* checking stats */
	uint32_t		invocations;
	uint32_t		clean;
	uint32_t		corrupt;
	uint32_t		preen;
	uint32_t		xfail;
	uint32_t		xcorrupt;
	uint32_t		incomplete;
	uint32_t		warning;
	uint32_t		retries;

	/* repair stats */
	uint32_t		repair_invocations;
	uint32_t		repair_success;

	/* all 64-bit items here */

	/* runtimes */
	uint64_t		checktime_us;
	uint64_t		repairtime_us;

	/* non-counter state must go at the end for clearall */
	spinlock_t		css_lock;
};

struct xchk_stats {
	struct dentry		*cs_debugfs;
	struct xchk_scrub_stats	cs_stats[XFS_SCRUB_TYPE_NR];
};

static struct xchk_stats	global_stats;

static const char *name_map[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= "sb",
	[XFS_SCRUB_TYPE_AGF]		= "agf",
	[XFS_SCRUB_TYPE_AGFL]		= "agfl",
	[XFS_SCRUB_TYPE_AGI]		= "agi",
	[XFS_SCRUB_TYPE_BNOBT]		= "bnobt",
	[XFS_SCRUB_TYPE_CNTBT]		= "cntbt",
	[XFS_SCRUB_TYPE_INOBT]		= "inobt",
	[XFS_SCRUB_TYPE_FINOBT]		= "finobt",
	[XFS_SCRUB_TYPE_RMAPBT]		= "rmapbt",
	[XFS_SCRUB_TYPE_REFCNTBT]	= "refcountbt",
	[XFS_SCRUB_TYPE_INODE]		= "inode",
	[XFS_SCRUB_TYPE_BMBTD]		= "bmapbtd",
	[XFS_SCRUB_TYPE_BMBTA]		= "bmapbta",
	[XFS_SCRUB_TYPE_BMBTC]		= "bmapbtc",
	[XFS_SCRUB_TYPE_DIR]		= "directory",
	[XFS_SCRUB_TYPE_XATTR]		= "xattr",
	[XFS_SCRUB_TYPE_SYMLINK]	= "symlink",
	[XFS_SCRUB_TYPE_PARENT]		= "parent",
	[XFS_SCRUB_TYPE_RTBITMAP]	= "rtbitmap",
	[XFS_SCRUB_TYPE_RTSUM]		= "rtsummary",
	[XFS_SCRUB_TYPE_UQUOTA]		= "usrquota",
	[XFS_SCRUB_TYPE_GQUOTA]		= "grpquota",
	[XFS_SCRUB_TYPE_PQUOTA]		= "prjquota",
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= "fscounters",
	[XFS_SCRUB_TYPE_QUOTACHECK]	= "quotacheck",
	[XFS_SCRUB_TYPE_NLINKS]		= "nlinks",
};

/* Format the scrub stats into a text buffer, similar to pcp style. */
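/*
 * Each output row is the type name followed by 13 numeric columns, in the
 * order emitted by the format string below:
 *   invocations clean corrupt preen xfail xcorrupt incomplete warning
 *   retries checktime_us repair_invocations repair_success repairtime_us
 * A row might look like this (values purely illustrative):
 *   agf 12 10 2 0 0 0 0 0 3 4512 2 2 988
 */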
STATIC ssize_t
xchk_stats_format(
	struct xchk_stats	*cs,
	char			*buf,
	size_t			remaining)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	ssize_t			copied = 0;
	int			ret = 0;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		ret = scnprintf(buf, remaining,
 "%s %u %u %u %u %u %u %u %u %u %llu %u %u %llu\n",
				name_map[i],
				(unsigned int)css->invocations,
				(unsigned int)css->clean,
				(unsigned int)css->corrupt,
				(unsigned int)css->preen,
				(unsigned int)css->xfail,
				(unsigned int)css->xcorrupt,
				(unsigned int)css->incomplete,
				(unsigned int)css->warning,
				(unsigned int)css->retries,
				(unsigned long long)css->checktime_us,
				(unsigned int)css->repair_invocations,
				(unsigned int)css->repair_success,
				(unsigned long long)css->repairtime_us);
		if (ret <= 0)
			break;

		remaining -= ret;
		copied += ret;
		buf += ret;
	}

	return copied > 0 ? copied : ret;
}

/* Estimate the worst case buffer size required to hold the whole report. */
STATIC size_t
xchk_stats_estimate_bufsize(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	size_t			field_width;
	size_t			ret = 0;

	/* 4294967295 plus one space for each u32 field */
	field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
			    sizeof(uint32_t));

	/* 18446744073709551615 plus one space for each u64 field */
	field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
			      offsetof(struct xchk_scrub_stats, checktime_us)) /
			     sizeof(uint64_t));
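	/*
	 * With the current layout this charges eleven bytes per 4-byte slot
	 * in the u32 region (the eleven counters, plus any alignment padding
	 * before checktime_us) and 21 bytes for each of the two u64
	 * runtimes; a deliberate overestimate.
	 */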

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		/* name plus one space */
		ret += 1 + strlen(name_map[i]);

		/* all fields, plus newline */
		ret += field_width + 1;
	}

	return ret;
}

/* Clear all counters. */
STATIC void
xchk_stats_clearall(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		spin_lock(&css->css_lock);
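		/*
		 * Zero only the counters; css_lock is deliberately the last
		 * member of the struct so that it survives the wipe.
		 */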
		memset(css, 0, offsetof(struct xchk_scrub_stats, css_lock));
		spin_unlock(&css->css_lock);
	}
}

#define XFS_SCRUB_OFLAG_UNCLEAN	(XFS_SCRUB_OFLAG_CORRUPT | \
				 XFS_SCRUB_OFLAG_PREEN | \
				 XFS_SCRUB_OFLAG_XFAIL | \
				 XFS_SCRUB_OFLAG_XCORRUPT | \
				 XFS_SCRUB_OFLAG_INCOMPLETE | \
				 XFS_SCRUB_OFLAG_WARNING)

STATIC void
xchk_stats_merge_one(
	struct xchk_stats		*cs,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	struct xchk_scrub_stats		*css;

	if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
		ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
		return;
	}

	css = &cs->cs_stats[sm->sm_type];
	spin_lock(&css->css_lock);
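	/*
	 * All counters for this run are bumped under a single lock hold, so
	 * concurrent scrub runs and clear_stats cannot interleave updates.
	 * Note that the read side copies the counters without the lock, so
	 * a reader may observe a partially merged run.
	 */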
	css->invocations++;
	if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
		css->clean++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		css->corrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		css->preen++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
		css->xfail++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
		css->xcorrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		css->incomplete++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
		css->warning++;
	css->retries += run->retries;
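	/* howmany_64 rounds up, so even sub-microsecond runs are counted. */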
	css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);

	if (run->repair_attempted)
		css->repair_invocations++;
	if (run->repair_succeeded)
		css->repair_success++;
	css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
	spin_unlock(&css->css_lock);
}

/* Merge these scrub-run stats into the global and mount stat data. */
void
xchk_stats_merge(
	struct xfs_mount		*mp,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	xchk_stats_merge_one(&global_stats, sm, run);
	xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}

/* debugfs boilerplate */

static ssize_t
xchk_scrub_stats_read(
	struct file		*file,
	char __user		*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	char			*buf;
	size_t			bufsize;
	ssize_t			avail, ret;

	/*
	 * This generates a text snapshot of all the scrub counters, so we
	 * do not want userspace to receive garbled text from multiple calls.
	 * If the file position is greater than 0, return a short read.
	 */
	if (*ppos > 0)
		return 0;

	bufsize = xchk_stats_estimate_bufsize(cs);
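	/* The estimate is a worst case, so the format step cannot truncate. */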

	buf = kvmalloc(bufsize, XCHK_GFP_FLAGS);
	if (!buf)
		return -ENOMEM;

	avail = xchk_stats_format(cs, buf, bufsize);
	if (avail < 0) {
		ret = avail;
		goto out;
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, avail);
out:
	kvfree(buf);
	return ret;
}

static const struct file_operations scrub_stats_fops = {
	.open			= simple_open,
	.read			= xchk_scrub_stats_read,
};
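/*
 * Once registered below, the counters can be read with something like
 * (the exact path depends on the parent debugfs directory passed in):
 *	$ cat /sys/kernel/debug/xfs/scrub/stats
 */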

static ssize_t
xchk_clear_scrub_stats_write(
	struct file		*file,
	const char __user	*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	unsigned int		val;
	int			ret;

	ret = kstrtouint_from_user(ubuf, count, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xchk_stats_clearall(cs);
	return count;
}

static const struct file_operations clear_scrub_stats_fops = {
	.open			= simple_open,
	.write			= xchk_clear_scrub_stats_write,
};
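/*
 * Writing "1" resets every counter; any other value is rejected, e.g.:
 *	# echo 1 > /sys/kernel/debug/xfs/scrub/clear_stats
 * (path illustrative, as above)
 */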

/* Initialize the stats object. */
STATIC int
xchk_stats_init(
	struct xchk_stats	*cs,
	struct xfs_mount	*mp)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++)
		spin_lock_init(&css->css_lock);

	return 0;
}

/* Connect the stats object to debugfs. */
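/*
 * Failure to create the debugfs directory or files is not fatal; in that
 * case the counters simply are not exported, and scrubbing still works.
 */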
void
xchk_stats_register(
	struct xchk_stats	*cs,
	struct dentry		*parent)
{
	if (!parent)
		return;

	cs->cs_debugfs = xfs_debugfs_mkdir("scrub", parent);
	if (!cs->cs_debugfs)
		return;

	debugfs_create_file("stats", 0444, cs->cs_debugfs, cs,
			&scrub_stats_fops);
	debugfs_create_file("clear_stats", 0200, cs->cs_debugfs, cs,
			&clear_scrub_stats_fops);
}

/* Free all resources related to the stats object. */
STATIC int
xchk_stats_teardown(
	struct xchk_stats	*cs)
{
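	/* No per-object resources to release yet. */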
	return 0;
}

/* Disconnect the stats object from debugfs. */
void
xchk_stats_unregister(
	struct xchk_stats	*cs)
{
	debugfs_remove(cs->cs_debugfs);
}

/* Initialize global stats and register them */
int __init
xchk_global_stats_setup(
	struct dentry		*parent)
{
	int			error;

	error = xchk_stats_init(&global_stats, NULL);
	if (error)
		return error;

	xchk_stats_register(&global_stats, parent);
	return 0;
}

/* Unregister global stats and tear them down */
void
xchk_global_stats_teardown(void)
{
	xchk_stats_unregister(&global_stats);
	xchk_stats_teardown(&global_stats);
}

/* Allocate per-mount stats */
int
xchk_mount_stats_alloc(
	struct xfs_mount	*mp)
{
	struct xchk_stats	*cs;
	int			error;

	cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	error = xchk_stats_init(cs, mp);
	if (error)
		goto out_free;

	mp->m_scrub_stats = cs;
	return 0;
out_free:
	kvfree(cs);
	return error;
}

/* Free per-mount stats */
void
xchk_mount_stats_free(
	struct xfs_mount	*mp)
{
	xchk_stats_teardown(mp->m_scrub_stats);
	kvfree(mp->m_scrub_stats);
	mp->m_scrub_stats = NULL;
}