// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs iostat support
 *
 * Copyright 2021 Google LLC
 * Author: Daeho Jeong <daehojeong@google.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>

#include "f2fs.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_IOSTAT_CTXS	128
static struct kmem_cache *bio_iostat_ctx_cache;
static mempool_t *bio_iostat_ctx_pool;

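/*
 * Dump the cumulative per-type I/O byte counters of this superblock
 * through a seq_file, grouped into [WRITE], [READ] and [OTHER] sections.
 * Nothing is printed while sbi->iostat_enable is false.
 */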
int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	time64_t now = ktime_get_real_seconds();

	if (!sbi->iostat_enable)
		return 0;

	seq_printf(seq, "time:		%-16llu\n", now);

	/* print app write IOs */
	seq_puts(seq, "[WRITE]\n");
	seq_printf(seq, "app buffered:	%-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_IO]);
	seq_printf(seq, "app direct:	%-16llu\n",
				sbi->rw_iostat[APP_DIRECT_IO]);
	seq_printf(seq, "app mapped:	%-16llu\n",
				sbi->rw_iostat[APP_MAPPED_IO]);

	/* print fs write IOs */
	seq_printf(seq, "fs data:	%-16llu\n",
				sbi->rw_iostat[FS_DATA_IO]);
	seq_printf(seq, "fs node:	%-16llu\n",
				sbi->rw_iostat[FS_NODE_IO]);
	seq_printf(seq, "fs meta:	%-16llu\n",
				sbi->rw_iostat[FS_META_IO]);
	seq_printf(seq, "fs gc data:	%-16llu\n",
				sbi->rw_iostat[FS_GC_DATA_IO]);
	seq_printf(seq, "fs gc node:	%-16llu\n",
				sbi->rw_iostat[FS_GC_NODE_IO]);
	seq_printf(seq, "fs cp data:	%-16llu\n",
				sbi->rw_iostat[FS_CP_DATA_IO]);
	seq_printf(seq, "fs cp node:	%-16llu\n",
				sbi->rw_iostat[FS_CP_NODE_IO]);
	seq_printf(seq, "fs cp meta:	%-16llu\n",
				sbi->rw_iostat[FS_CP_META_IO]);

	/* print app read IOs */
	seq_puts(seq, "[READ]\n");
	seq_printf(seq, "app buffered:	%-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_READ_IO]);
	seq_printf(seq, "app direct:	%-16llu\n",
				sbi->rw_iostat[APP_DIRECT_READ_IO]);
	seq_printf(seq, "app mapped:	%-16llu\n",
				sbi->rw_iostat[APP_MAPPED_READ_IO]);

	/* print fs read IOs */
	seq_printf(seq, "fs data:	%-16llu\n",
				sbi->rw_iostat[FS_DATA_READ_IO]);
	seq_printf(seq, "fs gc data:	%-16llu\n",
				sbi->rw_iostat[FS_GDATA_READ_IO]);
	seq_printf(seq, "fs compr_data:	%-16llu\n",
				sbi->rw_iostat[FS_CDATA_READ_IO]);
	seq_printf(seq, "fs node:	%-16llu\n",
				sbi->rw_iostat[FS_NODE_READ_IO]);
	seq_printf(seq, "fs meta:	%-16llu\n",
				sbi->rw_iostat[FS_META_READ_IO]);

	/* print other IOs */
	seq_puts(seq, "[OTHER]\n");
	seq_printf(seq, "fs discard:	%-16llu\n",
				sbi->rw_iostat[FS_DISCARD]);

	return 0;
}

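/*
 * Snapshot the per-{read, sync write, async write} x per-page-type bio
 * latency statistics (count, average and peak, converted to msecs),
 * reset the accumulators under iostat_lat_lock, and report the snapshot
 * through the f2fs_iostat_latency tracepoint.
 */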
static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
{
	int io, idx = 0;
	unsigned int cnt;
	struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	unsigned long flags;

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	for (idx = 0; idx < MAX_IO_TYPE; idx++) {
		for (io = 0; io < NR_PAGE_TYPE; io++) {
			cnt = io_lat->bio_cnt[idx][io];
			iostat_lat[idx][io].peak_lat =
			   jiffies_to_msecs(io_lat->peak_lat[idx][io]);
			iostat_lat[idx][io].cnt = cnt;
			iostat_lat[idx][io].avg_lat = cnt ?
			   jiffies_to_msecs(io_lat->sum_lat[idx][io]) / cnt : 0;
			io_lat->sum_lat[idx][io] = 0;
			io_lat->peak_lat[idx][io] = 0;
			io_lat->bio_cnt[idx][io] = 0;
		}
	}
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);

	trace_f2fs_iostat_latency(sbi, iostat_lat);
}

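/*
 * At most once per iostat_period_ms, compute the per-type byte deltas
 * since the previous period, report them through the f2fs_iostat
 * tracepoint, and then flush the latency statistics. The period check
 * is repeated under iostat_lock so that concurrent callers do not
 * report the same period twice.
 */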
static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
{
	unsigned long long iostat_diff[NR_IO_TYPE];
	int i;
	unsigned long flags;

	if (time_is_after_jiffies(sbi->iostat_next_period))
		return;

	/* Need double check under the lock */
	spin_lock_irqsave(&sbi->iostat_lock, flags);
	if (time_is_after_jiffies(sbi->iostat_next_period)) {
		spin_unlock_irqrestore(&sbi->iostat_lock, flags);
		return;
	}
	sbi->iostat_next_period = jiffies +
				msecs_to_jiffies(sbi->iostat_period_ms);

	for (i = 0; i < NR_IO_TYPE; i++) {
		iostat_diff[i] = sbi->rw_iostat[i] -
				sbi->prev_rw_iostat[i];
		sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
	}
	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	trace_f2fs_iostat(sbi, iostat_diff);

	__record_iostat_latency(sbi);
}

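/*
 * Zero the cumulative and previous-period byte counters as well as the
 * latency accumulators, so that collection restarts from a clean state.
 */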
void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int i;

	spin_lock_irq(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->rw_iostat[i] = 0;
		sbi->prev_rw_iostat[i] = 0;
	}
	spin_unlock_irq(&sbi->iostat_lock);

	spin_lock_irq(&sbi->iostat_lat_lock);
	memset(io_lat, 0, sizeof(struct iostat_lat_info));
	spin_unlock_irq(&sbi->iostat_lat_lock);
}

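/*
 * Account io_bytes to the given iostat type. Application buffered and
 * direct I/O is additionally folded into the aggregate APP_WRITE_IO /
 * APP_READ_IO counters. Finally, give the periodic reporting path a
 * chance to run.
 */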
void f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	spin_lock_irqsave(&sbi->iostat_lock, flags);
	sbi->rw_iostat[type] += io_bytes;

	if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
		sbi->rw_iostat[APP_WRITE_IO] += io_bytes;

	if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
		sbi->rw_iostat[APP_READ_IO] += io_bytes;

	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	f2fs_record_iostat(sbi);
}

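/*
 * Fold one bio's completion latency (jiffies since submit_ts) into the
 * sum/count/peak accumulators, bucketed by read vs. sync write vs.
 * async write and by page type; META_FLUSH and above are accounted
 * as META.
 */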
static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
				int rw, bool is_sync)
{
	unsigned long ts_diff;
	unsigned int iotype = iostat_ctx->type;
	struct f2fs_sb_info *sbi = iostat_ctx->sbi;
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int idx;
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	ts_diff = jiffies - iostat_ctx->submit_ts;
	if (iotype >= META_FLUSH)
		iotype = META;

	if (rw == 0) {
		idx = READ_IO;
	} else {
		if (is_sync)
			idx = WRITE_SYNC_IO;
		else
			idx = WRITE_ASYNC_IO;
	}

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	io_lat->sum_lat[idx][iotype] += ts_diff;
	io_lat->bio_cnt[idx][iotype]++;
	if (ts_diff > io_lat->peak_lat[idx][iotype])
		io_lat->peak_lat[idx][iotype] = ts_diff;
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
}

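/*
 * Called on bio completion: restore bio->bi_private to what the rest of
 * the completion path expects (the post-read context for reads, the sbi
 * for writes), record the bio's latency, and return the iostat context
 * to the mempool.
 */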
void iostat_update_and_unbind_ctx(struct bio *bio, int rw)
{
	struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
	bool is_sync = bio->bi_opf & REQ_SYNC;

	if (rw == 0)
		bio->bi_private = iostat_ctx->post_read_ctx;
	else
		bio->bi_private = iostat_ctx->sbi;
	__update_iostat_latency(iostat_ctx, rw, is_sync);
	mempool_free(iostat_ctx, bio_iostat_ctx_pool);
}

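/*
 * Allocate an iostat context for a bio that is about to be submitted and
 * install it as bio->bi_private, remembering the original post-read
 * context so it can be restored on completion. submit_ts and type are
 * zeroed here and are expected to be filled in at submission time.
 */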
void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
		struct bio *bio, struct bio_post_read_ctx *ctx)
{
	struct bio_iostat_ctx *iostat_ctx;
	/* Due to the mempool, this never fails. */
	iostat_ctx = mempool_alloc(bio_iostat_ctx_pool, GFP_NOFS);
	iostat_ctx->sbi = sbi;
	iostat_ctx->submit_ts = 0;
	iostat_ctx->type = 0;
	iostat_ctx->post_read_ctx = ctx;
	bio->bi_private = iostat_ctx;
}

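/*
 * Module init: create the slab cache and mempool backing the per-bio
 * iostat contexts. Preallocating NUM_PREALLOC_IOSTAT_CTXS objects lets
 * iostat_alloc_and_bind_ctx() rely on mempool_alloc() not failing.
 */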
int __init f2fs_init_iostat_processing(void)
{
	bio_iostat_ctx_cache =
		kmem_cache_create("f2fs_bio_iostat_ctx",
				  sizeof(struct bio_iostat_ctx), 0, 0, NULL);
	if (!bio_iostat_ctx_cache)
		goto fail;
	bio_iostat_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_IOSTAT_CTXS,
					 bio_iostat_ctx_cache);
	if (!bio_iostat_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_iostat_ctx_cache);
fail:
	return -ENOMEM;
}

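/* Module exit: tear down the iostat context mempool and slab cache. */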
void f2fs_destroy_iostat_processing(void)
{
	mempool_destroy(bio_iostat_ctx_pool);
	kmem_cache_destroy(bio_iostat_ctx_cache);
}

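/*
 * Per-superblock init: set up the iostat locks and defaults (collection
 * disabled, default reporting period) and allocate the latency table.
 */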
int f2fs_init_iostat(struct f2fs_sb_info *sbi)
{
	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	spin_lock_init(&sbi->iostat_lat_lock);
	sbi->iostat_enable = false;
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
	sbi->iostat_io_lat = f2fs_kzalloc(sbi, sizeof(struct iostat_lat_info),
					GFP_KERNEL);
	if (!sbi->iostat_io_lat)
		return -ENOMEM;

	return 0;
}

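/* Per-superblock teardown: free the latency table. */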
void f2fs_destroy_iostat(struct f2fs_sb_info *sbi)
{
	kfree(sbi->iostat_io_lat);
}