// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

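/*
 * Consumers allocate a callback with a bucketing function and a timer
 * function, attach it to a queue, and activate it for a window; when the
 * window expires, blk_stat_timer_fn() below aggregates the per-cpu buckets
 * and invokes the timer function with the result.
 *
 * A minimal lifecycle sketch (illustrative only: the example_* names are
 * not part of this file, and the flow is modelled loosely on in-tree users
 * such as blk-wbt, using blk_stat_activate_msecs() from blk-stat.h):
 *
 *	static int example_bucket_fn(const struct request *rq)
 *	{
 *		// single bucket, reads only; < 0 means "don't account"
 *		return req_op(rq) == REQ_OP_READ ? 0 : -1;
 *	}
 *
 *	static void example_timer_fn(struct blk_stat_callback *cb)
 *	{
 *		// cb->stat[0] now holds this window's aggregate
 *		pr_info("reads: samples=%u mean=%llu ns\n",
 *			cb->stat[0].nr_samples, cb->stat[0].mean);
 *	}
 *
 *	cb = blk_stat_alloc_callback(example_timer_fn, example_bucket_fn,
 *				     1, NULL);
 *	if (cb) {
 *		blk_stat_add_callback(q, cb);
 *		blk_stat_activate_msecs(cb, 100);
 *	}
 *	...
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 */
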
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	int accounting;
};

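/*
 * Reset a bucket. min starts at -1ULL (i.e. U64_MAX) so that the first
 * sample recorded always becomes the new minimum.
 */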
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/*
 * Fold src into dst. src is a per-cpu stat: its mean is never computed,
 * and the raw sum of its samples lives in ->batch.
 */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	/* nothing to add, or the sample count would overflow */
	if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

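/*
 * Record a single sample: track min/max and accumulate the raw value in
 * ->batch; the mean is only computed when buckets are summed.
 */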
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

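/*
 * Completion hook: compute the request's I/O time and feed it to every
 * active callback whose bucket_fn accepts the request. Runs under
 * rcu_read_lock() so it can race safely with callback removal, and pins
 * the CPU (get_cpu()) so the per-cpu bucket stays stable.
 */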
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	/* guard against a completion timestamp before the issue timestamp */
	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	if (req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE)
		blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

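/*
 * Timer expiry: fold each per-cpu bucket into cb->stat[], reset the
 * per-cpu copies, and hand the aggregated window to the owner's timer_fn.
 * Only online CPUs are drained, so samples sitting on a CPU that has gone
 * offline are not included until it comes back.
 */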
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

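/*
 * Allocate a callback with the given number of buckets: one aggregate copy
 * (cb->stat) plus a per-cpu copy (cb->cpu_stat) per bucket. The timer is
 * initialised but not armed; the caller must pair this with
 * blk_stat_free_callback().
 */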
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

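/*
 * Attach cb to q: clear every per-cpu bucket (over all possible CPUs, so
 * no stale data survives from an earlier activation), publish the callback
 * on the RCU list, and enable stats gathering on the queue.
 */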
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

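/*
 * Detach cb from q, dropping QUEUE_FLAG_STATS if no callbacks or
 * accounting users remain. del_timer_sync() guarantees timer_fn has
 * finished; readers still inside blk_stat_add() are covered by the
 * RCU-deferred free below.
 */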
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

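/* RCU callback: actually release the memory once readers are done. */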
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

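/*
 * Free cb after a grace period, so a concurrent blk_stat_add() walking the
 * callback list can never touch freed memory.
 */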
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

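/*
 * Drop one accounting reference; stats are turned off only when the count
 * reaches zero and no callbacks remain either.
 */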
void blk_stat_disable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

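/*
 * Take one accounting reference. Only the 0 -> 1 transition needs to set
 * the flag; with callbacks registered it is already set.
 */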
void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

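/* Allocate and initialise the per-queue stats container. */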
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->accounting = 0;

	return stats;
}

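/*
 * Release the container. Every callback must already have been removed;
 * the WARN_ON() catches anyone who leaked one.
 */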
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}