// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: given different types of events, measure the
 * proportion of each type over time. The proportions are measured with
 * exponentially decaying history to give smooth transitions. The formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is the number of events of type j in the i-th last time
 * period and x_i is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be computed directly by maintaining the denominator
 * (let's call it 'd') and, for each event type, its numerator (let's call it
 * 'n_j'). When an event of type 'j' happens, we simply need to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily, either when someone asks for the proportion of event j or
 * when event j occurs. This can be implemented trivially by remembering the
 * last period in which something happened with the proportion of type j.
 */
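/*
 * Worked example (illustrative numbers, not from any real workload): suppose
 * type j saw 8 events in the current period and 4 in the previous one, while
 * totals of 16 and 8 events were seen in those periods, with nothing earlier.
 * Then
 *
 *   p_j = (8/2 + 4/4) / (16/2 + 8/4) = 5/10 = 1/2
 *
 * so the most recent period carries weight 1/2, the one before it 1/4, and
 * older history fades away geometrically.
 */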
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events = percpu_counter_sum(&p->events);

	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1)
		return false;
	preempt_disable_nested();
	write_seqcount_begin(&p->sequence);
	if (periods < 64)
		events -= events >> periods;
	/*
	 * 'events' now holds the amount to subtract so that roughly
	 * events/2^periods remain. Use addition rather than a plain set to
	 * avoid losing events happening between the sum and the update.
	 */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);
	preempt_enable_nested();

	return true;
}
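
/*
 * For instance (illustrative numbers): with 1000 events accumulated and
 * periods == 2, the code above computes 1000 - (1000 >> 2) = 750 and
 * subtracts it, leaving roughly 1000/2^2 = 250 events, i.e. two halvings
 * performed in a single call.
 */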

/*
 * ---- PERCPU ----
 */
/* Batch for percpu counter updates; grows logarithmically with CPU count */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		/*
		 * The approximate percpu read can be off by roughly
		 * nr_cpu_ids * PROP_BATCH; for small values take the
		 * precise (but more expensive) sum instead.
		 */
		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		/* Decay the counter to val/2^(period - pl->period) */
		percpu_counter_add_batch(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
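
/*
 * Example of the lazy catch-up above (illustrative numbers): if pl last saw
 * period 5 and the global period is now 8, the local counter is three
 * halvings behind, so val = 80 becomes 80 >> 3 = 10, applied as a single
 * delta of -80 + 10 = -70.
 */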

/* @nr events of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
		long nr)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
	percpu_counter_add(&p->events, nr);
}

void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
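
/*
 * A caller typically scales the returned fraction into its own units, e.g.
 * (sketch only; 'prop', 'local' and 'total' are hypothetical caller-side
 * names, and div64_u64() needs <linux/math64.h>):
 *
 *	unsigned long num, den;
 *	u64 share;
 *
 *	fprop_fraction_percpu(&prop, &local, &num, &den);
 *	share = div64_u64((u64)total * num, den);
 *
 * This is safe because den > 0 and num <= den are guaranteed above.
 */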

/*
 * Like __fprop_add_percpu() except that the event is counted only if the
 * fraction of the given type is smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_add_percpu_max(struct fprop_global *p,
		struct fprop_local_percpu *pl, int max_frac, long nr)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;
		s64 tmp;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
		tmp = (u64)denominator * max_frac -
					((u64)numerator << FPROP_FRAC_SHIFT);
		if (tmp < 0) {
			/* Maximum fraction already exceeded? */
			return;
		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
			/* Add just enough for the fraction to saturate */
			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
					FPROP_FRAC_BASE - max_frac);
		}
	}

	__fprop_add_percpu(p, pl, nr);
}
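
/*
 * Worked example of the clamping above (illustrative numbers, assuming the
 * in-tree FPROP_FRAC_SHIFT of 10, i.e. FPROP_FRAC_BASE == 1024): with
 * numerator 30, denominator 100 and max_frac 512 (a cap of 1/2),
 * tmp = 100*512 - 30*1024 = 20480. A request of nr = 50 exceeds
 * tmp / (1024 - 512) = 40, so nr is clamped to 40 and the fraction lands
 * exactly at the cap: (30 + 40) / (100 + 40) = 1/2.
 */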
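
/*
 * Putting it together: a minimal usage sketch (hypothetical driver code, not
 * part of this file; real callers such as the writeback code drive the
 * period transitions from their own timing logic):
 *
 *	static struct fprop_global prop;
 *	static struct fprop_local_percpu local;
 *
 *	if (fprop_global_init(&prop, GFP_KERNEL))
 *		goto fail;
 *	if (fprop_local_init_percpu(&local, GFP_KERNEL))
 *		goto fail_global;
 *
 *	__fprop_add_percpu(&prop, &local, 1);	(one event of 'local' type)
 *
 *	fprop_new_period(&prop, 1);		(from a single thread, e.g. a
 *						 periodic timer; returns false
 *						 once all history aged away)
 *
 *	fprop_local_destroy_percpu(&local);
 *	fprop_global_destroy(&prop);
 */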