// SPDX-License-Identifier: GPL-2.0
/*
 * Floating proportions with flexible aging period
 *
 * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of event, measure the
 * proportion of each type of event over time. The proportions are measured
 * with exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining the
 * denominator (let's call it 'd') and, for each event type, its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily, when someone asks for the proportion of event j or when
 * event j occurs. This can be trivially implemented by remembering the last
 * period in which something happened with the proportion of type j.
 */
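
/*
 * A worked example of the formula above (illustrative numbers, not from
 * this file): suppose type 'j' generated 4 events in the current period
 * and 8 in the previous one, while 16 events in total happened in each of
 * those periods. Then
 *
 *   p_j = (4/2 + 8/4) / (16/2 + 16/4) = 4/12 = 1/3
 *
 * Lazy aging yields the same state as eager aging: if n_j was last touched
 * two periods ago, shifting it right by two (n_j >>= 2) before use is
 * equivalent to having halved it at each of the two period transitions,
 * since (x >> 1) >> 1 == x >> 2.
 */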
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}
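
/*
 * A minimal init/teardown sketch (hypothetical caller and names, assuming
 * process context where GFP_KERNEL allocations are safe):
 *
 *	static struct fprop_global my_props;
 *
 *	static int my_subsys_init(void)
 *	{
 *		return fprop_global_init(&my_props, GFP_KERNEL);
 *	}
 *
 *	static void my_subsys_exit(void)
 *	{
 *		fprop_global_destroy(&my_props);
 *	}
 */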

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events = percpu_counter_sum(&p->events);

	/*
	 * Don't do anything if there are no events (only the initial bias
	 * of 1 remains).
	 */
	if (events <= 1)
		return false;
	preempt_disable_nested();
	write_seqcount_begin(&p->sequence);
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);
	preempt_enable_nested();

	return true;
}
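
/*
 * A sketch of how a caller might drive aging (hypothetical names; callers
 * must serialize period transitions themselves, e.g. by declaring periods
 * from a single delayed work item):
 *
 *	static void my_aging_fn(struct work_struct *work)
 *	{
 *		if (fprop_new_period(&my_props, 1))
 *			schedule_delayed_work(&my_aging_work, HZ);
 *	}
 *
 * When fprop_new_period() returns false, all events have aged out, so the
 * work need not be re-armed until the next event is counted.
 */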

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
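
/*
 * Catch-up example (illustrative numbers): with pl->events == 12 and the
 * global period three ahead of pl->period, the shift above yields
 * 12 >> 3 == 1, exactly what three successive halvings at each period
 * transition would have produced (12 -> 6 -> 3 -> 1). Shifting by
 * BITS_PER_LONG or more is undefined in C, hence the explicit zeroing in
 * the else branch.
 */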

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
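
/*
 * A usage sketch (hypothetical names): callers typically scale some limit
 * by the returned ratio. Since num <= den and den > 0 are guaranteed above,
 * the computed share never exceeds the limit (barring multiplication
 * overflow for very large limits):
 *
 *	unsigned long num, den, share;
 *
 *	fprop_fraction_single(&my_props, &my_local, &num, &den);
 *	share = my_limit * num / den;
 */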

/*
 * ---- PERCPU ----
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
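
/*
 * Example batch size (illustrative): with nr_cpu_ids == 8, PROP_BATCH is
 * 8 * (1 + ilog2(8)) == 32, so each CPU may accumulate up to 32 events
 * locally before they are folded into the shared counter.
 */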

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		percpu_counter_add_batch(&pl->events,
				-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
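
/*
 * Aging example (illustrative numbers): catching up by two periods with
 * val == 100 adds -100 + (100 >> 2) == -75, leaving 25 == 100 / 4, the
 * same as two successive halvings. Using an addition rather than
 * percpu_counter_set() preserves events that race in between the read and
 * the update.
 */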

/* Event of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
			long nr)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
	percpu_counter_add(&p->events, nr);
}

void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
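
/*
 * A usage sketch (hypothetical names): splitting a global threshold among
 * event types in proportion to their recent activity:
 *
 *	unsigned long num, den;
 *	u64 my_share;
 *
 *	fprop_fraction_percpu(&my_props, &my_local, &num, &den);
 *	my_share = div64_u64((u64)my_threshold * num, den);
 */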

/*
 * Like __fprop_add_percpu() except that the event is counted only if the
 * given type has a fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_add_percpu_max(struct fprop_global *p,
			    struct fprop_local_percpu *pl, int max_frac, long nr)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;
		s64 tmp;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
		tmp = (u64)denominator * max_frac -
			((u64)numerator << FPROP_FRAC_SHIFT);
		if (tmp < 0) {
			/* Maximum fraction already exceeded? */
			return;
		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
			/* Add just enough for the fraction to saturate */
			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
					FPROP_FRAC_BASE - max_frac);
		}
	}

	__fprop_add_percpu(p, pl, nr);
}
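
/*
 * Worked example of the clamping above (illustrative numbers, assuming
 * FPROP_FRAC_BASE == 1024): with max_frac == 512 (a cap of 1/2),
 * denominator == 100, numerator == 40 and nr == 30:
 *
 *   tmp = 100 * 512 - (40 << 10) = 51200 - 40960 = 10240
 *
 * Adding all 30 events would give (40+30)/(100+30) = 70/130 > 1/2, and
 * indeed tmp = 10240 < 30 * (1024 - 512) = 15360, so nr is clamped to
 *
 *   (10240 + 512 - 1) / 512 = 20
 *
 * after which (40+20)/(100+20) = 60/120 hits the cap of 1/2 exactly.
 */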