// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;

	raw_spinlock_t		update_lock;	/* For shared policies */
	u64			last_freq_update_time;
	s64			freq_update_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work		irq_work;
	struct kthread_work	work;
	struct mutex		work_lock;
	struct kthread_worker	worker;
	struct task_struct	*thread;
	bool			work_in_progress;

	bool			need_freq_update;
};

struct sugov_cpu {
	struct update_util_data	update_util;
	struct sugov_policy	*sg_policy;
	unsigned int		cpu;

	bool			iowait_boost_pending;
	unsigned int		iowait_boost;
	u64			last_update;

	unsigned long		bw_dl;
	unsigned long		min;
	unsigned long		max;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * For the slow switching platforms, the kthread is always scheduled on
	 * the right set of CPUs and any CPU can find the next frequency and
	 * schedule the kthread.
	 */
	if (sg_policy->policy->fast_switch_enabled &&
	    !cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->need_freq_update))
		return true;

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}
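
/*
 * Illustrative example: with rate_limit_us = 500 (the default comes from
 * cpufreq_policy_transition_delay_us()), freq_update_delay_ns is 500000,
 * so at most one frequency evaluation is carried out per policy every
 * 500 us, unless need_freq_update forces an earlier one.
 */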

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
	if (!next_freq)
		return;

	policy->cur = next_freq;
	trace_cpu_frequency(next_freq, smp_processor_id());
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
				  unsigned int next_freq)
{
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}
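
/*
 * Note that work_in_progress only prevents queueing a second irq_work while
 * one is in flight; sg_policy->next_freq may still be updated in the
 * meantime. sugov_work() reads the latest next_freq under update_lock, so
 * intermediate values are simply coalesced into the final request.
 */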

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency equal to or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
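
/*
 * Worked example (illustrative numbers): with frequency-invariant
 * utilization, max_freq = 2000 MHz, util = 512 and max = 1024,
 * map_util_freq() yields 1.25 * 2000 * 512 / 1024 = 1250 MHz, which
 * cpufreq_driver_resolve_freq() then maps to the lowest supported
 * frequency at or above that value.
 */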

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs, rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs, rt and dl utilization are the running times measured with
 * rq->clock_task, which excludes things like IRQ and steal-time. These
 * latter are then accrued in the irq utilization.
 *
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed based on the task model parameters and gives the minimal
 * utilization required to meet deadlines.
 */
unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
				  unsigned long max, enum schedutil_type type)
{
	unsigned long dl_util, util, irq;
	struct rq *rq = cpu_rq(cpu);

	if (type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt))
		return max;

	/*
	 * Early check to see if IRQ/steal time saturates the CPU; this can
	 * happen because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
		return max;

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 */
	util = util_cfs;
	util += cpu_util_rt(rq);

	dl_util = cpu_util_dl(rq);

	/*
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
	 * that we select f_max when there is no idle time.
	 *
	 * NOTE: numerical errors or the stop class might cause us to not quite
	 * hit saturation when we should -- something for later.
	 */
	if (util + dl_util >= max)
		return max;

	/*
	 * On the other hand, for energy computation we need the estimated
	 * running time, so include dl_util and ignore the DL bandwidth.
	 */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/*
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	/*
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	 */
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);

	return min(max, util);
}
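
/*
 * Worked example (illustrative numbers): with max = 1024, irq = 256 and a
 * CFS+RT (+DL for ENERGY_UTIL) sum of 512, scale_irq_capacity() yields
 * 512 * (1024 - 256) / 1024 = 384, so U' = 256 + 384 = 640, matching the
 * formula above.
 */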

static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long util = cpu_util_cfs(rq);
	unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);

	return schedutil_freq_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from the minimum frequency, which improves
 * energy efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from the utilization of the minimum
 * OPP to the utilization of the maximum OPP.
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = sg_cpu->min;
}
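
/*
 * Example: with sg_cpu->min = 128, successive IO wakeups arriving less than
 * a tick apart walk the boost through 128, 256, 512 and finally
 * SCHED_CAPACITY_SCALE (1024), where the min_t() above caps it.
 */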

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task that has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU that also appears to have been idle for at least one tick has its
 * IO boost utilization reset as well.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO,
 * while being more conservative with tasks that do only sporadic IO
 * operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
					unsigned long util, unsigned long max)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return util;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return util;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < sg_cpu->min) {
			sg_cpu->iowait_boost = 0;
			return util;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * @util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
	return max(boost, util);
}
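
/*
 * Example: a boost of 512 on a CPU with max = 1024 contributes
 * (512 * 1024) >> SCHED_CAPACITY_SHIFT = 512 to the max(boost, util) above.
 * Each update without a pending request halves the boost (512, 256, 128,
 * ...) until it drops below sg_cpu->min and is cleared.
 */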

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
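
/*
 * The heuristic above relies on the nohz idle-calls counter: if the CPU has
 * not entered the idle loop since the previous update, the counter is
 * unchanged and the CPU is treated as busy, which suppresses premature
 * frequency reductions in sugov_update_single().
 */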

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_policy->need_freq_update = true;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	util = sugov_get_util(sg_cpu);
	max = sg_cpu->max;
	util = sugov_iowait_apply(sg_cpu, time, util, max);
	next_f = get_next_freq(sg_policy, util, max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 */
	if (busy && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Reset cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = 0;
	}

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		sugov_fast_switch(sg_policy, time, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy, time, next_f);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		j_util = sugov_get_util(j_sg_cpu);
		j_max = j_sg_cpu->max;
		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}
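
/*
 * Note: the j_util * max > j_max * util comparison above is the
 * cross-multiplied form of j_util / j_max > util / max; it picks the CPU
 * with the highest relative utilization without resorting to integer
 * division.
 */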

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (sg_policy->policy->fast_switch_enabled)
			sugov_fast_switch(sg_policy, time, next_f);
		else
			sugov_deferred_update(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to
	 * false here; without the lock we could miss queueing the new update.
	 *
	 * Note: if work is queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code; the
	 * request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};
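
/*
 * The tunable ends up under the policy's (or, without per-policy governors,
 * the global) cpufreq sysfs directory; e.g. (exact path varies by system):
 *
 *   echo 2000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 */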

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline	= 10000000,
		.sched_period	= 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}
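
/*
 * The sched_attr values above nominally describe 1 ms of runtime per 10 ms
 * period (10% bandwidth), but SCHED_FLAG_SUGOV marks the thread as a special
 * DL entity whose bandwidth is not enforced, so they are placeholders only
 * (see the "fake bandwidth" note above).
 */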

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = 0;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->min =
			(SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
			policy->cpuinfo.max_freq;
	}
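
	/*
	 * Example (illustrative numbers): for cpuinfo.min_freq = 500 MHz and
	 * cpuinfo.max_freq = 2000 MHz, sg_cpu->min = 1024 * 500 / 2000 = 256,
	 * i.e. the capacity-scale utilization of the lowest OPP, used as the
	 * initial IO boost value.
	 */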

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
						     sugov_update_shared :
						     sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}

struct cpufreq_governor schedutil_gov = {
	.name			= "schedutil",
	.owner			= THIS_MODULE,
	.dynamic_switching	= true,
	.init			= sugov_init,
	.exit			= sugov_exit,
	.start			= sugov_start,
	.stop			= sugov_stop,
	.limits			= sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);

#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;

static void rebuild_sd_workfn(struct work_struct *work)
{
	mutex_lock(&sched_energy_mutex);
	sched_energy_update = true;
	rebuild_sched_domains();
	sched_energy_update = false;
	mutex_unlock(&sched_energy_mutex);
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov)
{
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		/*
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		 */
		schedule_work(&rebuild_sd_work);
	}
}
#endif