// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
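/*
 * Note: with the usual SCHED_CAPACITY_SCALE of 1024 (1 << SCHED_CAPACITY_SHIFT)
 * this minimum boost corresponds to 1/8 of the capacity scale, i.e. 128.
 */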

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool limits_changed;
	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;
	unsigned int cpu;

	bool iowait_boost_pending;
	unsigned int iowait_boost;
	u64 last_update;

	unsigned long util;
	unsigned long bw_dl;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on the slow switching platforms too to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}
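/*
 * Illustrative example (assumed values, not defaults): with rate_limit_us set
 * to 2000, freq_update_delay_ns is 2,000,000 ns, so a request arriving less
 * than 2 ms after the previous frequency update is dropped here unless the
 * policy limits have changed in the meantime.
 */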

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->need_freq_update)
		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	else if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}
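/*
 * Slow-path flow for reference: sugov_deferred_update() queues the irq_work,
 * sugov_irq_work() then queues the kthread work, and sugov_work() finally
 * calls __cpufreq_driver_target() in process context.  work_in_progress
 * prevents queueing a second irq_work while one is already pending.
 */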

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	util = map_util_perf(util);
	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
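/*
 * Worked example with assumed numbers (not from any particular platform):
 * frequency-invariant util = 512, max = 1024, max_freq = 2000000 kHz.
 * The C = 1.25 headroom turns 512 into 640, and the raw frequency becomes
 * 640 * 2000000 / 1024 = 1250000 kHz, which is then resolved to the lowest
 * driver-supported frequency at or above that value.
 */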

static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
	unsigned long util = cpu_util_cfs_boost(sg_cpu->cpu);
	struct rq *rq = cpu_rq(sg_cpu->cpu);

	sg_cpu->bw_dl = cpu_bw_dl(rq);
	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, util,
					  FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
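/*
 * Example (assuming SCHED_CAPACITY_SCALE == 1024): a task that keeps waking
 * up from IO within each tick sees its boost grow as 128 -> 256 -> 512 ->
 * 1024 and then saturate, while a gap longer than a tick restarts the
 * sequence from IOWAIT_BOOST_MIN.
 */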

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @max_cap: the max CPU capacity
 *
 * A CPU running a task that woke up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU that also appears to have been idle for at least one tick has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO,
 * while being more conservative about tasks that do only sporadic IO
 * operations.
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned long max_cap)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
	boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
	if (sg_cpu->util < boost)
		sg_cpu->util = boost;
}
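/*
 * Example of the decay (again assuming SCHED_CAPACITY_SCALE == 1024): once
 * the IO wakeups stop, successive updates halve the boost, 1024 -> 512 ->
 * 256 -> 128 -> 0, and the value compared against sg_cpu->util is scaled as
 * iowait_boost * max_cap / 1024, so a 512 boost on a 512-capacity little
 * CPU contributes at most 256.
 */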

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_cpu->sg_policy->limits_changed = true;
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
					      u64 time, unsigned long max_cap,
					      unsigned int flags)
{
	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
		return false;

	sugov_get_util(sg_cpu);
	sugov_iowait_apply(sg_cpu, time, max_cap);

	return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned long max_cap;
	unsigned int next_f;

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 *
	 * Except when the rq is capped by uclamp_max.
	 */
	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
	    !sg_policy->need_freq_update) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	if (!sugov_update_next_freq(sg_policy, time, next_f))
		return;

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	unsigned long prev_util = sg_cpu->util;
	unsigned long max_cap;

	/*
	 * Fall back to the "frequency" path if frequency invariance is not
	 * supported, because the direct mapping between the utilization and
	 * the performance levels depends on the frequency invariance.
	 */
	if (!arch_scale_freq_invariant()) {
		sugov_update_single_freq(hook, time, flags);
		return;
	}

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	/*
	 * Do not reduce the target performance level if the CPU has not been
	 * idle recently, as the reduction is likely to be premature then.
	 *
	 * Except when the rq is capped by uclamp_max.
	 */
	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
	    sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;

	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
				   map_util_perf(sg_cpu->util), max_cap);

	sg_cpu->sg_policy->last_freq_update_time = time;
}
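/*
 * Illustration with assumed values: with bw_dl = 100 and util = 512 on a
 * 1024-capacity CPU, the driver is asked for at least map_util_perf(100) =
 * 125 (the deadline bandwidth floor) while aiming at map_util_perf(512) =
 * 640 out of a capacity of 1024; how that maps to an actual performance
 * state is entirely up to the cpufreq driver.
 */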

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max_cap;
	unsigned int j;

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);

		sugov_get_util(j_sg_cpu);
		sugov_iowait_apply(j_sg_cpu, time, max_cap);

		util = max(j_sg_cpu->util, util);
	}

	return get_next_freq(sg_policy, util, max_cap);
}
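/*
 * For a shared policy the highest per-CPU utilization wins: if, say, three
 * CPUs in the policy currently sit at 200, 600 and 300 (capacity scale 1024),
 * the 600 value drives the frequency selected for all of them.  The numbers
 * are purely illustrative.
 */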

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (!sugov_update_next_freq(sg_policy, time, next_f))
			goto unlock;

		if (sg_policy->policy->fast_switch_enabled)
			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
		else
			sugov_deferred_update(sg_policy);
	}
unlock:
	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here; without the lock we might miss queueing the new update.
	 *
	 * Note: If a work item is queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code; the
	 * request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}
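/*
 * Example of tuning at run time (path shown for the per-policy case; with
 * global tunables the attribute lives under
 * /sys/devices/system/cpu/cpufreq/schedutil/ instead):
 *
 *   echo 2000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * The store callback above propagates the new value to every policy attached
 * to this tunables set.
 */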

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static void sugov_tunables_free(struct kobject *kobj)
{
	struct gov_attr_set *attr_set = to_gov_attr_set(kobj);

	kfree(to_sugov_tunables(attr_set));
}

static const struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
	.release = &sugov_tunables_free,
};

/********************** cpufreq governor interface *********************/

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
	rebuild_sched_domains_energy();
}

static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
static void sugov_eas_rebuild_sd(void)
{
	/*
	 * When called from the cpufreq_register_driver() path, the
	 * cpu_hotplug_lock is already held, so use a work item to
	 * avoid nested locking in rebuild_sched_domains().
	 */
	schedule_work(&rebuild_sd_work);
}
#else
static inline void sugov_eas_rebuild_sd(void) { };
#endif

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
		.sched_policy = SCHED_DEADLINE,
		.sched_flags = SCHED_FLAG_SUGOV,
		.sched_nice = 0,
		.sched_priority = 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime = 1000000,
		.sched_deadline = 10000000,
		.sched_period = 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_clear_global_tunables(void)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

	sugov_eas_rebuild_sd();

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_clear_global_tunables();

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_clear_global_tunables();

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);

	sugov_eas_rebuild_sd();
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = 0;
	sg_policy->work_in_progress = false;
	sg_policy->limits_changed = false;
	sg_policy->cached_raw_freq = 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);