// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL	(2 * TICK_NSEC / NSEC_PER_USEC)

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/*
 * sampling_rate_store - update the sampling rate, effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate. For example, suppose the
 * original sampling_rate was 1 second and the requested new rate is 10 ms,
 * because the user wants an immediate reaction from the ondemand governor but
 * is not sure whether a higher frequency will be required. The governor might
 * then change the sampling rate too late, up to 1 second later. Thus, if we
 * are reducing the sampling rate, we need to make the new value effective
 * immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int sampling_interval;
	int ret;

	ret = sscanf(buf, "%u", &sampling_interval);
	if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
		return -EINVAL;

	dbs_data->sampling_rate = sampling_interval;

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter. If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(sampling_rate_store);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
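
/*
 * dbs_update - Measure the load since the last sample on the policy's CPUs.
 *
 * Return the maximum load (in percent) found among the CPUs covered by
 * @policy.  Also record in policy_dbs->idle_periods how many full sampling
 * periods the CPUs appear to have been idle for (UINT_MAX if none of them
 * was idle for long), for the governor to take into account.
 */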
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily. Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is
	 * actually idle, so do not add the iowait time to the CPU idle time.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between the
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU intensive that task actually
			 * was. This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency. However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: an unusually large
			 * 'idle_time' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy(). In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics. Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative. That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

		if (unlikely((int)idle_time > 2 * sampling_rate)) {
			unsigned int periods = idle_time / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
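
/*
 * dbs_work_handler - Process-context part of taking a governor sample.
 *
 * Run the governor's ->gov_dbs_update() callback to evaluate the load and
 * set the next sample delay, then allow the utilization update handler to
 * queue up further work.
 */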
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}
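
/* Runs from the irq_work; punt the actual sample to process context via the workqueue. */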
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}
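
/*
 * dbs_update_util_handler - Scheduler utilization update callback.
 *
 * Queue up the irq_work that triggers a new sample, unless work is already
 * pending or in progress, or not enough time has elapsed since the last
 * sample.  For shared policies, make sure that only one CPU queues the work.
 */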
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point. Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}
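
/*
 * gov_set_update_util - Install the utilization update hooks.
 *
 * Set the initial sample delay and register dbs_update_util_handler() for
 * every CPU covered by the policy.
 */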
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);
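
	/* Wait for any in-flight dbs_update_util_handler() invocations to finish. */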
	synchronize_rcu();
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}
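
/* Kobject release callback: free the governor tunables (struct dbs_data). */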
static void cpufreq_dbs_data_release(struct kobject *kobj)
{
	struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
	struct dbs_governor *gov = dbs_data->gov;

	gov->exit(dbs_data);
	kfree(dbs_data);
}

int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	dbs_data->gov = gov;
	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_dbs_data;

	/*
	 * The sampling interval should not be less than the transition latency
	 * of the CPU and it also cannot be too small for dbs_update() to work
	 * correctly.
	 */
	dbs_data->sampling_rate = max_t(unsigned int,
					CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
					cpufreq_policy_transition_delay_us(policy));

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	gov->kobj_type.release = cpufreq_dbs_data_release;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	kobject_put(&dbs_data->attr_set.kobj);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);

free_dbs_data:
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count && !have_governor_per_policy())
		gov->gdbs_data = NULL;

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/*
		 * Make the first invocation of dbs_update() compute the load.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs;

	/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
	mutex_lock(&gov_dbs_data_mutex);
	policy_dbs = policy->governor_data;
	if (!policy_dbs)
		goto out;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);
	mutex_unlock(&policy_dbs->update_mutex);

out:
	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);