1 | /* |
2 | * linux/include/linux/cpufreq.h |
3 | * |
4 | * Copyright (C) 2001 Russell King |
5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. |
10 | */ |
11 | #ifndef _LINUX_CPUFREQ_H |
12 | #define _LINUX_CPUFREQ_H |
13 | |
14 | #include <linux/clk.h> |
15 | #include <linux/cpumask.h> |
16 | #include <linux/completion.h> |
17 | #include <linux/kobject.h> |
18 | #include <linux/notifier.h> |
19 | #include <linux/spinlock.h> |
20 | #include <linux/sysfs.h> |
21 | |
22 | /********************************************************************* |
23 | * CPUFREQ INTERFACE * |
24 | *********************************************************************/ |
25 | /* |
26 | * Frequency values here are CPU kHz |
27 | * |
28 | * Maximum transition latency is in nanoseconds - if it's unknown, |
29 | * CPUFREQ_ETERNAL shall be used. |
30 | */ |
31 | |
32 | #define CPUFREQ_ETERNAL (-1) |
33 | #define CPUFREQ_NAME_LEN 16 |
/* Print length for names. One extra space to accommodate '\n' in prints */
35 | #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) |
36 | |
37 | struct cpufreq_governor; |
38 | |
39 | enum cpufreq_table_sorting { |
40 | CPUFREQ_TABLE_UNSORTED, |
41 | CPUFREQ_TABLE_SORTED_ASCENDING, |
42 | CPUFREQ_TABLE_SORTED_DESCENDING |
43 | }; |
44 | |
45 | struct cpufreq_freqs { |
46 | unsigned int cpu; /* cpu nr */ |
47 | unsigned int old; |
48 | unsigned int new; |
49 | u8 flags; /* flags of cpufreq_driver, see below. */ |
50 | }; |
51 | |
52 | struct cpufreq_cpuinfo { |
53 | unsigned int max_freq; |
54 | unsigned int min_freq; |
55 | |
56 | /* in 10^(-9) s = nanoseconds */ |
57 | unsigned int transition_latency; |
58 | }; |
59 | |
60 | struct cpufreq_user_policy { |
61 | unsigned int min; /* in kHz */ |
62 | unsigned int max; /* in kHz */ |
63 | }; |
64 | |
65 | struct cpufreq_policy { |
66 | /* CPUs sharing clock, require sw coordination */ |
67 | cpumask_var_t cpus; /* Online CPUs only */ |
68 | cpumask_var_t related_cpus; /* Online + Offline CPUs */ |
69 | cpumask_var_t real_cpus; /* Related and present */ |
70 | |
71 | unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs |
72 | should set cpufreq */ |
73 | unsigned int cpu; /* cpu managing this policy, must be online */ |
74 | |
75 | struct clk *clk; |
struct cpufreq_cpuinfo cpuinfo; /* see above */
77 | |
78 | unsigned int min; /* in kHz */ |
79 | unsigned int max; /* in kHz */ |
80 | unsigned int cur; /* in kHz, only needed if cpufreq |
81 | * governors are used */ |
82 | unsigned int restore_freq; /* = policy->cur before transition */ |
83 | unsigned int suspend_freq; /* freq to set during suspend */ |
84 | |
unsigned int policy; /* CPUFREQ_POLICY_*, see below */
86 | unsigned int last_policy; /* policy before unplug */ |
87 | struct cpufreq_governor *governor; /* see below */ |
88 | void *governor_data; |
89 | char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */ |
90 | |
91 | struct work_struct update; /* if update_policy() needs to be |
92 | * called, but you're in IRQ context */ |
93 | |
94 | struct cpufreq_user_policy user_policy; |
95 | struct cpufreq_frequency_table *freq_table; |
96 | enum cpufreq_table_sorting freq_table_sorted; |
97 | |
98 | struct list_head policy_list; |
99 | struct kobject kobj; |
100 | struct completion kobj_unregister; |
101 | |
102 | /* |
103 | * The rules for this semaphore: |
104 | * - Any routine that wants to read from the policy structure will |
105 | * do a down_read on this semaphore. |
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (e.g. CPU hotplug) will hold this lock in write
 *   mode before doing so.
109 | */ |
110 | struct rw_semaphore rwsem; |
111 | |
112 | /* |
113 | * Fast switch flags: |
 * - fast_switch_possible should be set by the driver if it can
 *   guarantee that the frequency can be changed on any CPU sharing the
 *   policy and that the change will then affect all of the policy CPUs.
117 | * - fast_switch_enabled is to be set by governors that support fast |
118 | * frequency switching with the help of cpufreq_enable_fast_switch(). |
119 | */ |
120 | bool fast_switch_possible; |
121 | bool fast_switch_enabled; |
122 | |
123 | /* |
124 | * Preferred average time interval between consecutive invocations of |
125 | * the driver to set the frequency for this policy. To be set by the |
126 | * scaling driver (0, which is the default, means no preference). |
127 | */ |
128 | unsigned int transition_delay_us; |
129 | |
130 | /* |
 * Remote DVFS flag (Not added to the driver structure as we don't want
 * to access another structure from the scheduler hot path).
133 | * |
134 | * Should be set if CPUs can do DVFS on behalf of other CPUs from |
135 | * different cpufreq policies. |
136 | */ |
137 | bool dvfs_possible_from_any_cpu; |
138 | |
139 | /* Cached frequency lookup from cpufreq_driver_resolve_freq. */ |
140 | unsigned int cached_target_freq; |
141 | int cached_resolved_idx; |
142 | |
143 | /* Synchronization for frequency transitions */ |
144 | bool transition_ongoing; /* Tracks transition status */ |
145 | spinlock_t transition_lock; |
146 | wait_queue_head_t transition_wait; |
147 | struct task_struct *transition_task; /* Task which is doing the transition */ |
148 | |
149 | /* cpufreq-stats */ |
150 | struct cpufreq_stats *stats; |
151 | |
152 | /* For cpufreq driver's internal use */ |
153 | void *driver_data; |
154 | |
155 | /* Pointer to the cooling device if used for thermal mitigation */ |
156 | struct thermal_cooling_device *cdev; |
157 | }; |
158 | |
159 | /* Only for ACPI */ |
160 | #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ |
161 | #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ |
162 | #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ |
163 | #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ |
164 | |
165 | #ifdef CONFIG_CPU_FREQ |
166 | struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); |
167 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); |
168 | void cpufreq_cpu_put(struct cpufreq_policy *policy); |
169 | #else |
170 | static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) |
171 | { |
172 | return NULL; |
173 | } |
174 | static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) |
175 | { |
176 | return NULL; |
177 | } |
178 | static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { } |
179 | #endif |
180 | |
181 | static inline bool policy_is_shared(struct cpufreq_policy *policy) |
182 | { |
183 | return cpumask_weight(policy->cpus) > 1; |
184 | } |
185 | |
186 | /* /sys/devices/system/cpu/cpufreq: entry point for global variables */ |
187 | extern struct kobject *cpufreq_global_kobject; |
188 | |
189 | #ifdef CONFIG_CPU_FREQ |
190 | unsigned int cpufreq_get(unsigned int cpu); |
191 | unsigned int cpufreq_quick_get(unsigned int cpu); |
192 | unsigned int cpufreq_quick_get_max(unsigned int cpu); |
193 | void disable_cpufreq(void); |
194 | |
195 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); |
196 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); |
197 | void cpufreq_update_policy(unsigned int cpu); |
198 | bool have_governor_per_policy(void); |
199 | struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); |
200 | void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); |
201 | void cpufreq_disable_fast_switch(struct cpufreq_policy *policy); |
202 | #else |
203 | static inline unsigned int cpufreq_get(unsigned int cpu) |
204 | { |
205 | return 0; |
206 | } |
207 | static inline unsigned int cpufreq_quick_get(unsigned int cpu) |
208 | { |
209 | return 0; |
210 | } |
211 | static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) |
212 | { |
213 | return 0; |
214 | } |
215 | static inline void disable_cpufreq(void) { } |
216 | #endif |
217 | |
218 | #ifdef CONFIG_CPU_FREQ_STAT |
219 | void cpufreq_stats_create_table(struct cpufreq_policy *policy); |
220 | void cpufreq_stats_free_table(struct cpufreq_policy *policy); |
221 | void cpufreq_stats_record_transition(struct cpufreq_policy *policy, |
222 | unsigned int new_freq); |
223 | #else |
224 | static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { } |
225 | static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { } |
226 | static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy, |
227 | unsigned int new_freq) { } |
228 | #endif /* CONFIG_CPU_FREQ_STAT */ |
229 | |
230 | /********************************************************************* |
231 | * CPUFREQ DRIVER INTERFACE * |
232 | *********************************************************************/ |
233 | |
234 | #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ |
235 | #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ |
236 | #define CPUFREQ_RELATION_C 2 /* closest frequency to target */ |
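
/*
 * For example, given a table containing 500000 kHz and 1000000 kHz, a
 * target of 800000 kHz resolves to 1000000 kHz with CPUFREQ_RELATION_L,
 * to 500000 kHz with CPUFREQ_RELATION_H, and to 1000000 kHz with
 * CPUFREQ_RELATION_C (200000 kHz away, versus 300000 kHz for 500000 kHz).
 */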
237 | |
238 | struct freq_attr { |
239 | struct attribute attr; |
240 | ssize_t (*show)(struct cpufreq_policy *, char *); |
241 | ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count); |
242 | }; |
243 | |
244 | #define cpufreq_freq_attr_ro(_name) \ |
245 | static struct freq_attr _name = \ |
246 | __ATTR(_name, 0444, show_##_name, NULL) |
247 | |
248 | #define cpufreq_freq_attr_ro_perm(_name, _perm) \ |
249 | static struct freq_attr _name = \ |
250 | __ATTR(_name, _perm, show_##_name, NULL) |
251 | |
252 | #define cpufreq_freq_attr_rw(_name) \ |
253 | static struct freq_attr _name = \ |
254 | __ATTR(_name, 0644, show_##_name, store_##_name) |
255 | |
256 | #define cpufreq_freq_attr_wo(_name) \ |
257 | static struct freq_attr _name = \ |
258 | __ATTR(_name, 0200, NULL, store_##_name) |
259 | |
260 | #define define_one_global_ro(_name) \ |
261 | static struct kobj_attribute _name = \ |
262 | __ATTR(_name, 0444, show_##_name, NULL) |
263 | |
264 | #define define_one_global_rw(_name) \ |
265 | static struct kobj_attribute _name = \ |
266 | __ATTR(_name, 0644, show_##_name, store_##_name) |
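
/*
 * A minimal sketch of how a driver might define a read-only attribute with
 * these macros (the "my_boost" name and its show routine are hypothetical):
 *
 *	static ssize_t show_my_boost(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 *	cpufreq_freq_attr_ro(my_boost);
 *
 * This expands to a 0444 "my_boost" attribute wired to show_my_boost(),
 * suitable for a driver's ->attr table.
 */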
267 | |
268 | |
269 | struct cpufreq_driver { |
270 | char name[CPUFREQ_NAME_LEN]; |
271 | u8 flags; |
272 | void *driver_data; |
273 | |
274 | /* needed by all drivers */ |
275 | int (*init)(struct cpufreq_policy *policy); |
276 | int (*verify)(struct cpufreq_policy *policy); |
277 | |
/* define one out of two: setpolicy() or target()/target_index() */
279 | int (*setpolicy)(struct cpufreq_policy *policy); |
280 | |
281 | /* |
282 | * On failure, should always restore frequency to policy->restore_freq |
283 | * (i.e. old freq). |
284 | */ |
285 | int (*target)(struct cpufreq_policy *policy, |
286 | unsigned int target_freq, |
287 | unsigned int relation); /* Deprecated */ |
288 | int (*target_index)(struct cpufreq_policy *policy, |
289 | unsigned int index); |
290 | unsigned int (*fast_switch)(struct cpufreq_policy *policy, |
291 | unsigned int target_freq); |
292 | |
293 | /* |
294 | * Caches and returns the lowest driver-supported frequency greater than |
295 | * or equal to the target frequency, subject to any driver limitations. |
296 | * Does not set the frequency. Only to be implemented for drivers with |
297 | * target(). |
298 | */ |
299 | unsigned int (*resolve_freq)(struct cpufreq_policy *policy, |
300 | unsigned int target_freq); |
301 | |
302 | /* |
303 | * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION |
304 | * unset. |
305 | * |
 * get_intermediate should return a stable intermediate frequency the
 * platform wants to switch to, and target_intermediate() should set the
 * CPU to that frequency before jumping to the frequency corresponding
 * to 'index'. The core will take care of sending notifications, so the
 * driver doesn't have to handle them in target_intermediate() or
 * target_index().
 *
 * Drivers can return '0' from get_intermediate() in case they don't
 * wish to switch to an intermediate frequency for some target frequency.
 * In that case the core will directly call ->target_index().
316 | */ |
317 | unsigned int (*get_intermediate)(struct cpufreq_policy *policy, |
318 | unsigned int index); |
319 | int (*target_intermediate)(struct cpufreq_policy *policy, |
320 | unsigned int index); |
321 | |
322 | /* should be defined, if possible */ |
323 | unsigned int (*get)(unsigned int cpu); |
324 | |
325 | /* optional */ |
326 | int (*bios_limit)(int cpu, unsigned int *limit); |
327 | |
328 | int (*online)(struct cpufreq_policy *policy); |
329 | int (*offline)(struct cpufreq_policy *policy); |
330 | int (*exit)(struct cpufreq_policy *policy); |
331 | void (*stop_cpu)(struct cpufreq_policy *policy); |
332 | int (*suspend)(struct cpufreq_policy *policy); |
333 | int (*resume)(struct cpufreq_policy *policy); |
334 | |
335 | /* Will be called after the driver is fully initialized */ |
336 | void (*ready)(struct cpufreq_policy *policy); |
337 | |
338 | struct freq_attr **attr; |
339 | |
340 | /* platform specific boost support code */ |
341 | bool boost_enabled; |
342 | int (*set_boost)(int state); |
343 | }; |
344 | |
345 | /* flags */ |
346 | |
347 | /* driver isn't removed even if all ->init() calls failed */ |
348 | #define CPUFREQ_STICKY BIT(0) |
349 | |
350 | /* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */ |
351 | #define CPUFREQ_CONST_LOOPS BIT(1) |
352 | |
353 | /* don't warn on suspend/resume speed mismatches */ |
354 | #define CPUFREQ_PM_NO_WARN BIT(2) |
355 | |
356 | /* |
 * This should be set by platforms having multiple clock-domains, i.e.
 * supporting multiple policies. With this set, the sysfs directories of the
 * governor are created in the cpu/cpu<num>/cpufreq/ directory, so different
 * clusters can use the same governor with different tunables.
361 | */ |
362 | #define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3) |
363 | |
364 | /* |
 * Drivers that do POSTCHANGE notifications from outside of their ->target()
 * routine must set this flag in cpufreq_driver->flags, so that the core
 * can handle them specially.
368 | */ |
369 | #define CPUFREQ_ASYNC_NOTIFICATION BIT(4) |
370 | |
371 | /* |
 * Set by drivers which want the cpufreq core to check whether the CPU is
 * running at a frequency present in the freq-table exposed by the driver.
 * For these drivers, if the CPU is found running at an out-of-table
 * frequency, the core will try to set it to a frequency from the table,
 * and if that fails, it will stop the boot process by issuing a BUG_ON().
377 | */ |
378 | #define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5) |
379 | |
380 | /* |
381 | * Set by drivers to disallow use of governors with "dynamic_switching" flag |
382 | * set. |
383 | */ |
384 | #define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6) |
385 | |
386 | /* |
387 | * Set by drivers that want the core to automatically register the cpufreq |
388 | * driver as a thermal cooling device. |
389 | */ |
390 | #define CPUFREQ_IS_COOLING_DEV BIT(7) |
391 | |
392 | int cpufreq_register_driver(struct cpufreq_driver *driver_data); |
393 | int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); |
394 | |
395 | const char *cpufreq_get_current_driver(void); |
396 | void *cpufreq_get_driver_data(void); |
397 | |
398 | static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, |
399 | unsigned int min, unsigned int max) |
400 | { |
401 | if (policy->min < min) |
402 | policy->min = min; |
403 | if (policy->max < min) |
404 | policy->max = min; |
405 | if (policy->min > max) |
406 | policy->min = max; |
407 | if (policy->max > max) |
408 | policy->max = max; |
409 | if (policy->min > policy->max) |
410 | policy->min = policy->max; |
412 | } |
413 | |
414 | static inline void |
415 | cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy) |
416 | { |
417 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
418 | policy->cpuinfo.max_freq); |
419 | } |
420 | |
421 | #ifdef CONFIG_CPU_FREQ |
422 | void cpufreq_suspend(void); |
423 | void cpufreq_resume(void); |
424 | int cpufreq_generic_suspend(struct cpufreq_policy *policy); |
425 | #else |
426 | static inline void cpufreq_suspend(void) {} |
427 | static inline void cpufreq_resume(void) {} |
428 | #endif |
429 | |
430 | /********************************************************************* |
431 | * CPUFREQ NOTIFIER INTERFACE * |
432 | *********************************************************************/ |
433 | |
434 | #define CPUFREQ_TRANSITION_NOTIFIER (0) |
435 | #define CPUFREQ_POLICY_NOTIFIER (1) |
436 | |
437 | /* Transition notifiers */ |
438 | #define CPUFREQ_PRECHANGE (0) |
439 | #define CPUFREQ_POSTCHANGE (1) |
440 | |
441 | /* Policy Notifiers */ |
442 | #define CPUFREQ_ADJUST (0) |
443 | #define CPUFREQ_NOTIFY (1) |
444 | |
445 | #ifdef CONFIG_CPU_FREQ |
446 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); |
447 | int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); |
448 | |
449 | void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, |
450 | struct cpufreq_freqs *freqs); |
451 | void cpufreq_freq_transition_end(struct cpufreq_policy *policy, |
452 | struct cpufreq_freqs *freqs, int transition_failed); |
453 | |
454 | #else /* CONFIG_CPU_FREQ */ |
455 | static inline int cpufreq_register_notifier(struct notifier_block *nb, |
456 | unsigned int list) |
457 | { |
458 | return 0; |
459 | } |
460 | static inline int cpufreq_unregister_notifier(struct notifier_block *nb, |
461 | unsigned int list) |
462 | { |
463 | return 0; |
464 | } |
465 | #endif /* !CONFIG_CPU_FREQ */ |
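
/*
 * A minimal sketch of registering a transition notifier (the callback and
 * notifier_block names are hypothetical):
 *
 *	static int my_transition(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				 freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */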
466 | |
467 | /** |
468 | * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch |
469 | * safe) |
470 | * @old: old value |
471 | * @div: divisor |
472 | * @mult: multiplier |
473 | * |
474 | * |
475 | * new = old * mult / div |
476 | */ |
477 | static inline unsigned long cpufreq_scale(unsigned long old, u_int div, |
478 | u_int mult) |
479 | { |
480 | #if BITS_PER_LONG == 32 |
481 | u64 result = ((u64) old) * ((u64) mult); |
482 | do_div(result, div); |
483 | return (unsigned long) result; |
484 | |
485 | #elif BITS_PER_LONG == 64 |
486 | unsigned long result = old * ((u64) mult); |
487 | result /= div; |
488 | return result; |
489 | #endif |
490 | } |
491 | |
492 | /********************************************************************* |
493 | * CPUFREQ GOVERNORS * |
494 | *********************************************************************/ |
495 | |
496 | /* |
497 | * If (cpufreq_driver->target) exists, the ->governor decides what frequency |
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
499 | * two generic policies are available: |
500 | */ |
501 | #define CPUFREQ_POLICY_POWERSAVE (1) |
502 | #define CPUFREQ_POLICY_PERFORMANCE (2) |
503 | |
504 | /* |
 * The polling frequency depends on the capability of the processor. The
 * default polling frequency is 1000 times the transition latency of the
 * processor. The ondemand governor will work on any processor with a
 * transition latency <= 10 ms, using an appropriate sampling rate.
509 | */ |
510 | #define LATENCY_MULTIPLIER (1000) |
511 | |
512 | struct cpufreq_governor { |
513 | char name[CPUFREQ_NAME_LEN]; |
514 | int (*init)(struct cpufreq_policy *policy); |
515 | void (*exit)(struct cpufreq_policy *policy); |
516 | int (*start)(struct cpufreq_policy *policy); |
517 | void (*stop)(struct cpufreq_policy *policy); |
518 | void (*limits)(struct cpufreq_policy *policy); |
519 | ssize_t (*show_setspeed) (struct cpufreq_policy *policy, |
520 | char *buf); |
521 | int (*store_setspeed) (struct cpufreq_policy *policy, |
522 | unsigned int freq); |
523 | /* For governors which change frequency dynamically by themselves */ |
524 | bool dynamic_switching; |
525 | struct list_head governor_list; |
526 | struct module *owner; |
527 | }; |
528 | |
529 | /* Pass a target to the cpufreq driver */ |
530 | unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, |
531 | unsigned int target_freq); |
532 | int cpufreq_driver_target(struct cpufreq_policy *policy, |
533 | unsigned int target_freq, |
534 | unsigned int relation); |
535 | int __cpufreq_driver_target(struct cpufreq_policy *policy, |
536 | unsigned int target_freq, |
537 | unsigned int relation); |
538 | unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, |
539 | unsigned int target_freq); |
540 | unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy); |
541 | int cpufreq_register_governor(struct cpufreq_governor *governor); |
542 | void cpufreq_unregister_governor(struct cpufreq_governor *governor); |
543 | |
544 | struct cpufreq_governor *cpufreq_default_governor(void); |
545 | struct cpufreq_governor *cpufreq_fallback_governor(void); |
546 | |
547 | static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy) |
548 | { |
549 | if (policy->max < policy->cur) |
550 | __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); |
551 | else if (policy->min > policy->cur) |
552 | __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); |
553 | } |
554 | |
555 | /* Governor attribute set */ |
556 | struct gov_attr_set { |
557 | struct kobject kobj; |
558 | struct list_head policy_list; |
559 | struct mutex update_lock; |
560 | int usage_count; |
561 | }; |
562 | |
563 | /* sysfs ops for cpufreq governors */ |
564 | extern const struct sysfs_ops governor_sysfs_ops; |
565 | |
566 | void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node); |
567 | void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node); |
568 | unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node); |
569 | |
570 | /* Governor sysfs attribute */ |
571 | struct governor_attr { |
572 | struct attribute attr; |
573 | ssize_t (*show)(struct gov_attr_set *attr_set, char *buf); |
574 | ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf, |
575 | size_t count); |
576 | }; |
577 | |
578 | static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy) |
579 | { |
580 | /* |
581 | * Allow remote callbacks if: |
582 | * - dvfs_possible_from_any_cpu flag is set |
583 | * - the local and remote CPUs share cpufreq policy |
584 | */ |
585 | return policy->dvfs_possible_from_any_cpu || |
586 | cpumask_test_cpu(smp_processor_id(), policy->cpus); |
587 | } |
588 | |
589 | /********************************************************************* |
590 | * FREQUENCY TABLE HELPERS * |
591 | *********************************************************************/ |
592 | |
593 | /* Special Values of .frequency field */ |
594 | #define CPUFREQ_ENTRY_INVALID ~0u |
595 | #define CPUFREQ_TABLE_END ~1u |
596 | /* Special Values of .flags field */ |
597 | #define CPUFREQ_BOOST_FREQ (1 << 0) |
598 | |
599 | struct cpufreq_frequency_table { |
600 | unsigned int flags; |
601 | unsigned int driver_data; /* driver specific data, not used by core */ |
602 | unsigned int frequency; /* kHz - doesn't need to be in ascending |
603 | * order */ |
604 | }; |
605 | |
606 | #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) |
607 | int dev_pm_opp_init_cpufreq_table(struct device *dev, |
608 | struct cpufreq_frequency_table **table); |
609 | void dev_pm_opp_free_cpufreq_table(struct device *dev, |
610 | struct cpufreq_frequency_table **table); |
611 | #else |
612 | static inline int dev_pm_opp_init_cpufreq_table(struct device *dev, |
613 | struct cpufreq_frequency_table |
614 | **table) |
615 | { |
616 | return -EINVAL; |
617 | } |
618 | |
619 | static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, |
620 | struct cpufreq_frequency_table |
621 | **table) |
622 | { |
623 | } |
624 | #endif |
625 | |
626 | /* |
627 | * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table |
628 | * @pos: the cpufreq_frequency_table * to use as a loop cursor. |
629 | * @table: the cpufreq_frequency_table * to iterate over. |
630 | */ |
631 | |
632 | #define cpufreq_for_each_entry(pos, table) \ |
633 | for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) |
634 | |
635 | /* |
636 | * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table |
637 | * with index |
638 | * @pos: the cpufreq_frequency_table * to use as a loop cursor. |
639 | * @table: the cpufreq_frequency_table * to iterate over. |
640 | * @idx: the table entry currently being processed |
641 | */ |
642 | |
643 | #define cpufreq_for_each_entry_idx(pos, table, idx) \ |
644 | for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \ |
645 | pos++, idx++) |
646 | |
647 | /* |
648 | * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table |
649 | * excluding CPUFREQ_ENTRY_INVALID frequencies. |
650 | * @pos: the cpufreq_frequency_table * to use as a loop cursor. |
651 | * @table: the cpufreq_frequency_table * to iterate over. |
652 | */ |
653 | |
654 | #define cpufreq_for_each_valid_entry(pos, table) \ |
655 | for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \ |
656 | if (pos->frequency == CPUFREQ_ENTRY_INVALID) \ |
657 | continue; \ |
658 | else |
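
/*
 * For example, printing every usable frequency in a policy's table (sketch):
 *
 *	struct cpufreq_frequency_table *pos;
 *
 *	cpufreq_for_each_valid_entry(pos, policy->freq_table)
 *		pr_info("%u kHz\n", pos->frequency);
 */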
659 | |
660 | /* |
661 | * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq |
662 | * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies. |
663 | * @pos: the cpufreq_frequency_table * to use as a loop cursor. |
664 | * @table: the cpufreq_frequency_table * to iterate over. |
665 | * @idx: the table entry currently being processed |
666 | */ |
667 | |
668 | #define cpufreq_for_each_valid_entry_idx(pos, table, idx) \ |
669 | cpufreq_for_each_entry_idx(pos, table, idx) \ |
670 | if (pos->frequency == CPUFREQ_ENTRY_INVALID) \ |
671 | continue; \ |
672 | else |
673 | |
674 | |
675 | int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, |
676 | struct cpufreq_frequency_table *table); |
677 | |
678 | int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, |
679 | struct cpufreq_frequency_table *table); |
680 | int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy); |
681 | |
682 | int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, |
683 | unsigned int target_freq, |
684 | unsigned int relation); |
685 | int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, |
686 | unsigned int freq); |
687 | |
688 | ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf); |
689 | |
690 | #ifdef CONFIG_CPU_FREQ |
691 | int cpufreq_boost_trigger_state(int state); |
692 | int cpufreq_boost_enabled(void); |
693 | int cpufreq_enable_boost_support(void); |
694 | bool policy_has_boost_freq(struct cpufreq_policy *policy); |
695 | |
696 | /* Find lowest freq at or above target in a table in ascending order */ |
697 | static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, |
698 | unsigned int target_freq) |
699 | { |
700 | struct cpufreq_frequency_table *table = policy->freq_table; |
701 | struct cpufreq_frequency_table *pos; |
702 | unsigned int freq; |
703 | int idx, best = -1; |
704 | |
705 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
706 | freq = pos->frequency; |
707 | |
708 | if (freq >= target_freq) |
709 | return idx; |
710 | |
711 | best = idx; |
712 | } |
713 | |
714 | return best; |
715 | } |
716 | |
717 | /* Find lowest freq at or above target in a table in descending order */ |
718 | static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, |
719 | unsigned int target_freq) |
720 | { |
721 | struct cpufreq_frequency_table *table = policy->freq_table; |
722 | struct cpufreq_frequency_table *pos; |
723 | unsigned int freq; |
724 | int idx, best = -1; |
725 | |
726 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
727 | freq = pos->frequency; |
728 | |
729 | if (freq == target_freq) |
730 | return idx; |
731 | |
732 | if (freq > target_freq) { |
733 | best = idx; |
734 | continue; |
735 | } |
736 | |
737 | /* No freq found above target_freq */ |
738 | if (best == -1) |
739 | return idx; |
740 | |
741 | return best; |
742 | } |
743 | |
744 | return best; |
745 | } |
746 | |
747 | /* Works only on sorted freq-tables */ |
748 | static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, |
749 | unsigned int target_freq) |
750 | { |
751 | target_freq = clamp_val(target_freq, policy->min, policy->max); |
752 | |
753 | if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) |
754 | return cpufreq_table_find_index_al(policy, target_freq); |
755 | else |
756 | return cpufreq_table_find_index_dl(policy, target_freq); |
757 | } |
758 | |
759 | /* Find highest freq at or below target in a table in ascending order */ |
760 | static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, |
761 | unsigned int target_freq) |
762 | { |
763 | struct cpufreq_frequency_table *table = policy->freq_table; |
764 | struct cpufreq_frequency_table *pos; |
765 | unsigned int freq; |
766 | int idx, best = -1; |
767 | |
768 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
769 | freq = pos->frequency; |
770 | |
771 | if (freq == target_freq) |
772 | return idx; |
773 | |
774 | if (freq < target_freq) { |
775 | best = idx; |
776 | continue; |
777 | } |
778 | |
779 | /* No freq found below target_freq */ |
780 | if (best == -1) |
781 | return idx; |
782 | |
783 | return best; |
784 | } |
785 | |
786 | return best; |
787 | } |
788 | |
789 | /* Find highest freq at or below target in a table in descending order */ |
790 | static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, |
791 | unsigned int target_freq) |
792 | { |
793 | struct cpufreq_frequency_table *table = policy->freq_table; |
794 | struct cpufreq_frequency_table *pos; |
795 | unsigned int freq; |
796 | int idx, best = -1; |
797 | |
798 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
799 | freq = pos->frequency; |
800 | |
801 | if (freq <= target_freq) |
802 | return idx; |
803 | |
804 | best = idx; |
805 | } |
806 | |
807 | return best; |
808 | } |
809 | |
810 | /* Works only on sorted freq-tables */ |
811 | static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, |
812 | unsigned int target_freq) |
813 | { |
814 | target_freq = clamp_val(target_freq, policy->min, policy->max); |
815 | |
816 | if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) |
817 | return cpufreq_table_find_index_ah(policy, target_freq); |
818 | else |
819 | return cpufreq_table_find_index_dh(policy, target_freq); |
820 | } |
821 | |
822 | /* Find closest freq to target in a table in ascending order */ |
823 | static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, |
824 | unsigned int target_freq) |
825 | { |
826 | struct cpufreq_frequency_table *table = policy->freq_table; |
827 | struct cpufreq_frequency_table *pos; |
828 | unsigned int freq; |
829 | int idx, best = -1; |
830 | |
831 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
832 | freq = pos->frequency; |
833 | |
834 | if (freq == target_freq) |
835 | return idx; |
836 | |
837 | if (freq < target_freq) { |
838 | best = idx; |
839 | continue; |
840 | } |
841 | |
842 | /* No freq found below target_freq */ |
843 | if (best == -1) |
844 | return idx; |
845 | |
846 | /* Choose the closest freq */ |
847 | if (target_freq - table[best].frequency > freq - target_freq) |
848 | return idx; |
849 | |
850 | return best; |
851 | } |
852 | |
853 | return best; |
854 | } |
855 | |
856 | /* Find closest freq to target in a table in descending order */ |
857 | static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, |
858 | unsigned int target_freq) |
859 | { |
860 | struct cpufreq_frequency_table *table = policy->freq_table; |
861 | struct cpufreq_frequency_table *pos; |
862 | unsigned int freq; |
863 | int idx, best = -1; |
864 | |
865 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
866 | freq = pos->frequency; |
867 | |
868 | if (freq == target_freq) |
869 | return idx; |
870 | |
871 | if (freq > target_freq) { |
872 | best = idx; |
873 | continue; |
874 | } |
875 | |
876 | /* No freq found above target_freq */ |
877 | if (best == -1) |
878 | return idx; |
879 | |
880 | /* Choose the closest freq */ |
881 | if (table[best].frequency - target_freq > target_freq - freq) |
882 | return idx; |
883 | |
884 | return best; |
885 | } |
886 | |
887 | return best; |
888 | } |
889 | |
890 | /* Works only on sorted freq-tables */ |
891 | static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, |
892 | unsigned int target_freq) |
893 | { |
894 | target_freq = clamp_val(target_freq, policy->min, policy->max); |
895 | |
896 | if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) |
897 | return cpufreq_table_find_index_ac(policy, target_freq); |
898 | else |
899 | return cpufreq_table_find_index_dc(policy, target_freq); |
900 | } |
901 | |
902 | static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, |
903 | unsigned int target_freq, |
904 | unsigned int relation) |
905 | { |
906 | if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) |
907 | return cpufreq_table_index_unsorted(policy, target_freq, |
908 | relation); |
909 | |
910 | switch (relation) { |
911 | case CPUFREQ_RELATION_L: |
912 | return cpufreq_table_find_index_l(policy, target_freq); |
913 | case CPUFREQ_RELATION_H: |
914 | return cpufreq_table_find_index_h(policy, target_freq); |
915 | case CPUFREQ_RELATION_C: |
916 | return cpufreq_table_find_index_c(policy, target_freq); |
917 | default: |
pr_err("%s: Invalid relation: %d\n", __func__, relation);
919 | return -EINVAL; |
920 | } |
921 | } |
922 | |
923 | static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy) |
924 | { |
925 | struct cpufreq_frequency_table *pos; |
926 | int count = 0; |
927 | |
928 | if (unlikely(!policy->freq_table)) |
929 | return 0; |
930 | |
931 | cpufreq_for_each_valid_entry(pos, policy->freq_table) |
932 | count++; |
933 | |
934 | return count; |
935 | } |
936 | #else |
937 | static inline int cpufreq_boost_trigger_state(int state) |
938 | { |
939 | return 0; |
940 | } |
941 | static inline int cpufreq_boost_enabled(void) |
942 | { |
943 | return 0; |
944 | } |
945 | |
946 | static inline int cpufreq_enable_boost_support(void) |
947 | { |
948 | return -EINVAL; |
949 | } |
950 | |
951 | static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) |
952 | { |
953 | return false; |
954 | } |
955 | #endif |
956 | |
957 | #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) |
958 | void sched_cpufreq_governor_change(struct cpufreq_policy *policy, |
959 | struct cpufreq_governor *old_gov); |
960 | #else |
961 | static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy, |
962 | struct cpufreq_governor *old_gov) { } |
963 | #endif |
964 | |
965 | extern void arch_freq_prepare_all(void); |
966 | extern unsigned int arch_freq_get_on_cpu(int cpu); |
967 | |
968 | extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, |
969 | unsigned long max_freq); |
970 | |
971 | /* the following are really really optional */ |
972 | extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; |
973 | extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; |
974 | extern struct freq_attr *cpufreq_generic_attr[]; |
975 | int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy); |
976 | |
977 | unsigned int cpufreq_generic_get(unsigned int cpu); |
978 | int cpufreq_generic_init(struct cpufreq_policy *policy, |
979 | struct cpufreq_frequency_table *table, |
980 | unsigned int transition_latency); |
981 | #endif /* _LINUX_CPUFREQ_H */ |
982 | |