/*
 * cpuidle.h - a generic framework for CPU idle power management
 *
 * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Shaohua Li <shaohua.li@intel.com>
 *          Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#ifndef _LINUX_CPUIDLE_H
#define _LINUX_CPUIDLE_H

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>

#define CPUIDLE_STATE_MAX	10
#define CPUIDLE_NAME_LEN	16
#define CPUIDLE_DESC_LEN	32

struct module;

struct cpuidle_device;
struct cpuidle_driver;


/****************************
 * CPUIDLE DEVICE INTERFACE *
 ****************************/

struct cpuidle_state_usage {
	unsigned long long	disable;
	unsigned long long	usage;
	unsigned long long	time; /* in US */
	unsigned long long	above; /* Number of times it's been too deep */
	unsigned long long	below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND
	unsigned long long	s2idle_usage;
	unsigned long long	s2idle_time; /* in US */
#endif
};

struct cpuidle_state {
	char		name[CPUIDLE_NAME_LEN];
	char		desc[CPUIDLE_DESC_LEN];

	unsigned int	flags;
	unsigned int	exit_latency; /* in US */
	int		power_usage; /* in mW */
	unsigned int	target_residency; /* in US */
	bool		disabled; /* disabled on all CPUs */

	int (*enter)	(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv,
			 int index);

	int (*enter_dead) (struct cpuidle_device *dev, int index);

	/*
	 * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
	 * suspended, so it must not re-enable interrupts at any point (even
	 * temporarily) or attempt to change states of clock event devices.
	 */
	void (*enter_s2idle) (struct cpuidle_device *dev,
			      struct cpuidle_driver *drv,
			      int index);
};

/* Idle State Flags */
#define CPUIDLE_FLAG_NONE	(0x00)
#define CPUIDLE_FLAG_POLLING	BIT(0) /* polling state */
#define CPUIDLE_FLAG_COUPLED	BIT(1) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
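/*
 * Illustrative sketch (not part of the upstream header): a cpuidle driver
 * typically describes each hardware idle state by filling in one entry of
 * its cpuidle_driver::states[] array with the fields and flags above.
 * Latencies and residencies are in microseconds; the state name and the
 * my_c1_enter() callback below are hypothetical:
 *
 *	.states[1] = {
 *		.name			= "C1",
 *		.desc			= "clock gated",
 *		.exit_latency		= 2,
 *		.target_residency	= 10,
 *		.flags			= CPUIDLE_FLAG_NONE,
 *		.enter			= my_c1_enter,
 *	},
 */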

struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
struct cpuidle_driver_kobj;

struct cpuidle_device {
	unsigned int		registered:1;
	unsigned int		enabled:1;
	unsigned int		use_deepest_state:1;
	unsigned int		poll_time_limit:1;
	unsigned int		cpu;

	int			last_residency;
	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
	struct cpuidle_driver_kobj *kobj_driver;
	struct cpuidle_device_kobj *kobj_dev;
	struct list_head	device_list;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
	cpumask_t		coupled_cpus;
	struct cpuidle_coupled	*coupled;
#endif
};

DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);

/****************************
 * CPUIDLE DRIVER INTERFACE *
 ****************************/

struct cpuidle_driver {
	const char		*name;
	struct module		*owner;
	int			refcnt;

	/* used by the cpuidle framework to setup the broadcast timer */
	unsigned int		bctimer:1;
	/* states array must be ordered in decreasing power consumption */
	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
	int			state_count;
	int			safe_state_index;

	/* the driver handles the cpus in cpumask */
	struct cpumask		*cpumask;
};
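/*
 * Illustrative sketch (not part of the upstream header): a minimal driver
 * definition and its registration.  The "my_idle" name, the my_wfi_enter()
 * callback and the init context are hypothetical; the registration helpers
 * such as cpuidle_register() are declared below:
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name		= "my_idle",
 *		.owner		= THIS_MODULE,
 *		.states[0]	= {
 *			.name			= "WFI",
 *			.desc			= "shallow wait-for-interrupt",
 *			.exit_latency		= 1,
 *			.target_residency	= 1,
 *			.enter			= my_wfi_enter,
 *		},
 *		.state_count	= 1,
 *	};
 *
 *	static int __init my_idle_init(void)
 *	{
 *		return cpuidle_register(&my_idle_driver, NULL);
 *	}
 */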

#ifdef CONFIG_CPU_IDLE
extern void disable_cpuidle(void);
extern bool cpuidle_not_available(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev);

extern int cpuidle_select(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev,
			  bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
			 struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);

extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
extern struct cpuidle_driver *cpuidle_driver_ref(void);
extern void cpuidle_driver_unref(void);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
extern int cpuidle_register(struct cpuidle_driver *drv,
			    const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern void cpuidle_pause(void);
extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);

extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
static inline struct cpuidle_device *cpuidle_get_device(void)
{return __this_cpu_read(cpuidle_devices); }
#else
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
					 struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{return -ENODEV; }
static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
static inline void cpuidle_driver_unref(void) {}
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
static inline int cpuidle_register(struct cpuidle_driver *drv,
				   const struct cpumask *const coupled_cpus)
{return -ENODEV; }
static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { }
static inline void cpuidle_pause(void) { }
static inline void cpuidle_resume(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
	struct cpuidle_device *dev) {return NULL; }
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
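
/*
 * Illustrative sketch (not part of the upstream header): roughly how the
 * idle loop in kernel/sched/idle.c drives the hooks above - a governor
 * picks a state, the driver enters it, and the governor is told which
 * state was actually entered.  Error handling and the tick/broadcast
 * details are omitted:
 *
 *	struct cpuidle_device *dev = cpuidle_get_device();
 *	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 *	bool stop_tick = true;
 *	int entered, next = cpuidle_select(drv, dev, &stop_tick);
 *
 *	entered = cpuidle_enter(drv, dev, next);
 *	cpuidle_reflect(dev, entered);
 */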

#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
				      struct cpuidle_device *dev);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
					     struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				       struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable)
{
}
#endif

/* kernel/sched/idle.c */
extern void sched_idle_set_state(struct cpuidle_state *idle_state);
extern void default_idle_call(void);

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
}
#endif

#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
#endif

/******************************
 * CPUIDLE GOVERNOR INTERFACE *
 ******************************/

struct cpuidle_governor {
	char			name[CPUIDLE_NAME_LEN];
	struct list_head	governor_list;
	unsigned int		rating;

	int  (*enable)		(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev);
	void (*disable)		(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev);

	int  (*select)		(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev,
				 bool *stop_tick);
	void (*reflect)		(struct cpuidle_device *dev, int index);
};
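
/*
 * Illustrative sketch (not part of the upstream header): a governor is a
 * named set of callbacks registered with cpuidle_register_governor(),
 * declared below.  The "always_shallowest" policy and my_select() are
 * hypothetical; real governors (menu, ladder, teo) weigh the predicted
 * sleep length against each state's exit_latency and target_residency:
 *
 *	static int my_select(struct cpuidle_driver *drv,
 *			     struct cpuidle_device *dev, bool *stop_tick)
 *	{
 *		*stop_tick = false;
 *		return 0;
 *	}
 *
 *	static struct cpuidle_governor my_governor = {
 *		.name	= "always_shallowest",
 *		.rating	= 1,
 *		.select	= my_select,
 *	};
 *
 *	static int __init my_governor_init(void)
 *	{
 *		return cpuidle_register_governor(&my_governor);
 *	}
 */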

#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern int cpuidle_governor_latency_req(unsigned int cpu);
#else
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif

#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
({									\
	int __ret = 0;							\
									\
	if (!idx) {							\
		cpu_do_idle();						\
		return idx;						\
	}								\
									\
	if (!is_retention)						\
		__ret = cpu_pm_enter();					\
	if (!__ret) {							\
		__ret = low_level_idle_enter(idx);			\
		if (!is_retention)					\
			cpu_pm_exit();					\
	}								\
									\
	__ret ? -1 : idx;						\
})

#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)		\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
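
/*
 * Illustrative sketch (not part of the upstream header): a platform's
 * ->enter callback usually funnels through one of the helpers above, so
 * that cpu_pm notifiers run only for states that lose context and state 0
 * is short-circuited to cpu_do_idle().  The my_firmware_suspend() helper
 * is hypothetical:
 *
 *	static int my_plat_enter(struct cpuidle_device *dev,
 *				 struct cpuidle_driver *drv, int index)
 *	{
 *		return CPU_PM_CPU_IDLE_ENTER(my_firmware_suspend, index);
 *	}
 */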

#endif /* _LINUX_CPUIDLE_H */