1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_PM_QOS_H |
3 | #define _LINUX_PM_QOS_H |
/* Interface for the pm_qos_power infrastructure of the Linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
8 | #include <linux/plist.h> |
9 | #include <linux/notifier.h> |
10 | #include <linux/device.h> |
11 | #include <linux/workqueue.h> |
12 | |
13 | enum { |
14 | PM_QOS_RESERVED = 0, |
15 | PM_QOS_CPU_DMA_LATENCY, |
16 | PM_QOS_NETWORK_LATENCY, |
17 | PM_QOS_NETWORK_THROUGHPUT, |
18 | PM_QOS_MEMORY_BANDWIDTH, |
19 | |
	/* insert new class IDs above this line */
21 | PM_QOS_NUM_CLASSES, |
22 | }; |
23 | |
24 | enum pm_qos_flags_status { |
25 | PM_QOS_FLAGS_UNDEFINED = -1, |
26 | PM_QOS_FLAGS_NONE, |
27 | PM_QOS_FLAGS_SOME, |
28 | PM_QOS_FLAGS_ALL, |
29 | }; |
30 | |
31 | #define PM_QOS_DEFAULT_VALUE (-1) |
32 | #define PM_QOS_LATENCY_ANY S32_MAX |
33 | #define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC) |
34 | |
35 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) |
36 | #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) |
37 | #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 |
38 | #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0 |
39 | #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY |
40 | #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY |
41 | #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS |
42 | #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 |
43 | #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) |
44 | |
45 | #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) |
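
/*
 * Illustrative sketch (hypothetical caller, not taken from an in-tree
 * driver): dev_pm_qos_update_flags() requests that a device is not powered
 * off, and dev_pm_qos_flags() reports how the aggregated flags requests come
 * out.  "dev" below is assumed to be a valid struct device pointer.
 *
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 *
 *	switch (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)) {
 *	case PM_QOS_FLAGS_ALL:
 *		// every active flags request has the flag set
 *		break;
 *	case PM_QOS_FLAGS_SOME:
 *		// some, but not all, requests set the flag
 *		break;
 *	default:
 *		// PM_QOS_FLAGS_NONE, or PM_QOS_FLAGS_UNDEFINED when no
 *		// flags constraints exist for the device
 *		break;
 *	}
 */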
46 | |
47 | struct pm_qos_request { |
48 | struct plist_node node; |
49 | int pm_qos_class; |
50 | struct delayed_work work; /* for pm_qos_update_request_timeout */ |
51 | }; |
52 | |
53 | struct pm_qos_flags_request { |
54 | struct list_head node; |
55 | s32 flags; /* Do not change to 64 bit */ |
56 | }; |
57 | |
58 | enum dev_pm_qos_req_type { |
59 | DEV_PM_QOS_RESUME_LATENCY = 1, |
60 | DEV_PM_QOS_LATENCY_TOLERANCE, |
61 | DEV_PM_QOS_FLAGS, |
62 | }; |
63 | |
64 | struct dev_pm_qos_request { |
65 | enum dev_pm_qos_req_type type; |
66 | union { |
67 | struct plist_node pnode; |
68 | struct pm_qos_flags_request flr; |
69 | } data; |
70 | struct device *dev; |
71 | }; |
72 | |
73 | enum pm_qos_type { |
74 | PM_QOS_UNITIALIZED, |
75 | PM_QOS_MAX, /* return the largest value */ |
76 | PM_QOS_MIN, /* return the smallest value */ |
77 | PM_QOS_SUM /* return the sum */ |
78 | }; |
79 | |
80 | /* |
81 | * Note: The lockless read path depends on the CPU accessing target_value |
82 | * or effective_flags atomically. Atomic access is only guaranteed on all CPU |
83 | * types linux supports for 32 bit quantites |
84 | */ |
85 | struct pm_qos_constraints { |
86 | struct plist_head list; |
87 | s32 target_value; /* Do not change to 64 bit */ |
88 | s32 default_value; |
89 | s32 no_constraint_value; |
90 | enum pm_qos_type type; |
91 | struct blocking_notifier_head *notifiers; |
92 | }; |
93 | |
94 | struct pm_qos_flags { |
95 | struct list_head list; |
96 | s32 effective_flags; /* Do not change to 64 bit */ |
97 | }; |
98 | |
99 | struct dev_pm_qos { |
100 | struct pm_qos_constraints resume_latency; |
101 | struct pm_qos_constraints latency_tolerance; |
102 | struct pm_qos_flags flags; |
103 | struct dev_pm_qos_request *resume_latency_req; |
104 | struct dev_pm_qos_request *latency_tolerance_req; |
105 | struct dev_pm_qos_request *flags_req; |
106 | }; |
107 | |
/* Action requested of pm_qos_update_target() and pm_qos_update_flags() */
109 | enum pm_qos_req_action { |
110 | PM_QOS_ADD_REQ, /* Add a new request */ |
111 | PM_QOS_UPDATE_REQ, /* Update an existing request */ |
112 | PM_QOS_REMOVE_REQ /* Remove an existing request */ |
113 | }; |
114 | |
115 | static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) |
116 | { |
117 | return req->dev != NULL; |
118 | } |
119 | |
120 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, |
121 | enum pm_qos_req_action action, int value); |
122 | bool pm_qos_update_flags(struct pm_qos_flags *pqf, |
123 | struct pm_qos_flags_request *req, |
124 | enum pm_qos_req_action action, s32 val); |
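
/*
 * pm_qos_update_target() and pm_qos_update_flags() are the low-level helpers
 * that the request APIs below are built on.  Simplified sketch of how an
 * add operation is assumed to use them (constraints_for() is a hypothetical
 * lookup helper, not part of this header):
 *
 *	pm_qos_update_target(constraints_for(req->pm_qos_class),
 *			     &req->node, PM_QOS_ADD_REQ, value);
 *
 * Both helpers return a nonzero value when the aggregated constraint
 * changed as a result of the action.
 */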
125 | void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, |
126 | s32 value); |
127 | void pm_qos_update_request(struct pm_qos_request *req, |
128 | s32 new_value); |
129 | void pm_qos_update_request_timeout(struct pm_qos_request *req, |
130 | s32 new_value, unsigned long timeout_us); |
131 | void pm_qos_remove_request(struct pm_qos_request *req); |
132 | |
133 | int pm_qos_request(int pm_qos_class); |
134 | int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); |
135 | int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); |
136 | int pm_qos_request_active(struct pm_qos_request *req); |
137 | s32 pm_qos_read_value(struct pm_qos_constraints *c); |
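
/*
 * Illustrative sketch of the global (classful) API from driver context
 * (hypothetical caller; "foo_qos_req" is an assumed name, not from the
 * source).  A driver keeps the struct pm_qos_request alive for as long as
 * the constraint should be active:
 *
 *	static struct pm_qos_request foo_qos_req;
 *
 *	// Ask for no more than 20 us of CPU DMA latency while active.
 *	pm_qos_add_request(&foo_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *
 *	// Relax or tighten the constraint later ...
 *	pm_qos_update_request(&foo_qos_req, 100);
 *
 *	// ... or set 50 and let the request revert to PM_QOS_DEFAULT_VALUE
 *	// after 500 us.
 *	pm_qos_update_request_timeout(&foo_qos_req, 50, 500);
 *
 *	// Drop the request when it is no longer needed.
 *	pm_qos_remove_request(&foo_qos_req);
 *
 * pm_qos_request(PM_QOS_CPU_DMA_LATENCY) returns the currently aggregated
 * value for the class, and pm_qos_add_notifier() registers a notifier_block
 * that is called whenever that aggregate changes.
 */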
138 | |
139 | #ifdef CONFIG_PM |
140 | enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); |
141 | enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); |
142 | s32 __dev_pm_qos_read_value(struct device *dev); |
143 | s32 dev_pm_qos_read_value(struct device *dev); |
144 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, |
145 | enum dev_pm_qos_req_type type, s32 value); |
146 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); |
147 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); |
148 | int dev_pm_qos_add_notifier(struct device *dev, |
149 | struct notifier_block *notifier); |
150 | int dev_pm_qos_remove_notifier(struct device *dev, |
151 | struct notifier_block *notifier); |
152 | void dev_pm_qos_constraints_init(struct device *dev); |
153 | void dev_pm_qos_constraints_destroy(struct device *dev); |
154 | int dev_pm_qos_add_ancestor_request(struct device *dev, |
155 | struct dev_pm_qos_request *req, |
156 | enum dev_pm_qos_req_type type, s32 value); |
157 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); |
158 | void dev_pm_qos_hide_latency_limit(struct device *dev); |
159 | int dev_pm_qos_expose_flags(struct device *dev, s32 value); |
160 | void dev_pm_qos_hide_flags(struct device *dev); |
161 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); |
162 | s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); |
163 | int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); |
164 | int dev_pm_qos_expose_latency_tolerance(struct device *dev); |
165 | void dev_pm_qos_hide_latency_tolerance(struct device *dev); |
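
/*
 * Illustrative sketch of the per-device API (hypothetical caller, not from
 * the source; "dev" is assumed to be a valid device and "foo_dev_req" an
 * assumed name).  A driver that cannot tolerate more than 100 us of resume
 * latency on its device could do:
 *
 *	static struct dev_pm_qos_request foo_dev_req;
 *	int ret;
 *
 *	ret = dev_pm_qos_add_request(dev, &foo_dev_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	if (ret < 0)
 *		return ret;
 *
 *	// ... later, tighten or relax the constraint ...
 *	dev_pm_qos_update_request(&foo_dev_req, 50);
 *
 *	// ... and drop it on teardown.
 *	dev_pm_qos_remove_request(&foo_dev_req);
 *
 * dev_pm_qos_add_ancestor_request() works the same way but attaches the
 * request to an ancestor of the device rather than to the device itself.
 */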
166 | |
167 | static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) |
168 | { |
169 | return dev->power.qos->resume_latency_req->data.pnode.prio; |
170 | } |
171 | |
172 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) |
173 | { |
174 | return dev->power.qos->flags_req->data.flr.flags; |
175 | } |
176 | |
177 | static inline s32 dev_pm_qos_raw_read_value(struct device *dev) |
178 | { |
179 | return IS_ERR_OR_NULL(dev->power.qos) ? |
180 | PM_QOS_RESUME_LATENCY_NO_CONSTRAINT : |
181 | pm_qos_read_value(&dev->power.qos->resume_latency); |
182 | } |
183 | #else |
184 | static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, |
185 | s32 mask) |
186 | { return PM_QOS_FLAGS_UNDEFINED; } |
187 | static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, |
188 | s32 mask) |
189 | { return PM_QOS_FLAGS_UNDEFINED; } |
190 | static inline s32 __dev_pm_qos_read_value(struct device *dev) |
191 | { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } |
192 | static inline s32 dev_pm_qos_read_value(struct device *dev) |
193 | { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } |
194 | static inline int dev_pm_qos_add_request(struct device *dev, |
195 | struct dev_pm_qos_request *req, |
196 | enum dev_pm_qos_req_type type, |
197 | s32 value) |
198 | { return 0; } |
199 | static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, |
200 | s32 new_value) |
201 | { return 0; } |
202 | static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) |
203 | { return 0; } |
204 | static inline int dev_pm_qos_add_notifier(struct device *dev, |
205 | struct notifier_block *notifier) |
206 | { return 0; } |
207 | static inline int dev_pm_qos_remove_notifier(struct device *dev, |
208 | struct notifier_block *notifier) |
209 | { return 0; } |
210 | static inline void dev_pm_qos_constraints_init(struct device *dev) |
211 | { |
212 | dev->power.power_state = PMSG_ON; |
213 | } |
214 | static inline void dev_pm_qos_constraints_destroy(struct device *dev) |
215 | { |
216 | dev->power.power_state = PMSG_INVALID; |
217 | } |
218 | static inline int dev_pm_qos_add_ancestor_request(struct device *dev, |
219 | struct dev_pm_qos_request *req, |
220 | enum dev_pm_qos_req_type type, |
221 | s32 value) |
222 | { return 0; } |
223 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) |
224 | { return 0; } |
225 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} |
226 | static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) |
227 | { return 0; } |
228 | static inline void dev_pm_qos_hide_flags(struct device *dev) {} |
229 | static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) |
230 | { return 0; } |
231 | static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) |
232 | { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } |
233 | static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) |
234 | { return 0; } |
235 | static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev) |
236 | { return 0; } |
237 | static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} |
238 | |
239 | static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) |
240 | { |
241 | return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; |
242 | } |
243 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } |
244 | static inline s32 dev_pm_qos_raw_read_value(struct device *dev) |
245 | { |
246 | return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; |
247 | } |
248 | #endif |
249 | |
250 | #endif |
251 | |