1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright © 2019 Intel Corporation |
5 | */ |
6 | |
7 | #ifndef INTEL_WAKEREF_H |
8 | #define INTEL_WAKEREF_H |
9 | |
10 | #include <drm/drm_print.h> |
11 | |
12 | #include <linux/atomic.h> |
13 | #include <linux/bitfield.h> |
14 | #include <linux/bits.h> |
15 | #include <linux/lockdep.h> |
16 | #include <linux/mutex.h> |
17 | #include <linux/refcount.h> |
18 | #include <linux/ref_tracker.h> |
19 | #include <linux/slab.h> |
20 | #include <linux/stackdepot.h> |
21 | #include <linux/timer.h> |
22 | #include <linux/workqueue.h> |
23 | |
24 | typedef unsigned long intel_wakeref_t; |
25 | |
26 | #define INTEL_REFTRACK_DEAD_COUNT 16 |
27 | #define INTEL_REFTRACK_PRINT_LIMIT 16 |
28 | |
29 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) |
30 | #define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr) |
31 | #else |
32 | #define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) |
33 | #endif |
34 | |
35 | struct intel_runtime_pm; |
36 | struct intel_wakeref; |
37 | |
38 | struct intel_wakeref_ops { |
39 | int (*get)(struct intel_wakeref *wf); |
40 | int (*put)(struct intel_wakeref *wf); |
41 | }; |
42 | |
43 | struct intel_wakeref { |
44 | atomic_t count; |
45 | struct mutex mutex; |
46 | |
47 | intel_wakeref_t wakeref; |
48 | |
49 | struct drm_i915_private *i915; |
50 | const struct intel_wakeref_ops *ops; |
51 | |
52 | struct delayed_work work; |
53 | |
54 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF) |
55 | struct ref_tracker_dir debug; |
56 | #endif |
57 | }; |
58 | |
59 | struct intel_wakeref_lockclass { |
60 | struct lock_class_key mutex; |
61 | struct lock_class_key work; |
62 | }; |
63 | |
64 | void __intel_wakeref_init(struct intel_wakeref *wf, |
65 | struct drm_i915_private *i915, |
66 | const struct intel_wakeref_ops *ops, |
67 | struct intel_wakeref_lockclass *key, |
68 | const char *name); |
69 | #define intel_wakeref_init(wf, i915, ops, name) do { \ |
70 | static struct intel_wakeref_lockclass __key; \ |
71 | \ |
72 | __intel_wakeref_init((wf), (i915), (ops), &__key, name); \ |
73 | } while (0) |
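
/*
 * Example use (sketch; "foo" and foo_wakeref_ops are hypothetical). The
 * macro gives each call site its own lock classes so lockdep can tell
 * nested wakerefs apart:
 *
 *	intel_wakeref_init(&foo->wakeref, i915, &foo_wakeref_ops, "foo");
 */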
74 | |
75 | int __intel_wakeref_get_first(struct intel_wakeref *wf); |
76 | void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags); |
77 | |
78 | /** |
79 | * intel_wakeref_get: Acquire the wakeref |
80 | * @wf: the wakeref |
81 | * |
 * Acquire a hold on the wakeref. The first user to do so will also acquire
 * the runtime pm wakeref and then call intel_wakeref_ops->get()
 * underneath the wakeref mutex.
 *
 * Note that intel_wakeref_ops->get() is allowed to fail, in which case
 * the runtime-pm wakeref is released, the acquisition unwound and the
 * error reported to the caller.
89 | * |
90 | * Returns: 0 if the wakeref was acquired successfully, or a negative error |
91 | * code otherwise. |
92 | */ |
93 | static inline int |
94 | intel_wakeref_get(struct intel_wakeref *wf) |
95 | { |
96 | might_sleep(); |
97 | if (unlikely(!atomic_inc_not_zero(&wf->count))) |
98 | return __intel_wakeref_get_first(wf); |
99 | |
100 | return 0; |
101 | } |
102 | |
103 | /** |
104 | * __intel_wakeref_get: Acquire the wakeref, again |
105 | * @wf: the wakeref |
106 | * |
107 | * Increment the wakeref counter, only valid if it is already held by |
108 | * the caller. |
109 | * |
110 | * See intel_wakeref_get(). |
111 | */ |
112 | static inline void |
113 | __intel_wakeref_get(struct intel_wakeref *wf) |
114 | { |
115 | INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); |
	atomic_inc(&wf->count);
117 | } |
118 | |
119 | /** |
 * intel_wakeref_get_if_active: Acquire the wakeref if already active
121 | * @wf: the wakeref |
122 | * |
123 | * Acquire a hold on the wakeref, but only if the wakeref is already |
124 | * active. |
125 | * |
126 | * Returns: true if the wakeref was acquired, false otherwise. |
127 | */ |
128 | static inline bool |
129 | intel_wakeref_get_if_active(struct intel_wakeref *wf) |
130 | { |
	return atomic_inc_not_zero(&wf->count);
132 | } |
133 | |
134 | enum { |
135 | INTEL_WAKEREF_PUT_ASYNC_BIT = 0, |
136 | __INTEL_WAKEREF_PUT_LAST_BIT__ |
137 | }; |
138 | |
139 | static inline void |
140 | intel_wakeref_might_get(struct intel_wakeref *wf) |
141 | { |
142 | might_lock(&wf->mutex); |
143 | } |
144 | |
145 | /** |
146 | * __intel_wakeref_put: Release the wakeref |
147 | * @wf: the wakeref |
148 | * @flags: control flags |
149 | * |
150 | * Release our hold on the wakeref. When there are no more users, |
151 | * the runtime pm wakeref will be released after the intel_wakeref_ops->put() |
152 | * callback is called underneath the wakeref mutex. |
153 | * |
154 | * Note that intel_wakeref_ops->put() is allowed to fail, in which case the |
155 | * runtime-pm wakeref is retained. |
156 | * |
157 | */ |
158 | static inline void |
159 | __intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags) |
160 | #define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT) |
161 | #define INTEL_WAKEREF_PUT_DELAY \ |
162 | GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__) |
163 | { |
164 | INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); |
165 | if (unlikely(!atomic_add_unless(&wf->count, -1, 1))) |
166 | __intel_wakeref_put_last(wf, flags); |
167 | } |
168 | |
169 | static inline void |
170 | intel_wakeref_put(struct intel_wakeref *wf) |
171 | { |
172 | might_sleep(); |
	__intel_wakeref_put(wf, 0);
174 | } |
175 | |
176 | static inline void |
177 | intel_wakeref_put_async(struct intel_wakeref *wf) |
178 | { |
179 | __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC); |
180 | } |
181 | |
182 | static inline void |
183 | intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay) |
184 | { |
185 | __intel_wakeref_put(wf, |
186 | INTEL_WAKEREF_PUT_ASYNC | |
187 | FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay)); |
188 | } |
189 | |
190 | static inline void |
191 | intel_wakeref_might_put(struct intel_wakeref *wf) |
192 | { |
193 | might_lock(&wf->mutex); |
194 | } |
195 | |
196 | /** |
197 | * intel_wakeref_lock: Lock the wakeref (mutex) |
198 | * @wf: the wakeref |
199 | * |
200 | * Locks the wakeref to prevent it being acquired or released. New users |
201 | * can still adjust the counter, but the wakeref itself (and callback) |
202 | * cannot be acquired or released. |
203 | */ |
204 | static inline void |
205 | intel_wakeref_lock(struct intel_wakeref *wf) |
206 | __acquires(wf->mutex) |
207 | { |
208 | mutex_lock(&wf->mutex); |
209 | } |
210 | |
211 | /** |
212 | * intel_wakeref_unlock: Unlock the wakeref |
213 | * @wf: the wakeref |
214 | * |
215 | * Releases a previously acquired intel_wakeref_lock(). |
216 | */ |
217 | static inline void |
218 | intel_wakeref_unlock(struct intel_wakeref *wf) |
219 | __releases(wf->mutex) |
220 | { |
	mutex_unlock(&wf->mutex);
222 | } |
223 | |
224 | /** |
225 | * intel_wakeref_unlock_wait: Wait until the active callback is complete |
226 | * @wf: the wakeref |
227 | * |
 * Waits until the active callback (running underneath @wf->mutex, possibly
 * on another CPU) has completed.
230 | */ |
231 | static inline void |
232 | intel_wakeref_unlock_wait(struct intel_wakeref *wf) |
233 | { |
234 | mutex_lock(&wf->mutex); |
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
237 | } |
238 | |
239 | /** |
240 | * intel_wakeref_is_active: Query whether the wakeref is currently held |
241 | * @wf: the wakeref |
242 | * |
243 | * Returns: true if the wakeref is currently held. |
244 | */ |
245 | static inline bool |
246 | intel_wakeref_is_active(const struct intel_wakeref *wf) |
247 | { |
248 | return READ_ONCE(wf->wakeref); |
249 | } |
250 | |
251 | /** |
252 | * __intel_wakeref_defer_park: Defer the current park callback |
253 | * @wf: the wakeref |
254 | */ |
255 | static inline void |
256 | __intel_wakeref_defer_park(struct intel_wakeref *wf) |
257 | { |
258 | lockdep_assert_held(&wf->mutex); |
259 | INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count)); |
	atomic_set_release(&wf->count, 1);
261 | } |
262 | |
263 | /** |
264 | * intel_wakeref_wait_for_idle: Wait until the wakeref is idle |
265 | * @wf: the wakeref |
266 | * |
267 | * Wait for the earlier asynchronous release of the wakeref. Note |
268 | * this will wait for any third party as well, so make sure you only wait |
269 | * when you have control over the wakeref and trust no one else is acquiring |
270 | * it. |
271 | * |
272 | * Return: 0 on success, error code if killed. |
273 | */ |
274 | int intel_wakeref_wait_for_idle(struct intel_wakeref *wf); |
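
/*
 * Sketch: quiesce before suspend; a fatal signal aborts the wait with an
 * error:
 *
 *	err = intel_wakeref_wait_for_idle(&foo->wakeref);
 *	if (err)
 *		return err;
 */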
275 | |
276 | #define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1)) |
277 | |
278 | static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir) |
279 | { |
280 | struct ref_tracker *user = NULL; |
281 | |
	ref_tracker_alloc(dir, &user, GFP_NOWAIT);
283 | |
284 | return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF; |
285 | } |
286 | |
287 | static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir, |
288 | intel_wakeref_t handle) |
289 | { |
290 | struct ref_tracker *user; |
291 | |
292 | user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle; |
293 | |
	ref_tracker_free(dir, &user);
295 | } |
296 | |
297 | void intel_ref_tracker_show(struct ref_tracker_dir *dir, |
298 | struct drm_printer *p); |
299 | |
300 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF) |
301 | |
302 | static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf) |
303 | { |
	return intel_ref_tracker_alloc(&wf->debug);
305 | } |
306 | |
307 | static inline void intel_wakeref_untrack(struct intel_wakeref *wf, |
308 | intel_wakeref_t handle) |
309 | { |
	intel_ref_tracker_free(&wf->debug, handle);
311 | } |
312 | |
313 | #else |
314 | |
315 | static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf) |
316 | { |
	return INTEL_WAKEREF_DEF;
318 | } |
319 | |
320 | static inline void intel_wakeref_untrack(struct intel_wakeref *wf, |
321 | intel_wakeref_t handle) |
322 | { |
323 | } |
324 | |
325 | #endif |
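
/*
 * Sketch: pair every tracked acquisition with a matching release so that,
 * under CONFIG_DRM_I915_DEBUG_WAKEREF, the ref_tracker can report leaked
 * holders:
 *
 *	intel_wakeref_t handle = intel_wakeref_track(&foo->wakeref);
 *	...
 *	intel_wakeref_untrack(&foo->wakeref, handle);
 */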
326 | |
327 | struct intel_wakeref_auto { |
328 | struct drm_i915_private *i915; |
329 | struct timer_list timer; |
330 | intel_wakeref_t wakeref; |
331 | spinlock_t lock; |
332 | refcount_t count; |
333 | }; |
334 | |
335 | /** |
336 | * intel_wakeref_auto: Delay the runtime-pm autosuspend |
337 | * @wf: the wakeref |
338 | * @timeout: relative timeout in jiffies |
339 | * |
340 | * The runtime-pm core uses a suspend delay after the last wakeref |
341 | * is released before triggering runtime suspend of the device. That |
342 | * delay is configurable via sysfs with little regard to the device |
343 | * characteristics. Instead, we want to tune the autosuspend based on our |
344 | * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied |
345 | * timeout. |
346 | * |
347 | * Pass @timeout = 0 to cancel a previous autosuspend by executing the |
348 | * suspend immediately. |
349 | */ |
350 | void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout); |
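
/*
 * Sketch ("uw" is a hypothetical struct intel_wakeref_auto): extend the
 * autosuspend by two seconds after a user access, then cancel it by
 * executing the suspend immediately:
 *
 *	intel_wakeref_auto(&uw, 2 * HZ);
 *	...
 *	intel_wakeref_auto(&uw, 0);
 */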
351 | |
352 | void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, |
353 | struct drm_i915_private *i915); |
354 | void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf); |
355 | |
356 | #endif /* INTEL_WAKEREF_H */ |
357 | |