// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)ktime_us_delta(rettime, calltime));
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume. In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
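
/*
 * Illustrative sketch (hypothetical driver, not part of this file): with a
 * struct dev_pm_ops populated as
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *	};
 *
 * pm_op() returns foo_suspend for PM_EVENT_SUSPEND and foo_resume for
 * PM_EVENT_RESUME. The _late/_early and _noirq helpers below select from the
 * corresponding struct members in the same way.
 */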

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
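
/*
 * Continuing the hypothetical example above: filling in
 *
 *	.suspend_noirq = foo_suspend_noirq,
 *	.resume_noirq  = foo_resume_noirq,
 *
 * makes pm_noirq_op() return those callbacks. They run between
 * suspend_device_irqs() and resume_device_irqs(), so they cannot rely on the
 * device's interrupt handler being invoked.
 */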

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device *dev;
	struct task_struct *tsk;
	struct timer_list timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL, KERN_EMERG);
	panic("%s %s: unrecoverable failure\n",
	      dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
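
/*
 * Usage sketch of the watchdog helpers above (this is the pattern used by
 * device_resume() and device_suspend() below):
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	device_lock(dev);
 *	...run the PM callback...
 *	device_unlock(dev);
 *	dpm_watchdog_clear(&wd);
 *
 * With CONFIG_DPM_WATCHDOG unset, all three expand to nothing.
 */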

/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_skip_suspend(dev);

	return !dev->power.must_resume;
}
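
/*
 * For example, a hypothetical driver would typically opt in to the skip
 * optimizations checked above and in device_suspend_noirq() by setting, at
 * probe time:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 */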

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		dev->power.async_in_progress = true;

		get_device(dev);

		if (async_schedule_dev_nocall(func, dev))
			return true;

		put_device(dev);
	}
	/*
	 * Because async_schedule_dev_nocall() above has returned false or it
	 * has not been called at all, func() is not running and it is safe to
	 * update the async_in_progress flag without extra synchronization.
	 */
	dev->power.async_in_progress = false;
	return false;
}
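
/*
 * Note: a device only takes the asynchronous path above if it was opted in,
 * e.g. (sketch) via device_enable_async_suspend(dev) at probe time, and if
 * the global pm_async_enabled knob (/sys/power/pm_async) is set.
 */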

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback for
	 * this device later, it needs to appear as "suspended" to PM-runtime,
	 * so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
	 * to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_skip_suspend(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);

	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
	}
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_noirq(dev, pm_transition, true);
	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);

	async_error = 0;
	pm_transition = state;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_noirq(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	if (async_error)
		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);

	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async early" : " early", error);
	}
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_early(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);

	async_error = 0;
	pm_transition = state;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_early(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	if (async_error)
		dpm_save_failed_step(SUSPEND_RESUME_EARLY);

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		list_move_tail(&dev->power.entry, &dpm_prepared_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);
	if (async_error)
		dpm_save_failed_step(SUSPEND_RESUME);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal. Also resume them if doing that is not allowed
	 * to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_suspend_noirq(dev, pm_transition, true);
	put_device(dev);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);

	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		list_move(&dev->power.entry, &dpm_noirq_list);

		if (dpm_async_fn(dev, async_suspend_noirq))
			continue;

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error)
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);

	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (device_wakeup_path(dev) && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async late" : " late", error);
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_suspend_late(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);

	pm_transition = state;
	async_error = 0;

	wake_up_all_idle_cpus();

	mutex_lock(&dpm_list_mtx);

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		list_move(&dev->power.entry, &dpm_late_early_list);

		if (dpm_async_fn(dev, async_suspend_late))
			continue;

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		dev->power.direct_complete = false;

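	/*
	 * The PM-runtime status is rechecked below after pm_runtime_disable(),
	 * because a concurrent runtime resume may change it between the first
	 * check and the point where runtime PM is actually disabled, in which
	 * case the direct-complete optimization must not be used.
	 */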
1626 | if (dev->power.direct_complete) { |
1627 | if (pm_runtime_status_suspended(dev)) { |
1628 | pm_runtime_disable(dev); |
1629 | if (pm_runtime_status_suspended(dev)) { |
1630 | pm_dev_dbg(dev, state, info: "direct-complete " ); |
1631 | goto Complete; |
1632 | } |
1633 | |
1634 | pm_runtime_enable(dev); |
1635 | } |
1636 | dev->power.direct_complete = false; |
1637 | } |
1638 | |
1639 | dev->power.may_skip_resume = true; |
1640 | dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME); |
1641 | |
1642 | dpm_watchdog_set(wd: &wd, dev); |
1643 | device_lock(dev); |
1644 | |
1645 | if (dev->pm_domain) { |
1646 | info = "power domain " ; |
1647 | callback = pm_op(ops: &dev->pm_domain->ops, state); |
1648 | goto Run; |
1649 | } |
1650 | |
1651 | if (dev->type && dev->type->pm) { |
1652 | info = "type " ; |
1653 | callback = pm_op(ops: dev->type->pm, state); |
1654 | goto Run; |
1655 | } |
1656 | |
1657 | if (dev->class && dev->class->pm) { |
1658 | info = "class " ; |
1659 | callback = pm_op(ops: dev->class->pm, state); |
1660 | goto Run; |
1661 | } |
1662 | |
1663 | if (dev->bus) { |
1664 | if (dev->bus->pm) { |
1665 | info = "bus " ; |
1666 | callback = pm_op(ops: dev->bus->pm, state); |
1667 | } else if (dev->bus->suspend) { |
1668 | pm_dev_dbg(dev, state, info: "legacy bus " ); |
1669 | error = legacy_suspend(dev, state, cb: dev->bus->suspend, |
1670 | info: "legacy bus " ); |
1671 | goto End; |
1672 | } |
1673 | } |
1674 | |
1675 | Run: |
1676 | if (!callback && dev->driver && dev->driver->pm) { |
1677 | info = "driver " ; |
1678 | callback = pm_op(ops: dev->driver->pm, state); |
1679 | } |
1680 | |
1681 | error = dpm_run_callback(cb: callback, dev, state, info); |
1682 | |
1683 | End: |
1684 | if (!error) { |
1685 | dev->power.is_suspended = true; |
1686 | if (device_may_wakeup(dev)) |
1687 | dev->power.wakeup_path = true; |
1688 | |
1689 | dpm_propagate_wakeup_to_parent(dev); |
1690 | dpm_clear_superiors_direct_complete(dev); |
1691 | } |
1692 | |
1693 | device_unlock(dev); |
1694 | dpm_watchdog_clear(wd: &wd); |
1695 | |
1696 | Complete: |
1697 | if (error) { |
1698 | async_error = error; |
1699 | dpm_save_failed_dev(name: dev_name(dev)); |
1700 | pm_dev_err(dev, state, info: async ? " async" : "" , error); |
1701 | } |
1702 | |
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_suspend(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		list_move(&dev->power.entry, &dpm_suspended_list);

		if (dpm_async_fn(dev, async_suspend))
			continue;

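		/*
		 * Hold a reference so that the device cannot go away while
		 * dpm_list_mtx is dropped around the synchronous callback.
		 */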
		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error)
		dpm_save_failed_step(SUSPEND_SUSPEND);

	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for the given device. No new children
 * of the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase. The matching pm_runtime_put()
	 * is done in device_complete(), or below if this function fails.
	 */
	pm_runtime_get_noresume(dev);

	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(dev, callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants". This only
	 * applies to suspend transitions, however.
	 */
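	/*
	 * Illustrative sketch (foo_prepare() is a hypothetical callback, not
	 * part of this file): a driver confident that its runtime-suspended
	 * state is also suitable for system suspend could simply report that
	 * state from ->prepare():
	 *
	 *	static int foo_prepare(struct device *dev)
	 *	{
	 *		return pm_runtime_suspended(dev);
	 *	}
	 */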
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disabling probing of devices. This sync point is important at least
	 * at boot time and during hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing devices during suspend or hibernation is unsafe and would
	 * make system behavior unpredictable, so prohibit device probing here
	 * and defer any probes instead. The normal behavior will be restored
	 * in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list) && !error) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);

		if (!error) {
			dev->power.is_prepared = true;
			if (!list_empty(&dev->power.entry))
				list_move_tail(&dev->power.entry, &dpm_prepared_list);
		} else if (error == -EAGAIN) {
			error = 0;
		} else {
			dev_info(dev, "not prepared for power transition: code %d\n",
				 error);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error)
		dpm_save_failed_step(SUSPEND_PREPARE);
	else
		error = dpm_suspend(state);

	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
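
/*
 * Illustrative caller pattern (a sketch only; the real caller is the system
 * suspend core). On failure, the caller is expected to roll the devices back:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (error)
 *		dpm_resume_end(PMSG_RESUME);
 */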
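/*
 * Called via the suspend_report_result() macro, which passes the name of the
 * calling function as @function.
 */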
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
	if (ret)
		dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
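
/*
 * Illustrative (hypothetical) use from a driver's suspend path; "foo" and its
 * supplier_dev field are made-up names. The call blocks until the suspend or
 * resume of the other device has completed:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return device_pm_wait_for_dev(dev, foo->supplier_dev);
 *	}
 */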

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
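
/*
 * Hypothetical example: counting the devices on dpm_list.
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_one);
 */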

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
}

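/**
 * dev_pm_skip_suspend - Check whether to skip a device's suspend callbacks.
 * @dev: Device to check.
 *
 * Return %true if the device has DPM_FLAG_SMART_SUSPEND set and its runtime
 * PM status is "suspended", in which case the system-wide suspend callbacks
 * may be skipped for it.
 */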
bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
	       pm_runtime_status_suspended(dev);
}
