/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
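
/*
 * Illustrative sketch (not part of the original file): a driver using the
 * autosuspend machinery typically refreshes power.last_busy after each I/O
 * burst and then drops its usage count with the _autosuspend variant, so
 * that the expiration time computed above keeps moving into the future
 * while the device is busy:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */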

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers, to solve a deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or the
 * block device itself), a deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  This
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI-style situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to touch the ancestors any more if the flag
		 * has already been set on this device.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the parent's flag only if none of its children
		 * have the flag set, because an ancestor's flag may have
		 * been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
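
/*
 * Illustrative sketch (not part of the original file): a block device driver
 * would typically set the flag right after device_add() and clear it again
 * before device_del(), as the comment above requires; "foo" is a
 * hypothetical driver structure embedding a struct device:
 *
 *	ret = device_add(&foo->dev);
 *	if (ret)
 *		return ret;
 *	pm_runtime_set_memalloc_noio(&foo->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 */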

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put(link->supplier);
	}
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

 fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might occur if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be part of an iSCSI block device, so
		 * network devices and their ancestors should be marked
		 * as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry it out; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
					dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						(NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						ns_to_ktime(expires),
						slack,
						HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has
		 * not been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is in the future, the timer has fired too early
	 * (for example, because the expiration was moved forward in the
	 * meantime), so do nothing and let the rescheduled timer handle it.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
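
/*
 * Illustrative sketch (not part of the original file): a driver that knows
 * its device will stay idle for a while can ask for a suspend request to be
 * queued up later instead of suspending synchronously; the 5000 ms delay
 * below is an arbitrary example value:
 *
 *	ret = pm_schedule_suspend(dev, 5000);
 *	if (ret < 0)
 *		dev_dbg(dev, "suspend not scheduled: %d\n", ret);
 */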

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
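
/*
 * Illustrative sketch (not part of the original file): the three entry
 * points above back the usual get/put pattern in drivers, where
 * pm_runtime_get_sync() maps to __pm_runtime_resume(dev, RPM_GET_PUT) and
 * pm_runtime_put() maps to __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC):
 *
 *	ret = pm_runtime_get_sync(dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);	balance the usage count
 *		return ret;
 *	}
 *	...access the hardware...
 *	pm_runtime_put(dev);			allow the device to idle again
 */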

/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
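
/*
 * Illustrative sketch (not part of the original file): a fast path that must
 * never wait for a resume can use this helper to touch the hardware only
 * when the device is already powered up:
 *
 *	if (pm_runtime_get_if_in_use(dev) > 0) {
 *		...access the hardware...
 *		pm_runtime_put(dev);
 *	}
 */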

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irq(&dev->power.lock);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irq(&dev->power.lock);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

 out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

 out:
	spin_unlock_irq(&dev->power.lock);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
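
/*
 * Illustrative sketch (not part of the original file): this function backs
 * pm_runtime_set_active() and pm_runtime_set_suspended(), which drivers use
 * at probe time to tell the PM core about the initial hardware state before
 * enabling runtime PM:
 *
 *	pm_runtime_set_active(dev);	hardware was left powered on
 *	pm_runtime_enable(dev);
 */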

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth--;

		/* About to enable runtime PM, set accounting_timestamp to now */
		if (!dev->power.disable_depth)
			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
	} else {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
	}

	WARN(!dev->power.disable_depth &&
	     dev->power.runtime_status == RPM_SUSPENDED &&
	     !dev->power.ignore_children &&
	     atomic_read(&dev->power.child_count) > 0,
	     "Enabling runtime PM for inactive device (%s) with active children\n",
	     dev_name(dev));

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
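
/*
 * Illustrative sketch (not part of the original file): once a driver has
 * marked its device irq-safe, the synchronous helpers may be called in
 * atomic context, for example from an interrupt handler; the foo_irq()
 * handler below is hypothetical:
 *
 *	pm_runtime_irq_safe(dev);	once, at probe time
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		pm_runtime_get_sync(dev);	legal in atomic context now
 *		...handle the interrupt...
 *		pm_runtime_put(dev);
 *		return IRQ_HANDLED;
 *	}
 */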

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
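
/*
 * Illustrative sketch (not part of the original file): the usual probe-time
 * setup enabling autosuspend with a 2 s delay; pm_runtime_use_autosuspend()
 * is the inline wrapper around __pm_runtime_use_autosuspend(dev, true):
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */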

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (as many times as needed).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 *
 * Since the device is guaranteed to be runtime-active at the point this is
 * called, nothing else needs to be done here.
 *
 * Moreover, this is called after device_links_busy() has returned 'false', so
 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
 * therefore rpm_active can't be manipulated concurrently.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put_noidle(dev);
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			refcount_inc(&link->rpm_active);
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			if (refcount_dec_not_one(&link->rpm_active))
				pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

void pm_runtime_drop_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend callback to suspend it
 * and change its runtime PM status field to RPM_SUSPENDED.  Also, if the
 * device's usage and children counters don't indicate that the device was in
 * use before the system-wide transition under way, decrement its parent's
 * children counter (if there is a parent).  Keep runtime PM disabled to
 * preserve the state unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low-power state, and it should only be
 * used during system-wide PM transitions to sleep states.  It assumes that
 * the analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		__update_runtime_status(dev, RPM_SUSPENDED);

	return 0;

err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device into full power, if it is
 * expected to be used on system resume.  In the other case, we defer the
 * resume to be managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);

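/*
 * Illustrative sketch (not part of the original file): drivers whose runtime
 * PM callbacks fully describe the device's low-power state often reuse them
 * for system sleep by plugging the two helpers above into their dev_pm_ops;
 * the foo_* callbacks are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */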