Lines Matching refs:dev in drivers/base/power/runtime.c (the Linux kernel's runtime PM core; a setup_timer()/ACCESS_ONCE()-era tree, circa v4.x)

Each entry reads: <source line> <matching code> in <enclosing function>; a trailing "argument" or "local" notes how dev is bound at that line. Only lines containing dev are listed, so function bodies appear with gaps.

18 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)  in __rpm_get_callback()  argument
23 if (dev->pm_domain) in __rpm_get_callback()
24 ops = &dev->pm_domain->ops; in __rpm_get_callback()
25 else if (dev->type && dev->type->pm) in __rpm_get_callback()
26 ops = dev->type->pm; in __rpm_get_callback()
27 else if (dev->class && dev->class->pm) in __rpm_get_callback()
28 ops = dev->class->pm; in __rpm_get_callback()
29 else if (dev->bus && dev->bus->pm) in __rpm_get_callback()
30 ops = dev->bus->pm; in __rpm_get_callback()
39 if (!cb && dev->driver && dev->driver->pm) in __rpm_get_callback()
40 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); in __rpm_get_callback()
45 #define RPM_GET_CALLBACK(dev, callback) \ argument
46 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
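
The helper above resolves a runtime-PM callback in a fixed priority order: PM domain, then device type, class, and bus, falling back to the driver's own dev_pm_ops (lines 39-40). A minimal sketch of the driver-side fallback, with all foo_* names hypothetical:

    /* If no PM domain/type/class/bus supplies ops, RPM_GET_CALLBACK(dev,
     * runtime_suspend) lands on dev->driver->pm, i.e. foo_pm_ops below. */
    static int foo_runtime_suspend(struct device *dev)
    {
            /* quiesce the hardware, gate clocks, save context */
            return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
            /* ungate clocks, restore context */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };
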
48 static int rpm_resume(struct device *dev, int rpmflags);
49 static int rpm_suspend(struct device *dev, int rpmflags);
62 void update_pm_runtime_accounting(struct device *dev) in update_pm_runtime_accounting() argument
67 delta = now - dev->power.accounting_timestamp; in update_pm_runtime_accounting()
69 dev->power.accounting_timestamp = now; in update_pm_runtime_accounting()
71 if (dev->power.disable_depth > 0) in update_pm_runtime_accounting()
74 if (dev->power.runtime_status == RPM_SUSPENDED) in update_pm_runtime_accounting()
75 dev->power.suspended_jiffies += delta; in update_pm_runtime_accounting()
77 dev->power.active_jiffies += delta; in update_pm_runtime_accounting()
80 static void __update_runtime_status(struct device *dev, enum rpm_status status) in __update_runtime_status() argument
82 update_pm_runtime_accounting(dev); in __update_runtime_status()
83 dev->power.runtime_status = status; in __update_runtime_status()
90 static void pm_runtime_deactivate_timer(struct device *dev) in pm_runtime_deactivate_timer() argument
92 if (dev->power.timer_expires > 0) { in pm_runtime_deactivate_timer()
93 del_timer(&dev->power.suspend_timer); in pm_runtime_deactivate_timer()
94 dev->power.timer_expires = 0; in pm_runtime_deactivate_timer()
102 static void pm_runtime_cancel_pending(struct device *dev) in pm_runtime_cancel_pending() argument
104 pm_runtime_deactivate_timer(dev); in pm_runtime_cancel_pending()
109 dev->power.request = RPM_REQ_NONE; in pm_runtime_cancel_pending()
124 unsigned long pm_runtime_autosuspend_expiration(struct device *dev) in pm_runtime_autosuspend_expiration() argument
131 if (!dev->power.use_autosuspend) in pm_runtime_autosuspend_expiration()
134 autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); in pm_runtime_autosuspend_expiration()
138 last_busy = ACCESS_ONCE(dev->power.last_busy); in pm_runtime_autosuspend_expiration()
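
Only the lines naming dev survive the match, so the arithmetic is elided. A hedged reconstruction of what the function computes (not verbatim): the expiration is the last recorded activity plus the configured delay, or 0 if that moment has already passed, meaning the device may suspend now.

    /* Hedged sketch of the elided computation. */
    static unsigned long autosuspend_expiration_sketch(unsigned long last_busy,
                                                       int delay_ms)
    {
            unsigned long expires = last_busy + msecs_to_jiffies(delay_ms);

            return time_after(expires, jiffies) ? expires : 0;
    }
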
159 static int dev_memalloc_noio(struct device *dev, void *data) in dev_memalloc_noio() argument
161 return dev->power.memalloc_noio; in dev_memalloc_noio()
192 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) in pm_runtime_set_memalloc_noio() argument
201 spin_lock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
202 enabled = dev->power.memalloc_noio; in pm_runtime_set_memalloc_noio()
203 dev->power.memalloc_noio = enable; in pm_runtime_set_memalloc_noio()
204 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
213 dev = dev->parent; in pm_runtime_set_memalloc_noio()
220 if (!dev || (!enable && in pm_runtime_set_memalloc_noio()
221 device_for_each_child(dev, NULL, in pm_runtime_set_memalloc_noio()
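
The loop walks dev->parent so that every ancestor of, say, a block device gets power.memalloc_noio set; rpm_callback() below then runs the PM callbacks under memalloc-NOIO semantics, so a runtime resume in the I/O path cannot recurse into reclaim and back into I/O. A hedged probe-time sketch (foo_* hypothetical):

    static int foo_storage_probe(struct device *dev)
    {
            /* Devices on the block-I/O path ask the PM core to run their
             * runtime callbacks with GFP_NOIO allocation behavior. */
            pm_runtime_set_memalloc_noio(dev, true);
            pm_runtime_enable(dev);
            return 0;
    }
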
233 static int rpm_check_suspend_allowed(struct device *dev) in rpm_check_suspend_allowed() argument
237 if (dev->power.runtime_error) in rpm_check_suspend_allowed()
239 else if (dev->power.disable_depth > 0) in rpm_check_suspend_allowed()
241 else if (atomic_read(&dev->power.usage_count) > 0) in rpm_check_suspend_allowed()
243 else if (!pm_children_suspended(dev)) in rpm_check_suspend_allowed()
247 else if ((dev->power.deferred_resume in rpm_check_suspend_allowed()
248 && dev->power.runtime_status == RPM_SUSPENDING) in rpm_check_suspend_allowed()
249 || (dev->power.request_pending in rpm_check_suspend_allowed()
250 && dev->power.request == RPM_REQ_RESUME)) in rpm_check_suspend_allowed()
252 else if (__dev_pm_qos_read_value(dev) < 0) in rpm_check_suspend_allowed()
254 else if (dev->power.runtime_status == RPM_SUSPENDED) in rpm_check_suspend_allowed()
265 static int __rpm_callback(int (*cb)(struct device *), struct device *dev) in __rpm_callback() argument
266 __releases(&dev->power.lock) __acquires(&dev->power.lock) in __rpm_callback()
270 if (dev->power.irq_safe) in __rpm_callback()
271 spin_unlock(&dev->power.lock); in __rpm_callback()
273 spin_unlock_irq(&dev->power.lock); in __rpm_callback()
275 retval = cb(dev); in __rpm_callback()
277 if (dev->power.irq_safe) in __rpm_callback()
278 spin_lock(&dev->power.lock); in __rpm_callback()
280 spin_lock_irq(&dev->power.lock); in __rpm_callback()
298 static int rpm_idle(struct device *dev, int rpmflags) in rpm_idle() argument
303 trace_rpm_idle(dev, rpmflags); in rpm_idle()
304 retval = rpm_check_suspend_allowed(dev); in rpm_idle()
309 else if (dev->power.runtime_status != RPM_ACTIVE) in rpm_idle()
316 else if (dev->power.request_pending && in rpm_idle()
317 dev->power.request > RPM_REQ_IDLE) in rpm_idle()
321 else if (dev->power.idle_notification) in rpm_idle()
327 dev->power.request = RPM_REQ_NONE; in rpm_idle()
329 if (dev->power.no_callbacks) in rpm_idle()
334 dev->power.request = RPM_REQ_IDLE; in rpm_idle()
335 if (!dev->power.request_pending) { in rpm_idle()
336 dev->power.request_pending = true; in rpm_idle()
337 queue_work(pm_wq, &dev->power.work); in rpm_idle()
339 trace_rpm_return_int(dev, _THIS_IP_, 0); in rpm_idle()
343 dev->power.idle_notification = true; in rpm_idle()
345 callback = RPM_GET_CALLBACK(dev, runtime_idle); in rpm_idle()
348 retval = __rpm_callback(callback, dev); in rpm_idle()
350 dev->power.idle_notification = false; in rpm_idle()
351 wake_up_all(&dev->power.wait_queue); in rpm_idle()
354 trace_rpm_return_int(dev, _THIS_IP_, retval); in rpm_idle()
355 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); in rpm_idle()
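
The last line shows the ->runtime_idle() contract in this kernel: returning 0 lets the core proceed straight to rpm_suspend(dev, rpmflags | RPM_AUTO), so the callback no longer needs to call pm_runtime_suspend() itself; a negative return (commonly -EBUSY) vetoes the suspend. Hedged example, with foo/priv hypothetical:

    static int foo_runtime_idle(struct device *dev)
    {
            struct foo *priv = dev_get_drvdata(dev);        /* hypothetical */

            /* 0 = "go ahead and suspend", -EBUSY = "not now". */
            return priv->dma_in_flight ? -EBUSY : 0;
    }
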
363 static int rpm_callback(int (*cb)(struct device *), struct device *dev) in rpm_callback() argument
370 if (dev->power.memalloc_noio) { in rpm_callback()
383 retval = __rpm_callback(cb, dev); in rpm_callback()
386 retval = __rpm_callback(cb, dev); in rpm_callback()
389 dev->power.runtime_error = retval; in rpm_callback()
414 static int rpm_suspend(struct device *dev, int rpmflags) in rpm_suspend() argument
415 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_suspend()
421 trace_rpm_suspend(dev, rpmflags); in rpm_suspend()
424 retval = rpm_check_suspend_allowed(dev); in rpm_suspend()
430 else if (dev->power.runtime_status == RPM_RESUMING && in rpm_suspend()
438 && dev->power.runtime_status != RPM_SUSPENDING) { in rpm_suspend()
439 unsigned long expires = pm_runtime_autosuspend_expiration(dev); in rpm_suspend()
443 dev->power.request = RPM_REQ_NONE; in rpm_suspend()
452 if (!(dev->power.timer_expires && time_before_eq( in rpm_suspend()
453 dev->power.timer_expires, expires))) { in rpm_suspend()
454 dev->power.timer_expires = expires; in rpm_suspend()
455 mod_timer(&dev->power.suspend_timer, expires); in rpm_suspend()
457 dev->power.timer_autosuspends = 1; in rpm_suspend()
463 pm_runtime_cancel_pending(dev); in rpm_suspend()
465 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_suspend()
473 if (dev->power.irq_safe) { in rpm_suspend()
474 spin_unlock(&dev->power.lock); in rpm_suspend()
478 spin_lock(&dev->power.lock); in rpm_suspend()
484 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_suspend()
486 if (dev->power.runtime_status != RPM_SUSPENDING) in rpm_suspend()
489 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
493 spin_lock_irq(&dev->power.lock); in rpm_suspend()
495 finish_wait(&dev->power.wait_queue, &wait); in rpm_suspend()
499 if (dev->power.no_callbacks) in rpm_suspend()
504 dev->power.request = (rpmflags & RPM_AUTO) ? in rpm_suspend()
506 if (!dev->power.request_pending) { in rpm_suspend()
507 dev->power.request_pending = true; in rpm_suspend()
508 queue_work(pm_wq, &dev->power.work); in rpm_suspend()
513 __update_runtime_status(dev, RPM_SUSPENDING); in rpm_suspend()
515 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in rpm_suspend()
517 retval = rpm_callback(callback, dev); in rpm_suspend()
522 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_suspend()
523 pm_runtime_deactivate_timer(dev); in rpm_suspend()
525 if (dev->parent) { in rpm_suspend()
526 parent = dev->parent; in rpm_suspend()
529 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
531 if (dev->power.deferred_resume) { in rpm_suspend()
532 dev->power.deferred_resume = false; in rpm_suspend()
533 rpm_resume(dev, 0); in rpm_suspend()
539 if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { in rpm_suspend()
540 spin_unlock(&dev->power.lock); in rpm_suspend()
546 spin_lock(&dev->power.lock); in rpm_suspend()
550 trace_rpm_return_int(dev, _THIS_IP_, retval); in rpm_suspend()
555 __update_runtime_status(dev, RPM_ACTIVE); in rpm_suspend()
556 dev->power.deferred_resume = false; in rpm_suspend()
557 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
560 dev->power.runtime_error = 0; in rpm_suspend()
569 pm_runtime_autosuspend_expiration(dev) != 0) in rpm_suspend()
572 pm_runtime_cancel_pending(dev); in rpm_suspend()
594 static int rpm_resume(struct device *dev, int rpmflags) in rpm_resume() argument
595 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_resume()
601 trace_rpm_resume(dev, rpmflags); in rpm_resume()
604 if (dev->power.runtime_error) in rpm_resume()
606 else if (dev->power.disable_depth == 1 && dev->power.is_suspended in rpm_resume()
607 && dev->power.runtime_status == RPM_ACTIVE) in rpm_resume()
609 else if (dev->power.disable_depth > 0) in rpm_resume()
620 dev->power.request = RPM_REQ_NONE; in rpm_resume()
621 if (!dev->power.timer_autosuspends) in rpm_resume()
622 pm_runtime_deactivate_timer(dev); in rpm_resume()
624 if (dev->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
629 if (dev->power.runtime_status == RPM_RESUMING in rpm_resume()
630 || dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
634 if (dev->power.runtime_status == RPM_SUSPENDING) in rpm_resume()
635 dev->power.deferred_resume = true; in rpm_resume()
641 if (dev->power.irq_safe) { in rpm_resume()
642 spin_unlock(&dev->power.lock); in rpm_resume()
646 spin_lock(&dev->power.lock); in rpm_resume()
652 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_resume()
654 if (dev->power.runtime_status != RPM_RESUMING in rpm_resume()
655 && dev->power.runtime_status != RPM_SUSPENDING) in rpm_resume()
658 spin_unlock_irq(&dev->power.lock); in rpm_resume()
662 spin_lock_irq(&dev->power.lock); in rpm_resume()
664 finish_wait(&dev->power.wait_queue, &wait); in rpm_resume()
673 if (dev->power.no_callbacks && !parent && dev->parent) { in rpm_resume()
674 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); in rpm_resume()
675 if (dev->parent->power.disable_depth > 0 in rpm_resume()
676 || dev->parent->power.ignore_children in rpm_resume()
677 || dev->parent->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
678 atomic_inc(&dev->parent->power.child_count); in rpm_resume()
679 spin_unlock(&dev->parent->power.lock); in rpm_resume()
683 spin_unlock(&dev->parent->power.lock); in rpm_resume()
688 dev->power.request = RPM_REQ_RESUME; in rpm_resume()
689 if (!dev->power.request_pending) { in rpm_resume()
690 dev->power.request_pending = true; in rpm_resume()
691 queue_work(pm_wq, &dev->power.work); in rpm_resume()
697 if (!parent && dev->parent) { in rpm_resume()
703 parent = dev->parent; in rpm_resume()
704 if (dev->power.irq_safe) in rpm_resume()
706 spin_unlock(&dev->power.lock); in rpm_resume()
723 spin_lock(&dev->power.lock); in rpm_resume()
730 if (dev->power.no_callbacks) in rpm_resume()
733 __update_runtime_status(dev, RPM_RESUMING); in rpm_resume()
735 callback = RPM_GET_CALLBACK(dev, runtime_resume); in rpm_resume()
737 retval = rpm_callback(callback, dev); in rpm_resume()
739 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_resume()
740 pm_runtime_cancel_pending(dev); in rpm_resume()
743 __update_runtime_status(dev, RPM_ACTIVE); in rpm_resume()
747 wake_up_all(&dev->power.wait_queue); in rpm_resume()
750 rpm_idle(dev, RPM_ASYNC); in rpm_resume()
753 if (parent && !dev->power.irq_safe) { in rpm_resume()
754 spin_unlock_irq(&dev->power.lock); in rpm_resume()
758 spin_lock_irq(&dev->power.lock); in rpm_resume()
761 trace_rpm_return_int(dev, _THIS_IP_, retval); in rpm_resume()
775 struct device *dev = container_of(work, struct device, power.work); in pm_runtime_work() local
778 spin_lock_irq(&dev->power.lock); in pm_runtime_work()
780 if (!dev->power.request_pending) in pm_runtime_work()
783 req = dev->power.request; in pm_runtime_work()
784 dev->power.request = RPM_REQ_NONE; in pm_runtime_work()
785 dev->power.request_pending = false; in pm_runtime_work()
791 rpm_idle(dev, RPM_NOWAIT); in pm_runtime_work()
794 rpm_suspend(dev, RPM_NOWAIT); in pm_runtime_work()
797 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); in pm_runtime_work()
800 rpm_resume(dev, RPM_NOWAIT); in pm_runtime_work()
805 spin_unlock_irq(&dev->power.lock); in pm_runtime_work()
816 struct device *dev = (struct device *)data; in pm_suspend_timer_fn() local
820 spin_lock_irqsave(&dev->power.lock, flags); in pm_suspend_timer_fn()
822 expires = dev->power.timer_expires; in pm_suspend_timer_fn()
825 dev->power.timer_expires = 0; in pm_suspend_timer_fn()
826 rpm_suspend(dev, dev->power.timer_autosuspends ? in pm_suspend_timer_fn()
830 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_suspend_timer_fn()
838 int pm_schedule_suspend(struct device *dev, unsigned int delay) in pm_schedule_suspend() argument
843 spin_lock_irqsave(&dev->power.lock, flags); in pm_schedule_suspend()
846 retval = rpm_suspend(dev, RPM_ASYNC); in pm_schedule_suspend()
850 retval = rpm_check_suspend_allowed(dev); in pm_schedule_suspend()
855 pm_runtime_cancel_pending(dev); in pm_schedule_suspend()
857 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); in pm_schedule_suspend()
858 dev->power.timer_expires += !dev->power.timer_expires; in pm_schedule_suspend()
859 dev->power.timer_autosuspends = 0; in pm_schedule_suspend()
860 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); in pm_schedule_suspend()
863 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_schedule_suspend()
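
The timer armed here fires pm_suspend_timer_fn() above, which turns into an asynchronous suspend request once the deadline passes. A hedged usage sketch (foo_* hypothetical):

    static void foo_transfer_done(struct device *dev)
    {
            /* Ask for a suspend 250 ms from now rather than suspending
             * synchronously; any resume in the meantime cancels the timer. */
            int ret = pm_schedule_suspend(dev, 250);

            if (ret < 0)
                    dev_dbg(dev, "suspend not scheduled: %d\n", ret);
    }
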
881 int __pm_runtime_idle(struct device *dev, int rpmflags) in __pm_runtime_idle() argument
886 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_idle()
889 if (!atomic_dec_and_test(&dev->power.usage_count)) in __pm_runtime_idle()
893 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_idle()
894 retval = rpm_idle(dev, rpmflags); in __pm_runtime_idle()
895 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_idle()
913 int __pm_runtime_suspend(struct device *dev, int rpmflags) in __pm_runtime_suspend() argument
918 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_suspend()
921 if (!atomic_dec_and_test(&dev->power.usage_count)) in __pm_runtime_suspend()
925 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_suspend()
926 retval = rpm_suspend(dev, rpmflags); in __pm_runtime_suspend()
927 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_suspend()
944 int __pm_runtime_resume(struct device *dev, int rpmflags) in __pm_runtime_resume() argument
949 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_resume()
952 atomic_inc(&dev->power.usage_count); in __pm_runtime_resume()
954 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_resume()
955 retval = rpm_resume(dev, rpmflags); in __pm_runtime_resume()
956 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_resume()
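
These three are the engines behind the public get/put helpers: the atomic_inc()/atomic_dec_and_test() lines above run when the caller passes RPM_GET_PUT, which is what pm_runtime_get_sync() (via __pm_runtime_resume) and pm_runtime_put() (via __pm_runtime_idle) do. The canonical bracketing pattern, as a hedged sketch (foo_* hypothetical):

    static int foo_do_io(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev);     /* resume if suspended */

            if (ret < 0) {
                    pm_runtime_put_noidle(dev);     /* undo the usage count */
                    return ret;
            }
            /* ... touch the hardware ... */
            pm_runtime_put(dev);    /* async idle once the count hits 0 */
            return 0;
    }
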
979 int __pm_runtime_set_status(struct device *dev, unsigned int status) in __pm_runtime_set_status() argument
981 struct device *parent = dev->parent; in __pm_runtime_set_status()
989 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
991 if (!dev->power.runtime_error && !dev->power.disable_depth) { in __pm_runtime_set_status()
996 if (dev->power.runtime_status == status) in __pm_runtime_set_status()
1020 else if (dev->power.runtime_status == RPM_SUSPENDED) in __pm_runtime_set_status()
1030 __update_runtime_status(dev, status); in __pm_runtime_set_status()
1031 dev->power.runtime_error = 0; in __pm_runtime_set_status()
1033 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
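
Per the check on line 991, the status can only be rewritten while runtime PM is disabled for the device (otherwise the call fails). Drivers reach this through thin wrappers; cited from memory, and the exact return types vary by kernel version:

    static inline int pm_runtime_set_active(struct device *dev)
    {
            return __pm_runtime_set_status(dev, RPM_ACTIVE);
    }

    static inline void pm_runtime_set_suspended(struct device *dev)
    {
            __pm_runtime_set_status(dev, RPM_SUSPENDED);
    }
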
1051 static void __pm_runtime_barrier(struct device *dev) in __pm_runtime_barrier() argument
1053 pm_runtime_deactivate_timer(dev); in __pm_runtime_barrier()
1055 if (dev->power.request_pending) { in __pm_runtime_barrier()
1056 dev->power.request = RPM_REQ_NONE; in __pm_runtime_barrier()
1057 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1059 cancel_work_sync(&dev->power.work); in __pm_runtime_barrier()
1061 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1062 dev->power.request_pending = false; in __pm_runtime_barrier()
1065 if (dev->power.runtime_status == RPM_SUSPENDING in __pm_runtime_barrier()
1066 || dev->power.runtime_status == RPM_RESUMING in __pm_runtime_barrier()
1067 || dev->power.idle_notification) { in __pm_runtime_barrier()
1072 prepare_to_wait(&dev->power.wait_queue, &wait, in __pm_runtime_barrier()
1074 if (dev->power.runtime_status != RPM_SUSPENDING in __pm_runtime_barrier()
1075 && dev->power.runtime_status != RPM_RESUMING in __pm_runtime_barrier()
1076 && !dev->power.idle_notification) in __pm_runtime_barrier()
1078 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1082 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1084 finish_wait(&dev->power.wait_queue, &wait); in __pm_runtime_barrier()
1102 int pm_runtime_barrier(struct device *dev) in pm_runtime_barrier() argument
1106 pm_runtime_get_noresume(dev); in pm_runtime_barrier()
1107 spin_lock_irq(&dev->power.lock); in pm_runtime_barrier()
1109 if (dev->power.request_pending in pm_runtime_barrier()
1110 && dev->power.request == RPM_REQ_RESUME) { in pm_runtime_barrier()
1111 rpm_resume(dev, 0); in pm_runtime_barrier()
1115 __pm_runtime_barrier(dev); in pm_runtime_barrier()
1117 spin_unlock_irq(&dev->power.lock); in pm_runtime_barrier()
1118 pm_runtime_put_noidle(dev); in pm_runtime_barrier()
1138 void __pm_runtime_disable(struct device *dev, bool check_resume) in __pm_runtime_disable() argument
1140 spin_lock_irq(&dev->power.lock); in __pm_runtime_disable()
1142 if (dev->power.disable_depth > 0) { in __pm_runtime_disable()
1143 dev->power.disable_depth++; in __pm_runtime_disable()
1152 if (check_resume && dev->power.request_pending in __pm_runtime_disable()
1153 && dev->power.request == RPM_REQ_RESUME) { in __pm_runtime_disable()
1158 pm_runtime_get_noresume(dev); in __pm_runtime_disable()
1160 rpm_resume(dev, 0); in __pm_runtime_disable()
1162 pm_runtime_put_noidle(dev); in __pm_runtime_disable()
1165 if (!dev->power.disable_depth++) in __pm_runtime_disable()
1166 __pm_runtime_barrier(dev); in __pm_runtime_disable()
1169 spin_unlock_irq(&dev->power.lock); in __pm_runtime_disable()
1177 void pm_runtime_enable(struct device *dev) in pm_runtime_enable() argument
1181 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_enable()
1183 if (dev->power.disable_depth > 0) in pm_runtime_enable()
1184 dev->power.disable_depth--; in pm_runtime_enable()
1186 dev_warn(dev, "Unbalanced %s!\n", __func__); in pm_runtime_enable()
1188 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_enable()
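
Enable and disable nest through disable_depth, and pm_runtime_init() below starts every device at disable_depth == 1, so runtime PM stays off until someone enables it. A hedged probe/remove pairing, assuming the hardware is powered when probe runs (foo_* hypothetical):

    static int foo_probe(struct device *dev)
    {
            pm_runtime_set_active(dev);     /* status must match the hardware */
            pm_runtime_enable(dev);         /* disable_depth: 1 -> 0 */
            return 0;
    }

    static int foo_remove(struct device *dev)
    {
            pm_runtime_disable(dev);        /* balances the enable in probe */
            return 0;
    }
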
1200 void pm_runtime_forbid(struct device *dev) in pm_runtime_forbid() argument
1202 spin_lock_irq(&dev->power.lock); in pm_runtime_forbid()
1203 if (!dev->power.runtime_auto) in pm_runtime_forbid()
1206 dev->power.runtime_auto = false; in pm_runtime_forbid()
1207 atomic_inc(&dev->power.usage_count); in pm_runtime_forbid()
1208 rpm_resume(dev, 0); in pm_runtime_forbid()
1211 spin_unlock_irq(&dev->power.lock); in pm_runtime_forbid()
1221 void pm_runtime_allow(struct device *dev) in pm_runtime_allow() argument
1223 spin_lock_irq(&dev->power.lock); in pm_runtime_allow()
1224 if (dev->power.runtime_auto) in pm_runtime_allow()
1227 dev->power.runtime_auto = true; in pm_runtime_allow()
1228 if (atomic_dec_and_test(&dev->power.usage_count)) in pm_runtime_allow()
1229 rpm_idle(dev, RPM_AUTO); in pm_runtime_allow()
1232 spin_unlock_irq(&dev->power.lock); in pm_runtime_allow()
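
forbid/allow back the power/control sysfs attribute: "on" pins the device active by taking a usage count, "auto" drops it again. Buses such as PCI call pm_runtime_forbid() by default, so a driver that wants runtime PM opts in; hedged sketch (foo_* hypothetical):

    static int foo_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
    {
            /* Undo the PCI core's default pm_runtime_forbid(); user space
             * can still write "on" to .../power/control to pin the device. */
            pm_runtime_allow(&pdev->dev);
            return 0;
    }
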
1244 void pm_runtime_no_callbacks(struct device *dev) in pm_runtime_no_callbacks() argument
1246 spin_lock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1247 dev->power.no_callbacks = 1; in pm_runtime_no_callbacks()
1248 spin_unlock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1249 if (device_is_registered(dev)) in pm_runtime_no_callbacks()
1250 rpm_sysfs_remove(dev); in pm_runtime_no_callbacks()
1265 void pm_runtime_irq_safe(struct device *dev) in pm_runtime_irq_safe() argument
1267 if (dev->parent) in pm_runtime_irq_safe()
1268 pm_runtime_get_sync(dev->parent); in pm_runtime_irq_safe()
1269 spin_lock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1270 dev->power.irq_safe = 1; in pm_runtime_irq_safe()
1271 spin_unlock_irq(&dev->power.lock); in pm_runtime_irq_safe()
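
Setting irq_safe switches the locking in __rpm_callback(), rpm_suspend() and rpm_resume() above from spin_lock_irq() to plain spin_lock(), making pm_runtime_get_sync() and friends callable from interrupt context; in exchange the callbacks must never sleep, the flag cannot be cleared, and the parent stays active forever (note the unmatched pm_runtime_get_sync(dev->parent)). Hedged sketch (foo_* hypothetical):

    static int foo_probe(struct device *dev)
    {
            /* Only safe if foo's runtime callbacks just poke registers. */
            pm_runtime_irq_safe(dev);
            pm_runtime_enable(dev);
            return 0;
    }
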
1286 static void update_autosuspend(struct device *dev, int old_delay, int old_use) in update_autosuspend() argument
1288 int delay = dev->power.autosuspend_delay; in update_autosuspend()
1291 if (dev->power.use_autosuspend && delay < 0) { in update_autosuspend()
1295 atomic_inc(&dev->power.usage_count); in update_autosuspend()
1296 rpm_resume(dev, 0); in update_autosuspend()
1305 atomic_dec(&dev->power.usage_count); in update_autosuspend()
1308 rpm_idle(dev, RPM_AUTO); in update_autosuspend()
1321 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) in pm_runtime_set_autosuspend_delay() argument
1325 spin_lock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1326 old_delay = dev->power.autosuspend_delay; in pm_runtime_set_autosuspend_delay()
1327 old_use = dev->power.use_autosuspend; in pm_runtime_set_autosuspend_delay()
1328 dev->power.autosuspend_delay = delay; in pm_runtime_set_autosuspend_delay()
1329 update_autosuspend(dev, old_delay, old_use); in pm_runtime_set_autosuspend_delay()
1330 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1342 void __pm_runtime_use_autosuspend(struct device *dev, bool use) in __pm_runtime_use_autosuspend() argument
1346 spin_lock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
1347 old_delay = dev->power.autosuspend_delay; in __pm_runtime_use_autosuspend()
1348 old_use = dev->power.use_autosuspend; in __pm_runtime_use_autosuspend()
1349 dev->power.use_autosuspend = use; in __pm_runtime_use_autosuspend()
1350 update_autosuspend(dev, old_delay, old_use); in __pm_runtime_use_autosuspend()
1351 spin_unlock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
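
Drivers normally reach this through the pm_runtime_use_autosuspend() wrapper, paired with pm_runtime_set_autosuspend_delay() above; the idiomatic pattern refreshes last_busy and releases the reference without suspending immediately. Hedged sketch (foo_* hypothetical):

    static int foo_probe(struct device *dev)
    {
            pm_runtime_set_autosuspend_delay(dev, 1000);    /* 1 s idle */
            pm_runtime_use_autosuspend(dev);
            pm_runtime_enable(dev);
            return 0;
    }

    static void foo_io_done(struct device *dev)
    {
            /* The core arms the suspend timer from
             * pm_runtime_autosuspend_expiration(). */
            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
    }
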
1359 void pm_runtime_init(struct device *dev) in pm_runtime_init() argument
1361 dev->power.runtime_status = RPM_SUSPENDED; in pm_runtime_init()
1362 dev->power.idle_notification = false; in pm_runtime_init()
1364 dev->power.disable_depth = 1; in pm_runtime_init()
1365 atomic_set(&dev->power.usage_count, 0); in pm_runtime_init()
1367 dev->power.runtime_error = 0; in pm_runtime_init()
1369 atomic_set(&dev->power.child_count, 0); in pm_runtime_init()
1370 pm_suspend_ignore_children(dev, false); in pm_runtime_init()
1371 dev->power.runtime_auto = true; in pm_runtime_init()
1373 dev->power.request_pending = false; in pm_runtime_init()
1374 dev->power.request = RPM_REQ_NONE; in pm_runtime_init()
1375 dev->power.deferred_resume = false; in pm_runtime_init()
1376 dev->power.accounting_timestamp = jiffies; in pm_runtime_init()
1377 INIT_WORK(&dev->power.work, pm_runtime_work); in pm_runtime_init()
1379 dev->power.timer_expires = 0; in pm_runtime_init()
1380 setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn, in pm_runtime_init()
1381 (unsigned long)dev); in pm_runtime_init()
1383 init_waitqueue_head(&dev->power.wait_queue); in pm_runtime_init()
1390 void pm_runtime_remove(struct device *dev) in pm_runtime_remove() argument
1392 __pm_runtime_disable(dev, false); in pm_runtime_remove()
1395 if (dev->power.runtime_status == RPM_ACTIVE) in pm_runtime_remove()
1396 pm_runtime_set_suspended(dev); in pm_runtime_remove()
1397 if (dev->power.irq_safe && dev->parent) in pm_runtime_remove()
1398 pm_runtime_put(dev->parent); in pm_runtime_remove()
1413 int pm_runtime_force_suspend(struct device *dev) in pm_runtime_force_suspend() argument
1418 pm_runtime_disable(dev); in pm_runtime_force_suspend()
1419 if (pm_runtime_status_suspended(dev)) in pm_runtime_force_suspend()
1422 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in pm_runtime_force_suspend()
1429 ret = callback(dev); in pm_runtime_force_suspend()
1433 pm_runtime_set_suspended(dev); in pm_runtime_force_suspend()
1436 pm_runtime_enable(dev); in pm_runtime_force_suspend()
1453 int pm_runtime_force_resume(struct device *dev) in pm_runtime_force_resume() argument
1458 callback = RPM_GET_CALLBACK(dev, runtime_resume); in pm_runtime_force_resume()
1465 ret = pm_runtime_set_active(dev); in pm_runtime_force_resume()
1469 ret = callback(dev); in pm_runtime_force_resume()
1471 pm_runtime_set_suspended(dev); in pm_runtime_force_resume()
1475 pm_runtime_mark_last_busy(dev); in pm_runtime_force_resume()
1477 pm_runtime_enable(dev); in pm_runtime_force_resume()
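
This pair lets a driver reuse its runtime-PM callbacks for system sleep: force_suspend() disables runtime PM and invokes ->runtime_suspend() directly, force_resume() re-activates and re-enables. The usual wiring, with the foo_runtime_* callbacks hypothetical:

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };
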