Lines matching references to "dev" in drivers/base/power/main.c, the Linux device power management core. Each entry gives the kernel source line number, the matching line, the enclosing function, and, where the line declares "dev", the kind of declaration (argument, local, member).

91 void device_pm_sleep_init(struct device *dev)  in device_pm_sleep_init()  argument
93 dev->power.is_prepared = false; in device_pm_sleep_init()
94 dev->power.is_suspended = false; in device_pm_sleep_init()
95 dev->power.is_noirq_suspended = false; in device_pm_sleep_init()
96 dev->power.is_late_suspended = false; in device_pm_sleep_init()
97 init_completion(&dev->power.completion); in device_pm_sleep_init()
98 complete_all(&dev->power.completion); in device_pm_sleep_init()
99 dev->power.wakeup = NULL; in device_pm_sleep_init()
100 INIT_LIST_HEAD(&dev->power.entry); in device_pm_sleep_init()
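The init_completion()/complete_all() pairing above is worth calling out: init_completion() leaves the completion in the "not done" state, and the immediate complete_all() flips it to "done", so anything waiting on the completion before the first suspend/resume cycle returns at once instead of blocking. A minimal sketch of the same pattern, assuming a hypothetical driver-private my_dev_state structure (the completion API itself is real):

#include <linux/completion.h>

struct my_dev_state {
        struct completion ready;
};

static void my_dev_state_init(struct my_dev_state *st)
{
        init_completion(&st->ready);    /* starts out "not done" */
        complete_all(&st->ready);       /* early waiters pass straight through */
}

/* At the start of each transition the PM core re-arms the gate: */
static void my_begin_transition(struct my_dev_state *st)
{
        reinit_completion(&st->ready);  /* waiters block until complete_all() */
}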
123 void device_pm_add(struct device *dev) in device_pm_add() argument
126 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_add()
128 if (dev->parent && dev->parent->power.is_prepared) in device_pm_add()
129 dev_warn(dev, "parent %s should not be sleeping\n", in device_pm_add()
130 dev_name(dev->parent)); in device_pm_add()
131 list_add_tail(&dev->power.entry, &dpm_list); in device_pm_add()
139 void device_pm_remove(struct device *dev) in device_pm_remove() argument
142 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_remove()
143 complete_all(&dev->power.completion); in device_pm_remove()
145 list_del_init(&dev->power.entry); in device_pm_remove()
147 device_wakeup_disable(dev); in device_pm_remove()
148 pm_runtime_remove(dev); in device_pm_remove()
183 void device_pm_move_last(struct device *dev) in device_pm_move_last() argument
186 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_move_last()
187 list_move_tail(&dev->power.entry, &dpm_list); in device_pm_move_last()
190 static ktime_t initcall_debug_start(struct device *dev) in initcall_debug_start() argument
196 dev_name(dev), task_pid_nr(current), in initcall_debug_start()
197 dev->parent ? dev_name(dev->parent) : "none"); in initcall_debug_start()
204 static void initcall_debug_report(struct device *dev, ktime_t calltime, in initcall_debug_report() argument
214 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), in initcall_debug_report()
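initcall_debug_start() and initcall_debug_report() bracket each PM callback with a timestamp and log the elapsed time. A self-contained sketch of that bracketing, assuming a hypothetical my_timed_call() wrapper (the ktime API and dev_info() are real):

#include <linux/ktime.h>
#include <linux/device.h>

static int my_timed_call(struct device *dev, int (*cb)(struct device *))
{
        ktime_t start = ktime_get();
        int error = cb(dev);
        s64 usecs = ktime_to_us(ktime_sub(ktime_get(), start));

        dev_info(dev, "callback returned %d after %lld usecs\n", error, usecs);
        return error;
}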
224 static void dpm_wait(struct device *dev, bool async) in dpm_wait() argument
226 if (!dev) in dpm_wait()
229 if (async || (pm_async_enabled && dev->power.async_suspend)) in dpm_wait()
230 wait_for_completion(&dev->power.completion); in dpm_wait()
233 static int dpm_wait_fn(struct device *dev, void *async_ptr) in dpm_wait_fn() argument
235 dpm_wait(dev, *((bool *)async_ptr)); in dpm_wait_fn()
239 static void dpm_wait_for_children(struct device *dev, bool async) in dpm_wait_for_children() argument
241 device_for_each_child(dev, &async, dpm_wait_fn); in dpm_wait_for_children()
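dpm_wait() and dpm_wait_for_children() encode the ordering discipline of the whole file: resume paths wait on the parent's completion before touching a child, and suspend paths wait on every child before touching the parent. A simplified sketch of that shape (my_wait* are hypothetical names; wait_for_completion() and device_for_each_child() are real; the real dpm_wait() additionally blocks only when the async flags shown above are set):

#include <linux/device.h>
#include <linux/completion.h>

static void my_wait(struct device *dev)
{
        if (dev)        /* root devices have no parent to wait on */
                wait_for_completion(&dev->power.completion);
}

static int my_wait_fn(struct device *dev, void *data)
{
        my_wait(dev);
        return 0;       /* keep iterating over the remaining children */
}

static void my_wait_for_children(struct device *dev)
{
        device_for_each_child(dev, NULL, my_wait_fn);
}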
344 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) in pm_dev_dbg() argument
346 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), in pm_dev_dbg()
347 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? in pm_dev_dbg()
351 static void pm_dev_err(struct device *dev, pm_message_t state, char *info, in pm_dev_err() argument
355 dev_name(dev), pm_verb(state.event), info, error); in pm_dev_err()
375 static int dpm_run_callback(pm_callback_t cb, struct device *dev, in dpm_run_callback() argument
384 calltime = initcall_debug_start(dev); in dpm_run_callback()
386 pm_dev_dbg(dev, state, info); in dpm_run_callback()
387 trace_device_pm_callback_start(dev, info, state.event); in dpm_run_callback()
388 error = cb(dev); in dpm_run_callback()
389 trace_device_pm_callback_end(dev, error); in dpm_run_callback()
392 initcall_debug_report(dev, calltime, error, state, info); in dpm_run_callback()
399 struct device *dev; member
419 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); in dpm_watchdog_handler()
422 dev_driver_string(wd->dev), dev_name(wd->dev)); in dpm_watchdog_handler()
430 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) in dpm_watchdog_set() argument
434 wd->dev = dev; in dpm_watchdog_set()
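The dpm_watchdog machinery arms a timer before a device's PM callback runs and reports loudly if the callback is still running when the budget expires. A sketch of the idea, assuming a hypothetical my_watchdog type and an illustrative MY_DPM_TIMEOUT budget; the setup_timer()/mod_timer() calls match the timer API of this kernel vintage (modern kernels use timer_setup() instead):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/device.h>

#define MY_DPM_TIMEOUT  (12 * HZ)       /* illustrative, not the real constant */

struct my_watchdog {
        struct device *dev;
        struct task_struct *tsk;
        struct timer_list timer;
};

static void my_watchdog_handler(unsigned long data)
{
        struct my_watchdog *wd = (void *)data;

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);      /* dump the stuck task's stack */
}

static void my_watchdog_set(struct my_watchdog *wd, struct device *dev)
{
        wd->dev = dev;
        wd->tsk = current;
        setup_timer(&wd->timer, my_watchdog_handler, (unsigned long)wd);
        mod_timer(&wd->timer, jiffies + MY_DPM_TIMEOUT);
}

static void my_watchdog_clear(struct my_watchdog *wd)
{
        del_timer_sync(&wd->timer);     /* disarm once the callback returns */
}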
473 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) in device_resume_noirq() argument
479 TRACE_DEVICE(dev); in device_resume_noirq()
482 if (dev->power.syscore || dev->power.direct_complete) in device_resume_noirq()
485 if (!dev->power.is_noirq_suspended) in device_resume_noirq()
488 dpm_wait(dev->parent, async); in device_resume_noirq()
490 if (dev->pm_domain) { in device_resume_noirq()
492 callback = pm_noirq_op(&dev->pm_domain->ops, state); in device_resume_noirq()
493 } else if (dev->type && dev->type->pm) { in device_resume_noirq()
495 callback = pm_noirq_op(dev->type->pm, state); in device_resume_noirq()
496 } else if (dev->class && dev->class->pm) { in device_resume_noirq()
498 callback = pm_noirq_op(dev->class->pm, state); in device_resume_noirq()
499 } else if (dev->bus && dev->bus->pm) { in device_resume_noirq()
501 callback = pm_noirq_op(dev->bus->pm, state); in device_resume_noirq()
504 if (!callback && dev->driver && dev->driver->pm) { in device_resume_noirq()
506 callback = pm_noirq_op(dev->driver->pm, state); in device_resume_noirq()
509 error = dpm_run_callback(callback, dev, state, info); in device_resume_noirq()
510 dev->power.is_noirq_suspended = false; in device_resume_noirq()
513 complete_all(&dev->power.completion); in device_resume_noirq()
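device_resume_noirq() shows the callback lookup order that every phase in this file repeats: PM domain first, then device type, class, and bus, with the driver's own ops consulted only when none of the upper layers supplied a table. A condensed sketch of that cascade (my_pm_cb_t and my_pick_cb() are hypothetical; the struct device fields are real):

#include <linux/device.h>
#include <linux/pm.h>

typedef int (*my_pm_cb_t)(struct device *);

static my_pm_cb_t my_pick_cb(struct device *dev)
{
        my_pm_cb_t cb = NULL;

        if (dev->pm_domain)
                cb = dev->pm_domain->ops.suspend;
        else if (dev->type && dev->type->pm)
                cb = dev->type->pm->suspend;
        else if (dev->class && dev->class->pm)
                cb = dev->class->pm->suspend;
        else if (dev->bus && dev->bus->pm)
                cb = dev->bus->pm->suspend;

        if (!cb && dev->driver && dev->driver->pm)
                cb = dev->driver->pm->suspend;

        return cb;      /* NULL is fine: dpm_run_callback() treats it as success */
}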
518 static bool is_async(struct device *dev) in is_async() argument
520 return dev->power.async_suspend && pm_async_enabled in is_async()
526 struct device *dev = (struct device *)data; in async_resume_noirq() local
529 error = device_resume_noirq(dev, pm_transition, true); in async_resume_noirq()
531 pm_dev_err(dev, pm_transition, " async", error); in async_resume_noirq()
533 put_device(dev); in async_resume_noirq()
545 struct device *dev; in dpm_resume_noirq() local
557 list_for_each_entry(dev, &dpm_noirq_list, power.entry) { in dpm_resume_noirq()
558 reinit_completion(&dev->power.completion); in dpm_resume_noirq()
559 if (is_async(dev)) { in dpm_resume_noirq()
560 get_device(dev); in dpm_resume_noirq()
561 async_schedule(async_resume_noirq, dev); in dpm_resume_noirq()
566 dev = to_device(dpm_noirq_list.next); in dpm_resume_noirq()
567 get_device(dev); in dpm_resume_noirq()
568 list_move_tail(&dev->power.entry, &dpm_late_early_list); in dpm_resume_noirq()
571 if (!is_async(dev)) { in dpm_resume_noirq()
574 error = device_resume_noirq(dev, state, false); in dpm_resume_noirq()
578 dpm_save_failed_dev(dev_name(dev)); in dpm_resume_noirq()
579 pm_dev_err(dev, state, " noirq", error); in dpm_resume_noirq()
584 put_device(dev); in dpm_resume_noirq()
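The loop above also shows the async fan-out pattern used by every phase: re-initialize the completion, and for async-capable devices take a reference and hand the work to the async machinery, dropping the reference in the worker. A sketch with hypothetical my_* workers (async_schedule(), get_device()/put_device(), and the pm_async_enabled/power.async_suspend knobs checked by is_async() are real):

#include <linux/async.h>
#include <linux/device.h>
#include <linux/completion.h>

static int my_resume_one(struct device *dev)
{
        return 0;               /* phase-specific work would go here */
}

static void my_async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = data;

        my_resume_one(dev);
        put_device(dev);        /* balances get_device() below */
}

static void my_schedule(struct device *dev)
{
        reinit_completion(&dev->power.completion);
        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);        /* keep dev alive for the worker */
                async_schedule(my_async_resume, dev);
        }
}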
602 static int device_resume_early(struct device *dev, pm_message_t state, bool async) in device_resume_early() argument
608 TRACE_DEVICE(dev); in device_resume_early()
611 if (dev->power.syscore || dev->power.direct_complete) in device_resume_early()
614 if (!dev->power.is_late_suspended) in device_resume_early()
617 dpm_wait(dev->parent, async); in device_resume_early()
619 if (dev->pm_domain) { in device_resume_early()
621 callback = pm_late_early_op(&dev->pm_domain->ops, state); in device_resume_early()
622 } else if (dev->type && dev->type->pm) { in device_resume_early()
624 callback = pm_late_early_op(dev->type->pm, state); in device_resume_early()
625 } else if (dev->class && dev->class->pm) { in device_resume_early()
627 callback = pm_late_early_op(dev->class->pm, state); in device_resume_early()
628 } else if (dev->bus && dev->bus->pm) { in device_resume_early()
630 callback = pm_late_early_op(dev->bus->pm, state); in device_resume_early()
633 if (!callback && dev->driver && dev->driver->pm) { in device_resume_early()
635 callback = pm_late_early_op(dev->driver->pm, state); in device_resume_early()
638 error = dpm_run_callback(callback, dev, state, info); in device_resume_early()
639 dev->power.is_late_suspended = false; in device_resume_early()
644 pm_runtime_enable(dev); in device_resume_early()
645 complete_all(&dev->power.completion); in device_resume_early()
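Note the bracketing around the late/early phases: __device_suspend_late() opens with __pm_runtime_disable() (visible at line 1167 below) and device_resume_early() closes with the pm_runtime_enable() shown just above, so runtime PM cannot run callbacks while the system transition owns the device. A sketch of that pairing (my_* names are hypothetical; the pm_runtime calls are real):

#include <linux/pm_runtime.h>

static void my_late_suspend_enter(struct device *dev)
{
        /* false: don't act on a pending runtime-resume request first */
        __pm_runtime_disable(dev, false);
        /* ... ->suspend_late() callback runs here ... */
}

static void my_early_resume_exit(struct device *dev)
{
        /* ... ->resume_early() callback ran above ... */
        pm_runtime_enable(dev);         /* runtime PM may run again */
}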
651 struct device *dev = (struct device *)data; in async_resume_early() local
654 error = device_resume_early(dev, pm_transition, true); in async_resume_early()
656 pm_dev_err(dev, pm_transition, " async", error); in async_resume_early()
658 put_device(dev); in async_resume_early()
667 struct device *dev; in dpm_resume_early() local
679 list_for_each_entry(dev, &dpm_late_early_list, power.entry) { in dpm_resume_early()
680 reinit_completion(&dev->power.completion); in dpm_resume_early()
681 if (is_async(dev)) { in dpm_resume_early()
682 get_device(dev); in dpm_resume_early()
683 async_schedule(async_resume_early, dev); in dpm_resume_early()
688 dev = to_device(dpm_late_early_list.next); in dpm_resume_early()
689 get_device(dev); in dpm_resume_early()
690 list_move_tail(&dev->power.entry, &dpm_suspended_list); in dpm_resume_early()
693 if (!is_async(dev)) { in dpm_resume_early()
696 error = device_resume_early(dev, state, false); in dpm_resume_early()
700 dpm_save_failed_dev(dev_name(dev)); in dpm_resume_early()
701 pm_dev_err(dev, state, " early", error); in dpm_resume_early()
705 put_device(dev); in dpm_resume_early()
730 static int device_resume(struct device *dev, pm_message_t state, bool async) in device_resume() argument
737 TRACE_DEVICE(dev); in device_resume()
740 if (dev->power.syscore) in device_resume()
743 if (dev->power.direct_complete) { in device_resume()
745 pm_runtime_enable(dev); in device_resume()
749 dpm_wait(dev->parent, async); in device_resume()
750 dpm_watchdog_set(&wd, dev); in device_resume()
751 device_lock(dev); in device_resume()
757 dev->power.is_prepared = false; in device_resume()
759 if (!dev->power.is_suspended) in device_resume()
762 if (dev->pm_domain) { in device_resume()
764 callback = pm_op(&dev->pm_domain->ops, state); in device_resume()
768 if (dev->type && dev->type->pm) { in device_resume()
770 callback = pm_op(dev->type->pm, state); in device_resume()
774 if (dev->class) { in device_resume()
775 if (dev->class->pm) { in device_resume()
777 callback = pm_op(dev->class->pm, state); in device_resume()
779 } else if (dev->class->resume) { in device_resume()
781 callback = dev->class->resume; in device_resume()
786 if (dev->bus) { in device_resume()
787 if (dev->bus->pm) { in device_resume()
789 callback = pm_op(dev->bus->pm, state); in device_resume()
790 } else if (dev->bus->resume) { in device_resume()
792 callback = dev->bus->resume; in device_resume()
798 if (!callback && dev->driver && dev->driver->pm) { in device_resume()
800 callback = pm_op(dev->driver->pm, state); in device_resume()
804 error = dpm_run_callback(callback, dev, state, info); in device_resume()
805 dev->power.is_suspended = false; in device_resume()
808 device_unlock(dev); in device_resume()
812 complete_all(&dev->power.completion); in device_resume()
821 struct device *dev = (struct device *)data; in async_resume() local
824 error = device_resume(dev, pm_transition, true); in async_resume()
826 pm_dev_err(dev, pm_transition, " async", error); in async_resume()
827 put_device(dev); in async_resume()
839 struct device *dev; in dpm_resume() local
849 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { in dpm_resume()
850 reinit_completion(&dev->power.completion); in dpm_resume()
851 if (is_async(dev)) { in dpm_resume()
852 get_device(dev); in dpm_resume()
853 async_schedule(async_resume, dev); in dpm_resume()
858 dev = to_device(dpm_suspended_list.next); in dpm_resume()
859 get_device(dev); in dpm_resume()
860 if (!is_async(dev)) { in dpm_resume()
865 error = device_resume(dev, state, false); in dpm_resume()
869 dpm_save_failed_dev(dev_name(dev)); in dpm_resume()
870 pm_dev_err(dev, state, "", error); in dpm_resume()
875 if (!list_empty(&dev->power.entry)) in dpm_resume()
876 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_resume()
877 put_device(dev); in dpm_resume()
892 static void device_complete(struct device *dev, pm_message_t state) in device_complete() argument
897 if (dev->power.syscore) in device_complete()
900 device_lock(dev); in device_complete()
902 if (dev->pm_domain) { in device_complete()
904 callback = dev->pm_domain->ops.complete; in device_complete()
905 } else if (dev->type && dev->type->pm) { in device_complete()
907 callback = dev->type->pm->complete; in device_complete()
908 } else if (dev->class && dev->class->pm) { in device_complete()
910 callback = dev->class->pm->complete; in device_complete()
911 } else if (dev->bus && dev->bus->pm) { in device_complete()
913 callback = dev->bus->pm->complete; in device_complete()
916 if (!callback && dev->driver && dev->driver->pm) { in device_complete()
918 callback = dev->driver->pm->complete; in device_complete()
922 pm_dev_dbg(dev, state, info); in device_complete()
923 trace_device_pm_callback_start(dev, info, state.event); in device_complete()
924 callback(dev); in device_complete()
925 trace_device_pm_callback_end(dev, 0); in device_complete()
928 device_unlock(dev); in device_complete()
930 pm_runtime_put(dev); in device_complete()
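device_complete() ends with pm_runtime_put(), which pairs with the pm_runtime_get_noresume() taken in device_prepare() (line 1564 below): the runtime-PM usage count is pinned for the whole transition without waking the device. A minimal sketch of the pairing (my_* names are hypothetical):

#include <linux/pm_runtime.h>

static void my_transition_begin(struct device *dev)
{
        pm_runtime_get_noresume(dev);   /* pin usage count, no wakeup */
}

static void my_transition_end(struct device *dev)
{
        pm_runtime_put(dev);            /* allow runtime suspend again */
}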
950 struct device *dev = to_device(dpm_prepared_list.prev); in dpm_complete() local
952 get_device(dev); in dpm_complete()
953 dev->power.is_prepared = false; in dpm_complete()
954 list_move(&dev->power.entry, &list); in dpm_complete()
957 device_complete(dev, state); in dpm_complete()
960 put_device(dev); in dpm_complete()
1014 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) in __device_suspend_noirq() argument
1020 TRACE_DEVICE(dev); in __device_suspend_noirq()
1031 if (dev->power.syscore || dev->power.direct_complete) in __device_suspend_noirq()
1034 dpm_wait_for_children(dev, async); in __device_suspend_noirq()
1036 if (dev->pm_domain) { in __device_suspend_noirq()
1038 callback = pm_noirq_op(&dev->pm_domain->ops, state); in __device_suspend_noirq()
1039 } else if (dev->type && dev->type->pm) { in __device_suspend_noirq()
1041 callback = pm_noirq_op(dev->type->pm, state); in __device_suspend_noirq()
1042 } else if (dev->class && dev->class->pm) { in __device_suspend_noirq()
1044 callback = pm_noirq_op(dev->class->pm, state); in __device_suspend_noirq()
1045 } else if (dev->bus && dev->bus->pm) { in __device_suspend_noirq()
1047 callback = pm_noirq_op(dev->bus->pm, state); in __device_suspend_noirq()
1050 if (!callback && dev->driver && dev->driver->pm) { in __device_suspend_noirq()
1052 callback = pm_noirq_op(dev->driver->pm, state); in __device_suspend_noirq()
1055 error = dpm_run_callback(callback, dev, state, info); in __device_suspend_noirq()
1057 dev->power.is_noirq_suspended = true; in __device_suspend_noirq()
1062 complete_all(&dev->power.completion); in __device_suspend_noirq()
1069 struct device *dev = (struct device *)data; in async_suspend_noirq() local
1072 error = __device_suspend_noirq(dev, pm_transition, true); in async_suspend_noirq()
1074 dpm_save_failed_dev(dev_name(dev)); in async_suspend_noirq()
1075 pm_dev_err(dev, pm_transition, " async", error); in async_suspend_noirq()
1078 put_device(dev); in async_suspend_noirq()
1081 static int device_suspend_noirq(struct device *dev) in device_suspend_noirq() argument
1083 reinit_completion(&dev->power.completion); in device_suspend_noirq()
1085 if (is_async(dev)) { in device_suspend_noirq()
1086 get_device(dev); in device_suspend_noirq()
1087 async_schedule(async_suspend_noirq, dev); in device_suspend_noirq()
1090 return __device_suspend_noirq(dev, pm_transition, false); in device_suspend_noirq()
1113 struct device *dev = to_device(dpm_late_early_list.prev); in dpm_suspend_noirq() local
1115 get_device(dev); in dpm_suspend_noirq()
1118 error = device_suspend_noirq(dev); in dpm_suspend_noirq()
1122 pm_dev_err(dev, state, " noirq", error); in dpm_suspend_noirq()
1123 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend_noirq()
1124 put_device(dev); in dpm_suspend_noirq()
1127 if (!list_empty(&dev->power.entry)) in dpm_suspend_noirq()
1128 list_move(&dev->power.entry, &dpm_noirq_list); in dpm_suspend_noirq()
1129 put_device(dev); in dpm_suspend_noirq()
1158 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) in __device_suspend_late() argument
1164 TRACE_DEVICE(dev); in __device_suspend_late()
1167 __pm_runtime_disable(dev, false); in __device_suspend_late()
1177 if (dev->power.syscore || dev->power.direct_complete) in __device_suspend_late()
1180 dpm_wait_for_children(dev, async); in __device_suspend_late()
1182 if (dev->pm_domain) { in __device_suspend_late()
1184 callback = pm_late_early_op(&dev->pm_domain->ops, state); in __device_suspend_late()
1185 } else if (dev->type && dev->type->pm) { in __device_suspend_late()
1187 callback = pm_late_early_op(dev->type->pm, state); in __device_suspend_late()
1188 } else if (dev->class && dev->class->pm) { in __device_suspend_late()
1190 callback = pm_late_early_op(dev->class->pm, state); in __device_suspend_late()
1191 } else if (dev->bus && dev->bus->pm) { in __device_suspend_late()
1193 callback = pm_late_early_op(dev->bus->pm, state); in __device_suspend_late()
1196 if (!callback && dev->driver && dev->driver->pm) { in __device_suspend_late()
1198 callback = pm_late_early_op(dev->driver->pm, state); in __device_suspend_late()
1201 error = dpm_run_callback(callback, dev, state, info); in __device_suspend_late()
1203 dev->power.is_late_suspended = true; in __device_suspend_late()
1209 complete_all(&dev->power.completion); in __device_suspend_late()
1215 struct device *dev = (struct device *)data; in async_suspend_late() local
1218 error = __device_suspend_late(dev, pm_transition, true); in async_suspend_late()
1220 dpm_save_failed_dev(dev_name(dev)); in async_suspend_late()
1221 pm_dev_err(dev, pm_transition, " async", error); in async_suspend_late()
1223 put_device(dev); in async_suspend_late()
1226 static int device_suspend_late(struct device *dev) in device_suspend_late() argument
1228 reinit_completion(&dev->power.completion); in device_suspend_late()
1230 if (is_async(dev)) { in device_suspend_late()
1231 get_device(dev); in device_suspend_late()
1232 async_schedule(async_suspend_late, dev); in device_suspend_late()
1236 return __device_suspend_late(dev, pm_transition, false); in device_suspend_late()
1254 struct device *dev = to_device(dpm_suspended_list.prev); in dpm_suspend_late() local
1256 get_device(dev); in dpm_suspend_late()
1259 error = device_suspend_late(dev); in dpm_suspend_late()
1262 if (!list_empty(&dev->power.entry)) in dpm_suspend_late()
1263 list_move(&dev->power.entry, &dpm_late_early_list); in dpm_suspend_late()
1266 pm_dev_err(dev, state, " late", error); in dpm_suspend_late()
1267 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend_late()
1268 put_device(dev); in dpm_suspend_late()
1271 put_device(dev); in dpm_suspend_late()
1318 static int legacy_suspend(struct device *dev, pm_message_t state, in legacy_suspend() argument
1319 int (*cb)(struct device *dev, pm_message_t state), in legacy_suspend() argument
1325 calltime = initcall_debug_start(dev); in legacy_suspend()
1327 trace_device_pm_callback_start(dev, info, state.event); in legacy_suspend()
1328 error = cb(dev, state); in legacy_suspend()
1329 trace_device_pm_callback_end(dev, error); in legacy_suspend()
1332 initcall_debug_report(dev, calltime, error, state, info); in legacy_suspend()
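legacy_suspend() adapts the pre-dev_pm_ops callback convention, in which the hook takes a pm_message_t alongside the device. A sketch of what such a legacy bus/class hook looks like (my_legacy_suspend() is hypothetical; the signature matches dev->bus->suspend and dev->class->suspend):

#include <linux/device.h>
#include <linux/pm.h>

static int my_legacy_suspend(struct device *dev, pm_message_t state)
{
        if (state.event == PM_EVENT_SUSPEND) {
                /* quiesce the hardware */
        }
        return 0;
}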
1343 static int __device_suspend(struct device *dev, pm_message_t state, bool async) in __device_suspend() argument
1350 TRACE_DEVICE(dev); in __device_suspend()
1353 dpm_wait_for_children(dev, async); in __device_suspend()
1364 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) in __device_suspend()
1365 pm_wakeup_event(dev, 0); in __device_suspend()
1372 if (dev->power.syscore) in __device_suspend()
1375 if (dev->power.direct_complete) { in __device_suspend()
1376 if (pm_runtime_status_suspended(dev)) { in __device_suspend()
1377 pm_runtime_disable(dev); in __device_suspend()
1378 if (pm_runtime_suspended_if_enabled(dev)) in __device_suspend()
1381 pm_runtime_enable(dev); in __device_suspend()
1383 dev->power.direct_complete = false; in __device_suspend()
1386 dpm_watchdog_set(&wd, dev); in __device_suspend()
1387 device_lock(dev); in __device_suspend()
1389 if (dev->pm_domain) { in __device_suspend()
1391 callback = pm_op(&dev->pm_domain->ops, state); in __device_suspend()
1395 if (dev->type && dev->type->pm) { in __device_suspend()
1397 callback = pm_op(dev->type->pm, state); in __device_suspend()
1401 if (dev->class) { in __device_suspend()
1402 if (dev->class->pm) { in __device_suspend()
1404 callback = pm_op(dev->class->pm, state); in __device_suspend()
1406 } else if (dev->class->suspend) { in __device_suspend()
1407 pm_dev_dbg(dev, state, "legacy class "); in __device_suspend()
1408 error = legacy_suspend(dev, state, dev->class->suspend, in __device_suspend()
1414 if (dev->bus) { in __device_suspend()
1415 if (dev->bus->pm) { in __device_suspend()
1417 callback = pm_op(dev->bus->pm, state); in __device_suspend()
1418 } else if (dev->bus->suspend) { in __device_suspend()
1419 pm_dev_dbg(dev, state, "legacy bus "); in __device_suspend()
1420 error = legacy_suspend(dev, state, dev->bus->suspend, in __device_suspend()
1427 if (!callback && dev->driver && dev->driver->pm) { in __device_suspend()
1429 callback = pm_op(dev->driver->pm, state); in __device_suspend()
1432 error = dpm_run_callback(callback, dev, state, info); in __device_suspend()
1436 struct device *parent = dev->parent; in __device_suspend()
1438 dev->power.is_suspended = true; in __device_suspend()
1442 dev->parent->power.direct_complete = false; in __device_suspend()
1443 if (dev->power.wakeup_path in __device_suspend()
1444 && !dev->parent->power.ignore_children) in __device_suspend()
1445 dev->parent->power.wakeup_path = true; in __device_suspend()
1451 device_unlock(dev); in __device_suspend()
1455 complete_all(&dev->power.completion); in __device_suspend()
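The tail of __device_suspend() propagates power.wakeup_path from child to parent (lines 1443-1445) so intermediate layers know a wakeup source sits below them; the flag is seeded from device_may_wakeup() in device_prepare() (line 1568 below). For that to fire, a driver must have declared and enabled wakeup, typically at probe time; a sketch (my_probe_fragment() is hypothetical, device_init_wakeup() is real):

#include <linux/pm_wakeup.h>

static void my_probe_fragment(struct device *dev)
{
        /* mark the device wakeup-capable and enable it, so
         * device_may_wakeup(dev) returns true during suspend */
        device_init_wakeup(dev, true);
}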
1465 struct device *dev = (struct device *)data; in async_suspend() local
1468 error = __device_suspend(dev, pm_transition, true); in async_suspend()
1470 dpm_save_failed_dev(dev_name(dev)); in async_suspend()
1471 pm_dev_err(dev, pm_transition, " async", error); in async_suspend()
1474 put_device(dev); in async_suspend()
1477 static int device_suspend(struct device *dev) in device_suspend() argument
1479 reinit_completion(&dev->power.completion); in device_suspend()
1481 if (is_async(dev)) { in device_suspend()
1482 get_device(dev); in device_suspend()
1483 async_schedule(async_suspend, dev); in device_suspend()
1487 return __device_suspend(dev, pm_transition, false); in device_suspend()
1508 struct device *dev = to_device(dpm_prepared_list.prev); in dpm_suspend() local
1510 get_device(dev); in dpm_suspend()
1513 error = device_suspend(dev); in dpm_suspend()
1517 pm_dev_err(dev, state, "", error); in dpm_suspend()
1518 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend()
1519 put_device(dev); in dpm_suspend()
1522 if (!list_empty(&dev->power.entry)) in dpm_suspend()
1523 list_move(&dev->power.entry, &dpm_suspended_list); in dpm_suspend()
1524 put_device(dev); in dpm_suspend()
1549 static int device_prepare(struct device *dev, pm_message_t state) in device_prepare() argument
1555 if (dev->power.syscore) in device_prepare()
1564 pm_runtime_get_noresume(dev); in device_prepare()
1566 device_lock(dev); in device_prepare()
1568 dev->power.wakeup_path = device_may_wakeup(dev); in device_prepare()
1570 if (dev->pm_domain) { in device_prepare()
1572 callback = dev->pm_domain->ops.prepare; in device_prepare()
1573 } else if (dev->type && dev->type->pm) { in device_prepare()
1575 callback = dev->type->pm->prepare; in device_prepare()
1576 } else if (dev->class && dev->class->pm) { in device_prepare()
1578 callback = dev->class->pm->prepare; in device_prepare()
1579 } else if (dev->bus && dev->bus->pm) { in device_prepare()
1581 callback = dev->bus->pm->prepare; in device_prepare()
1584 if (!callback && dev->driver && dev->driver->pm) { in device_prepare()
1586 callback = dev->driver->pm->prepare; in device_prepare()
1590 trace_device_pm_callback_start(dev, info, state.event); in device_prepare()
1591 ret = callback(dev); in device_prepare()
1592 trace_device_pm_callback_end(dev, ret); in device_prepare()
1595 device_unlock(dev); in device_prepare()
1599 pm_runtime_put(dev); in device_prepare()
1609 spin_lock_irq(&dev->power.lock); in device_prepare()
1610 dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; in device_prepare()
1611 spin_unlock_irq(&dev->power.lock); in device_prepare()
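The spin-locked assignment at line 1610 is where the "direct complete" optimization is decided: a positive return from ->prepare() during a PM_EVENT_SUSPEND transition sets power.direct_complete, and __device_suspend() (line 1375 above) then skips the device entirely if it is already runtime-suspended. A hypothetical driver opts in like this (my_prepare() is illustrative; pm_runtime_status_suspended() is real):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int my_prepare(struct device *dev)
{
        /* returning > 0 asks the core to leave a runtime-suspended
         * device alone for the whole system suspend/resume cycle */
        return pm_runtime_status_suspended(dev) ? 1 : 0;
}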
1630 struct device *dev = to_device(dpm_list.next); in dpm_prepare() local
1632 get_device(dev); in dpm_prepare()
1635 error = device_prepare(dev, state); in dpm_prepare()
1640 put_device(dev); in dpm_prepare()
1646 dev_name(dev), error); in dpm_prepare()
1647 put_device(dev); in dpm_prepare()
1650 dev->power.is_prepared = true; in dpm_prepare()
1651 if (!list_empty(&dev->power.entry)) in dpm_prepare()
1652 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_prepare()
1653 put_device(dev); in dpm_prepare()
1693 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) in device_pm_wait_for_dev() argument
1695 dpm_wait(dev, subordinate->power.async_suspend); in device_pm_wait_for_dev()
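device_pm_wait_for_dev() is the exported hook for cross-device ordering that dpm_list order cannot express: a driver whose resume depends on another device can block until that device's transition completes. A usage sketch (my_resume() and the drvdata-based supplier lookup are hypothetical):

#include <linux/pm.h>
#include <linux/device.h>

static int my_resume(struct device *dev)
{
        struct device *supplier = dev_get_drvdata(dev); /* illustrative */

        device_pm_wait_for_dev(dev, supplier);  /* wait for supplier first */
        /* ... now safe to bring this device back up ... */
        return 0;
}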
1710 struct device *dev; in dpm_for_each_dev() local
1716 list_for_each_entry(dev, &dpm_list, power.entry) in dpm_for_each_dev()
1717 fn(dev, data); in dpm_for_each_dev()
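dpm_for_each_dev() runs a caller-supplied function over every device on dpm_list while holding the PM list lock. A usage sketch (my_dump_one() and my_dump_all() are hypothetical):

#include <linux/pm.h>
#include <linux/device.h>

static void my_dump_one(struct device *dev, void *data)
{
        dev_info(dev, "on dpm_list\n");
}

static void my_dump_all(void)
{
        dpm_for_each_dev(NULL, my_dump_one);
}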