/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
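
/*
 * Illustrative sketch (editorial addition, not part of the PM core): for
 * pm_op() and its _late/_early and _noirq variants below to find anything,
 * a driver (or bus, class, power domain) has to supply a struct dev_pm_ops.
 * A hypothetical driver "foo" might do, e.g.:
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * and point its driver->pm at foo_pm_ops.  The "foo" names are invented for
 * illustration only.
 */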

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
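
/*
 * Note (editorial sketch): whether is_async() selects a device is governed
 * by its power.async_suspend flag, which drivers or subsystems typically set
 * once at probe time, e.g.:
 *
 *	device_enable_async_suspend(dev);
 *
 * together with the global /sys/power/pm_async switch (pm_async_enabled) and
 * the PM trace facility.
 */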

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the async threads upfront so that they are not
	 * delayed behind devices that have to be resumed synchronously.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the async threads upfront so that they are not
	 * delayed behind devices that have to be resumed synchronously.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		trace_device_pm_callback_start(dev, info, state.event);
		callback(dev);
		trace_device_pm_callback_end(dev, 0);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_suspended_if_enabled(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		trace_device_pm_callback_start(dev, info, state.event);
		ret = callback(dev);
		trace_device_pm_callback_end(dev, ret);
	}

	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
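
/*
 * Editorial sketch (assumption, not taken from this file): a driver that
 * wants to opt into the direct_complete optimization handled above would
 * typically return a positive value from its ->prepare() callback when its
 * runtime PM state is already adequate for system suspend, e.g.:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * "foo_prepare" is an invented name used for illustration only.
 */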

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
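
/*
 * Illustrative sketch (assumption, not part of this file): a driver whose
 * device depends on another device that may be handled asynchronously could
 * call device_pm_wait_for_dev() from one of its own sleep callbacks to make
 * sure that other device has completed its transition first, e.g.:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return device_pm_wait_for_dev(dev, foo->supplier_dev);
 *	}
 *
 * The "foo" names are invented for illustration only.
 */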

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
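
/*
 * Illustrative sketch (assumption, not part of this file): a caller can use
 * dpm_for_each_dev() to walk dpm_list with the PM core lock held, e.g. to
 * count the registered devices:
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *	dpm_for_each_dev(&count, count_dev);
 *
 * "count_dev" is an invented name used for illustration only.
 */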
1721