/*
 *  cpuidle-powernv - idle state cpuidle driver.
 *  Adapted from drivers/cpuidle/cpuidle-pseries
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/clockchips.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/runlatch.h>

#define MAX_POWERNV_IDLE_STATES	8

static struct cpuidle_driver powernv_idle_driver = {
	.name             = "powernv_idle",
	.owner            = THIS_MODULE,
};

static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;

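/*
 * snooze_loop - Spin at low SMT thread priority until a reschedule is
 * pending. TIF_POLLING_NRFLAG tells the scheduler this CPU is polling,
 * so it can be woken without an IPI.
 */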
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}

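/*
 * nap_loop - Enter the hardware "Nap" state via power7_idle(). The runlatch
 * is cleared for the duration of the nap to mark the thread as idle.
 */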
static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	ppc64_runlatch_off();
	power7_idle();
	ppc64_runlatch_on();
	return index;
}

/* Register for fastsleep only in oneshot mode of broadcast */
#ifdef CONFIG_TICK_ONESHOT
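/*
 * fastsleep_loop - Enter the deeper "FastSleep" state. Decrementer wakeup
 * (LPCR_PECE1) is disabled around power7_sleep() because the local timer
 * is offloaded to the broadcast framework for this state.
 */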
static int fastsleep_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long old_lpcr = mfspr(SPRN_LPCR);
	unsigned long new_lpcr;

	if (unlikely(system_state < SYSTEM_RUNNING))
		return index;

	new_lpcr = old_lpcr;
	/*
	 * Do not exit powersave upon decrementer as we've set up the timer
	 * offload.
	 */
	new_lpcr &= ~LPCR_PECE1;

	mtspr(SPRN_LPCR, new_lpcr);
	power7_sleep();

	mtspr(SPRN_LPCR, old_lpcr);

	return index;
}
#endif

/*
 * Statically defined idle states. Snooze is always present; Nap and
 * FastSleep are filled in from the device tree by powernv_add_idle_states().
 */
static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
};

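/*
 * powernv_cpuidle_add_cpu_notifier - CPU hotplug callback: enable the
 * cpuidle device when a CPU comes online, disable it when the CPU dies.
 */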
static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev =
				per_cpu(cpuidle_devices, hotcpu);

	if (dev && cpuidle_get_driver()) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_enable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_disable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		default:
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = powernv_cpuidle_add_cpu_notifier,
};

/*
 * powernv_cpuidle_driver_init - populate the driver's state table with the
 * enabled entries from cpuidle_state_table.
 */
static int powernv_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &powernv_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
		/* Is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

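/*
 * powernv_add_idle_states - Parse the ibm,cpu-idle-state-* properties under
 * /ibm,opal/power-mgt and append the advertised states (Nap, FastSleep) to
 * powernv_states[]. Returns the total number of idle states, including the
 * statically defined snooze state.
 */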
static int powernv_add_idle_states(void)
{
	struct device_node *power_mgt;
	int nr_idle_states = 1; /* Snooze */
	int dt_idle_states;
	u32 *latency_ns, *residency_ns, *flags;
	int i, rc;

	/* Currently we have snooze statically defined */

	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!power_mgt) {
		pr_warn("opal: PowerMgmt Node not found\n");
		goto out;
	}

	/* Read values of any property to determine the num of idle states */
	dt_idle_states = of_property_count_u32_elems(power_mgt,
						     "ibm,cpu-idle-state-flags");
	if (dt_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		goto out;
	}

	flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
	if (!flags)
		goto out;

	if (of_property_read_u32_array(power_mgt,
			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		goto out_free_flags;
	}

	latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
	if (!latency_ns)
		goto out_free_flags;

	rc = of_property_read_u32_array(power_mgt,
		"ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
	if (rc) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
		goto out_free_latency;
	}

	residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
	if (!residency_ns)
		goto out_free_latency;

	rc = of_property_read_u32_array(power_mgt,
		"ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);

	for (i = 0; i < dt_idle_states; i++) {
		/*
		 * Don't index past the end of the statically sized
		 * powernv_states[] table; ignore any extra DT states.
		 */
		if (nr_idle_states >= MAX_POWERNV_IDLE_STATES)
			break;

		/*
		 * Cpuidle accepts exit_latency and target_residency in us.
		 * Use default target_residency values if f/w does not expose it.
		 */
		if (flags[i] & OPAL_PM_NAP_ENABLED) {
			/* Add NAP state */
			strcpy(powernv_states[nr_idle_states].name, "Nap");
			strcpy(powernv_states[nr_idle_states].desc, "Nap");
			powernv_states[nr_idle_states].flags = 0;
			powernv_states[nr_idle_states].target_residency = 100;
			powernv_states[nr_idle_states].enter = &nap_loop;
		}

		/*
		 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
		 * within this config dependency check.
		 */
#ifdef CONFIG_TICK_ONESHOT
		if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
			flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
			/* Add FASTSLEEP state */
			strcpy(powernv_states[nr_idle_states].name, "FastSleep");
			strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
			powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
			powernv_states[nr_idle_states].target_residency = 300000;
			powernv_states[nr_idle_states].enter = &fastsleep_loop;
		}
#endif
		powernv_states[nr_idle_states].exit_latency =
				((unsigned int)latency_ns[i]) / 1000;

		if (!rc) {
			powernv_states[nr_idle_states].target_residency =
				((unsigned int)residency_ns[i]) / 1000;
		}

		nr_idle_states++;
	}

	kfree(residency_ns);
out_free_latency:
	kfree(latency_ns);
out_free_flags:
	kfree(flags);
out:
	return nr_idle_states;
}

/*
 * powernv_idle_probe - check that we are running on OPAL firmware, select
 * the powernv state table and discover further idle states from the
 * device tree.
 */
static int powernv_idle_probe(void)
{
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_OPALv3)) {
		cpuidle_state_table = powernv_states;
		/* Device tree can indicate more idle states */
		max_idle_state = powernv_add_idle_states();
	} else {
		return -ENODEV;
	}

	return 0;
}

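/*
 * powernv_processor_idle_init - Probe for OPAL idle support, build the
 * driver state table and register the cpuidle driver and the CPU hotplug
 * notifier.
 */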
static int __init powernv_processor_idle_init(void)
{
	int retval;

	retval = powernv_idle_probe();
	if (retval)
		return retval;

	powernv_cpuidle_driver_init();
	retval = cpuidle_register(&powernv_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of powernv driver failed.\n");
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "powernv_idle_driver registered\n");
	return 0;
}

device_initcall(powernv_processor_idle_init);