1/*
2 *  This file contains the power_save function for Power7 CPUs.
3 *
4 *  This program is free software; you can redistribute it and/or
5 *  modify it under the terms of the GNU General Public License
6 *  as published by the Free Software Foundation; either version
7 *  2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/threads.h>
11#include <asm/processor.h>
12#include <asm/page.h>
13#include <asm/cputable.h>
14#include <asm/thread_info.h>
15#include <asm/ppc_asm.h>
16#include <asm/asm-offsets.h>
17#include <asm/ppc-opcode.h>
18#include <asm/hw_irq.h>
19#include <asm/kvm_book3s_asm.h>
20#include <asm/opal.h>
21#include <asm/cpuidle.h>
22#include <asm/mmu-hash64.h>
23
24#undef DEBUG
25
/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 *
 * Winkle loses per-core/per-subcore/per-thread hardware state, so the
 * SPRs below are stashed in the GPR save slots of the pt_regs frame
 * built by power7_powersave_common (those slots are otherwise unused
 * until the NVGPRs are restored on wakeup).
 */
#define _SDR1	GPR3	/* SDR1: hash-table base (per subcore) */
#define _RPR	GPR4	/* Relative Priority Register */
#define _SPURR	GPR5	/* Scaled PURR */
#define _PURR	GPR6	/* Processor Utilization Resource Register */
#define _TSCR	GPR7	/* Thread Switch Control Register (per core) */
#define _DSCR	GPR8	/* Data Stream Control Register */
#define _AMOR	GPR9	/* Authority Mask Override Register */
#define _WORT	GPR10	/* Workload Optimization Register - Thread */
#define _WORC	GPR11	/* Workload Optimization Register - Core */
39
/* Idle state entry routines */

/*
 * IDLE_STATE_ENTER_SEQ(IDLE_INST) - execute the power-saving
 * instruction IDLE_INST (nap/sleep/winkle).
 *
 * The store/ptesync/load sequence beforehand is the required entry
 * protocol for these instructions (see the Power ISA description of
 * nap/sleep/rvwinkle).  Note the "cmp r0,r0" always compares equal, so
 * the "bne 1b" is never taken: the load+compare exist only to make the
 * preceding store/ptesync complete before the idle instruction issues.
 * The trailing "b ." spins in case execution ever resumes at the
 * instruction after IDLE_INST (wakeup is expected to enter via the
 * system reset vector instead).
 */
#define	IDLE_STATE_ENTER_SEQ(IDLE_INST)				\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
1:	cmp	cr0,r0,r0;					\
	bne	1b;						\
	IDLE_INST;						\
	b	.
51
52	.text
53
/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 *
 * Reached via "bnel" from the lwarx/stwcx. loops below, so LR points
 * back into the caller's loop.  On return r15 holds a freshly
 * lwarx'ed copy of *r14 (i.e. a new reservation), letting the caller
 * retry its stwcx. immediately.
 */

core_idle_lock_held:
	HMT_LOW				/* lower thread priority while spinning */
3:	lwz	r15,0(r14)		/* plain load: don't hold a reservation while polling */
	andi.   r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b			/* lock still held */
	HMT_MEDIUM			/* back to normal priority */
	lwarx	r15,0,r14		/* re-establish reservation for the caller */
	blr
69
/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE
 *
 * To check IRQ_HAPPENED in r4
 * 	0 - don't check
 * 	1 - check
 *
 * If r4 = 1 and an interrupt became pending while soft-disabled, this
 * returns to the caller with r3 = 0 (no power save entered).  Otherwise
 * it does not return here: it rfids into power7_enter_nap_mode in real
 * mode with interrupts hard-disabled and MSR_RI clear.
 */
_GLOBAL(power7_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)		/* LR in the caller's frame */
	stdu	r1,-INT_FRAME_SIZE(r1)	/* build an interrupt-sized frame */
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)		/* on state loss, resume at our caller */

#ifndef CONFIG_SMP
	/* Make sure FPU, VSX etc... are flushed as we may lose
	 * state when going to nap mode
	 */
	bl	discard_lazy_cpu_state
#endif /* CONFIG_SMP */

	/* Hard disable interrupts */
	mfmsr	r9
	rldicl	r9,r9,48,1		/* rotate MSR_EE into bit 0 and mask it off */
	rotldi	r9,r9,16		/* rotate back: r9 = MSR with EE clear */
	mtmsrd	r9,1			/* hard-disable interrupts */

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l	/* anything besides HARD_DIS pending? */
	beq	1f
	cmpwi	cr0,r4,0		/* did the caller ask us to check? */
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE	/* pop our frame and bail out */
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)		/* hard-disabled MSR, reloaded on wakeup */
	std	r1,PACAR1(r13)		/* publish the frame for the wakeup path */

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6		/* r6 = current MSR with RI clear */
	LOAD_REG_ADDR(r7, power7_enter_nap_mode)
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r5
	rfid			/* "return" into power7_enter_nap_mode, real mode */
150
/*
 * Entered in real mode (MSR_IDLE) via the rfid above.
 * r3 = requested state (PNV_THREAD_NAP/SLEEP/WINKLE).
 */
	.globl	power7_enter_nap_mode
power7_enter_nap_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're napping */
	li	r4,KVM_HWTHREAD_IN_NAP
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP	/* cr3 stays live: gt => winkle, see common_enter */
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)	/* this thread's bit in core_idle_state */
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
	lwarx	r15,0,r14

	andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held	/* spin (and re-lwarx) while locked */

	andc	r15,r15,r7			/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

/*
 * If cr0 = 0, then current thread is the last thread of the core entering
 * sleep. Last thread needs to execute the hardware bug workaround code if
 * required by the platform.
 * Make the workaround call unconditionally here. The below branch call is
 * patched out when the idle states are discovered if the platform does not
 * require it.
 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1		/* lost reservation, retry */
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle	/* cr3 set by the PNV_THREAD_SLEEP compare above */
	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT	/* last thread takes the lock */
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
	bl	opal_call_realmode

	/* Clear Lock bit */
	li	r0,0
	lwsync			/* order the workaround before releasing the lock */
	stw	r0,0(r14)	/* thread bits are all zero here (we were last in) */
	b	common_enter

enter_winkle:
	/*
	 * Note: all state - per-core, per-subcore and per-thread - is
	 * saved here, since any thread in the core might be the first
	 * to wake up and have to restore it.
	 */
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)
	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
239
/*
 * power7_idle: nap if powersave_nap is enabled, otherwise return.
 * Falls through into power7_nap with r3 = 1 (check pending irqs).
 */
_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr				/* nap disabled: just return */
	li	r3, 1
	/* fall through */

/*
 * power7_nap: enter nap.
 * r3 = 1 to abort (returning 0) if an interrupt is pending, 0 to ignore.
 */
_GLOBAL(power7_nap)
	mr	r4,r3			/* r4 = check-IRQ_HAPPENED flag */
	li	r3,PNV_THREAD_NAP	/* requested idle state */
	b	power7_powersave_common
	/* No return */
254
/*
 * power7_sleep: request fastsleep; always checks for pending irqs.
 */
_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP	/* requested idle state */
	li	r4,1			/* check IRQ_HAPPENED before sleeping */
	b	power7_powersave_common
	/* No return */
260
/*
 * power7_winkle: request winkle, the deepest idle state; always checks
 * for pending irqs.  Use the named state constant from asm/cpuidle.h
 * (already included above) instead of the bare magic number 3, matching
 * power7_nap/power7_sleep.  PNV_THREAD_WINKLE == 3, so behavior is
 * unchanged.
 */
_GLOBAL(power7_winkle)
	li	r3,PNV_THREAD_WINKLE	/* requested idle state */
	li	r4,1			/* check IRQ_HAPPENED before sleeping */
	b	power7_powersave_common
	/* No return */
266
/*
 * CHECK_HMI_INTERRUPT: extract the wakeup-reason field from SRR1
 * (4 bits on P8/ARCH_207S, 3 bits on P7) and, if the wake reason is
 * 0xa (hypervisor maintenance), invoke OPAL_HANDLE_HMI in real mode.
 * Clobbers r0 plus whatever opal_call_realmode clobbers; r1/r2 are
 * reloaded from the PACA and r3 is preserved through ORIG_GPR3.
 * NOTE(review): SRR1 itself may be clobbered by the OPAL call - callers
 * that still need it must save it first (see power7_wakeup_tb_loss).
 */
#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r0,OPAL_HANDLE_HMI;	/* Pass opal token argument*/	\
	bl	opal_call_realmode;					\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
284
285
/*
 * Wakeup path for threads coming out of sleep/winkle where timebase
 * (and possibly more state) was lost.  Branched to from the system
 * reset handling in exceptions-64s.S, which sets cr3 (sleep-or-winkle)
 * and cr4 (winkle) as documented below.  The first thread to wake in
 * each subcore/core takes the core_idle_state lock and restores the
 * shared state saved in enter_winkle.
 */
_GLOBAL(power7_wakeup_tb_loss)
	ld	r2,PACATOC(r13);
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack
	 * and they are restored before switching to the process context. Hence
	 * until they are restored, they are free to be used.
	 *
	 * Save SRR1 in a NVGPR as it might be clobbered in opal_call_realmode
	 * (called in CHECK_HMI_INTERRUPT). SRR1 is required to determine the
	 * wakeup reason if we branch to kvm_start_guest.
	 */

	mfspr	r16,SPRN_SRR1		/* r16 survives the OPAL calls below */
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz	r7,PACA_THREAD_MASK(r13)	/* this thread's bit */
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * Lock bit is set in one of the 2 cases-
	 * a. In the sleep/winkle enter path, the last thread is executing
	 * fastsleep workaround code.
	 * b. In the wake up path, another thread is executing fastsleep
	 * workaround undo code or resyncing timebase or restoring context
	 * In either case loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held	/* spin (and re-lwarx) while locked */

	cmpwi	cr2,r15,0		/* no thread bits set => first in core */
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	cr1,r4,0	/* Check if first in subcore */

	/*
	 * At this stage
	 * cr1 - 0b0100 if first thread to wakeup in subcore
	 * cr2 - 0b0100 if first thread to wakeup in core
	 * cr3-  0b0010 if waking up from sleep or winkle
	 * cr4 - 0b0100 if waking up from winkle
	 */

	or	r15,r15,r7		/* Set thread bit */

	beq	cr1,first_thread_in_subcore

	/* Not first thread in subcore to wake up */
	stwcx.	r15,0,r14
	bne-	lwarx_loop2		/* lost reservation, retry */
	isync
	b	common_exit

first_thread_in_subcore:
	/* First thread in subcore to wakeup */
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT	/* take the lock */
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync

	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	bne	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld      r4,_SDR1(r1)
	mtspr   SPRN_SDR1,r4
	ld      r4,_RPR(r1)
	mtspr   SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from fastsleep. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The below branch instruction will
	 * be patched out when the idle states are discovered if platform
	 * does not require workaround.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/* Do timebase resync if we are waking up from sleep. Use cr3 value
	 * set in exceptions-64s.S */
	ble	cr3,clear_lock
	/* Time base re-sync */
	li	r0,OPAL_RESYNC_TIMEBASE
	bl	opal_call_realmode;
	/* TODO: Check r3 for failure */

	/*
	 * If waking up from sleep, per core state is not lost, skip to
	 * clear_lock.
	 */
	bne	cr4,clear_lock

	/* Restore per core state */
	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS	/* drop the lock bit */
	lwsync			/* order the restores before releasing the lock */
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	bne	cr4,hypervisor_state_restored

	/* Waking up from winkle */

	/* Restore per thread state */
	bl	__restore_cpu_power8

	/* Restore SLB  from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	/* Re-insert each bolted SLB entry from the shadow buffer,
	 * skipping invalid (ESID_V clear) entries. */
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

hypervisor_state_restored:

	li	r5,PNV_THREAD_RUNNING
	stb     r5,PACA_THREAD_IDLE_STATE(r13)

	mtspr	SPRN_SRR1,r16		/* restore wakeup SRR1 saved above */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	li      r0,KVM_HWTHREAD_IN_KERNEL
	stb     r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz     r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi   r0,0
	beq     6f
	b       kvm_start_guest		/* KVM wants this thread for a guest */
6:
#endif

	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r3,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE	/* pop the idle frame */
	mtcr	r3
	mfspr	r3,SPRN_SRR1		/* Return SRR1 */
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid				/* back to power7_powersave_common's caller */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0			/* undo: second arg 0 (enter path used 1) */
	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
	bl	opal_call_realmode
	b	timebase_resync
482
/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 *
 * Full-restore wakeup path: reload the frame published in PACAR1 by
 * power7_powersave_common, restore NVGPRs/TOC/CR, and rfid back to the
 * saved NIP with the saved MSR.
 */
_GLOBAL(power7_wakeup_loss)
	ld	r1,PACAR1(r13)		/* frame saved before entering idle */
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE	/* pop the idle frame */
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid				/* resume at saved NIP with saved MSR */
502
/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 *
 * Fast wakeup path used when GPR state was preserved: if
 * PACA_NAPSTATELOST says state was lost after all, fall back to
 * power7_wakeup_loss; otherwise only CR/MSR/NIP need restoring
 * (no REST_NVGPRS - the registers were never lost).
 */
_GLOBAL(power7_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	power7_wakeup_loss	/* state lost: take the full-restore path */
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)		/* frame saved before entering idle */
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE	/* pop the idle frame */
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid				/* resume at saved NIP with saved MSR */
523