/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
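	/*
	 * Informational: the STACK_FRAME_REGS_MARKER value stored just below
	 * the pt_regs area lets stack walkers (e.g. show_stack, xmon)
	 * recognise frames that contain a saved register set.
	 */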
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the
	 * condition is correct.
	 */
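	/*
	 * The trap below raises a WARN if paca->soft_enabled was 0, i.e. if
	 * we somehow entered the syscall with interrupts soft-disabled.
	 */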
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r11,PACAKMSR(r13)
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	syscall_dotrace
.Lsyscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
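/*
 * The table interleaves one native (64-bit) and one compat (32-bit) entry
 * per syscall, 16 bytes per slot: hence the shift-by-4 scaling of the
 * syscall number below, and the +8 offset used for 32-bit tasks.
 */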
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r12
	bctrl			/* Call handler */

.Lsyscall_exit:
	std	r3,RESULT(r1)
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r9,MSR_RI
	andc	r11,r10,r9
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	HMT_MEDIUM_LOW_HAS_PPR
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)
	b	.Lsyscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 holds -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

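	/*
	 * The flags word is updated with ldarx/stdcx. because other TIF bits
	 * in the same word can be set concurrently (e.g. from an interrupt or
	 * another CPU), so a plain load/store could lose updates.
	 */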
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	SET_DEFAULT_THREAD_PPR(r3, r10)		/* Set thread.ppr = 3 */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

/* Save non-volatile GPRs, if not already saved. */
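/*
 * The low bit of the trap number in the exception frame records whether the
 * non-volatile GPRs still need saving: if it is already clear they were saved
 * earlier and we return immediately, otherwise r14-r31 are saved and the bit
 * is cleared.
 */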
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
#if defined(_CALL_ELF) && _CALL_ELF == 2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
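/*
 * _switch returns (in r3) the task_struct of the previously running task;
 * the THREAD pointer passed in is converted back with "addi r3,r3,-THREAD"
 * just before returning.
 */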
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Event based branch registers */
	mfspr	r0, SPRN_BESCR
	std	r0, THREAD_BESCR(r3)
	mfspr	r0, SPRN_EBBHR
	std	r0, THREAD_EBBHR(r3)
	mfspr	r0, SPRN_EBBRR
	std	r0, THREAD_EBBRR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Event based branch registers */
	ld	r0, THREAD_BESCR(r4)
	mtspr	SPRN_BESCR, r0
	ld	r0, THREAD_EBBHR(r4)
	mtspr	SPRN_EBBHR, r0
	ld	r0, THREAD_EBBRR(r4)
	mtspr	SPRN_EBBRR, r0

	ld	r0,THREAD_TAR(r4)
	mtspr	SPRN_TAR,r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	lwz	r6,THREAD_DSCR_INHERIT(r4)
	ld	r0,THREAD_DSCR(r4)
	cmpwi	r6,0
	bne	1f
	ld	r0,PACA_DSCR(r13)
1:
BEGIN_FTR_SECTION_NESTED(70)
	mfspr	r8, SPRN_FSCR
	rldimi	r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
	mtspr	SPRN_FSCR, r8
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
	cmpd	r0,r25
	beq	2f
	mtspr	SPRN_DSCR,r0
2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
#ifdef CONFIG_PPC_BOOK3E
	bne	1f
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	beq	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f
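	/*
	 * A kprobed stwu/stdu r1,-X(r1) could not be emulated without
	 * overwriting this exception frame, so emulate_step() set
	 * _TIF_EMULATE_STACK_STORE instead.  Finish the job here: copy the
	 * frame down to where the new stack pointer will be and complete the
	 * store of the back chain at the new stack bottom.
	 */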

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stwu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we really should disable interrupts
	 * when we return from the interrupt so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling; were we already enabled? If yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code paths such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
	andc	r4,r4,r0	 /* r0 contains MSR_RI here */
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per-cpu area; only restore it if we are returning to
	 * userspace, since the value stored in the stack frame may belong
	 * to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	do_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	BEGIN_FTR_SECTION
		cmpwi	cr0,r3,0xe80
	FTR_SECTION_ELSE
		cmpwi	cr0,r3,0xa00
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
	b	ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

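	/*
	 * Build the two MSR values we need: r0 is the current MSR with EE,
	 * SE, BE and RI cleared (used immediately below so we cannot take an
	 * interrupt while SRR0/1 hold the RTAS entry state), and r6 is the
	 * MSR RTAS will run with: SF, IR, DR, FP, FE0/FE1, RI and LE are
	 * additionally cleared, i.e. 32-bit big-endian real mode.
	 */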
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

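	/*
	 * "bcl 20,31,$+4" is the usual way to obtain the current address on
	 * PowerPC: it branches to the next instruction with LR set, and this
	 * particular unconditional form is recognised by the hardware so it
	 * does not unbalance the link-stack return-address predictor.
	 */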
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr   r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd  r6

	ld	r1,PACAR1(r4)           /* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr    r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	rfid
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr    r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

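/*
 * With DYNAMIC_FTRACE the _mcount call sites are patched at runtime: they
 * are nop'ed out while tracing is off and redirected to ftrace_caller when
 * tracing is enabled, so _mcount itself is just a blr.  The "bl ftrace_stub"
 * at the ftrace_call site below is likewise patched to call the currently
 * registered tracer.
 */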
_GLOBAL_TOC(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL_TOC(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	ld	r11, 112(r1)
	ld	r3, 16(r11)

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR in the caller's stack frame to this.
	 */
	ld	r11, 112(r1)
	std	r3, 16(r11)

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */
