/*
 * include/asm-sh/processor.h
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2002, 2003  Paul Mundt
 */

#ifndef __ASM_SH_PROCESSOR_32_H
#define __ASM_SH_PROCESSOR_32_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/hw_breakpoint.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("mova	1f, %0\n.align 2\n1:":"=z" (pc)); pc; })
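
/*
 * Illustrative usage sketch: current_text_addr() expands to a statement
 * expression that yields the address of the following instruction, so a
 * caller could record roughly where it is executing:
 *
 *	void *here = current_text_addr();
 *	printk(KERN_DEBUG "executing near %p\n", here);
 *
 * The mova form requires the "z" (r0) constraint and a 4-byte aligned
 * local label, which the .align 2 directive provides.
 */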

/* Core Processor Version Register */
#define CCN_PVR		0xff000030
#define CCN_CVR		0xff000040
#define CCN_PRR		0xff000044

/*
 * User space process size: 2GB.
 *
 * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff
 */
#define TASK_SIZE	0x7c000000UL

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
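
/*
 * Illustrative arithmetic, assuming the usual 4 KiB PAGE_SIZE:
 * TASK_SIZE / 3 is 0x29555555, which PAGE_ALIGN() rounds up to 0x29556000,
 * so mmap() searches start roughly a third of the way into user space.
 */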

/*
 * Bits in the SR register
 *
 * FD bit:
 *     When set, the CPU is not allowed to use the FPU, and any
 *     floating-point instruction raises an FPU disable exception.
 *
 * IMASK bits:
 *     Interrupt level mask
 */
#define SR_DSP		0x00001000
#define SR_IMASK	0x000000f0
#define SR_FD		0x00008000
#define SR_MD		0x40000000
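
/*
 * Illustrative sketch, assuming a saved status register image such as
 * pt_regs->sr:
 *
 *	int fpu_disabled = (regs->sr & SR_FD) != 0;
 *	int all_masked   = (regs->sr & SR_IMASK) == SR_IMASK;
 *	int privileged   = (regs->sr & SR_MD) != 0;
 */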

/*
 * DSP structure and data
 */
struct sh_dsp_struct {
	unsigned long dsp_regs[14];
	long status;
};

/*
 * FPU structure and data
 */

struct sh_fpu_hard_struct {
	unsigned long fp_regs[16];
	unsigned long xfp_regs[16];
	unsigned long fpscr;
	unsigned long fpul;

	long status; /* software status information */
};

/* Dummy FPU emulator */
struct sh_fpu_soft_struct {
	unsigned long fp_regs[16];
	unsigned long xfp_regs[16];
	unsigned long fpscr;
	unsigned long fpul;

	unsigned char lookahead;
	unsigned long entry_pc;
};

union thread_xstate {
	struct sh_fpu_hard_struct hardfpu;
	struct sh_fpu_soft_struct softfpu;
};

struct thread_struct {
	/* Saved registers when thread is descheduled */
	unsigned long sp;
	unsigned long pc;

	/* Various thread flags, see SH_THREAD_xxx */
	unsigned long flags;

	/* Saved state of ptrace hardware breakpoints */
	struct perf_event *ptrace_bps[HBP_NUM];

#ifdef CONFIG_SH_DSP
	/* DSP status information */
	struct sh_dsp_struct dsp_status;
#endif

	/* Extended processor state */
	union thread_xstate *xstate;

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU was used. Once this passes a threshold, the
	 * lazy FPU save becomes eager to avoid the extra trap. The counter
	 * is an unsigned char, so after 256 switches it wraps and the
	 * behavior turns lazy again; this copes with bursty apps that only
	 * use the FPU for a short time.
	 */
	unsigned char fpu_counter;
};
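
/*
 * Hedged sketch of how a context-switch path might consult fpu_counter;
 * the threshold and the helper name are illustrative assumptions, not a
 * description of the actual arch/sh switch code:
 *
 *	if (next->thread.fpu_counter > 5)
 *		restore_fpu_state_now(next);	(hypothetical helper)
 */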

#define INIT_THREAD  {						\
	.sp = sizeof(init_stack) + (long) &init_stack,		\
	.flags = 0,						\
}

/* Forward declaration, a strange C thing */
struct task_struct;

extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm)	do { } while (0)
#define release_segments(mm)	do { } while (0)

/*
 * FPU lazy state save handling.
 */

static __inline__ void disable_fpu(void)
{
	unsigned long __dummy;

	/* Set FD flag in SR */
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "or	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy)
			     : "r" (SR_FD));
}

static __inline__ void enable_fpu(void)
{
	unsigned long __dummy;

	/* Clear out FD flag in SR */
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "and	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy)
			     : "r" (~SR_FD));
}
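
/*
 * Illustrative usage sketch: code that touches the FPU registers from a
 * context where the FD bit may be set (for example a lazy save path)
 * would bracket the access like this; save_fpu_regs() is a hypothetical
 * placeholder, not a helper declared in this header:
 *
 *	enable_fpu();
 *	save_fpu_regs(&tsk->thread.xstate->hardfpu);
 *	disable_fpu();
 */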

/* Double precision, NaNs as NaNs, round to nearest, no exceptions */
#define FPSCR_INIT  0x00080000

#define	FPSCR_CAUSE_MASK	0x0001f000	/* Cause bits */
#define	FPSCR_FLAG_MASK		0x0000007c	/* Flag bits */
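
/*
 * Illustrative sketch, assuming an fpscr image saved in the xstate area:
 *
 *	unsigned long fpscr = tsk->thread.xstate->hardfpu.fpscr;
 *	unsigned long cause = (fpscr & FPSCR_CAUSE_MASK) >> 12;
 *	unsigned long flag  = (fpscr & FPSCR_FLAG_MASK) >> 2;
 */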

/*
 * Return saved PC of a blocked thread.
 */
#define thread_saved_pc(tsk)	(tsk->thread.pc)

void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs);

#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
#else
static inline void show_code(struct pt_regs *regs)
{
}
#endif

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
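
/*
 * Illustrative sketch: KSTK_EIP()/KSTK_ESP() pull the user-visible PC and
 * stack pointer (r15) out of the task's saved pt_regs, e.g. for procfs
 * style reporting:
 *
 *	unsigned long upc = KSTK_EIP(tsk);
 *	unsigned long usp = KSTK_ESP(tsk);
 */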

#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)

#define PREFETCH_STRIDE		L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW

static inline void prefetch(const void *x)
{
	__builtin_prefetch(x, 0, 3);
}

static inline void prefetchw(const void *x)
{
	__builtin_prefetch(x, 1, 3);
}
#endif
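
/*
 * Illustrative usage sketch: a cache-warming walk over a buffer, stepping
 * by PREFETCH_STRIDE; buf and len are hypothetical locals, not names used
 * elsewhere in this header:
 *
 *	const char *p;
 *	for (p = buf; p < buf + len; p += PREFETCH_STRIDE)
 *		prefetch(p);
 */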

#endif /* __KERNEL__ */
#endif /* __ASM_SH_PROCESSOR_32_H */