/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_PROCESSOR_H
#define __ASM_AVR32_PROCESSOR_H

#include <asm/page.h>
#include <asm/cache.h>

#define TASK_SIZE	0x80000000

#ifdef __KERNEL__
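/* User stacks start at the top of the user address space. */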
#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP
#endif

#ifndef __ASSEMBLY__

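/* Return the current program counter by reading the PC register. */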
static inline void *current_text_addr(void)
{
	register void *pc asm("pc");
	return pc;
}

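/*
 * Identification of the architecture flavour, CPU core and TLB
 * configuration; see struct avr32_cpuinfo below.
 */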
enum arch_type {
	ARCH_AVR32A,
	ARCH_AVR32B,
	ARCH_MAX
};

enum cpu_type {
	CPU_MORGAN,
	CPU_AT32AP,
	CPU_MAX
};

enum tlb_config {
	TLB_NONE,
	TLB_SPLIT,
	TLB_UNIFIED,
	TLB_INVALID
};

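/*
 * Optional CPU features, collected in the 'features' field of
 * struct avr32_cpuinfo.  A minimal sketch of a runtime check,
 * assuming boot_cpu_data has been filled in by the platform setup
 * code (use_dsp_insns() is only an illustrative name):
 *
 *	if (current_cpu_data.features & AVR32_FEATURE_DSP)
 *		use_dsp_insns();
 */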
#define AVR32_FEATURE_RMW	(1 << 0)
#define AVR32_FEATURE_DSP	(1 << 1)
#define AVR32_FEATURE_SIMD	(1 << 2)
#define AVR32_FEATURE_OCD	(1 << 3)
#define AVR32_FEATURE_PCTR	(1 << 4)
#define AVR32_FEATURE_JAVA	(1 << 5)
#define AVR32_FEATURE_FPU	(1 << 6)

struct avr32_cpuinfo {
	struct clk *clk;
	unsigned long loops_per_jiffy;
	enum arch_type arch_type;
	enum cpu_type cpu_type;
	unsigned short arch_revision;
	unsigned short cpu_revision;
	enum tlb_config tlb_config;
	unsigned long features;
	u32 device_id;

	struct cache_info icache;
	struct cache_info dcache;
};

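/*
 * Accessors for the fields of the device identification word.  The
 * bit layout follows the shifts and masks used below: bits 1-7 hold
 * the manufacturer ID, bits 12-27 the product number and bits 28-31
 * the chip revision.
 */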
static inline unsigned int avr32_get_manufacturer_id(struct avr32_cpuinfo *cpu)
{
	return (cpu->device_id >> 1) & 0x7f;
}

static inline unsigned int avr32_get_product_number(struct avr32_cpuinfo *cpu)
{
	return (cpu->device_id >> 12) & 0xffff;
}

static inline unsigned int avr32_get_chip_revision(struct avr32_cpuinfo *cpu)
{
	return (cpu->device_id >> 28) & 0x0f;
}

extern struct avr32_cpuinfo boot_cpu_data;

/* No SMP support so far */
#define current_cpu_data boot_cpu_data

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

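/*
 * cpu_relax() is just a compiler barrier on this uniprocessor
 * architecture.  cpu_sync_pipeline() flushes the pipeline by
 * branching to the next instruction ("sub pc, -2" adds 2 to PC).
 */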
#define cpu_relax()		barrier()
#define cpu_relax_lowlatency()	cpu_relax()
#define cpu_sync_pipeline()	asm volatile("sub pc, -2" : : : "memory")

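/*
 * Registers saved across a context switch: the status register,
 * program counter, kernel stack pointer and the callee-saved
 * registers r0-r7.
 */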
struct cpu_context {
	unsigned long sr;
	unsigned long pc;
	unsigned long ksp;	/* Kernel stack pointer */
	unsigned long r7;
	unsigned long r6;
	unsigned long r5;
	unsigned long r4;
	unsigned long r3;
	unsigned long r2;
	unsigned long r1;
	unsigned long r0;
};

/* This struct contains the CPU context as stored by switch_to() */
struct thread_struct {
	struct cpu_context cpu_context;
	unsigned long single_step_addr;
	u16 single_step_insn;
};

#define INIT_THREAD {						\
	.cpu_context = {					\
		.ksp = sizeof(init_stack) + (long)&init_stack,	\
	},							\
}

/*
 * Do necessary setup to start up a newly executed thread: enter user
 * mode and clear bit 0 of the entry point, as instructions must be
 * halfword-aligned.
 */
#define start_thread(regs, new_pc, new_sp)	 \
	do {					 \
		memset(regs, 0, sizeof(*regs));	 \
		regs->sr = MODE_USER;		 \
		regs->pc = new_pc & ~1;		 \
		regs->sp = new_sp;		 \
	} while (0)

struct task_struct;

/* Free all resources held by a thread */
extern void release_thread(struct task_struct *);

/* Return saved PC of a blocked thread */
#define thread_saved_pc(tsk)    ((tsk)->thread.cpu_context.pc)

struct pt_regs;
extern unsigned long get_wchan(struct task_struct *p);
extern void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl);
extern void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
			       struct pt_regs *regs, const char *log_lvl);

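/*
 * The user-mode register frame is stored at the very top of the
 * task's kernel stack.
 */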
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	((tsk)->thread.cpu_context.pc)
#define KSTK_ESP(tsk)	((tsk)->thread.cpu_context.ksp)

#define ARCH_HAS_PREFETCH

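/* Issue a "pref" cache prefetch hint for the given address. */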
static inline void prefetch(const void *x)
{
	const char *c = x;
	asm volatile("pref %0" : : "r"(c));
}
#define PREFETCH_STRIDE	L1_CACHE_BYTES

#endif /* __ASSEMBLY__ */

#endif /* __ASM_AVR32_PROCESSOR_H */