/*
 * linux/arch/unicore32/kernel/traps.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state.
 *  Mostly a debugging aid, but will probably kill the offending process.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/unistd.h>

#include <asm/cacheflush.h>
#include <asm/traps.h>

#include "setup.h"

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

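/*
 * Print a single backtrace entry.  With CONFIG_KALLSYMS the addresses
 * are also resolved to symbol names via the %pS printk format.
 */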
void dump_backtrace_entry(unsigned long where,
		unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
	printk(KERN_DEFAULT "[<%08lx>] (%pS) from [<%08lx>] (%pS)\n",
			where, (void *)where, from, (void *)from);
#else
	printk(KERN_DEFAULT "Function entered at [<%08lx>] from [<%08lx>]\n",
			where, from);
#endif
}

/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If not, then we can't dump out any
 * information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
	if (sp < PAGE_OFFSET ||
	    (sp > (unsigned long)high_memory && high_memory != NULL))
		return -EFAULT;

	return 0;
}

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk(KERN_DEFAULT "%s%s(0x%08lx to 0x%08lx)\n",
			lvl, str, bottom, top);

	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			if (p >= bottom && p < top) {
				unsigned long val;
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 9, " %08lx", val);
				else
					sprintf(str + i * 9, " ????????");
			}
		}
		printk(KERN_DEFAULT "%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	set_fs(fs);
}

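/*
 * Dump the instruction words leading up to and including the faulting
 * PC; the word at the PC itself is printed in parentheses.
 */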
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int width = 8;
	mm_segment_t fs;
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
					width, val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk(KERN_DEFAULT "%sCode: %s\n", lvl, str);

	set_fs(fs);
}

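/*
 * Print the call chain for @tsk.  The frame pointer is taken from
 * @regs when available, from the saved thread state for a sleeping
 * task, or read directly from the fp register for the current task,
 * and is sanity-checked before the stack is walked.
 */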
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unsigned int fp, mode;
	int ok = 1;

	printk(KERN_DEFAULT "Backtrace: ");

	if (!tsk)
		tsk = current;

	if (regs) {
		fp = regs->UCreg_fp;
		mode = processor_mode(regs);
	} else if (tsk != current) {
		fp = thread_saved_fp(tsk);
		mode = 0x10;
	} else {
		asm("mov %0, fp" : "=r" (fp) : : "cc");
		mode = 0x10;
	}

	if (!fp) {
		printk("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		printk("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		printk("frame pointer underflow");
	printk("\n");

	if (ok)
		c_backtrace(fp, mode);
}

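/* Arch hook used by the generic stack-dumping code; @sp is not used here. */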
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	barrier();
}

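/*
 * Print the oops banner, registers, stack, backtrace and code for a
 * fatal fault.  Called from die() with die_lock held; returns the
 * verdict of the DIE_OOPS notifier chain.
 */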
static int __die(const char *str, int err, struct thread_info *thread,
		struct pt_regs *regs)
{
	struct task_struct *tsk = thread->task;
	static int die_counter;
	int ret;

	printk(KERN_EMERG "Internal error: %s: %x [#%d]\n",
	       str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on UniCore */
	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no,
			SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	__show_regs(regs);
	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return ret;
}

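/* Serializes die() so that concurrent oopses do not interleave their output. */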
DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();
	int ret;

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, thread, regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}

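/*
 * Route a fault notification: deliver the signal described by @info to
 * a faulting user process, or die() on a fault taken in kernel mode.
 */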
void uc32_notify_die(const char *str, struct pt_regs *regs,
		struct siginfo *info, unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_info(info->si_signo, info, current);
	} else
		die(str, regs, err);
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, unsigned int reason)
{
	console_verbose();

	printk(KERN_CRIT "Bad mode detected with reason 0x%x\n", reason);

	die("Oops - bad mode", regs, 0);
	local_irq_disable();
	panic("bad mode");
}

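/* Report a corrupted pte, pmd or pgd entry (reached via the page table ERROR macros). */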
void __pte_error(const char *file, int line, unsigned long val)
{
	printk(KERN_DEFAULT "%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
	printk(KERN_DEFAULT "%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
	printk(KERN_DEFAULT "%s:%d: bad pgd %08lx.\n", file, line, val);
}

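/* Called by the software division helpers when the kernel divides by zero. */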
asmlinkage void __div0(void)
{
	printk(KERN_DEFAULT "Division by zero in kernel.\n");
	dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

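/*
 * Nothing to do here: the exception vectors are installed much earlier,
 * in early_trap_init().
 */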
void __init trap_init(void)
{
	return;
}

void __init early_trap_init(void)
{
	unsigned long vectors = VECTORS_BASE;

	/*
	 * Copy the vectors and stubs (in entry-unicore.S) into the
	 * vector page, mapped at 0xffff0000, and ensure they are
	 * visible to the instruction stream.
	 */
	memcpy((void *)vectors,
			__vectors_start,
			__vectors_end - __vectors_start);
	memcpy((void *)vectors + 0x200,
			__stubs_start,
			__stubs_end - __stubs_start);

	early_signal_init();

	flush_icache_range(vectors, vectors + PAGE_SIZE);
}