/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/tracehook.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"

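/*
 * Program check handlers, indexed by the program interruption code
 * delivered by the hardware. trap_init() fills in this table.
 */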
pgm_check_handler_t *pgm_check_table[128];

int show_unhandled_signals;

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_asce_exception;

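/* Read the current stack pointer from general purpose register 15. */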
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

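/*
 * Print a call trace, walking the panic stack, the async stack and the
 * process stack in turn, starting from the given stack pointer or, if
 * none is given, from the current one.
 */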
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

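/* Dump the raw stack contents and then print the call trace. */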
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n       ");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

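/*
 * Print the address of the last breaking event (64 bit only),
 * taken from regs->args[0].
 */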
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

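/*
 * Extract the PSW field selected by @bits, shifted right so that its
 * least significant bit ends up in bit 0.
 */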
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

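/*
 * Print the PSW, the decoded PSW flag bits and the general purpose
 * registers.
 */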
void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

/* This is called from fs/proc/array.c */
void task_show_regs(struct seq_file *m, struct task_struct *task)
{
	struct pt_regs *regs;

	regs = task_pt_regs(task);
	seq_printf(m, "task: %p, ksp: %p\n",
		   task, (void *)task->thread.ksp);
	seq_printf(m, "User PSW : %p %p\n",
		   (void *) regs->psw.mask, (void *)regs->psw.addr);

	seq_printf(m, "User GPRS: " FOURLONG,
		   regs->gprs[0], regs->gprs[1],
		   regs->gprs[2], regs->gprs[3]);
	seq_printf(m, "           " FOURLONG,
		   regs->gprs[4], regs->gprs[5],
		   regs->gprs[6], regs->gprs[7]);
	seq_printf(m, "           " FOURLONG,
		   regs->gprs[8], regs->gprs[9],
		   regs->gprs[10], regs->gprs[11]);
	seq_printf(m, "           " FOURLONG,
		   regs->gprs[12], regs->gprs[13],
		   regs->gprs[14], regs->gprs[15]);
	seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
		   task->thread.acrs[0], task->thread.acrs[1],
		   task->thread.acrs[2], task->thread.acrs[3]);
	seq_printf(m, "           %08x %08x %08x %08x\n",
		   task->thread.acrs[4], task->thread.acrs[5],
		   task->thread.acrs[6], task->thread.acrs[7]);
	seq_printf(m, "           %08x %08x %08x %08x\n",
		   task->thread.acrs[8], task->thread.acrs[9],
		   task->thread.acrs[10], task->thread.acrs[11]);
	seq_printf(m, "           %08x %08x %08x %08x\n",
		   task->thread.acrs[12], task->thread.acrs[13],
		   task->thread.acrs[14], task->thread.acrs[15]);
}

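/*
 * die() serializes oops output with die_lock, prints the oops banner
 * and registers and then terminates the faulting task.
 */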
static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

static inline void report_user_fault(struct pt_regs *regs, long int_code,
				     int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%lX ", int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
}

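/* Any address handed to report_bug() is considered valid on s390. */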
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

static inline void __kprobes do_trap(long interruption_code, int signr,
				     char *str, struct pt_regs *regs,
				     siginfo_t *info)
{
	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (notify_die(DIE_TRAP, str, regs, interruption_code,
		       interruption_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		tsk->thread.trap_no = interruption_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(regs, interruption_code, signr);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, interruption_code);
		}
	}
}

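/*
 * Address of the instruction that caused the program check: the PSW
 * points past the instruction, so subtract the instruction length code.
 */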
static inline void __user *get_check_address(struct pt_regs *regs)
{
	return (void __user *)((regs->psw.addr - S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}

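/*
 * PER single step event: give kprobes and other debuggers a chance
 * first, otherwise deliver SIGTRAP.
 */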
void __kprobes do_single_step(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
		       SIGTRAP) == NOTIFY_STOP) {
		return;
	}
	if (tracehook_consider_fatal_signal(current, SIGTRAP))
		force_sig(SIGTRAP, current);
}

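/* Fallback for program checks that have no dedicated handler. */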
static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		local_irq_enable();
		report_user_fault(regs, interruption_code, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die("Unknown program exception", regs, interruption_code);
}

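/*
 * Generate a trivial handler that fills in a siginfo and hands it
 * to do_trap().
 */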
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = siaddr; \
	do_trap(interruption_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(SIGILL,  "addressing exception", addressing_exception,
	      ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "execute exception", execute_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "fixpoint divide exception", divide_exception,
	      FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "fixpoint overflow exception", overflow_exception,
	      FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP overflow exception", hfp_overflow_exception,
	      FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP underflow exception", hfp_underflow_exception,
	      FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP significance exception", hfp_significance_exception,
	      FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP divide exception", hfp_divide_exception,
	      FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP square root exception", hfp_sqrt_exception,
	      FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "operand exception", operand_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "privileged operation", privileged_op,
	      ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "special operation exception", special_op_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "translation exception", translation_exception,
	      ILL_ILLOPN, get_check_address(regs))

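/*
 * Map the IEEE data exception code from the FPC register to a si_code
 * and deliver SIGFPE.
 */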
static inline void
do_fp_trap(struct pt_regs *regs, void __user *location,
	   int fpc, long interruption_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	current->thread.ieee_instruction_pointer = (addr_t) location;
	do_trap(interruption_code, SIGFPE,
		"floating point exception", regs, &si);
}

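/*
 * Operation exception: check for the ptrace breakpoint opcode, emulate
 * floating point instructions if CONFIG_MATHEMU is set, otherwise
 * deliver SIGILL.
 */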
static void illegal_op(struct pt_regs * regs, long interruption_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (tracehook_consider_fatal_signal(current, SIGTRAP))
				force_sig(SIGTRAP, current);
			else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"illegal operation", regs, &info);
	}
}

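/*
 * With CONFIG_MATHEMU the specification exception handler emulates the
 * basic floating point load/store instructions for user space; without
 * it the exception simply raises SIGILL.
 */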
#ifdef CONFIG_MATHEMU
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry   */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry   */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"specification exception", regs, &info);
	}
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
	      ILL_ILLOPN, get_check_address(regs));
#endif

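/*
 * Data exception: save the FPC if the machine has IEEE floating point,
 * emulate the instruction under CONFIG_MATHEMU, then deliver SIGFPE or
 * SIGILL depending on the data exception code.
 */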
static void data_exception(struct pt_regs * regs, long interruption_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry   */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry   */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"data exception", regs, &info);
	}
}

static void space_switch_exception(struct pt_regs * regs, long int_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_check_address(regs);
	do_trap(int_code, SIGILL, "space switch event", regs, &info);
}

asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

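/*
 * Fill the program check handler table; the index is the program
 * interruption code presented by the hardware.
 */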
void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	pfault_irq_init();
}