/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/kallsyms.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
			  /*  dumped to the console via printk)          */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

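/* Render the low 'nbits' bits of 'x' into 'buf' as an ASCII string of
 * '0'/'1' characters, most significant bit first; used below to dump the
 * PSW and FPSR bit-by-bit. */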
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

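/* Dump four consecutive registers per printk() line, prefixed with the
 * log level and the register-set name ("r", "sr" or "fr"). */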
#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i;
	char *level;
	unsigned long cr30, cr31;

	level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user_mode(regs))
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
	printk(level);
	print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
	printk(level);
	print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
	printk(level);
	print_symbol(" RP(r2): %s\n", regs->gr[2]);
}


void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
#ifdef CONFIG_KALLSYMS
			print_symbol("%s\n", info->ip);
#else
			if ((i & 0x03) == 0)
				printk("\n");
#endif
			i++;
		}
	}
	printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *s)
{
	struct unwind_frame_info info;

	if (!task) {
		unsigned long sp;

HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			/* Build a minimal pt_regs for the current context:
			 * the stack pointer, the address of the HERE label
			 * and our return address are all the unwinder needs
			 * to start walking from this point. */
			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, task);
	}

	do_show_stack(&info);
}

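/* Any trapping address is accepted as a potential BUG() site here;
 * report_bug(), called from handle_break() below, does the real lookup. */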
int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, current->pid, str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
		show_regs(regs);
#endif
		return;
	}

	oops_in_progress = 1;

	/* Amuse the user in a SPARC fashion */
	if (err) printk(
KERN_CRIT "      _______________________________ \n"
KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
KERN_CRIT "      ------------------------------- \n"
KERN_CRIT "             \\   ^__^\n"
KERN_CRIT "              \\  (xx)\\_______\n"
KERN_CRIT "                 (__)\\       )\\/\\\n"
KERN_CRIT "                  U  ||----w |\n"
KERN_CRIT "                     ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, current->pid, str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
	return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
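/* As decoded in handle_break() below, im5 lives in the low five bits of
 * the IIR and im13 starts at bit 13, so 0x10004 == (8 << 13) | 4,
 * i.e. "break 4,8". */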
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here.  */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON().  */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

#ifdef PRINT_USER_FAULTS
	if (unlikely(iir != GDB_BREAK_INSN)) {
		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			current->pid, current->comm);
		show_regs(regs);
	}
#endif

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;

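/*
 * Copy the PIM (Processor Internal Memory) image that firmware saved at
 * HPMC time into a pt_regs so the usual register dump code can be used.
 * The control-register indices follow the PA-RISC layout: cr11 = SAR,
 * cr17/cr18 = IIA space/offset queue heads, cr19 = IIR, cr20 = ISR,
 * cr21 = IOR, cr22 = IPSW.
 */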
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
    register int i;
    extern unsigned int hpmc_pim_data[];
    struct pdc_hpmc_pim_11 *pim_narrow;
    struct pdc_hpmc_pim_20 *pim_wide;

    if (boot_cpu_data.cpu_type >= pcxu) {

	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

	/*
	 * Note: The following code will probably generate a
	 * bunch of truncation error warnings from the compiler.
	 * Could be handled with an ifdef, but perhaps there
	 * is a better way.
	 */

	regs->gr[0] = pim_wide->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_wide->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_wide->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_wide->sr[i];

	regs->iasq[0] = pim_wide->cr[17];
	regs->iasq[1] = pim_wide->iasq_back;
	regs->iaoq[0] = pim_wide->cr[18];
	regs->iaoq[1] = pim_wide->iaoq_back;

	regs->sar  = pim_wide->cr[11];
	regs->iir  = pim_wide->cr[19];
	regs->isr  = pim_wide->cr[20];
	regs->ior  = pim_wide->cr[21];
    }
    else {
	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

	regs->gr[0] = pim_narrow->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_narrow->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_narrow->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_narrow->sr[i];

	regs->iasq[0] = pim_narrow->cr[17];
	regs->iasq[1] = pim_narrow->iasq_back;
	regs->iaoq[0] = pim_narrow->cr[18];
	regs->iaoq[1] = pim_narrow->iaoq_back;

	regs->sar  = pim_narrow->cr[11];
	regs->iir  = pim_narrow->cr[19];
	regs->isr  = pim_narrow->cr[20];
	regs->ior  = pim_narrow->cr[21];
    }

    /*
     * The following fields only have meaning if we came through
     * another path. So just zero them here.
     */

    regs->ksp = 0;
    regs->kpc = 0;
    regs->orig_r28 = 0;
}

/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMCs.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
			msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	panic(msg);
}

void handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
	    pdc_console_restart();  /* switch back to pdc if HPMC */
	else
	    local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the user's
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

	switch(code) {

	case  1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case  2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case  3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case  5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case  6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space   = regs->iasq[0];
		break;

	case  8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case  9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 * Bit 0x00200000 of the trapped opcode selects cr27
			 * over cr26, and the low five bits name the target
			 * general register; afterwards step the IA queues
			 * past the emulated instruction.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition  */
		if(user_mode(regs)){
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = 0;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space   = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case  7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
		    si.si_addr = (void __user *) regs->iaoq[0];
		else
		    si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
			    current->pid, current->comm);
			show_regs(regs);
#endif
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
		if (fault_space == 0)
			printk(KERN_DEBUG "User Fault on Kernel Space ");
		else
			printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
			       code);
		printk("pid=%d command='%s'\n", current->pid, current->comm);
		show_regs(regs);
#endif
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		si.si_code = SEGV_MAPERR;
		si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;
	    }
	}
	else {

	    /*
	     * The kernel should never fault on its own address space.
	     */

	    if (fault_space == 0)
	    {
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
		parisc_terminate("Kernel Fault", regs, code, fault_address);

	    }
	}

	do_page_fault(regs, code, fault_address);
}

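/*
 * Sanity-check and finish setting up the interruption vector area: the
 * assembly fault vector begins with the "cows can fly" signature, the
 * first eight words are zeroed, and the HPMC length and checksum words
 * are filled in so the HPMC vector entry checksums the way firmware
 * expects.
 */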
int __init check_ivt(void *iva)
{
	extern const u32 os_hpmc[];
	extern const u32 os_hpmc_end[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;

	if (strcmp((char *)iva, "cows can fly"))
		return -1;

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
	    *ivap++ = 0;

	/* Compute Checksum for HPMC handler */

	/* os_hpmc and os_hpmc_end are u32 arrays, so their difference is a
	 * word count; convert it to the byte length that the length word
	 * and the length/4 checksum loop below expect. */
	length = (os_hpmc_end - os_hpmc) * sizeof(u32);
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
	    check += *hpmcp++;

	for (i=0; i<8; i++)
	    check += ivap[i];

	ivap[5] = -check;

	return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
	void *iva;

	if (boot_cpu_data.cpu_type >= pcxu)
		iva = (void *) &fault_vector_20;
	else
#ifdef CONFIG_64BIT
		panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
		iva = (void *) &fault_vector_11;
#endif

	if (check_ivt(iva))
		panic("IVT invalid");
}