1/* $Id: traps.c,v 1.1.1.1 2008/10/15 03:26:19 james26_jang Exp $
2 * arch/sparc64/kernel/traps.c
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8/*
9 * I like traps on v9, :))))
10 */
11
12#include <linux/config.h>
13#include <linux/sched.h>  /* for jiffies */
14#include <linux/kernel.h>
15#include <linux/signal.h>
16#include <linux/smp.h>
17#include <linux/smp_lock.h>
18#include <linux/mm.h>
19
20#include <asm/delay.h>
21#include <asm/system.h>
22#include <asm/ptrace.h>
23#include <asm/oplib.h>
24#include <asm/page.h>
25#include <asm/pgtable.h>
26#include <asm/unistd.h>
27#include <asm/uaccess.h>
28#include <asm/fpumacro.h>
29#include <asm/lsu.h>
30#include <asm/dcu.h>
31#include <asm/estate.h>
32#include <asm/chafsr.h>
33#include <asm/psrcompat.h>
34#include <asm/processor.h>
35#ifdef CONFIG_KMOD
36#include <linux/kmod.h>
37#endif
38
39/* When an irrecoverable trap occurs at tl > 0, the trap entry
40 * code logs the trap state registers at every level in the trap
41 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
42 * is as follows:
43 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* saved trap state register */
		unsigned long tpc;	/* saved trap PC */
		unsigned long tnpc;	/* saved trap next-PC */
		unsigned long tt;	/* saved trap type */
	} trapstack[4];			/* one entry per trap level */
	unsigned long tl;		/* trap level at time of error */
};
53
54static void dump_tl1_traplog(struct tl1_traplog *p)
55{
56	int i;
57
58	printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
59	       p->tl);
60	for (i = 0; i < 4; i++) {
61		printk(KERN_CRIT
62		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
63		       "TNPC[%016lx] TT[%lx]\n",
64		       i + 1,
65		       p->trapstack[i].tstate, p->trapstack[i].tpc,
66		       p->trapstack[i].tnpc, p->trapstack[i].tt);
67	}
68}
69
70void bad_trap (struct pt_regs *regs, long lvl)
71{
72	char buffer[32];
73	siginfo_t info;
74
75	if (lvl < 0x100) {
76		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
77		die_if_kernel(buffer, regs);
78	}
79
80	lvl -= 0x100;
81	if (regs->tstate & TSTATE_PRIV) {
82		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
83		die_if_kernel (buffer, regs);
84	}
85	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
86		regs->tpc &= 0xffffffff;
87		regs->tnpc &= 0xffffffff;
88	}
89	info.si_signo = SIGILL;
90	info.si_errno = 0;
91	info.si_code = ILL_ILLTRP;
92	info.si_addr = (void *)regs->tpc;
93	info.si_trapno = lvl;
94	force_sig_info(SIGILL, &info, current);
95}
96
97void bad_trap_tl1 (struct pt_regs *regs, long lvl)
98{
99	char buffer[32];
100
101	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
102
103	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
104	die_if_kernel (buffer, regs);
105}
106
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Invoked by the BUG() macro: report the failing source location.
 * bust_spinlocks(1) ensures the message reaches the console even if
 * console locks are held at the time of the BUG.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif
114
115void instruction_access_exception(struct pt_regs *regs,
116				  unsigned long sfsr, unsigned long sfar)
117{
118	siginfo_t info;
119
120	if (regs->tstate & TSTATE_PRIV) {
121		printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
122		       sfsr, sfar);
123		die_if_kernel("Iax", regs);
124	}
125	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
126		regs->tpc &= 0xffffffff;
127		regs->tnpc &= 0xffffffff;
128	}
129	info.si_signo = SIGSEGV;
130	info.si_errno = 0;
131	info.si_code = SEGV_MAPERR;
132	info.si_addr = (void *)regs->tpc;
133	info.si_trapno = 0;
134	force_sig_info(SIGSEGV, &info, current);
135}
136
/* Same exception taken at trap level > 0: log the saved trap stack
 * first, then handle it exactly like the TL0 case above.
 */
void instruction_access_exception_tl1(struct pt_regs *regs,
				      unsigned long sfsr, unsigned long sfar)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	instruction_access_exception(regs, sfsr, sfar);
}
143
/* Data access exception at trap level 0.  A privileged-mode fault may
 * come from a uaccess site with an exception-table fixup; if so,
 * redirect execution to the fixup handler.  Any other kernel fault is
 * fatal.  User faults deliver SIGSEGV at the fault address (SFAR).
 */
void data_access_exception (struct pt_regs *regs,
			    unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		unsigned long fixup, g2;

		g2 = regs->u_regs[UREG_G2];
		if ((fixup = search_exception_table (regs->tpc, &g2))) {
			/* Ouch, somebody is trying ugly VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
			       "g2<%016lx>\n", regs->tpc, fixup, g2);
#endif
			/* Resume at the fixup handler with the updated %g2. */
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
		/* No fixup entry -- unrecoverable kernel fault. */
		printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
		       sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
179
180#ifdef CONFIG_PCI
181/* This is really pathetic... */
182extern volatile int pci_poke_in_progress;
183extern volatile int pci_poke_cpu;
184extern volatile int pci_poke_faulted;
185#endif
186
/* When access exceptions happen, we must do this.  Both L1 cache tag
 * arrays are cleared by hand, then the I/D caches and their parity
 * checking are re-enabled via the LSU control register.
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	/* This routine is spitfire-specific. */
	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}
212
213void do_iae(struct pt_regs *regs)
214{
215	siginfo_t info;
216
217	spitfire_clean_and_reenable_l1_caches();
218
219	info.si_signo = SIGBUS;
220	info.si_errno = 0;
221	info.si_code = BUS_OBJERR;
222	info.si_addr = (void *)0;
223	info.si_trapno = 0;
224	force_sig_info(SIGBUS, &info, current);
225}
226
/* Data access error.  If a PCI config-space poke is in flight on this
 * cpu, the fault is expected: record it and skip over the faulting
 * instruction.  Otherwise treat it like an instruction access error.
 */
void do_dae(struct pt_regs *regs)
{
#ifdef CONFIG_PCI
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();

		pci_poke_faulted = 1;

		/* Cheetah-class chips report this trap with a different
		 * TPC convention, so advance past the faulting instruction
		 * there -- NOTE(review): inferred from this conditional;
		 * confirm against the UltraSPARC-III manual.
		 */
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			regs->tpc += 4;

		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif
	do_iae(regs);
}
245
/* Map an 8-bit UDB ECC syndrome to the code passed to
 * prom_getunumber() in cee_log() below.  Values >= 0x40 appear to be
 * markers for multi-bit/invalid syndromes -- TODO confirm against the
 * UltraSPARC UDB documentation; table contents kept verbatim.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
280
281/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
282 * in the following format.  The AFAR is left as is, with
283 * reserved bits cleared, and is a raw 40-bit physical
284 * address.
285 */
286#define CE_STATUS_UDBH_UE		(1UL << (43 + 9))
287#define CE_STATUS_UDBH_CE		(1UL << (43 + 8))
288#define CE_STATUS_UDBH_ESYNDR		(0xffUL << 43)
289#define CE_STATUS_UDBH_SHIFT		43
290#define CE_STATUS_UDBL_UE		(1UL << (33 + 9))
291#define CE_STATUS_UDBL_CE		(1UL << (33 + 8))
292#define CE_STATUS_UDBL_ESYNDR		(0xffUL << 33)
293#define CE_STATUS_UDBL_SHIFT		33
294#define CE_STATUS_AFSR_MASK		(0x1ffffffffUL)
295#define CE_STATUS_AFSR_ME		(1UL << 32)
296#define CE_STATUS_AFSR_PRIV		(1UL << 31)
297#define CE_STATUS_AFSR_ISAP		(1UL << 30)
298#define CE_STATUS_AFSR_ETP		(1UL << 29)
299#define CE_STATUS_AFSR_IVUE		(1UL << 28)
300#define CE_STATUS_AFSR_TO		(1UL << 27)
301#define CE_STATUS_AFSR_BERR		(1UL << 26)
302#define CE_STATUS_AFSR_LDP		(1UL << 25)
303#define CE_STATUS_AFSR_CP		(1UL << 24)
304#define CE_STATUS_AFSR_WP		(1UL << 23)
305#define CE_STATUS_AFSR_EDP		(1UL << 22)
306#define CE_STATUS_AFSR_UE		(1UL << 21)
307#define CE_STATUS_AFSR_CE		(1UL << 20)
308#define CE_STATUS_AFSR_ETS		(0xfUL << 16)
309#define CE_STATUS_AFSR_ETS_SHIFT	16
310#define CE_STATUS_AFSR_PSYND		(0xffffUL << 0)
311#define CE_STATUS_AFSR_PSYND_SHIFT	0
312
313/* Layout of Ecache TAG Parity Syndrome of AFSR */
314#define AFSR_ETSYNDROME_7_0		0x1UL /* E$-tag bus bits  <7:0> */
315#define AFSR_ETSYNDROME_15_8		0x2UL /* E$-tag bus bits <15:8> */
316#define AFSR_ETSYNDROME_21_16		0x4UL /* E$-tag bus bits <21:16> */
317#define AFSR_ETSYNDROME_24_22		0x8UL /* E$-tag bus bits <24:22> */
318
319static char *syndrome_unknown = "<Unknown>";
320
321asmlinkage void cee_log(unsigned long ce_status,
322			unsigned long afar,
323			struct pt_regs *regs)
324{
325	char memmod_str[64];
326	char *p;
327	unsigned short scode, udb_reg;
328
329	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
330	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
331	       smp_processor_id(),
332	       (ce_status & CE_STATUS_AFSR_MASK),
333	       afar,
334	       ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
335	       ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
336
337	udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
338	if (udb_reg & (1 << 8)) {
339		scode = ecc_syndrome_table[udb_reg & 0xff];
340		if (prom_getunumber(scode, afar,
341				    memmod_str, sizeof(memmod_str)) == -1)
342			p = syndrome_unknown;
343		else
344			p = memmod_str;
345		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
346		       "Memory Module \"%s\"\n",
347		       smp_processor_id(), scode, p);
348	}
349
350	udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
351	if (udb_reg & (1 << 8)) {
352		scode = ecc_syndrome_table[udb_reg & 0xff];
353		if (prom_getunumber(scode, afar,
354				    memmod_str, sizeof(memmod_str)) == -1)
355			p = syndrome_unknown;
356		else
357			p = memmod_str;
358		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
359		       "Memory Module \"%s\"\n",
360		       smp_processor_id(), scode, p);
361	}
362}
363
364/* Cheetah error trap handling. */
365static unsigned long ecache_flush_physbase;
366static unsigned long ecache_flush_linesize;
367static unsigned long ecache_flush_size;
368
/* WARNING: The error trap handlers in assembly know the precise
 *	    layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 *
 * Each entry is exactly 32 u64s (0x100 bytes); __pad keeps that
 * size fixed.  Do not reorder or resize fields.
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data	*/
/*0x30*/u64 dcache_index;	/* D-cache index	*/
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid	*/
/*0x40*/u64 dcache_utag;	/* D-cache microtag	*/
/*0x48*/u64 dcache_stag;	/* D-cache snooptag	*/

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode	*/
/*0x90*/u64 icache_index;	/* I-cache index	*/
/*0x98*/u64 icache_tag;		/* I-cache phys tag	*/
/*0xa0*/u64 icache_utag;	/* I-cache microtag	*/
/*0xa8*/u64 icache_stag;	/* I-cache snooptag	*/
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag	*/
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag	*/

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index	*/
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state	*/

/*0xf0*/u64 __pad[32 - 30];
};
402#define CHAFSR_INVALID		((u64)-1L)
403
404/* This is allocated at boot time based upon the largest hardware
405 * cpu ID in the system.  We allocate two entries per cpu, one for
406 * TL==0 logging and one for TL >= 1 logging.
407 */
408struct cheetah_err_info *cheetah_error_log;
409
410static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
411{
412	struct cheetah_err_info *p;
413	int cpu = smp_processor_id();
414
415	if (!cheetah_error_log)
416		return NULL;
417
418	p = cheetah_error_log + (cpu * 2);
419	if ((afsr & CHAFSR_TL1) != 0UL)
420		p++;
421
422	return p;
423}
424
425extern unsigned int tl0_icpe[], tl1_icpe[];
426extern unsigned int tl0_dcpe[], tl1_dcpe[];
427extern unsigned int tl0_fecc[], tl1_fecc[];
428extern unsigned int tl0_cee[], tl1_cee[];
429extern unsigned int tl0_iae[], tl1_iae[];
430extern unsigned int tl0_dae[], tl1_dae[];
431extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
432extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
433extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
434extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
435extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
436
437void cheetah_ecache_flush_init(void)
438{
439	unsigned long largest_size, smallest_linesize, order;
440	char type[16];
441	int node, highest_cpu, i;
442
443	/* Scan all cpu device tree nodes, note two values:
444	 * 1) largest E-cache size
445	 * 2) smallest E-cache line size
446	 */
447	largest_size = 0UL;
448	smallest_linesize = ~0UL;
449	node = prom_getchild(prom_root_node);
450	while ((node = prom_getsibling(node)) != 0) {
451		prom_getstring(node, "device_type", type, sizeof(type));
452		if (!strcmp(type, "cpu")) {
453			unsigned long val;
454
455			val = prom_getintdefault(node, "ecache-size",
456						 (2 * 1024 * 1024));
457			if (val > largest_size)
458				largest_size = val;
459			val = prom_getintdefault(node, "ecache-line-size", 64);
460			if (val < smallest_linesize)
461				smallest_linesize = val;
462		}
463	}
464	if (largest_size == 0UL || smallest_linesize == ~0UL) {
465		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
466			    "parameters.\n");
467		prom_halt();
468	}
469
470	ecache_flush_size = (2 * largest_size);
471	ecache_flush_linesize = smallest_linesize;
472
473	/* Discover a physically contiguous chunk of physical
474	 * memory in 'sp_banks' of size ecache_flush_size calculated
475	 * above.  Store the physical base of this area at
476	 * ecache_flush_physbase.
477	 */
478	for (node = 0; ; node++) {
479		if (sp_banks[node].num_bytes == 0)
480			break;
481		if (sp_banks[node].num_bytes >= ecache_flush_size) {
482			ecache_flush_physbase = sp_banks[node].base_addr;
483			break;
484		}
485	}
486
487	/* Note: Zero would be a valid value of ecache_flush_physbase so
488	 * don't use that as the success test. :-)
489	 */
490	if (sp_banks[node].num_bytes == 0) {
491		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
492			    "contiguous physical memory.\n", ecache_flush_size);
493		prom_halt();
494	}
495
496	/* Now allocate error trap reporting scoreboard. */
497	highest_cpu = 0;
498#ifdef CONFIG_SMP
499	for (i = 0; i < NR_CPUS; i++) {
500		if ((1UL << i) & cpu_present_map)
501			highest_cpu = i;
502	}
503#endif
504	highest_cpu++;
505	node = highest_cpu * (2 * sizeof(struct cheetah_err_info));
506	for (order = 0; order < MAX_ORDER; order++) {
507		if ((PAGE_SIZE << order) >= node)
508			break;
509	}
510	cheetah_error_log = (struct cheetah_err_info *)
511		__get_free_pages(GFP_KERNEL, order);
512	if (!cheetah_error_log) {
513		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
514			    "error logging scoreboard (%d bytes).\n", node);
515		prom_halt();
516	}
517	memset(cheetah_error_log, 0, PAGE_SIZE << order);
518
519	/* Mark all AFSRs as invalid so that the trap handler will
520	 * log new new information there.
521	 */
522	for (i = 0; i < 2 * highest_cpu; i++)
523		cheetah_error_log[i].afsr = CHAFSR_INVALID;
524
525	/* Now patch trap tables. */
526	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
527	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
528	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
529	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
530	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
531	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
532	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
533	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
534	if (tlb_type == cheetah_plus) {
535		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
536		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
537		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
538		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
539	}
540	flushi(PAGE_OFFSET);
541}
542
/* Flush the whole E-cache by displacement: read ecache_flush_size
 * bytes (twice the largest E-cache) of contiguous physical memory
 * through ASI_PHYS_USE_EC, one cache line at a time, so every line
 * gets evicted.  The loop runs from high addresses downward.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
556
/* Flush the single E-cache line containing 'physaddr' by reading the
 * two addresses in the displacement-flush area that map to the same
 * index (the flush area is twice the E-cache size, so both halves
 * must be touched).
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	/* Align to an 8-byte boundary. */
	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
572
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned long i;

	/* Clear the valid bits in all the tags.  The loop covers a
	 * 32K index space in 32-byte line steps; the (2 << 3) bits
	 * appear to select the tag array within the diagnostic ASI --
	 * TODO confirm against the UltraSPARC-III manual.
	 */
	for (i = 0; i < (1 << 15); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
	}
}
590
591static void cheetah_flush_icache(void)
592{
593	unsigned long dcu_save;
594
595	/* Save current DCU, disable I-cache. */
596	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
597			     "or %0, %2, %%g1\n\t"
598			     "stxa %%g1, [%%g0] %1\n\t"
599			     "membar #Sync"
600			     : "=r" (dcu_save)
601			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
602			     : "g1");
603
604	__cheetah_flush_icache();
605
606	/* Restore DCU register */
607	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
608			     "membar #Sync"
609			     : /* no outputs */
610			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
611}
612
/* Invalidate the D-cache by writing a zero tag to every line:
 * 64K index space (1 << 16) in 32-byte line steps via the
 * ASI_DCACHE_TAG diagnostic ASI.
 */
static void cheetah_flush_dcache(void)
{
	unsigned long i;

	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i), "i" (ASI_DCACHE_TAG));
	}
}
624
/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned long i;

	/* Walk the 64K D-cache index space in 32-byte line steps. */
	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		/* utag value derived from the line index -- presumably
		 * the "appropriate value" from the comment above; TODO
		 * confirm against the UltraSPARC-III+ manual.
		 */
		unsigned long tag = (i >> 14);
		unsigned long j;

		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (i),
				       "i" (ASI_DCACHE_UTAG));
		/* Zero the 32 bytes of data in 8-byte stores. */
		for (j = i; j < i + (1 << 5); j += (1 << 3))
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (j), "i" (ASI_DCACHE_DATA));
	}
}
652
653/* Conversion tables used to frob Cheetah AFSR syndrome values into
654 * something palatable to the memory controller driver get_unumber
655 * routine.
656 */
657#define MT0	137
658#define MT1	138
659#define MT2	139
660#define NONE	254
661#define MTC0	140
662#define MTC1	141
663#define MTC2	142
664#define MTC3	143
665#define C0	128
666#define C1	129
667#define C2	130
668#define C3	131
669#define C4	132
670#define C5	133
671#define C6	134
672#define C7	135
673#define C8	136
674#define M2	144
675#define M3	145
676#define M4	146
677#define M	147
/* AFSR E_SYND (9 bits) -> bit number or marker code (C*/M*/NONE
 * defines above), indexed by syndrome; consumed by chmc_getunumber().
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* AFSR M_SYND (4 bits) -> MTAG error code (MT*/MTC*/NONE defines
 * above), indexed by syndrome; consumed by chmc_getunumber().
 */
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
722
723/* This table is ordered in priority of errors and matches the
724 * AFAR overwrite policy as well.
725 */
726static struct {
727	unsigned long mask;
728	char *name;
729} cheetah_error_table[] = {
730	{	CHAFSR_PERR,	"System interface protocol error"			},
731	{	CHAFSR_IERR,	"Internal processor error"				},
732	{	CHAFSR_ISAP,	"System request parity error on incoming addresss"	},
733	{	CHAFSR_UCU,	"Uncorrectable E-cache ECC error for ifetch/data"	},
734	{	CHAFSR_UCC,	"SW Correctable E-cache ECC error for ifetch/data"	},
735	{	CHAFSR_UE,	"Uncorrectable system bus data ECC error for read"	},
736	{	CHAFSR_EDU,	"Uncorrectable E-cache ECC error for stmerge/blkld"	},
737	{	CHAFSR_EMU,	"Uncorrectable system bus MTAG error"			},
738	{	CHAFSR_WDU,	"Uncorrectable E-cache ECC error for writeback"		},
739	{	CHAFSR_CPU,	"Uncorrectable ECC error for copyout"			},
740	{	CHAFSR_CE,	"HW corrected system bus data ECC error for read"	},
741	{	CHAFSR_EDC,	"HW corrected E-cache ECC error for stmerge/blkld"	},
742	{	CHAFSR_EMC,	"HW corrected system bus MTAG ECC error"		},
743	{	CHAFSR_WDC,	"HW corrected E-cache ECC error for writeback"		},
744	{	CHAFSR_CPC,	"HW corrected ECC error for copyout"			},
745	{	CHAFSR_TO,	"Unmapped error from system bus"			},
746	{	CHAFSR_BERR,	"Bus error response from system bus"			},
747	/* These two do not update the AFAR. */
748	{	CHAFSR_IVC,	"HW corrected system bus data ECC error for ivec read"	},
749	{	CHAFSR_IVU,	"Uncorrectable system bus data ECC error for ivec read"	},
750	{	0,		NULL							}
751};
752
753/* Return the highest priority error conditon mentioned. */
754static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
755{
756	unsigned long tmp = 0;
757	int i;
758
759	for (i = 0; cheetah_error_table[i].mask; i++) {
760		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
761			return tmp;
762	}
763	return tmp;
764}
765
766static char *cheetah_get_string(unsigned long bit)
767{
768	int i;
769
770	for (i = 0; cheetah_error_table[i].mask; i++) {
771		if ((bit & cheetah_error_table[i].mask) != 0UL)
772			return cheetah_error_table[i].name;
773	}
774	return "???";
775}
776
777extern int chmc_getunumber(int, unsigned long, char *, int);
778
/* Log everything known about a cheetah error trap: the raw AFSR/AFAR,
 * trap registers, syndromes, the highest priority error condition,
 * the memory module unumber when the syndrome identifies one, the
 * D/I/E-cache snapshots captured by the trap handler, and finally any
 * secondary error bits.  'recoverable' only selects the log severity
 * (KERN_WARNING vs KERN_CRIT).
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU  | \
			 CHAFSR_UCC | CHAFSR_UCU  | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		/* E-syndrome errors: translate through the ECC table. */
		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		/* MTAG errors: translate through the MTAG table. */
		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Log any remaining error bits, in priority order. */
	afsr = (afsr & ~hipri) & CHAFSR_ERRORS;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
893
/* Re-read the AFSR to see whether new errors latched while the
 * current one was being handled.  If so, and 'logp' is non-NULL,
 * capture the fresh AFSR/AFAR into that log entry.  The observed
 * bits are then written back to the AFSR to clear them
 * (write-one-to-clear semantics).  Returns 1 if new errors were
 * present, 0 otherwise.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & CHAFSR_ERRORS) != 0) {
		if (logp != NULL) {
			/* Only read the AFAR when there is something to log. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Clear whatever error bits we just observed. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
918
/* Handle a Fast-ECC error trap.  AFSR/AFAR are the register values
 * captured by the low-level trap entry code.  The caches are flushed
 * and re-enabled, error reporting is turned back on, the error is
 * logged, and we panic if the recorded state is not recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log slot for this AFSR; nothing sane to do but stop. */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1004
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 if no new error was observed after the displacement
 * (an intermittent problem), 1 if a single retry displacement
 * cleared it, and 2 if errors persisted even after the retry.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
			     "andn	%0, %1, %%g1\n\t"
			     "stxa	%%g1, [%%g0] %2\n\t"
			     "membar	#Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	/* Align to the 8-byte doubleword containing the error. */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "casxa	[%2] %3, %%g0, %%g0\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1069
1070/* Return non-zero if PADDR is a valid physical memory address. */
1071static int cheetah_check_main_memory(unsigned long paddr)
1072{
1073	int i;
1074
1075	for (i = 0; ; i++) {
1076		if (sp_banks[i].num_bytes == 0)
1077			break;
1078		if (paddr >= sp_banks[i].base_addr &&
1079		    paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1080			return 1;
1081	}
1082	return 0;
1083}
1084
/* Handle a Correctable-ECC error trap.  AFSR/AFAR are the values
 * captured by the low-level trap handler.  If the fault address is
 * in main memory the line is scrubbed via cheetah_fix_ce(); cache
 * state is then cleaned up, error reporting re-enabled, and the
 * event logged.  Panics only if the AFSR shows fatal state bits.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log slot for this AFSR; nothing sane to do but stop. */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* Scrub the bad memory line by displacement flushing. */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* Flush just the offending line when this is the only
		 * reported error, else flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & CHAFSR_ERRORS) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & CHAFSR_ERRORS) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1181
/* Handle a deferred asynchronous error trap (e.g. bus error or
 * uncorrectable ECC reported after the triggering instruction).
 * Cleans up cache state, re-enables error reporting, logs the
 * error, and attempts recovery by retiring the offending page
 * and applying an exception-table fixup; panics otherwise.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		/* Expected fault from the poke; flag it and skip the insn. */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log slot for this AFSR; nothing sane to do but stop. */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* Flush just the offending line when this is the only
		 * reported error, else flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & CHAFSR_ERRORS) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & CHAFSR_ERRORS) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			unsigned long g2 = regs->u_regs[UREG_G2];
			unsigned long fixup = search_exception_table(regs->tpc, &g2);

			if (fixup != 0UL) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the page so it is never handed out
				 * again; its contents are suspect.
				 */
				struct page *page = virt_to_page(__va(afar));

				if (VALID_PAGE(page))
					get_page(page);
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (fixup != 0UL && recoverable) {
					regs->tpc = fixup;
					regs->tnpc = regs->tpc + 4;
					regs->u_regs[UREG_G2] = g2;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1371
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:	0=dcache,1=icache
 * Bit1:	0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* Invalidate the cache holding the bad parity, then flush
	 * the D-cache (which the trap entry code left enabled-off).
	 */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Unrecoverable encodings panic; recoverable ones just warn. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}
1411
1412void do_fpe_common(struct pt_regs *regs)
1413{
1414	if(regs->tstate & TSTATE_PRIV) {
1415		regs->tpc = regs->tnpc;
1416		regs->tnpc += 4;
1417	} else {
1418		unsigned long fsr = current->thread.xfsr[0];
1419		siginfo_t info;
1420
1421		if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
1422			regs->tpc &= 0xffffffff;
1423			regs->tnpc &= 0xffffffff;
1424		}
1425		info.si_signo = SIGFPE;
1426		info.si_errno = 0;
1427		info.si_addr = (void *)regs->tpc;
1428		info.si_trapno = 0;
1429		info.si_code = __SI_FAULT;
1430		if ((fsr & 0x1c000) == (1 << 14)) {
1431			if (fsr & 0x10)
1432				info.si_code = FPE_FLTINV;
1433			else if (fsr & 0x08)
1434				info.si_code = FPE_FLTOVF;
1435			else if (fsr & 0x04)
1436				info.si_code = FPE_FLTUND;
1437			else if (fsr & 0x02)
1438				info.si_code = FPE_FLTDIV;
1439			else if (fsr & 0x01)
1440				info.si_code = FPE_FLTRES;
1441		}
1442		force_sig_info(SIGFPE, &info, current);
1443	}
1444}
1445
/* IEEE 754 exception trap: shares the common FPE delivery path. */
void do_fpieee(struct pt_regs *regs)
{
	do_fpe_common(regs);
}
1450
1451extern int do_mathemu(struct pt_regs *, struct fpustate *);
1452
1453void do_fpother(struct pt_regs *regs)
1454{
1455	struct fpustate *f = FPUSTATE;
1456	int ret = 0;
1457
1458	switch ((current->thread.xfsr[0] & 0x1c000)) {
1459	case (2 << 14): /* unfinished_FPop */
1460	case (3 << 14): /* unimplemented_FPop */
1461		ret = do_mathemu(regs, f);
1462		break;
1463	}
1464	if (ret)
1465		return;
1466	do_fpe_common(regs);
1467}
1468
1469void do_tof(struct pt_regs *regs)
1470{
1471	siginfo_t info;
1472
1473	if(regs->tstate & TSTATE_PRIV)
1474		die_if_kernel("Penguin overflow trap from kernel mode", regs);
1475	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
1476		regs->tpc &= 0xffffffff;
1477		regs->tnpc &= 0xffffffff;
1478	}
1479	info.si_signo = SIGEMT;
1480	info.si_errno = 0;
1481	info.si_code = EMT_TAGOVF;
1482	info.si_addr = (void *)regs->tpc;
1483	info.si_trapno = 0;
1484	force_sig_info(SIGEMT, &info, current);
1485}
1486
1487void do_div0(struct pt_regs *regs)
1488{
1489	siginfo_t info;
1490
1491	if (regs->tstate & TSTATE_PRIV)
1492		die_if_kernel("TL0: Kernel divide by zero.", regs);
1493	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
1494		regs->tpc &= 0xffffffff;
1495		regs->tnpc &= 0xffffffff;
1496	}
1497	info.si_signo = SIGFPE;
1498	info.si_errno = 0;
1499	info.si_code = FPE_INTDIV;
1500	info.si_addr = (void *)regs->tpc;
1501	info.si_trapno = 0;
1502	force_sig_info(SIGFPE, &info, current);
1503}
1504
/* Dump the nine instruction words surrounding PC (three before,
 * six after), bracketing the word at PC itself with '<' and '>'.
 * Nothing is printed for a misaligned PC.
 */
void instruction_dump (unsigned int *pc)
{
	int off;

	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (off = -3; off < 6; off++) {
		char open_mark = (off == 0) ? '<' : ' ';
		char close_mark = (off == 0) ? '>' : ' ';

		printk("%c%08x%c", open_mark, pc[off], close_mark);
	}
	printk("\n");
}
1517
/* Dump nine user-space instruction words around PC.  Bails out
 * silently on a misaligned PC or a faulting user read.
 */
void user_instruction_dump (unsigned int *pc)
{
	unsigned int insns[9];
	int idx;

	if (((unsigned long) pc) & 3)
		return;

	if (copy_from_user(insns, pc - 3, sizeof(insns)))
		return;

	/* NOTE(review): marking here is inverted relative to
	 * instruction_dump() -- every word EXCEPT the trapping one
	 * (index 3) is bracketed.  Output preserved as-is; confirm
	 * whether this is intentional.
	 */
	printk("Instruction DUMP:");
	for (idx = 0; idx < 9; idx++)
		printk("%c%08x%c",
		       (idx == 3) ? ' ' : '<',
		       insns[idx],
		       (idx == 3) ? ' ' : '>');
	printk("\n");
}
1534
1535void show_trace_raw(struct task_struct *tsk, unsigned long ksp)
1536{
1537	unsigned long pc, fp;
1538	unsigned long task_base = (unsigned long)tsk;
1539	struct reg_window *rw;
1540	int count = 0;
1541
1542	fp = ksp + STACK_BIAS;
1543	do {
1544		/* Bogus frame pointer? */
1545		if (fp < (task_base + sizeof(struct task_struct)) ||
1546		    fp >= (task_base + THREAD_SIZE))
1547			break;
1548		rw = (struct reg_window *)fp;
1549		pc = rw->ins[7];
1550		printk("[%016lx] ", pc);
1551		fp = rw->ins[6] + STACK_BIAS;
1552	} while (++count < 16);
1553	printk("\n");
1554}
1555
1556void show_trace_task(struct task_struct *tsk)
1557{
1558	if (tsk)
1559		show_trace_raw(tsk, tsk->thread.ksp);
1560}
1561
/* Print an oops banner, the registers, a backtrace and an
 * instruction dump, then terminate the current task.  Despite its
 * name, this function never returns: a privileged-mode trap exits
 * with SIGKILL, a user-mode one with SIGSEGV.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;
	struct reg_window *lastrw;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s\n", current->comm, current->pid, str);
	/* Spill all register windows to the stack so the walk below
	 * sees current frame contents.
	 */
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if(regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		lastrw = (struct reg_window *)current;
		while(rw					&&
		      count++ < 30				&&
		      rw >= lastrw				&&
		      (char *) rw < ((char *) current)
		        + sizeof (union task_union) 		&&
		      !(((unsigned long) rw) & 0x7)) {
			printk("Caller[%016lx]\n", rw->ins[7]);
			lastrw = rw;
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif

	if(regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
1614
1615extern int handle_popc(u32 insn, struct pt_regs *regs);
1616extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
1617
/* Illegal-instruction trap: fatal in kernel mode.  For user mode,
 * try to emulate POPC and LDQ/STQ in software before delivering
 * SIGILL/ILL_ILLOPC.
 */
void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if(tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if(current->thread.flags & SPARC_FLAG_32BIT)
		pc = (u32)pc;
	/* NOTE(review): get_user() returns 0 or -EFAULT here, so this
	 * test is just "fetch succeeded" -- "== 0" would be the
	 * clearer spelling.
	 */
	if (get_user(insn, (u32 *)pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
1645
1646void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1647{
1648	siginfo_t info;
1649
1650	if(regs->tstate & TSTATE_PRIV) {
1651		extern void kernel_unaligned_trap(struct pt_regs *regs,
1652						  unsigned int insn,
1653						  unsigned long sfar, unsigned long sfsr);
1654
1655		return kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc), sfar, sfsr);
1656	}
1657	info.si_signo = SIGBUS;
1658	info.si_errno = 0;
1659	info.si_code = BUS_ADRALN;
1660	info.si_addr = (void *)sfar;
1661	info.si_trapno = 0;
1662	force_sig_info(SIGBUS, &info, current);
1663}
1664
1665void do_privop(struct pt_regs *regs)
1666{
1667	siginfo_t info;
1668
1669	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
1670		regs->tpc &= 0xffffffff;
1671		regs->tnpc &= 0xffffffff;
1672	}
1673	info.si_signo = SIGILL;
1674	info.si_errno = 0;
1675	info.si_code = ILL_PRVOPC;
1676	info.si_addr = (void *)regs->tpc;
1677	info.si_trapno = 0;
1678	force_sig_info(SIGILL, &info, current);
1679}
1680
/* Privileged-action trap: handled identically to privileged opcode. */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
1685
/* Trap level 1 stuff or other traps we should never see... */
/* TL0 cache error exception: unexpected here, fatal in kernel mode. */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}
1691
/* TL1 cache error: dump the trap-level stack, then die. */
void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}
1697
/* TL1 data access exception: dump the trap-level stack, then die. */
void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}
1703
/* TL1 instruction access exception: dump the trap-level stack, then die. */
void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}
1709
/* TL1 divide-by-zero: dump the trap-level stack, then die. */
void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}
1715
/* TL1 FPU-disabled trap: dump the trap-level stack, then die. */
void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}
1721
/* TL1 FPU IEEE exception: dump the trap-level stack, then die. */
void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}
1727
/* TL1 fp_exception_other: dump the trap-level stack, then die. */
void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}
1733
/* TL1 illegal instruction: dump the trap-level stack, then die. */
void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}
1739
/* TL1 interrupt: dump the trap-level stack, then die. */
void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}
1745
/* TL1 misaligned LDDF: dump the trap-level stack, then die. */
void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}
1751
/* TL1 misaligned STDF: dump the trap-level stack, then die. */
void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}
1757
/* TL0 physical-address watchpoint: unexpected, fatal in kernel mode. */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
1762
/* TL1 physical-address watchpoint: dump the trap-level stack, then die. */
void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}
1768
/* TL0 virtual-address watchpoint: unexpected, fatal in kernel mode. */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
1773
/* TL1 virtual-address watchpoint: dump the trap-level stack, then die. */
void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}
1779
/* TL1 tag overflow: dump the trap-level stack, then die. */
void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
1785
/* Emulate reading the v8 %psr: return a PSR image derived from
 * TSTATE in %i0 and skip past the trapping instruction.
 */
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc   = regs->tnpc;
	regs->tnpc += 4;
	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
1796
/* Boot-time trap initialization for the boot CPU: attach the
 * current task to init_mm.
 */
void trap_init(void)
{
	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* NOTE: Other cpus have this done as they are started
	 *       up on SMP.
	 */
}
1807