1/* $Id: traps.c,v 1.1.1.1 2007/08/03 18:52:18 Exp $
2 * arch/sparc64/kernel/traps.c
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8/*
9 * I like traps on v9, :))))
10 */
11
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/kallsyms.h>
16#include <linux/signal.h>
17#include <linux/smp.h>
18#include <linux/mm.h>
19#include <linux/init.h>
20#include <linux/kdebug.h>
21
22#include <asm/smp.h>
23#include <asm/delay.h>
24#include <asm/system.h>
25#include <asm/ptrace.h>
26#include <asm/oplib.h>
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/unistd.h>
30#include <asm/uaccess.h>
31#include <asm/fpumacro.h>
32#include <asm/lsu.h>
33#include <asm/dcu.h>
34#include <asm/estate.h>
35#include <asm/chafsr.h>
36#include <asm/sfafsr.h>
37#include <asm/psrcompat.h>
38#include <asm/processor.h>
39#include <asm/timer.h>
40#include <asm/head.h>
41#ifdef CONFIG_KMOD
42#include <linux/kmod.h>
43#endif
44#include <asm/prom.h>
45
46
47/* When an irrecoverable trap occurs at tl > 0, the trap entry
48 * code logs the trap state registers at every level in the trap
49 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
50 * is as follows:
51 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* Saved TSTATE at this trap level */
		unsigned long tpc;	/* Trap PC */
		unsigned long tnpc;	/* Trap next-PC */
		unsigned long tt;	/* Trap type register value */
	} trapstack[4];			/* One record per trap level */
	unsigned long tl;		/* Trap level at which the error hit */
};
61
62static void dump_tl1_traplog(struct tl1_traplog *p)
63{
64	int i, limit;
65
66	printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
67	       "dumping track stack.\n", p->tl);
68
69	limit = (tlb_type == hypervisor) ? 2 : 4;
70	for (i = 0; i < limit; i++) {
71		printk(KERN_EMERG
72		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
73		       "TNPC[%016lx] TT[%lx]\n",
74		       i + 1,
75		       p->trapstack[i].tstate, p->trapstack[i].tpc,
76		       p->trapstack[i].tnpc, p->trapstack[i].tt);
77		print_symbol("TRAPLOG: TPC<%s>\n", p->trapstack[i].tpc);
78	}
79}
80
/* Entry point for the debug-call trap: simply give any registered
 * die-chain notifiers (e.g. a kernel debugger) a chance to see it.
 */
void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}
85
86void bad_trap(struct pt_regs *regs, long lvl)
87{
88	char buffer[32];
89	siginfo_t info;
90
91	if (notify_die(DIE_TRAP, "bad trap", regs,
92		       0, lvl, SIGTRAP) == NOTIFY_STOP)
93		return;
94
95	if (lvl < 0x100) {
96		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
97		die_if_kernel(buffer, regs);
98	}
99
100	lvl -= 0x100;
101	if (regs->tstate & TSTATE_PRIV) {
102		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
103		die_if_kernel(buffer, regs);
104	}
105	if (test_thread_flag(TIF_32BIT)) {
106		regs->tpc &= 0xffffffff;
107		regs->tnpc &= 0xffffffff;
108	}
109	info.si_signo = SIGILL;
110	info.si_errno = 0;
111	info.si_code = ILL_ILLTRP;
112	info.si_addr = (void __user *)regs->tpc;
113	info.si_trapno = lvl;
114	force_sig_info(SIGILL, &info, current);
115}
116
117void bad_trap_tl1(struct pt_regs *regs, long lvl)
118{
119	char buffer[32];
120
121	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
122		       0, lvl, SIGTRAP) == NOTIFY_STOP)
123		return;
124
125	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
126
127	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
128	die_if_kernel (buffer, regs);
129}
130
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Verbose BUG() support: report the file/line of the BUG site.
 * bust_spinlocks(1) helps ensure the message reaches the console
 * even if console locks are held.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif
138
139void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
140{
141	siginfo_t info;
142
143	if (notify_die(DIE_TRAP, "instruction access exception", regs,
144		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
145		return;
146
147	if (regs->tstate & TSTATE_PRIV) {
148		printk("spitfire_insn_access_exception: SFSR[%016lx] "
149		       "SFAR[%016lx], going.\n", sfsr, sfar);
150		die_if_kernel("Iax", regs);
151	}
152	if (test_thread_flag(TIF_32BIT)) {
153		regs->tpc &= 0xffffffff;
154		regs->tnpc &= 0xffffffff;
155	}
156	info.si_signo = SIGSEGV;
157	info.si_errno = 0;
158	info.si_code = SEGV_MAPERR;
159	info.si_addr = (void __user *)regs->tpc;
160	info.si_trapno = 0;
161	force_sig_info(SIGSEGV, &info, current);
162}
163
/* Same exception taken at TL > 0: dump the logged trap stack, then
 * reuse the TL0 handler for the actual die/signal logic.
 */
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	/* Trap entry code saved the trap stack right past pt_regs. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}
173
174void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
175{
176	unsigned short type = (type_ctx >> 16);
177	unsigned short ctx  = (type_ctx & 0xffff);
178	siginfo_t info;
179
180	if (notify_die(DIE_TRAP, "instruction access exception", regs,
181		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
182		return;
183
184	if (regs->tstate & TSTATE_PRIV) {
185		printk("sun4v_insn_access_exception: ADDR[%016lx] "
186		       "CTX[%04x] TYPE[%04x], going.\n",
187		       addr, ctx, type);
188		die_if_kernel("Iax", regs);
189	}
190
191	if (test_thread_flag(TIF_32BIT)) {
192		regs->tpc &= 0xffffffff;
193		regs->tnpc &= 0xffffffff;
194	}
195	info.si_signo = SIGSEGV;
196	info.si_errno = 0;
197	info.si_code = SEGV_MAPERR;
198	info.si_addr = (void __user *) addr;
199	info.si_trapno = 0;
200	force_sig_info(SIGSEGV, &info, current);
201}
202
/* Sun4v variant taken at TL > 0: dump the logged trap stack, then
 * defer to the TL0 handler.
 */
void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	/* Trap entry code saved the trap stack right past pt_regs. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}
212
213void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
214{
215	siginfo_t info;
216
217	if (notify_die(DIE_TRAP, "data access exception", regs,
218		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
219		return;
220
221	if (regs->tstate & TSTATE_PRIV) {
222		/* Test if this comes from uaccess places. */
223		const struct exception_table_entry *entry;
224
225		entry = search_exception_tables(regs->tpc);
226		if (entry) {
227			/* Ouch, somebody is trying VM hole tricks on us... */
228#ifdef DEBUG_EXCEPTIONS
229			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
230			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
231			       regs->tpc, entry->fixup);
232#endif
233			regs->tpc = entry->fixup;
234			regs->tnpc = regs->tpc + 4;
235			return;
236		}
237		/* Shit... */
238		printk("spitfire_data_access_exception: SFSR[%016lx] "
239		       "SFAR[%016lx], going.\n", sfsr, sfar);
240		die_if_kernel("Dax", regs);
241	}
242
243	info.si_signo = SIGSEGV;
244	info.si_errno = 0;
245	info.si_code = SEGV_MAPERR;
246	info.si_addr = (void __user *)sfar;
247	info.si_trapno = 0;
248	force_sig_info(SIGSEGV, &info, current);
249}
250
/* Data access exception at TL > 0: dump the logged trap stack, then
 * reuse the TL0 handler.
 */
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	/* Trap entry code saved the trap stack right past pt_regs. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}
260
261void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
262{
263	unsigned short type = (type_ctx >> 16);
264	unsigned short ctx  = (type_ctx & 0xffff);
265	siginfo_t info;
266
267	if (notify_die(DIE_TRAP, "data access exception", regs,
268		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
269		return;
270
271	if (regs->tstate & TSTATE_PRIV) {
272		printk("sun4v_data_access_exception: ADDR[%016lx] "
273		       "CTX[%04x] TYPE[%04x], going.\n",
274		       addr, ctx, type);
275		die_if_kernel("Dax", regs);
276	}
277
278	if (test_thread_flag(TIF_32BIT)) {
279		regs->tpc &= 0xffffffff;
280		regs->tnpc &= 0xffffffff;
281	}
282	info.si_signo = SIGSEGV;
283	info.si_errno = 0;
284	info.si_code = SEGV_MAPERR;
285	info.si_addr = (void __user *) addr;
286	info.si_trapno = 0;
287	force_sig_info(SIGSEGV, &info, current);
288}
289
/* Sun4v data access exception at TL > 0: dump the logged trap stack,
 * then defer to the TL0 handler.
 */
void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	/* Trap entry code saved the trap stack right past pt_regs. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}
299
300#ifdef CONFIG_PCI
301/* This is really pathetic... */
302extern volatile int pci_poke_in_progress;
303extern volatile int pci_poke_cpu;
304extern volatile int pci_poke_faulted;
305#endif
306
/* When access exceptions happen, we must do this.
 * Invalidate every I-cache and D-cache tag, then re-enable both
 * caches (and their parity checking) via the LSU control register.
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	/* Only meaningful on Spitfire-family cpus. */
	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU.  IC/DC enable the caches, IM/DM enable
	 * their parity checking.
	 */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}
332
/* Re-arm all E-state error reporting by writing ESTATE_ERR_ALL to
 * the E-state error enable register.
 */
static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}
341
/* Map the low 8 syndrome bits of a Spitfire UDB error register value
 * to a code which is then handed to prom_getunumber() so the PROM can
 * name the failing memory module (see spitfire_log_udb_syndrome).
 * NOTE(review): the exact encoding of these codes comes from the
 * Spitfire UDB documentation — confirm before modifying.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
376
/* Printed when the PROM cannot map a syndrome to a memory module. */
static char *syndrome_unknown = "<Unknown>";

/* If 'bit' is set in a UDB error register value, decode its ECC
 * syndrome via ecc_syndrome_table and ask the PROM which memory
 * module the AFAR corresponds to, then log both.  The low (UDBL)
 * and high (UDBH) UDB registers are checked independently.
 */
static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		/* prom_getunumber() returns -1 when it cannot identify
		 * the module behind this address/syndrome.
		 */
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

}
409
/* Log a correctable ECC error (CEE) and re-arm error reporting.
 * Correctable errors are informational only; nothing is killed.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{

	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	/* Name the failing memory module, if the PROM can identify it. */
	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
430
431static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
432{
433	siginfo_t info;
434
435	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
436	       "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
437	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
438
439
440	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
441
442	/* We always log it, even if someone is listening for this
443	 * trap.
444	 */
445	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
446		   0, tt, SIGTRAP);
447
448	if (regs->tstate & TSTATE_PRIV) {
449		if (tl1)
450			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
451		die_if_kernel("UE", regs);
452	}
453
454
455	spitfire_clean_and_reenable_l1_caches();
456	spitfire_enable_estate_errors();
457
458	if (test_thread_flag(TIF_32BIT)) {
459		regs->tpc &= 0xffffffff;
460		regs->tnpc &= 0xffffffff;
461	}
462	info.si_signo = SIGBUS;
463	info.si_errno = 0;
464	info.si_code = BUS_OBJERR;
465	info.si_addr = (void *)0;
466	info.si_trapno = 0;
467	force_sig_info(SIGBUS, &info, current);
468}
469
/* Common Spitfire access error handler.  The trap entry code packs
 * the AFSR, the trap type, a TL>1 flag, and both UDB error register
 * values into 'status_encoded'; decode them here and dispatch to the
 * UE and/or CEE loggers.
 */
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	/* A data access error while this cpu is doing a PCI poke is
	 * anticipated: flag the fault for the poke code and skip the
	 * faulting instruction instead of logging an error.
	 */
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				/* Write the CE bit back to ACK it; address
				 * 0x0 selects the high UDB error register.
				 */
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				/* Address 0x18 selects the low UDB error
				 * register.
				 */
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
522
/* Non-zero when the P-cache has been forcibly enabled.
 * NOTE(review): set outside this chunk — confirm against the boot
 * option handling that consults it.
 */
int cheetah_pcache_forced_on;

/* Enable the P-cache plus related DCU features (PE/HPE/SPE/SL bits)
 * on the calling cpu via a read-modify-write of the DCU control
 * register.
 */
void cheetah_enable_pcache(void)
{
	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
541
/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;	/* Phys base of the flush span */
static unsigned long ecache_flush_linesize;	/* Smallest E-cache line size */
static unsigned long ecache_flush_size;		/* 2 x largest E-cache size */
546
/* WARNING: The error trap handlers in assembly know the precise
 *	    layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data	*/
/*0x30*/u64 dcache_index;	/* D-cache index	*/
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid	*/
/*0x40*/u64 dcache_utag;	/* D-cache microtag	*/
/*0x48*/u64 dcache_stag;	/* D-cache snooptag	*/

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode	*/
/*0x90*/u64 icache_index;	/* I-cache index	*/
/*0x98*/u64 icache_tag;		/* I-cache phys tag	*/
/*0xa0*/u64 icache_utag;	/* I-cache microtag	*/
/*0xa8*/u64 icache_stag;	/* I-cache snooptag	*/
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag	*/
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag	*/

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index	*/
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state	*/

/*0xf0*/u64 __pad[32 - 30];	/* Pad the record out to 0x100 bytes */
};
/* Sentinel afsr value marking a log slot as holding no valid data. */
#define CHAFSR_INVALID		((u64)-1L)
581
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

/* One AFSR error bit paired with its human-readable description. */
struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) identifying this error */
	const char *name;	/* Message logged when the bit is set */
};
590
591static const char CHAFSR_PERR_msg[] =
592	"System interface protocol error";
593static const char CHAFSR_IERR_msg[] =
594	"Internal processor error";
595static const char CHAFSR_ISAP_msg[] =
596	"System request parity error on incoming addresss";
597static const char CHAFSR_UCU_msg[] =
598	"Uncorrectable E-cache ECC error for ifetch/data";
599static const char CHAFSR_UCC_msg[] =
600	"SW Correctable E-cache ECC error for ifetch/data";
601static const char CHAFSR_UE_msg[] =
602	"Uncorrectable system bus data ECC error for read";
603static const char CHAFSR_EDU_msg[] =
604	"Uncorrectable E-cache ECC error for stmerge/blkld";
605static const char CHAFSR_EMU_msg[] =
606	"Uncorrectable system bus MTAG error";
607static const char CHAFSR_WDU_msg[] =
608	"Uncorrectable E-cache ECC error for writeback";
609static const char CHAFSR_CPU_msg[] =
610	"Uncorrectable ECC error for copyout";
611static const char CHAFSR_CE_msg[] =
612	"HW corrected system bus data ECC error for read";
613static const char CHAFSR_EDC_msg[] =
614	"HW corrected E-cache ECC error for stmerge/blkld";
615static const char CHAFSR_EMC_msg[] =
616	"HW corrected system bus MTAG ECC error";
617static const char CHAFSR_WDC_msg[] =
618	"HW corrected E-cache ECC error for writeback";
619static const char CHAFSR_CPC_msg[] =
620	"HW corrected ECC error for copyout";
621static const char CHAFSR_TO_msg[] =
622	"Unmapped error from system bus";
623static const char CHAFSR_BERR_msg[] =
624	"Bus error response from system bus";
625static const char CHAFSR_IVC_msg[] =
626	"HW corrected system bus data ECC error for ivec read";
627static const char CHAFSR_IVU_msg[] =
628	"Uncorrectable system bus data ECC error for ivec read";
629static struct afsr_error_table __cheetah_error_table[] = {
630	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
631	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
632	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
633	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
634	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
635	{	CHAFSR_UE,	CHAFSR_UE_msg		},
636	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
637	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
638	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
639	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
640	{	CHAFSR_CE,	CHAFSR_CE_msg		},
641	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
642	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
643	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
644	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
645	{	CHAFSR_TO,	CHAFSR_TO_msg		},
646	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
647	/* These two do not update the AFAR. */
648	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
649	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
650	{	0,		NULL			},
651};
/* Cheetah+ decode table: the plain Cheetah entries plus the extra
 * E-cache tag ECC and prefetch/storequeue error bits (CHPAFSR_*).
 */
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Jalapeno/Serrano decode table: JBUS-specific errors (JPAFSR_*)
 * interleaved with the shared Cheetah bits, again in priority order.
 */
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* This one does not update the AFAR (CHAFSR_IVC is absent
	 * from the jalapeno table).
	 */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Selected at boot by cheetah_ecache_flush_init() according to the
 * cpu implementation (cheetah / cheetah+ / jalapeno).
 */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;	/* Union of all error bits above */

/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;
760
761static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
762{
763	struct cheetah_err_info *p;
764	int cpu = smp_processor_id();
765
766	if (!cheetah_error_log)
767		return NULL;
768
769	p = cheetah_error_log + (cpu * 2);
770	if ((afsr & CHAFSR_TL1) != 0UL)
771		p++;
772
773	return p;
774}
775
776extern unsigned int tl0_icpe[], tl1_icpe[];
777extern unsigned int tl0_dcpe[], tl1_dcpe[];
778extern unsigned int tl0_fecc[], tl1_fecc[];
779extern unsigned int tl0_cee[], tl1_cee[];
780extern unsigned int tl0_iae[], tl1_iae[];
781extern unsigned int tl0_dae[], tl1_dae[];
782extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
783extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
784extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
785extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
786extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
787
788void __init cheetah_ecache_flush_init(void)
789{
790	unsigned long largest_size, smallest_linesize, order, ver;
791	int i, sz;
792
793	/* Scan all cpu device tree nodes, note two values:
794	 * 1) largest E-cache size
795	 * 2) smallest E-cache line size
796	 */
797	largest_size = 0UL;
798	smallest_linesize = ~0UL;
799
800	for (i = 0; i < NR_CPUS; i++) {
801		unsigned long val;
802
803		val = cpu_data(i).ecache_size;
804		if (!val)
805			continue;
806
807		if (val > largest_size)
808			largest_size = val;
809
810		val = cpu_data(i).ecache_line_size;
811		if (val < smallest_linesize)
812			smallest_linesize = val;
813
814	}
815
816	if (largest_size == 0UL || smallest_linesize == ~0UL) {
817		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
818			    "parameters.\n");
819		prom_halt();
820	}
821
822	ecache_flush_size = (2 * largest_size);
823	ecache_flush_linesize = smallest_linesize;
824
825	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
826
827	if (ecache_flush_physbase == ~0UL) {
828		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
829			    "contiguous physical memory.\n",
830			    ecache_flush_size);
831		prom_halt();
832	}
833
834	/* Now allocate error trap reporting scoreboard. */
835	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
836	for (order = 0; order < MAX_ORDER; order++) {
837		if ((PAGE_SIZE << order) >= sz)
838			break;
839	}
840	cheetah_error_log = (struct cheetah_err_info *)
841		__get_free_pages(GFP_KERNEL, order);
842	if (!cheetah_error_log) {
843		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
844			    "error logging scoreboard (%d bytes).\n", sz);
845		prom_halt();
846	}
847	memset(cheetah_error_log, 0, PAGE_SIZE << order);
848
849	/* Mark all AFSRs as invalid so that the trap handler will
850	 * log new new information there.
851	 */
852	for (i = 0; i < 2 * NR_CPUS; i++)
853		cheetah_error_log[i].afsr = CHAFSR_INVALID;
854
855	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
856	if ((ver >> 32) == __JALAPENO_ID ||
857	    (ver >> 32) == __SERRANO_ID) {
858		cheetah_error_table = &__jalapeno_error_table[0];
859		cheetah_afsr_errors = JPAFSR_ERRORS;
860	} else if ((ver >> 32) == 0x003e0015) {
861		cheetah_error_table = &__cheetah_plus_error_table[0];
862		cheetah_afsr_errors = CHPAFSR_ERRORS;
863	} else {
864		cheetah_error_table = &__cheetah_error_table[0];
865		cheetah_afsr_errors = CHAFSR_ERRORS;
866	}
867
868	/* Now patch trap tables. */
869	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
870	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
871	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
872	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
873	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
874	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
875	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
876	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
877	if (tlb_type == cheetah_plus) {
878		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
879		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
880		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
881		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
882	}
883	flushi(PAGE_OFFSET);
884}
885
/* Displacement-flush the entire E-cache by reading through the
 * pre-computed flush span (2 x cache size) with ASI_PHYS_USE_EC.
 * The loop is hand-scheduled: the ldxa sits in the branch delay slot.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
899
/* Displacement-flush a single E-cache line.  The physical address is
 * folded into the flush span, and both that address and its alias
 * half the span away are loaded to evict the target line.
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	/* 8-byte align, then reduce modulo half the flush span. */
	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
915
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		/* NOTE(review): the (2 << 3) offset selects the tag field
		 * within the ASI_IC_TAG diagnostic address — confirm
		 * against the Cheetah I-cache diagnostic ASI encoding.
		 */
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}
938
/* Flush the I-cache safely: disable it via the DCU control register,
 * run the diagnostic tag clear, then restore the old DCU value
 * (see the coherency caveat above __cheetah_flush_icache).
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
960
/* Flush the D-cache by storing zero to every tag through the
 * ASI_DCACHE_TAG diagnostic ASI.
 */
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}
976
977/* In order to make the even parity correct we must do two things.
978 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
979 * Next, we clear out all 32-bytes of data for that line.  Data of
980 * all-zero + tag parity value of zero == correct parity.
981 */
/* Rewrite every D-cache line so its parity is consistent again; see
 * the comment above for the tag + all-zero-data scheme.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		/* NOTE(review): (addr >> 14) as the utag value — confirm
		 * against the Cheetah+ D-cache utag format.
		 */
		unsigned long tag = (addr >> 14);
		unsigned long line;

		/* Write the utag for this line. */
		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		/* Zero all data words of the line, 8 bytes at a time. */
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}
1009
1010/* Conversion tables used to frob Cheetah AFSR syndrome values into
1011 * something palatable to the memory controller driver get_unumber
1012 * routine.
1013 */
1014#define MT0	137
1015#define MT1	138
1016#define MT2	139
1017#define NONE	254
1018#define MTC0	140
1019#define MTC1	141
1020#define MTC2	142
1021#define MTC3	143
1022#define C0	128
1023#define C1	129
1024#define C2	130
1025#define C3	131
1026#define C4	132
1027#define C5	133
1028#define C6	134
1029#define C7	135
1030#define C8	136
1031#define M2	144
1032#define M3	145
1033#define M4	146
1034#define M	147
/* Indexed by the E_SYND field of the AFSR.  Entries below 128
 * presumably name the failing data bit; the C*/M* codes defined above
 * name check bits and multi-bit error classes — values are consumed
 * by chmc_getunumber().
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* Indexed by the 4-bit M_SYND field of the AFSR (see
 * cheetah_log_errors()).
 */
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
1079
1080/* Return the highest priority error conditon mentioned. */
1081static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
1082{
1083	unsigned long tmp = 0;
1084	int i;
1085
1086	for (i = 0; cheetah_error_table[i].mask; i++) {
1087		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1088			return tmp;
1089	}
1090	return tmp;
1091}
1092
1093static const char *cheetah_get_string(unsigned long bit)
1094{
1095	int i;
1096
1097	for (i = 0; cheetah_error_table[i].mask; i++) {
1098		if ((bit & cheetah_error_table[i].mask) != 0UL)
1099			return cheetah_error_table[i].name;
1100	}
1101	return "???";
1102}
1103
1104extern int chmc_getunumber(int, unsigned long, char *, int);
1105
/* Dump everything known about an asynchronous Cheetah error: raw
 * AFSR/AFAR, trap PC state, decoded M/E syndromes (with the memory
 * module "unumber" when chmc_getunumber() can resolve one), the
 * D/I/E-cache snapshots captured at trap time, and finally every
 * remaining error bit in priority order.  RECOVERABLE only selects
 * the printk severity (KERN_WARNING vs KERN_CRIT).
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	print_symbol("TPC<%s>\n", regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU  | \
			 CHAFSR_UCC | CHAFSR_UCU  | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		/* Decode the E-cache/data syndrome through the table. */
		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		/* Decode the mtag syndrome through the table. */
		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report each remaining error bit, highest priority first. */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1223
/* Re-read the AFSR; if any error bits we care about are set, capture
 * AFSR/AFAR into *LOGP (when non-NULL) and return 1, else return 0.
 * The sticky AFSR error bits are written back (cleared) either way.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Writing the observed bits back to AFSR clears them. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1248
/* Fast-ECC error trap handler.  Flushes the E-cache, snapshots the
 * logged error state, re-enables the caches and error reporting
 * (presumably disabled by the low-level trap entry — the DCU/ESTATE
 * writes below only set bits), then logs and panics if the condition
 * is not recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log buffer yet: this error hit before the error
		 * log was set up, nothing sane to do but halt.
		 */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1334
1335/* Try to fix a correctable error by pushing the line out from
1336 * the E-cache.  Recheck error reporting registers to see if the
1337 * problem is intermittent.
1338 */
/* Try to fix a correctable error by pushing the line out from
 * the E-cache (see block comment above).  Returns:
 *   0 - no new error after the displacement (intermittent problem)
 *   1 - error recurred once but cleared after one more displacement
 *   2 - error still present after retrying
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
			     "andn	%0, %1, %%g1\n\t"
			     "stxa	%%g1, [%%g0] %2\n\t"
			     "membar	#Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "casxa	[%2] %3, %%g0, %%g0\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1399
1400/* Return non-zero if PADDR is a valid physical memory address. */
1401static int cheetah_check_main_memory(unsigned long paddr)
1402{
1403	unsigned long vaddr = PAGE_OFFSET + paddr;
1404
1405	if (vaddr > (unsigned long) high_memory)
1406		return 0;
1407
1408	return kern_addr_valid(vaddr);
1409}
1410
/* Correctable-ECC error trap handler.  Snapshots the logged state,
 * scrubs the offending memory line when AFAR points at main memory,
 * flushes whatever caches the error class requires, re-enables the
 * I-cache and CE reporting, then logs (and panics only on the
 * fatal PERR/IERR/ISAP conditions).
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error before the log buffers were initialized. */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* Scrub the line by displacing it out of the E-cache. */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* A lone EDC/CPC error only needs the one E-cache line
		 * flushed; anything more complex flushes it all.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1507
/* Deferred asynchronous error trap handler (EDU/BERR/UE class).
 * Handles the PCI "poke" probe sequence specially, otherwise logs
 * the error and either recovers by fixing up a faulting kernel
 * uaccess (yanking the bad page out of circulation) or panics.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Clear the sticky error bits; the result is ignored. */
		(void) cheetah_recheck_errors(NULL);

		/* Flag the fault and skip the poking instruction. */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error before the log buffers were initialized. */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* A lone EDU/BERR error only needs the one E-cache line
		 * flushed; anything more complex flushes it all.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the bad page so it is never handed
				 * out again; bail if AFAR is bogus.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1694
1695/* Handle a D/I cache parity error trap.  TYPE is encoded as:
1696 *
1697 * Bit0:	0=dcache,1=icache
1698 * Bit1:	0=recoverable,1=unrecoverable
1699 *
1700 * The hardware has disabled both the I-cache and D-cache in
1701 * the %dcr register.
1702 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* I-cache errors invalidate the I-cache tags; D-cache errors
	 * rewrite every line to restore parity.  The D-cache flush is
	 * done in both cases.
	 */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Bit1 set: the hardware says this one is unrecoverable. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		print_symbol(KERN_EMERG "TPC<%s>\n", regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	print_symbol(KERN_WARNING "TPC<%s>\n", regs->tpc);
}
1736
/* Layout of one sun4v error report queue entry.
 * NOTE(review): field layout presumably matches the hypervisor's
 * error report format — confirm against the sun4v Hypervisor API spec.
 */
struct sun4v_error_entry {
	u64		err_handle;	/* zeroed by us to release the entry */
	u64		err_stick;	/* timestamp of the report */

	u32		err_type;	/* one of SUN4V_ERR_TYPE_* below */
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32		err_attrs;	/* bitmask of SUN4V_ERR_ATTRS_* below */
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64		err_raddr;	/* real address of the error */
	u32		err_size;	/* size of the affected region */
	u16		err_cpu;	/* cpu the error was detected on */
	u16		err_pad;	/* padding, unused */
};
1763
/* Error queue overflow counters: bumped by the overflow handlers
 * below, then reported and reset by sun4v_log_error().
 */
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1766
1767static const char *sun4v_err_type_to_str(u32 type)
1768{
1769	switch (type) {
1770	case SUN4V_ERR_TYPE_UNDEFINED:
1771		return "undefined";
1772	case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1773		return "uncorrected resumable";
1774	case SUN4V_ERR_TYPE_PRECISE_NONRES:
1775		return "precise nonresumable";
1776	case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1777		return "deferred nonresumable";
1778	case SUN4V_ERR_TYPE_WARNING_RES:
1779		return "warning resumable";
1780	default:
1781		return "unknown";
1782	};
1783}
1784
1785extern void __show_regs(struct pt_regs * regs);
1786
/* Pretty-print one sun4v error report entry (already copied out of
 * the queue by the caller), dump the trap registers, and report plus
 * reset the queue-overflow counter OCNT if it is non-zero.  PFX
 * carries the log level and the "RESUMABLE"/"NON-RESUMABLE" label.
 */
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	__show_regs(regs);

	/* Report and clear any queue overflows counted since the
	 * last log (see sun4v_resum_overflow()).
	 */
	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}
1829
1830/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1831 * Log the event and clear the first word of the entry.
1832 */
1833void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1834{
1835	struct sun4v_error_entry *ent, local_copy;
1836	struct trap_per_cpu *tb;
1837	unsigned long paddr;
1838	int cpu;
1839
1840	cpu = get_cpu();
1841
1842	tb = &trap_block[cpu];
1843	paddr = tb->resum_kernel_buf_pa + offset;
1844	ent = __va(paddr);
1845
1846	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1847
1848	/* We have a local copy now, so release the entry.  */
1849	ent->err_handle = 0;
1850	wmb();
1851
1852	put_cpu();
1853
1854	if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1855		/* If err_type is 0x4, it's a powerdown request.  Do
1856		 * not do the usual resumable error log because that
1857		 * makes it look like some abnormal error.
1858		 */
1859		printk(KERN_INFO "Power down request...\n");
1860		kill_cad_pid(SIGINT, 1);
1861		return;
1862	}
1863
1864	sun4v_log_error(regs, &local_copy, cpu,
1865			KERN_ERR "RESUMABLE ERROR",
1866			&sun4v_resum_oflow_cnt);
1867}
1868
1869/* If we try to printk() we'll probably make matters worse, by trying
1870 * to retake locks this cpu already holds or causing more errors. So
1871 * just bump a counter, and we'll report these counter bumps above.
1872 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	/* Just count it; sun4v_log_error() reports and resets this. */
	atomic_inc(&sun4v_resum_oflow_cnt);
}
1877
1878/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1879 * Log the event, clear the first word of the entry, and die.
1880 */
/* Handle one entry from the per-cpu non-resumable error queue at
 * OFFSET: copy it out, release the queue slot, special-case the PCI
 * poke probe sequence, otherwise log and panic.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry.  */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		/* Flag the fault and skip the poking instruction. */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}
1918
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* Counter is reported by sun4v_log_error() on the next
	 * successfully logged non-resumable error.
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}
1927
/* Filled in by the low-level sun4v ITLB trap code before it branches
 * here; presumably copied from the hypervisor fault status -- verify
 * against the trap-table assembler that stores them.
 */
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

/* Report a fatal sun4v instruction-TLB error and halt into the PROM.
 * If the error was taken at trap level > 1, the per-level trap state
 * log saved after pt_regs is dumped first.
 */
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	print_symbol(KERN_EMERG "SUN4V-ITLB: TPC<%s>\n", regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	/* Unrecoverable: drop into firmware. */
	prom_halt();
}
1948
/* Filled in by the low-level sun4v DTLB trap code before it branches
 * here; presumably copied from the hypervisor fault status -- verify
 * against the trap-table assembler that stores them.
 */
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

/* Report a fatal sun4v data-TLB error and halt into the PROM.
 * If the error was taken at trap level > 1, the per-level trap state
 * log saved after pt_regs is dumped first.
 */
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	print_symbol(KERN_EMERG "SUN4V-DTLB: TPC<%s>\n", regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	/* Unrecoverable: drop into firmware. */
	prom_halt();
}
1969
/* Log a failed hypervisor TLB operation; 'err' is the HV call status
 * and 'op' identifies which TLB operation was attempted.
 */
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}
1975
/* Same as hypervisor_tlbop_error(), but for TLB operations issued
 * from a cross-call context.
 */
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}
1981
1982void do_fpe_common(struct pt_regs *regs)
1983{
1984	if (regs->tstate & TSTATE_PRIV) {
1985		regs->tpc = regs->tnpc;
1986		regs->tnpc += 4;
1987	} else {
1988		unsigned long fsr = current_thread_info()->xfsr[0];
1989		siginfo_t info;
1990
1991		if (test_thread_flag(TIF_32BIT)) {
1992			regs->tpc &= 0xffffffff;
1993			regs->tnpc &= 0xffffffff;
1994		}
1995		info.si_signo = SIGFPE;
1996		info.si_errno = 0;
1997		info.si_addr = (void __user *)regs->tpc;
1998		info.si_trapno = 0;
1999		info.si_code = __SI_FAULT;
2000		if ((fsr & 0x1c000) == (1 << 14)) {
2001			if (fsr & 0x10)
2002				info.si_code = FPE_FLTINV;
2003			else if (fsr & 0x08)
2004				info.si_code = FPE_FLTOVF;
2005			else if (fsr & 0x04)
2006				info.si_code = FPE_FLTUND;
2007			else if (fsr & 0x02)
2008				info.si_code = FPE_FLTDIV;
2009			else if (fsr & 0x01)
2010				info.si_code = FPE_FLTRES;
2011		}
2012		force_sig_info(SIGFPE, &info, current);
2013	}
2014}
2015
2016void do_fpieee(struct pt_regs *regs)
2017{
2018	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
2019		       0, 0x24, SIGFPE) == NOTIFY_STOP)
2020		return;
2021
2022	do_fpe_common(regs);
2023}
2024
2025extern int do_mathemu(struct pt_regs *, struct fpustate *);
2026
2027void do_fpother(struct pt_regs *regs)
2028{
2029	struct fpustate *f = FPUSTATE;
2030	int ret = 0;
2031
2032	if (notify_die(DIE_TRAP, "fpu exception other", regs,
2033		       0, 0x25, SIGFPE) == NOTIFY_STOP)
2034		return;
2035
2036	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2037	case (2 << 14): /* unfinished_FPop */
2038	case (3 << 14): /* unimplemented_FPop */
2039		ret = do_mathemu(regs, f);
2040		break;
2041	}
2042	if (ret)
2043		return;
2044	do_fpe_common(regs);
2045}
2046
2047void do_tof(struct pt_regs *regs)
2048{
2049	siginfo_t info;
2050
2051	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2052		       0, 0x26, SIGEMT) == NOTIFY_STOP)
2053		return;
2054
2055	if (regs->tstate & TSTATE_PRIV)
2056		die_if_kernel("Penguin overflow trap from kernel mode", regs);
2057	if (test_thread_flag(TIF_32BIT)) {
2058		regs->tpc &= 0xffffffff;
2059		regs->tnpc &= 0xffffffff;
2060	}
2061	info.si_signo = SIGEMT;
2062	info.si_errno = 0;
2063	info.si_code = EMT_TAGOVF;
2064	info.si_addr = (void __user *)regs->tpc;
2065	info.si_trapno = 0;
2066	force_sig_info(SIGEMT, &info, current);
2067}
2068
2069void do_div0(struct pt_regs *regs)
2070{
2071	siginfo_t info;
2072
2073	if (notify_die(DIE_TRAP, "integer division by zero", regs,
2074		       0, 0x28, SIGFPE) == NOTIFY_STOP)
2075		return;
2076
2077	if (regs->tstate & TSTATE_PRIV)
2078		die_if_kernel("TL0: Kernel divide by zero.", regs);
2079	if (test_thread_flag(TIF_32BIT)) {
2080		regs->tpc &= 0xffffffff;
2081		regs->tnpc &= 0xffffffff;
2082	}
2083	info.si_signo = SIGFPE;
2084	info.si_errno = 0;
2085	info.si_code = FPE_INTDIV;
2086	info.si_addr = (void __user *)regs->tpc;
2087	info.si_trapno = 0;
2088	force_sig_info(SIGFPE, &info, current);
2089}
2090
/* Dump the instruction words around a kernel fault PC: three words
 * before through five words after, with the faulting word (i == 0)
 * bracketed as <xxxxxxxx>.  Bails out on a misaligned PC.  Note this
 * dereferences pc[-3..5] directly -- it relies on the surrounding
 * kernel text being mapped, which holds for the die path it is
 * called from.
 */
void instruction_dump (unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
	printk("\n");
}
2103
2104static void user_instruction_dump (unsigned int __user *pc)
2105{
2106	int i;
2107	unsigned int buf[9];
2108
2109	if ((((unsigned long) pc) & 3))
2110		return;
2111
2112	if (copy_from_user(buf, pc - 3, sizeof(buf)))
2113		return;
2114
2115	printk("Instruction DUMP:");
2116	for (i = 0; i < 9; i++)
2117		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2118	printk("\n");
2119}
2120
2121void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2122{
2123	unsigned long pc, fp, thread_base, ksp;
2124	void *tp = task_stack_page(tsk);
2125	struct reg_window *rw;
2126	int count = 0;
2127
2128	ksp = (unsigned long) _ksp;
2129
2130	if (tp == current_thread_info())
2131		flushw_all();
2132
2133	fp = ksp + STACK_BIAS;
2134	thread_base = (unsigned long) tp;
2135
2136	printk("Call Trace:");
2137#ifdef CONFIG_KALLSYMS
2138	printk("\n");
2139#endif
2140	do {
2141		/* Bogus frame pointer? */
2142		if (fp < (thread_base + sizeof(struct thread_info)) ||
2143		    fp >= (thread_base + THREAD_SIZE))
2144			break;
2145		rw = (struct reg_window *)fp;
2146		pc = rw->ins[7];
2147		printk(" [%016lx] ", pc);
2148		print_symbol("%s\n", pc);
2149		fp = rw->ins[6] + STACK_BIAS;
2150	} while (++count < 16);
2151#ifndef CONFIG_KALLSYMS
2152	printk("\n");
2153#endif
2154}
2155
/* Print the current kernel call trace, starting from this cpu's
 * current frame pointer.
 */
void dump_stack(void)
{
	unsigned long *ksp;

	/* Grab %fp directly; show_stack() applies STACK_BIAS. */
	__asm__ __volatile__("mov	%%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);
2166
2167static inline int is_kernel_stack(struct task_struct *task,
2168				  struct reg_window *rw)
2169{
2170	unsigned long rw_addr = (unsigned long) rw;
2171	unsigned long thread_base, thread_end;
2172
2173	if (rw_addr < PAGE_OFFSET) {
2174		if (task != &init_task)
2175			return 0;
2176	}
2177
2178	thread_base = (unsigned long) task_stack_page(task);
2179	thread_end = thread_base + sizeof(union thread_union);
2180	if (rw_addr >= thread_base &&
2181	    rw_addr < thread_end &&
2182	    !(rw_addr & 0x7UL))
2183		return 1;
2184
2185	return 0;
2186}
2187
2188static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2189{
2190	unsigned long fp = rw->ins[6];
2191
2192	if (!fp)
2193		return NULL;
2194
2195	return (struct reg_window *) (fp + STACK_BIAS);
2196}
2197
/* Terminate the current task with an oops report.  Despite the name
 * this is called for user-mode faults too: a kernel-mode trap
 * (TSTATE_PRIV set) exits with SIGKILL after dumping a backtrace, a
 * user-mode one exits with SIGSEGV.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	extern void smp_report_regs(void);
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Spill all register windows to the stack so the backtrace
	 * below sees up-to-date frames.
	 */
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30&&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s", rw->ins[7]);
			printk("\n");

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
	/* Never returns: the faulting task dies here. */
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
2243
2244#define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
2245#define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))
2246
2247extern int handle_popc(u32 insn, struct pt_regs *regs);
2248extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2249extern int vis_emul(struct pt_regs *, unsigned int);
2250
2251void do_illegal_instruction(struct pt_regs *regs)
2252{
2253	unsigned long pc = regs->tpc;
2254	unsigned long tstate = regs->tstate;
2255	u32 insn;
2256	siginfo_t info;
2257
2258	if (notify_die(DIE_TRAP, "illegal instruction", regs,
2259		       0, 0x10, SIGILL) == NOTIFY_STOP)
2260		return;
2261
2262	if (tstate & TSTATE_PRIV)
2263		die_if_kernel("Kernel illegal instruction", regs);
2264	if (test_thread_flag(TIF_32BIT))
2265		pc = (u32)pc;
2266	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2267		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2268			if (handle_popc(insn, regs))
2269				return;
2270		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2271			if (handle_ldf_stq(insn, regs))
2272				return;
2273		} else if (tlb_type == hypervisor) {
2274			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2275				if (!vis_emul(regs, insn))
2276					return;
2277			} else {
2278				struct fpustate *f = FPUSTATE;
2279
2280				if (do_mathemu(regs, f))
2281					return;
2282			}
2283		}
2284	}
2285	info.si_signo = SIGILL;
2286	info.si_errno = 0;
2287	info.si_code = ILL_ILLOPC;
2288	info.si_addr = (void __user *)pc;
2289	info.si_trapno = 0;
2290	force_sig_info(SIGILL, &info, current);
2291}
2292
2293extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2294
2295void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2296{
2297	siginfo_t info;
2298
2299	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2300		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2301		return;
2302
2303	if (regs->tstate & TSTATE_PRIV) {
2304		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2305		return;
2306	}
2307	info.si_signo = SIGBUS;
2308	info.si_errno = 0;
2309	info.si_code = BUS_ADRALN;
2310	info.si_addr = (void __user *)sfar;
2311	info.si_trapno = 0;
2312	force_sig_info(SIGBUS, &info, current);
2313}
2314
2315void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2316{
2317	siginfo_t info;
2318
2319	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2320		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2321		return;
2322
2323	if (regs->tstate & TSTATE_PRIV) {
2324		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2325		return;
2326	}
2327	info.si_signo = SIGBUS;
2328	info.si_errno = 0;
2329	info.si_code = BUS_ADRALN;
2330	info.si_addr = (void __user *) addr;
2331	info.si_trapno = 0;
2332	force_sig_info(SIGBUS, &info, current);
2333}
2334
2335void do_privop(struct pt_regs *regs)
2336{
2337	siginfo_t info;
2338
2339	if (notify_die(DIE_TRAP, "privileged operation", regs,
2340		       0, 0x11, SIGILL) == NOTIFY_STOP)
2341		return;
2342
2343	if (test_thread_flag(TIF_32BIT)) {
2344		regs->tpc &= 0xffffffff;
2345		regs->tnpc &= 0xffffffff;
2346	}
2347	info.si_signo = SIGILL;
2348	info.si_errno = 0;
2349	info.si_code = ILL_PRVOPC;
2350	info.si_addr = (void __user *)regs->tpc;
2351	info.si_trapno = 0;
2352	force_sig_info(SIGILL, &info, current);
2353}
2354
/* Privileged action trap: handled identically to a privileged
 * opcode.
 */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
2359
/* Trap level 1 stuff or other traps we should never see...
 *
 * Each _tl1 handler below is entered for a trap taken at trap level
 * > 0, which is always fatal: dump the per-level trap state log
 * (saved by the trap entry code directly after pt_regs, see
 * tl1_traplog) and die.  The plain TL0 variants are traps that
 * should never reach C code at all.
 */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2459
/* Return a SPARC V8-style PSR image built from TSTATE in %o0 and
 * step past the trapping instruction -- presumably backing the v8
 * "rd %psr" compat emulation; verify against the trap table entry
 * that dispatches here.
 */
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc   = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
2470
/* Per-cpu trap state, indexed by cpu number; also accessed from the
 * low-level trap entry assembler (see the offset checks in
 * trap_init() below).
 */
struct trap_per_cpu trap_block[NR_CPUS];

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void init_cur_cpu_trap(struct thread_info *t)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = t;
	p->pgd_paddr = 0;
}
2484
/* These are deliberately declared but never defined: each call below
 * sits under a compile-time-constant offset comparison, so when the
 * asm-offsets match the calls are dead-code eliminated.  If an
 * offset drifts out of sync with its structure layout, the surviving
 * reference fails at link time.
 */
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check. */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
	    (TRAP_PER_CPU_TSB_HUGE !=
	     offsetof(struct trap_per_cpu, tsb_huge)) ||
	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
	    (TRAP_PER_CPU_IRQ_WORKLIST !=
	     offsetof(struct trap_per_cpu, irq_worklist)) ||
	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
	    (TRAP_PER_CPU_RESUM_QMASK !=
	     offsetof(struct trap_per_cpu, resum_qmask)) ||
	    (TRAP_PER_CPU_NONRESUM_QMASK !=
	     offsetof(struct trap_per_cpu, nonresum_qmask)))
		trap_per_cpu_offsets_are_bolixed_dave();

	if ((TSB_CONFIG_TSB !=
	     offsetof(struct tsb_config, tsb)) ||
	    (TSB_CONFIG_RSS_LIMIT !=
	     offsetof(struct tsb_config, tsb_rss_limit)) ||
	    (TSB_CONFIG_NENTRIES !=
	     offsetof(struct tsb_config, tsb_nentries)) ||
	    (TSB_CONFIG_REG_VAL !=
	     offsetof(struct tsb_config, tsb_reg_val)) ||
	    (TSB_CONFIG_MAP_VADDR !=
	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
	    (TSB_CONFIG_MAP_PTE !=
	     offsetof(struct tsb_config, tsb_map_pte)))
		tsb_config_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}
2578