• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/arch/sparc/kernel/
1/* arch/sparc64/kernel/traps.c
2 *
3 * Copyright (C) 1995,1997,2008,2009 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
5 */
6
7/*
8 * I like traps on v9, :))))
9 */
10
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/linkage.h>
14#include <linux/kernel.h>
15#include <linux/signal.h>
16#include <linux/smp.h>
17#include <linux/mm.h>
18#include <linux/init.h>
19#include <linux/kdebug.h>
20#include <linux/ftrace.h>
21#include <linux/gfp.h>
22
23#include <asm/smp.h>
24#include <asm/delay.h>
25#include <asm/system.h>
26#include <asm/ptrace.h>
27#include <asm/oplib.h>
28#include <asm/page.h>
29#include <asm/pgtable.h>
30#include <asm/unistd.h>
31#include <asm/uaccess.h>
32#include <asm/fpumacro.h>
33#include <asm/lsu.h>
34#include <asm/dcu.h>
35#include <asm/estate.h>
36#include <asm/chafsr.h>
37#include <asm/sfafsr.h>
38#include <asm/psrcompat.h>
39#include <asm/processor.h>
40#include <asm/timer.h>
41#include <asm/head.h>
42#include <asm/prom.h>
43#include <asm/memctrl.h>
44
45#include "entry.h"
46#include "kstack.h"
47
48/* When an irrecoverable trap occurs at tl > 0, the trap entry
49 * code logs the trap state registers at every level in the trap
50 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
51 * is as follows:
52 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* %tstate saved at this trap level */
		unsigned long tpc;	/* trap PC */
		unsigned long tnpc;	/* trap next-PC */
		unsigned long tt;	/* trap type register */
	} trapstack[4];
	unsigned long tl;	/* trap level at which the error occurred */
};
62
/* Print the saved trap state (tstate/tpc/tnpc/tt) for every trap
 * level recorded in the traplog laid down by the tl>0 trap entry
 * code.  Only two levels are saved on sun4v (hypervisor) machines,
 * four everywhere else.
 */
static void dump_tl1_traplog(struct tl1_traplog *p)
{
	int i, limit;

	printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
	       "dumping track stack.\n", p->tl);

	limit = (tlb_type == hypervisor) ? 2 : 4;
	for (i = 0; i < limit; i++) {
		printk(KERN_EMERG
		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
		       "TNPC[%016lx] TT[%lx]\n",
		       i + 1,
		       p->trapstack[i].tstate, p->trapstack[i].tpc,
		       p->trapstack[i].tnpc, p->trapstack[i].tt);
		/* %pS resolves the TPC to a kernel symbol name. */
		printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
	}
}
81
82void bad_trap(struct pt_regs *regs, long lvl)
83{
84	char buffer[32];
85	siginfo_t info;
86
87	if (notify_die(DIE_TRAP, "bad trap", regs,
88		       0, lvl, SIGTRAP) == NOTIFY_STOP)
89		return;
90
91	if (lvl < 0x100) {
92		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
93		die_if_kernel(buffer, regs);
94	}
95
96	lvl -= 0x100;
97	if (regs->tstate & TSTATE_PRIV) {
98		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
99		die_if_kernel(buffer, regs);
100	}
101	if (test_thread_flag(TIF_32BIT)) {
102		regs->tpc &= 0xffffffff;
103		regs->tnpc &= 0xffffffff;
104	}
105	info.si_signo = SIGILL;
106	info.si_errno = 0;
107	info.si_code = ILL_ILLTRP;
108	info.si_addr = (void __user *)regs->tpc;
109	info.si_trapno = lvl;
110	force_sig_info(SIGILL, &info, current);
111}
112
113void bad_trap_tl1(struct pt_regs *regs, long lvl)
114{
115	char buffer[32];
116
117	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
118		       0, lvl, SIGTRAP) == NOTIFY_STOP)
119		return;
120
121	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
122
123	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
124	die_if_kernel (buffer, regs);
125}
126
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Report the source location of a failed BUG() assertion before the
 * die sequence takes over. */
void do_BUG(const char *file, int line)
{
	/* Make sure the report gets out even if console locks are held. */
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif
135
/* Optional callback (registered by a memory controller driver) that
 * translates an ECC syndrome + physical address into a DIMM label.
 * dimm_handler_lock protects registration and use of the callback. */
static DEFINE_SPINLOCK(dimm_handler_lock);
static dimm_printer_t dimm_handler;
138
139static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
140{
141	unsigned long flags;
142	int ret = -ENODEV;
143
144	spin_lock_irqsave(&dimm_handler_lock, flags);
145	if (dimm_handler) {
146		ret = dimm_handler(synd_code, paddr, buf, buflen);
147	} else if (tlb_type == spitfire) {
148		if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
149			ret = -EINVAL;
150		else
151			ret = 0;
152	} else
153		ret = -ENODEV;
154	spin_unlock_irqrestore(&dimm_handler_lock, flags);
155
156	return ret;
157}
158
159int register_dimm_printer(dimm_printer_t func)
160{
161	unsigned long flags;
162	int ret = 0;
163
164	spin_lock_irqsave(&dimm_handler_lock, flags);
165	if (!dimm_handler)
166		dimm_handler = func;
167	else
168		ret = -EEXIST;
169	spin_unlock_irqrestore(&dimm_handler_lock, flags);
170
171	return ret;
172}
173EXPORT_SYMBOL_GPL(register_dimm_printer);
174
175void unregister_dimm_printer(dimm_printer_t func)
176{
177	unsigned long flags;
178
179	spin_lock_irqsave(&dimm_handler_lock, flags);
180	if (dimm_handler == func)
181		dimm_handler = NULL;
182	spin_unlock_irqrestore(&dimm_handler_lock, flags);
183}
184EXPORT_SYMBOL_GPL(unregister_dimm_printer);
185
/* Instruction access exception on spitfire, taken at TL0.  sfsr/sfar
 * are the I-MMU Synchronous Fault Status/Address register values.
 * Fatal in kernel mode; user faults get SIGSEGV at the faulting PC.
 */
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	/* Compat tasks only see the low 32 bits of the PC. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
210
/* TL>0 variant: dump the saved trap stack, then handle as at TL0. */
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}
220
/* Instruction access exception on sun4v, taken at TL0.  The
 * hypervisor packs the fault type into the high 16 bits of type_ctx
 * and the MMU context into the low 16 bits.  Fatal in kernel mode;
 * user faults get SIGSEGV at the faulting address.
 */
void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	/* Compat tasks only see the low 32 bits of the PC. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
249
/* TL>0 variant: dump the saved trap stack, then handle as at TL0. */
void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}
259
/* Data access exception on spitfire, taken at TL0.  sfsr/sfar are
 * the D-MMU Synchronous Fault Status/Address register values.  A
 * kernel-mode fault is recoverable only if the faulting PC has an
 * exception-table fixup (i.e. it came from a uaccess routine);
 * otherwise it is fatal.  User faults get SIGSEGV at the fault
 * address.
 */
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Resume at the fixup stub instead of the fault. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
297
/* TL>0 variant: dump the saved trap stack, then handle as at TL0. */
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}
307
/* Data access exception on sun4v, taken at TL0.  The hypervisor
 * packs the fault type into the high 16 bits of type_ctx and the MMU
 * context into the low 16 bits.  Kernel-mode faults are recoverable
 * via the exception table (uaccess); otherwise fatal.  User faults
 * get SIGSEGV at the fault address.
 */
void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Resume at the fixup stub instead of the fault. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	/* Compat tasks only see the low 32 bits of the PC. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
351
/* TL>0 variant: dump the saved trap stack, then handle as at TL0. */
void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}
361
362#ifdef CONFIG_PCI
363#include "pci_impl.h"
364#endif
365
/* When access exceptions happen, we must do this: invalidate every
 * I-cache and D-cache tag, then turn the caches (and their parity
 * checking) back on via the LSU control register.  Spitfire only.
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}
391
/* Re-arm all error reporting in the E-state error enable register
 * after an error trap has been serviced. */
static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}
400
/* Maps the low 8 bits of a UDB error register (the ECC syndrome) to a
 * syndrome code that sprintf_dimm()/prom_getunumber() can translate
 * into a DIMM label.  Indexed by syndrome byte; see
 * spitfire_log_udb_syndrome(). */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
435
/* Fallback DIMM name used when the syndrome cannot be resolved. */
static char *syndrome_unknown = "<Unknown>";
437
438static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
439{
440	unsigned short scode;
441	char memmod_str[64], *p;
442
443	if (udbl & bit) {
444		scode = ecc_syndrome_table[udbl & 0xff];
445		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
446			p = syndrome_unknown;
447		else
448			p = memmod_str;
449		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
450		       "Memory Module \"%s\"\n",
451		       smp_processor_id(), scode, p);
452	}
453
454	if (udbh & bit) {
455		scode = ecc_syndrome_table[udbh & 0xff];
456		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
457			p = syndrome_unknown;
458		else
459			p = memmod_str;
460		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
461		       "Memory Module \"%s\"\n",
462		       smp_processor_id(), scode, p);
463	}
464
465}
466
/* Handle a Correctable ECC Error: log the AFSR/AFAR and DIMM
 * syndrome, notify listeners, and re-arm error reporting.  Hardware
 * already corrected the data, so no signal is delivered.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{

	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
487
488static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
489{
490	siginfo_t info;
491
492	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
493	       "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
494	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
495
496
497	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
498
499	/* We always log it, even if someone is listening for this
500	 * trap.
501	 */
502	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
503		   0, tt, SIGTRAP);
504
505	if (regs->tstate & TSTATE_PRIV) {
506		if (tl1)
507			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
508		die_if_kernel("UE", regs);
509	}
510
511
512	spitfire_clean_and_reenable_l1_caches();
513	spitfire_enable_estate_errors();
514
515	if (test_thread_flag(TIF_32BIT)) {
516		regs->tpc &= 0xffffffff;
517		regs->tnpc &= 0xffffffff;
518	}
519	info.si_signo = SIGBUS;
520	info.si_errno = 0;
521	info.si_code = BUS_OBJERR;
522	info.si_addr = (void *)0;
523	info.si_trapno = 0;
524	force_sig_info(SIGBUS, &info, current);
525}
526
/* Common entry for spitfire access-error traps.  The assembler trap
 * stub packs AFSR, trap type, TL>1 flag, and both UDB error registers
 * into status_encoded; unpack them and dispatch to the UE and/or CEE
 * handlers.  A fault during a PCI config-space poke is absorbed by
 * flagging pci_poke_faulted and skipping the faulting instruction.
 */
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		/* Expected fault from a config-space probe: skip it. */
		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
579
/* Nonzero forces the P-Cache to stay enabled; presumably set from a
 * boot option elsewhere -- verify against the command-line parser. */
int cheetah_pcache_forced_on;

/* Enable the P-Cache and prefetch enables on this cpu by setting the
 * corresponding bits in the DCU control register. */
void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
598
/* Cheetah error trap handling. */
/* Physical base, line size, and total span of the memory region used
 * to flush the E-cache by displacement; set up at boot by
 * cheetah_ecache_flush_init(). */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;
603
604/* This table is ordered in priority of errors and matches the
605 * AFAR overwrite policy as well.
606 */
607
/* One AFSR error bit and its human-readable description. */
struct afsr_error_table {
	unsigned long mask;
	const char *name;
};
612
/* AFSR error-bit descriptions shared by all cheetah variants. */
static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming addresss";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
/* Decode table for plain cheetah; ordered by error priority to match
 * the AFAR overwrite policy (see comment above). */
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Additional AFSR error-bit descriptions for cheetah-plus. */
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
/* Decode table for cheetah-plus; priority-ordered like the others. */
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Additional AFSR error-bit descriptions for jalapeno/serrano. */
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
/* Decode table for jalapeno/serrano; priority-ordered. */
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* These two do not update the AFAR. */
	/* NOTE(review): only IVU is listed here despite the "two"
	 * above; jalapeno apparently has no IVC entry -- verify. */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Decode table and AFSR error mask for this cpu type; selected at
 * boot by cheetah_ecache_flush_init(). */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* Per-cpu error scoreboard: two cheetah_err_info slots per cpu, one
 * for TL0 errors and one for TL>0 errors. */
struct cheetah_err_info *cheetah_error_log;
778
779static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
780{
781	struct cheetah_err_info *p;
782	int cpu = smp_processor_id();
783
784	if (!cheetah_error_log)
785		return NULL;
786
787	p = cheetah_error_log + (cpu * 2);
788	if ((afsr & CHAFSR_TL1) != 0UL)
789		p++;
790
791	return p;
792}
793
/* Trap table slots (tl0_*/tl1_*) and the replacement trap vectors
 * (cheetah_*_trap_vector*), all defined in assembler; patched
 * together by cheetah_ecache_flush_init() below. */
extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
805
806void __init cheetah_ecache_flush_init(void)
807{
808	unsigned long largest_size, smallest_linesize, order, ver;
809	int i, sz;
810
811	/* Scan all cpu device tree nodes, note two values:
812	 * 1) largest E-cache size
813	 * 2) smallest E-cache line size
814	 */
815	largest_size = 0UL;
816	smallest_linesize = ~0UL;
817
818	for (i = 0; i < NR_CPUS; i++) {
819		unsigned long val;
820
821		val = cpu_data(i).ecache_size;
822		if (!val)
823			continue;
824
825		if (val > largest_size)
826			largest_size = val;
827
828		val = cpu_data(i).ecache_line_size;
829		if (val < smallest_linesize)
830			smallest_linesize = val;
831
832	}
833
834	if (largest_size == 0UL || smallest_linesize == ~0UL) {
835		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
836			    "parameters.\n");
837		prom_halt();
838	}
839
840	ecache_flush_size = (2 * largest_size);
841	ecache_flush_linesize = smallest_linesize;
842
843	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
844
845	if (ecache_flush_physbase == ~0UL) {
846		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
847			    "contiguous physical memory.\n",
848			    ecache_flush_size);
849		prom_halt();
850	}
851
852	/* Now allocate error trap reporting scoreboard. */
853	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
854	for (order = 0; order < MAX_ORDER; order++) {
855		if ((PAGE_SIZE << order) >= sz)
856			break;
857	}
858	cheetah_error_log = (struct cheetah_err_info *)
859		__get_free_pages(GFP_KERNEL, order);
860	if (!cheetah_error_log) {
861		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
862			    "error logging scoreboard (%d bytes).\n", sz);
863		prom_halt();
864	}
865	memset(cheetah_error_log, 0, PAGE_SIZE << order);
866
867	/* Mark all AFSRs as invalid so that the trap handler will
868	 * log new new information there.
869	 */
870	for (i = 0; i < 2 * NR_CPUS; i++)
871		cheetah_error_log[i].afsr = CHAFSR_INVALID;
872
873	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
874	if ((ver >> 32) == __JALAPENO_ID ||
875	    (ver >> 32) == __SERRANO_ID) {
876		cheetah_error_table = &__jalapeno_error_table[0];
877		cheetah_afsr_errors = JPAFSR_ERRORS;
878	} else if ((ver >> 32) == 0x003e0015) {
879		cheetah_error_table = &__cheetah_plus_error_table[0];
880		cheetah_afsr_errors = CHPAFSR_ERRORS;
881	} else {
882		cheetah_error_table = &__cheetah_error_table[0];
883		cheetah_afsr_errors = CHAFSR_ERRORS;
884	}
885
886	/* Now patch trap tables. */
887	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
888	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
889	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
890	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
891	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
892	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
893	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
894	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
895	if (tlb_type == cheetah_plus) {
896		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
897		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
898		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
899		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
900	}
901	flushi(PAGE_OFFSET);
902}
903
/* Flush the entire E-cache by displacement: read ecache_flush_size
 * bytes of the reserved physical span, one line at a time, through
 * ASI_PHYS_USE_EC. */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	/* Loop from the top of the span down to zero, loading in the
	 * branch delay slot. */
	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
917
/* Displace a single E-cache line: load both physical addresses in the
 * flush span that map to the same cache index as physaddr (the span
 * is twice the cache size, so there are two aliases). */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
933
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		/* (2 << 3) in the address selects the tag portion for
		 * the diagnostic ASI store. */
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}
956
/* Flush the I-cache safely: disable it via the DCU control register,
 * clear all tags, then restore the previous DCU value (which
 * re-enables the I-cache if it was on). */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
978
/* Invalidate every D-cache line by writing zero to each tag via
 * ASI_DCACHE_TAG diagnostic stores.
 */
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	/* One tag store per line covers the whole cache. */
	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}
994
995/* In order to make the even parity correct we must do two things.
996 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
997 * Next, we clear out all 32-bytes of data for that line.  Data of
998 * all-zero + tag parity value of zero == correct parity.
999 */
/* Rewrite every D-cache line's utag and zero its data so that the
 * stored parity becomes consistent again (see comment above: all-zero
 * data plus a zero tag parity value is correct parity).
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		/* Utag value derived from the line address; see the
		 * parity scheme described in the comment above.
		 */
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		/* Zero the line's data in 8-byte chunks. */
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}
1027
1028/* Conversion tables used to frob Cheetah AFSR syndrome values into
1029 * something palatable to the memory controller driver get_unumber
1030 * routine.
1031 */
/* Codes handed to the memory controller driver's unum lookup.
 * NOTE(review): grouping inferred from usage below -- MTx/MTCx appear
 * only in the mtag table, Cx/Mx/NONE in the E-cache ECC table;
 * confirm exact meanings against the memctrl driver.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
/* 256-entry map from the 8-bit CHAFSR E-syndrome to a code consumed
 * by sprintf_dimm(): presumably a data-bit number, a check-bit code
 * (C0-C8), a multi-bit marker (M/M2/M3/M4), or NONE.  Rows of 16;
 * the row index appears in the leading comments.
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* 16-entry map from the CHAFSR M-syndrome to an mtag code (MTx/MTCx)
 * for sprintf_dimm(); NONE for syndromes with no mtag meaning.
 */
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
1097
1098/* Return the highest priority error conditon mentioned. */
1099static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1100{
1101	unsigned long tmp = 0;
1102	int i;
1103
1104	for (i = 0; cheetah_error_table[i].mask; i++) {
1105		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1106			return tmp;
1107	}
1108	return tmp;
1109}
1110
1111static const char *cheetah_get_string(unsigned long bit)
1112{
1113	int i;
1114
1115	for (i = 0; cheetah_error_table[i].mask; i++) {
1116		if ((bit & cheetah_error_table[i].mask) != 0UL)
1117			return cheetah_error_table[i].name;
1118	}
1119	return "???";
1120}
1121
/* Emit a full report of a Cheetah error trap: AFSR/AFAR and trap
 * state, decoded M/E syndromes (with DIMM unum lookup where the error
 * type carries a valid syndrome), the D/I/E-cache snapshots captured
 * at trap time, and any error bits beyond the highest-priority one.
 * RECOVERABLE selects the printk severity (KERN_WARNING vs KERN_CRIT).
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU  | \
			 CHAFSR_UCC | CHAFSR_UCU  | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		/* E-syndrome: decode through the ECC table before the
		 * DIMM lookup.
		 */
		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		/* M-syndrome: decode through the mtag table. */
		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
	       "u[%016llx] l[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report every remaining error bit beyond the highest-priority
	 * one, in descending priority order.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1239
/* Re-read the AFSR looking for newly logged errors.  Returns 1 (and,
 * if LOGP is non-NULL, captures AFSR/AFAR into it) when any bit in
 * cheetah_afsr_errors is set, else 0.  The observed AFSR value is
 * written back, which clears the reported error bits (the AFSR error
 * bits are write-one-to-clear on Cheetah).
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			/* Only read AFAR when there is an error to log. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1264
/* Handle a Fast-ECC error trap.  Flush the E-cache, snapshot the
 * per-cpu error log, flush and re-enable the I/D caches (the code
 * below re-enables them, so the trap entry path evidently disabled
 * them), re-enable error reporting, then decide recoverability and
 * panic if the error cannot be survived.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log buffer yet (very early boot): report via the
		 * PROM and halt.
		 */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1350
1351/* Try to fix a correctable error by pushing the line out from
1352 * the E-cache.  Recheck error reporting registers to see if the
1353 * problem is intermittent.
1354 */
/* Returns: 0 if no new error was observed (intermittent problem),
 *          1 if the displacement re-triggered an error that cleared
 *            after one extra flush load,
 *          2 if the error persists even after the retry.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
			     "andn	%0, %1, %%g1\n\t"
			     "stxa	%%g1, [%%g0] %2\n\t"
			     "membar	#Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "casxa	[%2] %3, %%g0, %%g0\n\t"
			     "ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1414
1415/* Return non-zero if PADDR is a valid physical memory address. */
1416static int cheetah_check_main_memory(unsigned long paddr)
1417{
1418	unsigned long vaddr = PAGE_OFFSET + paddr;
1419
1420	if (vaddr > (unsigned long) high_memory)
1421		return 0;
1422
1423	return kern_addr_valid(vaddr);
1424}
1425
/* Handle a Correctable-ECC error trap.  Snapshot the per-cpu error
 * log, scrub the offending memory line when AFAR points at main
 * memory, flush whichever caches the error type implicates, restore
 * correctable-error reporting, log, and panic only for the
 * system-fatal AFSR bits.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log buffer yet (very early boot): report via the
		 * PROM and halt.
		 */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* Scrub the bad memory line by displacement flushing. */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* A lone EDC/CPC error implicates just AFAR's line;
		 * anything more means flushing the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1522
/* Handle a deferred asynchronous error trap (EDU/BERR/TO class).
 * Special-cases the PCI config-space poke sequence, then snapshots
 * the error log, flushes/re-enables caches and error reporting, and
 * decides recoverability: a deferred error is survivable only when
 * AFAR hits main memory and the fault came from user mode or from a
 * kernel access covered by an exception table entry.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Discard the logged error and clear the AFSR. */
		(void) cheetah_recheck_errors(NULL);

		/* Tell the poker its access faulted and skip the
		 * faulting instruction.
		 */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No log buffer yet (very early boot): report via the
		 * PROM and halt.
		 */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* A lone EDU/BERR error implicates just AFAR's line;
		 * anything more means flushing the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the bad page so it is never
				 * handed out again.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1709
1710/* Handle a D/I cache parity error trap.  TYPE is encoded as:
1711 *
1712 * Bit0:	0=dcache,1=icache
1713 * Bit1:	0=recoverable,1=unrecoverable
1714 *
1715 * The hardware has disabled both the I-cache and D-cache in
1716 * the %dcr register.
1717 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* Bit0 selects which cache faulted; in either case the D-cache
	 * gets flushed below since both caches were disabled.
	 */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Bit1 set means the error is unrecoverable. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}
1751
/* One entry of the per-cpu sun4v error report queues (resumable and
 * non-resumable); consumed by sun4v_resum_error() and
 * sun4v_nonresum_error() below.
 */
struct sun4v_error_entry {
	u64		err_handle;	/* opaque id; cleared to release the entry */
	u64		err_stick;	/* %stick timestamp of the error */

	u32		err_type;	/* one of SUN4V_ERR_TYPE_* */
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32		err_attrs;	/* bitmask of SUN4V_ERR_ATTRS_* */
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64		err_raddr;	/* real address associated with the error */
	u32		err_size;	/* size of the affected region */
	u16		err_cpu;	/* cpu the error was reported against */
	u16		err_pad;	/* padding to 8-byte alignment */
};
1778
/* Overflow counts for the resumable/non-resumable error queues,
 * bumped from the overflow handlers below and reported + reset by
 * sun4v_log_error().
 */
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1781
1782static const char *sun4v_err_type_to_str(u32 type)
1783{
1784	switch (type) {
1785	case SUN4V_ERR_TYPE_UNDEFINED:
1786		return "undefined";
1787	case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1788		return "uncorrected resumable";
1789	case SUN4V_ERR_TYPE_PRECISE_NONRES:
1790		return "precise nonresumable";
1791	case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1792		return "deferred nonresumable";
1793	case SUN4V_ERR_TYPE_WARNING_RES:
1794		return "warning resumable";
1795	default:
1796		return "unknown";
1797	};
1798}
1799
/* Pretty-print one sun4v error queue entry ENT (reported on CPU) with
 * the severity/prefix PFX, dump the trapping registers, and report +
 * reset the queue-overflow counter OCNT if it is non-zero.
 */
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%llx] err_stick[%llx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016llx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	show_regs(regs);

	/* Report and clear any queue overflows counted while we could
	 * not safely printk (see comment above the overflow handlers).
	 */
	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}
1842
1843/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
1844 * Log the event and clear the first word of the entry.
1845 */
1846void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1847{
1848	struct sun4v_error_entry *ent, local_copy;
1849	struct trap_per_cpu *tb;
1850	unsigned long paddr;
1851	int cpu;
1852
1853	cpu = get_cpu();
1854
1855	tb = &trap_block[cpu];
1856	paddr = tb->resum_kernel_buf_pa + offset;
1857	ent = __va(paddr);
1858
1859	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1860
1861	/* We have a local copy now, so release the entry.  */
1862	ent->err_handle = 0;
1863	wmb();
1864
1865	put_cpu();
1866
1867	if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1868		/* If err_type is 0x4, it's a powerdown request.  Do
1869		 * not do the usual resumable error log because that
1870		 * makes it look like some abnormal error.
1871		 */
1872		printk(KERN_INFO "Power down request...\n");
1873		kill_cad_pid(SIGINT, 1);
1874		return;
1875	}
1876
1877	sun4v_log_error(regs, &local_copy, cpu,
1878			KERN_ERR "RESUMABLE ERROR",
1879			&sun4v_resum_oflow_cnt);
1880}
1881
1882/* If we try to printk() we'll probably make matters worse, by trying
1883 * to retake locks this cpu already holds or causing more errors. So
1884 * just bump a counter, and we'll report these counter bumps above.
1885 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	/* Counter is reported and reset by sun4v_log_error(). */
	atomic_inc(&sun4v_resum_oflow_cnt);
}
1890
1891/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
1892 * Log the event, clear the first word of the entry, and die.
1893 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	/* Locate this cpu's queue entry at OFFSET and snapshot it. */
	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry.  */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		/* Expected fault from a config-space poke: flag it and
		 * skip the faulting instruction instead of dying.
		 */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	/* Non-resumable means exactly that: log and die. */
	panic("Non-resumable error.");
}
1931
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* Non-resumable error queue overflowed; just count it. */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}
1940
/* ITLB error state.  Written outside this file (presumably by the
 * sun4v trap entry code — confirm against the assembler) before
 * sun4v_itlb_error_report() is invoked.
 */
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

/* Fatal sun4v ITLB error: dump all saved state and halt via the PROM. */
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	/* At trap level > 1, a tl1_traplog was saved just past pt_regs. */
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	prom_halt();
}
1964
/* DTLB error state.  Written outside this file (presumably by the
 * sun4v trap entry code — confirm against the assembler) before
 * sun4v_dtlb_error_report() is invoked.
 */
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

/* Fatal sun4v DTLB error: dump all saved state and halt via the PROM. */
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	/* At trap level > 1, a tl1_traplog was saved just past pt_regs. */
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	prom_halt();
}
1988
/* Report a failed hypervisor TLB call.  @err is the hypervisor status
 * code, @op identifies which TLB operation was attempted.
 */
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}

/* Same as above, for TLB operations initiated via cross-call (xcall). */
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}
2000
2001void do_fpe_common(struct pt_regs *regs)
2002{
2003	if (regs->tstate & TSTATE_PRIV) {
2004		regs->tpc = regs->tnpc;
2005		regs->tnpc += 4;
2006	} else {
2007		unsigned long fsr = current_thread_info()->xfsr[0];
2008		siginfo_t info;
2009
2010		if (test_thread_flag(TIF_32BIT)) {
2011			regs->tpc &= 0xffffffff;
2012			regs->tnpc &= 0xffffffff;
2013		}
2014		info.si_signo = SIGFPE;
2015		info.si_errno = 0;
2016		info.si_addr = (void __user *)regs->tpc;
2017		info.si_trapno = 0;
2018		info.si_code = __SI_FAULT;
2019		if ((fsr & 0x1c000) == (1 << 14)) {
2020			if (fsr & 0x10)
2021				info.si_code = FPE_FLTINV;
2022			else if (fsr & 0x08)
2023				info.si_code = FPE_FLTOVF;
2024			else if (fsr & 0x04)
2025				info.si_code = FPE_FLTUND;
2026			else if (fsr & 0x02)
2027				info.si_code = FPE_FLTDIV;
2028			else if (fsr & 0x01)
2029				info.si_code = FPE_FLTRES;
2030		}
2031		force_sig_info(SIGFPE, &info, current);
2032	}
2033}
2034
/* FPU IEEE exception trap (trap type 0x24): hand off to the common
 * SIGFPE logic unless a kdebug handler consumed the event.
 */
void do_fpieee(struct pt_regs *regs)
{
	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		return;

	do_fpe_common(regs);
}
2043
extern int do_mathemu(struct pt_regs *, struct fpustate *);

/* FPU "other" exception trap (trap type 0x25): for unfinished or
 * unimplemented FPops try software emulation first, and only fall
 * through to the common SIGFPE path if emulation did not handle it.
 */
void do_fpother(struct pt_regs *regs)
{
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		return;

	/* %fsr bits 16:14 hold the fp trap type (ftt). */
	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f);
		break;
	}
	if (ret)
		return;
	do_fpe_common(regs);
}
2065
2066void do_tof(struct pt_regs *regs)
2067{
2068	siginfo_t info;
2069
2070	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2071		       0, 0x26, SIGEMT) == NOTIFY_STOP)
2072		return;
2073
2074	if (regs->tstate & TSTATE_PRIV)
2075		die_if_kernel("Penguin overflow trap from kernel mode", regs);
2076	if (test_thread_flag(TIF_32BIT)) {
2077		regs->tpc &= 0xffffffff;
2078		regs->tnpc &= 0xffffffff;
2079	}
2080	info.si_signo = SIGEMT;
2081	info.si_errno = 0;
2082	info.si_code = EMT_TAGOVF;
2083	info.si_addr = (void __user *)regs->tpc;
2084	info.si_trapno = 0;
2085	force_sig_info(SIGEMT, &info, current);
2086}
2087
2088void do_div0(struct pt_regs *regs)
2089{
2090	siginfo_t info;
2091
2092	if (notify_die(DIE_TRAP, "integer division by zero", regs,
2093		       0, 0x28, SIGFPE) == NOTIFY_STOP)
2094		return;
2095
2096	if (regs->tstate & TSTATE_PRIV)
2097		die_if_kernel("TL0: Kernel divide by zero.", regs);
2098	if (test_thread_flag(TIF_32BIT)) {
2099		regs->tpc &= 0xffffffff;
2100		regs->tnpc &= 0xffffffff;
2101	}
2102	info.si_signo = SIGFPE;
2103	info.si_errno = 0;
2104	info.si_code = FPE_INTDIV;
2105	info.si_addr = (void __user *)regs->tpc;
2106	info.si_trapno = 0;
2107	force_sig_info(SIGFPE, &info, current);
2108}
2109
/* Dump the 9 kernel instruction words around @pc: three before, the
 * faulting word bracketed with '<' '>', then five after.  A misaligned
 * pc cannot be a valid instruction address, so bail out silently.
 */
static void instruction_dump(unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
	printk("\n");
}
2122
2123static void user_instruction_dump(unsigned int __user *pc)
2124{
2125	int i;
2126	unsigned int buf[9];
2127
2128	if ((((unsigned long) pc) & 3))
2129		return;
2130
2131	if (copy_from_user(buf, pc - 3, sizeof(buf)))
2132		return;
2133
2134	printk("Instruction DUMP:");
2135	for (i = 0; i < 9; i++)
2136		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2137	printk("\n");
2138}
2139
2140void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2141{
2142	unsigned long fp, thread_base, ksp;
2143	struct thread_info *tp;
2144	int count = 0;
2145#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2146	int graph = 0;
2147#endif
2148
2149	ksp = (unsigned long) _ksp;
2150	if (!tsk)
2151		tsk = current;
2152	tp = task_thread_info(tsk);
2153	if (ksp == 0UL) {
2154		if (tsk == current)
2155			asm("mov %%fp, %0" : "=r" (ksp));
2156		else
2157			ksp = tp->ksp;
2158	}
2159	if (tp == current_thread_info())
2160		flushw_all();
2161
2162	fp = ksp + STACK_BIAS;
2163	thread_base = (unsigned long) tp;
2164
2165	printk("Call Trace:\n");
2166	do {
2167		struct sparc_stackf *sf;
2168		struct pt_regs *regs;
2169		unsigned long pc;
2170
2171		if (!kstack_valid(tp, fp))
2172			break;
2173		sf = (struct sparc_stackf *) fp;
2174		regs = (struct pt_regs *) (sf + 1);
2175
2176		if (kstack_is_trap_frame(tp, regs)) {
2177			if (!(regs->tstate & TSTATE_PRIV))
2178				break;
2179			pc = regs->tpc;
2180			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2181		} else {
2182			pc = sf->callers_pc;
2183			fp = (unsigned long)sf->fp + STACK_BIAS;
2184		}
2185
2186		printk(" [%016lx] %pS\n", pc, (void *) pc);
2187#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2188		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
2189			int index = tsk->curr_ret_stack;
2190			if (tsk->ret_stack && index >= graph) {
2191				pc = tsk->ret_stack[index - graph].ret;
2192				printk(" [%016lx] %pS\n", pc, (void *) pc);
2193				graph++;
2194			}
2195		}
2196#endif
2197	} while (++count < 16);
2198}
2199
/* Convenience wrapper: backtrace the current task from its live
 * stack pointer.
 */
void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);
2206
2207static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2208{
2209	unsigned long fp = rw->ins[6];
2210
2211	if (!fp)
2212		return NULL;
2213
2214	return (struct reg_window *) (fp + STACK_BIAS);
2215}
2216
/* Oops path: print diagnostics for @regs and terminate.  Despite the
 * name this is reached for both kernel- and user-mode trap state; for
 * kernel traps it backtraces the register windows and exits with
 * SIGKILL, for user traps it dumps the faulting instructions and
 * exits with SIGSEGV.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Spill all register windows so the stack walk below sees them. */
	__asm__ __volatile__("flushw");
	show_regs(regs);
	add_taint(TAINT_DIE);
	if (regs->tstate & TSTATE_PRIV) {
		struct thread_info *tp = current_thread_info();
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       kstack_valid(tp, (unsigned long) rw)) {
			printk("Caller[%016lx]: %pS\n", rw->ins[7],
			       (void *) rw->ins[7]);

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);
2263
2264#define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
2265#define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))
2266
2267extern int handle_popc(u32 insn, struct pt_regs *regs);
2268extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2269
2270void do_illegal_instruction(struct pt_regs *regs)
2271{
2272	unsigned long pc = regs->tpc;
2273	unsigned long tstate = regs->tstate;
2274	u32 insn;
2275	siginfo_t info;
2276
2277	if (notify_die(DIE_TRAP, "illegal instruction", regs,
2278		       0, 0x10, SIGILL) == NOTIFY_STOP)
2279		return;
2280
2281	if (tstate & TSTATE_PRIV)
2282		die_if_kernel("Kernel illegal instruction", regs);
2283	if (test_thread_flag(TIF_32BIT))
2284		pc = (u32)pc;
2285	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2286		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2287			if (handle_popc(insn, regs))
2288				return;
2289		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2290			if (handle_ldf_stq(insn, regs))
2291				return;
2292		} else if (tlb_type == hypervisor) {
2293			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2294				if (!vis_emul(regs, insn))
2295					return;
2296			} else {
2297				struct fpustate *f = FPUSTATE;
2298
2299				if (do_mathemu(regs, f))
2300					return;
2301			}
2302		}
2303	}
2304	info.si_signo = SIGILL;
2305	info.si_errno = 0;
2306	info.si_code = ILL_ILLOPC;
2307	info.si_addr = (void __user *)pc;
2308	info.si_trapno = 0;
2309	force_sig_info(SIGILL, &info, current);
2310}
2311
2312extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2313
2314void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2315{
2316	siginfo_t info;
2317
2318	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2319		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2320		return;
2321
2322	if (regs->tstate & TSTATE_PRIV) {
2323		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2324		return;
2325	}
2326	info.si_signo = SIGBUS;
2327	info.si_errno = 0;
2328	info.si_code = BUS_ADRALN;
2329	info.si_addr = (void __user *)sfar;
2330	info.si_trapno = 0;
2331	force_sig_info(SIGBUS, &info, current);
2332}
2333
2334void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2335{
2336	siginfo_t info;
2337
2338	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2339		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2340		return;
2341
2342	if (regs->tstate & TSTATE_PRIV) {
2343		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2344		return;
2345	}
2346	info.si_signo = SIGBUS;
2347	info.si_errno = 0;
2348	info.si_code = BUS_ADRALN;
2349	info.si_addr = (void __user *) addr;
2350	info.si_trapno = 0;
2351	force_sig_info(SIGBUS, &info, current);
2352}
2353
2354void do_privop(struct pt_regs *regs)
2355{
2356	siginfo_t info;
2357
2358	if (notify_die(DIE_TRAP, "privileged operation", regs,
2359		       0, 0x11, SIGILL) == NOTIFY_STOP)
2360		return;
2361
2362	if (test_thread_flag(TIF_32BIT)) {
2363		regs->tpc &= 0xffffffff;
2364		regs->tnpc &= 0xffffffff;
2365	}
2366	info.si_signo = SIGILL;
2367	info.si_errno = 0;
2368	info.si_code = ILL_PRVOPC;
2369	info.si_addr = (void __user *)regs->tpc;
2370	info.si_trapno = 0;
2371	force_sig_info(SIGILL, &info, current);
2372}
2373
/* Privileged action trap: handled exactly like a privileged opcode. */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
2378
/* Trap level 1 stuff or other traps we should never see...
 *
 * Each _tl1 handler below dumps the trap-register log that the entry
 * code saved immediately past pt_regs, then dies — these traps mean
 * the kernel is in an unrecoverable state.  The TL0 variants (do_cee,
 * do_paw, do_vaw) die without a traplog, since none was saved.
 */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2478
/* Emulate a "read %psr" request for compat tasks: synthesize a 32-bit
 * psr-style value from %tstate, store it in the task's return register
 * (u_regs[UREG_I0]), and step past the trapping instruction.
 */
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc   = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
2489
/* Per-cpu trap state, indexed by cpu number.  trap_init() below
 * sanity-checks this structure's field offsets against the
 * TRAP_PER_CPU_* assembler-visible constants.
 */
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void notrace init_cur_cpu_trap(struct thread_info *t)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	/* Record this cpu's thread_info and reset its pgd physaddr. */
	p->thread = t;
	p->pgd_paddr = 0;
}
2504
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check.  The TI_* assembler constants
	 * must match the struct thread_info layout, since the trap
	 * entry assembler indexes the structure with them.
	 */
	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
		     TI_FLAGS != offsetof(struct thread_info, flags) ||
		     TI_CPU != offsetof(struct thread_info, cpu) ||
		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
		     TI_KSP != offsetof(struct thread_info, ksp) ||
		     TI_FAULT_ADDR != offsetof(struct thread_info,
					       fault_address) ||
		     TI_KREGS != offsetof(struct thread_info, kregs) ||
		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
		     TI_EXEC_DOMAIN != offsetof(struct thread_info,
						exec_domain) ||
		     TI_REG_WINDOW != offsetof(struct thread_info,
					       reg_window) ||
		     TI_RWIN_SPTRS != offsetof(struct thread_info,
					       rwbuf_stkptrs) ||
		     TI_GSR != offsetof(struct thread_info, gsr) ||
		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
		     TI_PRE_COUNT != offsetof(struct thread_info,
					      preempt_count) ||
		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
		     TI_SYS_NOERROR != offsetof(struct thread_info,
						syscall_noerror) ||
		     TI_RESTART_BLOCK != offsetof(struct thread_info,
						  restart_block) ||
		     TI_KUNA_REGS != offsetof(struct thread_info,
					      kern_una_regs) ||
		     TI_KUNA_INSN != offsetof(struct thread_info,
					      kern_una_insn) ||
		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
		     (TI_FPREGS & (64 - 1)));

	/* Likewise for the TRAP_PER_CPU_* constants vs struct
	 * trap_per_cpu (TI_FPREGS above must also be 64-byte aligned).
	 */
	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
						     thread) ||
		     (TRAP_PER_CPU_PGD_PADDR !=
		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
		     (TRAP_PER_CPU_CPU_MONDO_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
		     (TRAP_PER_CPU_DEV_MONDO_PA !=
		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_FAULT_INFO !=
		      offsetof(struct trap_per_cpu, fault_info)) ||
		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
		     (TRAP_PER_CPU_CPU_LIST_PA !=
		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
		     (TRAP_PER_CPU_TSB_HUGE !=
		      offsetof(struct trap_per_cpu, tsb_huge)) ||
		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
		     (TRAP_PER_CPU_RESUM_QMASK !=
		      offsetof(struct trap_per_cpu, resum_qmask)) ||
		     (TRAP_PER_CPU_NONRESUM_QMASK !=
		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
		     (TRAP_PER_CPU_PER_CPU_BASE !=
		      offsetof(struct trap_per_cpu, __per_cpu_base)));

	/* And the TSB_CONFIG_* constants vs struct tsb_config. */
	BUILD_BUG_ON((TSB_CONFIG_TSB !=
		      offsetof(struct tsb_config, tsb)) ||
		     (TSB_CONFIG_RSS_LIMIT !=
		      offsetof(struct tsb_config, tsb_rss_limit)) ||
		     (TSB_CONFIG_NENTRIES !=
		      offsetof(struct tsb_config, tsb_nentries)) ||
		     (TSB_CONFIG_REG_VAL !=
		      offsetof(struct tsb_config, tsb_reg_val)) ||
		     (TSB_CONFIG_MAP_VADDR !=
		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
		     (TSB_CONFIG_MAP_PTE !=
		      offsetof(struct tsb_config, tsb_map_pte)));

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}
2602