/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/traps.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>

#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
			   struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);

#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
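
/*
 * For example, DO_ERROR(13, SIGILL, "illegal slot instruction",
 * illegal_slot_inst, current) below expands to an asmlinkage
 * do_illegal_slot_inst() entry point that simply reports the trap through
 * do_unhandled_exception() with the given trap number, signal and description.
 * (The tsk macro argument is unused; 'current' is always passed.)
 */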

static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %lx\n", str, (err & 0xffffff));
	show_regs(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return;
		}
		die(str, regs, err);
	}
}

DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)


/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation: only SHmedia-mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

static int misaligned_fixup(struct pt_regs *regs);

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(7, SIGSEGV, "address error(load)",
				"do_address_error_load",
				error_code, regs, current);
	}
	return;
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(8, SIGSEGV, "address error(store)",
				"do_address_error_store",
				error_code, regs, current);
	}
	return;
}

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
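/* Worked example: GETCON has major opcode 0x09 and minor opcode 0xf, giving
   the 10-bit value 0x9f.  Entry 9 below is 0xc0000000; bit-pair number 0xf
   (bits 31:30) is 2'b11, i.e. OPCODE_CTRL_REG, so the workaround goes on to
   check which control register is referenced. */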
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};

void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	int get_user_error;
	int trapnr = 12;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;
	if ((pc & 3) == 1) {
		/* SHmedia: check for defect.  This requires executable vmas
		   to be readable too. */
		aligned_pc = pc & ~3;
		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
			get_user_error = -EFAULT;
		} else {
			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
		}
		if (get_user_error >= 0) {
			unsigned long index, shift;
			unsigned long major, minor, combined;
			unsigned long reserved_field;
			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
			major = (opcode >> 26) & 0x3f;
			minor = (opcode >> 16) & 0xf;
			combined = (major << 4) | minor;
			index = major;
			shift = minor << 1;
			if (reserved_field == 0) {
				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
				switch (opcode_state) {
					case OPCODE_INVALID:
						/* Trap. */
						break;
					case OPCODE_USER_VALID:
						/* Restart the instruction: the branch to the instruction will now be from an RTE
						   not from SHcompact so the silicon defect won't be triggered. */
						return;
					case OPCODE_PRIV_VALID:
						if (!user_mode(regs)) {
							/* Should only ever get here if a module has
							   SHcompact code inside it.  If so, the same fix-up is needed. */
							return; /* same reason */
						}
						/* Otherwise, user mode trying to execute a privileged instruction -
						   fall through to trap. */
						break;
					case OPCODE_CTRL_REG:
						/* If in privileged mode, return as above. */
						if (!user_mode(regs)) return;
						/* In user mode ... */
						if (combined == 0x9f) { /* GETCON */
							unsigned long regno = (opcode >> 20) & 0x3f;
							if (regno >= 62) {
								return;
							}
							/* Otherwise, reserved or privileged control register, => trap */
						} else if (combined == 0x1bf) { /* PUTCON */
							unsigned long regno = (opcode >> 4) & 0x3f;
							if (regno >= 62) {
								return;
							}
							/* Otherwise, reserved or privileged control register, => trap */
						} else {
							/* Trap */
						}
						break;
					default:
						/* Fall through to trap. */
						break;
				}
			}
			/* fall through to normal resinst processing */
		} else {
			/* Error trying to read opcode.  This typically means a
			   real fault, not a RESINST any more.  So change the
			   codes. */
			trapnr = 87;
			exception_name = "address error (exec)";
			signr = SIGSEGV;
		}
	}

	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */
/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	show_excp_regs(__FUNCTION__, -1, -1, regs);
	die_if_kernel("exception", regs, ex);
}

int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	/* Syscall debug */
	printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return -ENOSYS;
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
	extern void sh64_unwind(struct pt_regs *regs);
	struct pt_regs *regs;

	regs = tsk ? tsk->thread.kregs : NULL;

	sh64_unwind(regs);
#else
	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}

void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}

void dump_stack(void)
{
	show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
	show_excp_regs(fn_name, trapnr, signr, regs);
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs))
		force_sig(signr, tsk);

	die_if_no_fixup(str, regs, error_code);
}

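/* As used throughout this file, the low bits of the PC encode the ISA mode:
   (pc & 3) == 1 means the faulting instruction is SHmedia (the instruction
   word itself lives at pc & ~3), while an even PC means SHcompact. */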
static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	unsigned long opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* The fault was in the kernel, so read the opcode
			 * directly; if that access itself faults, it is a
			 * genuine bug and we take the fault.
			 */
			*result_opcode = *(unsigned long *) aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this.  We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact.  */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}

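/* Effective addresses are only valid if they are correctly sign-extended from
   the implemented address width; with NEFF == 32 a value is valid iff it
   equals its own low 32 bits sign-extended to 64 bits. */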
static int address_is_sign_extended(__u64 a)
{
	__u64 b;
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

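/* Work out the effective address of a misaligned access.  In the displacement
   form the base register is taken from opcode[25:20] and a 10-bit signed
   displacement in opcode[19:10] is scaled by the access width; in the indexed
   form the offset register is taken from opcode[15:10] instead. */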
static int generate_and_check_address(struct pt_regs *regs,
				      __u32 opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	/* return -1 for fault, 0 for OK */

	__u64 base_address, addr;
	int basereg;

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = ((displacement << 54) >> 54); /* sign extend */
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr)) {
		return -1;
	}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		if (addr >= TASK_SIZE) {
			return -1;
		}
		/* Do access_ok check later - it depends on whether it's a load or a store. */
	}
#endif

	*address = addr;
	return 0;
}

/* Default value as for sh */
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
#endif

static int kernel_mode_unaligned_fixup_count = 32;

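/* These helpers emulate a misaligned kernel-mode 16-bit access one byte at a
   time, so no misaligned bus access is ever issued while doing the fixup. */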
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	q[0] = p[0];
	q[1] = p[1];

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;

	x = (__u16) value;
	p[0] = q[0];
	p[1] = q[1];
}

static int misaligned_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

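		/* ldlo.* and ldhi.* each touch only the bytes of the operand
		   that lie within one naturally aligned word/quadword, so
		   neither access faults on alignment; OR-ing the two partial
		   results reassembles the misaligned value. */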
		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

static int misaligned_store(struct pt_regs *regs,
			    __u32 opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

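		/* As for the loads above, the stlo/sthi pair each write only
		   the bytes that fall within one aligned container, so the
		   emulation never issues a misaligned bus access. */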
		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			grab_fpu();
			fpsave(&current->thread.fpu.hard);
			release_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32*) &buffer;
		bufhi = *(1 + (__u32*) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.fpu.hard.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_LITTLE_ENDIAN)
				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die ("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fpu_store(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			grab_fpu();
			fpsave(&current->thread.fpu.hard);
			release_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.fpu.hard.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_LITTLE_ENDIAN)
				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32*) &buffer = buflo;
		*(1 + (__u32*) &buffer) = bufhi;
		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		return 0;
	} else {
		die ("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}
}
#endif

static int misaligned_fixup(struct pt_regs *regs)
{
	unsigned long opcode;
	int error;
	int major, minor;

#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Never fixup user mode misaligned accesses without this option enabled. */
	return -1;
#else
	if (!user_mode_unaligned_fixup_enable) return -1;
#endif

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}
	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
		--user_mode_unaligned_fixup_count;
		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
		       current->comm, current->pid, (__u32)regs->pc, opcode);
	} else
#endif
	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
		--kernel_mode_unaligned_fixup_count;
		if (in_interrupt()) {
			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
			       (__u32)regs->pc, opcode);
		} else {
			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
			       current->comm, current->pid, (__u32)regs->pc, opcode);
		}
	}

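	/* Dispatch on the major opcode field, opcode[31:26]; the indexed forms
	   dispatch further on the minor opcode, opcode[19:16].  The case
	   labels are written as (0xNN >> 2), presumably so the constants can
	   be read against how the SHmedia opcodes are usually listed. */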
	switch (major) {
		case (0x84>>2): /* LD.W */
			error = misaligned_load(regs, opcode, 1, 1, 1);
			break;
		case (0xb0>>2): /* LD.UW */
			error = misaligned_load(regs, opcode, 1, 1, 0);
			break;
		case (0x88>>2): /* LD.L */
			error = misaligned_load(regs, opcode, 1, 2, 1);
			break;
		case (0x8c>>2): /* LD.Q */
			error = misaligned_load(regs, opcode, 1, 3, 0);
			break;

		case (0xa4>>2): /* ST.W */
			error = misaligned_store(regs, opcode, 1, 1);
			break;
		case (0xa8>>2): /* ST.L */
			error = misaligned_store(regs, opcode, 1, 2);
			break;
		case (0xac>>2): /* ST.Q */
			error = misaligned_store(regs, opcode, 1, 3);
			break;

		case (0x40>>2): /* indexed loads */
			switch (minor) {
				case 0x1: /* LDX.W */
					error = misaligned_load(regs, opcode, 0, 1, 1);
					break;
				case 0x5: /* LDX.UW */
					error = misaligned_load(regs, opcode, 0, 1, 0);
					break;
				case 0x2: /* LDX.L */
					error = misaligned_load(regs, opcode, 0, 2, 1);
					break;
				case 0x3: /* LDX.Q */
					error = misaligned_load(regs, opcode, 0, 3, 0);
					break;
				default:
					error = -1;
					break;
			}
			break;

		case (0x60>>2): /* indexed stores */
			switch (minor) {
				case 0x1: /* STX.W */
					error = misaligned_store(regs, opcode, 0, 1);
					break;
				case 0x2: /* STX.L */
					error = misaligned_store(regs, opcode, 0, 2);
					break;
				case 0x3: /* STX.Q */
					error = misaligned_store(regs, opcode, 0, 3);
					break;
				default:
					error = -1;
					break;
			}
			break;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
		case (0x94>>2): /* FLD.S */
			error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
			break;
		case (0x98>>2): /* FLD.P */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
			break;
		case (0x9c>>2): /* FLD.D */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
			break;
		case (0x1c>>2): /* floating indexed loads */
			switch (minor) {
			case 0x8: /* FLDX.S */
				error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
				break;
			case 0xd: /* FLDX.P */
				error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
				break;
			case 0x9: /* FLDX.D */
				error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
				break;
			default:
				error = -1;
				break;
			}
			break;
		case (0xb4>>2): /* FST.S */
			error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
			break;
		case (0xb8>>2): /* FST.P */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
			break;
		case (0xbc>>2): /* FST.D */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
			break;
		case (0x3c>>2): /* floating indexed stores */
			switch (minor) {
			case 0x8: /* FSTX.S */
				error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
				break;
			case 0xd: /* FSTX.P */
				error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
				break;
			case 0x9: /* FSTX.D */
				error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
				break;
			default:
				error = -1;
				break;
			}
			break;
#endif

		default:
			/* Fault */
			error = -1;
			break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}
}

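/* Expose the fixup/report knobs via sysctl: with the hierarchy below they
   appear as /proc/sys/sh64/unaligned_fixup/kernel_reports, plus (when user
   fixup is configured in) .../user_reports and .../user_enable. */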
static ctl_table unaligned_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "kernel_reports",
		.data		= &kernel_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_reports",
		.data		= &user_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_enable",
		.data		= &user_mode_unaligned_fixup_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#endif
	{}
};

static ctl_table unaligned_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "unaligned_fixup",
		.mode		= 0555,
		.child		= unaligned_table
	},
	{}
};

static ctl_table sh64_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sh64",
		.mode		= 0555,
		.child		= unaligned_root
	},
	{}
};
static struct ctl_table_header *sysctl_header;
static int __init init_sysctl(void)
{
	sysctl_header = register_sysctl_table(sh64_root);
	return 0;
}

__initcall(init_sysctl);

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;
	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
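	/* Bit 2 is the only cause this handler expects to find set; report
	   anything else before all DEBUGINT causes are cleared below. */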
	if (exp_cause & ~4) {
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	}
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}