1/*
2 * linux/arch/m32r/kernel/ptrace.c
3 *
4 * Copyright (C) 2002  Hirokazu Takata, Takeo Takahashi
5 * Copyright (C) 2004  Hirokazu Takata, Kei Sakamoto
6 *
7 * Original x86 implementation:
8 *	By Ross Biro 1/23/92
9 *	edited by Linus Torvalds
10 *
11 * Some code taken from sh version:
12 *   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
13 * Some code taken from arm version:
14 *   Copyright (C) 2000 Russell King
15 */
16
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/errno.h>
23#include <linux/ptrace.h>
24#include <linux/user.h>
25#include <linux/string.h>
26#include <linux/signal.h>
27
28#include <asm/cacheflush.h>
29#include <asm/io.h>
30#include <asm/uaccess.h>
31#include <asm/pgtable.h>
32#include <asm/system.h>
33#include <asm/processor.h>
34#include <asm/mmu_context.h>
35
36/*
37 * This routine will get a word off of the process kernel stack.
38 */
39static inline unsigned long int
40get_stack_long(struct task_struct *task, int offset)
41{
42	unsigned long *stack;
43
44	stack = (unsigned long *)task_pt_regs(task);
45
46	return stack[offset];
47}
48
49/*
50 * This routine will put a word on the process kernel stack.
51 */
52static inline int
53put_stack_long(struct task_struct *task, int offset, unsigned long data)
54{
55	unsigned long *stack;
56
57	stack = (unsigned long *)task_pt_regs(task);
58	stack[offset] = data;
59
60	return 0;
61}
62
/* Map an instruction register number (0-15) to its word offset in
 * the saved pt_regs frame; used when decoding branch/jump operands. */
static int reg_offset[] = {
	PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
	PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
};
67
68/*
69 * Read the word at offset "off" into the "struct user".  We
70 * actually access the pt_regs stored on the kernel stack.
71 */
72static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
73			    unsigned long __user *data)
74{
75	unsigned long tmp;
76#ifndef NO_FPU
77	struct user * dummy = NULL;
78#endif
79
80	if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
81		return -EIO;
82
83	off >>= 2;
84	switch (off) {
85	case PT_EVB:
86		__asm__ __volatile__ (
87			"mvfc	%0, cr5 \n\t"
88	 		: "=r" (tmp)
89		);
90		break;
91	case PT_CBR: {
92			unsigned long psw;
93			psw = get_stack_long(tsk, PT_PSW);
94			tmp = ((psw >> 8) & 1);
95		}
96		break;
97	case PT_PSW: {
98			unsigned long psw, bbpsw;
99			psw = get_stack_long(tsk, PT_PSW);
100			bbpsw = get_stack_long(tsk, PT_BBPSW);
101			tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
102		}
103		break;
104	case PT_PC:
105		tmp = get_stack_long(tsk, PT_BPC);
106		break;
107	case PT_BPC:
108		off = PT_BBPC;
109		/* fall through */
110	default:
111		if (off < (sizeof(struct pt_regs) >> 2))
112			tmp = get_stack_long(tsk, off);
113#ifndef NO_FPU
114		else if (off >= (long)(&dummy->fpu >> 2) &&
115			 off < (long)(&dummy->u_fpvalid >> 2)) {
116			if (!tsk_used_math(tsk)) {
117				if (off == (long)(&dummy->fpu.fpscr >> 2))
118					tmp = FPSCR_INIT;
119				else
120					tmp = 0;
121			} else
122				tmp = ((long *)(&tsk->thread.fpu >> 2))
123					[off - (long)&dummy->fpu];
124		} else if (off == (long)(&dummy->u_fpvalid >> 2))
125			tmp = !!tsk_used_math(tsk);
126#endif /* not NO_FPU */
127		else
128			tmp = 0;
129	}
130
131	return put_user(tmp, data);
132}
133
134static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
135			     unsigned long data)
136{
137	int ret = -EIO;
138#ifndef NO_FPU
139	struct user * dummy = NULL;
140#endif
141
142	if ((off & 3) || off < 0 ||
143	    off > sizeof(struct user) - 3)
144		return -EIO;
145
146	off >>= 2;
147	switch (off) {
148	case PT_EVB:
149	case PT_BPC:
150	case PT_SPI:
151		/* We don't allow to modify evb. */
152		ret = 0;
153		break;
154	case PT_PSW:
155	case PT_CBR: {
156			/* We allow to modify only cbr in psw */
157			unsigned long psw;
158			psw = get_stack_long(tsk, PT_PSW);
159			psw = (psw & ~0x100) | ((data & 1) << 8);
160			ret = put_stack_long(tsk, PT_PSW, psw);
161		}
162		break;
163	case PT_PC:
164		off = PT_BPC;
165		data &= ~1;
166		/* fall through */
167	default:
168		if (off < (sizeof(struct pt_regs) >> 2))
169			ret = put_stack_long(tsk, off, data);
170#ifndef NO_FPU
171		else if (off >= (long)(&dummy->fpu >> 2) &&
172			 off < (long)(&dummy->u_fpvalid >> 2)) {
173			set_stopped_child_used_math(tsk);
174			((long *)&tsk->thread.fpu)
175				[off - (long)&dummy->fpu] = data;
176			ret = 0;
177		} else if (off == (long)(&dummy->u_fpvalid >> 2)) {
178			conditional_stopped_child_used_math(data, tsk);
179			ret = 0;
180		}
181#endif /* not NO_FPU */
182		break;
183	}
184
185	return ret;
186}
187
188/*
189 * Get all user integer registers.
190 */
191static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
192{
193	struct pt_regs *regs = task_pt_regs(tsk);
194
195	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
196}
197
198/*
199 * Set all user integer registers.
200 */
201static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
202{
203	struct pt_regs newregs;
204	int ret;
205
206	ret = -EFAULT;
207	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
208		struct pt_regs *regs = task_pt_regs(tsk);
209		*regs = newregs;
210		ret = 0;
211	}
212
213	return ret;
214}
215
216
217static inline int
218check_condition_bit(struct task_struct *child)
219{
220	return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
221}
222
223static int
224check_condition_src(unsigned long op, unsigned long regno1,
225		    unsigned long regno2, struct task_struct *child)
226{
227	unsigned long reg1, reg2;
228
229	reg2 = get_stack_long(child, reg_offset[regno2]);
230
231	switch (op) {
232	case 0x0: /* BEQ */
233		reg1 = get_stack_long(child, reg_offset[regno1]);
234		return reg1 == reg2;
235	case 0x1: /* BNE */
236		reg1 = get_stack_long(child, reg_offset[regno1]);
237		return reg1 != reg2;
238	case 0x8: /* BEQZ */
239		return reg2 == 0;
240	case 0x9: /* BNEZ */
241		return reg2 != 0;
242	case 0xa: /* BLTZ */
243		return (int)reg2 < 0;
244	case 0xb: /* BGEZ */
245		return (int)reg2 >= 0;
246	case 0xc: /* BLEZ */
247		return (int)reg2 <= 0;
248	case 0xd: /* BGTZ */
249		return (int)reg2 > 0;
250	default:
251		/* never reached */
252		return 0;
253	}
254}
255
256static void
257compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
258			       unsigned long *next_pc,
259			       struct task_struct *child)
260{
261	unsigned long op, op2, op3;
262	unsigned long disp;
263	unsigned long regno;
264	int parallel = 0;
265
266	if (insn & 0x00008000)
267		parallel = 1;
268	if (pc & 3)
269		insn &= 0x7fff;	/* right slot */
270	else
271		insn >>= 16;	/* left slot */
272
273	op = (insn >> 12) & 0xf;
274	op2 = (insn >> 8) & 0xf;
275	op3 = (insn >> 4) & 0xf;
276
277	if (op == 0x7) {
278		switch (op2) {
279		case 0xd: /* BNC */
280		case 0x9: /* BNCL */
281			if (!check_condition_bit(child)) {
282				disp = (long)(insn << 24) >> 22;
283				*next_pc = (pc & ~0x3) + disp;
284				return;
285			}
286			break;
287		case 0x8: /* BCL */
288		case 0xc: /* BC */
289			if (check_condition_bit(child)) {
290				disp = (long)(insn << 24) >> 22;
291				*next_pc = (pc & ~0x3) + disp;
292				return;
293			}
294			break;
295		case 0xe: /* BL */
296		case 0xf: /* BRA */
297			disp = (long)(insn << 24) >> 22;
298			*next_pc = (pc & ~0x3) + disp;
299			return;
300			break;
301		}
302	} else if (op == 0x1) {
303		switch (op2) {
304		case 0x0:
305			if (op3 == 0xf) { /* TRAP */
306				/* pass through */
307			} else if (op3 == 0xd) { /* RTE */
308				*next_pc = get_stack_long(child, PT_BPC);
309				return;
310			}
311			break;
312		case 0xc: /* JC */
313			if (op3 == 0xc && check_condition_bit(child)) {
314				regno = insn & 0xf;
315				*next_pc = get_stack_long(child,
316							  reg_offset[regno]);
317				return;
318			}
319			break;
320		case 0xd: /* JNC */
321			if (op3 == 0xc && !check_condition_bit(child)) {
322				regno = insn & 0xf;
323				*next_pc = get_stack_long(child,
324							  reg_offset[regno]);
325				return;
326			}
327			break;
328		case 0xe: /* JL */
329		case 0xf: /* JMP */
330			if (op3 == 0xc) { /* JMP */
331				regno = insn & 0xf;
332				*next_pc = get_stack_long(child,
333							  reg_offset[regno]);
334				return;
335			}
336			break;
337		}
338	}
339	if (parallel)
340		*next_pc = pc + 4;
341	else
342		*next_pc = pc + 2;
343}
344
/*
 * Compute the address of the instruction that will execute after the
 * 32-bit instruction "insn" at "pc".  Branch targets are resolved
 * using the child's saved registers and condition bit; anything that
 * is not a taken branch falls through to pc + 4.
 */
static void
compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op;
	unsigned long op2;
	unsigned long disp;
	unsigned long regno1, regno2;

	op = (insn >> 28) & 0xf;
	if (op == 0xf) { 	/* branch 24-bit relative */
		op2 = (insn >> 24) & 0xf;
		switch (op2) {
		case 0xd:	/* BNC */
		case 0x9:	/* BNCL */
			if (!check_condition_bit(child)) {
				/* sign-extend the 24-bit field and scale by 4 */
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8:	/* BCL */
		case 0xc:	/* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe:	/* BL */
		case 0xf:	/* BRA */
			/* unconditional: always taken */
			disp = (long)(insn << 8) >> 6;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0xb) { /* branch 16-bit relative */
		op2 = (insn >> 20) & 0xf;
		switch (op2) {
		case 0x0: /* BEQ */
		case 0x1: /* BNE */
		case 0x8: /* BEQZ */
		case 0x9: /* BNEZ */
		case 0xa: /* BLTZ */
		case 0xb: /* BGEZ */
		case 0xc: /* BLEZ */
		case 0xd: /* BGTZ */
			regno1 = ((insn >> 24) & 0xf);
			regno2 = ((insn >> 16) & 0xf);
			if (check_condition_src(op2, regno1, regno2, child)) {
				/* sign-extend the 16-bit field and scale by 4 */
				disp = (long)(insn << 16) >> 14;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		}
	}
	*next_pc = pc + 4;
}
404
/*
 * Dispatch on the instruction size bit (bit 31 of the fetched word)
 * to compute the successor of the instruction at "pc".
 */
static inline void
compute_next_pc(unsigned long insn, unsigned long pc,
		unsigned long *next_pc, struct task_struct *child)
{
	if (!(insn & 0x80000000))
		compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
	else
		compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
}
414
415static int
416register_debug_trap(struct task_struct *child, unsigned long next_pc,
417	unsigned long next_insn, unsigned long *code)
418{
419	struct debug_trap *p = &child->thread.debug_trap;
420	unsigned long addr = next_pc & ~3;
421
422	if (p->nr_trap == MAX_TRAPS) {
423		printk("kernel BUG at %s %d: p->nr_trap = %d\n",
424					__FILE__, __LINE__, p->nr_trap);
425		return -1;
426	}
427	p->addr[p->nr_trap] = addr;
428	p->insn[p->nr_trap] = next_insn;
429	p->nr_trap++;
430	if (next_pc & 3) {
431		*code = (next_insn & 0xffff0000) | 0x10f1;
432	} else {
433		if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
434			*code = 0x10f17000;
435			/* TRAP1 --> NOP */
436		} else {
437			*code = (next_insn & 0xffff) | 0x10f10000;
438		}
439	}
440	return 0;
441}
442
443static int
444unregister_debug_trap(struct task_struct *child, unsigned long addr,
445		      unsigned long *code)
446{
447	struct debug_trap *p = &child->thread.debug_trap;
448        int i;
449
450	/* Search debug trap entry. */
451	for (i = 0; i < p->nr_trap; i++) {
452		if (p->addr[i] == addr)
453			break;
454	}
455	if (i >= p->nr_trap) {
456		/* The trap may be requested from debugger.
457		 * ptrace should do nothing in this case.
458		 */
459		return 0;
460	}
461
462	/* Recover orignal instruction code. */
463	*code = p->insn[i];
464
465	/* Shift debug trap entries. */
466	while (i < p->nr_trap - 1) {
467		p->insn[i] = p->insn[i + 1];
468		p->addr[i] = p->addr[i + 1];
469		i++;
470	}
471	p->nr_trap--;
472	return 1;
473}
474
475static void
476unregister_all_debug_traps(struct task_struct *child)
477{
478	struct debug_trap *p = &child->thread.debug_trap;
479	int i;
480
481	for (i = 0; i < p->nr_trap; i++)
482		access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
483	p->nr_trap = 0;
484}
485
/*
 * Make instruction modifications visible to the CPU: either flush
 * the copy-back cache (M32700/OPSP) or turn the cache off,
 * invalidate it, and turn it back on via the cache-control I/O
 * locations at -1/-2.
 */
static inline void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)

	_flush_cache_copyback_all();

#else	/* ! CONFIG_CHIP_M32700 */

	/* Invalidate cache */
	__asm__ __volatile__ (
                "ldi    r0, #-1					\n\t"
                "ldi    r1, #0					\n\t"
                "stb    r1, @r0		; cache off		\n\t"
                ";						\n\t"
                "ldi    r0, #-2					\n\t"
                "ldi    r1, #1					\n\t"
                "stb    r1, @r0		; cache invalidate	\n\t"
                ".fillinsn					\n"
                "0:						\n\t"
                "ldb    r1, @r0		; invalidate check	\n\t"
                "bnez   r1, 0b					\n\t"
                ";						\n\t"
                "ldi    r0, #-1					\n\t"
                "ldi    r1, #1					\n\t"
                "stb    r1, @r0		; cache on		\n\t"
		: : : "r0", "r1", "memory"
	);
#endif	/* CONFIG_CHIP_M32700 */
}
516
/*
 * Embed a debug trap (TRAP1) at "next_pc" in the child: read the
 * original instruction word, register it in the trap table, and
 * write back the patched word.  Returns 0 on success, -1 on any
 * read/write/registration failure.
 */
static int
embed_debug_trap(struct task_struct *child, unsigned long next_pc)
{
	unsigned long next_insn, code;
	unsigned long addr = next_pc & ~3;

	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
	    != sizeof(next_insn))
		return -1; /* error */

	/* Set a trap code. */
	if (register_debug_trap(child, next_pc, next_insn, &code))
		return -1; /* error */

	if (access_process_vm(child, addr, &code, sizeof(code), 1)
	    != sizeof(code))
		return -1; /* error */

	return 0; /* success */
}
539
540void
541withdraw_debug_trap(struct pt_regs *regs)
542{
543	unsigned long addr;
544	unsigned long code;
545
546 	addr = (regs->bpc - 2) & ~3;
547	regs->bpc -= 2;
548	if (unregister_debug_trap(current, addr, &code)) {
549	    access_process_vm(current, addr, &code, sizeof(code), 1);
550	    invalidate_cache();
551	}
552}
553
554static void
555init_debug_traps(struct task_struct *child)
556{
557	struct debug_trap *p = &child->thread.debug_trap;
558	int i;
559	p->nr_trap = 0;
560	for (i = 0; i < MAX_TRAPS; i++) {
561		p->addr[i] = 0;
562		p->insn[i] = 0;
563	}
564}
565
566
567/*
568 * Called by kernel/ptrace.c when detaching..
569 *
570 * Make sure single step bits etc are not set.
571 */
572void ptrace_disable(struct task_struct *child)
573{
574	/* nothing to do.. */
575}
576
577static int
578do_ptrace(long request, struct task_struct *child, long addr, long data)
579{
580	unsigned long tmp;
581	int ret;
582
583	switch (request) {
584	/*
585	 * read word at location "addr" in the child process.
586	 */
587	case PTRACE_PEEKTEXT:
588	case PTRACE_PEEKDATA:
589		ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
590		if (ret == sizeof(tmp))
591			ret = put_user(tmp,(unsigned long __user *) data);
592		else
593			ret = -EIO;
594		break;
595
596	/*
597	 * read the word at location addr in the USER area.
598	 */
599	case PTRACE_PEEKUSR:
600		ret = ptrace_read_user(child, addr,
601				       (unsigned long __user *)data);
602		break;
603
604	/*
605	 * write the word at location addr.
606	 */
607	case PTRACE_POKETEXT:
608	case PTRACE_POKEDATA:
609		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
610		if (ret == sizeof(data)) {
611			ret = 0;
612			if (request == PTRACE_POKETEXT) {
613				invalidate_cache();
614			}
615		} else {
616			ret = -EIO;
617		}
618		break;
619
620	/*
621	 * write the word at location addr in the USER area.
622	 */
623	case PTRACE_POKEUSR:
624		ret = ptrace_write_user(child, addr, data);
625		break;
626
627	/*
628	 * continue/restart and stop at next (return from) syscall
629	 */
630	case PTRACE_SYSCALL:
631	case PTRACE_CONT:
632		ret = -EIO;
633		if (!valid_signal(data))
634			break;
635		if (request == PTRACE_SYSCALL)
636			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
637		else
638			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
639		child->exit_code = data;
640		wake_up_process(child);
641		ret = 0;
642		break;
643
644	/*
645	 * make the child exit.  Best I can do is send it a sigkill.
646	 * perhaps it should be put in the status that it wants to
647	 * exit.
648	 */
649	case PTRACE_KILL: {
650		ret = 0;
651		unregister_all_debug_traps(child);
652		invalidate_cache();
653		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
654			break;
655		child->exit_code = SIGKILL;
656		wake_up_process(child);
657		break;
658	}
659
660	/*
661	 * execute single instruction.
662	 */
663	case PTRACE_SINGLESTEP: {
664		unsigned long next_pc;
665		unsigned long pc, insn;
666
667		ret = -EIO;
668		if (!valid_signal(data))
669			break;
670		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
671		if ((child->ptrace & PT_DTRACE) == 0) {
672			/* Spurious delayed TF traps may occur */
673			child->ptrace |= PT_DTRACE;
674		}
675
676		/* Compute next pc.  */
677		pc = get_stack_long(child, PT_BPC);
678
679		if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
680		    != sizeof(insn))
681			break;
682
683		compute_next_pc(insn, pc, &next_pc, child);
684		if (next_pc & 0x80000000)
685			break;
686
687		if (embed_debug_trap(child, next_pc))
688			break;
689
690		invalidate_cache();
691		child->exit_code = data;
692
693		/* give it a chance to run. */
694		wake_up_process(child);
695		ret = 0;
696		break;
697	}
698
699	/*
700	 * detach a process that was attached.
701	 */
702	case PTRACE_DETACH:
703		ret = 0;
704		ret = ptrace_detach(child, data);
705		break;
706
707	case PTRACE_GETREGS:
708		ret = ptrace_getregs(child, (void __user *)data);
709		break;
710
711	case PTRACE_SETREGS:
712		ret = ptrace_setregs(child, (void __user *)data);
713		break;
714
715	default:
716		ret = ptrace_request(child, request, addr, data);
717		break;
718	}
719
720	return ret;
721}
722
/*
 * ptrace system call entry point: handle TRACEME/ATTACH directly,
 * look up and validate the target task, then hand everything else
 * to do_ptrace().  Runs under the big kernel lock.
 */
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int ret;

	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/* start the new tracee with an empty debug-trap table */
		if (ret == 0)
			init_debug_traps(child);
		goto out_tsk;
	}

	/* PTRACE_KILL is allowed even if the child is not stopped */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret == 0)
		ret = do_ptrace(request, child, addr, data);

out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();

	return ret;
}
758
759/* notification of system call entry/exit
760 * - triggered by current->work.syscall_trace
761 */
762void do_syscall_trace(void)
763{
764	if (!test_thread_flag(TIF_SYSCALL_TRACE))
765		return;
766	if (!(current->ptrace & PT_PTRACED))
767		return;
768	/* the 0x80 provides a way for the tracing parent to distinguish
769	   between a syscall stop and SIGTRAP delivery */
770	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
771				 ? 0x80 : 0));
772
773	/*
774	 * this isn't the same as continuing with a signal, but it will do
775	 * for normal use.  strace only continues with a signal if the
776	 * stopping signal is not SIGTRAP.  -brl
777	 */
778	if (current->exit_code) {
779		send_sig(current->exit_code, current, 1);
780		current->exit_code = 0;
781	}
782}
783