/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Derived from the x86 and Alpha versions.  Most of the code in here
 * could actually be factored into a common set of routines.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp_lock.h>
#include <linux/user.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_WRITE_MASK \
	(IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
#define IPSR_READ_MASK	IPSR_WRITE_MASK
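
/*
 * Illustrative sketch (not part of the kernel proper): a debugger can read
 * and modify the traced task's psr through the PT_CR_IPSR offset of the user
 * area; access_uarea() below applies IPSR_WRITE_MASK, so writes to any other
 * psr bits are silently ignored.  The psr.ac bit position (3) is assumed from
 * the architecture manual; error handling is omitted.
 *
 *	// user-level code, roughly:
 *	long psr = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IPSR, 0);
 *	psr |= 1UL << 3;				// turn on psr.ac
 *	ptrace(PTRACE_POKEUSER, pid, PT_CR_IPSR, psr);
 */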

#define PTRACE_DEBUG	1

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)						\
	({										\
		unsigned long bit = ia64_unat_pos(&pt->r##first);			\
		unsigned long mask = ((1UL << (last - first + 1)) - 1) << first;	\
		(ia64_rotl(unat, first) >> bit) & mask;					\
	})
	unsigned long val;

	val  = GET_BITS( 1,  3, scratch_unat);
	val |= GET_BITS(12, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
	unsigned long scratch_unat;

#	define PUT_BITS(first, last, nat)					\
	({									\
		unsigned long bit = ia64_unat_pos(&pt->r##first);		\
		unsigned long mask = ((1UL << (last - first + 1)) - 1) << bit;	\
		(ia64_rotr(nat, first) << bit) & mask;				\
	})
	scratch_unat  = PUT_BITS( 1,  3, nat);
	scratch_unat |= PUT_BITS(12, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}
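
/*
 * Illustrative sanity check (not part of the kernel): for a NaT bitset that
 * only covers the scratch registers handled above (r1-r3 and r8-r31), the two
 * routines are intended to be inverses of each other:
 *
 *	unsigned long nat = ...;	// bits 1-3 and 8-31 only
 *	unsigned long unat = ia64_put_scratch_nat_bits(pt, nat);
 *	// ia64_get_scratch_nat_bits(pt, unat) should now yield nat again
 */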

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
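
/*
 * Example of the resulting progression (assuming a non-MLX bundle at IP):
 * (iip, ri=0) -> (iip, 1) -> (iip, 2) -> (iip+16, 0).  For an MLX bundle,
 * slot 2 only holds the upper half of the movl immediate, so the step from
 * slot 1 goes straight to (iip+16, 0) instead.
 */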

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the rnat bits that are stored on the kernel backing store.
 * Since, in general, the alignments of the user and kernel backing stores are different,
 * this is not completely trivial.  In essence, we need to construct the user RNAT based
 * on up to two kernel RNAT values and/or the RNAT value saved in the child's pt_regs.
 *
 * user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * |  rnat  | 0x....1f8
 * +--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_regs->ar_rnat
 * +--------+ |
 * | slot02 | /				kernel rbs
 * +--------+				+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +				+--------+
 *					| slot62 |
 * +- - - - +				+--------+
 *					|  rnat	 |
 * +- - - - +				+--------+
 *   vrnat				| slot00 |
 * +- - - - +				+--------+
 *					=	 =
 *					+--------+
 *					| slot00 | \
 *					+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 *					+--------+ |
 *					| slot02 | /
 *					+--------+
 *						  <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat corresponds to some
 * bit N (0 <= N <= 62) in one of the kernel rnat values.  The kernel rnat value holding
 * this bit is stored in variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel backing store,
 *   rnat0/rnat1 are set to 0 and the low order bits are merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel backing store,
 *   rnat0/rnat1 get their values from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct pt_regs *pt, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, kmask = ~0UL;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs;

	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
		urnat = (pt->ar_rnat & ~kmask);
	}
	if (rnat0_kaddr >= kbsp) {
		rnat0 = sw->ar_rnat;
	} else if (rnat0_kaddr > krbs) {
		rnat0 = *rnat0_kaddr;
	}
	if (rnat1_kaddr >= kbsp) {
		rnat1 = sw->ar_rnat;
	} else if (rnat1_kaddr > krbs) {
		rnat1 = *rnat1_kaddr;
	}
	urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & kmask;
	return urnat;
}

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct pt_regs *pt, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, kmask = ~0UL, mask;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs;

	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = (long) ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
		pt->ar_rnat = (pt->ar_rnat & kmask) | (urnat & ~kmask);
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	mask = ~0UL << shift;
	if (rnat0_kaddr >= kbsp) {
		sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat0 & mask);
	} else if (rnat0_kaddr > krbs) {
		*rnat0_kaddr = ((*rnat0_kaddr & ~mask) | (rnat0 & mask));
	}

	rnat1 = (urnat >> (63 - shift));
	mask = ~0UL >> (63 - shift);
	if (rnat1_kaddr >= kbsp) {
		sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat1 & mask);
	} else if (rnat1_kaddr > krbs) {
		*rnat1_kaddr = ((*rnat1_kaddr & ~mask) | (rnat1 & mask));
	}
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR is the user-level
 * address to read the word from, VAL a pointer to the return value, and USER_RBS_END
 * gives the end of the user-level backing store (i.e., it's the address that would be in
 * ar.bsp after the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing store for those
 * registers that got spilled there.  It also takes care of calculating the appropriate
 * RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end,
	   unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (unsigned long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = ia64_task_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(urbs_end)) {
		/*
		 * Attempt to read the RBS in an area that's actually on the kernel RBS =>
		 * read the corresponding bits in the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child_regs, child_stack, krbs, rnat_addr);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the data portion of a
			 * NaT value gets saved on a st8.spill or RSE spill (e.g., see
			 * EAS 2.6, 4.4.4.6 Register Spill and Fill).  To get consistent
			 * behavior across all possible IA-64 implementations, we return
			 * zero in this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/* the desired word is on the kernel RBS and is not a NaT */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}
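
/*
 * Illustrative user-level sketch (not part of the kernel; assumes the glibc
 * ptrace() wrapper and <asm/ptrace_offsets.h>): a debugger reads the child's
 * register backing store simply with PTRACE_PEEKDATA on addresses below the
 * value reported for PT_AR_BSP; such requests end up in ia64_peek() above,
 * which transparently fetches words that still live on the kernel RBS.
 *
 *	unsigned long bsp, val;
 *
 *	bsp = ptrace(PTRACE_PEEKUSER, pid, PT_AR_BSP, 0);
 *	val = ptrace(PTRACE_PEEKDATA, pid, bsp - 8, 0);
 *	// val is the most recently spilled slot (or the RNaT collection
 *	// word, if bsp - 8 happens to be an RNaT slot)
 */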

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end,
	   unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end = (unsigned long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = ia64_task_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(urbs_end)) {
		/*
		 * Attempt to write the RBS in an area that's actually on the kernel RBS
		 * => write the corresponding bits in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child_regs, child_stack, krbs, laddr, val);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) {
		return -EIO;
	}
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing store.  This is the
 * address that would have been stored in ar.bsp if the user had executed a "cover"
 * instruction right before entering the kernel.  If CFMP is not NULL, it is used to
 * return the "current frame mask" that was active at the time the kernel was entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt, unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm;
	struct unw_frame_info info;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
	cfm = pt->cr_ifs & ~(1UL << 63);

	if ((long) pt->cr_ifs >= 0) {
		/*
		 * If bit 63 of cr.ifs is cleared, the kernel was entered via a system
		 * call and we need to recover the CFM that existed on entry to the
		 * kernel by unwinding the kernel stack.
		 */
		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) == 0) {
			unw_get_cfm(&info, &cfm);
			ndirty += (cfm & 0x7f);
		}
	}
	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel space to the VM of the
 * CHILD task.  SW and PT are the pointers to the switch_stack and pt_regs structures,
 * respectively.  USER_RBS_END is the user-level address at which the backing store ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
			return -EIO;
	}
	return 0;
}

/*
 * Simulate user-level "flushrs".  Note: we can't just add pt->loadrs>>16 to
 * pt->ar_bspstore because the kernel backing store and the user-level backing store may
 * have different alignments (and therefore a different number of intervening rnat slots).
 */
static void
user_flushrs (struct task_struct *task, struct pt_regs *pt)
{
	unsigned long *krbs;
	long ndirty;

	krbs = (unsigned long *) task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	pt->ar_bspstore = (unsigned long) ia64_rse_skip_regs((unsigned long *) pt->ar_bspstore,
							     ndirty);
	pt->loadrs = 0;
}

/*
 * Synchronize the RSE backing store of CHILD and all tasks that share the address space
 * with it.  CHILD_URBS_END is the address of the end of the register backing store of
 * CHILD.  If MAKE_WRITABLE is set, a user-level "flushrs" is simulated such that the VM
 * can be written via ptrace() and the tasks will pick up the newly written values.  It
 * would be OK to unconditionally simulate a "flushrs", but this would be more intrusive
 * than strictly necessary (e.g., it would make it impossible to obtain the original value
 * of ar.bspstore).
 */
static void
threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end, int make_writable)
{
	struct switch_stack *sw;
	unsigned long urbs_end;
	struct task_struct *p;
	struct mm_struct *mm;
	struct pt_regs *pt;
	long multi_threaded;

	task_lock(child);
	{
		mm = child->mm;
		multi_threaded = mm && (atomic_read(&mm->mm_users) > 1);
	}
	task_unlock(child);

	if (!multi_threaded) {
		sw = (struct switch_stack *) (child->thread.ksp + 16);
		pt = ia64_task_regs(child);
		ia64_sync_user_rbs(child, sw, pt->ar_bspstore, child_urbs_end);
		if (make_writable)
			user_flushrs(child, pt);
	} else {
		read_lock(&tasklist_lock);
		{
			for_each_task(p) {
				if (p->mm == mm && p->state != TASK_RUNNING) {
					sw = (struct switch_stack *) (p->thread.ksp + 16);
					pt = ia64_task_regs(p);
					urbs_end = ia64_get_user_rbs_end(p, pt, NULL);
					ia64_sync_user_rbs(p, sw, pt->ar_bspstore, urbs_end);
					if (make_writable)
						user_flushrs(p, pt);
				}
			}
		}
		read_unlock(&tasklist_lock);
	}
	child->thread.flags |= IA64_THREAD_KRBS_SYNCED;	/* set the flag in the child thread only */
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
#ifdef CONFIG_SMP
	struct task_struct *fpu_owner = current;
#else
	struct task_struct *fpu_owner = ia64_get_fpu_owner();
#endif

	if (task == fpu_owner && psr->mfh) {
		psr->mfh = 0;
		ia64_save_fpu(&task->thread.fph[0]);
		task->thread.flags |= IA64_THREAD_FPH_VALID;
	}
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
#ifndef CONFIG_SMP
	if (ia64_get_fpu_owner() == task)
		ia64_set_fpu_owner(0);
#endif
	psr->dfh = 1;
}
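
/*
 * Illustrative sketch (not part of the kernel): when a debugger pokes an fph
 * register through the user area, access_uarea() below calls ia64_sync_fph()
 * first, so the write lands in thread.fph and the task reloads it on its next
 * activation.  PT_F32 is assumed to be the user-area offset of f32, per
 * <asm/ptrace_offsets.h>.
 *
 *	// write the first half of the child's f32 spill image:
 *	ptrace(PTRACE_POKEUSER, pid, PT_F32, new_bits);
 */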

static int
access_fr (struct unw_frame_info *info, int regnum, int hi, unsigned long *data, int write_access)
{
	struct ia64_fpreg fpval;
	int ret;

	ret = unw_get_fr(info, regnum, &fpval);
	if (ret < 0)
		return ret;

	if (write_access) {
		fpval.u.bits[hi] = *data;
		ret = unw_set_fr(info, regnum, fpval);
	} else
		*data = fpval.u.bits[hi];
	return ret;
}

static int
access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
{
	unsigned long *ptr, regnum, urbs_end, rnat_addr;
	struct switch_stack *sw;
	struct unw_frame_info info;
	struct pt_regs *pt;

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}

	if (addr < PT_F127 + 16) {
		/* accessing fph */
		if (write_access)
			ia64_sync_fph(child);
		else
			ia64_flush_fph(child);
		ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
	} else if (addr >= PT_F10 && addr < PT_F15 + 16) {
		/* scratch registers untouched by kernel (saved in switch_stack) */
		ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
	} else if (addr < PT_AR_LC + 8) {
		/* preserved state: */
		unsigned long nat_bits, scratch_unat, dummy = 0;
		struct unw_frame_info info;
		char nat = 0;
		int ret;

		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) < 0)
			return -1;

		switch (addr) {
		      case PT_NAT_BITS:
			if (write_access) {
				nat_bits = *data;
				scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
				if (unw_set_ar(&info, UNW_AR_UNAT, scratch_unat) < 0) {
					dprintk("ptrace: failed to set ar.unat\n");
					return -1;
				}
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					unw_set_gr(&info, regnum, dummy, (nat_bits >> regnum) & 1);
				}
			} else {
				if (unw_get_ar(&info, UNW_AR_UNAT, &scratch_unat) < 0) {
					dprintk("ptrace: failed to read ar.unat\n");
					return -1;
				}
				nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					nat_bits |= (nat != 0) << regnum;
				}
				*data = nat_bits;
			}
			return 0;

		      case PT_R4: case PT_R5: case PT_R6: case PT_R7:
			if (write_access) {
				/* read NaT bit first: */
				ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat);
				if (ret < 0)
					return ret;
			}
			return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, &nat,
					     write_access);

		      case PT_B1: case PT_B2: case PT_B3: case PT_B4: case PT_B5:
			return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, write_access);

		      case PT_AR_EC:
			return unw_access_ar(&info, UNW_AR_EC, data, write_access);

		      case PT_AR_LC:
			return unw_access_ar(&info, UNW_AR_LC, data, write_access);

		      default:
			if (addr >= PT_F2 && addr < PT_F5 + 16)
				return access_fr(&info, (addr - PT_F2)/16 + 2, (addr & 8) != 0,
						 data, write_access);
			else if (addr >= PT_F16 && addr < PT_F31 + 16)
				return access_fr(&info, (addr - PT_F16)/16 + 16, (addr & 8) != 0,
						 data, write_access);
			else {
				dprintk("ptrace: rejecting access to register address 0x%lx\n",
					addr);
				return -1;
			}
		}
	} else if (addr < PT_F9+16) {
		/* scratch state */
		switch (addr) {
		      case PT_AR_BSP:
			/*
			 * By convention, we use PT_AR_BSP to refer to the end of the user-level
			 * backing store.  Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) to get
			 * the real value of ar.bsp at the time the kernel was entered.
			 */
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			if (write_access) {
				if (*data != urbs_end) {
					if (ia64_sync_user_rbs(child, sw,
							       pt->ar_bspstore, urbs_end) < 0)
						return -1;
					/* simulate user-level write of ar.bsp: */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;

		      case PT_CFM:
			if ((long) pt->cr_ifs < 0) {
				if (write_access)
					pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
						      | (*data & 0x3fffffffffUL));
				else
					*data = pt->cr_ifs & 0x3fffffffffUL;
			} else {
				/* kernel was entered through a system call */
				unsigned long cfm;

				unw_init_from_blocked_task(&info, child);
				if (unw_unwind_to_user(&info) < 0)
					return -1;

				unw_get_cfm(&info, &cfm);
				if (write_access)
					unw_set_cfm(&info, ((cfm & ~0x3fffffffffU)
							    | (*data & 0x3fffffffffUL)));
				else
					*data = cfm;
			}
			return 0;

		      case PT_CR_IPSR:
			if (write_access)
				pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
					       | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
			else
				*data = (pt->cr_ipsr & IPSR_READ_MASK);
			return 0;

		      case PT_AR_RNAT:
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			rnat_addr = (long) ia64_rse_rnat_addr((long *) urbs_end);
			if (write_access)
				return ia64_poke(child, sw, urbs_end, rnat_addr, *data);
			else
				return ia64_peek(child, sw, urbs_end, rnat_addr, data);

		      case PT_R1:  case PT_R2:  case PT_R3:
		      case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
		      case PT_R12: case PT_R13: case PT_R14: case PT_R15:
		      case PT_R16: case PT_R17: case PT_R18: case PT_R19:
		      case PT_R20: case PT_R21: case PT_R22: case PT_R23:
		      case PT_R24: case PT_R25: case PT_R26: case PT_R27:
		      case PT_R28: case PT_R29: case PT_R30: case PT_R31:
		      case PT_B0:  case PT_B6:  case PT_B7:
		      case PT_F6:  case PT_F6+8: case PT_F7: case PT_F7+8:
		      case PT_F8:  case PT_F8+8: case PT_F9: case PT_F9+8:
		      case PT_AR_BSPSTORE:
		      case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS:
		      case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
			/* scratch register */
			ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR);
			break;

		      default:
			/* disallow accessing anything else... */
			dprintk("ptrace: rejecting access to register address 0x%lx\n",
				addr);
			return -1;
		}
	} else {
		/* access debug registers */

		if (addr >= PT_IBR) {
			regnum = (addr - PT_IBR) >> 3;
			ptr = &child->thread.ibr[0];
		} else {
			regnum = (addr - PT_DBR) >> 3;
			ptr = &child->thread.dbr[0];
		}

		if (regnum >= 8) {
			dprintk("ptrace: rejecting access to register address 0x%lx\n", addr);
			return -1;
		}
#ifdef CONFIG_PERFMON
		/*
		 * Check if debug registers are used by perfmon. This test must be done
		 * once we know that we can do the operation, i.e. the arguments are all
		 * valid, but before we start modifying the state.
		 *
		 * Perfmon needs to keep a count of how many processes are trying to
		 * modify the debug registers for system wide monitoring sessions.
		 *
		 * We also include read access here, because they may cause the
		 * PMU-installed debug register state (dbr[], ibr[]) to be reset. The two
		 * arrays are also used by perfmon, but we do not use
		 * IA64_THREAD_DBG_VALID. The registers are restored by the PMU context
		 * switch code.
		 */
		if (pfm_use_debug_registers(child))
			return -1;
#endif

		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
			child->thread.flags |= IA64_THREAD_DBG_VALID;
			memset(child->thread.dbr, 0, sizeof(child->thread.dbr));
			memset(child->thread.ibr, 0, sizeof(child->thread.ibr));
		}

		ptr += regnum;

		if (write_access)
			/* don't let the user set kernel-level breakpoints... */
			*ptr = *data & ~(7UL << 56);
		else
			*data = *ptr;
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
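
/*
 * Illustrative sketch (not part of the kernel): access_uarea() is what backs
 * PTRACE_PEEKUSR/PTRACE_POKEUSR, so a debugger can, for example, check
 * whether the child's r4 currently holds a NaT by reading the PT_NAT_BITS
 * pseudo-register (bit i corresponds to register ri):
 *
 *	unsigned long nat_bits = ptrace(PTRACE_PEEKUSER, pid, PT_NAT_BITS, 0);
 *	int r4_is_nat = (nat_bits >> 4) & 1;
 */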

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
{
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval;
	struct unw_frame_info info;
	char nat = 0;
	int i;

	retval = verify_area(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs));
	if (retval != 0) {
		return -EIO;
	}

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	retval = 0;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= access_uarea(child, PT_CR_IPSR, &ppr->cr_ipsr, 0);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= access_uarea(child, PT_AR_EC, &ppr->ar[PT_AUR_EC], 0);
	retval |= access_uarea(child, PT_AR_LC, &ppr->ar[PT_AUR_LC], 0);
	retval |= access_uarea(child, PT_AR_RNAT, &ppr->ar[PT_AUR_RNAT], 0);
	retval |= access_uarea(child, PT_AR_BSP, &ppr->ar[PT_AUR_BSP], 0);
	retval |= access_uarea(child, PT_CFM, &ppr->cfm, 0);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long) * 3);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= unw_access_gr(&info, i, &ppr->gr[i], &nat, 0);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 4);

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= unw_access_br(&info, i, &ppr->br[i], 0);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 0);
		retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 0);
	}

	/* fr6-fr9 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 4);

	/* fp scratch regs(10-15) */

	retval |= __copy_to_user(&ppr->fr[10], &sw->f10, sizeof(struct ia64_fpreg) * 6);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 0);
		retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 0);
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph, sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= access_uarea(child, PT_NAT_BITS, &ppr->nat, 0);

	ret = retval ? -EIO : 0;
	return ret;
}
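
/*
 * Illustrative user-level sketch (not part of the kernel): PTRACE_GETREGS
 * passes the destination buffer in the "data" argument, so a debugger fills
 * a struct pt_all_user_regs (from <asm/ptrace.h>) in one call:
 *
 *	struct pt_all_user_regs regs;
 *
 *	if (ptrace(PTRACE_GETREGS, pid, 0, (unsigned long) &regs) == 0)
 *		printf("ip=0x%lx pr=0x%lx bsp=0x%lx\n",
 *		       regs.cr_iip, regs.pr, regs.ar[PT_AUR_BSP]);
 */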

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs *ppr)
{
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval;
	struct unw_frame_info info;
	char nat = 0;
	int i;

	retval = verify_area(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs));
	if (retval != 0) {
		return -EIO;
	}

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	retval = 0;

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= access_uarea(child, PT_CR_IPSR, &ppr->cr_ipsr, 1);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= access_uarea(child, PT_AR_EC, &ppr->ar[PT_AUR_EC], 1);
	retval |= access_uarea(child, PT_AR_LC, &ppr->ar[PT_AUR_LC], 1);
	retval |= access_uarea(child, PT_AR_RNAT, &ppr->ar[PT_AUR_RNAT], 1);
	retval |= access_uarea(child, PT_AR_BSP, &ppr->ar[PT_AUR_BSP], 1);
	retval |= access_uarea(child, PT_CFM, &ppr->cfm, 1);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long) * 3);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		long ret = unw_get_gr(&info, i, &ppr->gr[i], &nat);
		if (ret < 0) {
			return ret;
		}
		retval |= unw_access_gr(&info, i, &ppr->gr[i], &nat, 1);
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 4);

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= unw_access_br(&info, i, &ppr->br[i], 1);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 1);
		retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 1);
	}

	/* fr6-fr9 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 4);

	/* fp scratch regs(10-15) */

	retval |= __copy_from_user(&sw->f10, &ppr->fr[10], sizeof(ppr->fr[10]) * 6);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 1);
		retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 1);
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32], sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= access_uarea(child, PT_NAT_BITS, &ppr->nat, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));

	/* make sure the single-step/taken-branch trap bits are not set: */
	child_psr->ss = 0;
	child_psr->tb = 0;

	/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
	child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
}

asmlinkage long
sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
	    long arg4, long arg5, long arg6, long arg7, long stack)
{
	struct pt_regs *pt, *regs = (struct pt_regs *) &stack;
	unsigned long urbs_end;
	struct task_struct *child;
	struct switch_stack *sw;
	long ret;

	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}

	ret = -ESRCH;
	read_lock(&tasklist_lock);
	{
		child = find_task_by_pid(pid);
		if (child)
			get_task_struct(child);
	}
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;
	ret = -EPERM;
	if (pid == 1)		/* no messing around with init! */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);

	switch (request) {
	      case PTRACE_PEEKTEXT:
	      case PTRACE_PEEKDATA:		/* read word at location addr */
		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);

		if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED))
			threads_sync_user_rbs(child, urbs_end, 0);

		ret = ia64_peek(child, sw, urbs_end, addr, &data);
		if (ret == 0) {
			ret = data;
			regs->r8 = 0;	/* ensure "ret" is not mistaken as an error code */
		}
		goto out_tsk;

	      case PTRACE_POKETEXT:
	      case PTRACE_POKEDATA:		/* write the word at location addr */
		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
		if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED))
			threads_sync_user_rbs(child, urbs_end, 1);

		ret = ia64_poke(child, sw, urbs_end, addr, data);
		goto out_tsk;

	      case PTRACE_PEEKUSR:		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0) {
			ret = -EIO;
			goto out_tsk;
		}
		ret = data;
		regs->r8 = 0;	/* ensure "ret" is not mistaken as an error code */
		goto out_tsk;

	      case PTRACE_POKEUSR:	      /* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0) {
			ret = -EIO;
			goto out_tsk;
		}
		ret = 0;
		goto out_tsk;

	      case PTRACE_GETSIGINFO:
		ret = -EIO;
		if (!access_ok(VERIFY_WRITE, data, sizeof (siginfo_t)) || !child->thread.siginfo)
			goto out_tsk;
		ret = copy_siginfo_to_user((siginfo_t *) data, child->thread.siginfo);
		goto out_tsk;

	      case PTRACE_SETSIGINFO:
		ret = -EIO;
		if (!access_ok(VERIFY_READ, data, sizeof (siginfo_t))
		    || child->thread.siginfo == 0)
			goto out_tsk;
		ret = copy_siginfo_from_user(child->thread.siginfo, (siginfo_t *) data);
		goto out_tsk;

	      case PTRACE_SYSCALL:	/* continue and stop at next (return from) syscall */
	      case PTRACE_CONT:		/* restart after signal. */
		ret = -EIO;
		if (data > _NSIG)
			goto out_tsk;
		if (request == PTRACE_SYSCALL)
			child->ptrace |= PT_TRACESYS;
		else
			child->ptrace &= ~PT_TRACESYS;
		child->exit_code = data;

		/* make sure the single step/taken-branch trap bits are not set: */
		ia64_psr(pt)->ss = 0;
		ia64_psr(pt)->tb = 0;

		/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
		child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;

		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_KILL:
		/*
		 * Make the child exit.  Best I can do is send it a
		 * sigkill.  Perhaps it should be put in the status
		 * that it wants to exit.
		 */
		if (child->state == TASK_ZOMBIE)		/* already dead */
			goto out_tsk;
		child->exit_code = SIGKILL;

		/* make sure the single-step/taken-branch trap bits are not set: */
		ia64_psr(pt)->ss = 0;
		ia64_psr(pt)->tb = 0;

		/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
		child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;

		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_SINGLESTEP:		/* let child execute for one instruction */
	      case PTRACE_SINGLEBLOCK:
		ret = -EIO;
		if (data > _NSIG)
			goto out_tsk;

		child->ptrace &= ~PT_TRACESYS;
		if (request == PTRACE_SINGLESTEP) {
			ia64_psr(pt)->ss = 1;
		} else {
			ia64_psr(pt)->tb = 1;
		}
		child->exit_code = data;

		/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
		child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;

		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_DETACH:		/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		goto out_tsk;

	      case PTRACE_GETREGS:
		ret = ptrace_getregs(child, (struct pt_all_user_regs*) data);
		goto out_tsk;

	      case PTRACE_SETREGS:
		ret = ptrace_setregs(child, (struct pt_all_user_regs*) data);
		goto out_tsk;

	      default:
		ret = -EIO;
		goto out_tsk;
	}
  out_tsk:
	free_task_struct(child);
  out:
	unlock_kernel();
	return ret;
}

void
syscall_trace (void)
{
	if ((current->ptrace & (PT_PTRACED|PT_TRACESYS)) != (PT_PTRACED|PT_TRACESYS))
		return;
	current->exit_code = SIGTRAP;
	set_current_state(TASK_STOPPED);
	notify_parent(current, SIGCHLD);
	schedule();
	/*
	 * This isn't the same as continuing with a signal, but it
	 * will do for normal use.  strace only continues with a
	 * signal if the stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}

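/*
 * Illustrative user-level sketch (not part of the kernel): syscall_trace() is
 * what stops the child at syscall entry and exit while PTRACE_SYSCALL is in
 * effect.  A minimal tracer (glossing over the difference between signal
 * stops and syscall stops) could look like this; on ia64 the syscall number
 * is in r15, readable via the PT_R15 user-area offset:
 *
 *	// child:
 *	ptrace(PTRACE_TRACEME, 0, 0, 0);
 *	execve(path, argv, envp);
 *
 *	// parent:
 *	while (waitpid(pid, &status, 0) > 0 && WIFSTOPPED(status)) {
 *		long nr = ptrace(PTRACE_PEEKUSER, pid, PT_R15, 0);
 *		printf("syscall %ld\n", nr);
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);
 *	}
 */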