/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call. */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
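/*
 * A worked example of GET_BITS (illustrative numbers only): suppose r1
 * is spilled at an address whose ar.unat bit position is 10.  Then
 * GET_BITS(1, 1, unat) computes dist = 10 - 1 = 9 and rotates unat
 * right by 9, so the NaT bit of r1 lands at bit 1 of the result before
 * masking.  A rotate (rather than a shift) is used so that the
 * wrap-around case (bit < first) comes out right as well.
 */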
/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}
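/*
 * Background for the arithmetic above: IA-64 packs three instruction
 * slots into each 16-byte bundle and psr.ri selects the slot (0-2)
 * within the bundle at cr_iip, so stepping past slot 2 means advancing
 * cr_iip by 16 and wrapping ri to 0.  The MLX special case exists
 * because the movl of an MLX bundle occupies slots 1 and 2, leaving
 * slot 2 with no independently executable instruction.
 */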
/*
 * This routine is used to read the rnat bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 *	user rbs
 *
 *	+--------+ <-- lowest address
 *	| slot62 |
 *	+--------+
 *	|  rnat  | 0x....1f8
 *	+--------+
 *	| slot00 | \
 *	+--------+ |
 *	| slot01 | > child_regs->ar_rnat
 *	+--------+ |
 *	| slot02 | /			kernel rbs
 *	+--------+			+--------+
 *	<- child_regs->ar_bspstore	| slot61 | <-- krbs
 *	+- - - - +			+--------+
 *					| slot62 |
 *	+- - - - +			+--------+
 *					|  rnat  |
 *	+- - - - +			+--------+
 *	  vrnat				| slot00 |
 *	+- - - - +			+--------+
 *	=	 =			=	 =
 *					+--------+
 *					| slot00 | \
 *					+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 *					+--------+ |
 *					| slot02 | /
 *					+--------+
 *					<--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
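/*
 * A note on the index arithmetic above: an RSE rnat collection slot
 * holds the NaT bits of the 63 register slots below it, one bit per
 * slot number.  Because the user and kernel backing stores are in
 * general misaligned relative to each other, one user rnat word
 * straddles up to two kernel rnat words; `shift' measures that
 * misalignment, and the two masked extractions (rnat0 >> shift and
 * rnat1 << (63 - shift)) reassemble the user's view of the word.
 */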
/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
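/*
 * on_kernel_rbs() answers the question: does user address ADDR fall
 * within the portion of the user backing store whose contents
 * currently live on the kernel backing store?  That span runs from
 * the saved ar.bspstore up to and including the rnat collection slot
 * that corresponds to urbs_end.
 */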
/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (unsigned long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (unsigned long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
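/*
 * Why `loadrs >> 19' above: pt->loadrs keeps the loadrs field in the
 * position it occupies within ar.rsc (bits 16 and up), and the field
 * counts the bytes of the dirty partition, so shifting right by 16
 * recovers the byte count and a further shift by 3 converts bytes to
 * 8-byte register slots; ia64_rse_num_regs() then discounts the rnat
 * collection slots interleaved in that range.
 */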
/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		      unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val), 0)
		    != sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}
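/*
 * The TIF_RESTORE_RSE protocol implemented by the two functions
 * above: before a ptrace stop, ia64_ptrace_stop() flushes the kernel
 * copy of the dirty backing-store partition out to user memory so
 * that a debugger reads current data; on resume, ia64_sync_krbs()
 * reads the (possibly debugger-modified) user backing store back into
 * the kernel copy, which is what the RSE reloads from on the return
 * to user mode.
 */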
/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

static inline int
thread_matches (struct task_struct *thread, unsigned long addr)
{
	unsigned long thread_rbs_end;
	struct pt_regs *thread_regs;

	if (ptrace_check_attach(thread, 0) < 0)
		/*
		 * If the thread is not in an attachable state, we'll
		 * ignore it.  The net effect is that if ADDR happens
		 * to overlap with the portion of the thread's
		 * register backing store that is currently residing
		 * on the thread's kernel stack, then ptrace() may end
		 * up accessing a stale value.  But if the thread
		 * isn't stopped, that's a problem anyhow, so we're
		 * doing as well as we can...
		 */
		return 0;

	thread_regs = task_pt_regs(thread);
	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
		return 0;

	return 1;	/* looks like we've got a winner */
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}
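/*
 * The psr.mfh/psr.dfh handling above implements lazy treatment of the
 * high floating-point partition: mfh ("modified f32-f127") tells us
 * whether the live registers are newer than thread.fph and need to be
 * flushed, while setting dfh ("disabled f32-f127") forces the next
 * user access to fault, so the task reloads the possibly modified
 * thread.fph contents before using them.
 */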
/*
 * Change the machine-state of CHILD such that it will return via the
 * normal kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);
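/*
 * Note that access_nat_bits() gathers the NaT word from two sources:
 * the NaT bits of the scratch registers are reconstructed from the
 * saved ar.unat via the scratch_nat helpers at the top of this file,
 * while the preserved registers r4-r7 may be spilled anywhere on the
 * kernel stack and are therefore queried and set one at a time
 * through the unwinder (unw_get_gr()/unw_set_gr()).
 */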
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		__put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}
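/*
 * Error-handling pattern above: __put_user() and __copy_to_user()
 * return nonzero on failure, and those results are OR-ed into
 * `retval' as the copy proceeds; any accumulated failure is collapsed
 * into a single -EIO at the end instead of aborting mid-copy.
 */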
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}
	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request, long addr, long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (access_process_vm(child, addr, &data, sizeof(data), 0)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}
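/*
 * Note on arch_ptrace(): PTRACE_PEEKUSR and PTRACE_POKEUSR go through
 * access_uarea(), which translates the legacy user-area offsets onto
 * the regset accessors defined later in this file, so the old ptrace
 * interface and the ELF-regset interface share one implementation.
 */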
/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();

	if (unlikely(current->audit_context)) {
		long syscall;
		int arch;

		syscall = regs.r15;
		arch = AUDIT_ARCH_IA64;

		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
	}

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	if (unlikely(current->audit_context)) {
		int success = AUDITSC_RESULT(regs.r10);
		long result = regs.r8;

		if (success != AUDITSC_SUCCESS)
			result = -result;
		audit_syscall_exit(success, result);
	}

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
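/*
 * In access_elf_gpreg() above (and access_elf_breg() below), scratch
 * registers live in the pt_regs frame saved at kernel entry and can
 * be accessed directly through `ptr', whereas the preserved registers
 * (r4-r7, b1-b5) may be spilled anywhere on the kernel stack and must
 * be located through the unwinder.
 */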
static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
	       unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}
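/*
 * The do_gpregs_*() and do_fpregs_*() callbacks below all follow the
 * same pattern: a regset request arrives as a (pos, count) byte
 * window over the ELF register layout, and each handler walks that
 * window section by section, bouncing individual registers through
 * access_elf_reg() wherever pt_regs alone is not sufficient.
 */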
void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
		     index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
		     index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
		     index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}
void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}
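/*
 * fr0 and fr1 are architecturally fixed (+0.0 and +1.0 respectively),
 * so the floating-point handlers below do not track them: reads of
 * positions 0 and 1 are zero-filled and writes are ignored.
 */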
#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
		     index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
				       &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}

void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			  dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) {	/* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
				       &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) {	/* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
				       &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
					      &dst->u.set.kbuf,
					      &dst->u.set.ubuf,
					      &dst->target->thread.fph,
					      ELF_FP_OFFSET(32), -1);
	}
}
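/*
 * The 0xF masking in do_fpregs_set() above handles a regset window
 * that starts or ends in the middle of a 16-byte floating-point
 * register: the untouched half of the boundary register is fetched
 * with unw_get_fr() and merged into the staging buffer, so a partial
 * write never corrupts the other half of the register.
 */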
static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}

static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
			      kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
			      kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
			      NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
			      kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
			      kbuf, ubuf);
}
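/*
 * do_regset_call() picks the unwind setup to match the target: when
 * the target is `current' it must be unwound from its own running
 * context (unw_init_running), while any other task is guaranteed
 * stopped by the ptrace core and can be unwound from its saved
 * switch_stack.
 */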
static int
access_uarea(struct task_struct *child, unsigned long addr,
	     unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon. This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because a read may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset.  The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID.  The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

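/*
 * Regsets exported for ia64: NT_PRSTATUS covers the general
 * registers, NT_PRFPREG the floating-point registers.  The
 * fpregs_active hook reports only 32 interesting FP registers when
 * the high partition (fr32-fr127) was never used, so a core dump
 * can stop there.
 */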
static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned int i, unsigned int n,
	unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = i,
		.n = n,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}
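
/*
 * Illustrative only (not part of this file): the generic syscall
 * tracing code is expected to reach the routine above through thin
 * wrappers in <asm/syscall.h>, along the lines of
 *
 *	static inline void syscall_get_arguments(struct task_struct *task,
 *						 struct pt_regs *regs,
 *						 unsigned int i, unsigned int n,
 *						 unsigned long *args)
 *	{
 *		ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
 *	}
 *
 * with rw=0 reading and rw=1 writing the stacked argument registers;
 * on a read outside an active system call, args[] is zero-filled.
 */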