#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#include <compat/freebsd32/freebsd32_signal.h>

/*
 * 32-bit layout of struct ptrace_io_desc, used when a 32-bit debugger
 * runs under COMPAT_FREEBSD32: pointers and offsets shrink to 32 bits.
 */
struct ptrace_io_desc32 {
	int		piod_op;
	uint32_t	piod_offs;
	uint32_t	piod_addr;
	uint32_t	piod_len;
};

/*
 * 32-bit layout of struct ptrace_vm_entry; pve_path is a 32-bit
 * user-space pointer.
 */
struct ptrace_vm_entry32 {
	int		pve_entry;
	int		pve_timestamp;
	uint32_t	pve_start;
	uint32_t	pve_end;
	uint32_t	pve_offset;
	u_int		pve_prot;
	u_int		pve_pathlen;
	int32_t		pve_fileid;
	u_int		pve_fsid;
	uint32_t	pve_path;
};

/*
 * 32-bit layout of struct ptrace_lwpinfo (carries the 32-bit siginfo).
 */
struct ptrace_lwpinfo32 {
	lwpid_t	pl_lwpid;		/* LWP described. */
	int	pl_event;		/* Event that stopped the LWP. */
	int	pl_flags;		/* LWP flags. */
	sigset_t	pl_sigmask;	/* LWP signal mask */
	sigset_t	pl_siglist;	/* LWP pending signal */
	struct siginfo32 pl_siginfo;	/* siginfo for signal */
	char	pl_tdname[MAXCOMLEN + 1];	/* LWP name. */
	int	pl_child_pid;		/* New child pid */
};

#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

/*
 * Common wrapper: with the process lock held, fail with EIO if the
 * target is swapped out (P_INMEM clear), otherwise run the machine-
 * dependent action and return its error code.
 */
#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

/*
 * Copy data between the address space of process p and the uio, one
 * page at a time, faulting and holding each target page for the
 * duration of the copy.  The caller must hold the process (p_lock)
 * so the vmspace cannot be torn down underneath us.
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, page_offset, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	/*
	 * If we are writing, then we request vm_fault() to create a private
	 * copy of each page.  Since these copies will not be writeable by the
	 * process, we must explicity request that they be dirtied.
	 */
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		u_int len;
		vm_page_t m;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault and hold the page on behalf of the process.
		 */
		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
		if (error != KERN_SUCCESS) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (writing && error == 0) {
			vm_map_lock_read(map);
			if (vm_map_check_protection(map, pageno, pageno +
			    PAGE_SIZE, VM_PROT_EXECUTE))
				vm_sync_icache(map, uva, len);
			vm_map_unlock_read(map);
		}

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * Look up the vm_map entry described by pve->pve_entry in process p and
 * fill in *pve (bounds, protection, backing file info, path).
 * NOTE(review): only the prologue is visible in this chunk; the function
 * body continues outside the extracted region.
 */
static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
| 34 35#include "opt_compat.h" 36 37#include <sys/param.h> 38#include <sys/systm.h> 39#include <sys/lock.h> 40#include <sys/mutex.h> 41#include <sys/syscallsubr.h> 42#include <sys/sysent.h> 43#include <sys/sysproto.h> 44#include <sys/proc.h> 45#include <sys/vnode.h> 46#include <sys/ptrace.h> 47#include <sys/sx.h> 48#include <sys/malloc.h> 49#include <sys/signalvar.h> 50 51#include <machine/reg.h> 52 53#include <security/audit/audit.h> 54 55#include <vm/vm.h> 56#include <vm/pmap.h> 57#include <vm/vm_extern.h> 58#include <vm/vm_map.h> 59#include <vm/vm_kern.h> 60#include <vm/vm_object.h> 61#include <vm/vm_page.h> 62#include <vm/vm_pager.h> 63#include <vm/vm_param.h> 64 65#ifdef COMPAT_FREEBSD32 66#include <sys/procfs.h> 67#include <compat/freebsd32/freebsd32_signal.h> 68 69struct ptrace_io_desc32 { 70 int piod_op; 71 uint32_t piod_offs; 72 uint32_t piod_addr; 73 uint32_t piod_len; 74}; 75 76struct ptrace_vm_entry32 { 77 int pve_entry; 78 int pve_timestamp; 79 uint32_t pve_start; 80 uint32_t pve_end; 81 uint32_t pve_offset; 82 u_int pve_prot; 83 u_int pve_pathlen; 84 int32_t pve_fileid; 85 u_int pve_fsid; 86 uint32_t pve_path; 87}; 88 89struct ptrace_lwpinfo32 { 90 lwpid_t pl_lwpid; /* LWP described. */ 91 int pl_event; /* Event that stopped the LWP. */ 92 int pl_flags; /* LWP flags. */ 93 sigset_t pl_sigmask; /* LWP signal mask */ 94 sigset_t pl_siglist; /* LWP pending signal */ 95 struct siginfo32 pl_siginfo; /* siginfo for signal */ 96 char pl_tdname[MAXCOMLEN + 1]; /* LWP name. */ 97 int pl_child_pid; /* New child pid */ 98}; 99 100#endif 101 102/* 103 * Functions implemented using PROC_ACTION(): 104 * 105 * proc_read_regs(proc, regs) 106 * Get the current user-visible register set from the process 107 * and copy it into the regs structure (<machine/reg.h>). 108 * The process is stopped at the time read_regs is called. 109 * 110 * proc_write_regs(proc, regs) 111 * Update the current register set from the passed in regs 112 * structure. 
Take care to avoid clobbering special CPU 113 * registers or privileged bits in the PSL. 114 * Depending on the architecture this may have fix-up work to do, 115 * especially if the IAR or PCW are modified. 116 * The process is stopped at the time write_regs is called. 117 * 118 * proc_read_fpregs, proc_write_fpregs 119 * deal with the floating point register set, otherwise as above. 120 * 121 * proc_read_dbregs, proc_write_dbregs 122 * deal with the processor debug register set, otherwise as above. 123 * 124 * proc_sstep(proc) 125 * Arrange for the process to trap after executing a single instruction. 126 */ 127 128#define PROC_ACTION(action) do { \ 129 int error; \ 130 \ 131 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \ 132 if ((td->td_proc->p_flag & P_INMEM) == 0) \ 133 error = EIO; \ 134 else \ 135 error = (action); \ 136 return (error); \ 137} while(0) 138 139int 140proc_read_regs(struct thread *td, struct reg *regs) 141{ 142 143 PROC_ACTION(fill_regs(td, regs)); 144} 145 146int 147proc_write_regs(struct thread *td, struct reg *regs) 148{ 149 150 PROC_ACTION(set_regs(td, regs)); 151} 152 153int 154proc_read_dbregs(struct thread *td, struct dbreg *dbregs) 155{ 156 157 PROC_ACTION(fill_dbregs(td, dbregs)); 158} 159 160int 161proc_write_dbregs(struct thread *td, struct dbreg *dbregs) 162{ 163 164 PROC_ACTION(set_dbregs(td, dbregs)); 165} 166 167/* 168 * Ptrace doesn't support fpregs at all, and there are no security holes 169 * or translations for fpregs, so we can just copy them. 170 */ 171int 172proc_read_fpregs(struct thread *td, struct fpreg *fpregs) 173{ 174 175 PROC_ACTION(fill_fpregs(td, fpregs)); 176} 177 178int 179proc_write_fpregs(struct thread *td, struct fpreg *fpregs) 180{ 181 182 PROC_ACTION(set_fpregs(td, fpregs)); 183} 184 185#ifdef COMPAT_FREEBSD32 186/* For 32 bit binaries, we need to expose the 32 bit regs layouts. 
*/ 187int 188proc_read_regs32(struct thread *td, struct reg32 *regs32) 189{ 190 191 PROC_ACTION(fill_regs32(td, regs32)); 192} 193 194int 195proc_write_regs32(struct thread *td, struct reg32 *regs32) 196{ 197 198 PROC_ACTION(set_regs32(td, regs32)); 199} 200 201int 202proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 203{ 204 205 PROC_ACTION(fill_dbregs32(td, dbregs32)); 206} 207 208int 209proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32) 210{ 211 212 PROC_ACTION(set_dbregs32(td, dbregs32)); 213} 214 215int 216proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 217{ 218 219 PROC_ACTION(fill_fpregs32(td, fpregs32)); 220} 221 222int 223proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32) 224{ 225 226 PROC_ACTION(set_fpregs32(td, fpregs32)); 227} 228#endif 229 230int 231proc_sstep(struct thread *td) 232{ 233 234 PROC_ACTION(ptrace_single_step(td)); 235} 236 237int 238proc_rwmem(struct proc *p, struct uio *uio) 239{ 240 vm_map_t map; 241 vm_offset_t pageno; /* page number */ 242 vm_prot_t reqprot; 243 int error, fault_flags, page_offset, writing; 244 245 /* 246 * Assert that someone has locked this vmspace. (Should be 247 * curthread but we can't assert that.) This keeps the process 248 * from exiting out from under us until this operation completes. 249 */ 250 KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__, 251 p, p->p_pid)); 252 253 /* 254 * The map we want... 255 */ 256 map = &p->p_vmspace->vm_map; 257 258 /* 259 * If we are writing, then we request vm_fault() to create a private 260 * copy of each page. Since these copies will not be writeable by the 261 * process, we must explicity request that they be dirtied. 262 */ 263 writing = uio->uio_rw == UIO_WRITE; 264 reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ; 265 fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL; 266 267 /* 268 * Only map in one page at a time. We don't have to, but it 269 * makes things easier. 
This way is trivial - right? 270 */ 271 do { 272 vm_offset_t uva; 273 u_int len; 274 vm_page_t m; 275 276 uva = (vm_offset_t)uio->uio_offset; 277 278 /* 279 * Get the page number of this segment. 280 */ 281 pageno = trunc_page(uva); 282 page_offset = uva - pageno; 283 284 /* 285 * How many bytes to copy 286 */ 287 len = min(PAGE_SIZE - page_offset, uio->uio_resid); 288 289 /* 290 * Fault and hold the page on behalf of the process. 291 */ 292 error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m); 293 if (error != KERN_SUCCESS) { 294 if (error == KERN_RESOURCE_SHORTAGE) 295 error = ENOMEM; 296 else 297 error = EFAULT; 298 break; 299 } 300 301 /* 302 * Now do the i/o move. 303 */ 304 error = uiomove_fromphys(&m, page_offset, len, uio); 305 306 /* Make the I-cache coherent for breakpoints. */ 307 if (writing && error == 0) { 308 vm_map_lock_read(map); 309 if (vm_map_check_protection(map, pageno, pageno + 310 PAGE_SIZE, VM_PROT_EXECUTE)) 311 vm_sync_icache(map, uva, len); 312 vm_map_unlock_read(map); 313 } 314 315 /* 316 * Release the page. 317 */ 318 vm_page_lock(m); 319 vm_page_unhold(m); 320 vm_page_unlock(m); 321 322 } while (error == 0 && uio->uio_resid > 0); 323 324 return (error); 325} 326 327static int 328ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve) 329{ 330 struct vattr vattr; 331 vm_map_t map; 332 vm_map_entry_t entry; 333 vm_object_t obj, tobj, lobj; 334 struct vmspace *vm; 335 struct vnode *vp; 336 char *freepath, *fullpath; 337 u_int pathlen;
|
		/*
		 * NOTE(review): this is the tail of ptrace_vm_entry(); its
		 * opening lines fall outside this chunk.  Copy the resolved
		 * path out to the caller-supplied buffer if it fits, always
		 * reporting the required length (incl. NUL) in pve_pathlen.
		 */
		if (fullpath != NULL) {
			pve->pve_pathlen = strlen(fullpath) + 1;
			if (pve->pve_pathlen <= pathlen) {
				error = copyout(fullpath, pve->pve_path,
				    pve->pve_pathlen);
			} else
				error = ENAMETOOLONG;
		}
		if (freepath != NULL)
			free(freepath, M_TEMP);
	}
	}

	return (error);
}

#ifdef COMPAT_FREEBSD32
/*
 * COMPAT_FREEBSD32 shim for PT_VM_ENTRY: widen the 32-bit request into
 * a native struct ptrace_vm_entry, perform the lookup, then narrow the
 * results back.  pve_pathlen is copied back even on error so the caller
 * learns the required path buffer size.
 */
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}

/*
 * Narrow a native struct ptrace_lwpinfo into its 32-bit layout for a
 * 32-bit debugger (PT_LWPINFO).
 */
static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

	pl32->pl_lwpid = pl->pl_lwpid;
	pl32->pl_event = pl->pl_event;
	pl32->pl_flags = pl->pl_flags;
	pl32->pl_sigmask = pl->pl_sigmask;
	pl32->pl_siglist = pl->pl_siglist;
	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
	strcpy(pl32->pl_tdname, pl->pl_tdname);
	pl32->pl_child_pid = pl->pl_child_pid;
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)	copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
/*
 * ptrace(2) entry point: copy request-specific structures in from user
 * space, dispatch to kern_ptrace(), and copy results back out.  The
 * COPYIN/COPYOUT macros token-paste "32" onto the union member and
 * sizeof operand when the caller is a 32-bit process (wrap32).
 */
int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_lwpinfo32 pl32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		/* Other requests pass the user address straight through. */
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		/* uap->data (caller-chosen size) was validated by kern_ptrace. */
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_FREEBSD32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

/*
 * In-kernel implementation of ptrace(2): locate and lock the target
 * process/thread, perform permission checks, then execute the request.
 * addr points at an in-kernel structure for the requests sys_ptrace()
 * pre-copied; for the remainder it is the raw user address.  Errors
 * leave via the fail:/out: labels, which drop the hold and locks.
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL, *td3;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
	struct ptrace_lwpinfo32 *pl32 = NULL;
	struct ptrace_lwpinfo plr;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_FOLLOW_FORK:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* pid > PID_MAX names a thread (LWP) id. */
			td2 = tdfind(pid, -1);
			if (td2 == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			p = td2->td_proc;
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		/* No explicit thread: pick the one that reported the stop. */
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		/*
		 * It would be nice if the tracing relationship was separate
		 * from the parent relationship but that would require
		 * another set of links in the proc struct or for "wait"
		 * to scan the entire proc table.  To make life easier,
		 * we just re-parent the process we're trying to trace.
		 * The old parent is remembered so we can put things back
		 * on a "detach".
		 */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc) {
			proc_reparent(p, td->td_proc);
		}
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_FOLLOW_FORK:
		if (data)
			p->p_flag |= P_FOLLOWFORK;
		else
			p->p_flag &= ~P_FOLLOWFORK;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			/* addr == (void *)1 means "resume at current PC". */
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_stops |= S_PT_SCE;
				break;
			case PT_TO_SCX:
				p->p_stops |= S_PT_SCX;
				break;
			case PT_SYSCALL:
				p->p_stops |= S_PT_SCE | S_PT_SCX;
				break;
			}
			break;
		case PT_DETACH:
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_oppid = 0;
			p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK);

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

	sendsig:
		/* Shared resume path, also entered from PT_ATTACH/PT_KILL. */
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_dbgflags &= ~TDB_SUSPEND;
			}
			/*
			 * unsuspend all threads, to not let a thread run,
			 * you should use PT_SUSPEND to suspend it before
			 * continuing process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
			if (req == PT_ATTACH)
				kern_psignal(p, data);
		} else {
			if (data)
				kern_psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
		/* Report bytes actually transferred back via piod_len. */
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		/* data is the caller's buffer size; bound it by the struct. */
		if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
		    (!wrap32 && data > sizeof(*pl)) ||
		    (wrap32 && data > sizeof(*pl32))) {
#else
		    data > sizeof(*pl)) {
#endif
			error = EINVAL;
			break;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			/* Fill the native struct, then narrow it at the end. */
			pl = &plr;
			pl32 = addr;
		} else
#endif
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		pl->pl_event = PL_EVENT_NONE;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			if (td2->td_dbgksi.ksi_signo != 0 &&
#ifdef COMPAT_FREEBSD32
			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
			    pl_siginfo) + sizeof(struct siginfo32)))
#else
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)
#endif
			){
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_dbgksi.ksi_info;
			}
		}
		if ((pl->pl_flags & PL_FLAG_SI) == 0)
			bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		if (td2->td_dbgflags & TDB_FORK) {
			pl->pl_flags |= PL_FLAG_FORKED;
			pl->pl_child_pid = td2->td_dbg_forked;
		}
		if (td2->td_dbgflags & TDB_CHILD)
			pl->pl_flags |= PL_FLAG_CHILD;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		strcpy(pl->pl_tdname, td2->td_name);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			ptrace_lwpinfo_to32(pl, pl32);
#endif
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
			error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
/*
 * NOTE(review): this span opens in the middle of ptrace_vm_entry(); the
 * lines below are that function's tail: copy out the vnode path resolved
 * for the matched map entry, then release the temporary path storage.
 */
			if (fullpath != NULL) {
				/* Length includes the terminating NUL. */
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}

	return (error);
}

#ifdef COMPAT_FREEBSD32
/*
 * 32-bit wrapper for ptrace_vm_entry(): widen the request into the native
 * layout, run the native lookup, and narrow the results back.  pve_pathlen
 * is copied back unconditionally so the caller learns the required buffer
 * size even when the lookup failed (e.g. ENAMETOOLONG).
 */
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	/* Always report the path length, even on failure. */
	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}

/*
 * Translate a native ptrace_lwpinfo into the 32-bit compat layout.
 */
static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

	pl32->pl_lwpid = pl->pl_lwpid;
	pl32->pl_event = pl->pl_event;
	pl32->pl_flags = pl->pl_flags;
	pl32->pl_sigmask = pl->pl_sigmask;
	pl32->pl_siglist = pl->pl_siglist;
	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
	strcpy(pl32->pl_tdname, pl->pl_tdname);
	pl32->pl_child_pid = pl->pl_child_pid;
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *	COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *	copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *	copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
/*
 * ptrace(2) entry point: copy request-specific arguments into a kernel
 * buffer, hand off to kern_ptrace(), then copy results back out (the
 * second half of this function follows below).
 */
int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_lwpinfo32 pl32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	/* A 32-bit caller passes 32-bit layouts for all request data. */
	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		/* Output-only requests: kernel buffer, nothing to copy in. */
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		/* All other requests take the raw user address. */
		addr = uap->addr;
		break;
/* End of the request pre-copyin dispatch in sys_ptrace(). */
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	/* Copy results back out for the requests that return data. */
	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		/*
		 * uap->data was validated against both layouts by
		 * kern_ptrace(); r.pl and r.pl32 share storage in the union.
		 */
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_FREEBSD32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

/*
 * In-kernel implementation of ptrace(2).
 *
 *	td	- calling (tracing) thread
 *	req	- PT_* request code
 *	pid	- target process id; a value > PID_MAX is treated as a
 *		  thread (lwp) id and resolved via tdfind()
 *	addr	- request-specific address argument; for the register, I/O
 *		  and VM_ENTRY requests sys_ptrace() passes a kernel buffer,
 *		  otherwise this is the raw user-supplied address
 *	data	- request-specific integer argument
 *
 * Returns 0 or an errno value.  Acquires and releases the proc lock of
 * the target (and proctree_lock for the reparenting requests) internally.
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL, *td3;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
	struct ptrace_lwpinfo32 *pl32 = NULL;
	struct ptrace_lwpinfo plr;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_FOLLOW_FORK:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* An lwp id was given: find its thread and proc. */
			td2 = tdfind(pid, -1);
			if (td2 == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			p = td2->td_proc;
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		/* No explicit thread: pick the one that reported the stop. */
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
			safe = 1;	/* 32-bit writes to a 32-bit target */
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		/*
		 * It would be nice if the tracing relationship was separate
		 * from the parent relationship but that would require
		 * another set of links in the proc struct or for "wait"
		 * to scan the entire proc table.  To make life easier,
		 * we just re-parent the process we're trying to trace.
		 * The old parent is remembered so we can put things back
		 * on a "detach".
		 */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc) {
			proc_reparent(p, td->td_proc);
		}
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_FOLLOW_FORK:
		if (data)
			p->p_flag |= P_FOLLOWFORK;
		else
			p->p_flag &= ~P_FOLLOWFORK;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			/* addr == (void *)1 means "resume where stopped". */
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_stops |= S_PT_SCE;
				break;
			case PT_TO_SCX:
				p->p_stops |= S_PT_SCX;
				break;
			case PT_SYSCALL:
				p->p_stops |= S_PT_SCE | S_PT_SCX;
				break;
			}
			break;
		case PT_DETACH:
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_oppid = 0;
			p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK);

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_dbgflags &= ~TDB_SUSPEND;
			}
			/*
			 * unsuspend all threads, to not let a thread run,
			 * you should use PT_SUSPEND to suspend it before
			 * continuing process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
			if (req == PT_ATTACH)
				kern_psignal(p, data);
		} else {
			if (data)
				kern_psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
		/* Scatter/gather style transfer described by a piod. */
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
		/* Report the number of bytes actually transferred. */
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		/*
		 * Report the debugger-visible state of thread td2.  "data"
		 * is the caller-supplied buffer size and bounds how much of
		 * struct ptrace_lwpinfo (or its 32-bit layout) we fill in.
		 */
		if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
		    (!wrap32 && data > sizeof(*pl)) ||
		    (wrap32 && data > sizeof(*pl32))) {
#else
		    data > sizeof(*pl)) {
#endif
			error = EINVAL;
			break;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			/* Build natively in plr, convert to 32-bit below. */
			pl = &plr;
			pl32 = addr;
		} else
#endif
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		pl->pl_event = PL_EVENT_NONE;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			/*
			 * Copy the siginfo only when there is a pending
			 * debug signal and the caller's buffer is large
			 * enough to hold pl_siginfo.
			 */
			if (td2->td_dbgksi.ksi_signo != 0 &&
#ifdef COMPAT_FREEBSD32
			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
			    pl_siginfo) + sizeof(struct siginfo32)))
#else
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)
#endif
			    ) {
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_dbgksi.ksi_info;
			}
		}
		if ((pl->pl_flags & PL_FLAG_SI) == 0)
			bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		if (td2->td_dbgflags & TDB_FORK) {
			pl->pl_flags |= PL_FLAG_FORKED;
			pl->pl_child_pid = td2->td_dbg_forked;
		}
		if (td2->td_dbgflags & TDB_CHILD)
			pl->pl_flags |= PL_FLAG_CHILD;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		strcpy(pl->pl_tdname, td2->td_name);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			ptrace_lwpinfo_to32(pl, pl32);
#endif
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		/*
		 * Copy out up to "data" thread ids.  The proc lock is
		 * dropped around the M_WAITOK malloc and the copyout, so
		 * the thread list may change underneath us; the
		 * "tmp >= num" guard keeps the writes inside the buffer
		 * sized while the lock was held.
		 */
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		/* Generation count of the target's VM map. */
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
		error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		/* Give machine-dependent code a crack at PT_FIRSTMACH+. */
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
		/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 *
 * Called and returns with the proc lock held (asserted below).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}