sys_process.c revision 207410
1/*- 2 * Copyright (c) 1994, Sean Eric Fagan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Sean Eric Fagan. 16 * 4. The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sys_process.c 207410 2010-04-30 00:46:43Z kmacy $");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>

/*
 * 32-bit layout of struct ptrace_io_desc, used when an ILP32 client
 * issues PT_IO against this kernel.
 */
struct ptrace_io_desc32 {
	int		piod_op;
	u_int32_t	piod_offs;
	u_int32_t	piod_addr;
	u_int32_t	piod_len;
};

/*
 * 32-bit layout of struct ptrace_vm_entry, used when an ILP32 client
 * issues PT_VM_ENTRY against this kernel.
 */
struct ptrace_vm_entry32 {
	int		pve_entry;
	int		pve_timestamp;
	uint32_t	pve_start;
	uint32_t	pve_end;
	uint32_t	pve_offset;
	u_int		pve_prot;
	u_int		pve_pathlen;
	int32_t		pve_fileid;
	u_int		pve_fsid;
	uint32_t	pve_path;
};

#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

/*
 * Common wrapper: the caller must hold the target's process lock; the
 * action is refused with EIO when the process is swapped out (!P_INMEM).
 */
#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts.
 */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

/*
 * Transfer data between the address space of process 'p' and the buffers
 * described by 'uio', one page at a time.  The caller must hold the
 * process (p_lock >= 1, asserted below) so it cannot exit under us.
 * Returns 0 on success or an errno value (EFAULT/ENOMEM on VM failure).
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_object_t backing_object, object;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	/* Writes request VM_PROT_COPY as well, forcing copy-on-write. */
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;	/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry and wired
		 * aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		if (writing && m != NULL) {
			vm_page_dirty(m);
			vm_pager_page_unswapped(m);
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock(m);
		vm_page_hold(m);
		vm_page_unlock(m);

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (!error && writing && (out_prot & VM_PROT_EXECUTE))
			vm_sync_icache(map, uva, len);

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * Implement PT_VM_ENTRY: describe the pve_entry'th map entry of process
 * 'p' (skipping submap entries), and, when pve_pathlen is non-zero, copy
 * out the path of the backing vnode as well.  On return pve_entry is
 * advanced past the reported entry so repeated calls iterate the map.
 */
static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index, vfslocked;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		/* Walk forward to the requested entry index. */
		entry = map->header.next;
		index = 0;
		while (index < pve->pve_entry && entry != &map->header) {
			entry = entry->next;
			index++;
		}
		if (index != pve->pve_entry) {
			error = EINVAL;
			break;
		}
		/* Skip submap entries; they are not reported. */
		while (entry != &map->header &&
		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
			entry = entry->next;
			index++;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		/* Keep the top-level object locked across the map unlock. */
		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_LOCK(obj);
	} while (0);

	vm_map_unlock_read(map);
	vmspace_free(vm);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		/* Descend the shadow chain to the bottom object. */
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = (lobj->type == OBJT_VNODE) ? lobj->handle : NULL;
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_UNLOCK(lobj);
		VM_OBJECT_UNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			vn_fullpath(td, vp, &fullpath, &freepath);
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);
			VFS_UNLOCK_GIANT(vfslocked);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}

	return (error);
}

#ifdef COMPAT_FREEBSD32
/*
 * 32-bit wrapper for ptrace_vm_entry(): convert the request to the
 * native layout, run it, and convert the result back.
 */
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start =
		    pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	/* The resulting path length is reported back even on error. */
	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *	COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *	copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *	copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
/*
 * ptrace(2) entry point: copy request-specific argument structures in
 * from userland, hand off to kern_ptrace(), and copy results back out.
 */
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		/* Output-only requests: nothing to copy in. */
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		/* All other requests pass the user address through. */
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_FREEBSD32
/*
 *  PROC_READ(regs, td2, addr);
 * becomes either:
 *	proc_read_regs(td2, addr);
 * or
 *	proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

/*
 * Kernel backend for ptrace(2).  'req' is the PT_* request, 'pid'
 * names the target process (values above PID_MAX select a thread by
 * tid instead), and 'addr'/'data' are request-specific arguments.
 * Returns 0 or an errno value.
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				if (td2 != NULL)
					break;	/* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		/* Default to the thread that reported the debug event. */
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (td2->td_proc->p_sysent->sv_flags & SV_ILP32)
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		/* addr == (void *)1 means "continue from current PC". */
		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error)
				break;
		}

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp =
					    initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3) {
					td3->td_dbgflags &= ~TDB_SUSPEND;
				}
			}
			/*
			 * unsuspend all threads, to not let a thread run,
			 * you should use PT_SUSPEND to suspend it before
			 * continuing process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		} else {
			if (data)
				psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
		/* Report the number of bytes actually transferred. */
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 || data > sizeof(*pl)) {
			error = EINVAL;
			break;
		}
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		if (td2->td_dbgflags & TDB_XSIG)
			pl->pl_event = PL_EVENT_SIGNAL;
		else
			pl->pl_event = 0;
		pl->pl_flags = 0;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
			error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}