/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sys_process.c 239135 2012-08-08 00:20:30Z kib $");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>
#include <compat/freebsd32/freebsd32_signal.h>

struct ptrace_io_desc32 {
	int		piod_op;
	uint32_t	piod_offs;
	uint32_t	piod_addr;
	uint32_t	piod_len;
};

struct ptrace_vm_entry32 {
	int		pve_entry;
	int		pve_timestamp;
	uint32_t	pve_start;
	uint32_t	pve_end;
	uint32_t	pve_offset;
	u_int		pve_prot;
	u_int		pve_pathlen;
	int32_t		pve_fileid;
	u_int		pve_fsid;
	uint32_t	pve_path;
};

struct ptrace_lwpinfo32 {
	lwpid_t	pl_lwpid;	/* LWP described. */
	int	pl_event;	/* Event that stopped the LWP. */
	int	pl_flags;	/* LWP flags. */
	sigset_t	pl_sigmask;	/* LWP signal mask */
	sigset_t	pl_siglist;	/* LWP pending signal */
	struct siginfo32 pl_siginfo;	/* siginfo for signal */
	char	pl_tdname[MAXCOMLEN + 1];	/* LWP name. */
	int	pl_child_pid;	/* New child pid */
};

#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)
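
/*
 * Illustrative sketch (not compiled): with the macro above, an accessor such
 * as proc_read_regs() below expands to roughly the following, so every
 * wrapper shares the same locking assertion and P_INMEM check:
 *
 *	int
 *	proc_read_regs(struct thread *td, struct reg *regs)
 *	{
 *		int error;
 *
 *		PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 *		if ((td->td_proc->p_flag & P_INMEM) == 0)
 *			error = EIO;
 *		else
 *			error = fill_regs(td, regs);
 *		return (error);
 *	}
 */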

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, page_offset, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	/*
	 * If we are writing, then we request vm_fault() to create a private
	 * copy of each page.  Since these copies will not be writeable by the
	 * process, we must explicitly request that they be dirtied.
	 */
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		u_int len;
		vm_page_t m;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault and hold the page on behalf of the process.
		 */
		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
		if (error != KERN_SUCCESS) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (writing && error == 0) {
			vm_map_lock_read(map);
			if (vm_map_check_protection(map, pageno, pageno +
			    PAGE_SIZE, VM_PROT_EXECUTE))
				vm_sync_icache(map, uva, len);
			vm_map_unlock_read(map);
		}

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
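
/*
 * Illustrative sketch (not compiled): a minimal kernel-space read through
 * proc_rwmem(), mirroring the PT_READ_I/PT_READ_D path in kern_ptrace()
 * below.  The caller is expected to hold the process (PHOLD/_PHOLD) for the
 * duration of the transfer; "addr" stands in for the target address.
 *
 *	struct iovec iov;
 *	struct uio uio;
 *	int val;
 *
 *	iov.iov_base = (caddr_t)&val;
 *	iov.iov_len = sizeof(int);
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_offset = (off_t)(uintptr_t)addr;
 *	uio.uio_resid = sizeof(int);
 *	uio.uio_segflg = UIO_SYSSPACE;
 *	uio.uio_rw = UIO_READ;
 *	uio.uio_td = td;
 *	error = proc_rwmem(p, &uio);
 */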

static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index, vfslocked;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		entry = map->header.next;
		index = 0;
		while (index < pve->pve_entry && entry != &map->header) {
			entry = entry->next;
			index++;
		}
		if (index != pve->pve_entry) {
			error = EINVAL;
			break;
		}
		while (entry != &map->header &&
		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
			entry = entry->next;
			index++;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_LOCK(obj);
	} while (0);

	vm_map_unlock_read(map);
	vmspace_free(vm);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = (lobj->type == OBJT_VNODE) ? lobj->handle : NULL;
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_UNLOCK(lobj);
		VM_OBJECT_UNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			vn_fullpath(td, vp, &fullpath, &freepath);
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);
			VFS_UNLOCK_GIANT(vfslocked);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}

	return (error);
}
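
/*
 * Illustrative sketch (not compiled): how a debugger might walk the target's
 * address space with PT_VM_ENTRY from userland.  pve_entry is an in/out
 * cursor (start at 0; the kernel advances it past the entry it returned),
 * pve_path/pve_pathlen optionally receive the backing vnode's path, and
 * ptrace(2) fails with ENOENT once the end of the map is reached.  Assumes
 * <sys/ptrace.h>, <sys/param.h>, <stdio.h> and <string.h>.
 *
 *	struct ptrace_vm_entry pve;
 *	char path[MAXPATHLEN];
 *
 *	memset(&pve, 0, sizeof(pve));
 *	for (;;) {
 *		pve.pve_path = path;
 *		pve.pve_pathlen = sizeof(path);
 *		if (ptrace(PT_VM_ENTRY, pid, (caddr_t)&pve, 0) == -1)
 *			break;
 *		printf("%#lx-%#lx %s\n", (long)pve.pve_start,
 *		    (long)pve.pve_end, pve.pve_pathlen != 0 ? path : "");
 *	}
 */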

#ifdef COMPAT_FREEBSD32
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}

static void
ptrace_lwpinfo_to32(const struct ptrace_lwpinfo *pl,
    struct ptrace_lwpinfo32 *pl32)
{

	pl32->pl_lwpid = pl->pl_lwpid;
	pl32->pl_event = pl->pl_event;
	pl32->pl_flags = pl->pl_flags;
	pl32->pl_sigmask = pl->pl_sigmask;
	pl32->pl_siglist = pl->pl_siglist;
	siginfo_to_siginfo32(&pl->pl_siginfo, &pl32->pl_siginfo);
	strcpy(pl32->pl_tdname, pl->pl_tdname);
	pl32->pl_child_pid = pl->pl_child_pid;
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif
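
/*
 * Illustrative sketch (not compiled): the usual userland attach/detach
 * sequence against this interface.  PT_ATTACH stops the target with SIGSTOP
 * (see the sendsig path in kern_ptrace() below) and the tracer collects the
 * stop with waitpid(2); PT_CONTINUE and PT_DETACH take the signal to deliver
 * in "data" (0 for none) and conventionally (caddr_t)1 as the address to
 * resume where the process stopped.  Assumes <sys/types.h>, <sys/ptrace.h>,
 * <sys/wait.h> and <err.h>.
 *
 *	int status;
 *
 *	if (ptrace(PT_ATTACH, pid, NULL, 0) == -1)
 *		err(1, "PT_ATTACH");
 *	if (waitpid(pid, &status, 0) == -1)
 *		err(1, "waitpid");
 *	(inspect the stopped process here, e.g. PT_GETREGS, PT_IO, ...)
 *	if (ptrace(PT_DETACH, pid, (caddr_t)1, 0) == -1)
 *		err(1, "PT_DETACH");
 */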

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
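/*
 * Illustrative sketch (not compiled): for the PT_SETREGS case below,
 * COPYIN(uap->addr, &r.reg, sizeof r.reg) therefore expands to
 *
 *	wrap32 ?
 *	    copyin(uap->addr, &r.reg32, sizeof r.reg32) :
 *	    copyin(uap->addr, &r.reg, sizeof r.reg)
 *
 * selecting the 32-bit register layout at runtime when the tracing process
 * is a 32-bit binary.
 */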
int
sys_ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_lwpinfo32 pl32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_FREEBSD32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif
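/*
 * Illustrative sketch (not compiled): for the PT_GETREGS and PT_SETREGS
 * cases below, PROC_READ(regs, td2, addr) and PROC_WRITE(regs, td2, addr)
 * therefore expand to
 *
 *	wrap32 ? proc_read_regs32(td2, addr) : proc_read_regs(td2, addr)
 *	wrap32 ? (safe ? proc_write_regs32(td2, addr) : EINVAL) :
 *	    proc_write_regs(td2, addr)
 *
 * so a 32-bit debugger always gets results in the 32-bit layout, but may
 * only write register state of a target that is itself a 32-bit process.
 */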

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL, *td3;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
	struct ptrace_lwpinfo32 *pl32 = NULL;
	struct ptrace_lwpinfo plr;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_FOLLOW_FORK:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			td2 = tdfind(pid, -1);
			if (td2 == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			p = td2->td_proc;
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32))
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		/*
		 * It would be nice if the tracing relationship was separate
		 * from the parent relationship but that would require
		 * another set of links in the proc struct or for "wait"
		 * to scan the entire proc table.  To make life easier,
		 * we just re-parent the process we're trying to trace.
		 * The old parent is remembered so we can put things back
		 * on a "detach".
		 */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc) {
			proc_reparent(p, td->td_proc);
		}
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_FOLLOW_FORK:
		if (data)
			p->p_flag |= P_FOLLOWFORK;
		else
			p->p_flag &= ~P_FOLLOWFORK;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_stops |= S_PT_SCE;
				break;
			case PT_TO_SCX:
				p->p_stops |= S_PT_SCX;
				break;
			case PT_SYSCALL:
				p->p_stops |= S_PT_SCE | S_PT_SCX;
				break;
			}
			break;
		case PT_DETACH:
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_oppid = 0;
			p->p_flag &= ~(P_TRACED | P_WAITED | P_FOLLOWFORK);

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_dbgflags &= ~TDB_SUSPEND;
			}
			/*
			 * Unsuspend all threads.  To keep a thread from
			 * running, the debugger should use PT_SUSPEND on
			 * it before continuing the process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
			if (req == PT_ATTACH)
				kern_psignal(p, data);
		} else {
			if (data)
				kern_psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;
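	/*
	 * Illustrative sketch (not compiled): the matching userland side of
	 * PT_IO, with "target_addr" standing in for the address to read in
	 * the traced process.  piod_offs is that remote address, piod_addr
	 * the buffer in the tracer, and on return piod_len holds the number
	 * of bytes actually transferred.
	 *
	 *	struct ptrace_io_desc piod;
	 *	char buf[64];
	 *
	 *	piod.piod_op = PIOD_READ_D;
	 *	piod.piod_offs = (void *)target_addr;
	 *	piod.piod_addr = buf;
	 *	piod.piod_len = sizeof(buf);
	 *	if (ptrace(PT_IO, pid, (caddr_t)&piod, 0) == -1)
	 *		err(1, "PT_IO");
	 */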

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 ||
#ifdef COMPAT_FREEBSD32
		    (!wrap32 && data > sizeof(*pl)) ||
		    (wrap32 && data > sizeof(*pl32))) {
#else
		    data > sizeof(*pl)) {
#endif
			error = EINVAL;
			break;
		}
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			pl = &plr;
			pl32 = addr;
		} else
#endif
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		pl->pl_event = PL_EVENT_NONE;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_XSIG) {
			pl->pl_event = PL_EVENT_SIGNAL;
			if (td2->td_dbgksi.ksi_signo != 0 &&
#ifdef COMPAT_FREEBSD32
			    ((!wrap32 && data >= offsetof(struct ptrace_lwpinfo,
			    pl_siginfo) + sizeof(pl->pl_siginfo)) ||
			    (wrap32 && data >= offsetof(struct ptrace_lwpinfo32,
			    pl_siginfo) + sizeof(struct siginfo32)))
#else
			    data >= offsetof(struct ptrace_lwpinfo, pl_siginfo)
			    + sizeof(pl->pl_siginfo)
#endif
			){
				pl->pl_flags |= PL_FLAG_SI;
				pl->pl_siginfo = td2->td_dbgksi.ksi_info;
			}
		}
		if ((pl->pl_flags & PL_FLAG_SI) == 0)
			bzero(&pl->pl_siginfo, sizeof(pl->pl_siginfo));
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		if (td2->td_dbgflags & TDB_FORK) {
			pl->pl_flags |= PL_FLAG_FORKED;
			pl->pl_child_pid = td2->td_dbg_forked;
		}
		if (td2->td_dbgflags & TDB_CHILD)
			pl->pl_flags |= PL_FLAG_CHILD;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		strcpy(pl->pl_tdname, td2->td_name);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			ptrace_lwpinfo_to32(pl, pl32);
#endif
		break;
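	/*
	 * Illustrative sketch (not compiled): the userland caller passes the
	 * structure size it understands in "data", which is how older
	 * binaries keep working when struct ptrace_lwpinfo grows:
	 *
	 *	struct ptrace_lwpinfo pl;
	 *
	 *	if (ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl)) != -1 &&
	 *	    (pl.pl_flags & PL_FLAG_SI) != 0)
	 *		printf("stopped on signal %d\n", pl.pl_siginfo.si_signo);
	 */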

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
			error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
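
/*
 * Illustrative sketch, assuming the STOPEVENT() wrapper in <sys/signalvar.h>:
 * callers do not normally invoke stopevent() directly.  The wrapper first
 * checks that the corresponding bit is set in p->p_stops before taking the
 * process lock and stopping, roughly:
 *
 *	if ((p->p_stops & S_SCE) != 0) {
 *		PROC_LOCK(p);
 *		stopevent(p, S_SCE, narg);
 *		PROC_UNLOCK(p);
 *	}
 */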