/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_exit.c 220137 2011-03-29 17:47:25Z trasz $");

#include "opt_compat.h"
#include "opt_kdtrace.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/jail.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/syslog.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/sdt.h>
#include <sys/shm.h>
#include <sys/sem.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_execexit_func_t	dtrace_fasttrap_exit;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, kernel, , exit, exit);
SDT_PROBE_ARGTYPE(proc, kernel, , exit, 0, "int");
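/*
 * The proc:::exit DTrace probe declared above carries a single "int"
 * argument: the CLD_* reason code (CLD_EXITED, CLD_KILLED or CLD_DUMPED)
 * that exit1() derives from the wait status before firing the probe.
 */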

/* Required to be non-static for SysVR4 emulator */
MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

/* Hook for NFS teardown procedure. */
void (*nlminfo_release_p)(struct proc *p);

/*
 * exit -- death of process.
 */
void
sys_exit(struct thread *td, struct sys_exit_args *uap)
{

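	/*
	 * Pack the user-supplied return value into a wait(2) status word:
	 * W_EXITCODE(rval, 0) places the exit status in the high byte with
	 * no terminating signal, so the parent's WIFEXITED()/WEXITSTATUS()
	 * can recover it.
	 */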
	exit1(td, W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Exit: deallocate address space and other resources, change proc state to
 * zombie, and unlink proc from allproc and parent's lists.  Save exit status
 * and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct thread *td, int rv)
{
	struct proc *p, *nq, *q;
	struct vnode *vtmp;
	struct vnode *ttyvp = NULL;
	struct plimit *plim;
	int locked;

	mtx_assert(&Giant, MA_NOTOWNED);

	p = td->td_proc;
	/*
	 * XXX in case we're rebooting we just let init die in order to
	 * work around an unsolved stack overflow seen very late during
	 * shutdown on sparc64 when the gmirror worker process exists.
	 */
	if (p == initproc && rebooting == 0) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	while (p->p_flag & P_HADTHREADS) {
		/*
		 * First check if some other thread got here before us.
		 * If so, act appropriately: exit or suspend.
		 */
		thread_suspend_check(0);

		/*
		 * Kill off the other threads.  This requires
		 * some co-operation from other parts of the kernel
		 * so it may not be instantaneous.  With this state set
		 * any thread entering the kernel from userspace will
		 * thread_exit() in trap().  Any thread attempting to
		 * sleep will return immediately with EINTR or EWOULDBLOCK
		 * which will hopefully force them to back out to userland
		 * freeing resources as they go.  Any thread attempting
		 * to return to userland will thread_exit() from userret().
		 * thread_exit() will unsuspend us when the last of the
		 * other threads exits.
		 * If another thread is already single-threading the process
		 * after resumption, calling thread_single() will fail; in
		 * that case, we just re-check all suspension requests, and
		 * the thread should either be suspended there or exit.
		 */
		if (!thread_single(SINGLE_EXIT))
			break;

		/*
		 * All other activity in this process is now stopped.
		 * Threading support has been turned off.
		 */
	}
	KASSERT(p->p_numthreads == 1,
	    ("exit1: proc %p exiting with %d threads", p, p->p_numthreads));
	/*
	 * Wakeup anyone in procfs' PIOCWAIT.  They should have a hold
	 * on our vmspace, so we should block below until they have
	 * released their reference to us.  Note that if they have
	 * requested S_EXIT stops we will block here until they ack
	 * via PIOCCONT.
	 */
	_STOPEVENT(p, S_EXIT, rv);

	/*
	 * Note that we are exiting and do another wakeup of anyone in
	 * PIOCWAIT in case they aren't listening for S_EXIT stops or
	 * decided to wait again after we told them we are exiting.
	 */
	p->p_flag |= P_WEXIT;
	wakeup(&p->p_stype);

	/*
	 * Wait for any processes that have a hold on our vmspace to
	 * release their reference.
	 */
	while (p->p_lock > 0)
		msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);

	p->p_xstat = rv;	/* Let event handler change exit status */
	PROC_UNLOCK(p);
	/* Drain the limit callout while we don't have the proc locked */
	callout_drain(&p->p_limco);

#ifdef AUDIT
	/*
	 * The Sun BSM exit token contains two components: an exit status as
	 * passed to exit(), and a return value to indicate what sort of exit
	 * it was.  The exit status is WEXITSTATUS(rv), but it's not clear
	 * what the return value is.
	 */
	AUDIT_ARG_EXIT(WEXITSTATUS(rv), 0);
	AUDIT_SYSCALL_EXIT(0, td);
#endif

	/* Are we a task leader? */
	if (p == p->p_leader) {
		mtx_lock(&ppeers_lock);
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		while (p->p_peers != NULL)
			msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
		mtx_unlock(&ppeers_lock);
	}

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * E.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	rv = p->p_xstat;	/* Event handler could change exit status */
	stopprofclock(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT);

	/*
	 * Stop the real interval timer.  If the handler is currently
	 * executing, prevent it from rearming itself and let it finish.
	 */
	if (timevalisset(&p->p_realtimer.it_value) &&
	    callout_stop(&p->p_itcallout) == 0) {
		timevalclear(&p->p_realtimer.it_interval);
		msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0);
		KASSERT(!timevalisset(&p->p_realtimer.it_value),
		    ("realtime timer is still armed"));
	}
	PROC_UNLOCK(p);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * If this process has an nlminfo data area (for lockd), release it.
	 */
	if (nlminfo_release_p != NULL && p->p_nlminfo != NULL)
		(*nlminfo_release_p)(p);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(td);

	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland
	 */
	if (td->td_pflags & TDP_GEOM)
		g_waitidle();

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	mtx_lock(&ppeers_lock);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup(p->p_leader);
	}
	mtx_unlock(&ppeers_lock);

	vmspace_exit(td);

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;
		struct tty *tp;

		/*
		 * s_ttyp is not zero'd; we use this to indicate that
		 * the session once had a controlling terminal. (for
		 * logging and informational purposes)
		 */
		SESS_LOCK(sp);
		ttyvp = sp->s_ttyvp;
		tp = sp->s_ttyp;
		sp->s_ttyvp = NULL;
		sp->s_ttydp = NULL;
		sp->s_leader = NULL;
		SESS_UNLOCK(sp);

		/*
		 * Signal foreground pgrp and revoke access to
		 * controlling terminal if it has not been revoked
		 * already.
		 *
		 * Because the TTY may have been revoked in the mean
		 * time and could already have a new session associated
		 * with it, make sure we don't send a SIGHUP to a
		 * foreground process group that does not belong to this
		 * session.
		 */

		if (tp != NULL) {
			tty_lock(tp);
			if (tp->t_session == sp)
				tty_signal_pgrp(tp, SIGHUP);
			tty_unlock(tp);
		}

		if (ttyvp != NULL) {
			sx_xunlock(&proctree_lock);
			if (vn_lock(ttyvp, LK_EXCLUSIVE) == 0) {
				VOP_REVOKE(ttyvp, REVOKEALL);
				VOP_UNLOCK(ttyvp, 0);
			}
			sx_xlock(&proctree_lock);
		}
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);

	/* Release the TTY now we've unlocked everything. */
	if (ttyvp != NULL)
		vrele(ttyvp);
#ifdef KTRACE
	ktrprocexit(td);
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		locked = VFS_LOCK_GIANT(vtmp->v_mount);
		vrele(vtmp);
		VFS_UNLOCK_GIANT(locked);
	}

	/*
	 * Release our limits structure.
	 */
	PROC_LOCK(p);
	plim = p->p_limit;
	p->p_limit = NULL;
	PROC_UNLOCK(p);
	lim_free(plim);

	tidhash_remove(td);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Call machine-dependent code to release any
	 * machine-dependent resources other than the address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	WITNESS_WARN(WARN_PANIC, NULL, "process (pid %d) exiting", p->p_pid);

	/*
	 * Reparent all of our children to init.
	 */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			struct thread *temp;

			q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);
			FOREACH_THREAD_IN_PROC(q, temp)
				temp->td_dbgflags &= ~TDB_SUSPEND;
			psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/* Save exit status. */
	PROC_LOCK(p);
	p->p_xthread = td;

	/* Tell the prison that we are gone. */
	prison_proc_free(p->p_ucred->cr_prison);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exit if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exit)
		dtrace_fasttrap_exit(p);
#endif

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);

#ifdef KDTRACE_HOOKS
	int reason = CLD_EXITED;
	if (WCOREDUMP(rv))
		reason = CLD_DUMPED;
	else if (WIFSIGNALED(rv))
		reason = CLD_KILLED;
	SDT_PROBE(proc, kernel, , exit, reason, 0, 0, 0, 0);
#endif

	/*
	 * Just delete all entries in the p_klist.  At this point we won't
	 * report any more events, and there are nasty race conditions that
	 * can beat us if we don't.
	 */
	knlist_clear(&p->p_klist, 1);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process
	 * 1 instead (and hope it will handle this situation).
	 */
	PROC_LOCK(p->p_pptr);
	mtx_lock(&p->p_pptr->p_sigacts->ps_mtx);
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		struct proc *pp;

		mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
		pp = p->p_pptr;
		PROC_UNLOCK(pp);
		proc_reparent(p, initproc);
		p->p_sigparent = SIGCHLD;
		PROC_LOCK(p->p_pptr);

		/*
		 * Notify parent, so in case he was wait(2)ing or
		 * executing waitpid(2) with our pid, he will
		 * continue.
		 */
		wakeup(pp);
	} else
		mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);

	if (p->p_pptr == initproc)
		psignal(p->p_pptr, SIGCHLD);
	else if (p->p_sigparent != 0) {
		if (p->p_sigparent == SIGCHLD)
			childproc_exited(p);
		else	/* LINUX thread */
			psignal(p->p_pptr, p->p_sigparent);
	}
	sx_xunlock(&proctree_lock);

	/*
	 * The state PRS_ZOMBIE prevents other processes from sending
	 * signals to the process.  To avoid a memory leak, we free the
	 * memory for the signal queue at the time the state is set.
	 */
	sigqueue_flush(&p->p_sigqueue);
	sigqueue_flush(&td->td_sigqueue);

	/*
	 * We have to wait until after acquiring all locks before
	 * changing p_state.  We need to avoid all possible context
	 * switches (including ones from blocking on a mutex) while
	 * marked as a zombie.  We also have to set the zombie state
	 * before we release the parent process' proc lock to avoid
	 * a lost wakeup.  So, we first call wakeup, then we grab the
	 * sched lock, update the state, and release the parent process'
	 * proc lock.
	 */
	wakeup(p->p_pptr);
	cv_broadcast(&p->p_pwait);
	sched_exit(p->p_pptr, td);
	PROC_SLOCK(p);
	p->p_state = PRS_ZOMBIE;
	PROC_UNLOCK(p->p_pptr);

	/*
	 * Hopefully no one will try to deliver a signal to the process this
	 * late in the game.
	 */
	knlist_destroy(&p->p_klist);

	/*
	 * Save our children's rusage information in our exit rusage.
	 */
	ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);

	/*
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
}


#ifndef _SYS_SYSPROTO_H_
struct abort2_args {
	char *why;
	int nargs;
	void **args;
};
#endif

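/*
 * abort2(2): log the user-supplied reason string and argument pointers,
 * then terminate the process with SIGABRT, or with SIGKILL if the
 * arguments could not be validated or copied in.
 */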
int
abort2(struct thread *td, struct abort2_args *uap)
{
	struct proc *p = td->td_proc;
	struct sbuf *sb;
	void *uargs[16];
	int error, i, sig;

	/*
	 * Do it right now so we can log either a proper call of abort2()
	 * or a note that an invalid argument was passed.  512 is big
	 * enough to handle 16 arguments' descriptions with additional
	 * comments.
	 */
	sb = sbuf_new(NULL, NULL, 512, SBUF_FIXEDLEN);
	sbuf_clear(sb);
	sbuf_printf(sb, "%s(pid %d uid %d) aborted: ",
	    p->p_comm, p->p_pid, td->td_ucred->cr_uid);
	/*
	 * Since we can't return from abort2(), send SIGKILL in cases where
	 * abort2() was called improperly.
	 */
	sig = SIGKILL;
	/* Prevent DoS attacks from user-space. */
	if (uap->nargs < 0 || uap->nargs > 16)
		goto out;
	if (uap->nargs > 0) {
		if (uap->args == NULL)
			goto out;
		error = copyin(uap->args, uargs, uap->nargs * sizeof(void *));
		if (error != 0)
			goto out;
	}
	/*
	 * Limit the size of the 'reason' string to 128 bytes.  It will
	 * still fit even when the maximal number of arguments is logged.
	 */
	if (uap->why != NULL) {
		error = sbuf_copyin(sb, uap->why, 128);
		if (error < 0)
			goto out;
	} else {
		sbuf_printf(sb, "(null)");
	}
	if (uap->nargs > 0) {
		sbuf_printf(sb, "(");
		for (i = 0; i < uap->nargs; i++)
			sbuf_printf(sb, "%s%p", i == 0 ? "" : ", ", uargs[i]);
		sbuf_printf(sb, ")");
	}
	/*
	 * Final stage: the arguments were proper, the string has been
	 * successfully copied from userspace, and copying the pointers
	 * from user-space succeeded.
	 */
	sig = SIGABRT;
out:
	if (sig == SIGKILL) {
		sbuf_trim(sb);
		sbuf_printf(sb, " (Reason text inaccessible)");
	}
	sbuf_cat(sb, "\n");
	sbuf_finish(sb);
	log(LOG_INFO, "%s", sbuf_data(sb));
	sbuf_delete(sb);
	exit1(td, W_EXITCODE(0, sig));
	return (0);
}


#ifdef COMPAT_43
/*
 * The dirty work is handled by kern_wait().
 */
int
owait(struct thread *td, struct owait_args *uap __unused)
{
	int error, status;

	error = kern_wait(td, WAIT_ANY, &status, 0, NULL);
	if (error == 0)
		td->td_retval[1] = status;
	return (error);
}
#endif /* COMPAT_43 */

/*
 * The dirty work is handled by kern_wait().
 */
int
wait4(struct thread *td, struct wait_args *uap)
{
	struct rusage ru, *rup;
	int error, status;

	if (uap->rusage != NULL)
		rup = &ru;
	else
		rup = NULL;
	error = kern_wait(td, uap->pid, &status, uap->options, rup);
	if (uap->status != NULL && error == 0)
		error = copyout(&status, uap->status, sizeof(status));
	if (uap->rusage != NULL && error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

/*
 * Reap the remains of a zombie process and optionally return status and
 * rusage.  Asserts and will release both the proctree_lock and the process
 * lock as part of its work.
 */
static void
proc_reap(struct thread *td, struct proc *p, int *status, int options,
    struct rusage *rusage)
{
	struct proc *q, *t;

	sx_assert(&proctree_lock, SA_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	KASSERT(p->p_state == PRS_ZOMBIE, ("proc_reap: !PRS_ZOMBIE"));

	q = td->td_proc;
	if (rusage) {
		*rusage = p->p_ru;
		calcru(p, &rusage->ru_utime, &rusage->ru_stime);
	}
	PROC_SUNLOCK(p);
	td->td_retval[0] = p->p_pid;
	if (status)
		*status = p->p_xstat;	/* convert to int */
	if (options & WNOWAIT) {
		/*
		 * Only poll, returning the status.  Caller does not wish to
		 * release the proc struct just yet.
		 */
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return;
	}

	PROC_LOCK(q);
	sigqueue_take(p->p_ksi);
	PROC_UNLOCK(q);
	PROC_UNLOCK(p);

	/*
	 * If we got the child via a ptrace 'attach', we need to give it back
	 * to the old parent.
	 */
	if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
		PROC_LOCK(p);
		p->p_oppid = 0;
		proc_reparent(p, t);
		PROC_UNLOCK(p);
		pksignal(t, SIGCHLD, p->p_ksi);
		wakeup(t);
		cv_broadcast(&p->p_pwait);
		PROC_UNLOCK(t);
		sx_xunlock(&proctree_lock);
		return;
	}

	/*
	 * Remove other references to this process to ensure we have an
	 * exclusive reference.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);	/* off zombproc */
	sx_xunlock(&allproc_lock);
	LIST_REMOVE(p, p_sibling);
	leavepgrp(p);
	sx_xunlock(&proctree_lock);

	/*
	 * As a side effect of this lock, we know that all other writes to
	 * this proc are visible now, so no more locking is needed for p.
	 */
	PROC_LOCK(p);
	p->p_xstat = 0;		/* XXX: why? */
	PROC_UNLOCK(p);
	PROC_LOCK(q);
	ruadd(&q->p_stats->p_cru, &q->p_crux, &p->p_ru, &p->p_rux);
	PROC_UNLOCK(q);

	/*
	 * Decrement the count of procs running with this uid.
	 */
	(void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

	/*
	 * Destroy resource accounting information associated with the process.
	 */
	racct_proc_exit(p);

	/*
	 * Free credentials, arguments, and sigacts.
	 */
	crfree(p->p_ucred);
	p->p_ucred = NULL;
	pargs_drop(p->p_args);
	p->p_args = NULL;
	sigacts_free(p->p_sigacts);
	p->p_sigacts = NULL;

	/*
	 * Do any thread-system specific cleanups.
	 */
	thread_wait(p);

	/*
	 * Give vm and machine-dependent layer a chance to free anything that
	 * cpu_exit couldn't release while still running in process context.
	 */
	vm_waitproc(p);
#ifdef MAC
	mac_proc_destroy(p);
#endif
	KASSERT(FIRST_THREAD_IN_PROC(p),
	    ("proc_reap: no residual thread!"));
	uma_zfree(proc_zone, p);
	sx_xlock(&allproc_lock);
	nprocs--;
	sx_xunlock(&allproc_lock);
}

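/*
 * Common back end for wait4() and the compatibility wrappers: find a child
 * matching "pid", reap it via proc_reap() if it is a zombie, report job
 * control stops and continues, or sleep until a child changes state
 * (unless WNOHANG was given).
 */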
int
kern_wait(struct thread *td, pid_t pid, int *status, int options,
    struct rusage *rusage)
{
	struct proc *p, *q;
	int error, nfound;

	AUDIT_ARG_PID(pid);
	AUDIT_ARG_VALUE(options);

	q = td->td_proc;
	if (pid == 0) {
		PROC_LOCK(q);
		pid = -q->p_pgid;
		PROC_UNLOCK(q);
	}
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WNOWAIT|WLINUXCLONE))
		return (EINVAL);
loop:
	if (q->p_flag & P_STATCHILD) {
		PROC_LOCK(q);
		q->p_flag &= ~P_STATCHILD;
		PROC_UNLOCK(q);
	}
	nfound = 0;
	sx_xlock(&proctree_lock);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		PROC_LOCK(p);
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p_canwait(td, p)) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			PROC_UNLOCK(p);
			continue;
		}

		nfound++;
		PROC_SLOCK(p);
		if (p->p_state == PRS_ZOMBIE) {
			proc_reap(td, p, status, options, rusage);
			return (0);
		}
		if ((p->p_flag & P_STOPPED_SIG) &&
		    (p->p_suspcount == p->p_numthreads) &&
		    (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || options & WUNTRACED)) {
			PROC_SUNLOCK(p);
			p->p_flag |= P_WAITED;
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);

			PROC_LOCK(q);
			sigqueue_take(p->p_ksi);
			PROC_UNLOCK(q);
			PROC_UNLOCK(p);

			return (0);
		}
		PROC_SUNLOCK(p);
		if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
			p->p_flag &= ~P_CONTINUED;

			PROC_LOCK(q);
			sigqueue_take(p->p_ksi);
			PROC_UNLOCK(q);
			PROC_UNLOCK(p);

			if (status)
				*status = SIGCONT;
			return (0);
		}
		PROC_UNLOCK(p);
	}
	if (nfound == 0) {
		sx_xunlock(&proctree_lock);
		return (ECHILD);
	}
	if (options & WNOHANG) {
		sx_xunlock(&proctree_lock);
		td->td_retval[0] = 0;
		return (0);
	}
	PROC_LOCK(q);
	sx_xunlock(&proctree_lock);
	if (q->p_flag & P_STATCHILD) {
		q->p_flag &= ~P_STATCHILD;
		error = 0;
	} else
		error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "wait", 0);
	PROC_UNLOCK(q);
	if (error)
		return (error);
	goto loop;
}

/*
 * Make process 'parent' the new parent of process 'child'.
 * Must be called with an exclusive hold of proctree lock.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(child, MA_OWNED);
	if (child->p_pptr == parent)
		return;

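	/*
	 * Take back the child's pending SIGCHLD siginfo from the old
	 * parent's queue so that a stale child-status notification is not
	 * left behind.
	 */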
	PROC_LOCK(child->p_pptr);
	sigqueue_take(child->p_ksi);
	PROC_UNLOCK(child->p_pptr);
	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}