kern_exit.c: revision 214158 vs. revision 215664
1/*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
35 */
36
37#include <sys/cdefs.h>
 38 -__FBSDID("$FreeBSD: head/sys/kern/kern_exit.c 214158 2010-10-21 19:17:40Z jhb $");
 38 +__FBSDID("$FreeBSD: head/sys/kern/kern_exit.c 215664 2010-11-22 09:06:59Z netchild $");
39
40#include "opt_compat.h"
41#include "opt_kdtrace.h"
42#include "opt_ktrace.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysproto.h>
47#include <sys/eventhandler.h>
48#include <sys/kernel.h>
49#include <sys/malloc.h>
50#include <sys/lock.h>
51#include <sys/mutex.h>
52#include <sys/proc.h>
53#include <sys/pioctl.h>
54#include <sys/jail.h>
55#include <sys/tty.h>
56#include <sys/wait.h>
57#include <sys/vmmeter.h>
58#include <sys/vnode.h>
59#include <sys/resourcevar.h>
60#include <sys/sbuf.h>
61#include <sys/signalvar.h>
62#include <sys/sched.h>
63#include <sys/sx.h>
64#include <sys/syscallsubr.h>
65#include <sys/syslog.h>
66#include <sys/ptrace.h>
67#include <sys/acct.h> /* for acct_process() function prototype */
68#include <sys/filedesc.h>
69#include <sys/sdt.h>
70#include <sys/shm.h>
71#include <sys/sem.h>
72#ifdef KTRACE
73#include <sys/ktrace.h>
74#endif
75
76#include <security/audit/audit.h>
77#include <security/mac/mac_framework.h>
78
79#include <vm/vm.h>
80#include <vm/vm_extern.h>
81#include <vm/vm_param.h>
82#include <vm/pmap.h>
83#include <vm/vm_map.h>
84#include <vm/vm_page.h>
85#include <vm/uma.h>
86
87#ifdef KDTRACE_HOOKS
88#include <sys/dtrace_bsd.h>
89dtrace_execexit_func_t dtrace_fasttrap_exit;
90#endif
91
92SDT_PROVIDER_DECLARE(proc);
93SDT_PROBE_DEFINE(proc, kernel, , exit, exit);
94SDT_PROBE_ARGTYPE(proc, kernel, , exit, 0, "int");
95
96/* Required to be non-static for SysVR4 emulator */
97MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");
98
99/* Hook for NFS teardown procedure. */
100void (*nlminfo_release_p)(struct proc *p);
101
102/*
103 * exit -- death of process.
104 */
105void
106sys_exit(struct thread *td, struct sys_exit_args *uap)
107{
108
109 exit1(td, W_EXITCODE(uap->rval, 0));
110 /* NOTREACHED */
111}
112
113/*
114 * Exit: deallocate address space and other resources, change proc state to
115 * zombie, and unlink proc from allproc and parent's lists. Save exit status
116 * and rusage for wait(). Check for child processes and orphan them.
117 */
118void
119exit1(struct thread *td, int rv)
120{
121 struct proc *p, *nq, *q;
122 struct vnode *vtmp;
123 struct vnode *ttyvp = NULL;
124 struct plimit *plim;
125 int locked;
126
127 mtx_assert(&Giant, MA_NOTOWNED);
128
129 p = td->td_proc;
130 /*
131 * XXX in case we're rebooting we just let init die in order to
132 * work around an unsolved stack overflow seen very late during
133 * shutdown on sparc64 when the gmirror worker process exists.
134 */
135 if (p == initproc && rebooting == 0) {
136 printf("init died (signal %d, exit %d)\n",
137 WTERMSIG(rv), WEXITSTATUS(rv));
138 panic("Going nowhere without my init!");
139 }
140
141 /*
142 * MUST abort all other threads before proceeding past here.
143 */
144 PROC_LOCK(p);
145 while (p->p_flag & P_HADTHREADS) {
146 /*
147 * First check if some other thread got here before us..
148 * if so, act apropriatly, (exit or suspend);
149 */
150 thread_suspend_check(0);
151
152 /*
153 * Kill off the other threads. This requires
154 * some co-operation from other parts of the kernel
155 * so it may not be instantaneous. With this state set
156 * any thread entering the kernel from userspace will
157 * thread_exit() in trap(). Any thread attempting to
158 * sleep will return immediately with EINTR or EWOULDBLOCK
159 * which will hopefully force them to back out to userland
160 * freeing resources as they go. Any thread attempting
161 * to return to userland will thread_exit() from userret().
162 * thread_exit() will unsuspend us when the last of the
163 * other threads exits.
164 * If there is already a thread singler after resumption,
165 * calling thread_single will fail; in that case, we just
166 * re-check all suspension request, the thread should
167 * either be suspended there or exit.
168 */
169 if (! thread_single(SINGLE_EXIT))
170 break;
171
172 /*
173 * All other activity in this process is now stopped.
174 * Threading support has been turned off.
175 */
176 }
177 KASSERT(p->p_numthreads == 1,
178 ("exit1: proc %p exiting with %d threads", p, p->p_numthreads));
179 /*
180 * Wakeup anyone in procfs' PIOCWAIT. They should have a hold
181 * on our vmspace, so we should block below until they have
182 * released their reference to us. Note that if they have
183 * requested S_EXIT stops we will block here until they ack
184 * via PIOCCONT.
185 */
186 _STOPEVENT(p, S_EXIT, rv);
187
188 /*
189 * Note that we are exiting and do another wakeup of anyone in
190 * PIOCWAIT in case they aren't listening for S_EXIT stops or
191 * decided to wait again after we told them we are exiting.
192 */
193 p->p_flag |= P_WEXIT;
194 wakeup(&p->p_stype);
195
196 /*
197 * Wait for any processes that have a hold on our vmspace to
198 * release their reference.
199 */
200 while (p->p_lock > 0)
201 msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);
202
 203 +	p->p_xstat = rv;	/* Let event handler change exit status */
203 PROC_UNLOCK(p);
204 /* Drain the limit callout while we don't have the proc locked */
205 callout_drain(&p->p_limco);
206
207#ifdef AUDIT
208 /*
209 * The Sun BSM exit token contains two components: an exit status as
210 * passed to exit(), and a return value to indicate what sort of exit
211 * it was. The exit status is WEXITSTATUS(rv), but it's not clear
212 * what the return value is.
213 */
214 AUDIT_ARG_EXIT(WEXITSTATUS(rv), 0);
215 AUDIT_SYSCALL_EXIT(0, td);
216#endif
217
218 /* Are we a task leader? */
219 if (p == p->p_leader) {
220 mtx_lock(&ppeers_lock);
221 q = p->p_peers;
222 while (q != NULL) {
223 PROC_LOCK(q);
224 psignal(q, SIGKILL);
225 PROC_UNLOCK(q);
226 q = q->p_peers;
227 }
228 while (p->p_peers != NULL)
229 msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
230 mtx_unlock(&ppeers_lock);
231 }
232
233 /*
234 * Check if any loadable modules need anything done at process exit.
235 * E.g. SYSV IPC stuff
236 * XXX what if one of these generates an error?
237 */
238 EVENTHANDLER_INVOKE(process_exit, p);
239
240 /*
241 * If parent is waiting for us to exit or exec,
242 * P_PPWAIT is set; we will wakeup the parent below.
243 */
244 PROC_LOCK(p);
 246 +	rv = p->p_xstat;	/* Event handler could change exit status */
245 stopprofclock(p);
246 p->p_flag &= ~(P_TRACED | P_PPWAIT);
247
248 /*
249 * Stop the real interval timer. If the handler is currently
250 * executing, prevent it from rearming itself and let it finish.
251 */
252 if (timevalisset(&p->p_realtimer.it_value) &&
253 callout_stop(&p->p_itcallout) == 0) {
254 timevalclear(&p->p_realtimer.it_interval);
255 msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0);
256 KASSERT(!timevalisset(&p->p_realtimer.it_value),
257 ("realtime timer is still armed"));
258 }
259 PROC_UNLOCK(p);
260
261 /*
262 * Reset any sigio structures pointing to us as a result of
263 * F_SETOWN with our pid.
264 */
265 funsetownlst(&p->p_sigiolst);
266
267 /*
268 * If this process has an nlminfo data area (for lockd), release it
269 */
270 if (nlminfo_release_p != NULL && p->p_nlminfo != NULL)
271 (*nlminfo_release_p)(p);
272
273 /*
274 * Close open files and release open-file table.
275 * This may block!
276 */
277 fdfree(td);
278
279 /*
280 * If this thread tickled GEOM, we need to wait for the giggling to
281 * stop before we return to userland
282 */
283 if (td->td_pflags & TDP_GEOM)
284 g_waitidle();
285
286 /*
287 * Remove ourself from our leader's peer list and wake our leader.
288 */
289 mtx_lock(&ppeers_lock);
290 if (p->p_leader->p_peers) {
291 q = p->p_leader;
292 while (q->p_peers != p)
293 q = q->p_peers;
294 q->p_peers = p->p_peers;
295 wakeup(p->p_leader);
296 }
297 mtx_unlock(&ppeers_lock);
298
299 vmspace_exit(td);
300
301 sx_xlock(&proctree_lock);
302 if (SESS_LEADER(p)) {
303 struct session *sp = p->p_session;
304 struct tty *tp;
305
306 /*
307 * s_ttyp is not zero'd; we use this to indicate that
308 * the session once had a controlling terminal. (for
309 * logging and informational purposes)
310 */
311 SESS_LOCK(sp);
312 ttyvp = sp->s_ttyvp;
313 tp = sp->s_ttyp;
314 sp->s_ttyvp = NULL;
315 sp->s_ttydp = NULL;
316 sp->s_leader = NULL;
317 SESS_UNLOCK(sp);
318
319 /*
320 * Signal foreground pgrp and revoke access to
321 * controlling terminal if it has not been revoked
322 * already.
323 *
324 * Because the TTY may have been revoked in the mean
325 * time and could already have a new session associated
326 * with it, make sure we don't send a SIGHUP to a
327 * foreground process group that does not belong to this
328 * session.
329 */
330
331 if (tp != NULL) {
332 tty_lock(tp);
333 if (tp->t_session == sp)
334 tty_signal_pgrp(tp, SIGHUP);
335 tty_unlock(tp);
336 }
337
338 if (ttyvp != NULL) {
339 sx_xunlock(&proctree_lock);
340 if (vn_lock(ttyvp, LK_EXCLUSIVE) == 0) {
341 VOP_REVOKE(ttyvp, REVOKEALL);
342 VOP_UNLOCK(ttyvp, 0);
343 }
344 sx_xlock(&proctree_lock);
345 }
346 }
347 fixjobc(p, p->p_pgrp, 0);
348 sx_xunlock(&proctree_lock);
349 (void)acct_process(td);
350
351 /* Release the TTY now we've unlocked everything. */
352 if (ttyvp != NULL)
353 vrele(ttyvp);
354#ifdef KTRACE
355 ktrprocexit(td);
356#endif
357 /*
358 * Release reference to text vnode
359 */
360 if ((vtmp = p->p_textvp) != NULL) {
361 p->p_textvp = NULL;
362 locked = VFS_LOCK_GIANT(vtmp->v_mount);
363 vrele(vtmp);
364 VFS_UNLOCK_GIANT(locked);
365 }
366
367 /*
368 * Release our limits structure.
369 */
370 PROC_LOCK(p);
371 plim = p->p_limit;
372 p->p_limit = NULL;
373 PROC_UNLOCK(p);
374 lim_free(plim);
375
376 tidhash_remove(td);
377
378 /*
379 * Remove proc from allproc queue and pidhash chain.
380 * Place onto zombproc. Unlink from parent's child list.
381 */
382 sx_xlock(&allproc_lock);
383 LIST_REMOVE(p, p_list);
384 LIST_INSERT_HEAD(&zombproc, p, p_list);
385 LIST_REMOVE(p, p_hash);
386 sx_xunlock(&allproc_lock);
387
388 /*
389 * Call machine-dependent code to release any
390 * machine-dependent resources other than the address space.
391 * The address space is released by "vmspace_exitfree(p)" in
392 * vm_waitproc().
393 */
394 cpu_exit(td);
395
396 WITNESS_WARN(WARN_PANIC, NULL, "process (pid %d) exiting", p->p_pid);
397
398 /*
399 * Reparent all of our children to init.
400 */
401 sx_xlock(&proctree_lock);
402 q = LIST_FIRST(&p->p_children);
403 if (q != NULL) /* only need this if any child is S_ZOMB */
404 wakeup(initproc);
405 for (; q != NULL; q = nq) {
406 nq = LIST_NEXT(q, p_sibling);
407 PROC_LOCK(q);
408 proc_reparent(q, initproc);
409 q->p_sigparent = SIGCHLD;
410 /*
411 * Traced processes are killed
412 * since their existence means someone is screwing up.
413 */
414 if (q->p_flag & P_TRACED) {
415 struct thread *temp;
416
417 q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);
418 FOREACH_THREAD_IN_PROC(q, temp)
419 temp->td_dbgflags &= ~TDB_SUSPEND;
420 psignal(q, SIGKILL);
421 }
422 PROC_UNLOCK(q);
423 }
424
425 /* Save exit status. */
426 PROC_LOCK(p);
 427 -	p->p_xstat = rv;
428 p->p_xthread = td;
429
430 /* Tell the prison that we are gone. */
431 prison_proc_free(p->p_ucred->cr_prison);
432
433#ifdef KDTRACE_HOOKS
434 /*
435 * Tell the DTrace fasttrap provider about the exit if it
436 * has declared an interest.
437 */
438 if (dtrace_fasttrap_exit)
439 dtrace_fasttrap_exit(p);
440#endif
441
442 /*
443 * Notify interested parties of our demise.
444 */
445 KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);
446
447#ifdef KDTRACE_HOOKS
448 int reason = CLD_EXITED;
449 if (WCOREDUMP(rv))
450 reason = CLD_DUMPED;
451 else if (WIFSIGNALED(rv))
452 reason = CLD_KILLED;
453 SDT_PROBE(proc, kernel, , exit, reason, 0, 0, 0, 0);
454#endif
455
456 /*
457 * Just delete all entries in the p_klist. At this point we won't
458 * report any more events, and there are nasty race conditions that
459 * can beat us if we don't.
460 */
461 knlist_clear(&p->p_klist, 1);
462
463 /*
464 * Notify parent that we're gone. If parent has the PS_NOCLDWAIT
465 * flag set, or if the handler is set to SIG_IGN, notify process
466 * 1 instead (and hope it will handle this situation).
467 */
468 PROC_LOCK(p->p_pptr);
469 mtx_lock(&p->p_pptr->p_sigacts->ps_mtx);
470 if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
471 struct proc *pp;
472
473 mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
474 pp = p->p_pptr;
475 PROC_UNLOCK(pp);
476 proc_reparent(p, initproc);
477 p->p_sigparent = SIGCHLD;
478 PROC_LOCK(p->p_pptr);
479
480 /*
481 * Notify parent, so in case he was wait(2)ing or
482 * executing waitpid(2) with our pid, he will
483 * continue.
484 */
485 wakeup(pp);
486 } else
487 mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
488
489 if (p->p_pptr == initproc)
490 psignal(p->p_pptr, SIGCHLD);
491 else if (p->p_sigparent != 0) {
492 if (p->p_sigparent == SIGCHLD)
493 childproc_exited(p);
494 else /* LINUX thread */
495 psignal(p->p_pptr, p->p_sigparent);
496 }
497 sx_xunlock(&proctree_lock);
498
499 /*
500 * The state PRS_ZOMBIE prevents other proesses from sending
501 * signal to the process, to avoid memory leak, we free memory
502 * for signal queue at the time when the state is set.
503 */
504 sigqueue_flush(&p->p_sigqueue);
505 sigqueue_flush(&td->td_sigqueue);
506
507 /*
508 * We have to wait until after acquiring all locks before
509 * changing p_state. We need to avoid all possible context
510 * switches (including ones from blocking on a mutex) while
511 * marked as a zombie. We also have to set the zombie state
512 * before we release the parent process' proc lock to avoid
513 * a lost wakeup. So, we first call wakeup, then we grab the
514 * sched lock, update the state, and release the parent process'
515 * proc lock.
516 */
517 wakeup(p->p_pptr);
518 cv_broadcast(&p->p_pwait);
519 sched_exit(p->p_pptr, td);
520 PROC_SLOCK(p);
521 p->p_state = PRS_ZOMBIE;
522 PROC_UNLOCK(p->p_pptr);
523
524 /*
525 * Hopefully no one will try to deliver a signal to the process this
526 * late in the game.
527 */
528 knlist_destroy(&p->p_klist);
529
530 /*
531 * Save our children's rusage information in our exit rusage.
532 */
533 ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
534
535 /*
536 * Make sure the scheduler takes this thread out of its tables etc.
537 * This will also release this thread's reference to the ucred.
538 * Other thread parts to release include pcb bits and such.
539 */
540 thread_exit();
541}
542
543
544#ifndef _SYS_SYSPROTO_H_
545struct abort2_args {
546 char *why;
547 int nargs;
548 void **args;
549};
550#endif
551
552int
553abort2(struct thread *td, struct abort2_args *uap)
554{
555 struct proc *p = td->td_proc;
556 struct sbuf *sb;
557 void *uargs[16];
558 int error, i, sig;
559
560 /*
561 * Do it right now so we can log either proper call of abort2(), or
562 * note, that invalid argument was passed. 512 is big enough to
563 * handle 16 arguments' descriptions with additional comments.
564 */
565 sb = sbuf_new(NULL, NULL, 512, SBUF_FIXEDLEN);
566 sbuf_clear(sb);
567 sbuf_printf(sb, "%s(pid %d uid %d) aborted: ",
568 p->p_comm, p->p_pid, td->td_ucred->cr_uid);
569 /*
570 * Since we can't return from abort2(), send SIGKILL in cases, where
571 * abort2() was called improperly
572 */
573 sig = SIGKILL;
574 /* Prevent from DoSes from user-space. */
575 if (uap->nargs < 0 || uap->nargs > 16)
576 goto out;
577 if (uap->nargs > 0) {
578 if (uap->args == NULL)
579 goto out;
580 error = copyin(uap->args, uargs, uap->nargs * sizeof(void *));
581 if (error != 0)
582 goto out;
583 }
584 /*
585 * Limit size of 'reason' string to 128. Will fit even when
586 * maximal number of arguments was chosen to be logged.
587 */
588 if (uap->why != NULL) {
589 error = sbuf_copyin(sb, uap->why, 128);
590 if (error < 0)
591 goto out;
592 } else {
593 sbuf_printf(sb, "(null)");
594 }
595 if (uap->nargs > 0) {
596 sbuf_printf(sb, "(");
597 for (i = 0;i < uap->nargs; i++)
598 sbuf_printf(sb, "%s%p", i == 0 ? "" : ", ", uargs[i]);
599 sbuf_printf(sb, ")");
600 }
601 /*
602 * Final stage: arguments were proper, string has been
603 * successfully copied from userspace, and copying pointers
604 * from user-space succeed.
605 */
606 sig = SIGABRT;
607out:
608 if (sig == SIGKILL) {
609 sbuf_trim(sb);
610 sbuf_printf(sb, " (Reason text inaccessible)");
611 }
612 sbuf_cat(sb, "\n");
613 sbuf_finish(sb);
614 log(LOG_INFO, "%s", sbuf_data(sb));
615 sbuf_delete(sb);
616 exit1(td, W_EXITCODE(0, sig));
617 return (0);
618}
619
620
621#ifdef COMPAT_43
622/*
623 * The dirty work is handled by kern_wait().
624 */
625int
626owait(struct thread *td, struct owait_args *uap __unused)
627{
628 int error, status;
629
630 error = kern_wait(td, WAIT_ANY, &status, 0, NULL);
631 if (error == 0)
632 td->td_retval[1] = status;
633 return (error);
634}
635#endif /* COMPAT_43 */
636
637/*
638 * The dirty work is handled by kern_wait().
639 */
640int
641wait4(struct thread *td, struct wait_args *uap)
642{
643 struct rusage ru, *rup;
644 int error, status;
645
646 if (uap->rusage != NULL)
647 rup = &ru;
648 else
649 rup = NULL;
650 error = kern_wait(td, uap->pid, &status, uap->options, rup);
651 if (uap->status != NULL && error == 0)
652 error = copyout(&status, uap->status, sizeof(status));
653 if (uap->rusage != NULL && error == 0)
654 error = copyout(&ru, uap->rusage, sizeof(struct rusage));
655 return (error);
656}
657
658/*
659 * Reap the remains of a zombie process and optionally return status and
660 * rusage. Asserts and will release both the proctree_lock and the process
661 * lock as part of its work.
662 */
663static void
664proc_reap(struct thread *td, struct proc *p, int *status, int options,
665 struct rusage *rusage)
666{
667 struct proc *q, *t;
668
669 sx_assert(&proctree_lock, SA_XLOCKED);
670 PROC_LOCK_ASSERT(p, MA_OWNED);
671 PROC_SLOCK_ASSERT(p, MA_OWNED);
672 KASSERT(p->p_state == PRS_ZOMBIE, ("proc_reap: !PRS_ZOMBIE"));
673
674 q = td->td_proc;
675 if (rusage) {
676 *rusage = p->p_ru;
677 calcru(p, &rusage->ru_utime, &rusage->ru_stime);
678 }
679 PROC_SUNLOCK(p);
680 td->td_retval[0] = p->p_pid;
681 if (status)
682 *status = p->p_xstat; /* convert to int */
683 if (options & WNOWAIT) {
684 /*
685 * Only poll, returning the status. Caller does not wish to
686 * release the proc struct just yet.
687 */
688 PROC_UNLOCK(p);
689 sx_xunlock(&proctree_lock);
690 return;
691 }
692
693 PROC_LOCK(q);
694 sigqueue_take(p->p_ksi);
695 PROC_UNLOCK(q);
696 PROC_UNLOCK(p);
697
698 /*
699 * If we got the child via a ptrace 'attach', we need to give it back
700 * to the old parent.
701 */
702 if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
703 PROC_LOCK(p);
704 p->p_oppid = 0;
705 proc_reparent(p, t);
706 PROC_UNLOCK(p);
707 pksignal(t, SIGCHLD, p->p_ksi);
708 wakeup(t);
709 cv_broadcast(&p->p_pwait);
710 PROC_UNLOCK(t);
711 sx_xunlock(&proctree_lock);
712 return;
713 }
714
715 /*
716 * Remove other references to this process to ensure we have an
717 * exclusive reference.
718 */
719 sx_xlock(&allproc_lock);
720 LIST_REMOVE(p, p_list); /* off zombproc */
721 sx_xunlock(&allproc_lock);
722 LIST_REMOVE(p, p_sibling);
723 leavepgrp(p);
724 sx_xunlock(&proctree_lock);
725
726 /*
727 * As a side effect of this lock, we know that all other writes to
728 * this proc are visible now, so no more locking is needed for p.
729 */
730 PROC_LOCK(p);
731 p->p_xstat = 0; /* XXX: why? */
732 PROC_UNLOCK(p);
733 PROC_LOCK(q);
734 ruadd(&q->p_stats->p_cru, &q->p_crux, &p->p_ru, &p->p_rux);
735 PROC_UNLOCK(q);
736
737 /*
738 * Decrement the count of procs running with this uid.
739 */
740 (void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);
741
742 /*
743 * Free credentials, arguments, and sigacts.
744 */
745 crfree(p->p_ucred);
746 p->p_ucred = NULL;
747 pargs_drop(p->p_args);
748 p->p_args = NULL;
749 sigacts_free(p->p_sigacts);
750 p->p_sigacts = NULL;
751
752 /*
753 * Do any thread-system specific cleanups.
754 */
755 thread_wait(p);
756
757 /*
758 * Give vm and machine-dependent layer a chance to free anything that
759 * cpu_exit couldn't release while still running in process context.
760 */
761 vm_waitproc(p);
762#ifdef MAC
763 mac_proc_destroy(p);
764#endif
765 KASSERT(FIRST_THREAD_IN_PROC(p),
766 ("proc_reap: no residual thread!"));
767 uma_zfree(proc_zone, p);
768 sx_xlock(&allproc_lock);
769 nprocs--;
770 sx_xunlock(&allproc_lock);
771}
772
773int
774kern_wait(struct thread *td, pid_t pid, int *status, int options,
775 struct rusage *rusage)
776{
777 struct proc *p, *q;
778 int error, nfound;
779
780 AUDIT_ARG_PID(pid);
781 AUDIT_ARG_VALUE(options);
782
783 q = td->td_proc;
784 if (pid == 0) {
785 PROC_LOCK(q);
786 pid = -q->p_pgid;
787 PROC_UNLOCK(q);
788 }
789 if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WNOWAIT|WLINUXCLONE))
790 return (EINVAL);
791loop:
792 if (q->p_flag & P_STATCHILD) {
793 PROC_LOCK(q);
794 q->p_flag &= ~P_STATCHILD;
795 PROC_UNLOCK(q);
796 }
797 nfound = 0;
798 sx_xlock(&proctree_lock);
799 LIST_FOREACH(p, &q->p_children, p_sibling) {
800 PROC_LOCK(p);
801 if (pid != WAIT_ANY &&
802 p->p_pid != pid && p->p_pgid != -pid) {
803 PROC_UNLOCK(p);
804 continue;
805 }
806 if (p_canwait(td, p)) {
807 PROC_UNLOCK(p);
808 continue;
809 }
810
811 /*
812 * This special case handles a kthread spawned by linux_clone
813 * (see linux_misc.c). The linux_wait4 and linux_waitpid
814 * functions need to be able to distinguish between waiting
815 * on a process and waiting on a thread. It is a thread if
816 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
817 * signifies we want to wait for threads and not processes.
818 */
819 if ((p->p_sigparent != SIGCHLD) ^
820 ((options & WLINUXCLONE) != 0)) {
821 PROC_UNLOCK(p);
822 continue;
823 }
824
825 nfound++;
826 PROC_SLOCK(p);
827 if (p->p_state == PRS_ZOMBIE) {
828 proc_reap(td, p, status, options, rusage);
829 return (0);
830 }
831 if ((p->p_flag & P_STOPPED_SIG) &&
832 (p->p_suspcount == p->p_numthreads) &&
833 (p->p_flag & P_WAITED) == 0 &&
834 (p->p_flag & P_TRACED || options & WUNTRACED)) {
835 PROC_SUNLOCK(p);
836 p->p_flag |= P_WAITED;
837 sx_xunlock(&proctree_lock);
838 td->td_retval[0] = p->p_pid;
839 if (status)
840 *status = W_STOPCODE(p->p_xstat);
841
842 PROC_LOCK(q);
843 sigqueue_take(p->p_ksi);
844 PROC_UNLOCK(q);
845 PROC_UNLOCK(p);
846
847 return (0);
848 }
849 PROC_SUNLOCK(p);
850 if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
851 sx_xunlock(&proctree_lock);
852 td->td_retval[0] = p->p_pid;
853 p->p_flag &= ~P_CONTINUED;
854
855 PROC_LOCK(q);
856 sigqueue_take(p->p_ksi);
857 PROC_UNLOCK(q);
858 PROC_UNLOCK(p);
859
860 if (status)
861 *status = SIGCONT;
862 return (0);
863 }
864 PROC_UNLOCK(p);
865 }
866 if (nfound == 0) {
867 sx_xunlock(&proctree_lock);
868 return (ECHILD);
869 }
870 if (options & WNOHANG) {
871 sx_xunlock(&proctree_lock);
872 td->td_retval[0] = 0;
873 return (0);
874 }
875 PROC_LOCK(q);
876 sx_xunlock(&proctree_lock);
877 if (q->p_flag & P_STATCHILD) {
878 q->p_flag &= ~P_STATCHILD;
879 error = 0;
880 } else
881 error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "wait", 0);
882 PROC_UNLOCK(q);
883 if (error)
884 return (error);
885 goto loop;
886}
887
888/*
889 * Make process 'parent' the new parent of process 'child'.
890 * Must be called with an exclusive hold of proctree lock.
891 */
892void
893proc_reparent(struct proc *child, struct proc *parent)
894{
895
896 sx_assert(&proctree_lock, SX_XLOCKED);
897 PROC_LOCK_ASSERT(child, MA_OWNED);
898 if (child->p_pptr == parent)
899 return;
900
901 PROC_LOCK(child->p_pptr);
902 sigqueue_take(child->p_ksi);
903 PROC_UNLOCK(child->p_pptr);
904 LIST_REMOVE(child, p_sibling);
905 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
906 child->p_pptr = parent;
907}
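
The functional change between r214158 and r215664 is the pair of marked lines in exit1(): the pending exit status is stored in p->p_xstat before EVENTHANDLER_INVOKE(process_exit, p) runs and reloaded into rv afterwards, so a process_exit eventhandler may rewrite the status that wait(2) will later report; the later unconditional p->p_xstat = rv assignment becomes redundant and is dropped. Below is a minimal sketch of a kernel module that uses this hook through the standard eventhandler(9) registration macros. The module name, handler name, and the particular status rewrite are hypothetical and exist only to illustrate the mechanism; this is not code from either revision.

/*
 * Sketch only: a hypothetical module that registers a process_exit
 * eventhandler and rewrites a process's exit status, relying on the
 * p_xstat save/reload added in r215664.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/wait.h>

static eventhandler_tag example_exit_tag;

static void
example_exit_hook(void *arg __unused, struct proc *p)
{

	/*
	 * exit1() has already copied the pending status into p->p_xstat
	 * and reloads rv from it after the process_exit handlers return,
	 * so whatever is written here is what wait(2) reports.  The proc
	 * lock is not held while the handlers run, so take it here.
	 */
	PROC_LOCK(p);
	if (WIFEXITED(p->p_xstat) && WEXITSTATUS(p->p_xstat) == 42)
		p->p_xstat = W_EXITCODE(0, 0);	/* hypothetical rewrite */
	PROC_UNLOCK(p);
}

static int
example_exit_modevent(module_t mod __unused, int what, void *arg __unused)
{

	switch (what) {
	case MOD_LOAD:
		example_exit_tag = EVENTHANDLER_REGISTER(process_exit,
		    example_exit_hook, NULL, EVENTHANDLER_PRI_ANY);
		return (0);
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(process_exit, example_exit_tag);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t example_exit_mod = {
	"example_exit",
	example_exit_modevent,
	NULL
};

DECLARE_MODULE(example_exit, example_exit_mod, SI_SUB_EXEC, SI_ORDER_ANY);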