/* kern_exit.c, revision 112389 */
1/*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by the University of
21 *	California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
39 * $FreeBSD: head/sys/kern/kern_exit.c 112389 2003-03-19 00:33:38Z des $
40 */
41
42#include "opt_compat.h"
43#include "opt_ktrace.h"
44#include "opt_mac.h"
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/sysproto.h>
49#include <sys/kernel.h>
50#include <sys/malloc.h>
51#include <sys/lock.h>
52#include <sys/mutex.h>
53#include <sys/proc.h>
54#include <sys/pioctl.h>
55#include <sys/tty.h>
56#include <sys/wait.h>
57#include <sys/vmmeter.h>
58#include <sys/vnode.h>
59#include <sys/resourcevar.h>
60#include <sys/signalvar.h>
61#include <sys/sched.h>
62#include <sys/sx.h>
63#include <sys/ptrace.h>
64#include <sys/acct.h>		/* for acct_process() function prototype */
65#include <sys/filedesc.h>
66#include <sys/mac.h>
67#include <sys/shm.h>
68#include <sys/sem.h>
69#include <sys/jail.h>
70#ifdef KTRACE
71#include <sys/ktrace.h>
72#endif
73
74#include <vm/vm.h>
75#include <vm/vm_extern.h>
76#include <vm/vm_param.h>
77#include <vm/pmap.h>
78#include <vm/vm_map.h>
79#include <vm/vm_page.h>
80#include <vm/uma.h>
81#include <sys/user.h>
82
/* Required to be non-static for SysVR4 emulator */
MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

/* Private to this file; backs the exit-callout list entries below. */
static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/* Common back end shared by wait4() and the COMPAT_43 owait() entry point. */
static int wait1(struct thread *, struct wait_args *, int);

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;		/* callback run with the exiting proc */
	TAILQ_ENTRY(exitlist) next;	/* linkage on exit_list */
};

TAILQ_HEAD(exit_list_head, exitlist);
/* Registered callouts; walked in exit1(), managed by at_exit()/rm_at_exit(). */
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);
100
101/*
102 * exit --
103 *	Death of process.
104 *
105 * MPSAFE
106 */
107void
108sys_exit(td, uap)
109	struct thread *td;
110	struct sys_exit_args /* {
111		int	rval;
112	} */ *uap;
113{
114
115	mtx_lock(&Giant);
116	exit1(td, W_EXITCODE(uap->rval, 0));
117	/* NOTREACHED */
118}
119
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 *
 * 'rv' is the encoded status word (see W_EXITCODE()); it is stored in
 * p_xstat for the parent to harvest via wait1().  Giant must be held on
 * entry (GIANT_REQUIRED below).  This function never returns: it ends in
 * thread_exit().
 */
void
exit1(td, rv)
	register struct thread *td;
	int rv;
{
	struct exitlist *ep;
	struct proc *p, *nq, *q;
	struct tty *tp;
	struct vnode *ttyvp;
	register struct vmspace *vm;
	struct vnode *vtmp;
#ifdef KTRACE
	struct vnode *tracevp;
	struct ucred *tracecred;
#endif

	GIANT_REQUIRED;

	p = td->td_proc;
	/* A dying init means the system cannot continue. */
	if (p == initproc) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * XXXKSE: MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	if (p->p_flag & P_THREADED) {
		/*
		 * First check if some other thread got here before us..
		 * if so, act appropriately (exit or suspend).
		 */
		thread_suspend_check(0);

		/*
		 * Kill off the other threads. This requires
		 * some co-operation from other parts of the kernel
		 * so it may not be instant.
		 * With this state set:
		 * Any thread entering the kernel from userspace will
		 * thread_exit() in trap().  Any thread attempting to
		 * sleep will return immediately
		 * with EINTR or EWOULDBLOCK, which will hopefully force them
		 * to back out to userland, freeing resources as they go, and
		 * anything attempting to return to userland will thread_exit()
		 * from userret().  thread_exit() will unsuspend us
		 * when the last other thread exits.
		 */
		if (thread_single(SINGLE_EXIT)) {
			panic ("Exit: Single threading fouled up");
		}
		/*
		 * All other activity in this process is now stopped.
		 * Remove excess KSEs and KSEGRPS. XXXKSE (when we have them)
		 * ...
		 * Turn off threading support.
		 */
		p->p_flag &= ~P_THREADED;
		thread_single_end();	/* Don't need this any more. */
	}
	/*
	 * With this state set:
	 * Any thread entering the kernel from userspace will thread_exit()
	 * in trap().  Any thread attempting to sleep will return immediately
	 * with EINTR or EWOULDBLOCK, which will hopefully force them
	 * to back out to userland, freeing resources as they go, and
	 * anything attempting to return to userland will thread_exit()
	 * from userret().  thread_exit() will do a wakeup on p->p_numthreads
	 * if it transitions to 1.
	 */

	p->p_flag |= P_WEXIT;
	PROC_UNLOCK(p);

	/*
	 * Are we a task leader?  If so, SIGKILL all our peers and sleep
	 * until each one has removed itself from our p_peers list (each
	 * exiting peer wakes the leader; see the unlink code further down).
	 */
	if (p == p->p_leader) {
		mtx_lock(&ppeers_lock);
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		while (p->p_peers != NULL)
			msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
		mtx_unlock(&ppeers_lock);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(p);


	/*
	 * Allocate the final rusage record; it is filled in below and
	 * freed (M_ZOMBIE) by wait1() when the parent harvests us.
	 */
	MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
		M_ZOMBIE, M_WAITOK);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	stopprofclock(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	SIGEMPTYSET(p->p_siglist);
	/* Disarm the real interval timer if it is pending. */
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_itcallout);
	PROC_UNLOCK(p);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(td);

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	mtx_lock(&ppeers_lock);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup(p->p_leader);
	}
	mtx_unlock(&ppeers_lock);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;
	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	++vm->vm_exitingcnt;
	if (--vm->vm_refcnt == 0) {
		shmexit(vm);
		vm_page_lock_queues();
		pmap_remove_pages(vmspace_pmap(vm), vm_map_min(&vm->vm_map),
		    vm_map_max(&vm->vm_map));
		vm_page_unlock_queues();
		(void) vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
		    vm_map_max(&vm->vm_map));
	}

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		register struct session *sp;

		sp = p->p_session;
		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				tp = sp->s_ttyp;
				if (sp->s_ttyp->t_pgrp) {
					PGRP_LOCK(sp->s_ttyp->t_pgrp);
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
					PGRP_UNLOCK(sp->s_ttyp->t_pgrp);
				}
				/* XXX tp should be locked. */
				sx_xunlock(&proctree_lock);
				(void) ttywait(tp);
				sx_xlock(&proctree_lock);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp) {
					ttyvp = sp->s_ttyvp;
					SESS_LOCK(p->p_session);
					sp->s_ttyvp = NULL;
					SESS_UNLOCK(p->p_session);
					sx_xunlock(&proctree_lock);
					VOP_REVOKE(ttyvp, REVOKEALL);
					vrele(ttyvp);
					sx_xlock(&proctree_lock);
				}
			}
			/*
			 * Drop the session's vnode reference even when the
			 * tty was not ours (or was revoked above re-set it).
			 */
			if (sp->s_ttyvp) {
				ttyvp = sp->s_ttyvp;
				SESS_LOCK(p->p_session);
				sp->s_ttyvp = NULL;
				SESS_UNLOCK(p->p_session);
				vrele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		SESS_LOCK(p->p_session);
		sp->s_leader = NULL;
		SESS_UNLOCK(p->p_session);
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	p->p_traceflag = 0;	/* don't trace the vrele() */
	tracevp = p->p_tracevp;
	p->p_tracevp = NULL;
	tracecred = p->p_tracecred;
	p->p_tracecred = NULL;
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	/* vrele()/crfree() may block, so they happen with no locks held. */
	if (tracevp != NULL)
		vrele(tracevp);
	if (tracecred != NULL)
		crfree(tracecred);
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/*
	 * Release our limits structure.
	 */
	mtx_assert(&Giant, MA_OWNED);
	if (--p->p_limit->p_refcnt == 0) {
		FREE(p->p_limit, M_SUBPROC);
		p->p_limit = NULL;
	}

	/*
	 * Release this thread's reference to the ucred.  The actual proc
	 * reference will stay around until the proc is harvested by
	 * wait().  At this point the ucred is immutable (no other threads
	 * from this proc are around that can change it) so we leave the
	 * per-thread ucred pointer intact in case it is needed although
	 * in theory nothing should be using it at this point.
	 */
	crfree(td->td_ucred);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/* Reparent all our children to init, killing any traced ones. */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	PROC_LOCK(p);
	p->p_xstat = rv;
	*p->p_ru = p->p_stats->p_ru;
	mtx_lock_spin(&sched_lock);
	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
	mtx_unlock_spin(&sched_lock);
	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process
	 * 1 instead (and hope it will handle this situation).
	 */
	PROC_LOCK(p->p_pptr);
	if (p->p_pptr->p_procsig->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		struct proc *pp;

		pp = p->p_pptr;
		PROC_UNLOCK(pp);
		proc_reparent(p, initproc);
		PROC_LOCK(p->p_pptr);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup(pp);
	}

	/* Deliver the death signal (p_sigparent, or SIGCHLD for init). */
	if (p->p_sigparent && p->p_pptr != initproc)
		psignal(p->p_pptr, p->p_sigparent);
	else
		psignal(p->p_pptr, SIGCHLD);
	PROC_UNLOCK(p->p_pptr);

	/*
	 * If this is a kthread, then wakeup anyone waiting for it to exit.
	 */
	if (p->p_flag & P_KTHREAD)
		wakeup(p);
	PROC_UNLOCK(p);

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	PROC_LOCK(p);
	PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);
	mtx_lock_spin(&sched_lock);

	/* Drop all recursive Giant holds before blocking forever. */
	while (mtx_owned(&Giant))
		mtx_unlock(&Giant);

	/*
	 * We have to wait until after releasing all locks before
	 * changing p_state.  If we block on a mutex then we will be
	 * back at SRUN when we resume and our parent will never
	 * harvest us.
	 */
	p->p_state = PRS_ZOMBIE;

	wakeup(p->p_pptr);
	PROC_UNLOCK(p->p_pptr);
	cnt.v_swtch++;
	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_sched_exit(td); /* XXXKSE check if this should be in thread_exit */
	/*
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
	/* NOTREACHED */
}
521
522#ifdef COMPAT_43
523/*
524 * MPSAFE.  The dirty work is handled by wait1().
525 */
526int
527owait(td, uap)
528	struct thread *td;
529	register struct owait_args /* {
530		int     dummy;
531	} */ *uap;
532{
533	struct wait_args w;
534
535	w.options = 0;
536	w.rusage = NULL;
537	w.pid = WAIT_ANY;
538	w.status = NULL;
539	return (wait1(td, &w, 1));
540}
541#endif /* COMPAT_43 */
542
/*
 * wait4(2) entry point.
 *
 * MPSAFE.  The dirty work is handled by wait1().
 */
int
wait4(struct thread *td, struct wait_args *uap)
{

	/* Non-compat mode: status is reported through *uap->status. */
	return (wait1(td, uap, 0));
}
554
/*
 * Common back end for wait4() and owait().  Scans the caller's children
 * for one matching uap->pid (WAIT_ANY, a pid, 0 for "my pgrp", or a
 * negated pgid), harvesting zombies, reporting stops/continues, and
 * sleeping (unless WNOHANG) until something is reportable.  When
 * 'compat' is non-zero (COMPAT_43 owait), status is returned in
 * td_retval[1] instead of being copied out.
 *
 * Returns 0 on success (child pid in td_retval[0]), ECHILD if no
 * matching children exist, EINVAL for bad options, or a copyout/sleep
 * error.
 *
 * MPSAFE
 */
static int
wait1(td, uap, compat)
	register struct thread *td;
	register struct wait_args /* {
		int pid;
		int *status;
		int options;
		struct rusage *rusage;
	} */ *uap;
	int compat;
{
	struct rusage ru;
	int nfound;
	struct proc *p, *q, *t;
	int status, error;

	q = td->td_proc;
	/* pid == 0 means "any child in my process group". */
	if (uap->pid == 0) {
		PROC_LOCK(q);
		uap->pid = -q->p_pgid;
		PROC_UNLOCK(q);
	}
	if (uap->options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);
	mtx_lock(&Giant);
loop:
	nfound = 0;
	sx_xlock(&proctree_lock);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		PROC_LOCK(p);
		/* Skip children not selected by pid/pgid. */
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid && p->p_pgid != -uap->pid) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((uap->options & WLINUXCLONE) != 0)) {
			PROC_UNLOCK(p);
			continue;
		}

		nfound++;
		if (p->p_state == PRS_ZOMBIE) {
			/*
			 * Allow the scheduler to adjust the priority of the
			 * parent when a kseg is exiting.
			 */
			if (curthread->td_proc->p_pid != 1) {
				mtx_lock_spin(&sched_lock);
				sched_exit(curthread->td_ksegrp,
				    FIRST_KSEGRP_IN_PROC(p));
				mtx_unlock_spin(&sched_lock);
			}

			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat)
				td->td_retval[1] = p->p_xstat;
			else
#endif
			/* The proc lock is dropped across each copyout. */
			if (uap->status) {
				status = p->p_xstat;	/* convert to int */
				PROC_UNLOCK(p);
				if ((error = copyout(&status,
				    uap->status, sizeof(status)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
				PROC_LOCK(p);
			}
			if (uap->rusage) {
				bcopy(p->p_ru, &ru, sizeof(ru));
				PROC_UNLOCK(p);
				if ((error = copyout(&ru,
				    uap->rusage, sizeof (struct rusage)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
			} else
				PROC_UNLOCK(p);
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PROC_LOCK(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				PROC_UNLOCK(p);
				psignal(t, SIGCHLD);
				wakeup(t);
				PROC_UNLOCK(t);
				sx_xunlock(&proctree_lock);
				mtx_unlock(&Giant);
				return (0);
			}

			/*
			 * Remove other references to this process to ensure
			 * we have an exclusive reference.
			 */
			sx_xlock(&allproc_lock);
			LIST_REMOVE(p, p_list);	/* off zombproc */
			sx_xunlock(&allproc_lock);
			LIST_REMOVE(p, p_sibling);
			leavepgrp(p);
			sx_xunlock(&proctree_lock);

			/*
			 * As a side effect of this lock, we know that
			 * all other writes to this proc are visible now, so
			 * no more locking is needed for p.
			 */
			PROC_LOCK(p);
			p->p_xstat = 0;		/* XXX: why? */
			PROC_UNLOCK(p);
			PROC_LOCK(q);
			/* Fold the dead child's rusage into ours. */
			ruadd(&q->p_stats->p_cru, p->p_ru);
			PROC_UNLOCK(q);
			FREE(p->p_ru, M_ZOMBIE);
			p->p_ru = NULL;

			/*
			 * Decrement the count of procs running with this uid.
			 */
			(void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;	/* XXX: why? */

			/*
			 * Remove unused arguments
			 */
			pargs_drop(p->p_args);
			p->p_args = NULL;

			/* Drop the shared signal state; free on last ref. */
			if (--p->p_procsig->ps_refcnt == 0) {
				if (p->p_sigacts != &p->p_uarea->u_sigacts)
					FREE(p->p_sigacts, M_SUBPROC);
				FREE(p->p_procsig, M_SUBPROC);
				p->p_procsig = NULL;
			}

			/*
			 * do any thread-system specific cleanups
			 */
			thread_wait(p);

			/*
			 * Give vm and machine-dependent layer a chance
			 * to free anything that cpu_exit couldn't
			 * release while still running in process context.
			 */
			vm_waitproc(p);
			mtx_destroy(&p->p_mtx);
#ifdef MAC
			mac_destroy_proc(p);
#endif
			KASSERT(FIRST_THREAD_IN_PROC(p),
			    ("wait1: no residual thread!"));
			uma_zfree(proc_zone, p);
			sx_xlock(&allproc_lock);
			nprocs--;
			sx_xunlock(&allproc_lock);
			mtx_unlock(&Giant);
			return (0);
		}
		/*
		 * Report a stopped child: only once (P_WAITED), and only
		 * when it is traced or the caller asked with WUNTRACED.
		 */
		if (P_SHOULDSTOP(p) && (p->p_suspcount == p->p_numthreads) &&
		    ((p->p_flag & P_WAITED) == 0) &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat) {
				td->td_retval[1] = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = 0;
			} else
#endif
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = copyout(&status,
					uap->status, sizeof(status));
			} else {
				PROC_UNLOCK(p);
				error = 0;
			}
			mtx_unlock(&Giant);
			return (error);
		}
		/* Report a continued child when WCONTINUED was requested. */
		if (uap->options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
			p->p_flag &= ~P_CONTINUED;
			PROC_UNLOCK(p);

			if (uap->status) {
				status = SIGCONT;
				error = copyout(&status,
				    uap->status, sizeof(status));
			} else
				error = 0;

			mtx_unlock(&Giant);
			return (error);
		}
		PROC_UNLOCK(p);
	}
	if (nfound == 0) {
		sx_xunlock(&proctree_lock);
		mtx_unlock(&Giant);
		return (ECHILD);
	}
	if (uap->options & WNOHANG) {
		sx_xunlock(&proctree_lock);
		td->td_retval[0] = 0;
		mtx_unlock(&Giant);
		return (0);
	}
	/*
	 * Nothing reportable yet: sleep on our own proc (exiting children
	 * wake their parent) and rescan from the top.
	 */
	PROC_LOCK(q);
	sx_xunlock(&proctree_lock);
	error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "wait", 0);
	PROC_UNLOCK(q);
	if (error) {
		mtx_unlock(&Giant);
		return (error);
	}
	goto loop;
}
803
804/*
805 * Make process 'parent' the new parent of process 'child'.
806 * Must be called with an exclusive hold of proctree lock.
807 */
808void
809proc_reparent(child, parent)
810	register struct proc *child;
811	register struct proc *parent;
812{
813
814	sx_assert(&proctree_lock, SX_XLOCKED);
815	PROC_LOCK_ASSERT(child, MA_OWNED);
816	if (child->p_pptr == parent)
817		return;
818
819	LIST_REMOVE(child, p_sibling);
820	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
821	child->p_pptr = parent;
822}
823
824/*
825 * The next two functions are to handle adding/deleting items on the
826 * exit callout list
827 *
828 * at_exit():
829 * Take the arguments given and put them onto the exit callout list,
830 * However first make sure that it's not already there.
831 * returns 0 on success.
832 */
833
834int
835at_exit(function)
836	exitlist_fn function;
837{
838	struct exitlist *ep;
839
840#ifdef INVARIANTS
841	/* Be noisy if the programmer has lost track of things */
842	if (rm_at_exit(function))
843		printf("WARNING: exit callout entry (%p) already present\n",
844		    function);
845#endif
846	ep = malloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
847	if (ep == NULL)
848		return (ENOMEM);
849	ep->function = function;
850	TAILQ_INSERT_TAIL(&exit_list, ep, next);
851	return (0);
852}
853
854/*
855 * Scan the exit callout list for the given item and remove it.
856 * Returns the number of items removed (0 or 1)
857 */
858int
859rm_at_exit(function)
860	exitlist_fn function;
861{
862	struct exitlist *ep;
863
864	TAILQ_FOREACH(ep, &exit_list, next) {
865		if (ep->function == function) {
866			TAILQ_REMOVE(&exit_list, ep, next);
867			free(ep, M_ATEXIT);
868			return (1);
869		}
870	}
871	return (0);
872}
873