/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $Id: kern_exit.c,v 1.78 1999/04/17 08:36:04 peter Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/aio.h>

#ifdef COMPAT_43
#include <machine/reg.h>
#include <machine/psl.h>
#endif
#include <machine/limits.h>	/* for UCHAR_MAX = typeof(p_priority)_MAX */

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <sys/user.h>

/* Required to be non-static for SysVR4 emulator */
MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

static int wait1 __P((struct proc *, struct wait_args *, int));

/*
 * callout list for things to do at exit time
 */
typedef struct exit_list_element {
	struct exit_list_element *next;
	exitlist_fn function;
} *ele_p;

static ele_p exit_list;

/*
 * exit --
 *	Death of process.
 */
void
exit(p, uap)
	struct proc *p;
	struct rexit_args /* {
		int	rval;
	} */ *uap;
{

	exit1(p, W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(p, rv)
	register struct proc *p;
	int rv;
{
	register struct proc *q, *nq;
	register struct vmspace *vm;
	ele_p ep = exit_list;

	if (p->p_pid == 1) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

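	/*
	 * Cancel and drain any asynchronous I/O still outstanding for this
	 * process before the rest of its state is torn down.
	 */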
	aio_proc_rundown(p);

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;
		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			killArgs.pid = q->p_pid;
			/*
			 * The kill() interface is better here than
			 * delivering the signal internally.
			 */
			kill(p, &killArgs);
			nq = q;
			q = q->p_peers;
			/*
			 * orphan the threads so we don't mess up
			 * when they call exit
			 */
			nq->p_peers = 0;
			nq->p_leader = nq;
		}

	/* otherwise are we a peer? */
	} else if (p->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	while (ep) {
		(*ep->function)(p);
		ep = ep->next;
	}

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
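	/*
	 * Allocate the rusage structure that will hold our final statistics
	 * for the parent to collect in wait(); it is freed there.
	 */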
	MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
		M_ZOMBIE, M_WAITOK);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	p->p_flag |= P_WEXIT;
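	/*
	 * Discard any pending signals and stop the real interval timer so
	 * that realitexpire() cannot fire once we are gone.
	 */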
	p->p_siglist = 0;
	if (timevalisset(&p->p_realtimer.it_value))
		untimeout(realitexpire, (caddr_t)p, p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;
	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 */
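	/*
	 * Only tear the user map down if we hold the last reference; a
	 * vmspace shared with another process (e.g. via rfork) is left
	 * for its remaining users.
	 */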
	if (vm->vm_refcnt == 1) {
		if (vm->vm_shm)
			shmexit(p);
		pmap_remove_pages(vmspace_pmap(vm), VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS);
		(void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS);
	}

	if (SESS_LEADER(p)) {
		register struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				(void) ttywait(sp->s_ttyp);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp)
					VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
			}
			if (sp->s_ttyvp)
				vrele(sp->s_ttyvp);
			sp->s_ttyvp = NULL;
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
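	/*
	 * Fix up job-control state for our process group (which may now be
	 * orphaned) and write the final accounting record, if enabled.
	 */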
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	p->p_traceflag = 0;	/* don't trace the vrele() */
	if (p->p_tracep)
		vrele(p->p_tracep);
#endif
	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	p->p_stat = SZOMB;

	LIST_REMOVE(p, p_hash);

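	/*
	 * Hand any remaining children over to init, which will reap them
	 * when they exit.
	 */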
	q = p->p_children.lh_first;
	if (q)		/* only need this if any child is SZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = q->p_sibling.le_next;
		LIST_REMOVE(q, p_sibling);
		LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
		q->p_pptr = initproc;
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			psignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	p->p_xstat = rv;
	*p->p_ru = p->p_stats->p_ru;
	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * Pretend that an mi_switch() to the next process occurs now.  We
	 * must set `switchtime' directly since we will call cpu_switch()
	 * directly.  Set it now so that the rest of the exit time gets
	 * counted somewhere if possible.
	 */
	microuptime(&switchtime);
	switchticks = ticks;

	/*
	 * Notify parent that we're gone.  If parent has the P_NOCLDWAIT
	 * flag set, notify process 1 instead (and hope it will handle
	 * this situation).
	 */
	if (p->p_pptr->p_procsig->ps_flag & P_NOCLDWAIT) {
		struct proc *pp = p->p_pptr;
		proc_reparent(p, initproc);
		/*
		 * If this was the last child of our parent, notify
		 * the parent so that if it is blocked in wait(2) it
		 * will continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup((caddr_t)pp);
	}

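	/*
	 * Send the parent its requested exit signal; fall back to SIGCHLD
	 * when none is set or when the parent is init.
	 */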
	if (p->p_sigparent && p->p_pptr != initproc) {
		psignal(p->p_pptr, p->p_sigparent);
	} else {
		psignal(p->p_pptr, SIGCHLD);
	}

	wakeup((caddr_t)p->p_pptr);
#if defined(tahoe)
	/* move this to cpu_exit */
	p->p_addr->u_pcb.pcb_savacc.faddr = (float *)NULL;
#endif
	/*
	 * Clear curproc after we've done all operations
	 * that could block, and before tearing down the rest
	 * of the process state that might be used from clock, etc.
	 * Also, can't clear curproc while we're still runnable,
	 * as we're not on a run queue (we are current, just not
	 * a proper proc any longer!).
	 *
	 * Other substructures are freed from wait().
	 */
	SET_CURPROC(NULL);
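	/*
	 * Drop our reference on the shared resource-limit structure and
	 * free it if we were the last user.
	 */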
	if (--p->p_limit->p_refcnt == 0) {
		FREE(p->p_limit, M_SUBPROC);
		p->p_limit = NULL;
	}

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space, the kernel stack and pcb.
	 * The address space is released by "vmspace_free(p->p_vmspace)";
	 * This is machine-dependent, as we may have to change stacks
	 * or ensure that the current one isn't reallocated before we
	 * finish.  cpu_exit will end with a call to cpu_switch(), finishing
	 * our execution (pun intended).
	 */
	cpu_exit(p);
}

#ifdef COMPAT_43
#if defined(hp300) || defined(luna68k)
#include <machine/frame.h>
#define GETPS(rp)	((struct frame *)(rp))->f_sr
#else
#define GETPS(rp)	(rp)[PS]
#endif

int
owait(p, uap)
	struct proc *p;
	register struct owait_args /* {
		int     dummy;
	} */ *uap;
{
	struct wait_args w;

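	/*
	 * Old 4.3BSD compatibility: when all condition codes are set in the
	 * saved PSL the caller used the wait3()-style convention, with the
	 * options and rusage pointer passed in registers R0 and R1;
	 * otherwise treat it as a plain wait().
	 */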
#ifdef PSL_ALLCC
	if ((GETPS(p->p_md.md_regs) & PSL_ALLCC) != PSL_ALLCC) {
		w.options = 0;
		w.rusage = NULL;
	} else {
		w.options = p->p_md.md_regs[R0];
		w.rusage = (struct rusage *)p->p_md.md_regs[R1];
	}
#else
	w.options = 0;
	w.rusage = NULL;
#endif
	w.pid = WAIT_ANY;
	w.status = NULL;
	return (wait1(p, &w, 1));
}
#endif /* COMPAT_43 */

int
wait4(p, uap)
	struct proc *p;
	struct wait_args *uap;
{

	return (wait1(p, uap, 0));
}

static int
wait1(q, uap, compat)
	register struct proc *q;
	register struct wait_args /* {
		int pid;
		int *status;
		int options;
		struct rusage *rusage;
	} */ *uap;
	int compat;
{
	register int nfound;
	register struct proc *p, *t;
	int status, error;

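	/* A pid of 0 means wait for any child in our own process group. */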
	if (uap->pid == 0)
		uap->pid = -q->p_pgid;
	if (uap->options &~ (WUNTRACED|WNOHANG|WLINUXCLONE))
		return (EINVAL);
loop:
	nfound = 0;
	for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) {
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid && p->p_pgid != -uap->pid)
			continue;

		/* This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid functions
		 * need to be able to distinguish between waiting on a process and
		 * waiting on a thread.  It is a thread if p_sigparent is not SIGCHLD,
		 * and the WLINUXCLONE option signifies we want to wait for threads
		 * and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^ ((uap->options & WLINUXCLONE) != 0))
			continue;

		nfound++;
		if (p->p_stat == SZOMB) {
			/* charge child's scheduling cpu usage to parent */
			if (curproc->p_pid != 1) {
				curproc->p_estcpu = min(curproc->p_estcpu +
				    p->p_estcpu, UCHAR_MAX);
			}

			q->p_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat)
				q->p_retval[1] = p->p_xstat;
			else
#endif
			if (uap->status) {
				status = p->p_xstat;	/* convert to int */
				if ((error = copyout((caddr_t)&status,
				    (caddr_t)uap->status, sizeof(status))))
					return (error);
			}
			if (uap->rusage && (error = copyout((caddr_t)p->p_ru,
			    (caddr_t)uap->rusage, sizeof (struct rusage))))
				return (error);
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				psignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				return (0);
			}
			p->p_xstat = 0;
			ruadd(&q->p_stats->p_cru, p->p_ru);
			FREE(p->p_ru, M_ZOMBIE);
			p->p_ru = NULL;

			/*
			 * Decrement the count of procs running with this uid.
			 */
			(void)chgproccnt(p->p_cred->p_ruid, -1);

			/*
			 * Release reference to text vnode
			 */
			if (p->p_textvp)
				vrele(p->p_textvp);

			/*
			 * Free up credentials.
			 */
			if (--p->p_cred->p_refcnt == 0) {
				crfree(p->p_cred->pc_ucred);
				FREE(p->p_cred, M_SUBPROC);
				p->p_cred = NULL;
			}

			/*
			 * Finally finished with old proc entry.
			 * Unlink it from its process group and free it.
			 */
			leavepgrp(p);
			LIST_REMOVE(p, p_list);	/* off zombproc */
			LIST_REMOVE(p, p_sibling);

			if (--p->p_procsig->ps_refcnt == 0) {
				if (p->p_sigacts != &p->p_addr->u_sigacts)
					FREE(p->p_sigacts, M_SUBPROC);
				FREE(p->p_procsig, M_SUBPROC);
				p->p_procsig = NULL;
			}

			/*
			 * Give machine-dependent layer a chance
			 * to free anything that cpu_exit couldn't
			 * release while still running in process context.
			 */
			cpu_wait(p);
			zfree(proc_zone, p);
			nprocs--;
			return (0);
		}
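		/*
		 * Report a stopped child only if it has not been reported
		 * already and either it is being traced or WUNTRACED was
		 * requested.
		 */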
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			q->p_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat) {
				q->p_retval[1] = W_STOPCODE(p->p_xstat);
				error = 0;
			} else
#endif
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout((caddr_t)&status,
					(caddr_t)uap->status, sizeof(status));
			} else
				error = 0;
			return (error);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (uap->options & WNOHANG) {
		q->p_retval[0] = 0;
		return (0);
	}
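	/*
	 * No child was ready; sleep until a child changes state and wakes
	 * us, then rescan.
	 */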
	if ((error = tsleep((caddr_t)q, PWAIT | PCATCH, "wait", 0)))
		return (error);
	goto loop;
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(child, parent)
	register struct proc *child;
	register struct proc *parent;
{

	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list
 *
 * at_exit():
 * Take the function given and put it onto the exit callout list,
 * but first make sure that it is not already there.
 * Returns 0 on success.
 */
int
at_exit(function)
	exitlist_fn function;
{
	ele_p ep;

	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		printf("exit callout entry already present\n");
	ep = malloc(sizeof(*ep), M_TEMP, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->next = exit_list;
	ep->function = function;
	exit_list = ep;
	return (0);
}

/*
 * Scan the exit callout list for the given function and remove it.
 * Returns the number of items removed.
 * Logically this can only be 0 or 1.
 */
int
rm_at_exit(function)
	exitlist_fn function;
{
	ele_p *epp, ep;
	int count;

	count = 0;
	epp = &exit_list;
	ep = *epp;
	while (ep) {
		if (ep->function == function) {
			*epp = ep->next;
			free(ep, M_TEMP);
			count++;
		} else {
			epp = &ep->next;
		}
		ep = *epp;
	}
	return (count);
}

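/*
 * If we are the only user of the shared signal state, pull the sigacts
 * back into the per-process U-area so that the separately allocated
 * copy can be freed.
 */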
void
check_sigacts(void)
{
	struct proc *p = curproc;
	struct sigacts *pss;
	int s;

	if (p->p_procsig->ps_refcnt == 1 &&
	    p->p_sigacts != &p->p_addr->u_sigacts) {
		pss = p->p_sigacts;
		s = splhigh();
		p->p_addr->u_sigacts = *pss;
		p->p_sigacts = &p->p_addr->u_sigacts;
		splx(s);
		FREE(pss, M_SUBPROC);
	}
}