kern_exit.c revision 104390
1/* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 39 * $FreeBSD: head/sys/kern/kern_exit.c 104390 2002-10-02 23:12:01Z julian $ 40 */ 41 42#include "opt_compat.h" 43#include "opt_ktrace.h" 44 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/sysproto.h> 48#include <sys/kernel.h> 49#include <sys/malloc.h> 50#include <sys/lock.h> 51#include <sys/mutex.h> 52#include <sys/proc.h> 53#include <sys/pioctl.h> 54#include <sys/tty.h> 55#include <sys/wait.h> 56#include <sys/vmmeter.h> 57#include <sys/vnode.h> 58#include <sys/resourcevar.h> 59#include <sys/signalvar.h> 60#include <sys/sx.h> 61#include <sys/ptrace.h> 62#include <sys/acct.h> /* for acct_process() function prototype */ 63#include <sys/filedesc.h> 64#include <sys/shm.h> 65#include <sys/sem.h> 66#include <sys/jail.h> 67#ifdef KTRACE 68#include <sys/ktrace.h> 69#endif 70 71#include <vm/vm.h> 72#include <vm/vm_extern.h> 73#include <vm/vm_param.h> 74#include <vm/pmap.h> 75#include <vm/vm_map.h> 76#include <vm/uma.h> 77#include <sys/user.h> 78 79/* Required to be non-static for SysVR4 emulator */ 80MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status"); 81 82static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback"); 83 84static int wait1(struct thread *, struct wait_args *, int); 85 86/* 87 * callout list for things to do at exit time 88 */ 89struct exitlist { 90 exitlist_fn function; 91 TAILQ_ENTRY(exitlist) next; 92}; 93 94TAILQ_HEAD(exit_list_head, exitlist); 95static 
struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * exit --
 *	Death of process.
 *
 * System call entry point: wraps exit1() with Giant held.  Never returns.
 *
 * MPSAFE
 */
void
sys_exit(td, uap)
	struct thread *td;
	struct sys_exit_args /* {
		int	rval;
	} */ *uap;
{

	mtx_lock(&Giant);
	/* Low 8 bits of rval become the wait(2) exit status; no signal. */
	exit1(td, W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 *
 * 'rv' is a wait(2)-encoded status (W_EXITCODE/signal form).  This function
 * never returns: it finishes in thread_exit().  Caller must hold Giant
 * (asserted below via GIANT_REQUIRED).
 */
void
exit1(td, rv)
	register struct thread *td;
	int rv;
{
	struct exitlist *ep;
	struct proc *p, *nq, *q;
	struct tty *tp;
	struct vnode *ttyvp;
	register struct vmspace *vm;
	struct vnode *vtmp;
#ifdef KTRACE
	struct vnode *tracevp;
#endif

	GIANT_REQUIRED;

	p = td->td_proc;
	/* init exiting would leave orphans unreapable; treat as fatal. */
	if (p == initproc) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * XXXXKSE: MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	if (p->p_flag & P_KSES) {
		/*
		 * First check if some other thread got here before us..
		 * if so, act apropriatly, (exit or suspend);
		 */
		thread_suspend_check(0);
		/*
		 * Here is a trick..
		 * We need to free up our KSE to process other threads
		 * so that we can safely set the UNBOUND flag
		 * (whether or not we have a mailbox) as we are NEVER
		 * going to return to the user.
		 * The flag will not be set yet if we are exiting
		 * because of a signal, pagefault, or similar
		 * (or even an exit(2) from the UTS).
		 */
		td->td_flags |= TDF_UNBOUND;

		/*
		 * Kill off the other threads. This requires
		 * Some co-operation from other parts of the kernel
		 * so it may not be instant.
		 * With this state set:
		 * Any thread entering the kernel from userspace will
		 * thread_exit() in trap().  Any thread attempting to
		 * sleep will return immediatly
		 * with EINTR or EWOULDBLOCK, which will hopefully force them
		 * to back out to userland, freeing resources as they go, and
		 * anything attempting to return to userland will thread_exit()
		 * from userret().  thread_exit() will unsuspend us
		 * when the last other thread exits.
		 */
		if (thread_single(SINGLE_EXIT)) {
			panic ("Exit: Single threading fouled up");
		}
		/*
		 * All other activity in this process is now stopped.
		 * Remove excess KSEs and KSEGRPS. XXXKSE (when we have them)
		 * ...
		 * Turn off threading support.
		 */
		p->p_flag &= ~P_KSES;
		td->td_flags &= ~TDF_UNBOUND;
		thread_single_end();	/* Don't need this any more. */
	}
	/*
	 * With this state set:
	 * Any thread entering the kernel from userspace will thread_exit()
	 * in trap().  Any thread attempting to sleep will return immediatly
	 * with EINTR or EWOULDBLOCK, which will hopefully force them
	 * to back out to userland, freeing resources as they go, and
	 * anything attempting to return to userland will thread_exit()
	 * from userret().  thread_exit() will do a wakeup on p->p_numthreads
	 * if it transitions to 1.
	 */

	p->p_flag |= P_WEXIT;
	PROC_UNLOCK(p);

	/* Are we a task leader? */
	PROC_LOCK(p);
	if (p == p->p_leader) {
		/* Kill all our peers first, then wait for them to drain. */
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		/* Peers wake us via wakeup(p->p_leader) as they exit. */
		while (p->p_peers)
			msleep(p, &p->p_mtx, PWAIT, "exit1", 0);
	}
	PROC_UNLOCK(p);

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(p);

	stopprofclock(p);

	/* Zombie rusage record; freed by wait1() when we are reaped. */
	MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
		M_ZOMBIE, M_WAITOK);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	SIGEMPTYSET(p->p_siglist);
	PROC_UNLOCK(p);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_itcallout);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(td); /* XXXKSE *//* may not be the one in proc */

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	PROC_LOCK(p->p_leader);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup(p->p_leader);
	}
	PROC_UNLOCK(p->p_leader);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;
	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 */
	if (--vm->vm_refcnt == 0) {
		if (vm->vm_shm)
			shmexit(p);
		pmap_remove_pages(vmspace_pmap(vm), vm_map_min(&vm->vm_map),
		    vm_map_max(&vm->vm_map));
		(void) vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
		    vm_map_max(&vm->vm_map));
		vm->vm_freer = p;
	}

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		register struct session *sp;

		sp = p->p_session;
		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				tp = sp->s_ttyp;
				if (sp->s_ttyp->t_pgrp) {
					PGRP_LOCK(sp->s_ttyp->t_pgrp);
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
					PGRP_UNLOCK(sp->s_ttyp->t_pgrp);
				}
				/* XXX tp should be locked. */
				/* ttywait() may sleep; drop the tree lock. */
				sx_xunlock(&proctree_lock);
				(void) ttywait(tp);
				sx_xlock(&proctree_lock);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp) {
					ttyvp = sp->s_ttyvp;
					SESS_LOCK(p->p_session);
					sp->s_ttyvp = NULL;
					SESS_UNLOCK(p->p_session);
					sx_xunlock(&proctree_lock);
					VOP_REVOKE(ttyvp, REVOKEALL);
					vrele(ttyvp);
					sx_xlock(&proctree_lock);
				}
			}
			if (sp->s_ttyvp) {
				ttyvp = sp->s_ttyvp;
				SESS_LOCK(p->p_session);
				sp->s_ttyvp = NULL;
				SESS_UNLOCK(p->p_session);
				vrele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		SESS_LOCK(p->p_session);
		sp->s_leader = NULL;
		SESS_UNLOCK(p->p_session);
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	p->p_traceflag = 0;	/* don't trace the vrele() */
	tracevp = p->p_tracep;
	p->p_tracep = NULL;
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL)
		vrele(tracevp);
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/*
	 * Release our limits structure.
	 */
	mtx_assert(&Giant, MA_OWNED);
	if (--p->p_limit->p_refcnt == 0) {
		FREE(p->p_limit, M_SUBPROC);
		p->p_limit = NULL;
	}

	/*
	 * Release this thread's reference to the ucred.  The actual proc
	 * reference will stay around until the proc is harvested by
	 * wait().  At this point the ucred is immutable (no other threads
	 * from this proc are around that can change it) so we leave the
	 * per-thread ucred pointer intact in case it is needed although
	 * in theory nothing should be using it at this point.
	 */
	crfree(td->td_ucred);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * proctree_lock is held from here until after cpu_exit() below;
	 * it protects the reparenting of our children and the parent
	 * notification.
	 */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	PROC_LOCK(p);
	p->p_xstat = rv;
	*p->p_ru = p->p_stats->p_ru;
	mtx_lock_spin(&sched_lock);
	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
	mtx_unlock_spin(&sched_lock);
	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process
	 * 1 instead (and hope it will handle this situation).
	 */
	PROC_LOCK(p->p_pptr);
	if (p->p_pptr->p_procsig->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		struct proc *pp;

		pp = p->p_pptr;
		PROC_UNLOCK(pp);
		proc_reparent(p, initproc);
		PROC_LOCK(p->p_pptr);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 * (pp is unlocked here, but its child list is still
		 * protected by proctree_lock, which we hold.)
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup(pp);
	}

	if (p->p_sigparent && p->p_pptr != initproc)
		psignal(p->p_pptr, p->p_sigparent);
	else
		psignal(p->p_pptr, SIGCHLD);
	PROC_UNLOCK(p->p_pptr);

	/*
	 * If this is a kthread, then wakeup anyone waiting for it to exit.
	 */
	if (p->p_flag & P_KTHREAD)
		wakeup(p);
	PROC_UNLOCK(p);

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space, the kernel stack and pcb.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	PROC_LOCK(p);
	PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);
	mtx_lock_spin(&sched_lock);
	/* Giant may be recursed; shed every hold before going zombie. */
	while (mtx_owned(&Giant))
		mtx_unlock(&Giant);

	/*
	 * We have to wait until after releasing all locks before
	 * changing p_state.  If we block on a mutex then we will be
	 * back at SRUN when we resume and our parent will never
	 * harvest us.
	 */
	p->p_state = PRS_ZOMBIE;

	wakeup(p->p_pptr);
	PROC_UNLOCK(p->p_pptr);
	cnt.v_swtch++;
	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_sched_exit(td); /* XXXKSE check if this should be in thread_exit */
	/*
	 * Make sure this thread is discarded from the zombie.
	 * This will also release this thread's reference to the ucred.
	 */
	thread_exit();
	panic("exit1");
}

#ifdef COMPAT_43
/*
 * Old-style wait(2): no pid selector, no options, status returned in
 * the second return register.
 *
 * MPSAFE.  The dirty work is handled by wait1().
 */
int
owait(td, uap)
	struct thread *td;
	register struct owait_args /* {
		int dummy;
	} */ *uap;
{
	struct wait_args w;

	w.options = 0;
	w.rusage = NULL;
	w.pid = WAIT_ANY;
	w.status = NULL;
	return (wait1(td, &w, 1));
}
#endif /* COMPAT_43 */

/*
 * MPSAFE.  The dirty work is handled by wait1().
 */
int
wait4(td, uap)
	struct thread *td;
	struct wait_args *uap;
{

	return (wait1(td, uap, 0));
}

/*
 * Common back end for wait4() and owait(): search our children for one
 * matching uap->pid that is a zombie, stopped (if WUNTRACED or traced),
 * or continued (if WCONTINUED); reap zombies completely.  'compat' != 0
 * selects the historic BSD return convention (status in td_retval[1]).
 *
 * Returns 0 on success (pid in td_retval[0]), ECHILD if no matching
 * child exists, or an errno from copyout()/msleep().
 *
 * MPSAFE
 */
static int
wait1(td, uap, compat)
	register struct thread *td;
	register struct wait_args /* {
		int pid;
		int *status;
		int options;
		struct rusage *rusage;
	} */ *uap;
	int compat;
{
	struct rusage ru;
	register int nfound;
	register struct proc *p, *q, *t;
	int status, error;
	struct kse *ke;
	struct ksegrp *kg;

	q = td->td_proc;	/* q is the waiting parent */
	if (uap->pid == 0) {
		/* pid 0 means "any child in our process group". */
		PROC_LOCK(q);
		uap->pid = -q->p_pgid;
		PROC_UNLOCK(q);
	}
	if (uap->options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);
	mtx_lock(&Giant);
loop:
	nfound = 0;
	sx_xlock(&proctree_lock);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		PROC_LOCK(p);
		/* pid > 0 selects a specific child; pid < 0 a pgrp. */
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid && p->p_pgid != -uap->pid) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((uap->options & WLINUXCLONE) != 0)) {
			PROC_UNLOCK(p);
			continue;
		}

		nfound++;
		if (p->p_state == PRS_ZOMBIE) {
			/*
			 * charge childs scheduling cpu usage to parent
			 * XXXKSE assume only one thread & kse & ksegrp
			 * keep estcpu in each ksegrp
			 * so charge it to the ksegrp that did the wait
			 * since process estcpu is sum of all ksegrps,
			 * this is strictly as expected.
			 * Assume that the child process aggregated all
			 * tke estcpu into the 'build-in' ksegrp.
			 * XXXKSE
			 */
			if (curthread->td_proc->p_pid != 1) {
				mtx_lock_spin(&sched_lock);
				curthread->td_ksegrp->kg_estcpu =
				    ESTCPULIM(curthread->td_ksegrp->kg_estcpu +
				    FIRST_KSEGRP_IN_PROC(p)->kg_estcpu);
				mtx_unlock_spin(&sched_lock);
			}

			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat)
				td->td_retval[1] = p->p_xstat;
			else
#endif
			if (uap->status) {
				status = p->p_xstat;	/* convert to int */
				/* Drop the proc lock across copyout(). */
				PROC_UNLOCK(p);
				if ((error = copyout(&status,
				    uap->status, sizeof(status)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
				PROC_LOCK(p);
			}
			if (uap->rusage) {
				/* Snapshot so copyout runs unlocked. */
				bcopy(p->p_ru, &ru, sizeof(ru));
				PROC_UNLOCK(p);
				if ((error = copyout(&ru,
				    uap->rusage, sizeof (struct rusage)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
			} else
				PROC_UNLOCK(p);
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 * NOTE(review): pfind() presumably returns t
			 * locked -- the PROC_UNLOCK(t) below balances
			 * it; confirm against pfind()'s contract.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PROC_LOCK(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				PROC_UNLOCK(p);
				psignal(t, SIGCHLD);
				wakeup(t);
				PROC_UNLOCK(t);
				sx_xunlock(&proctree_lock);
				mtx_unlock(&Giant);
				return (0);
			}
			/*
			 * Remove other references to this process to ensure
			 * we have an exclusive reference.
			 */
			leavepgrp(p);

			sx_xlock(&allproc_lock);
			LIST_REMOVE(p, p_list);	/* off zombproc */
			sx_xunlock(&allproc_lock);

			LIST_REMOVE(p, p_sibling);
			sx_xunlock(&proctree_lock);

			/*
			 * As a side effect of this lock, we know that
			 * all other writes to this proc are visible now, so
			 * no more locking is needed for p.
			 */
			PROC_LOCK(p);
			p->p_xstat = 0;		/* XXX: why? */
			PROC_UNLOCK(p);
			PROC_LOCK(q);
			ruadd(&q->p_stats->p_cru, p->p_ru);
			PROC_UNLOCK(q);
			FREE(p->p_ru, M_ZOMBIE);
			p->p_ru = NULL;

			/*
			 * Decrement the count of procs running with this uid.
			 */
			(void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;	/* XXX: why? */

			/*
			 * Remove unused arguments
			 */
			pargs_drop(p->p_args);
			p->p_args = NULL;

			/* Shared signal state: free on last reference. */
			if (--p->p_procsig->ps_refcnt == 0) {
				if (p->p_sigacts != &p->p_uarea->u_sigacts)
					FREE(p->p_sigacts, M_SUBPROC);
				FREE(p->p_procsig, M_SUBPROC);
				p->p_procsig = NULL;
			}

			/*
			 * There should only be one KSE/KSEGRP but
			 * do it right anyhow.
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				FOREACH_KSE_IN_GROUP(kg, ke) {
					/* Free the KSE spare thread. */
					if (ke->ke_tdspare != NULL) {
						thread_free(ke->ke_tdspare);
						ke->ke_tdspare = NULL;
					}
				}
			}
			thread_reap();	/* check for zombie threads */

			/*
			 * Give vm and machine-dependent layer a chance
			 * to free anything that cpu_exit couldn't
			 * release while still running in process context.
			 */
			vm_waitproc(p);
			mtx_destroy(&p->p_mtx);
			KASSERT(FIRST_THREAD_IN_PROC(p),
			    ("wait1: no residual thread!"));
			uma_zfree(proc_zone, p);
			sx_xlock(&allproc_lock);
			nprocs--;
			sx_xunlock(&allproc_lock);
			mtx_unlock(&Giant);
			return (0);
		}
		/* Stopped child: report once (P_WAITED) if traced/WUNTRACED. */
		if (P_SHOULDSTOP(p) && ((p->p_flag & P_WAITED) == 0) &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat) {
				td->td_retval[1] = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = 0;
			} else
#endif
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = copyout(&status,
				    uap->status, sizeof(status));
			} else {
				PROC_UNLOCK(p);
				error = 0;
			}
			mtx_unlock(&Giant);
			return (error);
		}
		/* Continued child: report and clear the P_CONTINUED flag. */
		if (uap->options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
			p->p_flag &= ~P_CONTINUED;
			PROC_UNLOCK(p);

			if (uap->status) {
				status = SIGCONT;
				error = copyout(&status,
				    uap->status, sizeof(status));
			} else
				error = 0;

			mtx_unlock(&Giant);
			return (error);
		}
		PROC_UNLOCK(p);
	}
	if (nfound == 0) {
		sx_xunlock(&proctree_lock);
		mtx_unlock(&Giant);
		return (ECHILD);
	}
	if (uap->options & WNOHANG) {
		sx_xunlock(&proctree_lock);
		td->td_retval[0] = 0;
		mtx_unlock(&Giant);
		return (0);
	}
	/*
	 * Nothing to report yet: sleep on our own proc (exiting children
	 * wakeup(p->p_pptr)) and rescan.  PCATCH lets a signal interrupt.
	 */
	PROC_LOCK(q);
	sx_xunlock(&proctree_lock);
	error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "wait", 0);
	PROC_UNLOCK(q);
	if (error) {
		mtx_unlock(&Giant);
		return (error);
	}
	goto loop;
}

/*
 * Make process 'parent' the new parent of process 'child'.
 * Must be called with an exclusive hold of proctree lock.
818 */ 819void 820proc_reparent(child, parent) 821 register struct proc *child; 822 register struct proc *parent; 823{ 824 825 sx_assert(&proctree_lock, SX_XLOCKED); 826 PROC_LOCK_ASSERT(child, MA_OWNED); 827 if (child->p_pptr == parent) 828 return; 829 830 LIST_REMOVE(child, p_sibling); 831 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); 832 child->p_pptr = parent; 833} 834 835/* 836 * The next two functions are to handle adding/deleting items on the 837 * exit callout list 838 * 839 * at_exit(): 840 * Take the arguments given and put them onto the exit callout list, 841 * However first make sure that it's not already there. 842 * returns 0 on success. 843 */ 844 845int 846at_exit(function) 847 exitlist_fn function; 848{ 849 struct exitlist *ep; 850 851#ifdef INVARIANTS 852 /* Be noisy if the programmer has lost track of things */ 853 if (rm_at_exit(function)) 854 printf("WARNING: exit callout entry (%p) already present\n", 855 function); 856#endif 857 ep = malloc(sizeof(*ep), M_ATEXIT, M_NOWAIT); 858 if (ep == NULL) 859 return (ENOMEM); 860 ep->function = function; 861 TAILQ_INSERT_TAIL(&exit_list, ep, next); 862 return (0); 863} 864 865/* 866 * Scan the exit callout list for the given item and remove it. 867 * Returns the number of items removed (0 or 1) 868 */ 869int 870rm_at_exit(function) 871 exitlist_fn function; 872{ 873 struct exitlist *ep; 874 875 TAILQ_FOREACH(ep, &exit_list, next) { 876 if (ep->function == function) { 877 TAILQ_REMOVE(&exit_list, ep, next); 878 free(ep, M_ATEXIT); 879 return (1); 880 } 881 } 882 return (0); 883} 884