--- kern_fork.c	(232240)
+++ kern_fork.c	(235787)
1/*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94
35 */
36
37#include <sys/cdefs.h>
-38__FBSDID("$FreeBSD: head/sys/kern/kern_fork.c 232240 2012-02-27 21:10:10Z kib $");
+38__FBSDID("$FreeBSD: head/sys/kern/kern_fork.c 235787 2012-05-22 15:58:27Z trasz $");
39
40#include "opt_kdtrace.h"
41#include "opt_ktrace.h"
42#include "opt_kstack_pages.h"
43#include "opt_procdesc.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/sysproto.h>
48#include <sys/eventhandler.h>
49#include <sys/fcntl.h>
50#include <sys/filedesc.h>
51#include <sys/jail.h>
52#include <sys/kernel.h>
53#include <sys/kthread.h>
54#include <sys/sysctl.h>
55#include <sys/lock.h>
56#include <sys/malloc.h>
57#include <sys/mutex.h>
58#include <sys/priv.h>
59#include <sys/proc.h>
60#include <sys/procdesc.h>
61#include <sys/pioctl.h>
62#include <sys/racct.h>
63#include <sys/resourcevar.h>
64#include <sys/sched.h>
65#include <sys/syscall.h>
66#include <sys/vmmeter.h>
67#include <sys/vnode.h>
68#include <sys/acct.h>
69#include <sys/ktr.h>
70#include <sys/ktrace.h>
71#include <sys/unistd.h>
72#include <sys/sdt.h>
73#include <sys/sx.h>
74#include <sys/sysent.h>
75#include <sys/signalvar.h>
76
77#include <security/audit/audit.h>
78#include <security/mac/mac_framework.h>
79
80#include <vm/vm.h>
81#include <vm/pmap.h>
82#include <vm/vm_map.h>
83#include <vm/vm_extern.h>
84#include <vm/uma.h>
85
86#ifdef KDTRACE_HOOKS
87#include <sys/dtrace_bsd.h>
88dtrace_fork_func_t dtrace_fasttrap_fork;
89#endif
90
91SDT_PROVIDER_DECLARE(proc);
92SDT_PROBE_DEFINE(proc, kernel, , create, create);
93SDT_PROBE_ARGTYPE(proc, kernel, , create, 0, "struct proc *");
94SDT_PROBE_ARGTYPE(proc, kernel, , create, 1, "struct proc *");
95SDT_PROBE_ARGTYPE(proc, kernel, , create, 2, "int");
96
97#ifndef _SYS_SYSPROTO_H_
98struct fork_args {
99 int dummy;
100};
101#endif
102
103/* ARGSUSED */
104int
105sys_fork(struct thread *td, struct fork_args *uap)
106{
107 int error;
108 struct proc *p2;
109
110 error = fork1(td, RFFDG | RFPROC, 0, &p2, NULL, 0);
111 if (error == 0) {
112 td->td_retval[0] = p2->p_pid;
113 td->td_retval[1] = 0;
114 }
115 return (error);
116}
117
 118/* ARGSUSED */
119int
120sys_pdfork(td, uap)
121 struct thread *td;
122 struct pdfork_args *uap;
123{
124#ifdef PROCDESC
125 int error, fd;
126 struct proc *p2;
127
128 /*
129 * It is necessary to return fd by reference because 0 is a valid file
130 * descriptor number, and the child needs to be able to distinguish
131 * itself from the parent using the return value.
132 */
133 error = fork1(td, RFFDG | RFPROC | RFPROCDESC, 0, &p2,
134 &fd, uap->flags);
135 if (error == 0) {
136 td->td_retval[0] = p2->p_pid;
137 td->td_retval[1] = 0;
138 error = copyout(&fd, uap->fdp, sizeof(fd));
139 }
140 return (error);
141#else
142 return (ENOSYS);
143#endif
144}
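/*
 * Illustrative sketch, not part of this revision (names are placeholders):
 * minimal userland use of pdfork(2).  The child is identified by a return
 * value of 0, which is why the descriptor must travel back through *fdp
 * rather than through the return value; the copyout above runs only on the
 * parent's return path.
 */
#if 0
#include <sys/procdesc.h>
#include <signal.h>
#include <unistd.h>

static void
pdfork_example(void)
{
	pid_t pid;
	int fd;

	pid = pdfork(&fd, 0);
	if (pid == 0) {
		/* Child: distinguished by the zero return; fd not written. */
		_exit(0);
	}
	/* Parent: fd names the child; see pdkill(2) and pdgetpid(2). */
	pdkill(fd, SIGTERM);
	close(fd);
}
#endif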
145
146/* ARGSUSED */
147int
148sys_vfork(struct thread *td, struct vfork_args *uap)
149{
150 int error, flags;
151 struct proc *p2;
152
153#ifdef XEN
154 flags = RFFDG | RFPROC; /* validate that this is still an issue */
155#else
156 flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
157#endif
158 error = fork1(td, flags, 0, &p2, NULL, 0);
159 if (error == 0) {
160 td->td_retval[0] = p2->p_pid;
161 td->td_retval[1] = 0;
162 }
163 return (error);
164}
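/*
 * Illustrative sketch, not part of this revision (names are placeholders):
 * the canonical vfork(2) pattern that the RFPPWAIT | RFMEM flags above
 * support.  The parent is suspended (RFPPWAIT) and shares its address
 * space with the child (RFMEM) until the child calls execve(2) or
 * _exit(2).
 */
#if 0
#include <unistd.h>

static void
vfork_example(const char *path, char *const argv[], char *const envp[])
{
	pid_t pid;

	pid = vfork();
	if (pid == 0) {
		execve(path, argv, envp);
		_exit(127);	/* Reached only if execve() failed. */
	}
	/* Parent: resumes only after the child has execed or exited. */
}
#endif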
165
166int
167sys_rfork(struct thread *td, struct rfork_args *uap)
168{
169 struct proc *p2;
170 int error;
171
172 /* Don't allow kernel-only flags. */
173 if ((uap->flags & RFKERNELONLY) != 0)
174 return (EINVAL);
175
176 AUDIT_ARG_FFLAGS(uap->flags);
177 error = fork1(td, uap->flags, 0, &p2, NULL, 0);
178 if (error == 0) {
179 td->td_retval[0] = p2 ? p2->p_pid : 0;
180 td->td_retval[1] = 0;
181 }
182 return (error);
183}
184
185int nprocs = 1; /* process 0 */
186int lastpid = 0;
187SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
188 "Last used PID");
189
190/*
 191 * Random component to lastpid generation.  We mix in a random factor to make
 192 * it a little harder to predict.  We sanity-check the modulus value here, in
 193 * the sysctl handler, rather than in critical paths.  Don't let it be too
 194 * small or we pointlessly waste entropy, and don't let it be impossibly
 195 * large: a modulus that is too big causes many more process table scans and
 196 * slows down fork processing as the pidchecked caching is defeated.
197 */
198static int randompid = 0;
199
200static int
201sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
202{
203 int error, pid;
204
205 error = sysctl_wire_old_buffer(req, sizeof(int));
206 if (error != 0)
 207		return (error);
208 sx_xlock(&allproc_lock);
209 pid = randompid;
210 error = sysctl_handle_int(oidp, &pid, 0, req);
211 if (error == 0 && req->newptr != NULL) {
212 if (pid < 0 || pid > PID_MAX - 100) /* out of range */
213 pid = PID_MAX - 100;
214 else if (pid < 2) /* NOP */
215 pid = 0;
216 else if (pid < 100) /* Make it reasonable */
217 pid = 100;
218 randompid = pid;
219 }
220 sx_xunlock(&allproc_lock);
221 return (error);
222}
223
224SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
225 0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
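/*
 * Illustrative note, not part of this revision: after, e.g.,
 * "sysctl kern.randompid=1000", each allocation starts from lastpid + 1
 * perturbed by arc4random() % 1000 instead of from lastpid + 1 alone
 * (see fork_findpid() below), at the price of more frequent rescans of
 * the process lists.
 */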
226
227static int
228fork_findpid(int flags)
229{
230 struct proc *p;
231 int trypid;
232 static int pidchecked = 0;
233
234 /*
235 * Requires allproc_lock in order to iterate over the list
236 * of processes, and proctree_lock to access p_pgrp.
237 */
238 sx_assert(&allproc_lock, SX_LOCKED);
239 sx_assert(&proctree_lock, SX_LOCKED);
240
241 /*
242 * Find an unused process ID. We remember a range of unused IDs
243 * ready to use (from lastpid+1 through pidchecked-1).
244 *
245 * If RFHIGHPID is set (used during system boot), do not allocate
246 * low-numbered pids.
247 */
248 trypid = lastpid + 1;
249 if (flags & RFHIGHPID) {
250 if (trypid < 10)
251 trypid = 10;
252 } else {
253 if (randompid)
254 trypid += arc4random() % randompid;
255 }
256retry:
257 /*
258 * If the process ID prototype has wrapped around,
259 * restart somewhat above 0, as the low-numbered procs
260 * tend to include daemons that don't exit.
261 */
262 if (trypid >= PID_MAX) {
263 trypid = trypid % PID_MAX;
264 if (trypid < 100)
265 trypid += 100;
266 pidchecked = 0;
267 }
268 if (trypid >= pidchecked) {
269 int doingzomb = 0;
270
271 pidchecked = PID_MAX;
272 /*
273 * Scan the active and zombie procs to check whether this pid
274 * is in use. Remember the lowest pid that's greater
275 * than trypid, so we can avoid checking for a while.
276 */
277 p = LIST_FIRST(&allproc);
278again:
279 for (; p != NULL; p = LIST_NEXT(p, p_list)) {
280 while (p->p_pid == trypid ||
281 (p->p_pgrp != NULL &&
282 (p->p_pgrp->pg_id == trypid ||
283 (p->p_session != NULL &&
284 p->p_session->s_sid == trypid)))) {
285 trypid++;
286 if (trypid >= pidchecked)
287 goto retry;
288 }
289 if (p->p_pid > trypid && pidchecked > p->p_pid)
290 pidchecked = p->p_pid;
291 if (p->p_pgrp != NULL) {
292 if (p->p_pgrp->pg_id > trypid &&
293 pidchecked > p->p_pgrp->pg_id)
294 pidchecked = p->p_pgrp->pg_id;
295 if (p->p_session != NULL &&
296 p->p_session->s_sid > trypid &&
297 pidchecked > p->p_session->s_sid)
298 pidchecked = p->p_session->s_sid;
299 }
300 }
301 if (!doingzomb) {
302 doingzomb = 1;
303 p = LIST_FIRST(&zombproc);
304 goto again;
305 }
306 }
307
308 /*
309 * RFHIGHPID does not mess with the lastpid counter during boot.
310 */
311 if (flags & RFHIGHPID)
312 pidchecked = 0;
313 else
314 lastpid = trypid;
315
316 return (trypid);
317}
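/*
 * Illustrative walk-through with invented numbers: if lastpid is 500,
 * trypid is 501, and the lowest in-use pid, process group id or session
 * id above 501 is 730, then 501 is returned and pidchecked becomes 730;
 * subsequent forks can hand out 502..729 without rescanning allproc and
 * zombproc at all.
 */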
318
319static int
320fork_norfproc(struct thread *td, int flags)
321{
322 int error;
323 struct proc *p1;
324
325 KASSERT((flags & RFPROC) == 0,
326 ("fork_norfproc called with RFPROC set"));
327 p1 = td->td_proc;
328
329 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
330 (flags & (RFCFDG | RFFDG))) {
331 PROC_LOCK(p1);
332 if (thread_single(SINGLE_BOUNDARY)) {
333 PROC_UNLOCK(p1);
334 return (ERESTART);
335 }
336 PROC_UNLOCK(p1);
337 }
338
339 error = vm_forkproc(td, NULL, NULL, NULL, flags);
340 if (error)
341 goto fail;
342
343 /*
344 * Close all file descriptors.
345 */
346 if (flags & RFCFDG) {
347 struct filedesc *fdtmp;
348 fdtmp = fdinit(td->td_proc->p_fd);
349 fdfree(td);
350 p1->p_fd = fdtmp;
351 }
352
353 /*
354 * Unshare file descriptors (from parent).
355 */
356 if (flags & RFFDG)
357 fdunshare(p1, td);
358
359fail:
360 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
361 (flags & (RFCFDG | RFFDG))) {
362 PROC_LOCK(p1);
363 thread_single_end();
364 PROC_UNLOCK(p1);
365 }
366 return (error);
367}
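/*
 * Illustrative sketch, not part of this revision (name is a placeholder):
 * rfork(2) without RFPROC is handled by the function above and modifies
 * the calling process in place instead of creating a child.
 */
#if 0
#include <unistd.h>

static int
example_unshare_fds(void)
{
	/*
	 * Get a private copy of our descriptor table (RFFDG), or start
	 * over with an empty one (RFCFDG).  No child is created.
	 */
	return (rfork(RFFDG));
}
#endif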
368
369static void
370do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2,
371 struct vmspace *vm2, int pdflags)
372{
373 struct proc *p1, *pptr;
374 int p2_held, trypid;
375 struct filedesc *fd;
376 struct filedesc_to_leader *fdtol;
377 struct sigacts *newsigacts;
378
379 sx_assert(&proctree_lock, SX_SLOCKED);
380 sx_assert(&allproc_lock, SX_XLOCKED);
381
382 p2_held = 0;
383 p1 = td->td_proc;
384
385 /*
386 * Increment the nprocs resource before blocking can occur. There
 387	 * are hard limits on the number of processes that can run.
388 */
389 nprocs++;
390
391 trypid = fork_findpid(flags);
392
393 sx_sunlock(&proctree_lock);
394
395 p2->p_state = PRS_NEW; /* protect against others */
396 p2->p_pid = trypid;
397 AUDIT_ARG_PID(p2->p_pid);
398 LIST_INSERT_HEAD(&allproc, p2, p_list);
399 LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
400 tidhash_add(td2);
401 PROC_LOCK(p2);
402 PROC_LOCK(p1);
403
404 sx_xunlock(&allproc_lock);
405
406 bcopy(&p1->p_startcopy, &p2->p_startcopy,
407 __rangeof(struct proc, p_startcopy, p_endcopy));
408 pargs_hold(p2->p_args);
409 PROC_UNLOCK(p1);
410
411 bzero(&p2->p_startzero,
412 __rangeof(struct proc, p_startzero, p_endzero));
413
414 p2->p_ucred = crhold(td->td_ucred);
415
416 /* Tell the prison that we exist. */
417 prison_proc_hold(p2->p_ucred->cr_prison);
418
419 PROC_UNLOCK(p2);
420
421 /*
422 * Malloc things while we don't hold any locks.
423 */
424 if (flags & RFSIGSHARE)
425 newsigacts = NULL;
426 else
427 newsigacts = sigacts_alloc();
428
429 /*
430 * Copy filedesc.
431 */
432 if (flags & RFCFDG) {
433 fd = fdinit(p1->p_fd);
434 fdtol = NULL;
435 } else if (flags & RFFDG) {
436 fd = fdcopy(p1->p_fd);
437 fdtol = NULL;
438 } else {
439 fd = fdshare(p1->p_fd);
440 if (p1->p_fdtol == NULL)
441 p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
442 p1->p_leader);
443 if ((flags & RFTHREAD) != 0) {
444 /*
445 * Shared file descriptor table, and shared
446 * process leaders.
447 */
448 fdtol = p1->p_fdtol;
449 FILEDESC_XLOCK(p1->p_fd);
450 fdtol->fdl_refcount++;
451 FILEDESC_XUNLOCK(p1->p_fd);
452 } else {
453 /*
454 * Shared file descriptor table, and different
455 * process leaders.
456 */
457 fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
458 p1->p_fd, p2);
459 }
460 }
461 /*
462 * Make a proc table entry for the new process.
463 * Start by zeroing the section of proc that is zero-initialized,
464 * then copy the section that is copied directly from the parent.
465 */
466
467 PROC_LOCK(p2);
468 PROC_LOCK(p1);
469
470 bzero(&td2->td_startzero,
471 __rangeof(struct thread, td_startzero, td_endzero));
472
473 bcopy(&td->td_startcopy, &td2->td_startcopy,
474 __rangeof(struct thread, td_startcopy, td_endcopy));
475
476 bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
477 td2->td_sigstk = td->td_sigstk;
478 td2->td_sigmask = td->td_sigmask;
479 td2->td_flags = TDF_INMEM;
480 td2->td_lend_user_pri = PRI_MAX;
481
482#ifdef VIMAGE
483 td2->td_vnet = NULL;
484 td2->td_vnet_lpush = NULL;
485#endif
486
487 /*
488 * Allow the scheduler to initialize the child.
489 */
490 thread_lock(td);
491 sched_fork(td, td2);
492 thread_unlock(td);
493
494 /*
495 * Duplicate sub-structures as needed.
496 * Increase reference counts on shared objects.
497 */
498 p2->p_flag = P_INMEM;
499 p2->p_swtick = ticks;
500 if (p1->p_flag & P_PROFIL)
501 startprofclock(p2);
502 td2->td_ucred = crhold(p2->p_ucred);
503
504 if (flags & RFSIGSHARE) {
505 p2->p_sigacts = sigacts_hold(p1->p_sigacts);
506 } else {
507 sigacts_copy(newsigacts, p1->p_sigacts);
508 p2->p_sigacts = newsigacts;
509 }
510
511 if (flags & RFTSIGZMB)
512 p2->p_sigparent = RFTSIGNUM(flags);
513 else if (flags & RFLINUXTHPN)
514 p2->p_sigparent = SIGUSR1;
515 else
516 p2->p_sigparent = SIGCHLD;
517
518 p2->p_textvp = p1->p_textvp;
519 p2->p_fd = fd;
520 p2->p_fdtol = fdtol;
521
522 /*
523 * p_limit is copy-on-write. Bump its refcount.
524 */
525 lim_fork(p1, p2);
526
527 pstats_fork(p1->p_stats, p2->p_stats);
528
529 PROC_UNLOCK(p1);
530 PROC_UNLOCK(p2);
531
532 /* Bump references to the text vnode (for procfs). */
533 if (p2->p_textvp)
534 vref(p2->p_textvp);
535
536 /*
537 * Set up linkage for kernel based threading.
538 */
539 if ((flags & RFTHREAD) != 0) {
540 mtx_lock(&ppeers_lock);
541 p2->p_peers = p1->p_peers;
542 p1->p_peers = p2;
543 p2->p_leader = p1->p_leader;
544 mtx_unlock(&ppeers_lock);
545 PROC_LOCK(p1->p_leader);
546 if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
547 PROC_UNLOCK(p1->p_leader);
548 /*
549 * The task leader is exiting, so process p1 is
550 * going to be killed shortly. Since p1 obviously
551 * isn't dead yet, we know that the leader is either
552 * sending SIGKILL's to all the processes in this
553 * task or is sleeping waiting for all the peers to
554 * exit. We let p1 complete the fork, but we need
555 * to go ahead and kill the new process p2 since
556 * the task leader may not get a chance to send
557 * SIGKILL to it. We leave it on the list so that
558 * the task leader will wait for this new process
559 * to commit suicide.
560 */
561 PROC_LOCK(p2);
562 kern_psignal(p2, SIGKILL);
563 PROC_UNLOCK(p2);
564 } else
565 PROC_UNLOCK(p1->p_leader);
566 } else {
567 p2->p_peers = NULL;
568 p2->p_leader = p2;
569 }
570
571 sx_xlock(&proctree_lock);
572 PGRP_LOCK(p1->p_pgrp);
573 PROC_LOCK(p2);
574 PROC_LOCK(p1);
575
576 /*
577 * Preserve some more flags in subprocess. P_PROFIL has already
578 * been preserved.
579 */
580 p2->p_flag |= p1->p_flag & P_SUGID;
581 td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
582 SESS_LOCK(p1->p_session);
583 if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
584 p2->p_flag |= P_CONTROLT;
585 SESS_UNLOCK(p1->p_session);
586 if (flags & RFPPWAIT)
587 p2->p_flag |= P_PPWAIT;
588
589 p2->p_pgrp = p1->p_pgrp;
590 LIST_INSERT_AFTER(p1, p2, p_pglist);
591 PGRP_UNLOCK(p1->p_pgrp);
592 LIST_INIT(&p2->p_children);
593 LIST_INIT(&p2->p_orphans);
594
595 callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);
596
597 /*
598 * If PF_FORK is set, the child process inherits the
599 * procfs ioctl flags from its parent.
600 */
601 if (p1->p_pfsflags & PF_FORK) {
602 p2->p_stops = p1->p_stops;
603 p2->p_pfsflags = p1->p_pfsflags;
604 }
605
606 /*
607 * This begins the section where we must prevent the parent
608 * from being swapped.
609 */
610 _PHOLD(p1);
611 PROC_UNLOCK(p1);
612
613 /*
614 * Attach the new process to its parent.
615 *
616 * If RFNOWAIT is set, the newly created process becomes a child
617 * of init. This effectively disassociates the child from the
618 * parent.
619 */
620 if (flags & RFNOWAIT)
621 pptr = initproc;
622 else
623 pptr = p1;
624 p2->p_pptr = pptr;
625 LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
626 sx_xunlock(&proctree_lock);
627
628 /* Inform accounting that we have forked. */
629 p2->p_acflag = AFORK;
630 PROC_UNLOCK(p2);
631
632#ifdef KTRACE
633 ktrprocfork(p1, p2);
634#endif
635
636 /*
637 * Finish creating the child process. It will return via a different
 638	 * execution path later (i.e., directly into user mode).
639 */
640 vm_forkproc(td, p2, td2, vm2, flags);
641
642 if (flags == (RFFDG | RFPROC)) {
643 PCPU_INC(cnt.v_forks);
644 PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
645 p2->p_vmspace->vm_ssize);
646 } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
647 PCPU_INC(cnt.v_vforks);
648 PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
649 p2->p_vmspace->vm_ssize);
650 } else if (p1 == &proc0) {
651 PCPU_INC(cnt.v_kthreads);
652 PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
653 p2->p_vmspace->vm_ssize);
654 } else {
655 PCPU_INC(cnt.v_rforks);
656 PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
657 p2->p_vmspace->vm_ssize);
658 }
659
660#ifdef PROCDESC
661 /*
662 * Associate the process descriptor with the process before anything
663 * can happen that might cause that process to need the descriptor.
664 * However, don't do this until after fork(2) can no longer fail.
665 */
666 if (flags & RFPROCDESC)
667 procdesc_new(p2, pdflags);
668#endif
669
670 /*
671 * Both processes are set up, now check if any loadable modules want
672 * to adjust anything.
673 */
674 EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);
675
676 /*
677 * Set the child start time and mark the process as being complete.
678 */
679 PROC_LOCK(p2);
680 PROC_LOCK(p1);
681 microuptime(&p2->p_stats->p_start);
682 PROC_SLOCK(p2);
683 p2->p_state = PRS_NORMAL;
684 PROC_SUNLOCK(p2);
685
686#ifdef KDTRACE_HOOKS
687 /*
688 * Tell the DTrace fasttrap provider about the new process
689 * if it has registered an interest. We have to do this only after
690 * p_state is PRS_NORMAL since the fasttrap module will use pfind()
691 * later on.
692 */
693 if (dtrace_fasttrap_fork)
694 dtrace_fasttrap_fork(p1, p2);
695#endif
696 if ((p1->p_flag & (P_TRACED | P_FOLLOWFORK)) == (P_TRACED |
697 P_FOLLOWFORK)) {
698 /*
699 * Arrange for debugger to receive the fork event.
700 *
701 * We can report PL_FLAG_FORKED regardless of
 702		 * P_FOLLOWFORK settings, but it does not make sense
 703		 * for a runaway child.
704 */
705 td->td_dbgflags |= TDB_FORK;
706 td->td_dbg_forked = p2->p_pid;
707 td2->td_dbgflags |= TDB_STOPATFORK;
708 _PHOLD(p2);
709 p2_held = 1;
710 }
711 if (flags & RFPPWAIT) {
712 td->td_pflags |= TDP_RFPPWAIT;
713 td->td_rfppwait_p = p2;
714 }
715 PROC_UNLOCK(p2);
716 if ((flags & RFSTOPPED) == 0) {
717 /*
718 * If RFSTOPPED not requested, make child runnable and
719 * add to run queue.
720 */
721 thread_lock(td2);
722 TD_SET_CAN_RUN(td2);
723 sched_add(td2, SRQ_BORING);
724 thread_unlock(td2);
725 }
726
727 /*
728 * Now can be swapped.
729 */
730 _PRELE(p1);
731 PROC_UNLOCK(p1);
732
733 /*
734 * Tell any interested parties about the new process.
735 */
736 knote_fork(&p1->p_klist, p2->p_pid);
737 SDT_PROBE(proc, kernel, , create, p2, p1, flags, 0, 0);
738
739 /*
740 * Wait until debugger is attached to child.
741 */
742 PROC_LOCK(p2);
743 while ((td2->td_dbgflags & TDB_STOPATFORK) != 0)
744 cv_wait(&p2->p_dbgwait, &p2->p_mtx);
745 if (p2_held)
746 _PRELE(p2);
747 PROC_UNLOCK(p2);
748}
749
750int
751fork1(struct thread *td, int flags, int pages, struct proc **procp,
752 int *procdescp, int pdflags)
753{
754 struct proc *p1;
755 struct proc *newproc;
756 int ok;
757 struct thread *td2;
758 struct vmspace *vm2;
759 vm_ooffset_t mem_charged;
760 int error;
761 static int curfail;
762 static struct timeval lastfail;
763#ifdef PROCDESC
764 struct file *fp_procdesc = NULL;
765#endif
766
767 /* Check for the undefined or unimplemented flags. */
768 if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
769 return (EINVAL);
770
771 /* Signal value requires RFTSIGZMB. */
772 if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
773 return (EINVAL);
774
775 /* Can't copy and clear. */
776 if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
777 return (EINVAL);
778
779 /* Check the validity of the signal number. */
780 if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
781 return (EINVAL);
782
783#ifdef PROCDESC
784 if ((flags & RFPROCDESC) != 0) {
 785		/* Can't get a process descriptor without also creating a process. */
786 if ((flags & RFPROC) == 0)
787 return (EINVAL);
788
789 /* Must provide a place to put a procdesc if creating one. */
790 if (procdescp == NULL)
791 return (EINVAL);
792 }
793#endif
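/*
 * Illustrative examples, not part of this revision, of how the checks
 * above treat flag combinations:
 *
 *	rfork(RFPROC | RFFDG | RFCFDG)		-> EINVAL (copy and clear)
 *	rfork(RFPROC | RFTSIGFLAGS(SIGUSR1))	-> EINVAL (no RFTSIGZMB)
 *	rfork(RFPROC | RFTSIGZMB | RFTSIGFLAGS(SIGUSR1))
 *		-> child reports its exit to the parent with SIGUSR1
 *		   instead of SIGCHLD (see p_sigparent in do_fork()).
 */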
794
795 p1 = td->td_proc;
796
797 /*
798 * Here we don't create a new process, but we divorce
799 * certain parts of a process from itself.
800 */
801 if ((flags & RFPROC) == 0) {
802 *procp = NULL;
803 return (fork_norfproc(td, flags));
804 }
805
806#ifdef PROCDESC
807 /*
808 * If required, create a process descriptor in the parent first; we
809 * will abandon it if something goes wrong. We don't finit() until
810 * later.
811 */
812 if (flags & RFPROCDESC) {
813 error = falloc(td, &fp_procdesc, procdescp, 0);
814 if (error != 0)
815 return (error);
816 }
817#endif
818
819 mem_charged = 0;
820 vm2 = NULL;
821 if (pages == 0)
822 pages = KSTACK_PAGES;
823 /* Allocate new proc. */
824 newproc = uma_zalloc(proc_zone, M_WAITOK);
825 td2 = FIRST_THREAD_IN_PROC(newproc);
826 if (td2 == NULL) {
827 td2 = thread_alloc(pages);
828 if (td2 == NULL) {
829 error = ENOMEM;
830 goto fail1;
831 }
832 proc_linkup(newproc, td2);
833 } else {
834 if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
835 if (td2->td_kstack != 0)
836 vm_thread_dispose(td2);
837 if (!thread_alloc_stack(td2, pages)) {
838 error = ENOMEM;
839 goto fail1;
840 }
841 }
842 }
843
844 if ((flags & RFMEM) == 0) {
845 vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
846 if (vm2 == NULL) {
847 error = ENOMEM;
848 goto fail1;
849 }
850 if (!swap_reserve(mem_charged)) {
851 /*
852 * The swap reservation failed. The accounting
853 * from the entries of the copied vm2 will be
854 * substracted in vmspace_free(), so force the
855 * reservation there.
856 */
857 swap_reserve_force(mem_charged);
858 error = ENOMEM;
859 goto fail1;
860 }
861 } else
862 vm2 = NULL;
863
864 /*
865 * XXX: This is ugly; when we copy resource usage, we need to bump
866 * per-cred resource counters.
867 */
868 newproc->p_ucred = p1->p_ucred;
869
870 /*
871 * Initialize resource accounting for the child process.
872 */
873 error = racct_proc_fork(p1, newproc);
874 if (error != 0) {
875 error = EAGAIN;
876 goto fail1;
877 }
878
879#ifdef MAC
880 mac_proc_init(newproc);
881#endif
882 knlist_init_mtx(&newproc->p_klist, &newproc->p_mtx);
883 STAILQ_INIT(&newproc->p_ktr);
884
885 /* We have to lock the process tree while we look for a pid. */
886 sx_slock(&proctree_lock);
887
888 /*
889 * Although process entries are dynamically created, we still keep
890 * a global limit on the maximum number we will create. Don't allow
891 * a nonprivileged user to use the last ten processes; don't let root
892 * exceed the limit. The variable nprocs is the current number of
893 * processes, maxproc is the limit.
894 */
895 sx_xlock(&allproc_lock);
896 if ((nprocs >= maxproc - 10 && priv_check_cred(td->td_ucred,
897 PRIV_MAXPROC, 0) != 0) || nprocs >= maxproc) {
898 error = EAGAIN;
899 goto fail;
900 }
901
902 /*
903 * Increment the count of procs running with this uid. Don't allow
904 * a nonprivileged user to exceed their current limit.
905 *
906 * XXXRW: Can we avoid privilege here if it's not needed?
907 */
908 error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
909 if (error == 0)
910 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
911 else {
912 PROC_LOCK(p1);
913 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
914 lim_cur(p1, RLIMIT_NPROC));
915 PROC_UNLOCK(p1);
916 }
917 if (ok) {
918 do_fork(td, flags, newproc, td2, vm2, pdflags);
919
920 /*
921 * Return child proc pointer to parent.
922 */
923 *procp = newproc;
924#ifdef PROCDESC
925 if (flags & RFPROCDESC)
926 procdesc_finit(newproc->p_procdesc, fp_procdesc);
927#endif
928 racct_proc_fork_done(newproc);
929 return (0);
930 }
931
932 error = EAGAIN;
933fail:
934 sx_sunlock(&proctree_lock);
935 if (ppsratecheck(&lastfail, &curfail, 1))
936 printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
937 td->td_ucred->cr_ruid);
938 sx_xunlock(&allproc_lock);
939#ifdef MAC
940 mac_proc_destroy(newproc);
941#endif
-942fail1:
-943	racct_proc_exit(newproc);
+942	racct_proc_exit(newproc);
+943fail1:
944 if (vm2 != NULL)
945 vmspace_free(vm2);
946 uma_zfree(proc_zone, newproc);
947#ifdef PROCDESC
948 if (((flags & RFPROCDESC) != 0) && (fp_procdesc != NULL))
949 fdrop(fp_procdesc, td);
950#endif
951 pause("fork", hz / 2);
952 return (error);
953}
954
955/*
956 * Handle the return of a child process from fork1(). This function
957 * is called from the MD fork_trampoline() entry point.
958 */
959void
960fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
961 struct trapframe *frame)
962{
963 struct proc *p;
964 struct thread *td;
965 struct thread *dtd;
966
967 td = curthread;
968 p = td->td_proc;
969 KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
970
971 CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
972 td, td->td_sched, p->p_pid, td->td_name);
973
974 sched_fork_exit(td);
975 /*
976 * Processes normally resume in mi_switch() after being
977 * cpu_switch()'ed to, but when children start up they arrive here
978 * instead, so we must do much the same things as mi_switch() would.
979 */
980 if ((dtd = PCPU_GET(deadthread))) {
981 PCPU_SET(deadthread, NULL);
982 thread_stash(dtd);
983 }
984 thread_unlock(td);
985
986 /*
 987	 * cpu_set_fork_handler() can redirect this call to a non-returning
 988	 * function, so that the thread stays in kernel mode.
 989	 * initproc has its own fork handler, but that one does return.
990 */
991 KASSERT(callout != NULL, ("NULL callout in fork_exit"));
992 callout(arg, frame);
993
994 /*
995 * Check if a kernel thread misbehaved and returned from its main
996 * function.
997 */
998 if (p->p_flag & P_KTHREAD) {
999 printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
1000 td->td_name, p->p_pid);
1001 kproc_exit(0);
1002 }
1003 mtx_assert(&Giant, MA_NOTOWNED);
1004
1005 if (p->p_sysent->sv_schedtail != NULL)
1006 (p->p_sysent->sv_schedtail)(td);
1007}
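/*
 * Illustrative sketch, not part of this revision (names are placeholders):
 * a kernel process whose main function is the callout invoked above.
 * Returning from it would trip the "exited prematurely" warning, so a
 * well-behaved kernel process calls kproc_exit(9) itself; kproc_create(9)
 * reaches this file through fork1() with RFSTOPPED set.
 */
#if 0
#include <sys/param.h>
#include <sys/kthread.h>
#include <sys/proc.h>

static void
example_kproc_main(void *arg)
{
	/* ... kernel process work ... */
	kproc_exit(0);		/* Never simply return. */
}

static void
example_kproc_start(void)
{
	struct proc *p;

	(void)kproc_create(example_kproc_main, NULL, &p, 0, 0, "example");
}
#endif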
1008
1009/*
1010 * Simplified back end of syscall(), used when returning from fork()
1011 * directly into user mode. Giant is not held on entry, and must not
1012 * be held on return. This function is passed in to fork_exit() as the
1013 * first parameter and is called when returning to a new userland process.
1014 */
1015void
1016fork_return(struct thread *td, struct trapframe *frame)
1017{
1018 struct proc *p, *dbg;
1019
1020 if (td->td_dbgflags & TDB_STOPATFORK) {
1021 p = td->td_proc;
1022 sx_xlock(&proctree_lock);
1023 PROC_LOCK(p);
1024 if ((p->p_pptr->p_flag & (P_TRACED | P_FOLLOWFORK)) ==
1025 (P_TRACED | P_FOLLOWFORK)) {
1026 /*
1027 * If debugger still wants auto-attach for the
1028 * parent's children, do it now.
1029 */
1030 dbg = p->p_pptr->p_pptr;
1031 p->p_flag |= P_TRACED;
1032 p->p_oppid = p->p_pptr->p_pid;
1033 proc_reparent(p, dbg);
1034 sx_xunlock(&proctree_lock);
1035 td->td_dbgflags |= TDB_CHILD;
1036 ptracestop(td, SIGSTOP);
1037 td->td_dbgflags &= ~TDB_CHILD;
1038 } else {
1039 /*
1040 * ... otherwise clear the request.
1041 */
1042 sx_xunlock(&proctree_lock);
1043 td->td_dbgflags &= ~TDB_STOPATFORK;
1044 cv_broadcast(&p->p_dbgwait);
1045 }
1046 PROC_UNLOCK(p);
1047 }
1048
1049 userret(td, frame);
1050
1051#ifdef KTRACE
1052 if (KTRPOINT(td, KTR_SYSRET))
1053 ktrsysret(SYS_fork, 0, 0);
1054#endif
1055 mtx_assert(&Giant, MA_NOTOWNED);
1056}