kern_fork.c (211616) -> kern_fork.c (212357)
1/*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/sys/kern/kern_fork.c 211616 2010-08-22 11:18:57Z rpaulo $");
38__FBSDID("$FreeBSD: head/sys/kern/kern_fork.c 212357 2010-09-09 09:58:05Z rpaulo $");
39
40#include "opt_kdtrace.h"
41#include "opt_ktrace.h"
42#include "opt_kstack_pages.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysproto.h>
47#include <sys/eventhandler.h>
48#include <sys/filedesc.h>
49#include <sys/jail.h>
50#include <sys/kernel.h>
51#include <sys/kthread.h>
52#include <sys/sysctl.h>
53#include <sys/lock.h>
54#include <sys/malloc.h>
55#include <sys/mutex.h>
56#include <sys/priv.h>
57#include <sys/proc.h>
58#include <sys/pioctl.h>
59#include <sys/resourcevar.h>
60#include <sys/sched.h>
61#include <sys/syscall.h>
62#include <sys/vmmeter.h>
63#include <sys/vnode.h>
64#include <sys/acct.h>
65#include <sys/ktr.h>
66#include <sys/ktrace.h>
67#include <sys/unistd.h>
68#include <sys/sdt.h>
69#include <sys/sx.h>
70#include <sys/signalvar.h>
71
72#include <security/audit/audit.h>
73#include <security/mac/mac_framework.h>
74
75#include <vm/vm.h>
76#include <vm/pmap.h>
77#include <vm/vm_map.h>
78#include <vm/vm_extern.h>
79#include <vm/uma.h>
80
81#ifdef KDTRACE_HOOKS
82#include <sys/dtrace_bsd.h>
83dtrace_fork_func_t dtrace_fasttrap_fork;
84#endif
85
86SDT_PROVIDER_DECLARE(proc);
87SDT_PROBE_DEFINE(proc, kernel, , create, create);
88SDT_PROBE_ARGTYPE(proc, kernel, , create, 0, "struct proc *");
89SDT_PROBE_ARGTYPE(proc, kernel, , create, 1, "struct proc *");
90SDT_PROBE_ARGTYPE(proc, kernel, , create, 2, "int");
91
92#ifndef _SYS_SYSPROTO_H_
93struct fork_args {
94 int dummy;
95};
96#endif
97
98/* ARGSUSED */
99int
100fork(td, uap)
101 struct thread *td;
102 struct fork_args *uap;
103{
104 int error;
105 struct proc *p2;
106
107 error = fork1(td, RFFDG | RFPROC, 0, &p2);
108 if (error == 0) {
109 td->td_retval[0] = p2->p_pid;
110 td->td_retval[1] = 0;
111 }
112 return (error);
113}
114
115/* ARGSUSED */
116int
117vfork(td, uap)
118 struct thread *td;
119 struct vfork_args *uap;
120{
121 int error, flags;
122 struct proc *p2;
123
124#ifdef XEN
125 flags = RFFDG | RFPROC; /* validate that this is still an issue */
126#else
127 flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
128#endif
129 error = fork1(td, flags, 0, &p2);
130 if (error == 0) {
131 td->td_retval[0] = p2->p_pid;
132 td->td_retval[1] = 0;
133 }
134 return (error);
135}
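/*
 * Note: vfork() is thus fork1() with RFPPWAIT | RFMEM added, i.e. the
 * child shares the parent's address space (RFMEM) and the parent sleeps
 * until the child execs or exits (RFPPWAIT; see the P_PPWAIT wait loop
 * near the end of fork1() below).
 */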
136
137int
138rfork(td, uap)
139 struct thread *td;
140 struct rfork_args *uap;
141{
142 struct proc *p2;
143 int error;
144
145 /* Don't allow kernel-only flags. */
146 if ((uap->flags & RFKERNELONLY) != 0)
147 return (EINVAL);
148
149 AUDIT_ARG_FFLAGS(uap->flags);
150 error = fork1(td, uap->flags, 0, &p2);
151 if (error == 0) {
152 td->td_retval[0] = p2 ? p2->p_pid : 0;
153 td->td_retval[1] = 0;
154 }
155 return (error);
156}
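/*
 * Illustrative userland sketch (not part of this file): plain fork() is
 * equivalent to rfork(RFFDG | RFPROC), while for example
 *
 *	pid_t pid = rfork(RFPROC | RFMEM);
 *
 * creates a child that shares the parent's address space.
 */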
157
158int nprocs = 1; /* process 0 */
159int lastpid = 0;
160SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
161 "Last used PID");
162
163/*
164 * Random component to lastpid generation. We mix in a random factor to make
 165 * it a little harder to predict. We sanity-check the modulus value here to
 166 * avoid doing it in critical paths. Don't let it be too small or we
 167 * pointlessly waste entropy, and don't let it be impossibly large. Using a
168 * modulus that is too big causes a LOT more process table scans and slows
169 * down fork processing as the pidchecked caching is defeated.
170 */
171static int randompid = 0;
172
173static int
174sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
175{
176 int error, pid;
177
178 error = sysctl_wire_old_buffer(req, sizeof(int));
179 if (error != 0)
180 return(error);
181 sx_xlock(&allproc_lock);
182 pid = randompid;
183 error = sysctl_handle_int(oidp, &pid, 0, req);
184 if (error == 0 && req->newptr != NULL) {
185 if (pid < 0 || pid > PID_MAX - 100) /* out of range */
186 pid = PID_MAX - 100;
187 else if (pid < 2) /* NOP */
188 pid = 0;
189 else if (pid < 100) /* Make it reasonable */
190 pid = 100;
191 randompid = pid;
192 }
193 sx_xunlock(&allproc_lock);
194 return (error);
195}
196
197SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
198 0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
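/*
 * Illustrative example (hypothetical value): after "sysctl kern.randompid=500",
 * each fork starts its pid search at lastpid + 1 + arc4random() % 500, so
 * consecutive children no longer receive strictly sequential pids.
 */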
199
200int
201fork1(td, flags, pages, procp)
202 struct thread *td;
203 int flags;
204 int pages;
205 struct proc **procp;
206{
207 struct proc *p1, *p2, *pptr;
208 struct proc *newproc;
209 int ok, trypid;
210 static int curfail, pidchecked = 0;
211 static struct timeval lastfail;
212 struct filedesc *fd;
213 struct filedesc_to_leader *fdtol;
214 struct thread *td2;
215 struct sigacts *newsigacts;
216 struct vmspace *vm2;
217 vm_ooffset_t mem_charged;
218 int error;
219
220 /* Can't copy and clear. */
221 if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
222 return (EINVAL);
223
224 p1 = td->td_proc;
225
226 /*
227 * Here we don't create a new process, but we divorce
228 * certain parts of a process from itself.
229 */
230 if ((flags & RFPROC) == 0) {
231 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
232 (flags & (RFCFDG | RFFDG))) {
233 PROC_LOCK(p1);
234 if (thread_single(SINGLE_BOUNDARY)) {
235 PROC_UNLOCK(p1);
236 return (ERESTART);
237 }
238 PROC_UNLOCK(p1);
239 }
240
241 error = vm_forkproc(td, NULL, NULL, NULL, flags);
242 if (error)
243 goto norfproc_fail;
244
245 /*
246 * Close all file descriptors.
247 */
248 if (flags & RFCFDG) {
249 struct filedesc *fdtmp;
250 fdtmp = fdinit(td->td_proc->p_fd);
251 fdfree(td);
252 p1->p_fd = fdtmp;
253 }
254
255 /*
256 * Unshare file descriptors (from parent).
257 */
258 if (flags & RFFDG)
259 fdunshare(p1, td);
260
261norfproc_fail:
262 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
263 (flags & (RFCFDG | RFFDG))) {
264 PROC_LOCK(p1);
265 thread_single_end();
266 PROC_UNLOCK(p1);
267 }
268 *procp = NULL;
269 return (error);
270 }
271
272 /*
273 * XXX
 274 * We did have single-threading code here,
 275 * but it proved unneeded and caused problems.
276 */
277
278 mem_charged = 0;
279 vm2 = NULL;
280 if (pages == 0)
281 pages = KSTACK_PAGES;
282 /* Allocate new proc. */
283 newproc = uma_zalloc(proc_zone, M_WAITOK);
284 td2 = FIRST_THREAD_IN_PROC(newproc);
285 if (td2 == NULL) {
286 td2 = thread_alloc(pages);
287 if (td2 == NULL) {
288 error = ENOMEM;
289 goto fail1;
290 }
291 proc_linkup(newproc, td2);
292 } else {
293 if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
294 if (td2->td_kstack != 0)
295 vm_thread_dispose(td2);
296 if (!thread_alloc_stack(td2, pages)) {
297 error = ENOMEM;
298 goto fail1;
299 }
300 }
301 }
302
303 if ((flags & RFMEM) == 0) {
304 vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
305 if (vm2 == NULL) {
306 error = ENOMEM;
307 goto fail1;
308 }
309 if (!swap_reserve(mem_charged)) {
310 /*
311 * The swap reservation failed. The accounting
312 * from the entries of the copied vm2 will be
 313 * subtracted in vmspace_free(), so force the
314 * reservation there.
315 */
316 swap_reserve_force(mem_charged);
317 error = ENOMEM;
318 goto fail1;
319 }
320 } else
321 vm2 = NULL;
322#ifdef MAC
323 mac_proc_init(newproc);
324#endif
325 knlist_init_mtx(&newproc->p_klist, &newproc->p_mtx);
326 STAILQ_INIT(&newproc->p_ktr);
327
328 /* We have to lock the process tree while we look for a pid. */
329 sx_slock(&proctree_lock);
330
331 /*
332 * Although process entries are dynamically created, we still keep
333 * a global limit on the maximum number we will create. Don't allow
334 * a nonprivileged user to use the last ten processes; don't let root
335 * exceed the limit. The variable nprocs is the current number of
336 * processes, maxproc is the limit.
337 */
338 sx_xlock(&allproc_lock);
339 if ((nprocs >= maxproc - 10 && priv_check_cred(td->td_ucred,
340 PRIV_MAXPROC, 0) != 0) || nprocs >= maxproc) {
341 error = EAGAIN;
342 goto fail;
343 }
344
345 /*
346 * Increment the count of procs running with this uid. Don't allow
347 * a nonprivileged user to exceed their current limit.
348 *
349 * XXXRW: Can we avoid privilege here if it's not needed?
350 */
351 error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
352 if (error == 0)
353 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
354 else {
355 PROC_LOCK(p1);
356 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
357 lim_cur(p1, RLIMIT_NPROC));
358 PROC_UNLOCK(p1);
359 }
360 if (!ok) {
361 error = EAGAIN;
362 goto fail;
363 }
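	/*
	 * chgproccnt(uip, diff, max) adjusts the per-uid process count
	 * and fails if the new count would exceed max; a max of 0 (the
	 * privileged case above) means "no limit", so only unprivileged
	 * users are capped at their RLIMIT_NPROC.
	 */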
364
365 /*
366 * Increment the nprocs resource before blocking can occur. There
 367 * are hard limits on the number of processes that can run.
368 */
369 nprocs++;
370
371 /*
372 * Find an unused process ID. We remember a range of unused IDs
373 * ready to use (from lastpid+1 through pidchecked-1).
374 *
375 * If RFHIGHPID is set (used during system boot), do not allocate
376 * low-numbered pids.
377 */
378 trypid = lastpid + 1;
379 if (flags & RFHIGHPID) {
380 if (trypid < 10)
381 trypid = 10;
382 } else {
383 if (randompid)
384 trypid += arc4random() % randompid;
385 }
386retry:
387 /*
388 * If the process ID prototype has wrapped around,
389 * restart somewhat above 0, as the low-numbered procs
390 * tend to include daemons that don't exit.
391 */
392 if (trypid >= PID_MAX) {
393 trypid = trypid % PID_MAX;
394 if (trypid < 100)
395 trypid += 100;
396 pidchecked = 0;
397 }
398 if (trypid >= pidchecked) {
399 int doingzomb = 0;
400
401 pidchecked = PID_MAX;
402 /*
403 * Scan the active and zombie procs to check whether this pid
404 * is in use. Remember the lowest pid that's greater
405 * than trypid, so we can avoid checking for a while.
406 */
407 p2 = LIST_FIRST(&allproc);
408again:
409 for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
410 while (p2->p_pid == trypid ||
411 (p2->p_pgrp != NULL &&
412 (p2->p_pgrp->pg_id == trypid ||
413 (p2->p_session != NULL &&
414 p2->p_session->s_sid == trypid)))) {
415 trypid++;
416 if (trypid >= pidchecked)
417 goto retry;
418 }
419 if (p2->p_pid > trypid && pidchecked > p2->p_pid)
420 pidchecked = p2->p_pid;
421 if (p2->p_pgrp != NULL) {
422 if (p2->p_pgrp->pg_id > trypid &&
423 pidchecked > p2->p_pgrp->pg_id)
424 pidchecked = p2->p_pgrp->pg_id;
425 if (p2->p_session != NULL &&
426 p2->p_session->s_sid > trypid &&
427 pidchecked > p2->p_session->s_sid)
428 pidchecked = p2->p_session->s_sid;
429 }
430 }
431 if (!doingzomb) {
432 doingzomb = 1;
433 p2 = LIST_FIRST(&zombproc);
434 goto again;
435 }
436 }
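	/*
	 * Worked example (illustrative numbers): if lastpid was 1234 and
	 * no pid, process group id or session id in [1235, 1300) is in
	 * use, the scan above leaves pidchecked at 1300, and the next
	 * ~65 forks hand out pids without rescanning the process lists.
	 */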
437 sx_sunlock(&proctree_lock);
438
439 /*
440 * RFHIGHPID does not mess with the lastpid counter during boot.
441 */
442 if (flags & RFHIGHPID)
443 pidchecked = 0;
444 else
445 lastpid = trypid;
446
447 p2 = newproc;
448 p2->p_state = PRS_NEW; /* protect against others */
449 p2->p_pid = trypid;
450 /*
451 * Allow the scheduler to initialize the child.
452 */
453 thread_lock(td);
454 sched_fork(td, td2);
455 thread_unlock(td);
456 AUDIT_ARG_PID(p2->p_pid);
457 LIST_INSERT_HEAD(&allproc, p2, p_list);
458 LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
459
460 PROC_LOCK(p2);
461 PROC_LOCK(p1);
462
463 sx_xunlock(&allproc_lock);
464
465 bcopy(&p1->p_startcopy, &p2->p_startcopy,
466 __rangeof(struct proc, p_startcopy, p_endcopy));
467 pargs_hold(p2->p_args);
468 PROC_UNLOCK(p1);
469
470 bzero(&p2->p_startzero,
471 __rangeof(struct proc, p_startzero, p_endzero));
472
473 p2->p_ucred = crhold(td->td_ucred);
474
475 /* Tell the prison that we exist. */
476 prison_proc_hold(p2->p_ucred->cr_prison);
477
478 PROC_UNLOCK(p2);
479
480 /*
481 * Malloc things while we don't hold any locks.
482 */
483 if (flags & RFSIGSHARE)
484 newsigacts = NULL;
485 else
486 newsigacts = sigacts_alloc();
487
488 /*
489 * Copy filedesc.
490 */
491 if (flags & RFCFDG) {
492 fd = fdinit(p1->p_fd);
493 fdtol = NULL;
494 } else if (flags & RFFDG) {
495 fd = fdcopy(p1->p_fd);
496 fdtol = NULL;
497 } else {
498 fd = fdshare(p1->p_fd);
499 if (p1->p_fdtol == NULL)
500 p1->p_fdtol =
501 filedesc_to_leader_alloc(NULL,
502 NULL,
503 p1->p_leader);
504 if ((flags & RFTHREAD) != 0) {
505 /*
506 * Shared file descriptor table and
507 * shared process leaders.
508 */
509 fdtol = p1->p_fdtol;
510 FILEDESC_XLOCK(p1->p_fd);
511 fdtol->fdl_refcount++;
512 FILEDESC_XUNLOCK(p1->p_fd);
513 } else {
514 /*
515 * Shared file descriptor table, and
516 * different process leaders
517 */
518 fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
519 p1->p_fd,
520 p2);
521 }
522 }
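	/*
	 * Summary of the choices above: RFCFDG gives the child a fresh
	 * descriptor table (fdinit), RFFDG a private copy of the
	 * parent's (fdcopy), and neither flag a shared table (fdshare)
	 * plus leader bookkeeping via fdtol.
	 */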
523 /*
524 * Make a proc table entry for the new process.
525 * Start by zeroing the section of proc that is zero-initialized,
526 * then copy the section that is copied directly from the parent.
527 */
528
529 PROC_LOCK(p2);
530 PROC_LOCK(p1);
531
532 bzero(&td2->td_startzero,
533 __rangeof(struct thread, td_startzero, td_endzero));
534
535 bcopy(&td->td_startcopy, &td2->td_startcopy,
536 __rangeof(struct thread, td_startcopy, td_endcopy));
537
538 bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
539 td2->td_sigstk = td->td_sigstk;
540 td2->td_sigmask = td->td_sigmask;
541 td2->td_flags = TDF_INMEM;
542
543#ifdef VIMAGE
544 td2->td_vnet = NULL;
545 td2->td_vnet_lpush = NULL;
546#endif
547
548 /*
549 * Duplicate sub-structures as needed.
550 * Increase reference counts on shared objects.
551 */
552 p2->p_flag = P_INMEM;
553 p2->p_swtick = ticks;
554 if (p1->p_flag & P_PROFIL)
555 startprofclock(p2);
556 td2->td_ucred = crhold(p2->p_ucred);
557
558 if (flags & RFSIGSHARE) {
559 p2->p_sigacts = sigacts_hold(p1->p_sigacts);
560 } else {
561 sigacts_copy(newsigacts, p1->p_sigacts);
562 p2->p_sigacts = newsigacts;
563 }
564 if (flags & RFLINUXTHPN)
565 p2->p_sigparent = SIGUSR1;
566 else
567 p2->p_sigparent = SIGCHLD;
568
569 p2->p_textvp = p1->p_textvp;
570 p2->p_fd = fd;
571 p2->p_fdtol = fdtol;
572
573 /*
574 * p_limit is copy-on-write. Bump its refcount.
575 */
576 lim_fork(p1, p2);
577
578 pstats_fork(p1->p_stats, p2->p_stats);
579
580 PROC_UNLOCK(p1);
581 PROC_UNLOCK(p2);
582
583 /* Bump references to the text vnode (for procfs) */
584 if (p2->p_textvp)
585 vref(p2->p_textvp);
586
587 /*
588 * Set up linkage for kernel based threading.
589 */
590 if ((flags & RFTHREAD) != 0) {
591 mtx_lock(&ppeers_lock);
592 p2->p_peers = p1->p_peers;
593 p1->p_peers = p2;
594 p2->p_leader = p1->p_leader;
595 mtx_unlock(&ppeers_lock);
596 PROC_LOCK(p1->p_leader);
597 if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
598 PROC_UNLOCK(p1->p_leader);
599 /*
600 * The task leader is exiting, so process p1 is
601 * going to be killed shortly. Since p1 obviously
602 * isn't dead yet, we know that the leader is either
603 * sending SIGKILL's to all the processes in this
604 * task or is sleeping waiting for all the peers to
605 * exit. We let p1 complete the fork, but we need
606 * to go ahead and kill the new process p2 since
607 * the task leader may not get a chance to send
608 * SIGKILL to it. We leave it on the list so that
609 * the task leader will wait for this new process
610 * to commit suicide.
611 */
612 PROC_LOCK(p2);
613 psignal(p2, SIGKILL);
614 PROC_UNLOCK(p2);
615 } else
616 PROC_UNLOCK(p1->p_leader);
617 } else {
618 p2->p_peers = NULL;
619 p2->p_leader = p2;
620 }
621
622 sx_xlock(&proctree_lock);
623 PGRP_LOCK(p1->p_pgrp);
624 PROC_LOCK(p2);
625 PROC_LOCK(p1);
626
627 /*
628 * Preserve some more flags in subprocess. P_PROFIL has already
629 * been preserved.
630 */
631 p2->p_flag |= p1->p_flag & P_SUGID;
632 td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
633 SESS_LOCK(p1->p_session);
634 if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
635 p2->p_flag |= P_CONTROLT;
636 SESS_UNLOCK(p1->p_session);
637 if (flags & RFPPWAIT)
638 p2->p_flag |= P_PPWAIT;
639
640 p2->p_pgrp = p1->p_pgrp;
641 LIST_INSERT_AFTER(p1, p2, p_pglist);
642 PGRP_UNLOCK(p1->p_pgrp);
643 LIST_INIT(&p2->p_children);
644
645 callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);
646
647#ifdef KTRACE
648 /*
649 * Copy traceflag and tracefile if enabled.
650 */
651 mtx_lock(&ktrace_mtx);
652 KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
653 if (p1->p_traceflag & KTRFAC_INHERIT) {
654 p2->p_traceflag = p1->p_traceflag;
655 if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
656 VREF(p2->p_tracevp);
657 KASSERT(p1->p_tracecred != NULL,
658 ("ktrace vnode with no cred"));
659 p2->p_tracecred = crhold(p1->p_tracecred);
660 }
661 }
662 mtx_unlock(&ktrace_mtx);
663#endif
664
665 /*
666 * If PF_FORK is set, the child process inherits the
667 * procfs ioctl flags from its parent.
668 */
669 if (p1->p_pfsflags & PF_FORK) {
670 p2->p_stops = p1->p_stops;
671 p2->p_pfsflags = p1->p_pfsflags;
672 }
673
674#ifdef KDTRACE_HOOKS
675 /*
676 * Tell the DTrace fasttrap provider about the new process
677 * if it has registered an interest.
678 */
679 if (dtrace_fasttrap_fork)
680 dtrace_fasttrap_fork(p1, p2);
681#endif
682
 674 /*
 675 * This begins the section where we must prevent the parent
676 * from being swapped.
677 */
678 _PHOLD(p1);
679 PROC_UNLOCK(p1);
680
681 /*
682 * Attach the new process to its parent.
683 *
684 * If RFNOWAIT is set, the newly created process becomes a child
685 * of init. This effectively disassociates the child from the
686 * parent.
687 */
688 if (flags & RFNOWAIT)
689 pptr = initproc;
690 else
691 pptr = p1;
692 p2->p_pptr = pptr;
693 LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
694 sx_xunlock(&proctree_lock);
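	/*
	 * With RFNOWAIT the child is parented to initproc (pid 1) from
	 * the start, so the caller never gets SIGCHLD for it and never
	 * has to wait(2) on it; init reaps the child instead.
	 */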
695
696 /* Inform accounting that we have forked. */
697 p2->p_acflag = AFORK;
698 PROC_UNLOCK(p2);
699
700 /*
701 * Finish creating the child process. It will return via a different
 702 * execution path later. (i.e., directly into user mode)
703 */
704 vm_forkproc(td, p2, td2, vm2, flags);
705
706 if (flags == (RFFDG | RFPROC)) {
707 PCPU_INC(cnt.v_forks);
708 PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
709 p2->p_vmspace->vm_ssize);
710 } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
711 PCPU_INC(cnt.v_vforks);
712 PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
713 p2->p_vmspace->vm_ssize);
714 } else if (p1 == &proc0) {
715 PCPU_INC(cnt.v_kthreads);
716 PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
717 p2->p_vmspace->vm_ssize);
718 } else {
719 PCPU_INC(cnt.v_rforks);
720 PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
721 p2->p_vmspace->vm_ssize);
722 }
723
724 /*
725 * Both processes are set up, now check if any loadable modules want
726 * to adjust anything.
727 * What if they have an error? XXX
728 */
729 EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);
730
731 /*
732 * Set the child start time and mark the process as being complete.
733 */
734 microuptime(&p2->p_stats->p_start);
735 PROC_SLOCK(p2);
736 p2->p_state = PRS_NORMAL;
737 PROC_SUNLOCK(p2);
738#ifdef KDTRACE_HOOKS
739 /*
740 * Tell the DTrace fasttrap provider about the new process
741 * if it has registered an interest. We have to do this only after
742 * p_state is PRS_NORMAL since the fasttrap module will use pfind()
743 * later on.
744 */
745 if (dtrace_fasttrap_fork) {
746 PROC_LOCK(p1);
747 PROC_LOCK(p2);
748 dtrace_fasttrap_fork(p1, p2);
749 PROC_UNLOCK(p2);
750 PROC_UNLOCK(p1);
751 }
752#endif
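	/*
	 * Note: firing the probe only once the child has left PRS_NEW
	 * (and with both processes locked) matters because a pfind() of
	 * the child's pid from the fasttrap module would presumably not
	 * find a process that is still being constructed.
	 */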
753
754 /*
755 * If RFSTOPPED not requested, make child runnable and add to
756 * run queue.
757 */
758 if ((flags & RFSTOPPED) == 0) {
759 thread_lock(td2);
760 TD_SET_CAN_RUN(td2);
761 sched_add(td2, SRQ_BORING);
762 thread_unlock(td2);
763 }
764
765 /*
766 * Now can be swapped.
767 */
768 PROC_LOCK(p1);
769 _PRELE(p1);
770 PROC_UNLOCK(p1);
771
772 /*
773 * Tell any interested parties about the new process.
774 */
775 knote_fork(&p1->p_klist, p2->p_pid);
776 SDT_PROBE(proc, kernel, , create, p2, p1, flags, 0, 0);
777
778 /*
779 * Preserve synchronization semantics of vfork. If waiting for
780 * child to exec or exit, set P_PPWAIT on child, and sleep on our
781 * proc (in case of exit).
782 */
783 PROC_LOCK(p2);
784 while (p2->p_flag & P_PPWAIT)
785 cv_wait(&p2->p_pwait, &p2->p_mtx);
786 PROC_UNLOCK(p2);
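	/*
	 * The child side clears P_PPWAIT and broadcasts p_pwait from its
	 * execve() and exit() paths, which is what releases a vforking
	 * parent sleeping in the loop above.
	 */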
787
788 /*
789 * Return child proc pointer to parent.
790 */
791 *procp = p2;
792 return (0);
793fail:
794 sx_sunlock(&proctree_lock);
795 if (ppsratecheck(&lastfail, &curfail, 1))
796 printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
797 td->td_ucred->cr_ruid);
798 sx_xunlock(&allproc_lock);
799#ifdef MAC
800 mac_proc_destroy(newproc);
801#endif
802fail1:
803 if (vm2 != NULL)
804 vmspace_free(vm2);
805 uma_zfree(proc_zone, newproc);
806 pause("fork", hz / 2);
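	/*
	 * The half-second pause above throttles callers that retry a
	 * failed fork in a tight loop (e.g. a fork bomb bouncing off
	 * maxproc).
	 */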
807 return (error);
808}
809
810/*
811 * Handle the return of a child process from fork1(). This function
812 * is called from the MD fork_trampoline() entry point.
813 */
814void
815fork_exit(callout, arg, frame)
816 void (*callout)(void *, struct trapframe *);
817 void *arg;
818 struct trapframe *frame;
819{
820 struct proc *p;
821 struct thread *td;
822 struct thread *dtd;
823
824 td = curthread;
825 p = td->td_proc;
826 KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
827
828 CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
829 td, td->td_sched, p->p_pid, td->td_name);
830
831 sched_fork_exit(td);
832 /*
833 * Processes normally resume in mi_switch() after being
834 * cpu_switch()'ed to, but when children start up they arrive here
835 * instead, so we must do much the same things as mi_switch() would.
836 */
837 if ((dtd = PCPU_GET(deadthread))) {
838 PCPU_SET(deadthread, NULL);
839 thread_stash(dtd);
840 }
841 thread_unlock(td);
842
843 /*
 844 * cpu_set_fork_handler() intercepts this function call and has it
 845 * call a non-returning function instead, keeping the thread in
 846 * kernel mode. initproc has its own fork handler, but it does return.
847 */
848 KASSERT(callout != NULL, ("NULL callout in fork_exit"));
849 callout(arg, frame);
850
851 /*
852 * Check if a kernel thread misbehaved and returned from its main
853 * function.
854 */
855 if (p->p_flag & P_KTHREAD) {
856 printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
857 td->td_name, p->p_pid);
858 kproc_exit(0);
859 }
860 mtx_assert(&Giant, MA_NOTOWNED);
861
862 EVENTHANDLER_INVOKE(schedtail, p);
863}
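/*
 * Rough control flow for a new child: cpu_switch() resumes it in the
 * machine-dependent fork_trampoline(), which calls fork_exit() above
 * with (typically) fork_return() below as the callout; fork_return()
 * then drops into user mode via userret().
 */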
864
865/*
866 * Simplified back end of syscall(), used when returning from fork()
867 * directly into user mode. Giant is not held on entry, and must not
868 * be held on return. This function is passed in to fork_exit() as the
869 * first parameter and is called when returning to a new userland process.
870 */
871void
872fork_return(td, frame)
873 struct thread *td;
874 struct trapframe *frame;
875{
876
877 userret(td, frame);
878#ifdef KTRACE
879 if (KTRPOINT(td, KTR_SYSRET))
880 ktrsysret(SYS_fork, 0, 0);
881#endif
882 mtx_assert(&Giant, MA_NOTOWNED);
883}