kern_proc.c: revision 107137 (deleted lines shown first) vs. revision 108470 (added lines shown second)
1/*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95
34 * $FreeBSD: head/sys/kern/kern_proc.c 107137 2002-11-21 09:30:55Z jeff $
34 * $FreeBSD: head/sys/kern/kern_proc.c 108470 2002-12-30 21:18:15Z schweikh $
35 */
36
37#include "opt_ktrace.h"
38#include "opt_kstack_pages.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/lock.h>
44#include <sys/malloc.h>
45#include <sys/mutex.h>
46#include <sys/proc.h>
47#include <sys/kse.h>
48#include <sys/sched.h>
49#include <sys/smp.h>
50#include <sys/sysctl.h>
51#include <sys/filedesc.h>
52#include <sys/tty.h>
53#include <sys/signalvar.h>
54#include <sys/sx.h>
55#include <sys/user.h>
56#include <sys/jail.h>
57#ifdef KTRACE
58#include <sys/uio.h>
59#include <sys/ktrace.h>
60#endif
61
62#include <vm/vm.h>
63#include <vm/vm_extern.h>
64#include <vm/pmap.h>
65#include <vm/vm_map.h>
66#include <vm/uma.h>
67#include <machine/critical.h>
68
69MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
70MALLOC_DEFINE(M_SESSION, "session", "session header");
71static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
72MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
73
74static struct proc *dopfind(register pid_t);
75
76static void doenterpgrp(struct proc *, struct pgrp *);
77
78static void pgdelete(struct pgrp *);
79
80static void orphanpg(struct pgrp *pg);
81
82static void proc_ctor(void *mem, int size, void *arg);
83static void proc_dtor(void *mem, int size, void *arg);
84static void proc_init(void *mem, int size);
85static void proc_fini(void *mem, int size);
86
87/*
88 * Other process lists
89 */
90struct pidhashhead *pidhashtbl;
91u_long pidhash;
92struct pgrphashhead *pgrphashtbl;
93u_long pgrphash;
94struct proclist allproc;
95struct proclist zombproc;
96struct sx allproc_lock;
97struct sx proctree_lock;
98struct mtx pargs_ref_lock;
99struct mtx ppeers_lock;
100uma_zone_t proc_zone;
101uma_zone_t ithread_zone;
102
103int kstack_pages = KSTACK_PAGES;
104int uarea_pages = UAREA_PAGES;
105SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
106SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, "");
107
108#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
109
110CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
111
112/*
113 * Initialize global process hashing structures.
114 */
115void
116procinit()
117{
118
119 sx_init(&allproc_lock, "allproc");
120 sx_init(&proctree_lock, "proctree");
121 mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF);
122 mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
123 LIST_INIT(&allproc);
124 LIST_INIT(&zombproc);
125 pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
126 pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
127 proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
128 proc_ctor, proc_dtor, proc_init, proc_fini,
129 UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
130 uihashinit();
131}
132
133/*
134 * Prepare a proc for use.
135 */
136static void
137proc_ctor(void *mem, int size, void *arg)
138{
139 struct proc *p;
140
141 p = (struct proc *)mem;
142}
143
144/*
145 * Reclaim a proc after use.
146 */
147static void
148proc_dtor(void *mem, int size, void *arg)
149{
150 struct proc *p;
151 struct thread *td;
152 struct ksegrp *kg;
153 struct kse *ke;
154
155 /* INVARIANTS checks go here */
156 p = (struct proc *)mem;
157 KASSERT((p->p_numthreads == 1),
158 ("bad number of threads in exiting process"));
159 td = FIRST_THREAD_IN_PROC(p);
160 KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
161 kg = FIRST_KSEGRP_IN_PROC(p);
162 KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
163 ke = FIRST_KSE_IN_KSEGRP(kg);
164 KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
165
 166 /* Dispose of an alternate kstack, if one exists.
 167 * XXX What if there is more than one thread in the proc?
 168 * The first thread in the proc is special and is not
 169 * freed, so this must be done here.
170 */
171 if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
172 pmap_dispose_altkstack(td);
173
174 /*
 175 * We want to make sure we know the initial linkages,
 176 * so for now tear them down and remake them.
 177 * This is probably unneeded, as we can probably rely
178 * on the state coming in here from wait4().
179 */
180 proc_linkup(p, kg, ke, td);
181}
182
183/*
184 * Initialize type-stable parts of a proc (when newly created).
185 */
186static void
187proc_init(void *mem, int size)
188{
189 struct proc *p;
190 struct thread *td;
191 struct ksegrp *kg;
192 struct kse *ke;
193
194 p = (struct proc *)mem;
195 p->p_sched = (struct p_sched *)&p[1];
196 vm_proc_new(p);
197 td = thread_alloc();
198 ke = kse_alloc();
199 kg = ksegrp_alloc();
200 proc_linkup(p, kg, ke, td);
201}
202
203/*
204 * Tear down type-stable parts of a proc (just before being discarded)
205 */
206static void
207proc_fini(void *mem, int size)
208{
209 struct proc *p;
210 struct thread *td;
211 struct ksegrp *kg;
212 struct kse *ke;
213
214 p = (struct proc *)mem;
215 KASSERT((p->p_numthreads == 1),
216 ("bad number of threads in freeing process"));
217 td = FIRST_THREAD_IN_PROC(p);
218 KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
219 kg = FIRST_KSEGRP_IN_PROC(p);
220 KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
221 ke = FIRST_KSE_IN_KSEGRP(kg);
222 KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
223 vm_proc_dispose(p);
224 thread_free(td);
225 ksegrp_free(kg);
226 kse_free(ke);
227}
228
229/*
230 * Is p an inferior of the current process?
231 */
232int
233inferior(p)
234 register struct proc *p;
235{
236
237 sx_assert(&proctree_lock, SX_LOCKED);
238 for (; p != curproc; p = p->p_pptr)
239 if (p->p_pid == 0)
240 return (0);
241 return (1);
242}
243
244/*
245 * Locate a process by number
246 */
247struct proc *
248pfind(pid)
249 register pid_t pid;
250{
251 register struct proc *p;
252
253 sx_slock(&allproc_lock);
254 p = dopfind(pid);
255 sx_sunlock(&allproc_lock);
256 return (p);
257}
258
259static struct proc *
260dopfind(pid)
261 register pid_t pid;
262{
263 register struct proc *p;
264
265 sx_assert(&allproc_lock, SX_LOCKED);
266
267 LIST_FOREACH(p, PIDHASH(pid), p_hash)
268 if (p->p_pid == pid) {
269 PROC_LOCK(p);
270 break;
271 }
272 return (p);
273}
274
275/*
276 * Locate a process group by number.
277 * The caller must hold proctree_lock.
278 */
279struct pgrp *
280pgfind(pgid)
281 register pid_t pgid;
282{
283 register struct pgrp *pgrp;
284
285 sx_assert(&proctree_lock, SX_LOCKED);
286
287 LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
288 if (pgrp->pg_id == pgid) {
289 PGRP_LOCK(pgrp);
290 return (pgrp);
291 }
292 }
293 return (NULL);
294}
295
296/*
297 * Create a new process group.
298 * pgid must be equal to the pid of p.
299 * Begin a new session if required.
300 */
301int
302enterpgrp(p, pgid, pgrp, sess)
303 register struct proc *p;
304 pid_t pgid;
305 struct pgrp *pgrp;
306 struct session *sess;
307{
308 struct pgrp *pgrp2;
309
310 sx_assert(&proctree_lock, SX_XLOCKED);
311
312 KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL"));
313 KASSERT(p->p_pid == pgid,
314 ("enterpgrp: new pgrp and pid != pgid"));
315
316 pgrp2 = pgfind(pgid);
317
318 KASSERT(pgrp2 == NULL,
319 ("enterpgrp: pgrp with pgid exists"));
320 KASSERT(!SESS_LEADER(p),
321 ("enterpgrp: session leader attempted setpgrp"));
322
323 mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
324
325 if (sess != NULL) {
326 /*
327 * new session
328 */
329 mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF);
330 PROC_LOCK(p);
331 p->p_flag &= ~P_CONTROLT;
332 PROC_UNLOCK(p);
333 PGRP_LOCK(pgrp);
334 sess->s_leader = p;
335 sess->s_sid = p->p_pid;
336 sess->s_count = 1;
337 sess->s_ttyvp = NULL;
338 sess->s_ttyp = NULL;
339 bcopy(p->p_session->s_login, sess->s_login,
340 sizeof(sess->s_login));
341 pgrp->pg_session = sess;
342 KASSERT(p == curproc,
343 ("enterpgrp: mksession and p != curproc"));
344 } else {
345 pgrp->pg_session = p->p_session;
346 SESS_LOCK(pgrp->pg_session);
347 pgrp->pg_session->s_count++;
348 SESS_UNLOCK(pgrp->pg_session);
349 PGRP_LOCK(pgrp);
350 }
351 pgrp->pg_id = pgid;
352 LIST_INIT(&pgrp->pg_members);
353
354 /*
355 * As we have an exclusive lock of proctree_lock,
356 * this should not deadlock.
357 */
358 LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
359 pgrp->pg_jobc = 0;
360 SLIST_INIT(&pgrp->pg_sigiolst);
361 PGRP_UNLOCK(pgrp);
362
363 doenterpgrp(p, pgrp);
364
365 return (0);
366}
367
368/*
369 * Move p to an existing process group
370 */
371int
372enterthispgrp(p, pgrp)
373 register struct proc *p;
374 struct pgrp *pgrp;
375{
376
377 sx_assert(&proctree_lock, SX_XLOCKED);
378 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
379 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
380 PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
381 SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
382 KASSERT(pgrp->pg_session == p->p_session,
383 ("%s: pgrp's session %p, p->p_session %p.\n",
384 __func__,
385 pgrp->pg_session,
386 p->p_session));
387 KASSERT(pgrp != p->p_pgrp,
388 ("%s: p belongs to pgrp.", __func__));
389
390 doenterpgrp(p, pgrp);
391
392 return (0);
393}
394
395/*
396 * Move p to a process group
397 */
398static void
399doenterpgrp(p, pgrp)
400 struct proc *p;
401 struct pgrp *pgrp;
402{
403 struct pgrp *savepgrp;
404
405 sx_assert(&proctree_lock, SX_XLOCKED);
406 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
407 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
408 PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED);
409 SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED);
410
411 savepgrp = p->p_pgrp;
412
413 /*
414 * Adjust eligibility of affected pgrps to participate in job control.
415 * Increment eligibility counts before decrementing, otherwise we
416 * could reach 0 spuriously during the first call.
417 */
418 fixjobc(p, pgrp, 1);
419 fixjobc(p, p->p_pgrp, 0);
420
421 PGRP_LOCK(pgrp);
422 PGRP_LOCK(savepgrp);
423 PROC_LOCK(p);
424 LIST_REMOVE(p, p_pglist);
425 p->p_pgrp = pgrp;
426 PROC_UNLOCK(p);
427 LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
428 PGRP_UNLOCK(savepgrp);
429 PGRP_UNLOCK(pgrp);
430 if (LIST_EMPTY(&savepgrp->pg_members))
431 pgdelete(savepgrp);
432}
433
434/*
435 * remove process from process group
436 */
437int
438leavepgrp(p)
439 register struct proc *p;
440{
441 struct pgrp *savepgrp;
442
443 sx_assert(&proctree_lock, SX_XLOCKED);
444 savepgrp = p->p_pgrp;
445 PGRP_LOCK(savepgrp);
446 PROC_LOCK(p);
447 LIST_REMOVE(p, p_pglist);
448 p->p_pgrp = NULL;
449 PROC_UNLOCK(p);
450 PGRP_UNLOCK(savepgrp);
451 if (LIST_EMPTY(&savepgrp->pg_members))
452 pgdelete(savepgrp);
453 return (0);
454}
455
456/*
457 * delete a process group
458 */
459static void
460pgdelete(pgrp)
461 register struct pgrp *pgrp;
462{
463 struct session *savesess;
464
465 sx_assert(&proctree_lock, SX_XLOCKED);
466 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
467 SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
468
469 /*
470 * Reset any sigio structures pointing to us as a result of
471 * F_SETOWN with our pgid.
472 */
473 funsetownlst(&pgrp->pg_sigiolst);
474
475 PGRP_LOCK(pgrp);
476 if (pgrp->pg_session->s_ttyp != NULL &&
477 pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
478 pgrp->pg_session->s_ttyp->t_pgrp = NULL;
479 LIST_REMOVE(pgrp, pg_hash);
480 savesess = pgrp->pg_session;
481 SESS_LOCK(savesess);
482 savesess->s_count--;
483 SESS_UNLOCK(savesess);
484 PGRP_UNLOCK(pgrp);
485 if (savesess->s_count == 0) {
486 mtx_destroy(&savesess->s_mtx);
487 FREE(pgrp->pg_session, M_SESSION);
488 }
489 mtx_destroy(&pgrp->pg_mtx);
490 FREE(pgrp, M_PGRP);
491}
492
493/*
494 * Adjust pgrp jobc counters when specified process changes process group.
495 * We count the number of processes in each process group that "qualify"
496 * the group for terminal job control (those with a parent in a different
497 * process group of the same session). If that count reaches zero, the
498 * process group becomes orphaned. Check both the specified process'
499 * process group and that of its children.
500 * entering == 0 => p is leaving specified group.
501 * entering == 1 => p is entering specified group.
502 */
503void
504fixjobc(p, pgrp, entering)
505 register struct proc *p;
506 register struct pgrp *pgrp;
507 int entering;
508{
509 register struct pgrp *hispgrp;
510 register struct session *mysession;
511
512 sx_assert(&proctree_lock, SX_LOCKED);
513 PROC_LOCK_ASSERT(p, MA_NOTOWNED);
514 PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED);
515 SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED);
516
517 /*
518 * Check p's parent to see whether p qualifies its own process
519 * group; if so, adjust count for p's process group.
520 */
521 mysession = pgrp->pg_session;
522 if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
523 hispgrp->pg_session == mysession) {
524 PGRP_LOCK(pgrp);
525 if (entering)
526 pgrp->pg_jobc++;
527 else {
528 --pgrp->pg_jobc;
529 if (pgrp->pg_jobc == 0)
530 orphanpg(pgrp);
531 }
532 PGRP_UNLOCK(pgrp);
533 }
534
535 /*
536 * Check this process' children to see whether they qualify
537 * their process groups; if so, adjust counts for children's
538 * process groups.
539 */
540 LIST_FOREACH(p, &p->p_children, p_sibling) {
541 if ((hispgrp = p->p_pgrp) != pgrp &&
542 hispgrp->pg_session == mysession &&
543 p->p_state != PRS_ZOMBIE) {
544 PGRP_LOCK(hispgrp);
545 if (entering)
546 hispgrp->pg_jobc++;
547 else {
548 --hispgrp->pg_jobc;
549 if (hispgrp->pg_jobc == 0)
550 orphanpg(hispgrp);
551 }
552 PGRP_UNLOCK(hispgrp);
553 }
554 }
555}
556
557/*
558 * A process group has become orphaned;
559 * if there are any stopped processes in the group,
 560 * hang up all processes in that group.
561 */
562static void
563orphanpg(pg)
564 struct pgrp *pg;
565{
566 register struct proc *p;
567
568 PGRP_LOCK_ASSERT(pg, MA_OWNED);
569
570 mtx_lock_spin(&sched_lock);
571 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
572 if (P_SHOULDSTOP(p)) {
573 mtx_unlock_spin(&sched_lock);
574 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
575 PROC_LOCK(p);
576 psignal(p, SIGHUP);
577 psignal(p, SIGCONT);
578 PROC_UNLOCK(p);
579 }
580 return;
581 }
582 }
583 mtx_unlock_spin(&sched_lock);
584}
585
586#include "opt_ddb.h"
587#ifdef DDB
588#include <ddb/ddb.h>
589
590DB_SHOW_COMMAND(pgrpdump, pgrpdump)
591{
592 register struct pgrp *pgrp;
593 register struct proc *p;
594 register int i;
595
596 for (i = 0; i <= pgrphash; i++) {
597 if (!LIST_EMPTY(&pgrphashtbl[i])) {
598 printf("\tindx %d\n", i);
599 LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
600 printf(
601 "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
602 (void *)pgrp, (long)pgrp->pg_id,
603 (void *)pgrp->pg_session,
604 pgrp->pg_session->s_count,
605 (void *)LIST_FIRST(&pgrp->pg_members));
606 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
607 printf("\t\tpid %ld addr %p pgrp %p\n",
608 (long)p->p_pid, (void *)p,
609 (void *)p->p_pgrp);
610 }
611 }
612 }
613 }
614}
615#endif /* DDB */
616
617/*
618 * Fill in an kinfo_proc structure for the specified process.
618 * Fill in a kinfo_proc structure for the specified process.
619 * Must be called with the target process locked.
620 */
621void
622fill_kinfo_proc(p, kp)
623 struct proc *p;
624 struct kinfo_proc *kp;
625{
626 struct thread *td;
627 struct kse *ke;
628 struct ksegrp *kg;
629 struct tty *tp;
630 struct session *sp;
631 struct timeval tv;
632
633 bzero(kp, sizeof(*kp));
634
635 kp->ki_structsize = sizeof(*kp);
636 kp->ki_paddr = p;
637 PROC_LOCK_ASSERT(p, MA_OWNED);
638 kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
639 kp->ki_args = p->p_args;
640 kp->ki_textvp = p->p_textvp;
641#ifdef KTRACE
642 kp->ki_tracep = p->p_tracep;
643 mtx_lock(&ktrace_mtx);
644 kp->ki_traceflag = p->p_traceflag;
645 mtx_unlock(&ktrace_mtx);
646#endif
647 kp->ki_fd = p->p_fd;
648 kp->ki_vmspace = p->p_vmspace;
649 if (p->p_ucred) {
650 kp->ki_uid = p->p_ucred->cr_uid;
651 kp->ki_ruid = p->p_ucred->cr_ruid;
652 kp->ki_svuid = p->p_ucred->cr_svuid;
653 /* XXX bde doesn't like KI_NGROUPS */
654 kp->ki_ngroups = min(p->p_ucred->cr_ngroups, KI_NGROUPS);
655 bcopy(p->p_ucred->cr_groups, kp->ki_groups,
656 kp->ki_ngroups * sizeof(gid_t));
657 kp->ki_rgid = p->p_ucred->cr_rgid;
658 kp->ki_svgid = p->p_ucred->cr_svgid;
659 }
660 if (p->p_procsig) {
661 kp->ki_sigignore = p->p_procsig->ps_sigignore;
662 kp->ki_sigcatch = p->p_procsig->ps_sigcatch;
663 }
664 mtx_lock_spin(&sched_lock);
665 if (p->p_state != PRS_NEW &&
666 p->p_state != PRS_ZOMBIE &&
667 p->p_vmspace != NULL) {
668 struct vmspace *vm = p->p_vmspace;
669
670 kp->ki_size = vm->vm_map.size;
671 kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
672 if (p->p_sflag & PS_INMEM)
673 kp->ki_rssize += UAREA_PAGES;
674 FOREACH_THREAD_IN_PROC(p, td) /* XXXKSE: thread swapout check */
675 kp->ki_rssize += KSTACK_PAGES;
676 kp->ki_swrss = vm->vm_swrss;
677 kp->ki_tsize = vm->vm_tsize;
678 kp->ki_dsize = vm->vm_dsize;
679 kp->ki_ssize = vm->vm_ssize;
680 }
681 if ((p->p_sflag & PS_INMEM) && p->p_stats) {
682 kp->ki_start = p->p_stats->p_start;
683 kp->ki_rusage = p->p_stats->p_ru;
684 kp->ki_childtime.tv_sec = p->p_stats->p_cru.ru_utime.tv_sec +
685 p->p_stats->p_cru.ru_stime.tv_sec;
686 kp->ki_childtime.tv_usec = p->p_stats->p_cru.ru_utime.tv_usec +
687 p->p_stats->p_cru.ru_stime.tv_usec;
688 }
689 if (p->p_state != PRS_ZOMBIE) {
690 td = FIRST_THREAD_IN_PROC(p);
691 if (td == NULL) {
692 /* XXXKSE: This should never happen. */
693 printf("fill_kinfo_proc(): pid %d has no threads!\n",
694 p->p_pid);
695 mtx_unlock_spin(&sched_lock);
696 return;
697 }
698 if (!(p->p_flag & P_KSES)) {
699 if (td->td_wmesg != NULL) {
700 strlcpy(kp->ki_wmesg, td->td_wmesg,
701 sizeof(kp->ki_wmesg));
702 }
703 if (TD_ON_LOCK(td)) {
704 kp->ki_kiflag |= KI_LOCKBLOCK;
705 strlcpy(kp->ki_lockname, td->td_lockname,
706 sizeof(kp->ki_lockname));
707 }
708 }
709
710 if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */
711 if (TD_ON_RUNQ(td) ||
712 TD_CAN_RUN(td) ||
713 TD_IS_RUNNING(td)) {
714 kp->ki_stat = SRUN;
715 } else if (P_SHOULDSTOP(p)) {
716 kp->ki_stat = SSTOP;
717 } else if (TD_IS_SLEEPING(td)) {
718 kp->ki_stat = SSLEEP;
719 } else if (TD_ON_LOCK(td)) {
720 kp->ki_stat = SLOCK;
721 } else {
722 kp->ki_stat = SWAIT;
723 }
724 } else {
725 kp->ki_stat = SIDL;
726 }
727
728 kp->ki_sflag = p->p_sflag;
729 kp->ki_swtime = p->p_swtime;
730 kp->ki_pid = p->p_pid;
731 /* vvv XXXKSE */
732 if (!(p->p_flag & P_KSES)) {
733 kg = td->td_ksegrp;
734 ke = td->td_kse;
735 KASSERT((ke != NULL), ("fill_kinfo_proc: Null KSE"));
736 bintime2timeval(&p->p_runtime, &tv);
737 kp->ki_runtime =
738 tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
739
740 /* things in the KSE GROUP */
741 kp->ki_estcpu = kg->kg_estcpu;
742 kp->ki_slptime = kg->kg_slptime;
743 kp->ki_pri.pri_user = kg->kg_user_pri;
744 kp->ki_pri.pri_class = kg->kg_pri_class;
745 kp->ki_nice = kg->kg_nice;
746
747 /* Things in the thread */
748 kp->ki_wchan = td->td_wchan;
749 kp->ki_pri.pri_level = td->td_priority;
750 kp->ki_pri.pri_native = td->td_base_pri;
751 kp->ki_lastcpu = td->td_lastcpu;
752 kp->ki_tdflags = td->td_flags;
753 kp->ki_pcb = td->td_pcb;
754 kp->ki_kstack = (void *)td->td_kstack;
755
756 /* Things in the kse */
757 kp->ki_rqindex = ke->ke_rqindex;
758 kp->ki_oncpu = ke->ke_oncpu;
759 kp->ki_pctcpu = sched_pctcpu(ke);
760 } else {
761 kp->ki_oncpu = -1;
762 kp->ki_lastcpu = -1;
763 kp->ki_tdflags = -1;
764 /* All the rest are 0 for now */
765 }
766 /* ^^^ XXXKSE */
767 } else {
768 kp->ki_stat = SZOMB;
769 }
770 mtx_unlock_spin(&sched_lock);
771 sp = NULL;
772 tp = NULL;
773 if (p->p_pgrp) {
774 kp->ki_pgid = p->p_pgrp->pg_id;
775 kp->ki_jobc = p->p_pgrp->pg_jobc;
776 sp = p->p_pgrp->pg_session;
777
778 if (sp != NULL) {
779 kp->ki_sid = sp->s_sid;
780 SESS_LOCK(sp);
781 strlcpy(kp->ki_login, sp->s_login,
782 sizeof(kp->ki_login));
783 if (sp->s_ttyvp)
784 kp->ki_kiflag |= KI_CTTY;
785 if (SESS_LEADER(p))
786 kp->ki_kiflag |= KI_SLEADER;
787 tp = sp->s_ttyp;
788 SESS_UNLOCK(sp);
789 }
790 }
791 if ((p->p_flag & P_CONTROLT) && tp != NULL) {
792 kp->ki_tdev = dev2udev(tp->t_dev);
793 kp->ki_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
794 if (tp->t_session)
795 kp->ki_tsid = tp->t_session->s_sid;
796 } else
797 kp->ki_tdev = NOUDEV;
798 if (p->p_comm[0] != '\0') {
799 strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm));
800 strlcpy(kp->ki_ocomm, p->p_comm, sizeof(kp->ki_ocomm));
801 }
802 kp->ki_siglist = p->p_siglist;
803 kp->ki_sigmask = p->p_sigmask;
804 kp->ki_xstat = p->p_xstat;
805 kp->ki_acflag = p->p_acflag;
806 kp->ki_flag = p->p_flag;
807 /* If jailed(p->p_ucred), emulate the old P_JAILED flag. */
808 if (jailed(p->p_ucred))
809 kp->ki_flag |= P_JAILED;
810 kp->ki_lock = p->p_lock;
811 if (p->p_pptr)
812 kp->ki_ppid = p->p_pptr->p_pid;
813}
814
815/*
816 * Locate a zombie process by number
817 */
818struct proc *
819zpfind(pid_t pid)
820{
821 struct proc *p;
822
823 sx_slock(&allproc_lock);
824 LIST_FOREACH(p, &zombproc, p_list)
825 if (p->p_pid == pid) {
826 PROC_LOCK(p);
827 break;
828 }
829 sx_sunlock(&allproc_lock);
830 return (p);
831}
832
833
834/*
835 * Must be called with the process locked and will return with it unlocked.
836 */
837static int
838sysctl_out_proc(struct proc *p, struct sysctl_req *req, int doingzomb)
839{
840 struct kinfo_proc kinfo_proc;
841 int error;
842 struct proc *np;
843 pid_t pid = p->p_pid;
844
845 PROC_LOCK_ASSERT(p, MA_OWNED);
846 fill_kinfo_proc(p, &kinfo_proc);
847 PROC_UNLOCK(p);
848 error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc, sizeof(kinfo_proc));
849 if (error)
850 return (error);
851 if (doingzomb)
852 np = zpfind(pid);
853 else {
854 if (pid == 0)
855 return (0);
856 np = pfind(pid);
857 }
858 if (np == NULL)
859 return EAGAIN;
860 if (np != p) {
861 PROC_UNLOCK(np);
862 return EAGAIN;
863 }
864 PROC_UNLOCK(np);
865 return (0);
866}
867
868static int
869sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
870{
871 int *name = (int*) arg1;
872 u_int namelen = arg2;
873 struct proc *p;
874 int doingzomb;
875 int error = 0;
876
877 if (oidp->oid_number == KERN_PROC_PID) {
878 if (namelen != 1)
879 return (EINVAL);
880 p = pfind((pid_t)name[0]);
881 if (!p)
882 return (0);
883 if (p_cansee(curthread, p)) {
884 PROC_UNLOCK(p);
885 return (0);
886 }
887 error = sysctl_out_proc(p, req, 0);
888 return (error);
889 }
890 if (oidp->oid_number == KERN_PROC_ALL && !namelen)
891 ;
892 else if (oidp->oid_number != KERN_PROC_ALL && namelen == 1)
893 ;
894 else
895 return (EINVAL);
896
897 if (!req->oldptr) {
898 /* overestimate by 5 procs */
899 error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
900 if (error)
901 return (error);
902 }
903 sysctl_wire_old_buffer(req, 0);
904 sx_slock(&allproc_lock);
905 for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
906 if (!doingzomb)
907 p = LIST_FIRST(&allproc);
908 else
909 p = LIST_FIRST(&zombproc);
910 for (; p != 0; p = LIST_NEXT(p, p_list)) {
911 PROC_LOCK(p);
912 /*
913 * Show a user only appropriate processes.
914 */
915 if (p_cansee(curthread, p)) {
916 PROC_UNLOCK(p);
917 continue;
918 }
919 /*
920 * Skip embryonic processes.
921 */
922 if (p->p_state == PRS_NEW) {
923 PROC_UNLOCK(p);
924 continue;
925 }
926 /*
927 * TODO - make more efficient (see notes below).
928 * do by session.
929 */
930 switch (oidp->oid_number) {
931
932 case KERN_PROC_PGRP:
933 /* could do this by traversing pgrp */
934 if (p->p_pgrp == NULL ||
935 p->p_pgrp->pg_id != (pid_t)name[0]) {
936 PROC_UNLOCK(p);
937 continue;
938 }
939 break;
940
941 case KERN_PROC_TTY:
942 if ((p->p_flag & P_CONTROLT) == 0 ||
943 p->p_session == NULL) {
944 PROC_UNLOCK(p);
945 continue;
946 }
947 SESS_LOCK(p->p_session);
948 if (p->p_session->s_ttyp == NULL ||
949 dev2udev(p->p_session->s_ttyp->t_dev) !=
950 (udev_t)name[0]) {
951 SESS_UNLOCK(p->p_session);
952 PROC_UNLOCK(p);
953 continue;
954 }
955 SESS_UNLOCK(p->p_session);
956 break;
957
958 case KERN_PROC_UID:
959 if (p->p_ucred == NULL ||
960 p->p_ucred->cr_uid != (uid_t)name[0]) {
961 PROC_UNLOCK(p);
962 continue;
963 }
964 break;
965
966 case KERN_PROC_RUID:
967 if (p->p_ucred == NULL ||
968 p->p_ucred->cr_ruid != (uid_t)name[0]) {
969 PROC_UNLOCK(p);
970 continue;
971 }
972 break;
973 }
974
975 error = sysctl_out_proc(p, req, doingzomb);
976 if (error) {
977 sx_sunlock(&allproc_lock);
978 return (error);
979 }
980 }
981 }
982 sx_sunlock(&allproc_lock);
983 return (0);
984}
985
986struct pargs *
987pargs_alloc(int len)
988{
989 struct pargs *pa;
990
991 MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS,
992 M_WAITOK);
993 pa->ar_ref = 1;
994 pa->ar_length = len;
995 return (pa);
996}
997
998void
999pargs_free(struct pargs *pa)
1000{
1001
1002 FREE(pa, M_PARGS);
1003}
1004
1005void
1006pargs_hold(struct pargs *pa)
1007{
1008
1009 if (pa == NULL)
1010 return;
1011 PARGS_LOCK(pa);
1012 pa->ar_ref++;
1013 PARGS_UNLOCK(pa);
1014}
1015
1016void
1017pargs_drop(struct pargs *pa)
1018{
1019
1020 if (pa == NULL)
1021 return;
1022 PARGS_LOCK(pa);
1023 if (--pa->ar_ref == 0) {
1024 PARGS_UNLOCK(pa);
1025 pargs_free(pa);
1026 } else
1027 PARGS_UNLOCK(pa);
1028}
1029
1030/*
1031 * This sysctl allows a process to retrieve the argument list or process
1032 * title for another process without groping around in the address space
 1033 * of the other process. It also allows a process to set its own "process
 1034 * title" to a string of its own choice.
1035 */
1036static int
1037sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1038{
1039 int *name = (int*) arg1;
1040 u_int namelen = arg2;
1041 struct proc *p;
1042 struct pargs *pa;
1043 int error = 0;
1044
1045 if (namelen != 1)
1046 return (EINVAL);
1047
1048 p = pfind((pid_t)name[0]);
1049 if (!p)
1050 return (0);
1051
1052 if ((!ps_argsopen) && p_cansee(curthread, p)) {
1053 PROC_UNLOCK(p);
1054 return (0);
1055 }
1056 PROC_UNLOCK(p);
1057
1058 if (req->newptr && curproc != p)
1059 return (EPERM);
1060
1061 PROC_LOCK(p);
1062 pa = p->p_args;
1063 pargs_hold(pa);
1064 PROC_UNLOCK(p);
1065 if (req->oldptr && pa != NULL) {
1066 error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1067 }
1068 pargs_drop(pa);
1069 if (req->newptr == NULL)
1070 return (error);
1071
1072 PROC_LOCK(p);
1073 pa = p->p_args;
1074 p->p_args = NULL;
1075 PROC_UNLOCK(p);
1076 pargs_drop(pa);
1077
1078 if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
1079 return (error);
1080
1081 pa = pargs_alloc(req->newlen);
1082 error = SYSCTL_IN(req, pa->ar_args, req->newlen);
1083 if (!error) {
1084 PROC_LOCK(p);
1085 p->p_args = pa;
1086 PROC_UNLOCK(p);
1087 } else
1088 pargs_free(pa);
1089 return (error);
1090}
1091
1092SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");
1093
1094SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
1095 0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
1096
1097SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
1098 sysctl_kern_proc, "Process table");
1099
1100SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
1101 sysctl_kern_proc, "Process table");
1102
1103SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
1104 sysctl_kern_proc, "Process table");
1105
1106SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
1107 sysctl_kern_proc, "Process table");
1108
1109SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
1110 sysctl_kern_proc, "Process table");
1111
1112SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
1113 sysctl_kern_proc_args, "Process argument list");
1114
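
For context on how the kern.proc sysctl tree declared above is consumed, here is a minimal userland sketch; it is not part of kern_proc.c, and the program structure and error handling are illustrative only. It queries the KERN_PROC_PID branch, which sysctl_kern_proc() services by calling fill_kinfo_proc(), and prints a few of the fields that routine fills in. It assumes a FreeBSD host where struct kinfo_proc is available from <sys/user.h>.

/*
 * Userland sketch (not part of kern_proc.c): ask the kernel for the
 * kinfo_proc record of the current process via kern.proc.pid, the
 * node handled by sysctl_kern_proc() above.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>		/* struct kinfo_proc */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	struct kinfo_proc kp;
	size_t len;
	int mib[4];

	mib[0] = CTL_KERN;
	mib[1] = KERN_PROC;
	mib[2] = KERN_PROC_PID;	/* handler sees namelen 1: the pid */
	mib[3] = (int)getpid();
	len = sizeof(kp);

	/* The handler copies out one struct kinfo_proc for this pid. */
	if (sysctl(mib, 4, &kp, &len, NULL, 0) == -1) {
		perror("sysctl kern.proc.pid");
		return (EXIT_FAILURE);
	}

	printf("pid %d ppid %d pgid %d comm %s\n",
	    (int)kp.ki_pid, (int)kp.ki_ppid, (int)kp.ki_pgid, kp.ki_comm);
	return (EXIT_SUCCESS);
}

The other nodes registered at the bottom of the file (pgrp, tty, uid, ruid, args) are reached the same way, with the third MIB component swapped and the final component interpreted as a pgid, device, uid, or pid, as the corresponding cases in sysctl_kern_proc() and sysctl_kern_proc_args() show.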