kern_sig.c: diff of revision 116101 (deleted lines) against revision 116182 (added lines)
1/*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
39 * $FreeBSD: head/sys/kern/kern_sig.c 116101 2003-06-09 17:38:32Z jhb $
40 */
41
39 */
40
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD: head/sys/kern/kern_sig.c 116182 2003-06-11 00:56:59Z obrien $");
43
42#include "opt_compat.h"
43#include "opt_ktrace.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/signalvar.h>
48#include <sys/vnode.h>
49#include <sys/acct.h>
50#include <sys/condvar.h>
51#include <sys/event.h>
52#include <sys/fcntl.h>
53#include <sys/kernel.h>
54#include <sys/ktr.h>
55#include <sys/ktrace.h>
56#include <sys/lock.h>
57#include <sys/malloc.h>
58#include <sys/mutex.h>
59#include <sys/namei.h>
60#include <sys/proc.h>
61#include <sys/pioctl.h>
62#include <sys/resourcevar.h>
63#include <sys/smp.h>
64#include <sys/stat.h>
65#include <sys/sx.h>
66#include <sys/syscallsubr.h>
67#include <sys/sysctl.h>
68#include <sys/sysent.h>
69#include <sys/syslog.h>
70#include <sys/sysproto.h>
71#include <sys/unistd.h>
72#include <sys/wait.h>
73
74#include <machine/cpu.h>
75
76#if defined (__alpha__) && !defined(COMPAT_43)
77#error "You *really* need COMPAT_43 on the alpha for longjmp(3)"
78#endif
79
80#define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
81
82static int coredump(struct thread *);
83static char *expand_name(const char *, uid_t, pid_t);
84static int killpg1(struct thread *td, int sig, int pgid, int all);
85static int issignal(struct thread *p);
86static int sigprop(int sig);
87static void stop(struct proc *);
88static void tdsigwakeup(struct thread *td, int sig, sig_t action);
89static int filt_sigattach(struct knote *kn);
90static void filt_sigdetach(struct knote *kn);
91static int filt_signal(struct knote *kn, long hint);
92static struct thread *sigtd(struct proc *p, int sig, int prop);
93static int kern_sigtimedwait(struct thread *td, sigset_t set,
94 siginfo_t *info, struct timespec *timeout);
95
96struct filterops sig_filtops =
97 { 0, filt_sigattach, filt_sigdetach, filt_signal };
98
99static int kern_logsigexit = 1;
100SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
101 &kern_logsigexit, 0,
102 "Log processes quitting on abnormal signals to syslog(3)");
103
104/*
 105 * Policy -- Can ucred cr1 send SIGIO to a process with ucred cr2?
106 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
107 * in the right situations.
108 */
109#define CANSIGIO(cr1, cr2) \
110 ((cr1)->cr_uid == 0 || \
111 (cr1)->cr_ruid == (cr2)->cr_ruid || \
112 (cr1)->cr_uid == (cr2)->cr_ruid || \
113 (cr1)->cr_ruid == (cr2)->cr_uid || \
114 (cr1)->cr_uid == (cr2)->cr_uid)
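/*
 * Illustrative sketch (not part of this revision): the kind of check a
 * SIGIO delivery path is expected to make with CANSIGIO() before posting
 * the signal.  The helper and its caller are hypothetical; the real
 * consumers (pgsigio() and friends) do additional bookkeeping.
 */
#if 0
static void
example_deliver_sigio(struct ucred *sender, struct proc *p)
{

	PROC_LOCK(p);
	if (CANSIGIO(sender, p->p_ucred))
		psignal(p, SIGIO);	/* owner is allowed to signal target */
	PROC_UNLOCK(p);
}
#endif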
115
116int sugid_coredump;
117SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
118 &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
119
120static int do_coredump = 1;
121SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
122 &do_coredump, 0, "Enable/Disable coredumps");
123
124/*
125 * Signal properties and actions.
126 * The array below categorizes the signals and their default actions
127 * according to the following properties:
128 */
129#define SA_KILL 0x01 /* terminates process by default */
130#define SA_CORE 0x02 /* ditto and coredumps */
131#define SA_STOP 0x04 /* suspend process */
132#define SA_TTYSTOP 0x08 /* ditto, from tty */
133#define SA_IGNORE 0x10 /* ignore by default */
134#define SA_CONT 0x20 /* continue if suspended */
135#define SA_CANTMASK 0x40 /* non-maskable, catchable */
136#define SA_PROC 0x80 /* deliverable to any thread */
137
138static int sigproptbl[NSIG] = {
139 SA_KILL|SA_PROC, /* SIGHUP */
140 SA_KILL|SA_PROC, /* SIGINT */
141 SA_KILL|SA_CORE|SA_PROC, /* SIGQUIT */
142 SA_KILL|SA_CORE, /* SIGILL */
143 SA_KILL|SA_CORE, /* SIGTRAP */
144 SA_KILL|SA_CORE, /* SIGABRT */
145 SA_KILL|SA_CORE|SA_PROC, /* SIGEMT */
146 SA_KILL|SA_CORE, /* SIGFPE */
147 SA_KILL|SA_PROC, /* SIGKILL */
148 SA_KILL|SA_CORE, /* SIGBUS */
149 SA_KILL|SA_CORE, /* SIGSEGV */
150 SA_KILL|SA_CORE, /* SIGSYS */
151 SA_KILL|SA_PROC, /* SIGPIPE */
152 SA_KILL|SA_PROC, /* SIGALRM */
153 SA_KILL|SA_PROC, /* SIGTERM */
154 SA_IGNORE|SA_PROC, /* SIGURG */
155 SA_STOP|SA_PROC, /* SIGSTOP */
156 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTSTP */
157 SA_IGNORE|SA_CONT|SA_PROC, /* SIGCONT */
158 SA_IGNORE|SA_PROC, /* SIGCHLD */
159 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTIN */
160 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTOU */
161 SA_IGNORE|SA_PROC, /* SIGIO */
162 SA_KILL, /* SIGXCPU */
163 SA_KILL, /* SIGXFSZ */
164 SA_KILL|SA_PROC, /* SIGVTALRM */
165 SA_KILL|SA_PROC, /* SIGPROF */
166 SA_IGNORE|SA_PROC, /* SIGWINCH */
167 SA_IGNORE|SA_PROC, /* SIGINFO */
168 SA_KILL|SA_PROC, /* SIGUSR1 */
169 SA_KILL|SA_PROC, /* SIGUSR2 */
170};
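/*
 * Illustrative sketch (not part of this revision): the table above is
 * consulted only through sigprop(), declared earlier and defined below.
 * Callers test the property bits roughly as in this hypothetical helper;
 * see tdsignal() and issignal() for the real uses.
 */
#if 0
static int
example_is_tty_stop(int sig)
{

	return ((sigprop(sig) & (SA_STOP | SA_TTYSTOP)) ==
	    (SA_STOP | SA_TTYSTOP));	/* SIGTSTP, SIGTTIN or SIGTTOU */
}
#endif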
171
172/*
173 * Determine signal that should be delivered to process p, the current
174 * process, 0 if none. If there is a pending stop signal with default
175 * action, the process stops in issignal().
176 * XXXKSE the check for a pending stop is not done under KSE
177 *
178 * MP SAFE.
179 */
180int
181cursig(struct thread *td)
182{
183 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
184 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
185 mtx_assert(&sched_lock, MA_NOTOWNED);
186 return (SIGPENDING(td) ? issignal(td) : 0);
187}
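/*
 * Illustrative sketch (not part of this revision): the usual consumer of
 * cursig()/postsig() is the return-to-user path, which loops roughly as
 * below (compare the call sequence documented above issignal()).  The
 * helper is hypothetical; the real caller (ast()) does more work around
 * this loop.
 */
#if 0
static void
example_check_signals(struct thread *td)
{
	struct proc *p;
	int sig;

	p = td->td_proc;
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	while ((sig = cursig(td)) != 0)
		postsig(sig);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
}
#endif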
188
189/*
190 * Arrange for ast() to handle unmasked pending signals on return to user
191 * mode. This must be called whenever a signal is added to td_siglist or
192 * unmasked in td_sigmask.
193 */
194void
195signotify(struct thread *td)
196{
197 struct proc *p;
198 sigset_t set;
199
200 p = td->td_proc;
201
202 PROC_LOCK_ASSERT(p, MA_OWNED);
203
204 /*
 205 * If our mask changed we may have to move signals that were
206 * previously masked by all threads to our siglist.
207 */
208 set = p->p_siglist;
209 SIGSETNAND(set, td->td_sigmask);
210 SIGSETNAND(p->p_siglist, set);
211 SIGSETOR(td->td_siglist, set);
212
213 if (SIGPENDING(td)) {
214 mtx_lock_spin(&sched_lock);
215 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
216 mtx_unlock_spin(&sched_lock);
217 }
218}
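/*
 * Illustrative sketch (not part of this revision): per the comment above,
 * code that queues a signal on td_siglist is expected to follow it with
 * signotify() so the AST machinery notices it, as tdsignal() does below.
 * The helper is hypothetical.
 */
#if 0
static void
example_queue_signal(struct thread *td, int sig)
{

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	SIGADDSET(td->td_siglist, sig);
	signotify(td);		/* sets TDF_NEEDSIGCHK | TDF_ASTPENDING */
}
#endif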
219
220int
221sigonstack(size_t sp)
222{
223 struct proc *p = curthread->td_proc;
224
225 PROC_LOCK_ASSERT(p, MA_OWNED);
226 return ((p->p_flag & P_ALTSTACK) ?
227#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
228 ((p->p_sigstk.ss_size == 0) ? (p->p_sigstk.ss_flags & SS_ONSTACK) :
229 ((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size))
230#else
231 ((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size)
232#endif
233 : 0);
234}
235
236static __inline int
237sigprop(int sig)
238{
239
240 if (sig > 0 && sig < NSIG)
241 return (sigproptbl[_SIG_IDX(sig)]);
242 return (0);
243}
244
245int
246sig_ffs(sigset_t *set)
247{
248 int i;
249
250 for (i = 0; i < _SIG_WORDS; i++)
251 if (set->__bits[i])
252 return (ffs(set->__bits[i]) + (i * 32));
253 return (0);
254}
255
256/*
257 * kern_sigaction
258 * sigaction
259 * freebsd4_sigaction
260 * osigaction
261 *
262 * MPSAFE
263 */
264int
265kern_sigaction(td, sig, act, oact, flags)
266 struct thread *td;
267 register int sig;
268 struct sigaction *act, *oact;
269 int flags;
270{
271 struct sigacts *ps;
272 struct thread *td0;
273 struct proc *p = td->td_proc;
274
275 if (!_SIG_VALID(sig))
276 return (EINVAL);
277
278 PROC_LOCK(p);
279 ps = p->p_sigacts;
280 mtx_lock(&ps->ps_mtx);
281 if (oact) {
282 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
283 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
284 oact->sa_flags = 0;
285 if (SIGISMEMBER(ps->ps_sigonstack, sig))
286 oact->sa_flags |= SA_ONSTACK;
287 if (!SIGISMEMBER(ps->ps_sigintr, sig))
288 oact->sa_flags |= SA_RESTART;
289 if (SIGISMEMBER(ps->ps_sigreset, sig))
290 oact->sa_flags |= SA_RESETHAND;
291 if (SIGISMEMBER(ps->ps_signodefer, sig))
292 oact->sa_flags |= SA_NODEFER;
293 if (SIGISMEMBER(ps->ps_siginfo, sig))
294 oact->sa_flags |= SA_SIGINFO;
295 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
296 oact->sa_flags |= SA_NOCLDSTOP;
297 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
298 oact->sa_flags |= SA_NOCLDWAIT;
299 }
300 if (act) {
301 if ((sig == SIGKILL || sig == SIGSTOP) &&
302 act->sa_handler != SIG_DFL) {
303 mtx_unlock(&ps->ps_mtx);
304 PROC_UNLOCK(p);
305 return (EINVAL);
306 }
307
308 /*
309 * Change setting atomically.
310 */
311
312 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
313 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
314 if (act->sa_flags & SA_SIGINFO) {
315 ps->ps_sigact[_SIG_IDX(sig)] =
316 (__sighandler_t *)act->sa_sigaction;
317 SIGADDSET(ps->ps_siginfo, sig);
318 } else {
319 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
320 SIGDELSET(ps->ps_siginfo, sig);
321 }
322 if (!(act->sa_flags & SA_RESTART))
323 SIGADDSET(ps->ps_sigintr, sig);
324 else
325 SIGDELSET(ps->ps_sigintr, sig);
326 if (act->sa_flags & SA_ONSTACK)
327 SIGADDSET(ps->ps_sigonstack, sig);
328 else
329 SIGDELSET(ps->ps_sigonstack, sig);
330 if (act->sa_flags & SA_RESETHAND)
331 SIGADDSET(ps->ps_sigreset, sig);
332 else
333 SIGDELSET(ps->ps_sigreset, sig);
334 if (act->sa_flags & SA_NODEFER)
335 SIGADDSET(ps->ps_signodefer, sig);
336 else
337 SIGDELSET(ps->ps_signodefer, sig);
338#ifdef COMPAT_SUNOS
339 if (act->sa_flags & SA_USERTRAMP)
340 SIGADDSET(ps->ps_usertramp, sig);
341 else
342 SIGDELSET(ps->ps_usertramp, sig);
343#endif
344 if (sig == SIGCHLD) {
345 if (act->sa_flags & SA_NOCLDSTOP)
346 ps->ps_flag |= PS_NOCLDSTOP;
347 else
348 ps->ps_flag &= ~PS_NOCLDSTOP;
349 if (act->sa_flags & SA_NOCLDWAIT) {
350 /*
351 * Paranoia: since SA_NOCLDWAIT is implemented
352 * by reparenting the dying child to PID 1 (and
353 * trust it to reap the zombie), PID 1 itself
354 * is forbidden to set SA_NOCLDWAIT.
355 */
356 if (p->p_pid == 1)
357 ps->ps_flag &= ~PS_NOCLDWAIT;
358 else
359 ps->ps_flag |= PS_NOCLDWAIT;
360 } else
361 ps->ps_flag &= ~PS_NOCLDWAIT;
362 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
363 ps->ps_flag |= PS_CLDSIGIGN;
364 else
365 ps->ps_flag &= ~PS_CLDSIGIGN;
366 }
367 /*
368 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
369 * and for signals set to SIG_DFL where the default is to
370 * ignore. However, don't put SIGCONT in ps_sigignore, as we
371 * have to restart the process.
372 */
373 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
374 (sigprop(sig) & SA_IGNORE &&
375 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
376 /* never to be seen again */
377 SIGDELSET(p->p_siglist, sig);
378 FOREACH_THREAD_IN_PROC(p, td0)
379 SIGDELSET(td0->td_siglist, sig);
380 if (sig != SIGCONT)
381 /* easier in psignal */
382 SIGADDSET(ps->ps_sigignore, sig);
383 SIGDELSET(ps->ps_sigcatch, sig);
384 } else {
385 SIGDELSET(ps->ps_sigignore, sig);
386 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
387 SIGDELSET(ps->ps_sigcatch, sig);
388 else
389 SIGADDSET(ps->ps_sigcatch, sig);
390 }
391#ifdef COMPAT_FREEBSD4
392 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
393 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
394 (flags & KSA_FREEBSD4) == 0)
395 SIGDELSET(ps->ps_freebsd4, sig);
396 else
397 SIGADDSET(ps->ps_freebsd4, sig);
398#endif
399#ifdef COMPAT_43
400 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
401 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
402 (flags & KSA_OSIGSET) == 0)
403 SIGDELSET(ps->ps_osigset, sig);
404 else
405 SIGADDSET(ps->ps_osigset, sig);
406#endif
407 }
408 mtx_unlock(&ps->ps_mtx);
409 PROC_UNLOCK(p);
410 return (0);
411}
412
413#ifndef _SYS_SYSPROTO_H_
414struct sigaction_args {
415 int sig;
416 struct sigaction *act;
417 struct sigaction *oact;
418};
419#endif
420/*
421 * MPSAFE
422 */
423int
424sigaction(td, uap)
425 struct thread *td;
426 register struct sigaction_args *uap;
427{
428 struct sigaction act, oact;
429 register struct sigaction *actp, *oactp;
430 int error;
431
432 actp = (uap->act != NULL) ? &act : NULL;
433 oactp = (uap->oact != NULL) ? &oact : NULL;
434 if (actp) {
435 error = copyin(uap->act, actp, sizeof(act));
436 if (error)
437 return (error);
438 }
439 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
440 if (oactp && !error)
441 error = copyout(oactp, uap->oact, sizeof(oact));
442 return (error);
443}
444
445#ifdef COMPAT_FREEBSD4
446#ifndef _SYS_SYSPROTO_H_
447struct freebsd4_sigaction_args {
448 int sig;
449 struct sigaction *act;
450 struct sigaction *oact;
451};
452#endif
453/*
454 * MPSAFE
455 */
456int
457freebsd4_sigaction(td, uap)
458 struct thread *td;
459 register struct freebsd4_sigaction_args *uap;
460{
461 struct sigaction act, oact;
462 register struct sigaction *actp, *oactp;
463 int error;
464
465
466 actp = (uap->act != NULL) ? &act : NULL;
467 oactp = (uap->oact != NULL) ? &oact : NULL;
468 if (actp) {
469 error = copyin(uap->act, actp, sizeof(act));
470 if (error)
471 return (error);
472 }
473 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
474 if (oactp && !error)
475 error = copyout(oactp, uap->oact, sizeof(oact));
476 return (error);
477}
 478#endif /* COMPAT_FREEBSD4 */
479
480#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
481#ifndef _SYS_SYSPROTO_H_
482struct osigaction_args {
483 int signum;
484 struct osigaction *nsa;
485 struct osigaction *osa;
486};
487#endif
488/*
489 * MPSAFE
490 */
491int
492osigaction(td, uap)
493 struct thread *td;
494 register struct osigaction_args *uap;
495{
496 struct osigaction sa;
497 struct sigaction nsa, osa;
498 register struct sigaction *nsap, *osap;
499 int error;
500
501 if (uap->signum <= 0 || uap->signum >= ONSIG)
502 return (EINVAL);
503
504 nsap = (uap->nsa != NULL) ? &nsa : NULL;
505 osap = (uap->osa != NULL) ? &osa : NULL;
506
507 if (nsap) {
508 error = copyin(uap->nsa, &sa, sizeof(sa));
509 if (error)
510 return (error);
511 nsap->sa_handler = sa.sa_handler;
512 nsap->sa_flags = sa.sa_flags;
513 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
514 }
515 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
516 if (osap && !error) {
517 sa.sa_handler = osap->sa_handler;
518 sa.sa_flags = osap->sa_flags;
519 SIG2OSIG(osap->sa_mask, sa.sa_mask);
520 error = copyout(&sa, uap->osa, sizeof(sa));
521 }
522 return (error);
523}
524
525#if !defined(__i386__) && !defined(__alpha__)
526/* Avoid replicating the same stub everywhere */
527int
528osigreturn(td, uap)
529 struct thread *td;
530 struct osigreturn_args *uap;
531{
532
533 return (nosys(td, (struct nosys_args *)uap));
534}
535#endif
536#endif /* COMPAT_43 */
537
538/*
539 * Initialize signal state for process 0;
540 * set to ignore signals that are ignored by default.
541 */
542void
543siginit(p)
544 struct proc *p;
545{
546 register int i;
547 struct sigacts *ps;
548
549 PROC_LOCK(p);
550 ps = p->p_sigacts;
551 mtx_lock(&ps->ps_mtx);
552 for (i = 1; i <= NSIG; i++)
553 if (sigprop(i) & SA_IGNORE && i != SIGCONT)
554 SIGADDSET(ps->ps_sigignore, i);
555 mtx_unlock(&ps->ps_mtx);
556 PROC_UNLOCK(p);
557}
558
559/*
560 * Reset signals for an exec of the specified process.
561 */
562void
563execsigs(p)
564 register struct proc *p;
565{
566 register struct sigacts *ps;
567 register int sig;
568
569 /*
570 * Reset caught signals. Held signals remain held
571 * through td_sigmask (unless they were caught,
572 * and are now ignored by default).
573 */
574 PROC_LOCK_ASSERT(p, MA_OWNED);
575 ps = p->p_sigacts;
576 mtx_lock(&ps->ps_mtx);
577 while (SIGNOTEMPTY(ps->ps_sigcatch)) {
578 sig = sig_ffs(&ps->ps_sigcatch);
579 SIGDELSET(ps->ps_sigcatch, sig);
580 if (sigprop(sig) & SA_IGNORE) {
581 if (sig != SIGCONT)
582 SIGADDSET(ps->ps_sigignore, sig);
583 SIGDELSET(p->p_siglist, sig);
584 /*
585 * There is only one thread at this point.
586 */
587 SIGDELSET(FIRST_THREAD_IN_PROC(p)->td_siglist, sig);
588 }
589 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
590 }
591 /*
592 * Clear out the td's sigmask. Normal processes use the proc sigmask.
593 */
594 SIGEMPTYSET(FIRST_THREAD_IN_PROC(p)->td_sigmask);
595 /*
596 * Reset stack state to the user stack.
597 * Clear set of signals caught on the signal stack.
598 */
599 p->p_sigstk.ss_flags = SS_DISABLE;
600 p->p_sigstk.ss_size = 0;
601 p->p_sigstk.ss_sp = 0;
602 p->p_flag &= ~P_ALTSTACK;
603 /*
 604	 * Reset the "no zombies if child dies" flag, as Solaris does.
605 */
606 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
607 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
608 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
609 mtx_unlock(&ps->ps_mtx);
610}
611
612/*
613 * kern_sigprocmask()
614 *
615 * Manipulate signal mask.
616 */
617int
618kern_sigprocmask(td, how, set, oset, old)
619 struct thread *td;
620 int how;
621 sigset_t *set, *oset;
622 int old;
623{
624 int error;
625
626 PROC_LOCK(td->td_proc);
627 if (oset != NULL)
628 *oset = td->td_sigmask;
629
630 error = 0;
631 if (set != NULL) {
632 switch (how) {
633 case SIG_BLOCK:
634 SIG_CANTMASK(*set);
635 SIGSETOR(td->td_sigmask, *set);
636 break;
637 case SIG_UNBLOCK:
638 SIGSETNAND(td->td_sigmask, *set);
639 signotify(td);
640 break;
641 case SIG_SETMASK:
642 SIG_CANTMASK(*set);
643 if (old)
644 SIGSETLO(td->td_sigmask, *set);
645 else
646 td->td_sigmask = *set;
647 signotify(td);
648 break;
649 default:
650 error = EINVAL;
651 break;
652 }
653 }
654 PROC_UNLOCK(td->td_proc);
655 return (error);
656}
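/*
 * Illustrative sketch (not part of this revision): an in-kernel caller can
 * use kern_sigprocmask() to keep a signal pending across a critical region
 * for the current thread.  The helper and the choice of SIGINT are
 * hypothetical.
 */
#if 0
static void
example_block_sigint(struct thread *td)
{
	sigset_t set, oset;

	SIGEMPTYSET(set);
	SIGADDSET(set, SIGINT);
	kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
	/* ... region during which a posted SIGINT stays pending ... */
	kern_sigprocmask(td, SIG_SETMASK, &oset, NULL, 0);
}
#endif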
657
658/*
659 * sigprocmask() - MP SAFE
660 */
661
662#ifndef _SYS_SYSPROTO_H_
663struct sigprocmask_args {
664 int how;
665 const sigset_t *set;
666 sigset_t *oset;
667};
668#endif
669int
670sigprocmask(td, uap)
671 register struct thread *td;
672 struct sigprocmask_args *uap;
673{
674 sigset_t set, oset;
675 sigset_t *setp, *osetp;
676 int error;
677
678 setp = (uap->set != NULL) ? &set : NULL;
679 osetp = (uap->oset != NULL) ? &oset : NULL;
680 if (setp) {
681 error = copyin(uap->set, setp, sizeof(set));
682 if (error)
683 return (error);
684 }
685 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
686 if (osetp && !error) {
687 error = copyout(osetp, uap->oset, sizeof(oset));
688 }
689 return (error);
690}
691
692#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
693/*
694 * osigprocmask() - MP SAFE
695 */
696#ifndef _SYS_SYSPROTO_H_
697struct osigprocmask_args {
698 int how;
699 osigset_t mask;
700};
701#endif
702int
703osigprocmask(td, uap)
704 register struct thread *td;
705 struct osigprocmask_args *uap;
706{
707 sigset_t set, oset;
708 int error;
709
710 OSIG2SIG(uap->mask, set);
711 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
712 SIG2OSIG(oset, td->td_retval[0]);
713 return (error);
714}
715#endif /* COMPAT_43 */
716
717#ifndef _SYS_SYSPROTO_H_
718struct sigpending_args {
719 sigset_t *set;
720};
721#endif
722/*
723 * MPSAFE
724 */
725int
726sigwait(struct thread *td, struct sigwait_args *uap)
727{
728 siginfo_t info;
729 sigset_t set;
730 int error;
731
732 error = copyin(uap->set, &set, sizeof(set));
733 if (error)
734 return (error);
735
736 error = kern_sigtimedwait(td, set, &info, NULL);
737 if (error)
738 return (error);
739
740 error = copyout(&info.si_signo, uap->sig, sizeof(info.si_signo));
741 /* Repost if we got an error. */
742 if (error && info.si_signo) {
743 PROC_LOCK(td->td_proc);
744 tdsignal(td, info.si_signo);
745 PROC_UNLOCK(td->td_proc);
746 }
747 return (error);
748}
749/*
750 * MPSAFE
751 */
752int
753sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
754{
755 struct timespec ts;
756 struct timespec *timeout;
757 sigset_t set;
758 siginfo_t info;
759 int error;
760
761 if (uap->timeout) {
762 error = copyin(uap->timeout, &ts, sizeof(ts));
763 if (error)
764 return (error);
765
766 timeout = &ts;
767 } else
768 timeout = NULL;
769
770 error = copyin(uap->set, &set, sizeof(set));
771 if (error)
772 return (error);
773
774 error = kern_sigtimedwait(td, set, &info, timeout);
775 if (error)
776 return (error);
777
778 error = copyout(&info, uap->info, sizeof(info));
779 /* Repost if we got an error. */
780 if (error && info.si_signo) {
781 PROC_LOCK(td->td_proc);
782 tdsignal(td, info.si_signo);
783 PROC_UNLOCK(td->td_proc);
784 }
785 return (error);
786}
787
788/*
789 * MPSAFE
790 */
791int
792sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
793{
794 siginfo_t info;
795 sigset_t set;
796 int error;
797
798 error = copyin(uap->set, &set, sizeof(set));
799 if (error)
800 return (error);
801
802 error = kern_sigtimedwait(td, set, &info, NULL);
803 if (error)
804 return (error);
805
806 error = copyout(&info, uap->info, sizeof(info));
807 /* Repost if we got an error. */
808 if (error && info.si_signo) {
809 PROC_LOCK(td->td_proc);
810 tdsignal(td, info.si_signo);
811 PROC_UNLOCK(td->td_proc);
812 }
813 return (error);
814}
815
816static int
817kern_sigtimedwait(struct thread *td, sigset_t set, siginfo_t *info,
818 struct timespec *timeout)
819{
820 register struct sigacts *ps;
821 sigset_t oldmask;
822 struct proc *p;
823 int error;
824 int sig;
825 int hz;
826
827 p = td->td_proc;
828 error = 0;
829 sig = 0;
830 SIG_CANTMASK(set);
831
832 PROC_LOCK(p);
833 ps = p->p_sigacts;
834 oldmask = td->td_sigmask;
835 td->td_sigmask = set;
836 signotify(td);
837
838 mtx_lock(&ps->ps_mtx);
839 sig = cursig(td);
840 if (sig)
841 goto out;
842
843 /*
844 * POSIX says this must be checked after looking for pending
845 * signals.
846 */
847 if (timeout) {
848 struct timeval tv;
849
850 if (timeout->tv_nsec > 1000000000) {
851 error = EINVAL;
852 goto out;
853 }
854 TIMESPEC_TO_TIMEVAL(&tv, timeout);
855 hz = tvtohz(&tv);
856 } else
857 hz = 0;
858
859 mtx_unlock(&ps->ps_mtx);
860 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "pause", hz);
861 mtx_lock(&ps->ps_mtx);
862 if (error == EINTR)
863 error = 0;
864 else if (error)
865 goto out;
866
867 sig = cursig(td);
868out:
869 td->td_sigmask = oldmask;
870 if (sig) {
871 sig_t action;
872
873 action = ps->ps_sigact[_SIG_IDX(sig)];
874 mtx_unlock(&ps->ps_mtx);
875#ifdef KTRACE
876 if (KTRPOINT(td, KTR_PSIG))
877 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
878 &td->td_oldsigmask : &td->td_sigmask, 0);
879#endif
880 _STOPEVENT(p, S_SIG, sig);
881
882 if (action == SIG_DFL)
883 sigexit(td, sig);
884 /* NOTREACHED */
885
886 SIGDELSET(td->td_siglist, sig);
887 info->si_signo = sig;
888 info->si_code = 0;
889 } else
890 mtx_unlock(&ps->ps_mtx);
891 PROC_UNLOCK(p);
892 return (error);
893}
894
895/*
896 * MPSAFE
897 */
898int
899sigpending(td, uap)
900 struct thread *td;
901 struct sigpending_args *uap;
902{
903 struct proc *p = td->td_proc;
904 sigset_t siglist;
905
906 PROC_LOCK(p);
907 siglist = p->p_siglist;
908 SIGSETOR(siglist, td->td_siglist);
909 PROC_UNLOCK(p);
910 return (copyout(&siglist, uap->set, sizeof(sigset_t)));
911}
912
913#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
914#ifndef _SYS_SYSPROTO_H_
915struct osigpending_args {
916 int dummy;
917};
918#endif
919/*
920 * MPSAFE
921 */
922int
923osigpending(td, uap)
924 struct thread *td;
925 struct osigpending_args *uap;
926{
927 struct proc *p = td->td_proc;
928 sigset_t siglist;
929
930 PROC_LOCK(p);
931 siglist = p->p_siglist;
932 SIGSETOR(siglist, td->td_siglist);
933 PROC_UNLOCK(p);
934 SIG2OSIG(siglist, td->td_retval[0]);
935 return (0);
936}
937#endif /* COMPAT_43 */
938
939#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
940/*
941 * Generalized interface signal handler, 4.3-compatible.
942 */
943#ifndef _SYS_SYSPROTO_H_
944struct osigvec_args {
945 int signum;
946 struct sigvec *nsv;
947 struct sigvec *osv;
948};
949#endif
950/*
951 * MPSAFE
952 */
953/* ARGSUSED */
954int
955osigvec(td, uap)
956 struct thread *td;
957 register struct osigvec_args *uap;
958{
959 struct sigvec vec;
960 struct sigaction nsa, osa;
961 register struct sigaction *nsap, *osap;
962 int error;
963
964 if (uap->signum <= 0 || uap->signum >= ONSIG)
965 return (EINVAL);
966 nsap = (uap->nsv != NULL) ? &nsa : NULL;
967 osap = (uap->osv != NULL) ? &osa : NULL;
968 if (nsap) {
969 error = copyin(uap->nsv, &vec, sizeof(vec));
970 if (error)
971 return (error);
972 nsap->sa_handler = vec.sv_handler;
973 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
974 nsap->sa_flags = vec.sv_flags;
975 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
976#ifdef COMPAT_SUNOS
977 nsap->sa_flags |= SA_USERTRAMP;
978#endif
979 }
980 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
981 if (osap && !error) {
982 vec.sv_handler = osap->sa_handler;
983 SIG2OSIG(osap->sa_mask, vec.sv_mask);
984 vec.sv_flags = osap->sa_flags;
985 vec.sv_flags &= ~SA_NOCLDWAIT;
986 vec.sv_flags ^= SA_RESTART;
987#ifdef COMPAT_SUNOS
988 vec.sv_flags &= ~SA_NOCLDSTOP;
989#endif
990 error = copyout(&vec, uap->osv, sizeof(vec));
991 }
992 return (error);
993}
994
995#ifndef _SYS_SYSPROTO_H_
996struct osigblock_args {
997 int mask;
998};
999#endif
1000/*
1001 * MPSAFE
1002 */
1003int
1004osigblock(td, uap)
1005 register struct thread *td;
1006 struct osigblock_args *uap;
1007{
1008 struct proc *p = td->td_proc;
1009 sigset_t set;
1010
1011 OSIG2SIG(uap->mask, set);
1012 SIG_CANTMASK(set);
1013 PROC_LOCK(p);
1014 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1015 SIGSETOR(td->td_sigmask, set);
1016 PROC_UNLOCK(p);
1017 return (0);
1018}
1019
1020#ifndef _SYS_SYSPROTO_H_
1021struct osigsetmask_args {
1022 int mask;
1023};
1024#endif
1025/*
1026 * MPSAFE
1027 */
1028int
1029osigsetmask(td, uap)
1030 struct thread *td;
1031 struct osigsetmask_args *uap;
1032{
1033 struct proc *p = td->td_proc;
1034 sigset_t set;
1035
1036 OSIG2SIG(uap->mask, set);
1037 SIG_CANTMASK(set);
1038 PROC_LOCK(p);
1039 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1040 SIGSETLO(td->td_sigmask, set);
1041 signotify(td);
1042 PROC_UNLOCK(p);
1043 return (0);
1044}
1045#endif /* COMPAT_43 || COMPAT_SUNOS */
1046
1047/*
1048 * Suspend process until signal, providing mask to be set
1049 * in the meantime. Note nonstandard calling convention:
1050 * libc stub passes mask, not pointer, to save a copyin.
1051 ***** XXXKSE this doesn't make sense under KSE.
1052 ***** Do we suspend the thread or all threads in the process?
1053 ***** How do we suspend threads running NOW on another processor?
1054 */
1055#ifndef _SYS_SYSPROTO_H_
1056struct sigsuspend_args {
1057 const sigset_t *sigmask;
1058};
1059#endif
1060/*
1061 * MPSAFE
1062 */
1063/* ARGSUSED */
1064int
1065sigsuspend(td, uap)
1066 struct thread *td;
1067 struct sigsuspend_args *uap;
1068{
1069 sigset_t mask;
1070 int error;
1071
1072 error = copyin(uap->sigmask, &mask, sizeof(mask));
1073 if (error)
1074 return (error);
1075 return (kern_sigsuspend(td, mask));
1076}
1077
1078int
1079kern_sigsuspend(struct thread *td, sigset_t mask)
1080{
1081 struct proc *p = td->td_proc;
1082
1083 /*
1084 * When returning from sigsuspend, we want
1085 * the old mask to be restored after the
1086 * signal handler has finished. Thus, we
1087 * save it here and mark the sigacts structure
1088 * to indicate this.
1089 */
1090 PROC_LOCK(p);
1091 td->td_oldsigmask = td->td_sigmask;
1092 td->td_pflags |= TDP_OLDMASK;
1093 SIG_CANTMASK(mask);
1094 td->td_sigmask = mask;
1095 signotify(td);
1096 while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
1097 /* void */;
1098 PROC_UNLOCK(p);
1099 /* always return EINTR rather than ERESTART... */
1100 return (EINTR);
1101}
1102
1103#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1104#ifndef _SYS_SYSPROTO_H_
1105struct osigsuspend_args {
1106 osigset_t mask;
1107};
1108#endif
1109/*
1110 * MPSAFE
1111 */
1112/* ARGSUSED */
1113int
1114osigsuspend(td, uap)
1115 struct thread *td;
1116 struct osigsuspend_args *uap;
1117{
1118 struct proc *p = td->td_proc;
1119 sigset_t mask;
1120
1121 PROC_LOCK(p);
1122 td->td_oldsigmask = td->td_sigmask;
1123 td->td_pflags |= TDP_OLDMASK;
1124 OSIG2SIG(uap->mask, mask);
1125 SIG_CANTMASK(mask);
1126 SIGSETLO(td->td_sigmask, mask);
1127 signotify(td);
1128 while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
1129 /* void */;
1130 PROC_UNLOCK(p);
1131 /* always return EINTR rather than ERESTART... */
1132 return (EINTR);
1133}
1134#endif /* COMPAT_43 */
1135
1136#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1137#ifndef _SYS_SYSPROTO_H_
1138struct osigstack_args {
1139 struct sigstack *nss;
1140 struct sigstack *oss;
1141};
1142#endif
1143/*
1144 * MPSAFE
1145 */
1146/* ARGSUSED */
1147int
1148osigstack(td, uap)
1149 struct thread *td;
1150 register struct osigstack_args *uap;
1151{
1152 struct proc *p = td->td_proc;
1153 struct sigstack nss, oss;
1154 int error = 0;
1155
1156 if (uap->nss != NULL) {
1157 error = copyin(uap->nss, &nss, sizeof(nss));
1158 if (error)
1159 return (error);
1160 }
1161 PROC_LOCK(p);
1162 oss.ss_sp = p->p_sigstk.ss_sp;
1163 oss.ss_onstack = sigonstack(cpu_getstack(td));
1164 if (uap->nss != NULL) {
1165 p->p_sigstk.ss_sp = nss.ss_sp;
1166 p->p_sigstk.ss_size = 0;
1167 p->p_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1168 p->p_flag |= P_ALTSTACK;
1169 }
1170 PROC_UNLOCK(p);
1171 if (uap->oss != NULL)
1172 error = copyout(&oss, uap->oss, sizeof(oss));
1173
1174 return (error);
1175}
1176#endif /* COMPAT_43 || COMPAT_SUNOS */
1177
1178#ifndef _SYS_SYSPROTO_H_
1179struct sigaltstack_args {
1180 stack_t *ss;
1181 stack_t *oss;
1182};
1183#endif
1184/*
1185 * MPSAFE
1186 */
1187/* ARGSUSED */
1188int
1189sigaltstack(td, uap)
1190 struct thread *td;
1191 register struct sigaltstack_args *uap;
1192{
1193 stack_t ss, oss;
1194 int error;
1195
1196 if (uap->ss != NULL) {
1197 error = copyin(uap->ss, &ss, sizeof(ss));
1198 if (error)
1199 return (error);
1200 }
1201 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1202 (uap->oss != NULL) ? &oss : NULL);
1203 if (error)
1204 return (error);
1205 if (uap->oss != NULL)
1206 error = copyout(&oss, uap->oss, sizeof(stack_t));
1207 return (error);
1208}
1209
1210int
1211kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1212{
1213 struct proc *p = td->td_proc;
1214 int oonstack;
1215
1216 PROC_LOCK(p);
1217 oonstack = sigonstack(cpu_getstack(td));
1218
1219 if (oss != NULL) {
1220 *oss = p->p_sigstk;
1221 oss->ss_flags = (p->p_flag & P_ALTSTACK)
1222 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1223 }
1224
1225 if (ss != NULL) {
1226 if (oonstack) {
1227 PROC_UNLOCK(p);
1228 return (EPERM);
1229 }
1230 if ((ss->ss_flags & ~SS_DISABLE) != 0) {
1231 PROC_UNLOCK(p);
1232 return (EINVAL);
1233 }
1234 if (!(ss->ss_flags & SS_DISABLE)) {
1235 if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
1236 PROC_UNLOCK(p);
1237 return (ENOMEM);
1238 }
1239 p->p_sigstk = *ss;
1240 p->p_flag |= P_ALTSTACK;
1241 } else {
1242 p->p_flag &= ~P_ALTSTACK;
1243 }
1244 }
1245 PROC_UNLOCK(p);
1246 return (0);
1247}
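/*
 * Illustrative sketch (not part of this revision): how an emulation or
 * compatibility layer might install an alternate signal stack through
 * kern_sigaltstack().  The helper and the stack address are placeholders.
 */
#if 0
static int
example_set_altstack(struct thread *td, char *stackaddr)
{
	stack_t ss;

	ss.ss_sp = stackaddr;		/* hypothetical user address */
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	return (kern_sigaltstack(td, &ss, NULL));
}
#endif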
1248
1249/*
1250 * Common code for kill process group/broadcast kill.
1251 * cp is calling process.
1252 */
1253static int
1254killpg1(td, sig, pgid, all)
1255 register struct thread *td;
1256 int sig, pgid, all;
1257{
1258 register struct proc *p;
1259 struct pgrp *pgrp;
1260 int nfound = 0;
1261
1262 if (all) {
1263 /*
1264 * broadcast
1265 */
1266 sx_slock(&allproc_lock);
1267 LIST_FOREACH(p, &allproc, p_list) {
1268 PROC_LOCK(p);
1269 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1270 p == td->td_proc) {
1271 PROC_UNLOCK(p);
1272 continue;
1273 }
1274 if (p_cansignal(td, p, sig) == 0) {
1275 nfound++;
1276 if (sig)
1277 psignal(p, sig);
1278 }
1279 PROC_UNLOCK(p);
1280 }
1281 sx_sunlock(&allproc_lock);
1282 } else {
1283 sx_slock(&proctree_lock);
1284 if (pgid == 0) {
1285 /*
1286 * zero pgid means send to my process group.
1287 */
1288 pgrp = td->td_proc->p_pgrp;
1289 PGRP_LOCK(pgrp);
1290 } else {
1291 pgrp = pgfind(pgid);
1292 if (pgrp == NULL) {
1293 sx_sunlock(&proctree_lock);
1294 return (ESRCH);
1295 }
1296 }
1297 sx_sunlock(&proctree_lock);
1298 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1299 PROC_LOCK(p);
1300 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
1301 PROC_UNLOCK(p);
1302 continue;
1303 }
1304 if (p->p_state == PRS_ZOMBIE) {
1305 PROC_UNLOCK(p);
1306 continue;
1307 }
1308 if (p_cansignal(td, p, sig) == 0) {
1309 nfound++;
1310 if (sig)
1311 psignal(p, sig);
1312 }
1313 PROC_UNLOCK(p);
1314 }
1315 PGRP_UNLOCK(pgrp);
1316 }
1317 return (nfound ? 0 : ESRCH);
1318}
1319
1320#ifndef _SYS_SYSPROTO_H_
1321struct kill_args {
1322 int pid;
1323 int signum;
1324};
1325#endif
1326/*
1327 * MPSAFE
1328 */
1329/* ARGSUSED */
1330int
1331kill(td, uap)
1332 register struct thread *td;
1333 register struct kill_args *uap;
1334{
1335 register struct proc *p;
1336 int error;
1337
1338 if ((u_int)uap->signum > _SIG_MAXSIG)
1339 return (EINVAL);
1340
1341 if (uap->pid > 0) {
1342 /* kill single process */
1343 if ((p = pfind(uap->pid)) == NULL)
1344 return (ESRCH);
1345 error = p_cansignal(td, p, uap->signum);
1346 if (error == 0 && uap->signum)
1347 psignal(p, uap->signum);
1348 PROC_UNLOCK(p);
1349 return (error);
1350 }
1351 switch (uap->pid) {
1352 case -1: /* broadcast signal */
1353 return (killpg1(td, uap->signum, 0, 1));
1354 case 0: /* signal own process group */
1355 return (killpg1(td, uap->signum, 0, 0));
1356 default: /* negative explicit process group */
1357 return (killpg1(td, uap->signum, -uap->pid, 0));
1358 }
1359 /* NOTREACHED */
1360}
1361
1362#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1363#ifndef _SYS_SYSPROTO_H_
1364struct okillpg_args {
1365 int pgid;
1366 int signum;
1367};
1368#endif
1369/*
1370 * MPSAFE
1371 */
1372/* ARGSUSED */
1373int
1374okillpg(td, uap)
1375 struct thread *td;
1376 register struct okillpg_args *uap;
1377{
1378
1379 if ((u_int)uap->signum > _SIG_MAXSIG)
1380 return (EINVAL);
1381 return (killpg1(td, uap->signum, uap->pgid, 0));
1382}
1383#endif /* COMPAT_43 || COMPAT_SUNOS */
1384
1385/*
1386 * Send a signal to a process group.
1387 */
1388void
1389gsignal(pgid, sig)
1390 int pgid, sig;
1391{
1392 struct pgrp *pgrp;
1393
1394 if (pgid != 0) {
1395 sx_slock(&proctree_lock);
1396 pgrp = pgfind(pgid);
1397 sx_sunlock(&proctree_lock);
1398 if (pgrp != NULL) {
1399 pgsignal(pgrp, sig, 0);
1400 PGRP_UNLOCK(pgrp);
1401 }
1402 }
1403}
1404
1405/*
1406 * Send a signal to a process group.  If checkctty is 1,
1407 * limit to members which have a controlling terminal.
1408 */
1409void
1410pgsignal(pgrp, sig, checkctty)
1411 struct pgrp *pgrp;
1412 int sig, checkctty;
1413{
1414 register struct proc *p;
1415
1416 if (pgrp) {
1417 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1418 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1419 PROC_LOCK(p);
1420 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1421 psignal(p, sig);
1422 PROC_UNLOCK(p);
1423 }
1424 }
1425}
1426
1427/*
1428 * Send a signal caused by a trap to the current thread.
1429 * If it will be caught immediately, deliver it with correct code.
1430 * Otherwise, post it normally.
1431 *
1432 * MPSAFE
1433 */
1434void
1435trapsignal(struct thread *td, int sig, u_long code)
1436{
1437 struct sigacts *ps;
1438 struct proc *p;
1439
1440 p = td->td_proc;
1441
1442 PROC_LOCK(p);
1443 ps = p->p_sigacts;
1444 mtx_lock(&ps->ps_mtx);
1445 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
1446 !SIGISMEMBER(td->td_sigmask, sig)) {
1447 p->p_stats->p_ru.ru_nsignals++;
1448#ifdef KTRACE
1449 if (KTRPOINT(curthread, KTR_PSIG))
1450 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
1451 &td->td_sigmask, code);
1452#endif
1453 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
1454 &td->td_sigmask, code);
1455 SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
1456 if (!SIGISMEMBER(ps->ps_signodefer, sig))
1457 SIGADDSET(td->td_sigmask, sig);
1458 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
1459 /*
1460 * See kern_sigaction() for origin of this code.
1461 */
1462 SIGDELSET(ps->ps_sigcatch, sig);
1463 if (sig != SIGCONT &&
1464 sigprop(sig) & SA_IGNORE)
1465 SIGADDSET(ps->ps_sigignore, sig);
1466 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1467 }
1468 mtx_unlock(&ps->ps_mtx);
1469 } else {
1470 mtx_unlock(&ps->ps_mtx);
1471 p->p_code = code; /* XXX for core dump/debugger */
1472 p->p_sig = sig; /* XXX to verify code */
1473 tdsignal(td, sig);
1474 }
1475 PROC_UNLOCK(p);
1476}
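/*
 * Illustrative sketch (not part of this revision): machine-dependent
 * trap() code reports synchronous faults through trapsignal().  The
 * helper is hypothetical and the fault code is machine-dependent.
 */
#if 0
static void
example_report_fault(struct thread *td, u_long ucode)
{

	/* ucode is the MD fault/trap code passed through to the handler. */
	trapsignal(td, SIGSEGV, ucode);
}
#endif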
1477
1478static struct thread *
1479sigtd(struct proc *p, int sig, int prop)
1480{
1481 struct thread *td;
1482
1483 PROC_LOCK_ASSERT(p, MA_OWNED);
1484
1485 /*
1486 * If we know the signal is bound for a specific thread then we
1487	 * assume that we are in that thread's context.  This is the case
1488 * for SIGXCPU, SIGILL, etc. Otherwise someone did a kill() from
1489 * userland and the real thread doesn't actually matter.
1490 */
1491 if ((prop & SA_PROC) != 0 && curthread->td_proc == p)
1492 return (curthread);
1493
1494 /*
1495 * We should search for the first thread that is blocked in
1496 * sigsuspend with this signal unmasked.
1497 */
1498
1499 /* XXX */
1500
1501 /*
1502 * Find the first thread in the proc that doesn't have this signal
1503 * masked.
1504 */
1505 FOREACH_THREAD_IN_PROC(p, td)
1506 if (!SIGISMEMBER(td->td_sigmask, sig))
1507 return (td);
1508
1509 return (FIRST_THREAD_IN_PROC(p));
1510}
1511
1512/*
1513 * Send the signal to the process. If the signal has an action, the action
1514 * is usually performed by the target process rather than the caller; we add
1515 * the signal to the set of pending signals for the process.
1516 *
1517 * Exceptions:
1518 * o When a stop signal is sent to a sleeping process that takes the
1519 * default action, the process is stopped without awakening it.
1520 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1521 * regardless of the signal action (eg, blocked or ignored).
1522 *
1523 * Other ignored signals are discarded immediately.
1524 *
1525 * MPSAFE
1526 */
1527void
1528psignal(struct proc *p, int sig)
1529{
1530 struct thread *td;
1531 int prop;
1532
1533 PROC_LOCK_ASSERT(p, MA_OWNED);
1534 prop = sigprop(sig);
1535
1536 /*
1537 * Find a thread to deliver the signal to.
1538 */
1539 td = sigtd(p, sig, prop);
1540
1541 tdsignal(td, sig);
1542}
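/*
 * Illustrative sketch (not part of this revision): psignal() must be
 * called with the target process locked; a typical in-kernel sender looks
 * roughly like the hypothetical helper below (compare kill() and
 * killpg1() above).
 */
#if 0
static int
example_send_sigterm(struct thread *td, struct proc *p)
{
	int error;

	PROC_LOCK(p);
	error = p_cansignal(td, p, SIGTERM);
	if (error == 0)
		psignal(p, SIGTERM);
	PROC_UNLOCK(p);
	return (error);
}
#endif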
1543
1544/*
1545 * MPSAFE
1546 */
1547void
1548tdsignal(struct thread *td, int sig)
1549{
1550 struct proc *p;
1551 register sig_t action;
1552 sigset_t *siglist;
1553 struct thread *td0;
1554 register int prop;
1555 struct sigacts *ps;
1556
1557 KASSERT(_SIG_VALID(sig),
1558 ("tdsignal(): invalid signal %d\n", sig));
1559
1560 p = td->td_proc;
1561 ps = p->p_sigacts;
1562
1563 PROC_LOCK_ASSERT(p, MA_OWNED);
1564 KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
1565
1566 prop = sigprop(sig);
1567
1568 /*
1569 * If this thread is blocking this signal then we'll leave it in the
1570 * proc so that we can find it in the first thread that unblocks it.
1571 */
1572 if (SIGISMEMBER(td->td_sigmask, sig))
1573 siglist = &p->p_siglist;
1574 else
1575 siglist = &td->td_siglist;
1576
1577 /*
1578 * If proc is traced, always give parent a chance;
1579 * if signal event is tracked by procfs, give *that*
1580 * a chance, as well.
1581 */
1582 if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
1583 action = SIG_DFL;
1584 } else {
1585 /*
1586 * If the signal is being ignored,
1587 * then we forget about it immediately.
1588 * (Note: we don't set SIGCONT in ps_sigignore,
1589 * and if it is set to SIG_IGN,
1590 * action will be SIG_DFL here.)
1591 */
1592 mtx_lock(&ps->ps_mtx);
1593 if (SIGISMEMBER(ps->ps_sigignore, sig) ||
1594 (p->p_flag & P_WEXIT)) {
1595 mtx_unlock(&ps->ps_mtx);
1596 return;
1597 }
1598 if (SIGISMEMBER(td->td_sigmask, sig))
1599 action = SIG_HOLD;
1600 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
1601 action = SIG_CATCH;
1602 else
1603 action = SIG_DFL;
1604 mtx_unlock(&ps->ps_mtx);
1605 }
1606
1607 if (prop & SA_CONT) {
1608 SIG_STOPSIGMASK(p->p_siglist);
1609 /*
1610 * XXX Should investigate leaving STOP and CONT sigs only in
1611 * the proc's siglist.
1612 */
1613 FOREACH_THREAD_IN_PROC(p, td0)
1614 SIG_STOPSIGMASK(td0->td_siglist);
1615 }
1616
1617 if (prop & SA_STOP) {
1618 /*
1619 * If sending a tty stop signal to a member of an orphaned
1620 * process group, discard the signal here if the action
1621 * is default; don't stop the process below if sleeping,
1622 * and don't clear any pending SIGCONT.
1623 */
1624 if ((prop & SA_TTYSTOP) &&
1625 (p->p_pgrp->pg_jobc == 0) &&
1626 (action == SIG_DFL))
1627 return;
1628 SIG_CONTSIGMASK(p->p_siglist);
1629 FOREACH_THREAD_IN_PROC(p, td0)
1630 SIG_CONTSIGMASK(td0->td_siglist);
1631 p->p_flag &= ~P_CONTINUED;
1632 }
1633 SIGADDSET(*siglist, sig);
1634 signotify(td); /* uses schedlock */
1635 /*
1636 * Defer further processing for signals which are held,
1637 * except that stopped processes must be continued by SIGCONT.
1638 */
1639 if (action == SIG_HOLD &&
1640 !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
1641 return;
1642 /*
1643 * Some signals have a process-wide effect and a per-thread
1644 * component. Most processing occurs when the process next
1645 * tries to cross the user boundary, however there are some
1646	 * times when processing needs to be done immediately, such as
1647	 * waking up threads so that they can cross the user boundary.
1648	 * We try to do the per-process part here.
1649 */
1650 if (P_SHOULDSTOP(p)) {
1651 /*
1652 * The process is in stopped mode. All the threads should be
1653 * either winding down or already on the suspended queue.
1654 */
1655 if (p->p_flag & P_TRACED) {
1656 /*
1657 * The traced process is already stopped,
1658 * so no further action is necessary.
1659 * No signal can restart us.
1660 */
1661 goto out;
1662 }
1663
1664 if (sig == SIGKILL) {
1665 /*
1666 * SIGKILL sets process running.
1667 * It will die elsewhere.
1668 * All threads must be restarted.
1669 */
1670 p->p_flag &= ~P_STOPPED;
1671 goto runfast;
1672 }
1673
1674 if (prop & SA_CONT) {
1675 /*
1676 * If SIGCONT is default (or ignored), we continue the
1677 * process but don't leave the signal in siglist as
1678 * it has no further action. If SIGCONT is held, we
1679 * continue the process and leave the signal in
1680 * siglist. If the process catches SIGCONT, let it
1681 * handle the signal itself. If it isn't waiting on
1682 * an event, it goes back to run state.
1683 * Otherwise, process goes back to sleep state.
1684 */
1685 p->p_flag &= ~P_STOPPED_SIG;
1686 p->p_flag |= P_CONTINUED;
1687 if (action == SIG_DFL) {
1688 SIGDELSET(*siglist, sig);
1689 } else if (action == SIG_CATCH) {
1690 /*
1691 * The process wants to catch it so it needs
1692 * to run at least one thread, but which one?
1693 * It would seem that the answer would be to
1694 * run an upcall in the next KSE to run, and
1695 * deliver the signal that way. In a NON KSE
1696 * process, we need to make sure that the
1697 * single thread is runnable asap.
1698 * XXXKSE for now however, make them all run.
1699 */
1700 goto runfast;
1701 }
1702 /*
1703 * The signal is not ignored or caught.
1704 */
1705 mtx_lock_spin(&sched_lock);
1706 thread_unsuspend(p);
1707 mtx_unlock_spin(&sched_lock);
1708 goto out;
1709 }
1710
1711 if (prop & SA_STOP) {
1712 /*
1713 * Already stopped, don't need to stop again
1714 * (If we did the shell could get confused).
1715	 * Just make sure the signal STOP bit is set.
1716 */
1717 p->p_flag |= P_STOPPED_SIG;
1718 SIGDELSET(*siglist, sig);
1719 goto out;
1720 }
1721
1722 /*
1723 * All other kinds of signals:
1724 * If a thread is sleeping interruptibly, simulate a
1725 * wakeup so that when it is continued it will be made
1726 * runnable and can look at the signal. However, don't make
1727 * the PROCESS runnable, leave it stopped.
1728 * It may run a bit until it hits a thread_suspend_check().
1729 */
1730 mtx_lock_spin(&sched_lock);
1731 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
1732 if (td->td_flags & TDF_CVWAITQ)
1733 cv_abort(td);
1734 else
1735 abortsleep(td);
1736 }
1737 mtx_unlock_spin(&sched_lock);
1738 goto out;
1739 /*
1740 * XXXKSE What about threads that are waiting on mutexes?
1741 * Shouldn't they abort too?
1742	 * No, hopefully mutexes are short lived.  They'll
1743 * eventually hit thread_suspend_check().
1744 */
1745 } else if (p->p_state == PRS_NORMAL) {
1746 if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
1747 !(prop & SA_STOP)) {
1748 mtx_lock_spin(&sched_lock);
1749 tdsigwakeup(td, sig, action);
1750 mtx_unlock_spin(&sched_lock);
1751 goto out;
1752 }
1753 if (prop & SA_STOP) {
1754 if (p->p_flag & P_PPWAIT)
1755 goto out;
1756 p->p_flag |= P_STOPPED_SIG;
1757 p->p_xstat = sig;
1758 mtx_lock_spin(&sched_lock);
1759 FOREACH_THREAD_IN_PROC(p, td0) {
1760 if (TD_IS_SLEEPING(td0) &&
1761 (td->td_flags & TDF_SINTR))
1762 thread_suspend_one(td0);
1763 }
1764 thread_stopped(p);
1765 if (p->p_numthreads == p->p_suspcount) {
1766 SIGDELSET(p->p_siglist, p->p_xstat);
1767 FOREACH_THREAD_IN_PROC(p, td0)
1768 SIGDELSET(td0->td_siglist, p->p_xstat);
1769 }
1770 mtx_unlock_spin(&sched_lock);
1771 goto out;
1772 }
1773 else
1774 goto runfast;
1775 /* NOTREACHED */
1776 } else {
1777		/* Not in "NORMAL" state.  Discard the signal. */
1778 SIGDELSET(*siglist, sig);
1779 goto out;
1780 }
1781
1782 /*
1783 * The process is not stopped so we need to apply the signal to all the
1784 * running threads.
1785 */
1786
1787runfast:
1788 mtx_lock_spin(&sched_lock);
1789 tdsigwakeup(td, sig, action);
1790 thread_unsuspend(p);
1791 mtx_unlock_spin(&sched_lock);
1792out:
1793 /* If we jump here, sched_lock should not be owned. */
1794 mtx_assert(&sched_lock, MA_NOTOWNED);
1795}
1796
1797/*
1798 * The force of a signal has been directed against a single
1799 * thread. We need to see what we can do about knocking it
1800 * out of any sleep it may be in etc.
1801 */
1802static void
1803tdsigwakeup(struct thread *td, int sig, sig_t action)
1804{
1805 struct proc *p = td->td_proc;
1806 register int prop;
1807
1808 PROC_LOCK_ASSERT(p, MA_OWNED);
1809 mtx_assert(&sched_lock, MA_OWNED);
1810 prop = sigprop(sig);
1811 /*
1812 * Bring the priority of a thread up if we want it to get
1813 * killed in this lifetime.
1814 */
1815 if ((action == SIG_DFL) && (prop & SA_KILL)) {
1816 if (td->td_priority > PUSER) {
1817 td->td_priority = PUSER;
1818 }
1819 }
1820 if (TD_IS_SLEEPING(td)) {
1821 /*
1822 * If thread is sleeping uninterruptibly
1823 * we can't interrupt the sleep... the signal will
1824 * be noticed when the process returns through
1825 * trap() or syscall().
1826 */
1827 if ((td->td_flags & TDF_SINTR) == 0) {
1828 return;
1829 }
1830 /*
1831 * Process is sleeping and traced. Make it runnable
1832 * so it can discover the signal in issignal() and stop
1833 * for its parent.
1834 */
1835 if (p->p_flag & P_TRACED) {
1836 p->p_flag &= ~P_STOPPED_TRACE;
1837 } else {
1838
1839 /*
1840 * If SIGCONT is default (or ignored) and process is
1841 * asleep, we are finished; the process should not
1842 * be awakened.
1843 */
1844 if ((prop & SA_CONT) && action == SIG_DFL) {
1845 SIGDELSET(p->p_siglist, sig);
1846 /*
1847 * It may be on either list in this state.
1848 * Remove from both for now.
1849 */
1850 SIGDELSET(td->td_siglist, sig);
1851 return;
1852 }
1853
1854 /*
1855 * Raise priority to at least PUSER.
1856 */
1857 if (td->td_priority > PUSER) {
1858 td->td_priority = PUSER;
1859 }
1860 }
1861 if (td->td_flags & TDF_CVWAITQ)
1862 cv_abort(td);
1863 else
1864 abortsleep(td);
1865 }
1866#ifdef SMP
1867 else {
1868 /*
1869		 * Other states do nothing with the signal immediately,
1870 * other than kicking ourselves if we are running.
1871 * It will either never be noticed, or noticed very soon.
1872 */
1873 if (TD_IS_RUNNING(td) && td != curthread) {
1874 forward_signal(td);
1875 }
1876 }
1877#endif
1878}
1879
1880/*
1881 * If the current process has received a signal (should be caught or cause
1882 * termination, should interrupt current syscall), return the signal number.
1883 * Stop signals with default action are processed immediately, then cleared;
1884 * they aren't returned. This is checked after each entry to the system for
1885 * a syscall or trap (though this can usually be done without calling issignal
1886 * by checking the pending signal masks in cursig.) The normal call
1887 * sequence is
1888 *
1889 * while (sig = cursig(curthread))
1890 * postsig(sig);
1891 */
1892static int
1893issignal(td)
1894 struct thread *td;
1895{
1896 struct proc *p;
1897 struct sigacts *ps;
1898 sigset_t sigpending;
1899 register int sig, prop;
1900
1901 p = td->td_proc;
1902 ps = p->p_sigacts;
1903 mtx_assert(&ps->ps_mtx, MA_OWNED);
1904 PROC_LOCK_ASSERT(p, MA_OWNED);
1905 for (;;) {
1906 int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
1907
1908 sigpending = td->td_siglist;
1909 SIGSETNAND(sigpending, td->td_sigmask);
1910
1911 if (p->p_flag & P_PPWAIT)
1912 SIG_STOPSIGMASK(sigpending);
1913 if (SIGISEMPTY(sigpending)) /* no signal to send */
1914 return (0);
1915 sig = sig_ffs(&sigpending);
1916
1917 _STOPEVENT(p, S_SIG, sig);
1918
1919 /*
1920 * We should see pending but ignored signals
1921 * only if P_TRACED was on when they were posted.
1922 */
1923 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
1924 SIGDELSET(td->td_siglist, sig);
1925 continue;
1926 }
1927 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
1928 /*
1929 * If traced, always stop.
1930 */
1931 mtx_unlock(&ps->ps_mtx);
1932 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
1933 &p->p_mtx.mtx_object, "Stopping for traced signal");
1934 p->p_xstat = sig;
1935 PROC_LOCK(p->p_pptr);
1936 psignal(p->p_pptr, SIGCHLD);
1937 PROC_UNLOCK(p->p_pptr);
1938 mtx_lock_spin(&sched_lock);
1939 stop(p); /* uses schedlock too eventually */
1940 thread_suspend_one(td);
1941 PROC_UNLOCK(p);
1942 DROP_GIANT();
1943 p->p_stats->p_ru.ru_nivcsw++;
1944 mi_switch();
1945 mtx_unlock_spin(&sched_lock);
1946 PICKUP_GIANT();
1947 PROC_LOCK(p);
1948 mtx_lock(&ps->ps_mtx);
1949
1950 /*
1951 * If parent wants us to take the signal,
1952 * then it will leave it in p->p_xstat;
1953 * otherwise we just look for signals again.
1954 */
1955 SIGDELSET(td->td_siglist, sig); /* clear old signal */
1956 sig = p->p_xstat;
1957 if (sig == 0)
1958 continue;
1959
1960 /*
1961 * If the traced bit got turned off, go back up
1962 * to the top to rescan signals. This ensures
1963 * that p_sig* and p_sigact are consistent.
1964 */
1965 if ((p->p_flag & P_TRACED) == 0)
1966 continue;
1967
1968 /*
1969 * Put the new signal into td_siglist. If the
1970 * signal is being masked, look for other signals.
1971 */
1972 SIGADDSET(td->td_siglist, sig);
1973 if (SIGISMEMBER(td->td_sigmask, sig))
1974 continue;
1975 signotify(td);
1976 }
1977
1978 prop = sigprop(sig);
1979
1980 /*
1981 * Decide whether the signal should be returned.
1982 * Return the signal's number, or fall through
1983 * to clear it from the pending mask.
1984 */
1985 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
1986
1987 case (intptr_t)SIG_DFL:
1988 /*
1989 * Don't take default actions on system processes.
1990 */
1991 if (p->p_pid <= 1) {
1992#ifdef DIAGNOSTIC
1993 /*
1994 * Are you sure you want to ignore SIGSEGV
1995 * in init? XXX
1996 */
1997 printf("Process (pid %lu) got signal %d\n",
1998 (u_long)p->p_pid, sig);
1999#endif
2000 break; /* == ignore */
2001 }
2002 /*
2003 * If there is a pending stop signal to process
2004 * with default action, stop here,
2005 * then clear the signal. However,
2006 * if process is member of an orphaned
2007 * process group, ignore tty stop signals.
2008 */
2009 if (prop & SA_STOP) {
2010 if (p->p_flag & P_TRACED ||
2011 (p->p_pgrp->pg_jobc == 0 &&
2012 prop & SA_TTYSTOP))
2013 break; /* == ignore */
2014 mtx_unlock(&ps->ps_mtx);
2015 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2016 &p->p_mtx.mtx_object, "Catching SIGSTOP");
2017 p->p_flag |= P_STOPPED_SIG;
2018 p->p_xstat = sig;
2019 mtx_lock_spin(&sched_lock);
2020 thread_stopped(p);
2021 thread_suspend_one(td);
2022 PROC_UNLOCK(p);
2023 DROP_GIANT();
2024 p->p_stats->p_ru.ru_nivcsw++;
2025 mi_switch();
2026 mtx_unlock_spin(&sched_lock);
2027 PICKUP_GIANT();
2028 PROC_LOCK(p);
2029 mtx_lock(&ps->ps_mtx);
2030 break;
2031 } else if (prop & SA_IGNORE) {
2032 /*
2033 * Except for SIGCONT, shouldn't get here.
2034 * Default action is to ignore; drop it.
2035 */
2036 break; /* == ignore */
2037 } else
2038 return (sig);
2039 /*NOTREACHED*/
2040
2041 case (intptr_t)SIG_IGN:
2042 /*
2043 * Masking above should prevent us ever trying
2044 * to take action on an ignored signal other
2045 * than SIGCONT, unless process is traced.
2046 */
2047 if ((prop & SA_CONT) == 0 &&
2048 (p->p_flag & P_TRACED) == 0)
2049 printf("issignal\n");
2050 break; /* == ignore */
2051
2052 default:
2053 /*
2054 * This signal has an action, let
2055 * postsig() process it.
2056 */
2057 return (sig);
2058 }
2059 SIGDELSET(td->td_siglist, sig); /* take the signal! */
2060 }
2061 /* NOTREACHED */
2062}
2063
2064/*
2065 * Put the argument process into the stopped state and notify the parent
2066 * via wakeup. Signals are handled elsewhere. The process must not be
2067 * on the run queue. Must be called with the proc p locked and the scheduler
2068 * lock held.
2069 */
2070static void
2071stop(struct proc *p)
2072{
2073
2074 PROC_LOCK_ASSERT(p, MA_OWNED);
2075 p->p_flag |= P_STOPPED_SIG;
2076 p->p_flag &= ~P_WAITED;
2077 wakeup(p->p_pptr);
2078}
2079
2080/*
2081 * MPSAFE
2082 */
2083void
2084thread_stopped(struct proc *p)
2085{
2086 struct proc *p1 = curthread->td_proc;
2087 struct sigacts *ps;
2088 int n;
2089
2090 PROC_LOCK_ASSERT(p, MA_OWNED);
2091 mtx_assert(&sched_lock, MA_OWNED);
2092 n = p->p_suspcount;
2093 if (p == p1)
2094 n++;
2095 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
2096 mtx_unlock_spin(&sched_lock);
2097 stop(p);
2098 PROC_LOCK(p->p_pptr);
2099 ps = p->p_pptr->p_sigacts;
2100 mtx_lock(&ps->ps_mtx);
2101 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
2102 mtx_unlock(&ps->ps_mtx);
2103 psignal(p->p_pptr, SIGCHLD);
2104 } else
2105 mtx_unlock(&ps->ps_mtx);
2106 PROC_UNLOCK(p->p_pptr);
2107 mtx_lock_spin(&sched_lock);
2108 }
2109}
2110
2111/*
2112 * Take the action for the specified signal
2113 * from the current set of pending signals.
2114 */
2115void
2116postsig(sig)
2117 register int sig;
2118{
2119 struct thread *td = curthread;
2120 register struct proc *p = td->td_proc;
2121 struct sigacts *ps;
2122 sig_t action;
2123 sigset_t returnmask;
2124 int code;
2125
2126 KASSERT(sig != 0, ("postsig"));
2127
2128 PROC_LOCK_ASSERT(p, MA_OWNED);
2129 ps = p->p_sigacts;
2130 mtx_assert(&ps->ps_mtx, MA_OWNED);
2131 SIGDELSET(td->td_siglist, sig);
2132 action = ps->ps_sigact[_SIG_IDX(sig)];
2133#ifdef KTRACE
2134 if (KTRPOINT(td, KTR_PSIG))
2135 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
2136 &td->td_oldsigmask : &td->td_sigmask, 0);
2137#endif
2138 _STOPEVENT(p, S_SIG, sig);
2139
2140 if (action == SIG_DFL) {
2141 /*
2142 * Default action, where the default is to kill
2143 * the process. (Other cases were ignored above.)
2144 */
2145 mtx_unlock(&ps->ps_mtx);
2146 sigexit(td, sig);
2147 /* NOTREACHED */
2148 } else {
2149 /*
2150 * If we get here, the signal must be caught.
2151 */
2152 KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
2153 ("postsig action"));
2154 /*
2155 * Set the new mask value and also defer further
2156 * occurrences of this signal.
2157 *
2158 * Special case: user has done a sigsuspend. Here the
2159 * current mask is not of interest, but rather the
2160 * mask from before the sigsuspend is what we want
2161 * restored after the signal processing is completed.
2162 */
2163 if (td->td_pflags & TDP_OLDMASK) {
2164 returnmask = td->td_oldsigmask;
2165 td->td_pflags &= ~TDP_OLDMASK;
2166 } else
2167 returnmask = td->td_sigmask;
2168
2169 SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
2170 if (!SIGISMEMBER(ps->ps_signodefer, sig))
2171 SIGADDSET(td->td_sigmask, sig);
2172
2173 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
2174 /*
2175 * See kern_sigaction() for origin of this code.
2176 */
2177 SIGDELSET(ps->ps_sigcatch, sig);
2178 if (sig != SIGCONT &&
2179 sigprop(sig) & SA_IGNORE)
2180 SIGADDSET(ps->ps_sigignore, sig);
2181 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2182 }
2183 p->p_stats->p_ru.ru_nsignals++;
2184 if (p->p_sig != sig) {
2185 code = 0;
2186 } else {
2187 code = p->p_code;
2188 p->p_code = 0;
2189 p->p_sig = 0;
2190 }
2191 if (p->p_flag & P_THREADED)
2192 thread_signal_add(curthread, sig);
2193 else
2194 (*p->p_sysent->sv_sendsig)(action, sig,
2195 &returnmask, code);
2196 }
2197}
2198
2199/*
2200 * Kill the current process for stated reason.
2201 */
2202void
2203killproc(p, why)
2204 struct proc *p;
2205 char *why;
2206{
2207
2208 PROC_LOCK_ASSERT(p, MA_OWNED);
2209 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
2210 p, p->p_pid, p->p_comm);
2211 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
2212 p->p_ucred ? p->p_ucred->cr_uid : -1, why);
2213 psignal(p, SIGKILL);
2214}
2215
2216/*
2217 * Force the current process to exit with the specified signal, dumping core
2218 * if appropriate. We bypass the normal tests for masked and caught signals,
2219 * allowing unrecoverable failures to terminate the process without changing
2220 * signal state. Mark the accounting record with the signal termination.
2221 * If dumping core, save the signal number for the debugger. Calls exit and
2222 * does not return.
2223 *
2224 * MPSAFE
2225 */
2226void
2227sigexit(td, sig)
2228 struct thread *td;
2229 int sig;
2230{
2231 struct proc *p = td->td_proc;
2232
2233 PROC_LOCK_ASSERT(p, MA_OWNED);
2234 p->p_acflag |= AXSIG;
2235 if (sigprop(sig) & SA_CORE) {
2236 p->p_sig = sig;
2237 /*
2238 * Log signals which would cause core dumps
2239 * (Log as LOG_INFO to appease those who don't want
2240 * these messages.)
2241	 * XXX: TODO: write out the ruid as well as the euid.
2242 */
2243 PROC_UNLOCK(p);
2244 if (!mtx_owned(&Giant))
2245 mtx_lock(&Giant);
2246 if (coredump(td) == 0)
2247 sig |= WCOREFLAG;
2248 if (kern_logsigexit)
2249 log(LOG_INFO,
2250 "pid %d (%s), uid %d: exited on signal %d%s\n",
2251 p->p_pid, p->p_comm,
2252 td->td_ucred ? td->td_ucred->cr_uid : -1,
2253 sig &~ WCOREFLAG,
2254 sig & WCOREFLAG ? " (core dumped)" : "");
2255 } else {
2256 PROC_UNLOCK(p);
2257 if (!mtx_owned(&Giant))
2258 mtx_lock(&Giant);
2259 }
2260 exit1(td, W_EXITCODE(0, sig));
2261 /* NOTREACHED */
2262}
2263
2264static char corefilename[MAXPATHLEN+1] = {"%N.core"};
2265SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
2266 sizeof(corefilename), "process corefile name format string");
2267
2268/*
2269 * expand_name(name, uid, pid)
2270 * Expand the name described in corefilename, using name, uid, and pid.
2271 * corefilename is a printf-like string, with three format specifiers:
2272 * %N name of process ("name")
2273 * %P process id (pid)
2274 * %U user id (uid)
2275	 * For example, "%N.core" is the default; core dumps can be disabled entirely
2276	 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2277 * This is controlled by the sysctl variable kern.corefile (see above).
2278 */
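/*
 * Illustrative sketch (not part of the original source): with the
 * hypothetical values name = "sh", uid = 1001 and pid = 1234, the
 * default format "%N.core" expands to "sh.core", while a format of
 * "/cores/%U/%N-%P" expands to "/cores/1001/sh-1234".
 */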
2279
2280static char *
2281expand_name(name, uid, pid)
2282 const char *name;
2283 uid_t uid;
2284 pid_t pid;
2285{
2286 const char *format, *appendstr;
2287 char *temp;
2288 char buf[11]; /* Buffer for pid/uid -- max 4B */
2289 size_t i, l, n;
2290
2291 format = corefilename;
2292 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
2293 if (temp == NULL)
2294 return (NULL);
2295 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
2296 switch (format[i]) {
2297 case '%': /* Format character */
2298 i++;
2299 switch (format[i]) {
2300 case '%':
2301 appendstr = "%";
2302 break;
2303 case 'N': /* process name */
2304 appendstr = name;
2305 break;
2306 case 'P': /* process id */
2307 sprintf(buf, "%u", pid);
2308 appendstr = buf;
2309 break;
2310 case 'U': /* user id */
2311 sprintf(buf, "%u", uid);
2312 appendstr = buf;
2313 break;
2314 default:
2315 appendstr = "";
2316 log(LOG_ERR,
2317 "Unknown format character %c in `%s'\n",
2318 format[i], format);
2319 }
2320 l = strlen(appendstr);
2321 if ((n + l) >= MAXPATHLEN)
2322 goto toolong;
2323 memcpy(temp + n, appendstr, l);
2324 n += l;
2325 break;
2326 default:
2327 temp[n++] = format[i];
2328 }
2329 }
2330 if (format[i] != '\0')
2331 goto toolong;
2332 return (temp);
2333toolong:
2334 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
2335 (long)pid, name, (u_long)uid);
2336 free(temp, M_TEMP);
2337 return (NULL);
2338}
2339
2340/*
2341 * Dump a process' core. The main routine does some
2342 * policy checking, and creates the name of the coredump;
2343 * then it passes on a vnode and a size limit to the process-specific
2344 * coredump routine if there is one; if there _is not_ one, it returns
2345 * ENOSYS; otherwise it returns the error from the process-specific routine.
2346 */
2347
2348static int
2349coredump(struct thread *td)
2350{
2351 struct proc *p = td->td_proc;
2352 register struct vnode *vp;
2353 register struct ucred *cred = td->td_ucred;
2354 struct flock lf;
2355 struct nameidata nd;
2356 struct vattr vattr;
2357 int error, error1, flags;
2358 struct mount *mp;
2359 char *name; /* name of corefile */
2360 off_t limit;
2361
2362 PROC_LOCK(p);
2363 _STOPEVENT(p, S_CORE, 0);
2364
2365 if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
2366 PROC_UNLOCK(p);
2367 return (EFAULT);
2368 }
2369
2370 /*
2371 * Note that the bulk of limit checking is done after
2372 * the corefile is created. The exception is if the limit
2373 * for corefiles is 0, in which case we don't bother
2374	 * creating the corefile at all.  This layout means that
2375	 * a corefile which would exceed the limit is truncated
2376	 * rather than not being created.
2377 */
2378 limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
2379 if (limit == 0) {
2380 PROC_UNLOCK(p);
2381 return 0;
2382 }
2383 PROC_UNLOCK(p);
2384
2385restart:
2386 name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
2387 if (name == NULL)
2388 return (EINVAL);
2389 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
2390 flags = O_CREAT | FWRITE | O_NOFOLLOW;
2391 error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR);
2392 free(name, M_TEMP);
2393 if (error)
2394 return (error);
2395 NDFREE(&nd, NDF_ONLY_PNBUF);
2396 vp = nd.ni_vp;
2397
2398 /* Don't dump to non-regular files or files with links. */
2399 if (vp->v_type != VREG ||
2400 VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
2401 VOP_UNLOCK(vp, 0, td);
2402 error = EFAULT;
2403 goto out2;
2404 }
2405
2406 VOP_UNLOCK(vp, 0, td);
2407 lf.l_whence = SEEK_SET;
2408 lf.l_start = 0;
2409 lf.l_len = 0;
2410 lf.l_type = F_WRLCK;
2411 error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK);
2412 if (error)
2413 goto out2;
2414
2415 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2416 lf.l_type = F_UNLCK;
2417 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2418 if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
2419 return (error);
2420 if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
2421 return (error);
2422 goto restart;
2423 }
2424
2425 VATTR_NULL(&vattr);
2426 vattr.va_size = 0;
2427 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2428 VOP_LEASE(vp, td, cred, LEASE_WRITE);
2429 VOP_SETATTR(vp, &vattr, cred, td);
2430 VOP_UNLOCK(vp, 0, td);
2431 PROC_LOCK(p);
2432 p->p_acflag |= ACORE;
2433 PROC_UNLOCK(p);
2434
2435 error = p->p_sysent->sv_coredump ?
2436 p->p_sysent->sv_coredump(td, vp, limit) :
2437 ENOSYS;
2438
2439 lf.l_type = F_UNLCK;
2440 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2441 vn_finished_write(mp);
2442out2:
2443 error1 = vn_close(vp, FWRITE, cred, td);
2444 if (error == 0)
2445 error = error1;
2446 return (error);
2447}
2448
2449/*
2450 * Nonexistent system call-- signal process (may want to handle it).
2451 * Flag error in case process won't see signal immediately (blocked or ignored).
2452 */
2453#ifndef _SYS_SYSPROTO_H_
2454struct nosys_args {
2455 int dummy;
2456};
2457#endif
2458/*
2459 * MPSAFE
2460 */
2461/* ARGSUSED */
2462int
2463nosys(td, args)
2464 struct thread *td;
2465 struct nosys_args *args;
2466{
2467 struct proc *p = td->td_proc;
2468
2469 PROC_LOCK(p);
2470 psignal(p, SIGSYS);
2471 PROC_UNLOCK(p);
2472 return (ENOSYS);
2473}
2474
2475/*
2476 * Send a SIGIO or SIGURG signal to a process or process group using
2477 * stored credentials rather than those of the current process.
2478 */
2479void
2480pgsigio(sigiop, sig, checkctty)
2481 struct sigio **sigiop;
2482 int sig, checkctty;
2483{
2484 struct sigio *sigio;
2485
2486 SIGIO_LOCK();
2487 sigio = *sigiop;
2488 if (sigio == NULL) {
2489 SIGIO_UNLOCK();
2490 return;
2491 }
2492 if (sigio->sio_pgid > 0) {
2493 PROC_LOCK(sigio->sio_proc);
2494 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
2495 psignal(sigio->sio_proc, sig);
2496 PROC_UNLOCK(sigio->sio_proc);
2497 } else if (sigio->sio_pgid < 0) {
2498 struct proc *p;
2499
2500 PGRP_LOCK(sigio->sio_pgrp);
2501 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
2502 PROC_LOCK(p);
2503 if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
2504 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
2505 psignal(p, sig);
2506 PROC_UNLOCK(p);
2507 }
2508 PGRP_UNLOCK(sigio->sio_pgrp);
2509 }
2510 SIGIO_UNLOCK();
2511}
2512
2513static int
2514filt_sigattach(struct knote *kn)
2515{
2516 struct proc *p = curproc;
2517
2518 kn->kn_ptr.p_proc = p;
2519 kn->kn_flags |= EV_CLEAR; /* automatically set */
2520
2521 PROC_LOCK(p);
2522 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2523 PROC_UNLOCK(p);
2524
2525 return (0);
2526}
2527
2528static void
2529filt_sigdetach(struct knote *kn)
2530{
2531 struct proc *p = kn->kn_ptr.p_proc;
2532
2533 PROC_LOCK(p);
2534 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2535 PROC_UNLOCK(p);
2536}
2537
2538/*
2539 * signal knotes are shared with proc knotes, so we apply a mask to
2540 * the hint in order to differentiate them from process hints. This
2541 * could be avoided by using a signal-specific knote list, but probably
2542 * isn't worth the trouble.
2543 */
2544static int
2545filt_signal(struct knote *kn, long hint)
2546{
2547
2548 if (hint & NOTE_SIGNAL) {
2549 hint &= ~NOTE_SIGNAL;
2550
2551 if (kn->kn_id == hint)
2552 kn->kn_data++;
2553 }
2554 return (kn->kn_data != 0);
2555}
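/*
 * Userland sketch (standard kqueue(2)/EVFILT_SIGNAL usage; assumes
 * <sys/event.h> and <signal.h>): the knote attached by filt_sigattach()
 * is activated from tdsignal() via KNOTE(&p->p_klist, NOTE_SIGNAL | sig),
 * and filt_signal() above counts the matching deliveries in kn_data,
 * which is returned to the caller as kev.data.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	signal(SIGUSR1, SIG_IGN);
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 */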
2556
2557struct sigacts *
2558sigacts_alloc(void)
2559{
2560 struct sigacts *ps;
2561
2562 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
2563 ps->ps_refcnt = 1;
2564 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
2565 return (ps);
2566}
2567
2568void
2569sigacts_free(struct sigacts *ps)
2570{
2571
2572 mtx_lock(&ps->ps_mtx);
2573 ps->ps_refcnt--;
2574 if (ps->ps_refcnt == 0) {
2575 mtx_destroy(&ps->ps_mtx);
2576 free(ps, M_SUBPROC);
2577 } else
2578 mtx_unlock(&ps->ps_mtx);
2579}
2580
2581struct sigacts *
2582sigacts_hold(struct sigacts *ps)
2583{
2584 mtx_lock(&ps->ps_mtx);
2585 ps->ps_refcnt++;
2586 mtx_unlock(&ps->ps_mtx);
2587 return (ps);
2588}
2589
2590void
2591sigacts_copy(struct sigacts *dest, struct sigacts *src)
2592{
2593
2594 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
2595 mtx_lock(&src->ps_mtx);
2596 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
2597 mtx_unlock(&src->ps_mtx);
2598}
2599
2600int
2601sigacts_shared(struct sigacts *ps)
2602{
2603 int shared;
2604
2605 mtx_lock(&ps->ps_mtx);
2606 shared = ps->ps_refcnt > 1;
2607 mtx_unlock(&ps->ps_mtx);
2608 return (shared);
2609}
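/*
 * Lifecycle sketch (informal): a process normally starts with a sigacts
 * obtained from sigacts_alloc() at ps_refcnt == 1; a fork that shares
 * signal state (e.g. rfork(2) with RFSIGSHARE) would bump the count with
 * sigacts_hold(), sigacts_shared() reports whether such sharing is in
 * effect, and each exiting sharer calls sigacts_free(), with the last
 * drop destroying the mutex and freeing the structure.
 */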
44#include "opt_compat.h"
45#include "opt_ktrace.h"
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/signalvar.h>
50#include <sys/vnode.h>
51#include <sys/acct.h>
52#include <sys/condvar.h>
53#include <sys/event.h>
54#include <sys/fcntl.h>
55#include <sys/kernel.h>
56#include <sys/ktr.h>
57#include <sys/ktrace.h>
58#include <sys/lock.h>
59#include <sys/malloc.h>
60#include <sys/mutex.h>
61#include <sys/namei.h>
62#include <sys/proc.h>
63#include <sys/pioctl.h>
64#include <sys/resourcevar.h>
65#include <sys/smp.h>
66#include <sys/stat.h>
67#include <sys/sx.h>
68#include <sys/syscallsubr.h>
69#include <sys/sysctl.h>
70#include <sys/sysent.h>
71#include <sys/syslog.h>
72#include <sys/sysproto.h>
73#include <sys/unistd.h>
74#include <sys/wait.h>
75
76#include <machine/cpu.h>
77
78#if defined (__alpha__) && !defined(COMPAT_43)
79#error "You *really* need COMPAT_43 on the alpha for longjmp(3)"
80#endif
81
82#define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
83
84static int coredump(struct thread *);
85static char *expand_name(const char *, uid_t, pid_t);
86static int killpg1(struct thread *td, int sig, int pgid, int all);
87static int issignal(struct thread *p);
88static int sigprop(int sig);
89static void stop(struct proc *);
90static void tdsigwakeup(struct thread *td, int sig, sig_t action);
91static int filt_sigattach(struct knote *kn);
92static void filt_sigdetach(struct knote *kn);
93static int filt_signal(struct knote *kn, long hint);
94static struct thread *sigtd(struct proc *p, int sig, int prop);
95static int kern_sigtimedwait(struct thread *td, sigset_t set,
96 siginfo_t *info, struct timespec *timeout);
97
98struct filterops sig_filtops =
99 { 0, filt_sigattach, filt_sigdetach, filt_signal };
100
101static int kern_logsigexit = 1;
102SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
103 &kern_logsigexit, 0,
104 "Log processes quitting on abnormal signals to syslog(3)");
105
106/*
107	 * Policy -- may a process holding ucred cr1 send SIGIO to a process with ucred cr2?
108 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
109 * in the right situations.
110 */
111#define CANSIGIO(cr1, cr2) \
112 ((cr1)->cr_uid == 0 || \
113 (cr1)->cr_ruid == (cr2)->cr_ruid || \
114 (cr1)->cr_uid == (cr2)->cr_ruid || \
115 (cr1)->cr_ruid == (cr2)->cr_uid || \
116 (cr1)->cr_uid == (cr2)->cr_uid)
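/*
 * Informal example: under CANSIGIO(), root may signal anyone, and a
 * sigio owner whose credential carries uid 1001 (real or effective)
 * may deliver SIGIO or SIGURG to any process whose credential has
 * 1001 as either its real or effective uid.
 */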
117
118int sugid_coredump;
119SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
120 &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
121
122static int do_coredump = 1;
123SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
124 &do_coredump, 0, "Enable/Disable coredumps");
125
126/*
127 * Signal properties and actions.
128 * The array below categorizes the signals and their default actions
129 * according to the following properties:
130 */
131#define SA_KILL 0x01 /* terminates process by default */
132#define SA_CORE 0x02 /* ditto and coredumps */
133#define SA_STOP 0x04 /* suspend process */
134#define SA_TTYSTOP 0x08 /* ditto, from tty */
135#define SA_IGNORE 0x10 /* ignore by default */
136#define SA_CONT 0x20 /* continue if suspended */
137#define SA_CANTMASK 0x40 /* non-maskable, catchable */
138#define SA_PROC 0x80 /* deliverable to any thread */
139
140static int sigproptbl[NSIG] = {
141 SA_KILL|SA_PROC, /* SIGHUP */
142 SA_KILL|SA_PROC, /* SIGINT */
143 SA_KILL|SA_CORE|SA_PROC, /* SIGQUIT */
144 SA_KILL|SA_CORE, /* SIGILL */
145 SA_KILL|SA_CORE, /* SIGTRAP */
146 SA_KILL|SA_CORE, /* SIGABRT */
147 SA_KILL|SA_CORE|SA_PROC, /* SIGEMT */
148 SA_KILL|SA_CORE, /* SIGFPE */
149 SA_KILL|SA_PROC, /* SIGKILL */
150 SA_KILL|SA_CORE, /* SIGBUS */
151 SA_KILL|SA_CORE, /* SIGSEGV */
152 SA_KILL|SA_CORE, /* SIGSYS */
153 SA_KILL|SA_PROC, /* SIGPIPE */
154 SA_KILL|SA_PROC, /* SIGALRM */
155 SA_KILL|SA_PROC, /* SIGTERM */
156 SA_IGNORE|SA_PROC, /* SIGURG */
157 SA_STOP|SA_PROC, /* SIGSTOP */
158 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTSTP */
159 SA_IGNORE|SA_CONT|SA_PROC, /* SIGCONT */
160 SA_IGNORE|SA_PROC, /* SIGCHLD */
161 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTIN */
162 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTOU */
163 SA_IGNORE|SA_PROC, /* SIGIO */
164 SA_KILL, /* SIGXCPU */
165 SA_KILL, /* SIGXFSZ */
166 SA_KILL|SA_PROC, /* SIGVTALRM */
167 SA_KILL|SA_PROC, /* SIGPROF */
168 SA_IGNORE|SA_PROC, /* SIGWINCH */
169 SA_IGNORE|SA_PROC, /* SIGINFO */
170 SA_KILL|SA_PROC, /* SIGUSR1 */
171 SA_KILL|SA_PROC, /* SIGUSR2 */
172};
173
174/*
175	 * Determine the signal that should be delivered to the thread td,
176	 * the current thread, returning 0 if none.  If there is a pending
177	 * stop signal with default action, the process stops in issignal().
178 * XXXKSE the check for a pending stop is not done under KSE
179 *
180 * MP SAFE.
181 */
182int
183cursig(struct thread *td)
184{
185 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
186 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
187 mtx_assert(&sched_lock, MA_NOTOWNED);
188 return (SIGPENDING(td) ? issignal(td) : 0);
189}
190
191/*
192 * Arrange for ast() to handle unmasked pending signals on return to user
193 * mode. This must be called whenever a signal is added to td_siglist or
194 * unmasked in td_sigmask.
195 */
196void
197signotify(struct thread *td)
198{
199 struct proc *p;
200 sigset_t set;
201
202 p = td->td_proc;
203
204 PROC_LOCK_ASSERT(p, MA_OWNED);
205
206 /*
207	 * If our mask changed we may have to move signals that were
208 * previously masked by all threads to our siglist.
209 */
210 set = p->p_siglist;
211 SIGSETNAND(set, td->td_sigmask);
212 SIGSETNAND(p->p_siglist, set);
213 SIGSETOR(td->td_siglist, set);
214
215 if (SIGPENDING(td)) {
216 mtx_lock_spin(&sched_lock);
217 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
218 mtx_unlock_spin(&sched_lock);
219 }
220}
221
222int
223sigonstack(size_t sp)
224{
225 struct proc *p = curthread->td_proc;
226
227 PROC_LOCK_ASSERT(p, MA_OWNED);
228 return ((p->p_flag & P_ALTSTACK) ?
229#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
230 ((p->p_sigstk.ss_size == 0) ? (p->p_sigstk.ss_flags & SS_ONSTACK) :
231 ((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size))
232#else
233 ((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size)
234#endif
235 : 0);
236}
237
238static __inline int
239sigprop(int sig)
240{
241
242 if (sig > 0 && sig < NSIG)
243 return (sigproptbl[_SIG_IDX(sig)]);
244 return (0);
245}
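/*
 * Example (taken from the table above): sigprop(SIGTSTP) returns
 * SA_STOP | SA_TTYSTOP | SA_PROC, while sigprop(SIGQUIT) returns
 * SA_KILL | SA_CORE | SA_PROC; out-of-range signal numbers yield 0.
 */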
246
247int
248sig_ffs(sigset_t *set)
249{
250 int i;
251
252 for (i = 0; i < _SIG_WORDS; i++)
253 if (set->__bits[i])
254 return (ffs(set->__bits[i]) + (i * 32));
255 return (0);
256}
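/*
 * Sketch of sig_ffs() (illustrative, assuming the usual sig-1 bit
 * numbering of _SIG_BIT()): if only SIGINT (2) and SIGTERM (15) are
 * set, the first word is nonzero and ffs() reports bit position 2,
 * so sig_ffs() returns 2, i.e. SIGINT.
 */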
257
258/*
259 * kern_sigaction
260 * sigaction
261 * freebsd4_sigaction
262 * osigaction
263 *
264 * MPSAFE
265 */
266int
267kern_sigaction(td, sig, act, oact, flags)
268 struct thread *td;
269 register int sig;
270 struct sigaction *act, *oact;
271 int flags;
272{
273 struct sigacts *ps;
274 struct thread *td0;
275 struct proc *p = td->td_proc;
276
277 if (!_SIG_VALID(sig))
278 return (EINVAL);
279
280 PROC_LOCK(p);
281 ps = p->p_sigacts;
282 mtx_lock(&ps->ps_mtx);
283 if (oact) {
284 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
285 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
286 oact->sa_flags = 0;
287 if (SIGISMEMBER(ps->ps_sigonstack, sig))
288 oact->sa_flags |= SA_ONSTACK;
289 if (!SIGISMEMBER(ps->ps_sigintr, sig))
290 oact->sa_flags |= SA_RESTART;
291 if (SIGISMEMBER(ps->ps_sigreset, sig))
292 oact->sa_flags |= SA_RESETHAND;
293 if (SIGISMEMBER(ps->ps_signodefer, sig))
294 oact->sa_flags |= SA_NODEFER;
295 if (SIGISMEMBER(ps->ps_siginfo, sig))
296 oact->sa_flags |= SA_SIGINFO;
297 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
298 oact->sa_flags |= SA_NOCLDSTOP;
299 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
300 oact->sa_flags |= SA_NOCLDWAIT;
301 }
302 if (act) {
303 if ((sig == SIGKILL || sig == SIGSTOP) &&
304 act->sa_handler != SIG_DFL) {
305 mtx_unlock(&ps->ps_mtx);
306 PROC_UNLOCK(p);
307 return (EINVAL);
308 }
309
310 /*
311 * Change setting atomically.
312 */
313
314 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
315 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
316 if (act->sa_flags & SA_SIGINFO) {
317 ps->ps_sigact[_SIG_IDX(sig)] =
318 (__sighandler_t *)act->sa_sigaction;
319 SIGADDSET(ps->ps_siginfo, sig);
320 } else {
321 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
322 SIGDELSET(ps->ps_siginfo, sig);
323 }
324 if (!(act->sa_flags & SA_RESTART))
325 SIGADDSET(ps->ps_sigintr, sig);
326 else
327 SIGDELSET(ps->ps_sigintr, sig);
328 if (act->sa_flags & SA_ONSTACK)
329 SIGADDSET(ps->ps_sigonstack, sig);
330 else
331 SIGDELSET(ps->ps_sigonstack, sig);
332 if (act->sa_flags & SA_RESETHAND)
333 SIGADDSET(ps->ps_sigreset, sig);
334 else
335 SIGDELSET(ps->ps_sigreset, sig);
336 if (act->sa_flags & SA_NODEFER)
337 SIGADDSET(ps->ps_signodefer, sig);
338 else
339 SIGDELSET(ps->ps_signodefer, sig);
340#ifdef COMPAT_SUNOS
341 if (act->sa_flags & SA_USERTRAMP)
342 SIGADDSET(ps->ps_usertramp, sig);
343 else
344 SIGDELSET(ps->ps_usertramp, sig);
345#endif
346 if (sig == SIGCHLD) {
347 if (act->sa_flags & SA_NOCLDSTOP)
348 ps->ps_flag |= PS_NOCLDSTOP;
349 else
350 ps->ps_flag &= ~PS_NOCLDSTOP;
351 if (act->sa_flags & SA_NOCLDWAIT) {
352 /*
353 * Paranoia: since SA_NOCLDWAIT is implemented
354 * by reparenting the dying child to PID 1 (and
355 * trust it to reap the zombie), PID 1 itself
356 * is forbidden to set SA_NOCLDWAIT.
357 */
358 if (p->p_pid == 1)
359 ps->ps_flag &= ~PS_NOCLDWAIT;
360 else
361 ps->ps_flag |= PS_NOCLDWAIT;
362 } else
363 ps->ps_flag &= ~PS_NOCLDWAIT;
364 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
365 ps->ps_flag |= PS_CLDSIGIGN;
366 else
367 ps->ps_flag &= ~PS_CLDSIGIGN;
368 }
369 /*
370 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
371 * and for signals set to SIG_DFL where the default is to
372 * ignore. However, don't put SIGCONT in ps_sigignore, as we
373 * have to restart the process.
374 */
375 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
376 (sigprop(sig) & SA_IGNORE &&
377 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
378 /* never to be seen again */
379 SIGDELSET(p->p_siglist, sig);
380 FOREACH_THREAD_IN_PROC(p, td0)
381 SIGDELSET(td0->td_siglist, sig);
382 if (sig != SIGCONT)
383 /* easier in psignal */
384 SIGADDSET(ps->ps_sigignore, sig);
385 SIGDELSET(ps->ps_sigcatch, sig);
386 } else {
387 SIGDELSET(ps->ps_sigignore, sig);
388 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
389 SIGDELSET(ps->ps_sigcatch, sig);
390 else
391 SIGADDSET(ps->ps_sigcatch, sig);
392 }
393#ifdef COMPAT_FREEBSD4
394 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
395 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
396 (flags & KSA_FREEBSD4) == 0)
397 SIGDELSET(ps->ps_freebsd4, sig);
398 else
399 SIGADDSET(ps->ps_freebsd4, sig);
400#endif
401#ifdef COMPAT_43
402 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
403 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
404 (flags & KSA_OSIGSET) == 0)
405 SIGDELSET(ps->ps_osigset, sig);
406 else
407 SIGADDSET(ps->ps_osigset, sig);
408#endif
409 }
410 mtx_unlock(&ps->ps_mtx);
411 PROC_UNLOCK(p);
412 return (0);
413}
414
415#ifndef _SYS_SYSPROTO_H_
416struct sigaction_args {
417 int sig;
418 struct sigaction *act;
419 struct sigaction *oact;
420};
421#endif
422/*
423 * MPSAFE
424 */
425int
426sigaction(td, uap)
427 struct thread *td;
428 register struct sigaction_args *uap;
429{
430 struct sigaction act, oact;
431 register struct sigaction *actp, *oactp;
432 int error;
433
434 actp = (uap->act != NULL) ? &act : NULL;
435 oactp = (uap->oact != NULL) ? &oact : NULL;
436 if (actp) {
437 error = copyin(uap->act, actp, sizeof(act));
438 if (error)
439 return (error);
440 }
441 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
442 if (oactp && !error)
443 error = copyout(oactp, uap->oact, sizeof(oact));
444 return (error);
445}
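/*
 * Userland sketch (standard sigaction(2) usage; "handler" is
 * hypothetical and <signal.h>, <string.h> and <err.h> are assumed):
 * installing a SIGUSR1 handler with SA_RESTART arrives here and is
 * applied by kern_sigaction() with flags == 0.
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = handler;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	if (sigaction(SIGUSR1, &sa, NULL) == -1)
 *		err(1, "sigaction");
 */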
446
447#ifdef COMPAT_FREEBSD4
448#ifndef _SYS_SYSPROTO_H_
449struct freebsd4_sigaction_args {
450 int sig;
451 struct sigaction *act;
452 struct sigaction *oact;
453};
454#endif
455/*
456 * MPSAFE
457 */
458int
459freebsd4_sigaction(td, uap)
460 struct thread *td;
461 register struct freebsd4_sigaction_args *uap;
462{
463 struct sigaction act, oact;
464 register struct sigaction *actp, *oactp;
465 int error;
466
467
468 actp = (uap->act != NULL) ? &act : NULL;
469 oactp = (uap->oact != NULL) ? &oact : NULL;
470 if (actp) {
471 error = copyin(uap->act, actp, sizeof(act));
472 if (error)
473 return (error);
474 }
475 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
476 if (oactp && !error)
477 error = copyout(oactp, uap->oact, sizeof(oact));
478 return (error);
479}
480#endif /* COMPAT_FREEBSD4 */
481
482#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
483#ifndef _SYS_SYSPROTO_H_
484struct osigaction_args {
485 int signum;
486 struct osigaction *nsa;
487 struct osigaction *osa;
488};
489#endif
490/*
491 * MPSAFE
492 */
493int
494osigaction(td, uap)
495 struct thread *td;
496 register struct osigaction_args *uap;
497{
498 struct osigaction sa;
499 struct sigaction nsa, osa;
500 register struct sigaction *nsap, *osap;
501 int error;
502
503 if (uap->signum <= 0 || uap->signum >= ONSIG)
504 return (EINVAL);
505
506 nsap = (uap->nsa != NULL) ? &nsa : NULL;
507 osap = (uap->osa != NULL) ? &osa : NULL;
508
509 if (nsap) {
510 error = copyin(uap->nsa, &sa, sizeof(sa));
511 if (error)
512 return (error);
513 nsap->sa_handler = sa.sa_handler;
514 nsap->sa_flags = sa.sa_flags;
515 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
516 }
517 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
518 if (osap && !error) {
519 sa.sa_handler = osap->sa_handler;
520 sa.sa_flags = osap->sa_flags;
521 SIG2OSIG(osap->sa_mask, sa.sa_mask);
522 error = copyout(&sa, uap->osa, sizeof(sa));
523 }
524 return (error);
525}
526
527#if !defined(__i386__) && !defined(__alpha__)
528/* Avoid replicating the same stub everywhere */
529int
530osigreturn(td, uap)
531 struct thread *td;
532 struct osigreturn_args *uap;
533{
534
535 return (nosys(td, (struct nosys_args *)uap));
536}
537#endif
538#endif /* COMPAT_43 */
539
540/*
541 * Initialize signal state for process 0;
542 * set to ignore signals that are ignored by default.
543 */
544void
545siginit(p)
546 struct proc *p;
547{
548 register int i;
549 struct sigacts *ps;
550
551 PROC_LOCK(p);
552 ps = p->p_sigacts;
553 mtx_lock(&ps->ps_mtx);
554 for (i = 1; i <= NSIG; i++)
555 if (sigprop(i) & SA_IGNORE && i != SIGCONT)
556 SIGADDSET(ps->ps_sigignore, i);
557 mtx_unlock(&ps->ps_mtx);
558 PROC_UNLOCK(p);
559}
560
561/*
562 * Reset signals for an exec of the specified process.
563 */
564void
565execsigs(p)
566 register struct proc *p;
567{
568 register struct sigacts *ps;
569 register int sig;
570
571 /*
572 * Reset caught signals. Held signals remain held
573 * through td_sigmask (unless they were caught,
574 * and are now ignored by default).
575 */
576 PROC_LOCK_ASSERT(p, MA_OWNED);
577 ps = p->p_sigacts;
578 mtx_lock(&ps->ps_mtx);
579 while (SIGNOTEMPTY(ps->ps_sigcatch)) {
580 sig = sig_ffs(&ps->ps_sigcatch);
581 SIGDELSET(ps->ps_sigcatch, sig);
582 if (sigprop(sig) & SA_IGNORE) {
583 if (sig != SIGCONT)
584 SIGADDSET(ps->ps_sigignore, sig);
585 SIGDELSET(p->p_siglist, sig);
586 /*
587 * There is only one thread at this point.
588 */
589 SIGDELSET(FIRST_THREAD_IN_PROC(p)->td_siglist, sig);
590 }
591 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
592 }
593 /*
594 * Clear out the td's sigmask. Normal processes use the proc sigmask.
595 */
596 SIGEMPTYSET(FIRST_THREAD_IN_PROC(p)->td_sigmask);
597 /*
598 * Reset stack state to the user stack.
599 * Clear set of signals caught on the signal stack.
600 */
601 p->p_sigstk.ss_flags = SS_DISABLE;
602 p->p_sigstk.ss_size = 0;
603 p->p_sigstk.ss_sp = 0;
604 p->p_flag &= ~P_ALTSTACK;
605 /*
606 * Reset no zombies if child dies flag as Solaris does.
607 */
608 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
609 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
610 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
611 mtx_unlock(&ps->ps_mtx);
612}
613
614/*
615 * kern_sigprocmask()
616 *
617 * Manipulate signal mask.
618 */
619int
620kern_sigprocmask(td, how, set, oset, old)
621 struct thread *td;
622 int how;
623 sigset_t *set, *oset;
624 int old;
625{
626 int error;
627
628 PROC_LOCK(td->td_proc);
629 if (oset != NULL)
630 *oset = td->td_sigmask;
631
632 error = 0;
633 if (set != NULL) {
634 switch (how) {
635 case SIG_BLOCK:
636 SIG_CANTMASK(*set);
637 SIGSETOR(td->td_sigmask, *set);
638 break;
639 case SIG_UNBLOCK:
640 SIGSETNAND(td->td_sigmask, *set);
641 signotify(td);
642 break;
643 case SIG_SETMASK:
644 SIG_CANTMASK(*set);
645 if (old)
646 SIGSETLO(td->td_sigmask, *set);
647 else
648 td->td_sigmask = *set;
649 signotify(td);
650 break;
651 default:
652 error = EINVAL;
653 break;
654 }
655 }
656 PROC_UNLOCK(td->td_proc);
657 return (error);
658}
659
660/*
661 * sigprocmask() - MP SAFE
662 */
663
664#ifndef _SYS_SYSPROTO_H_
665struct sigprocmask_args {
666 int how;
667 const sigset_t *set;
668 sigset_t *oset;
669};
670#endif
671int
672sigprocmask(td, uap)
673 register struct thread *td;
674 struct sigprocmask_args *uap;
675{
676 sigset_t set, oset;
677 sigset_t *setp, *osetp;
678 int error;
679
680 setp = (uap->set != NULL) ? &set : NULL;
681 osetp = (uap->oset != NULL) ? &oset : NULL;
682 if (setp) {
683 error = copyin(uap->set, setp, sizeof(set));
684 if (error)
685 return (error);
686 }
687 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
688 if (osetp && !error) {
689 error = copyout(osetp, uap->oset, sizeof(oset));
690 }
691 return (error);
692}
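/*
 * Userland sketch (standard sigprocmask(2) usage; the critical section
 * is hypothetical): temporarily blocking SIGINT reaches
 * kern_sigprocmask() above first with SIG_BLOCK and then SIG_SETMASK.
 *
 *	sigset_t set, oset;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &oset);
 *	... code that must not be interrupted by SIGINT ...
 *	sigprocmask(SIG_SETMASK, &oset, NULL);
 */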
693
694#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
695/*
696 * osigprocmask() - MP SAFE
697 */
698#ifndef _SYS_SYSPROTO_H_
699struct osigprocmask_args {
700 int how;
701 osigset_t mask;
702};
703#endif
704int
705osigprocmask(td, uap)
706 register struct thread *td;
707 struct osigprocmask_args *uap;
708{
709 sigset_t set, oset;
710 int error;
711
712 OSIG2SIG(uap->mask, set);
713 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
714 SIG2OSIG(oset, td->td_retval[0]);
715 return (error);
716}
717#endif /* COMPAT_43 */
718
719#ifndef _SYS_SYSPROTO_H_
720struct sigpending_args {
721 sigset_t *set;
722};
723#endif
724/*
725 * MPSAFE
726 */
727int
728sigwait(struct thread *td, struct sigwait_args *uap)
729{
730 siginfo_t info;
731 sigset_t set;
732 int error;
733
734 error = copyin(uap->set, &set, sizeof(set));
735 if (error)
736 return (error);
737
738 error = kern_sigtimedwait(td, set, &info, NULL);
739 if (error)
740 return (error);
741
742 error = copyout(&info.si_signo, uap->sig, sizeof(info.si_signo));
743 /* Repost if we got an error. */
744 if (error && info.si_signo) {
745 PROC_LOCK(td->td_proc);
746 tdsignal(td, info.si_signo);
747 PROC_UNLOCK(td->td_proc);
748 }
749 return (error);
750}
751/*
752 * MPSAFE
753 */
754int
755sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
756{
757 struct timespec ts;
758 struct timespec *timeout;
759 sigset_t set;
760 siginfo_t info;
761 int error;
762
763 if (uap->timeout) {
764 error = copyin(uap->timeout, &ts, sizeof(ts));
765 if (error)
766 return (error);
767
768 timeout = &ts;
769 } else
770 timeout = NULL;
771
772 error = copyin(uap->set, &set, sizeof(set));
773 if (error)
774 return (error);
775
776 error = kern_sigtimedwait(td, set, &info, timeout);
777 if (error)
778 return (error);
779
780 error = copyout(&info, uap->info, sizeof(info));
781 /* Repost if we got an error. */
782 if (error && info.si_signo) {
783 PROC_LOCK(td->td_proc);
784 tdsignal(td, info.si_signo);
785 PROC_UNLOCK(td->td_proc);
786 }
787 return (error);
788}
789
790/*
791 * MPSAFE
792 */
793int
794sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
795{
796 siginfo_t info;
797 sigset_t set;
798 int error;
799
800 error = copyin(uap->set, &set, sizeof(set));
801 if (error)
802 return (error);
803
804 error = kern_sigtimedwait(td, set, &info, NULL);
805 if (error)
806 return (error);
807
808 error = copyout(&info, uap->info, sizeof(info));
809 /* Repost if we got an error. */
810 if (error && info.si_signo) {
811 PROC_LOCK(td->td_proc);
812 tdsignal(td, info.si_signo);
813 PROC_UNLOCK(td->td_proc);
814 }
815 return (error);
816}
817
818static int
819kern_sigtimedwait(struct thread *td, sigset_t set, siginfo_t *info,
820 struct timespec *timeout)
821{
822 register struct sigacts *ps;
823 sigset_t oldmask;
824 struct proc *p;
825 int error;
826 int sig;
827 int hz;
828
829 p = td->td_proc;
830 error = 0;
831 sig = 0;
832 SIG_CANTMASK(set);
833
834 PROC_LOCK(p);
835 ps = p->p_sigacts;
836 oldmask = td->td_sigmask;
837 td->td_sigmask = set;
838 signotify(td);
839
840 mtx_lock(&ps->ps_mtx);
841 sig = cursig(td);
842 if (sig)
843 goto out;
844
845 /*
846 * POSIX says this must be checked after looking for pending
847 * signals.
848 */
849 if (timeout) {
850 struct timeval tv;
851
852 if (timeout->tv_nsec > 1000000000) {
853 error = EINVAL;
854 goto out;
855 }
856 TIMESPEC_TO_TIMEVAL(&tv, timeout);
857 hz = tvtohz(&tv);
858 } else
859 hz = 0;
860
861 mtx_unlock(&ps->ps_mtx);
862 error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "pause", hz);
863 mtx_lock(&ps->ps_mtx);
864 if (error == EINTR)
865 error = 0;
866 else if (error)
867 goto out;
868
869 sig = cursig(td);
870out:
871 td->td_sigmask = oldmask;
872 if (sig) {
873 sig_t action;
874
875 action = ps->ps_sigact[_SIG_IDX(sig)];
876 mtx_unlock(&ps->ps_mtx);
877#ifdef KTRACE
878 if (KTRPOINT(td, KTR_PSIG))
879 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
880 &td->td_oldsigmask : &td->td_sigmask, 0);
881#endif
882 _STOPEVENT(p, S_SIG, sig);
883
884 if (action == SIG_DFL)
885 sigexit(td, sig);
886 /* NOTREACHED */
887
888 SIGDELSET(td->td_siglist, sig);
889 info->si_signo = sig;
890 info->si_code = 0;
891 } else
892 mtx_unlock(&ps->ps_mtx);
893 PROC_UNLOCK(p);
894 return (error);
895}
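/*
 * Userland sketch of the usual synchronous-wait pattern (illustrative;
 * <signal.h> assumed): the signal is blocked first so that it stays
 * pending rather than running a handler, and the wait itself is
 * serviced by kern_sigtimedwait() above via the sigwait() family.
 *
 *	sigset_t set;
 *	int sig;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	sigwait(&set, &sig);
 */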
896
897/*
898 * MPSAFE
899 */
900int
901sigpending(td, uap)
902 struct thread *td;
903 struct sigpending_args *uap;
904{
905 struct proc *p = td->td_proc;
906 sigset_t siglist;
907
908 PROC_LOCK(p);
909 siglist = p->p_siglist;
910 SIGSETOR(siglist, td->td_siglist);
911 PROC_UNLOCK(p);
912 return (copyout(&siglist, uap->set, sizeof(sigset_t)));
913}
914
915#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
916#ifndef _SYS_SYSPROTO_H_
917struct osigpending_args {
918 int dummy;
919};
920#endif
921/*
922 * MPSAFE
923 */
924int
925osigpending(td, uap)
926 struct thread *td;
927 struct osigpending_args *uap;
928{
929 struct proc *p = td->td_proc;
930 sigset_t siglist;
931
932 PROC_LOCK(p);
933 siglist = p->p_siglist;
934 SIGSETOR(siglist, td->td_siglist);
935 PROC_UNLOCK(p);
936 SIG2OSIG(siglist, td->td_retval[0]);
937 return (0);
938}
939#endif /* COMPAT_43 */
940
941#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
942/*
943 * Generalized interface signal handler, 4.3-compatible.
944 */
945#ifndef _SYS_SYSPROTO_H_
946struct osigvec_args {
947 int signum;
948 struct sigvec *nsv;
949 struct sigvec *osv;
950};
951#endif
952/*
953 * MPSAFE
954 */
955/* ARGSUSED */
956int
957osigvec(td, uap)
958 struct thread *td;
959 register struct osigvec_args *uap;
960{
961 struct sigvec vec;
962 struct sigaction nsa, osa;
963 register struct sigaction *nsap, *osap;
964 int error;
965
966 if (uap->signum <= 0 || uap->signum >= ONSIG)
967 return (EINVAL);
968 nsap = (uap->nsv != NULL) ? &nsa : NULL;
969 osap = (uap->osv != NULL) ? &osa : NULL;
970 if (nsap) {
971 error = copyin(uap->nsv, &vec, sizeof(vec));
972 if (error)
973 return (error);
974 nsap->sa_handler = vec.sv_handler;
975 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
976 nsap->sa_flags = vec.sv_flags;
977 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
978#ifdef COMPAT_SUNOS
979 nsap->sa_flags |= SA_USERTRAMP;
980#endif
981 }
982 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
983 if (osap && !error) {
984 vec.sv_handler = osap->sa_handler;
985 SIG2OSIG(osap->sa_mask, vec.sv_mask);
986 vec.sv_flags = osap->sa_flags;
987 vec.sv_flags &= ~SA_NOCLDWAIT;
988 vec.sv_flags ^= SA_RESTART;
989#ifdef COMPAT_SUNOS
990 vec.sv_flags &= ~SA_NOCLDSTOP;
991#endif
992 error = copyout(&vec, uap->osv, sizeof(vec));
993 }
994 return (error);
995}
996
997#ifndef _SYS_SYSPROTO_H_
998struct osigblock_args {
999 int mask;
1000};
1001#endif
1002/*
1003 * MPSAFE
1004 */
1005int
1006osigblock(td, uap)
1007 register struct thread *td;
1008 struct osigblock_args *uap;
1009{
1010 struct proc *p = td->td_proc;
1011 sigset_t set;
1012
1013 OSIG2SIG(uap->mask, set);
1014 SIG_CANTMASK(set);
1015 PROC_LOCK(p);
1016 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1017 SIGSETOR(td->td_sigmask, set);
1018 PROC_UNLOCK(p);
1019 return (0);
1020}
1021
1022#ifndef _SYS_SYSPROTO_H_
1023struct osigsetmask_args {
1024 int mask;
1025};
1026#endif
1027/*
1028 * MPSAFE
1029 */
1030int
1031osigsetmask(td, uap)
1032 struct thread *td;
1033 struct osigsetmask_args *uap;
1034{
1035 struct proc *p = td->td_proc;
1036 sigset_t set;
1037
1038 OSIG2SIG(uap->mask, set);
1039 SIG_CANTMASK(set);
1040 PROC_LOCK(p);
1041 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1042 SIGSETLO(td->td_sigmask, set);
1043 signotify(td);
1044 PROC_UNLOCK(p);
1045 return (0);
1046}
1047#endif /* COMPAT_43 || COMPAT_SUNOS */
1048
1049/*
1050 * Suspend process until signal, providing mask to be set
1051 * in the meantime. Note nonstandard calling convention:
1052 * libc stub passes mask, not pointer, to save a copyin.
1053 ***** XXXKSE this doesn't make sense under KSE.
1054 ***** Do we suspend the thread or all threads in the process?
1055 ***** How do we suspend threads running NOW on another processor?
1056 */
1057#ifndef _SYS_SYSPROTO_H_
1058struct sigsuspend_args {
1059 const sigset_t *sigmask;
1060};
1061#endif
1062/*
1063 * MPSAFE
1064 */
1065/* ARGSUSED */
1066int
1067sigsuspend(td, uap)
1068 struct thread *td;
1069 struct sigsuspend_args *uap;
1070{
1071 sigset_t mask;
1072 int error;
1073
1074 error = copyin(uap->sigmask, &mask, sizeof(mask));
1075 if (error)
1076 return (error);
1077 return (kern_sigsuspend(td, mask));
1078}
1079
1080int
1081kern_sigsuspend(struct thread *td, sigset_t mask)
1082{
1083 struct proc *p = td->td_proc;
1084
1085 /*
1086 * When returning from sigsuspend, we want
1087 * the old mask to be restored after the
1088 * signal handler has finished. Thus, we
1089	 * save it here and mark the thread (TDP_OLDMASK)
1090	 * to indicate this.
1091 */
1092 PROC_LOCK(p);
1093 td->td_oldsigmask = td->td_sigmask;
1094 td->td_pflags |= TDP_OLDMASK;
1095 SIG_CANTMASK(mask);
1096 td->td_sigmask = mask;
1097 signotify(td);
1098 while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
1099 /* void */;
1100 PROC_UNLOCK(p);
1101 /* always return EINTR rather than ERESTART... */
1102 return (EINTR);
1103}
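/*
 * Userland sketch (the classic race-free wait; "flag" and its handler
 * are hypothetical): SIGINT stays blocked while the flag is tested, and
 * sigsuspend(2) atomically installs the old mask and sleeps; as
 * kern_sigsuspend() above notes, it always returns EINTR once a caught
 * signal has been handled.
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!flag)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */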
1104
1105#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1106#ifndef _SYS_SYSPROTO_H_
1107struct osigsuspend_args {
1108 osigset_t mask;
1109};
1110#endif
1111/*
1112 * MPSAFE
1113 */
1114/* ARGSUSED */
1115int
1116osigsuspend(td, uap)
1117 struct thread *td;
1118 struct osigsuspend_args *uap;
1119{
1120 struct proc *p = td->td_proc;
1121 sigset_t mask;
1122
1123 PROC_LOCK(p);
1124 td->td_oldsigmask = td->td_sigmask;
1125 td->td_pflags |= TDP_OLDMASK;
1126 OSIG2SIG(uap->mask, mask);
1127 SIG_CANTMASK(mask);
1128 SIGSETLO(td->td_sigmask, mask);
1129 signotify(td);
1130 while (msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
1131 /* void */;
1132 PROC_UNLOCK(p);
1133 /* always return EINTR rather than ERESTART... */
1134 return (EINTR);
1135}
1136#endif /* COMPAT_43 */
1137
1138#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1139#ifndef _SYS_SYSPROTO_H_
1140struct osigstack_args {
1141 struct sigstack *nss;
1142 struct sigstack *oss;
1143};
1144#endif
1145/*
1146 * MPSAFE
1147 */
1148/* ARGSUSED */
1149int
1150osigstack(td, uap)
1151 struct thread *td;
1152 register struct osigstack_args *uap;
1153{
1154 struct proc *p = td->td_proc;
1155 struct sigstack nss, oss;
1156 int error = 0;
1157
1158 if (uap->nss != NULL) {
1159 error = copyin(uap->nss, &nss, sizeof(nss));
1160 if (error)
1161 return (error);
1162 }
1163 PROC_LOCK(p);
1164 oss.ss_sp = p->p_sigstk.ss_sp;
1165 oss.ss_onstack = sigonstack(cpu_getstack(td));
1166 if (uap->nss != NULL) {
1167 p->p_sigstk.ss_sp = nss.ss_sp;
1168 p->p_sigstk.ss_size = 0;
1169 p->p_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1170 p->p_flag |= P_ALTSTACK;
1171 }
1172 PROC_UNLOCK(p);
1173 if (uap->oss != NULL)
1174 error = copyout(&oss, uap->oss, sizeof(oss));
1175
1176 return (error);
1177}
1178#endif /* COMPAT_43 || COMPAT_SUNOS */
1179
1180#ifndef _SYS_SYSPROTO_H_
1181struct sigaltstack_args {
1182 stack_t *ss;
1183 stack_t *oss;
1184};
1185#endif
1186/*
1187 * MPSAFE
1188 */
1189/* ARGSUSED */
1190int
1191sigaltstack(td, uap)
1192 struct thread *td;
1193 register struct sigaltstack_args *uap;
1194{
1195 stack_t ss, oss;
1196 int error;
1197
1198 if (uap->ss != NULL) {
1199 error = copyin(uap->ss, &ss, sizeof(ss));
1200 if (error)
1201 return (error);
1202 }
1203 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1204 (uap->oss != NULL) ? &oss : NULL);
1205 if (error)
1206 return (error);
1207 if (uap->oss != NULL)
1208 error = copyout(&oss, uap->oss, sizeof(stack_t));
1209 return (error);
1210}
1211
1212int
1213kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1214{
1215 struct proc *p = td->td_proc;
1216 int oonstack;
1217
1218 PROC_LOCK(p);
1219 oonstack = sigonstack(cpu_getstack(td));
1220
1221 if (oss != NULL) {
1222 *oss = p->p_sigstk;
1223 oss->ss_flags = (p->p_flag & P_ALTSTACK)
1224 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1225 }
1226
1227 if (ss != NULL) {
1228 if (oonstack) {
1229 PROC_UNLOCK(p);
1230 return (EPERM);
1231 }
1232 if ((ss->ss_flags & ~SS_DISABLE) != 0) {
1233 PROC_UNLOCK(p);
1234 return (EINVAL);
1235 }
1236 if (!(ss->ss_flags & SS_DISABLE)) {
1237 if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
1238 PROC_UNLOCK(p);
1239 return (ENOMEM);
1240 }
1241 p->p_sigstk = *ss;
1242 p->p_flag |= P_ALTSTACK;
1243 } else {
1244 p->p_flag &= ~P_ALTSTACK;
1245 }
1246 }
1247 PROC_UNLOCK(p);
1248 return (0);
1249}
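/*
 * Userland sketch (standard sigaltstack(2)/SA_ONSTACK usage; the
 * handler and <stdlib.h>/<signal.h> are assumed): the stack registered
 * here is what kern_sigaltstack() records in p->p_sigstk, and
 * sigonstack() reports whether the current stack pointer already lies
 * on it.
 *
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	sa.sa_handler = overflow_handler;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_ONSTACK;
 *	sigaction(SIGSEGV, &sa, NULL);
 */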
1250
1251/*
1252 * Common code for kill process group/broadcast kill.
1253 * cp is calling process.
1254 */
1255static int
1256killpg1(td, sig, pgid, all)
1257 register struct thread *td;
1258 int sig, pgid, all;
1259{
1260 register struct proc *p;
1261 struct pgrp *pgrp;
1262 int nfound = 0;
1263
1264 if (all) {
1265 /*
1266 * broadcast
1267 */
1268 sx_slock(&allproc_lock);
1269 LIST_FOREACH(p, &allproc, p_list) {
1270 PROC_LOCK(p);
1271 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1272 p == td->td_proc) {
1273 PROC_UNLOCK(p);
1274 continue;
1275 }
1276 if (p_cansignal(td, p, sig) == 0) {
1277 nfound++;
1278 if (sig)
1279 psignal(p, sig);
1280 }
1281 PROC_UNLOCK(p);
1282 }
1283 sx_sunlock(&allproc_lock);
1284 } else {
1285 sx_slock(&proctree_lock);
1286 if (pgid == 0) {
1287 /*
1288 * zero pgid means send to my process group.
1289 */
1290 pgrp = td->td_proc->p_pgrp;
1291 PGRP_LOCK(pgrp);
1292 } else {
1293 pgrp = pgfind(pgid);
1294 if (pgrp == NULL) {
1295 sx_sunlock(&proctree_lock);
1296 return (ESRCH);
1297 }
1298 }
1299 sx_sunlock(&proctree_lock);
1300 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1301 PROC_LOCK(p);
1302 if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
1303 PROC_UNLOCK(p);
1304 continue;
1305 }
1306 if (p->p_state == PRS_ZOMBIE) {
1307 PROC_UNLOCK(p);
1308 continue;
1309 }
1310 if (p_cansignal(td, p, sig) == 0) {
1311 nfound++;
1312 if (sig)
1313 psignal(p, sig);
1314 }
1315 PROC_UNLOCK(p);
1316 }
1317 PGRP_UNLOCK(pgrp);
1318 }
1319 return (nfound ? 0 : ESRCH);
1320}
1321
1322#ifndef _SYS_SYSPROTO_H_
1323struct kill_args {
1324 int pid;
1325 int signum;
1326};
1327#endif
1328/*
1329 * MPSAFE
1330 */
1331/* ARGSUSED */
1332int
1333kill(td, uap)
1334 register struct thread *td;
1335 register struct kill_args *uap;
1336{
1337 register struct proc *p;
1338 int error;
1339
1340 if ((u_int)uap->signum > _SIG_MAXSIG)
1341 return (EINVAL);
1342
1343 if (uap->pid > 0) {
1344 /* kill single process */
1345 if ((p = pfind(uap->pid)) == NULL)
1346 return (ESRCH);
1347 error = p_cansignal(td, p, uap->signum);
1348 if (error == 0 && uap->signum)
1349 psignal(p, uap->signum);
1350 PROC_UNLOCK(p);
1351 return (error);
1352 }
1353 switch (uap->pid) {
1354 case -1: /* broadcast signal */
1355 return (killpg1(td, uap->signum, 0, 1));
1356 case 0: /* signal own process group */
1357 return (killpg1(td, uap->signum, 0, 0));
1358 default: /* negative explicit process group */
1359 return (killpg1(td, uap->signum, -uap->pid, 0));
1360 }
1361 /* NOTREACHED */
1362}
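/*
 * Illustrative summary of the pid encoding handled above (values are
 * hypothetical): kill(1234, SIGTERM) signals a single process,
 * kill(0, SIGTERM) signals the caller's own process group,
 * kill(-1234, SIGTERM) signals process group 1234 via killpg1(), and
 * kill(-1, SIGTERM) broadcasts to every process the caller may signal.
 */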
1363
1364#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1365#ifndef _SYS_SYSPROTO_H_
1366struct okillpg_args {
1367 int pgid;
1368 int signum;
1369};
1370#endif
1371/*
1372 * MPSAFE
1373 */
1374/* ARGSUSED */
1375int
1376okillpg(td, uap)
1377 struct thread *td;
1378 register struct okillpg_args *uap;
1379{
1380
1381 if ((u_int)uap->signum > _SIG_MAXSIG)
1382 return (EINVAL);
1383 return (killpg1(td, uap->signum, uap->pgid, 0));
1384}
1385#endif /* COMPAT_43 || COMPAT_SUNOS */
1386
1387/*
1388 * Send a signal to a process group.
1389 */
1390void
1391gsignal(pgid, sig)
1392 int pgid, sig;
1393{
1394 struct pgrp *pgrp;
1395
1396 if (pgid != 0) {
1397 sx_slock(&proctree_lock);
1398 pgrp = pgfind(pgid);
1399 sx_sunlock(&proctree_lock);
1400 if (pgrp != NULL) {
1401 pgsignal(pgrp, sig, 0);
1402 PGRP_UNLOCK(pgrp);
1403 }
1404 }
1405}
1406
1407/*
1408 * Send a signal to a process group. If checktty is 1,
1409 * limit to members which have a controlling terminal.
1410 */
1411void
1412pgsignal(pgrp, sig, checkctty)
1413 struct pgrp *pgrp;
1414 int sig, checkctty;
1415{
1416 register struct proc *p;
1417
1418 if (pgrp) {
1419 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1420 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1421 PROC_LOCK(p);
1422 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1423 psignal(p, sig);
1424 PROC_UNLOCK(p);
1425 }
1426 }
1427}
1428
1429/*
1430 * Send a signal caused by a trap to the current thread.
1431 * If it will be caught immediately, deliver it with correct code.
1432 * Otherwise, post it normally.
1433 *
1434 * MPSAFE
1435 */
1436void
1437trapsignal(struct thread *td, int sig, u_long code)
1438{
1439 struct sigacts *ps;
1440 struct proc *p;
1441
1442 p = td->td_proc;
1443
1444 PROC_LOCK(p);
1445 ps = p->p_sigacts;
1446 mtx_lock(&ps->ps_mtx);
1447 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
1448 !SIGISMEMBER(td->td_sigmask, sig)) {
1449 p->p_stats->p_ru.ru_nsignals++;
1450#ifdef KTRACE
1451 if (KTRPOINT(curthread, KTR_PSIG))
1452 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
1453 &td->td_sigmask, code);
1454#endif
1455 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
1456 &td->td_sigmask, code);
1457 SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
1458 if (!SIGISMEMBER(ps->ps_signodefer, sig))
1459 SIGADDSET(td->td_sigmask, sig);
1460 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
1461 /*
1462 * See kern_sigaction() for origin of this code.
1463 */
1464 SIGDELSET(ps->ps_sigcatch, sig);
1465 if (sig != SIGCONT &&
1466 sigprop(sig) & SA_IGNORE)
1467 SIGADDSET(ps->ps_sigignore, sig);
1468 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1469 }
1470 mtx_unlock(&ps->ps_mtx);
1471 } else {
1472 mtx_unlock(&ps->ps_mtx);
1473 p->p_code = code; /* XXX for core dump/debugger */
1474 p->p_sig = sig; /* XXX to verify code */
1475 tdsignal(td, sig);
1476 }
1477 PROC_UNLOCK(p);
1478}
1479
1480static struct thread *
1481sigtd(struct proc *p, int sig, int prop)
1482{
1483 struct thread *td;
1484
1485 PROC_LOCK_ASSERT(p, MA_OWNED);
1486
1487 /*
1488 * If we know the signal is bound for a specific thread then we
1489	 * assume that we are in that thread's context.  This is the case
1490 * for SIGXCPU, SIGILL, etc. Otherwise someone did a kill() from
1491 * userland and the real thread doesn't actually matter.
1492 */
1493 if ((prop & SA_PROC) != 0 && curthread->td_proc == p)
1494 return (curthread);
1495
1496 /*
1497 * We should search for the first thread that is blocked in
1498 * sigsuspend with this signal unmasked.
1499 */
1500
1501 /* XXX */
1502
1503 /*
1504 * Find the first thread in the proc that doesn't have this signal
1505 * masked.
1506 */
1507 FOREACH_THREAD_IN_PROC(p, td)
1508 if (!SIGISMEMBER(td->td_sigmask, sig))
1509 return (td);
1510
1511 return (FIRST_THREAD_IN_PROC(p));
1512}
1513
1514/*
1515 * Send the signal to the process. If the signal has an action, the action
1516 * is usually performed by the target process rather than the caller; we add
1517 * the signal to the set of pending signals for the process.
1518 *
1519 * Exceptions:
1520 * o When a stop signal is sent to a sleeping process that takes the
1521 * default action, the process is stopped without awakening it.
1522 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1523 * regardless of the signal action (eg, blocked or ignored).
1524 *
1525 * Other ignored signals are discarded immediately.
1526 *
1527 * MPSAFE
1528 */
1529void
1530psignal(struct proc *p, int sig)
1531{
1532 struct thread *td;
1533 int prop;
1534
1535 PROC_LOCK_ASSERT(p, MA_OWNED);
1536 prop = sigprop(sig);
1537
1538 /*
1539 * Find a thread to deliver the signal to.
1540 */
1541 td = sigtd(p, sig, prop);
1542
1543 tdsignal(td, sig);
1544}
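/*
 * In-kernel usage sketch (mirrors killproc() and nosys() in this file):
 * the target process must be locked around the call.
 *
 *	PROC_LOCK(p);
 *	psignal(p, SIGKILL);
 *	PROC_UNLOCK(p);
 */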
1545
1546/*
1547 * MPSAFE
1548 */
1549void
1550tdsignal(struct thread *td, int sig)
1551{
1552 struct proc *p;
1553 register sig_t action;
1554 sigset_t *siglist;
1555 struct thread *td0;
1556 register int prop;
1557 struct sigacts *ps;
1558
1559 KASSERT(_SIG_VALID(sig),
1560 ("tdsignal(): invalid signal %d\n", sig));
1561
1562 p = td->td_proc;
1563 ps = p->p_sigacts;
1564
1565 PROC_LOCK_ASSERT(p, MA_OWNED);
1566 KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
1567
1568 prop = sigprop(sig);
1569
1570 /*
1571 * If this thread is blocking this signal then we'll leave it in the
1572 * proc so that we can find it in the first thread that unblocks it.
1573 */
1574 if (SIGISMEMBER(td->td_sigmask, sig))
1575 siglist = &p->p_siglist;
1576 else
1577 siglist = &td->td_siglist;
1578
1579 /*
1580 * If proc is traced, always give parent a chance;
1581 * if signal event is tracked by procfs, give *that*
1582 * a chance, as well.
1583 */
1584 if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
1585 action = SIG_DFL;
1586 } else {
1587 /*
1588 * If the signal is being ignored,
1589 * then we forget about it immediately.
1590 * (Note: we don't set SIGCONT in ps_sigignore,
1591 * and if it is set to SIG_IGN,
1592 * action will be SIG_DFL here.)
1593 */
1594 mtx_lock(&ps->ps_mtx);
1595 if (SIGISMEMBER(ps->ps_sigignore, sig) ||
1596 (p->p_flag & P_WEXIT)) {
1597 mtx_unlock(&ps->ps_mtx);
1598 return;
1599 }
1600 if (SIGISMEMBER(td->td_sigmask, sig))
1601 action = SIG_HOLD;
1602 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
1603 action = SIG_CATCH;
1604 else
1605 action = SIG_DFL;
1606 mtx_unlock(&ps->ps_mtx);
1607 }
1608
1609 if (prop & SA_CONT) {
1610 SIG_STOPSIGMASK(p->p_siglist);
1611 /*
1612 * XXX Should investigate leaving STOP and CONT sigs only in
1613 * the proc's siglist.
1614 */
1615 FOREACH_THREAD_IN_PROC(p, td0)
1616 SIG_STOPSIGMASK(td0->td_siglist);
1617 }
1618
1619 if (prop & SA_STOP) {
1620 /*
1621 * If sending a tty stop signal to a member of an orphaned
1622 * process group, discard the signal here if the action
1623 * is default; don't stop the process below if sleeping,
1624 * and don't clear any pending SIGCONT.
1625 */
1626 if ((prop & SA_TTYSTOP) &&
1627 (p->p_pgrp->pg_jobc == 0) &&
1628 (action == SIG_DFL))
1629 return;
1630 SIG_CONTSIGMASK(p->p_siglist);
1631 FOREACH_THREAD_IN_PROC(p, td0)
1632 SIG_CONTSIGMASK(td0->td_siglist);
1633 p->p_flag &= ~P_CONTINUED;
1634 }
1635 SIGADDSET(*siglist, sig);
1636 signotify(td); /* uses schedlock */
1637 /*
1638 * Defer further processing for signals which are held,
1639 * except that stopped processes must be continued by SIGCONT.
1640 */
1641 if (action == SIG_HOLD &&
1642 !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
1643 return;
1644 /*
1645 * Some signals have a process-wide effect and a per-thread
1646 * component. Most processing occurs when the process next
1647	 * tries to cross the user boundary; however, there are some
1648	 * times when processing needs to be done immediately, such as
1649	 * waking up threads so that they can cross the user boundary.
1650	 * We try to do the per-process part here.
1651 */
1652 if (P_SHOULDSTOP(p)) {
1653 /*
1654 * The process is in stopped mode. All the threads should be
1655 * either winding down or already on the suspended queue.
1656 */
1657 if (p->p_flag & P_TRACED) {
1658 /*
1659 * The traced process is already stopped,
1660 * so no further action is necessary.
1661 * No signal can restart us.
1662 */
1663 goto out;
1664 }
1665
1666 if (sig == SIGKILL) {
1667 /*
1668 * SIGKILL sets process running.
1669 * It will die elsewhere.
1670 * All threads must be restarted.
1671 */
1672 p->p_flag &= ~P_STOPPED;
1673 goto runfast;
1674 }
1675
1676 if (prop & SA_CONT) {
1677 /*
1678 * If SIGCONT is default (or ignored), we continue the
1679 * process but don't leave the signal in siglist as
1680 * it has no further action. If SIGCONT is held, we
1681 * continue the process and leave the signal in
1682 * siglist. If the process catches SIGCONT, let it
1683 * handle the signal itself. If it isn't waiting on
1684 * an event, it goes back to run state.
1685 * Otherwise, process goes back to sleep state.
1686 */
1687 p->p_flag &= ~P_STOPPED_SIG;
1688 p->p_flag |= P_CONTINUED;
1689 if (action == SIG_DFL) {
1690 SIGDELSET(*siglist, sig);
1691 } else if (action == SIG_CATCH) {
1692 /*
1693 * The process wants to catch it so it needs
1694 * to run at least one thread, but which one?
1695 * It would seem that the answer would be to
1696 * run an upcall in the next KSE to run, and
1697 * deliver the signal that way. In a NON KSE
1698 * process, we need to make sure that the
1699 * single thread is runnable asap.
1700 * XXXKSE for now however, make them all run.
1701 */
1702 goto runfast;
1703 }
1704 /*
1705 * The signal is not ignored or caught.
1706 */
1707 mtx_lock_spin(&sched_lock);
1708 thread_unsuspend(p);
1709 mtx_unlock_spin(&sched_lock);
1710 goto out;
1711 }
1712
1713 if (prop & SA_STOP) {
1714 /*
1715 * Already stopped, don't need to stop again
1716	 * (if we did, the shell could get confused);
1717	 * just make sure the signal STOP bit is set.
1718 */
1719 p->p_flag |= P_STOPPED_SIG;
1720 SIGDELSET(*siglist, sig);
1721 goto out;
1722 }
1723
1724 /*
1725 * All other kinds of signals:
1726 * If a thread is sleeping interruptibly, simulate a
1727 * wakeup so that when it is continued it will be made
1728 * runnable and can look at the signal. However, don't make
1729 * the PROCESS runnable, leave it stopped.
1730 * It may run a bit until it hits a thread_suspend_check().
1731 */
1732 mtx_lock_spin(&sched_lock);
1733 if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
1734 if (td->td_flags & TDF_CVWAITQ)
1735 cv_abort(td);
1736 else
1737 abortsleep(td);
1738 }
1739 mtx_unlock_spin(&sched_lock);
1740 goto out;
1741 /*
1742 * XXXKSE What about threads that are waiting on mutexes?
1743 * Shouldn't they abort too?
1744 * No; hopefully mutexes are short-lived. They'll
1745 * eventually hit thread_suspend_check().
1746 */
1747 } else if (p->p_state == PRS_NORMAL) {
1748 if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
1749 !(prop & SA_STOP)) {
1750 mtx_lock_spin(&sched_lock);
1751 tdsigwakeup(td, sig, action);
1752 mtx_unlock_spin(&sched_lock);
1753 goto out;
1754 }
1755 if (prop & SA_STOP) {
1756 if (p->p_flag & P_PPWAIT)
1757 goto out;
1758 p->p_flag |= P_STOPPED_SIG;
1759 p->p_xstat = sig;
1760 mtx_lock_spin(&sched_lock);
1761 FOREACH_THREAD_IN_PROC(p, td0) {
1762 if (TD_IS_SLEEPING(td0) &&
1763 (td0->td_flags & TDF_SINTR))
1764 thread_suspend_one(td0);
1765 }
1766 thread_stopped(p);
1767 if (p->p_numthreads == p->p_suspcount) {
1768 SIGDELSET(p->p_siglist, p->p_xstat);
1769 FOREACH_THREAD_IN_PROC(p, td0)
1770 SIGDELSET(td0->td_siglist, p->p_xstat);
1771 }
1772 mtx_unlock_spin(&sched_lock);
1773 goto out;
1774 }
1775 else
1776 goto runfast;
1777 /* NOTREACHED */
1778 } else {
1779 /* Not in "NORMAL" state; discard the signal. */
1780 SIGDELSET(*siglist, sig);
1781 goto out;
1782 }
1783
1784 /*
1785 * The process is not stopped, so we need to apply the signal to all the
1786 * running threads.
1787 */
1788
1789runfast:
1790 mtx_lock_spin(&sched_lock);
1791 tdsigwakeup(td, sig, action);
1792 thread_unsuspend(p);
1793 mtx_unlock_spin(&sched_lock);
1794out:
1795 /* If we jump here, sched_lock should not be owned. */
1796 mtx_assert(&sched_lock, MA_NOTOWNED);
1797}
1798
1799/*
1800 * The force of a signal has been directed against a single
1801 * thread. We need to see what we can do about knocking it
1802 * out of any sleep it may be in, etc.
1803 */
1804static void
1805tdsigwakeup(struct thread *td, int sig, sig_t action)
1806{
1807 struct proc *p = td->td_proc;
1808 register int prop;
1809
1810 PROC_LOCK_ASSERT(p, MA_OWNED);
1811 mtx_assert(&sched_lock, MA_OWNED);
1812 prop = sigprop(sig);
1813 /*
1814 * Bring the priority of a thread up if we want it to get
1815 * killed in this lifetime.
1816 */
1817 if ((action == SIG_DFL) && (prop & SA_KILL)) {
1818 if (td->td_priority > PUSER) {
1819 td->td_priority = PUSER;
1820 }
1821 }
1822 if (TD_IS_SLEEPING(td)) {
1823 /*
1824 * If the thread is sleeping uninterruptibly,
1825 * we can't interrupt the sleep... the signal will
1826 * be noticed when the process returns through
1827 * trap() or syscall().
1828 */
1829 if ((td->td_flags & TDF_SINTR) == 0) {
1830 return;
1831 }
1832 /*
1833 * Process is sleeping and traced. Make it runnable
1834 * so it can discover the signal in issignal() and stop
1835 * for its parent.
1836 */
1837 if (p->p_flag & P_TRACED) {
1838 p->p_flag &= ~P_STOPPED_TRACE;
1839 } else {
1840
1841 /*
1842 * If SIGCONT is default (or ignored) and process is
1843 * asleep, we are finished; the process should not
1844 * be awakened.
1845 */
1846 if ((prop & SA_CONT) && action == SIG_DFL) {
1847 SIGDELSET(p->p_siglist, sig);
1848 /*
1849 * It may be on either list in this state.
1850 * Remove from both for now.
1851 */
1852 SIGDELSET(td->td_siglist, sig);
1853 return;
1854 }
1855
1856 /*
1857 * Raise priority to at least PUSER.
1858 */
1859 if (td->td_priority > PUSER) {
1860 td->td_priority = PUSER;
1861 }
1862 }
1863 if (td->td_flags & TDF_CVWAITQ)
1864 cv_abort(td);
1865 else
1866 abortsleep(td);
1867 }
1868#ifdef SMP
1869 else {
1870 /*
1871 * Other states do nothing with the signal immediately,
1872 * other than kicking ourselves if we are running.
1873 * It will either never be noticed, or noticed very soon.
1874 */
1875 if (TD_IS_RUNNING(td) && td != curthread) {
1876 forward_signal(td);
1877 }
1878 }
1879#endif
1880}
1881
1882/*
1883 * If the current process has received a signal (should be caught or cause
1884 * termination, should interrupt current syscall), return the signal number.
1885 * Stop signals with default action are processed immediately, then cleared;
1886 * they aren't returned. This is checked after each entry to the system for
1887 * a syscall or trap (though this can usually be done without calling issignal
1888 * by checking the pending signal masks in cursig().) The normal call
1889 * sequence is
1890 *
1891 * while (sig = cursig(curthread))
1892 * postsig(sig);
1893 */
1894static int
1895issignal(td)
1896 struct thread *td;
1897{
1898 struct proc *p;
1899 struct sigacts *ps;
1900 sigset_t sigpending;
1901 register int sig, prop;
1902
1903 p = td->td_proc;
1904 ps = p->p_sigacts;
1905 mtx_assert(&ps->ps_mtx, MA_OWNED);
1906 PROC_LOCK_ASSERT(p, MA_OWNED);
1907 for (;;) {
1908 int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
1909
1910 sigpending = td->td_siglist;
1911 SIGSETNAND(sigpending, td->td_sigmask);
1912
1913 if (p->p_flag & P_PPWAIT)
1914 SIG_STOPSIGMASK(sigpending);
1915 if (SIGISEMPTY(sigpending)) /* no signal to send */
1916 return (0);
1917 sig = sig_ffs(&sigpending);
1918
1919 _STOPEVENT(p, S_SIG, sig);
1920
1921 /*
1922 * We should see pending but ignored signals
1923 * only if P_TRACED was on when they were posted.
1924 */
1925 if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
1926 SIGDELSET(td->td_siglist, sig);
1927 continue;
1928 }
1929 if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
1930 /*
1931 * If traced, always stop.
1932 */
1933 mtx_unlock(&ps->ps_mtx);
1934 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
1935 &p->p_mtx.mtx_object, "Stopping for traced signal");
1936 p->p_xstat = sig;
1937 PROC_LOCK(p->p_pptr);
1938 psignal(p->p_pptr, SIGCHLD);
1939 PROC_UNLOCK(p->p_pptr);
1940 mtx_lock_spin(&sched_lock);
1941 stop(p); /* uses schedlock too eventually */
1942 thread_suspend_one(td);
1943 PROC_UNLOCK(p);
1944 DROP_GIANT();
1945 p->p_stats->p_ru.ru_nivcsw++;
1946 mi_switch();
1947 mtx_unlock_spin(&sched_lock);
1948 PICKUP_GIANT();
1949 PROC_LOCK(p);
1950 mtx_lock(&ps->ps_mtx);
1951
1952 /*
1953 * If parent wants us to take the signal,
1954 * then it will leave it in p->p_xstat;
1955 * otherwise we just look for signals again.
1956 */
1957 SIGDELSET(td->td_siglist, sig); /* clear old signal */
1958 sig = p->p_xstat;
1959 if (sig == 0)
1960 continue;
1961
1962 /*
1963 * If the traced bit got turned off, go back up
1964 * to the top to rescan signals. This ensures
1965 * that p_sig* and p_sigact are consistent.
1966 */
1967 if ((p->p_flag & P_TRACED) == 0)
1968 continue;
1969
1970 /*
1971 * Put the new signal into td_siglist. If the
1972 * signal is being masked, look for other signals.
1973 */
1974 SIGADDSET(td->td_siglist, sig);
1975 if (SIGISMEMBER(td->td_sigmask, sig))
1976 continue;
1977 signotify(td);
1978 }
1979
1980 prop = sigprop(sig);
1981
1982 /*
1983 * Decide whether the signal should be returned.
1984 * Return the signal's number, or fall through
1985 * to clear it from the pending mask.
1986 */
1987 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
1988
1989 case (intptr_t)SIG_DFL:
1990 /*
1991 * Don't take default actions on system processes.
1992 */
1993 if (p->p_pid <= 1) {
1994#ifdef DIAGNOSTIC
1995 /*
1996 * Are you sure you want to ignore SIGSEGV
1997 * in init? XXX
1998 */
1999 printf("Process (pid %lu) got signal %d\n",
2000 (u_long)p->p_pid, sig);
2001#endif
2002 break; /* == ignore */
2003 }
2004 /*
2005 * If there is a pending stop signal to process
2006 * with default action, stop here,
2007 * then clear the signal. However,
2008 * if the process is a member of an orphaned
2009 * process group, ignore tty stop signals.
2010 */
2011 if (prop & SA_STOP) {
2012 if (p->p_flag & P_TRACED ||
2013 (p->p_pgrp->pg_jobc == 0 &&
2014 prop & SA_TTYSTOP))
2015 break; /* == ignore */
2016 mtx_unlock(&ps->ps_mtx);
2017 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2018 &p->p_mtx.mtx_object, "Catching SIGSTOP");
2019 p->p_flag |= P_STOPPED_SIG;
2020 p->p_xstat = sig;
2021 mtx_lock_spin(&sched_lock);
2022 thread_stopped(p);
2023 thread_suspend_one(td);
2024 PROC_UNLOCK(p);
2025 DROP_GIANT();
2026 p->p_stats->p_ru.ru_nivcsw++;
2027 mi_switch();
2028 mtx_unlock_spin(&sched_lock);
2029 PICKUP_GIANT();
2030 PROC_LOCK(p);
2031 mtx_lock(&ps->ps_mtx);
2032 break;
2033 } else if (prop & SA_IGNORE) {
2034 /*
2035 * Except for SIGCONT, shouldn't get here.
2036 * Default action is to ignore; drop it.
2037 */
2038 break; /* == ignore */
2039 } else
2040 return (sig);
2041 /*NOTREACHED*/
2042
2043 case (intptr_t)SIG_IGN:
2044 /*
2045 * Masking above should prevent us from ever trying
2046 * to take action on an ignored signal other
2047 * than SIGCONT, unless the process is traced.
2048 */
2049 if ((prop & SA_CONT) == 0 &&
2050 (p->p_flag & P_TRACED) == 0)
2051 printf("issignal\n");
2052 break; /* == ignore */
2053
2054 default:
2055 /*
2056 * This signal has an action; let
2057 * postsig() process it.
2058 */
2059 return (sig);
2060 }
2061 SIGDELSET(td->td_siglist, sig); /* take the signal! */
2062 }
2063 /* NOTREACHED */
2064}
2065
2066/*
2067 * Put the argument process into the stopped state and notify the parent
2068 * via wakeup. Signals are handled elsewhere. The process must not be
2069 * on the run queue. Must be called with the proc p locked and the scheduler
2070 * lock held.
2071 */
2072static void
2073stop(struct proc *p)
2074{
2075
2076 PROC_LOCK_ASSERT(p, MA_OWNED);
2077 p->p_flag |= P_STOPPED_SIG;
2078 p->p_flag &= ~P_WAITED;
2079 wakeup(p->p_pptr);
2080}
2081
2082/*
2083 * MPSAFE
2084 */
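/*
 * Note that a thread of process p has stopped.  If all of p's threads
 * are now suspended as the result of a stop signal, move the process
 * into the stopped state, wake up its parent, and post SIGCHLD to the
 * parent unless it has requested otherwise (SA_NOCLDSTOP).
 */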
2085void
2086thread_stopped(struct proc *p)
2087{
2088 struct proc *p1 = curthread->td_proc;
2089 struct sigacts *ps;
2090 int n;
2091
2092 PROC_LOCK_ASSERT(p, MA_OWNED);
2093 mtx_assert(&sched_lock, MA_OWNED);
2094 n = p->p_suspcount;
2095 if (p == p1)
2096 n++;
2097 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
2098 mtx_unlock_spin(&sched_lock);
2099 stop(p);
2100 PROC_LOCK(p->p_pptr);
2101 ps = p->p_pptr->p_sigacts;
2102 mtx_lock(&ps->ps_mtx);
2103 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
2104 mtx_unlock(&ps->ps_mtx);
2105 psignal(p->p_pptr, SIGCHLD);
2106 } else
2107 mtx_unlock(&ps->ps_mtx);
2108 PROC_UNLOCK(p->p_pptr);
2109 mtx_lock_spin(&sched_lock);
2110 }
2111}
2112
2113/*
2114 * Take the action for the specified signal
2115 * from the current set of pending signals.
2116 */
2117void
2118postsig(sig)
2119 register int sig;
2120{
2121 struct thread *td = curthread;
2122 register struct proc *p = td->td_proc;
2123 struct sigacts *ps;
2124 sig_t action;
2125 sigset_t returnmask;
2126 int code;
2127
2128 KASSERT(sig != 0, ("postsig"));
2129
2130 PROC_LOCK_ASSERT(p, MA_OWNED);
2131 ps = p->p_sigacts;
2132 mtx_assert(&ps->ps_mtx, MA_OWNED);
2133 SIGDELSET(td->td_siglist, sig);
2134 action = ps->ps_sigact[_SIG_IDX(sig)];
2135#ifdef KTRACE
2136 if (KTRPOINT(td, KTR_PSIG))
2137 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
2138 &td->td_oldsigmask : &td->td_sigmask, 0);
2139#endif
2140 _STOPEVENT(p, S_SIG, sig);
2141
2142 if (action == SIG_DFL) {
2143 /*
2144 * Default action, where the default is to kill
2145 * the process. (Other cases were ignored above.)
2146 */
2147 mtx_unlock(&ps->ps_mtx);
2148 sigexit(td, sig);
2149 /* NOTREACHED */
2150 } else {
2151 /*
2152 * If we get here, the signal must be caught.
2153 */
2154 KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
2155 ("postsig action"));
2156 /*
2157 * Set the new mask value and also defer further
2158 * occurrences of this signal.
2159 *
2160 * Special case: the user has done a sigsuspend. Here the
2161 * current mask is not of interest; rather, the
2162 * mask from before the sigsuspend is what we want
2163 * restored after the signal processing is completed.
2164 */
2165 if (td->td_pflags & TDP_OLDMASK) {
2166 returnmask = td->td_oldsigmask;
2167 td->td_pflags &= ~TDP_OLDMASK;
2168 } else
2169 returnmask = td->td_sigmask;
2170
2171 SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
2172 if (!SIGISMEMBER(ps->ps_signodefer, sig))
2173 SIGADDSET(td->td_sigmask, sig);
2174
2175 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
2176 /*
2177 * See kern_sigaction() for origin of this code.
2178 */
2179 SIGDELSET(ps->ps_sigcatch, sig);
2180 if (sig != SIGCONT &&
2181 sigprop(sig) & SA_IGNORE)
2182 SIGADDSET(ps->ps_sigignore, sig);
2183 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2184 }
2185 p->p_stats->p_ru.ru_nsignals++;
2186 if (p->p_sig != sig) {
2187 code = 0;
2188 } else {
2189 code = p->p_code;
2190 p->p_code = 0;
2191 p->p_sig = 0;
2192 }
2193 if (p->p_flag & P_THREADED)
2194 thread_signal_add(curthread, sig);
2195 else
2196 (*p->p_sysent->sv_sendsig)(action, sig,
2197 &returnmask, code);
2198 }
2199}
2200
2201/*
2202 * Kill the specified process for the stated reason.
2203 */
2204void
2205killproc(p, why)
2206 struct proc *p;
2207 char *why;
2208{
2209
2210 PROC_LOCK_ASSERT(p, MA_OWNED);
2211 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
2212 p, p->p_pid, p->p_comm);
2213 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
2214 p->p_ucred ? p->p_ucred->cr_uid : -1, why);
2215 psignal(p, SIGKILL);
2216}
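/*
 * As an (illustrative) example of its use, the VM system's out-of-swap
 * handling kills its chosen victim with something along the lines of
 *
 *	killproc(bigproc, "out of swap space");
 *
 * so that the termination is logged with the reason attached.
 */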
2217
2218/*
2219 * Force the current process to exit with the specified signal, dumping core
2220 * if appropriate. We bypass the normal tests for masked and caught signals,
2221 * allowing unrecoverable failures to terminate the process without changing
2222 * signal state. Mark the accounting record with the signal termination.
2223 * If dumping core, save the signal number for the debugger. Calls exit and
2224 * does not return.
2225 *
2226 * MPSAFE
2227 */
2228void
2229sigexit(td, sig)
2230 struct thread *td;
2231 int sig;
2232{
2233 struct proc *p = td->td_proc;
2234
2235 PROC_LOCK_ASSERT(p, MA_OWNED);
2236 p->p_acflag |= AXSIG;
2237 if (sigprop(sig) & SA_CORE) {
2238 p->p_sig = sig;
2239 /*
2240 * Log signals which would cause core dumps.
2241 * (Log as LOG_INFO to appease those who don't want
2242 * these messages.)
2243 * XXX: TODO: write out the ruid as well as the euid.
2244 */
2245 PROC_UNLOCK(p);
2246 if (!mtx_owned(&Giant))
2247 mtx_lock(&Giant);
2248 if (coredump(td) == 0)
2249 sig |= WCOREFLAG;
2250 if (kern_logsigexit)
2251 log(LOG_INFO,
2252 "pid %d (%s), uid %d: exited on signal %d%s\n",
2253 p->p_pid, p->p_comm,
2254 td->td_ucred ? td->td_ucred->cr_uid : -1,
2255 sig &~ WCOREFLAG,
2256 sig & WCOREFLAG ? " (core dumped)" : "");
2257 } else {
2258 PROC_UNLOCK(p);
2259 if (!mtx_owned(&Giant))
2260 mtx_lock(&Giant);
2261 }
2262 exit1(td, W_EXITCODE(0, sig));
2263 /* NOTREACHED */
2264}
2265
2266static char corefilename[MAXPATHLEN+1] = {"%N.core"};
2267SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
2268 sizeof(corefilename), "process corefile name format string");
2269
2270/*
2271 * expand_name(name, uid, pid)
2272 * Expand the name described in corefilename, using name, uid, and pid.
2273 * corefilename is a printf-like string, with three format specifiers:
2274 * %N name of process ("name")
2275 * %P process id (pid)
2276 * %U user id (uid)
2277 * For example, "%N.core" is the default; core dumps can be disabled completely
2278 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2279 * This is controlled by the sysctl variable kern.corefile (see above).
2280 */
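/*
 * As a worked (purely illustrative) example: after
 *
 *	sysctl kern.corefile="/cores/%U/%N-%P.core"
 *
 * a core dump by a process named "httpd" with pid 1234 running as
 * uid 80 would be written to "/cores/80/httpd-1234.core".  The name,
 * pid and uid above are made up for the sake of the example.
 */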
2281
2282static char *
2283expand_name(name, uid, pid)
2284 const char *name;
2285 uid_t uid;
2286 pid_t pid;
2287{
2288 const char *format, *appendstr;
2289 char *temp;
2290 char buf[11]; /* Buffer for pid/uid -- max 4B */
2291 size_t i, l, n;
2292
2293 format = corefilename;
2294 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
2295 if (temp == NULL)
2296 return (NULL);
2297 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
2298 switch (format[i]) {
2299 case '%': /* Format character */
2300 i++;
2301 switch (format[i]) {
2302 case '%':
2303 appendstr = "%";
2304 break;
2305 case 'N': /* process name */
2306 appendstr = name;
2307 break;
2308 case 'P': /* process id */
2309 sprintf(buf, "%u", pid);
2310 appendstr = buf;
2311 break;
2312 case 'U': /* user id */
2313 sprintf(buf, "%u", uid);
2314 appendstr = buf;
2315 break;
2316 default:
2317 appendstr = "";
2318 log(LOG_ERR,
2319 "Unknown format character %c in `%s'\n",
2320 format[i], format);
2321 }
2322 l = strlen(appendstr);
2323 if ((n + l) >= MAXPATHLEN)
2324 goto toolong;
2325 memcpy(temp + n, appendstr, l);
2326 n += l;
2327 break;
2328 default:
2329 temp[n++] = format[i];
2330 }
2331 }
2332 if (format[i] != '\0')
2333 goto toolong;
2334 return (temp);
2335toolong:
2336 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
2337 (long)pid, name, (u_long)uid);
2338 free(temp, M_TEMP);
2339 return (NULL);
2340}
2341
2342/*
2343 * Dump a process' core. The main routine does some
2344 * policy checking and creates the name of the core file;
2345 * it then passes a vnode and a size limit to the process-specific
2346 * coredump routine, if there is one. If there is no such routine, it returns
2347 * ENOSYS; otherwise it returns the error from the process-specific routine.
2348 */
2349
2350static int
2351coredump(struct thread *td)
2352{
2353 struct proc *p = td->td_proc;
2354 register struct vnode *vp;
2355 register struct ucred *cred = td->td_ucred;
2356 struct flock lf;
2357 struct nameidata nd;
2358 struct vattr vattr;
2359 int error, error1, flags;
2360 struct mount *mp;
2361 char *name; /* name of corefile */
2362 off_t limit;
2363
2364 PROC_LOCK(p);
2365 _STOPEVENT(p, S_CORE, 0);
2366
2367 if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
2368 PROC_UNLOCK(p);
2369 return (EFAULT);
2370 }
2371
2372 /*
2373 * Note that the bulk of limit checking is done after
2374 * the corefile is created. The exception is if the limit
2375 * for corefiles is 0, in which case we don't bother
2376 * creating the corefile at all. This layout means that
2377 * a corefile larger than the limit is truncated
2378 * instead of not being created at all.
2379 */
2380 limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
2381 if (limit == 0) {
2382 PROC_UNLOCK(p);
2383 return (0);
2384 }
2385 PROC_UNLOCK(p);
2386
2387restart:
2388 name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
2389 if (name == NULL)
2390 return (EINVAL);
2391 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
2392 flags = O_CREAT | FWRITE | O_NOFOLLOW;
2393 error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR);
2394 free(name, M_TEMP);
2395 if (error)
2396 return (error);
2397 NDFREE(&nd, NDF_ONLY_PNBUF);
2398 vp = nd.ni_vp;
2399
2400 /* Don't dump to non-regular files or files with links. */
2401 if (vp->v_type != VREG ||
2402 VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
2403 VOP_UNLOCK(vp, 0, td);
2404 error = EFAULT;
2405 goto out2;
2406 }
2407
2408 VOP_UNLOCK(vp, 0, td);
2409 lf.l_whence = SEEK_SET;
2410 lf.l_start = 0;
2411 lf.l_len = 0;
2412 lf.l_type = F_WRLCK;
2413 error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK);
2414 if (error)
2415 goto out2;
2416
2417 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2418 lf.l_type = F_UNLCK;
2419 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2420 if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
2421 return (error);
2422 if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
2423 return (error);
2424 goto restart;
2425 }
2426
2427 VATTR_NULL(&vattr);
2428 vattr.va_size = 0;
2429 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2430 VOP_LEASE(vp, td, cred, LEASE_WRITE);
2431 VOP_SETATTR(vp, &vattr, cred, td);
2432 VOP_UNLOCK(vp, 0, td);
2433 PROC_LOCK(p);
2434 p->p_acflag |= ACORE;
2435 PROC_UNLOCK(p);
2436
2437 error = p->p_sysent->sv_coredump ?
2438 p->p_sysent->sv_coredump(td, vp, limit) :
2439 ENOSYS;
2440
2441 lf.l_type = F_UNLCK;
2442 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2443 vn_finished_write(mp);
2444out2:
2445 error1 = vn_close(vp, FWRITE, cred, td);
2446 if (error == 0)
2447 error = error1;
2448 return (error);
2449}
2450
2451/*
2452 * Nonexistent system call -- signal the process (it may want to handle it).
2453 * Flag an error in case the process won't see the signal immediately (blocked or ignored).
2454 */
2455#ifndef _SYS_SYSPROTO_H_
2456struct nosys_args {
2457 int dummy;
2458};
2459#endif
2460/*
2461 * MPSAFE
2462 */
2463/* ARGSUSED */
2464int
2465nosys(td, args)
2466 struct thread *td;
2467 struct nosys_args *args;
2468{
2469 struct proc *p = td->td_proc;
2470
2471 PROC_LOCK(p);
2472 psignal(p, SIGSYS);
2473 PROC_UNLOCK(p);
2474 return (ENOSYS);
2475}
2476
2477/*
2478 * Send a SIGIO or SIGURG signal to a process or process group using
2479 * stored credentials rather than those of the current process.
2480 */
2481void
2482pgsigio(sigiop, sig, checkctty)
2483 struct sigio **sigiop;
2484 int sig, checkctty;
2485{
2486 struct sigio *sigio;
2487
2488 SIGIO_LOCK();
2489 sigio = *sigiop;
2490 if (sigio == NULL) {
2491 SIGIO_UNLOCK();
2492 return;
2493 }
2494 if (sigio->sio_pgid > 0) {
2495 PROC_LOCK(sigio->sio_proc);
2496 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
2497 psignal(sigio->sio_proc, sig);
2498 PROC_UNLOCK(sigio->sio_proc);
2499 } else if (sigio->sio_pgid < 0) {
2500 struct proc *p;
2501
2502 PGRP_LOCK(sigio->sio_pgrp);
2503 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
2504 PROC_LOCK(p);
2505 if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
2506 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
2507 psignal(p, sig);
2508 PROC_UNLOCK(p);
2509 }
2510 PGRP_UNLOCK(sigio->sio_pgrp);
2511 }
2512 SIGIO_UNLOCK();
2513}
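/*
 * A typical (illustrative) caller is a tty or socket wakeup path where
 * a struct sigio pointer was registered earlier via fsetown(); e.g.
 * something along the lines of
 *
 *	pgsigio(&tp->t_sigio, SIGIO, 1);
 *
 * to tell the registered owner that I/O is possible.  The name
 * "tp->t_sigio" is only an example.
 */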
2514
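/*
 * Attach an EVFILT_SIGNAL knote to the current process's knote list.
 */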
2515static int
2516filt_sigattach(struct knote *kn)
2517{
2518 struct proc *p = curproc;
2519
2520 kn->kn_ptr.p_proc = p;
2521 kn->kn_flags |= EV_CLEAR; /* automatically set */
2522
2523 PROC_LOCK(p);
2524 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2525 PROC_UNLOCK(p);
2526
2527 return (0);
2528}
2529
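/*
 * Detach a signal knote from the process it was attached to.
 */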
2530static void
2531filt_sigdetach(struct knote *kn)
2532{
2533 struct proc *p = kn->kn_ptr.p_proc;
2534
2535 PROC_LOCK(p);
2536 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2537 PROC_UNLOCK(p);
2538}
2539
2540/*
2541 * Signal knotes are shared with proc knotes, so we apply a mask to
2542 * the hint in order to differentiate them from process hints. This
2543 * could be avoided by using a signal-specific knote list, but probably
2544 * isn't worth the trouble.
2545 */
2546static int
2547filt_signal(struct knote *kn, long hint)
2548{
2549
2550 if (hint & NOTE_SIGNAL) {
2551 hint &= ~NOTE_SIGNAL;
2552
2553 if (kn->kn_id == hint)
2554 kn->kn_data++;
2555 }
2556 return (kn->kn_data != 0);
2557}
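/*
 * These three routines back EVFILT_SIGNAL.  A minimal userland sketch
 * of registering interest in SIGUSR1 (error handling omitted) might
 * look like:
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Each later delivery of SIGUSR1 increments kn_data, which is reported
 * back to the caller when the event fires.
 */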
2558
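/*
 * Allocate a new sigacts structure with its mutex initialized and a
 * reference count of one.
 */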
2559struct sigacts *
2560sigacts_alloc(void)
2561{
2562 struct sigacts *ps;
2563
2564 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
2565 ps->ps_refcnt = 1;
2566 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
2567 return (ps);
2568}
2569
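/*
 * Drop a reference to a sigacts structure; free it once the last
 * reference is released.
 */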
2570void
2571sigacts_free(struct sigacts *ps)
2572{
2573
2574 mtx_lock(&ps->ps_mtx);
2575 ps->ps_refcnt--;
2576 if (ps->ps_refcnt == 0) {
2577 mtx_destroy(&ps->ps_mtx);
2578 free(ps, M_SUBPROC);
2579 } else
2580 mtx_unlock(&ps->ps_mtx);
2581}
2582
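/*
 * Acquire an additional reference to a sigacts structure.
 */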
2583struct sigacts *
2584sigacts_hold(struct sigacts *ps)
2585{
2586 mtx_lock(&ps->ps_mtx);
2587 ps->ps_refcnt++;
2588 mtx_unlock(&ps->ps_mtx);
2589 return (ps);
2590}
2591
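/*
 * Copy the signal state from src into dest.  Only the fields that
 * precede the reference count are copied, so dest keeps its own
 * reference count; dest must not be shared.
 */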
2592void
2593sigacts_copy(struct sigacts *dest, struct sigacts *src)
2594{
2595
2596 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
2597 mtx_lock(&src->ps_mtx);
2598 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
2599 mtx_unlock(&src->ps_mtx);
2600}
2601
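/*
 * Report whether a sigacts structure is shared by more than one
 * process.
 */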
2602int
2603sigacts_shared(struct sigacts *ps)
2604{
2605 int shared;
2606
2607 mtx_lock(&ps->ps_mtx);
2608 shared = ps->ps_refcnt > 1;
2609 mtx_unlock(&ps->ps_mtx);
2610 return (shared);
2611}