Side-by-side revision diff ("full" view) of sys/kern/kern_sig.c:
deleted column = kern_sig.c revision 130192, added column = kern_sig.c revision 130344.
1/*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
35 */
36
37#include <sys/cdefs.h>
1/*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/sys/kern/kern_sig.c 130192 2004-06-07 13:35:02Z davidxu $");
38__FBSDID("$FreeBSD: head/sys/kern/kern_sig.c 130344 2004-06-11 11:16:26Z phk $");
39
40#include "opt_compat.h"
41#include "opt_ktrace.h"
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/signalvar.h>
46#include <sys/vnode.h>
47#include <sys/acct.h>
48#include <sys/condvar.h>
49#include <sys/event.h>
50#include <sys/fcntl.h>
51#include <sys/kernel.h>
52#include <sys/kse.h>
53#include <sys/ktr.h>
54#include <sys/ktrace.h>
55#include <sys/lock.h>
56#include <sys/malloc.h>
57#include <sys/mutex.h>
58#include <sys/namei.h>
59#include <sys/proc.h>
60#include <sys/pioctl.h>
61#include <sys/resourcevar.h>
62#include <sys/sleepqueue.h>
63#include <sys/smp.h>
64#include <sys/stat.h>
65#include <sys/sx.h>
66#include <sys/syscallsubr.h>
67#include <sys/sysctl.h>
68#include <sys/sysent.h>
69#include <sys/syslog.h>
70#include <sys/sysproto.h>
71#include <sys/unistd.h>
72#include <sys/wait.h>
73
74#include <machine/cpu.h>
75
76#if defined (__alpha__) && !defined(COMPAT_43)
77#error "You *really* need COMPAT_43 on the alpha for longjmp(3)"
78#endif
79
80#define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
81
82static int coredump(struct thread *);
83static char *expand_name(const char *, uid_t, pid_t);
84static int killpg1(struct thread *td, int sig, int pgid, int all);
85static int issignal(struct thread *p);
86static int sigprop(int sig);
87static void stop(struct proc *);
88static void tdsigwakeup(struct thread *td, int sig, sig_t action);
89static int filt_sigattach(struct knote *kn);
90static void filt_sigdetach(struct knote *kn);
91static int filt_signal(struct knote *kn, long hint);
92static struct thread *sigtd(struct proc *p, int sig, int prop);
93static int kern_sigtimedwait(struct thread *td, sigset_t set,
94 siginfo_t *info, struct timespec *timeout);
95static void do_tdsignal(struct thread *td, int sig, sigtarget_t target);
96
97struct filterops sig_filtops =
98 { 0, filt_sigattach, filt_sigdetach, filt_signal };
99
100static int kern_logsigexit = 1;
101SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
102 &kern_logsigexit, 0,
103 "Log processes quitting on abnormal signals to syslog(3)");
104
105/*
106 * Policy -- Can ucred cr1 send SIGIO to process cr2?
107 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
108 * in the right situations.
109 */
110#define CANSIGIO(cr1, cr2) \
111 ((cr1)->cr_uid == 0 || \
112 (cr1)->cr_ruid == (cr2)->cr_ruid || \
113 (cr1)->cr_uid == (cr2)->cr_ruid || \
114 (cr1)->cr_ruid == (cr2)->cr_uid || \
115 (cr1)->cr_uid == (cr2)->cr_uid)
116
117int sugid_coredump;
118SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
119 &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
120
121static int do_coredump = 1;
122SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
123 &do_coredump, 0, "Enable/Disable coredumps");
124
125/*
126 * Signal properties and actions.
127 * The array below categorizes the signals and their default actions
128 * according to the following properties:
129 */
130#define SA_KILL 0x01 /* terminates process by default */
131#define SA_CORE 0x02 /* ditto and coredumps */
132#define SA_STOP 0x04 /* suspend process */
133#define SA_TTYSTOP 0x08 /* ditto, from tty */
134#define SA_IGNORE 0x10 /* ignore by default */
135#define SA_CONT 0x20 /* continue if suspended */
136#define SA_CANTMASK 0x40 /* non-maskable, catchable */
137#define SA_PROC 0x80 /* deliverable to any thread */
138
139static int sigproptbl[NSIG] = {
140 SA_KILL|SA_PROC, /* SIGHUP */
141 SA_KILL|SA_PROC, /* SIGINT */
142 SA_KILL|SA_CORE|SA_PROC, /* SIGQUIT */
143 SA_KILL|SA_CORE, /* SIGILL */
144 SA_KILL|SA_CORE, /* SIGTRAP */
145 SA_KILL|SA_CORE, /* SIGABRT */
146 SA_KILL|SA_CORE|SA_PROC, /* SIGEMT */
147 SA_KILL|SA_CORE, /* SIGFPE */
148 SA_KILL|SA_PROC, /* SIGKILL */
149 SA_KILL|SA_CORE, /* SIGBUS */
150 SA_KILL|SA_CORE, /* SIGSEGV */
151 SA_KILL|SA_CORE, /* SIGSYS */
152 SA_KILL|SA_PROC, /* SIGPIPE */
153 SA_KILL|SA_PROC, /* SIGALRM */
154 SA_KILL|SA_PROC, /* SIGTERM */
155 SA_IGNORE|SA_PROC, /* SIGURG */
156 SA_STOP|SA_PROC, /* SIGSTOP */
157 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTSTP */
158 SA_IGNORE|SA_CONT|SA_PROC, /* SIGCONT */
159 SA_IGNORE|SA_PROC, /* SIGCHLD */
160 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTIN */
161 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTOU */
162 SA_IGNORE|SA_PROC, /* SIGIO */
163 SA_KILL, /* SIGXCPU */
164 SA_KILL, /* SIGXFSZ */
165 SA_KILL|SA_PROC, /* SIGVTALRM */
166 SA_KILL|SA_PROC, /* SIGPROF */
167 SA_IGNORE|SA_PROC, /* SIGWINCH */
168 SA_IGNORE|SA_PROC, /* SIGINFO */
169 SA_KILL|SA_PROC, /* SIGUSR1 */
170 SA_KILL|SA_PROC, /* SIGUSR2 */
171};
172
173/*
174 * Determine signal that should be delivered to process p, the current
175 * process, 0 if none. If there is a pending stop signal with default
176 * action, the process stops in issignal().
177 * XXXKSE the check for a pending stop is not done under KSE
178 *
179 * MP SAFE.
180 */
181int
182cursig(struct thread *td)
183{
184 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
185 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
186 mtx_assert(&sched_lock, MA_NOTOWNED);
187 return (SIGPENDING(td) ? issignal(td) : 0);
188}
189
190/*
191 * Arrange for ast() to handle unmasked pending signals on return to user
192 * mode. This must be called whenever a signal is added to td_siglist or
193 * unmasked in td_sigmask.
194 */
195void
196signotify(struct thread *td)
197{
198 struct proc *p;
199 sigset_t set, saved;
200
201 p = td->td_proc;
202
203 PROC_LOCK_ASSERT(p, MA_OWNED);
204
205 /*
206 * If our mask changed we may have to move signal that were
207 * previously masked by all threads to our siglist.
208 */
209 set = p->p_siglist;
210 if (p->p_flag & P_SA)
211 saved = p->p_siglist;
212 SIGSETNAND(set, td->td_sigmask);
213 SIGSETNAND(p->p_siglist, set);
214 SIGSETOR(td->td_siglist, set);
215
216 if (SIGPENDING(td)) {
217 mtx_lock_spin(&sched_lock);
218 td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
219 mtx_unlock_spin(&sched_lock);
220 }
221 if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
222 if (SIGSETEQ(saved, p->p_siglist))
223 return;
224 else {
225 /* pending set changed */
226 p->p_flag |= P_SIGEVENT;
227 wakeup(&p->p_siglist);
228 }
229 }
230}
231
232int
233sigonstack(size_t sp)
234{
235 struct thread *td = curthread;
236
237 return ((td->td_pflags & TDP_ALTSTACK) ?
39
40#include "opt_compat.h"
41#include "opt_ktrace.h"
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/signalvar.h>
46#include <sys/vnode.h>
47#include <sys/acct.h>
48#include <sys/condvar.h>
49#include <sys/event.h>
50#include <sys/fcntl.h>
51#include <sys/kernel.h>
52#include <sys/kse.h>
53#include <sys/ktr.h>
54#include <sys/ktrace.h>
55#include <sys/lock.h>
56#include <sys/malloc.h>
57#include <sys/mutex.h>
58#include <sys/namei.h>
59#include <sys/proc.h>
60#include <sys/pioctl.h>
61#include <sys/resourcevar.h>
62#include <sys/sleepqueue.h>
63#include <sys/smp.h>
64#include <sys/stat.h>
65#include <sys/sx.h>
66#include <sys/syscallsubr.h>
67#include <sys/sysctl.h>
68#include <sys/sysent.h>
69#include <sys/syslog.h>
70#include <sys/sysproto.h>
71#include <sys/unistd.h>
72#include <sys/wait.h>
73
74#include <machine/cpu.h>
75
76#if defined (__alpha__) && !defined(COMPAT_43)
77#error "You *really* need COMPAT_43 on the alpha for longjmp(3)"
78#endif
79
80#define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
81
82static int coredump(struct thread *);
83static char *expand_name(const char *, uid_t, pid_t);
84static int killpg1(struct thread *td, int sig, int pgid, int all);
85static int issignal(struct thread *p);
86static int sigprop(int sig);
87static void stop(struct proc *);
88static void tdsigwakeup(struct thread *td, int sig, sig_t action);
89static int filt_sigattach(struct knote *kn);
90static void filt_sigdetach(struct knote *kn);
91static int filt_signal(struct knote *kn, long hint);
92static struct thread *sigtd(struct proc *p, int sig, int prop);
93static int kern_sigtimedwait(struct thread *td, sigset_t set,
94 siginfo_t *info, struct timespec *timeout);
95static void do_tdsignal(struct thread *td, int sig, sigtarget_t target);
96
97struct filterops sig_filtops =
98 { 0, filt_sigattach, filt_sigdetach, filt_signal };
99
100static int kern_logsigexit = 1;
101SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
102 &kern_logsigexit, 0,
103 "Log processes quitting on abnormal signals to syslog(3)");
104
105/*
106 * Policy -- Can ucred cr1 send SIGIO to process cr2?
107 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
108 * in the right situations.
109 */
110#define CANSIGIO(cr1, cr2) \
111 ((cr1)->cr_uid == 0 || \
112 (cr1)->cr_ruid == (cr2)->cr_ruid || \
113 (cr1)->cr_uid == (cr2)->cr_ruid || \
114 (cr1)->cr_ruid == (cr2)->cr_uid || \
115 (cr1)->cr_uid == (cr2)->cr_uid)
116
117int sugid_coredump;
118SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
119 &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
120
121static int do_coredump = 1;
122SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
123 &do_coredump, 0, "Enable/Disable coredumps");
124
125/*
126 * Signal properties and actions.
127 * The array below categorizes the signals and their default actions
128 * according to the following properties:
129 */
130#define SA_KILL 0x01 /* terminates process by default */
131#define SA_CORE 0x02 /* ditto and coredumps */
132#define SA_STOP 0x04 /* suspend process */
133#define SA_TTYSTOP 0x08 /* ditto, from tty */
134#define SA_IGNORE 0x10 /* ignore by default */
135#define SA_CONT 0x20 /* continue if suspended */
136#define SA_CANTMASK 0x40 /* non-maskable, catchable */
137#define SA_PROC 0x80 /* deliverable to any thread */
138
139static int sigproptbl[NSIG] = {
140 SA_KILL|SA_PROC, /* SIGHUP */
141 SA_KILL|SA_PROC, /* SIGINT */
142 SA_KILL|SA_CORE|SA_PROC, /* SIGQUIT */
143 SA_KILL|SA_CORE, /* SIGILL */
144 SA_KILL|SA_CORE, /* SIGTRAP */
145 SA_KILL|SA_CORE, /* SIGABRT */
146 SA_KILL|SA_CORE|SA_PROC, /* SIGEMT */
147 SA_KILL|SA_CORE, /* SIGFPE */
148 SA_KILL|SA_PROC, /* SIGKILL */
149 SA_KILL|SA_CORE, /* SIGBUS */
150 SA_KILL|SA_CORE, /* SIGSEGV */
151 SA_KILL|SA_CORE, /* SIGSYS */
152 SA_KILL|SA_PROC, /* SIGPIPE */
153 SA_KILL|SA_PROC, /* SIGALRM */
154 SA_KILL|SA_PROC, /* SIGTERM */
155 SA_IGNORE|SA_PROC, /* SIGURG */
156 SA_STOP|SA_PROC, /* SIGSTOP */
157 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTSTP */
158 SA_IGNORE|SA_CONT|SA_PROC, /* SIGCONT */
159 SA_IGNORE|SA_PROC, /* SIGCHLD */
160 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTIN */
161 SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTOU */
162 SA_IGNORE|SA_PROC, /* SIGIO */
163 SA_KILL, /* SIGXCPU */
164 SA_KILL, /* SIGXFSZ */
165 SA_KILL|SA_PROC, /* SIGVTALRM */
166 SA_KILL|SA_PROC, /* SIGPROF */
167 SA_IGNORE|SA_PROC, /* SIGWINCH */
168 SA_IGNORE|SA_PROC, /* SIGINFO */
169 SA_KILL|SA_PROC, /* SIGUSR1 */
170 SA_KILL|SA_PROC, /* SIGUSR2 */
171};
172
173/*
174 * Determine signal that should be delivered to process p, the current
175 * process, 0 if none. If there is a pending stop signal with default
176 * action, the process stops in issignal().
177 * XXXKSE the check for a pending stop is not done under KSE
178 *
179 * MP SAFE.
180 */
181int
182cursig(struct thread *td)
{
	/*
	 * Locking contract (asserted below): the caller holds the proc
	 * lock and the sigacts mutex, and must NOT hold sched_lock.
	 * SIGPENDING() is a cheap pre-check that lets us skip the full
	 * issignal() scan when no deliverable signal is pending.
	 */
184	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
185	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
186	mtx_assert(&sched_lock, MA_NOTOWNED);
187	return (SIGPENDING(td) ? issignal(td) : 0);
188}
189
190/*
191 * Arrange for ast() to handle unmasked pending signals on return to user
192 * mode. This must be called whenever a signal is added to td_siglist or
193 * unmasked in td_sigmask.
194 */
195void
196signotify(struct thread *td)
197{
198	struct proc *p;
199	sigset_t set, saved;
200
201	p = td->td_proc;
202
203	PROC_LOCK_ASSERT(p, MA_OWNED);
204
205	/*
206	 * If our mask changed we may have to move signal that were
207	 * previously masked by all threads to our siglist.
208	 */
209	set = p->p_siglist;
	/*
	 * For SA (KSE) processes, snapshot the process-wide pending set
	 * so we can tell below whether this call changed it.
	 */
210	if (p->p_flag & P_SA)
211	saved = p->p_siglist;
	/*
	 * 'set' becomes the signals pending on the process that this
	 * thread does not mask; move them from p_siglist to td_siglist.
	 */
212	SIGSETNAND(set, td->td_sigmask);
213	SIGSETNAND(p->p_siglist, set);
214	SIGSETOR(td->td_siglist, set);
215
216	if (SIGPENDING(td)) {
217	mtx_lock_spin(&sched_lock);
		/* Make ast() check for signals on return to user mode. */
218	td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
219	mtx_unlock_spin(&sched_lock);
220	}
	/*
	 * Notify an SA process (waiting on &p->p_siglist) when the
	 * process-wide pending set actually changed.
	 */
221	if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
222	if (SIGSETEQ(saved, p->p_siglist))
223	return;
224	else {
225	/* pending set changed */
226	p->p_flag |= P_SIGEVENT;
227	wakeup(&p->p_siglist);
228	}
229	}
230}
231
232int
233sigonstack(size_t sp)
234{
235	struct thread *td = curthread;
	/*
	 * Returns non-zero when 'sp' lies on the thread's alternate
	 * signal stack (only possible when TDP_ALTSTACK is set).
	 *
	 * NOTE(review): diff artifact -- line 238 appears twice below.
	 * The "COMPAT_43 || COMPAT_SUNOS" #if is the deleted r130192
	 * line; the "COMPAT_43"-only #if is the added r130344 line.
	 * Only one of them belongs in the real source file.
	 */
236
237	return ((td->td_pflags & TDP_ALTSTACK) ?
238#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
238#if defined(COMPAT_43)
	/* Old-style: zero-sized stack means "trust the SS_ONSTACK flag". */
239	((td->td_sigstk.ss_size == 0) ?
240	(td->td_sigstk.ss_flags & SS_ONSTACK) :
241	((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
242#else
243	((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
244#endif
245	: 0);
246}
247
248static __inline int
249sigprop(int sig)
250{
251
252 if (sig > 0 && sig < NSIG)
253 return (sigproptbl[_SIG_IDX(sig)]);
254 return (0);
255}
256
257int
258sig_ffs(sigset_t *set)
259{
260 int i;
261
262 for (i = 0; i < _SIG_WORDS; i++)
263 if (set->__bits[i])
264 return (ffs(set->__bits[i]) + (i * 32));
265 return (0);
266}
267
268/*
269 * kern_sigaction
270 * sigaction
271 * freebsd4_sigaction
272 * osigaction
273 *
274 * MPSAFE
275 */
276int
277kern_sigaction(td, sig, act, oact, flags)
278 struct thread *td;
279 register int sig;
280 struct sigaction *act, *oact;
281 int flags;
282{
283 struct sigacts *ps;
284 struct thread *td0;
285 struct proc *p = td->td_proc;
286
287 if (!_SIG_VALID(sig))
288 return (EINVAL);
289
290 PROC_LOCK(p);
291 ps = p->p_sigacts;
292 mtx_lock(&ps->ps_mtx);
293 if (oact) {
294 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
295 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
296 oact->sa_flags = 0;
297 if (SIGISMEMBER(ps->ps_sigonstack, sig))
298 oact->sa_flags |= SA_ONSTACK;
299 if (!SIGISMEMBER(ps->ps_sigintr, sig))
300 oact->sa_flags |= SA_RESTART;
301 if (SIGISMEMBER(ps->ps_sigreset, sig))
302 oact->sa_flags |= SA_RESETHAND;
303 if (SIGISMEMBER(ps->ps_signodefer, sig))
304 oact->sa_flags |= SA_NODEFER;
305 if (SIGISMEMBER(ps->ps_siginfo, sig))
306 oact->sa_flags |= SA_SIGINFO;
307 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
308 oact->sa_flags |= SA_NOCLDSTOP;
309 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
310 oact->sa_flags |= SA_NOCLDWAIT;
311 }
312 if (act) {
313 if ((sig == SIGKILL || sig == SIGSTOP) &&
314 act->sa_handler != SIG_DFL) {
315 mtx_unlock(&ps->ps_mtx);
316 PROC_UNLOCK(p);
317 return (EINVAL);
318 }
319
320 /*
321 * Change setting atomically.
322 */
323
324 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
325 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
326 if (act->sa_flags & SA_SIGINFO) {
327 ps->ps_sigact[_SIG_IDX(sig)] =
328 (__sighandler_t *)act->sa_sigaction;
329 SIGADDSET(ps->ps_siginfo, sig);
330 } else {
331 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
332 SIGDELSET(ps->ps_siginfo, sig);
333 }
334 if (!(act->sa_flags & SA_RESTART))
335 SIGADDSET(ps->ps_sigintr, sig);
336 else
337 SIGDELSET(ps->ps_sigintr, sig);
338 if (act->sa_flags & SA_ONSTACK)
339 SIGADDSET(ps->ps_sigonstack, sig);
340 else
341 SIGDELSET(ps->ps_sigonstack, sig);
342 if (act->sa_flags & SA_RESETHAND)
343 SIGADDSET(ps->ps_sigreset, sig);
344 else
345 SIGDELSET(ps->ps_sigreset, sig);
346 if (act->sa_flags & SA_NODEFER)
347 SIGADDSET(ps->ps_signodefer, sig);
348 else
349 SIGDELSET(ps->ps_signodefer, sig);
239 ((td->td_sigstk.ss_size == 0) ?
240 (td->td_sigstk.ss_flags & SS_ONSTACK) :
241 ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
242#else
243 ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
244#endif
245 : 0);
246}
247
/*
 * Return the SA_* property flags for 'sig' from sigproptbl[], or 0
 * when 'sig' is outside the table-backed range (only 1..NSIG-1 pass
 * the guard below).
 */
248static __inline int
249sigprop(int sig)
250{
251
252	if (sig > 0 && sig < NSIG)
253	return (sigproptbl[_SIG_IDX(sig)]);
254	return (0);
255}
256
/*
 * Return the number (1-based) of the lowest signal set in 'set',
 * or 0 when the set is empty.  Scans the 32-bit words of the sigset
 * in order and uses ffs() within the first non-zero word.
 */
257int
258sig_ffs(sigset_t *set)
259{
260	int i;
261
262	for (i = 0; i < _SIG_WORDS; i++)
263	if (set->__bits[i])
264	return (ffs(set->__bits[i]) + (i * 32));
265	return (0);
266}
267
268/*
269 * kern_sigaction
270 * sigaction
271 * freebsd4_sigaction
272 * osigaction
273 *
274 * MPSAFE
275 */
/*
 * Common backend for sigaction(2) and its compat entry points:
 * optionally report the current disposition of 'sig' into *oact and
 * optionally install *act as the new disposition for td's process.
 * 'flags' (KSA_OSIGSET / KSA_FREEBSD4) records which compat syscall
 * installed the handler so signal delivery can use the matching
 * trampoline/sigset format.  Returns 0, or EINVAL for a bad signal
 * number or an attempt to catch/ignore SIGKILL or SIGSTOP.
 */
276int
277kern_sigaction(td, sig, act, oact, flags)
278	struct thread *td;
279	register int sig;
280	struct sigaction *act, *oact;
281	int flags;
282{
283	struct sigacts *ps;
284	struct thread *td0;
285	struct proc *p = td->td_proc;
286
287	if (!_SIG_VALID(sig))
288	return (EINVAL);
289
290	PROC_LOCK(p);
291	ps = p->p_sigacts;
292	mtx_lock(&ps->ps_mtx);
	/* Reconstruct the old sigaction from the per-flag sigsets. */
293	if (oact) {
294	oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
295	oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
296	oact->sa_flags = 0;
297	if (SIGISMEMBER(ps->ps_sigonstack, sig))
298	oact->sa_flags |= SA_ONSTACK;
299	if (!SIGISMEMBER(ps->ps_sigintr, sig))
300	oact->sa_flags |= SA_RESTART;
301	if (SIGISMEMBER(ps->ps_sigreset, sig))
302	oact->sa_flags |= SA_RESETHAND;
303	if (SIGISMEMBER(ps->ps_signodefer, sig))
304	oact->sa_flags |= SA_NODEFER;
305	if (SIGISMEMBER(ps->ps_siginfo, sig))
306	oact->sa_flags |= SA_SIGINFO;
307	if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
308	oact->sa_flags |= SA_NOCLDSTOP;
309	if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
310	oact->sa_flags |= SA_NOCLDWAIT;
311	}
312	if (act) {
	/* SIGKILL and SIGSTOP may never be caught or ignored. */
313	if ((sig == SIGKILL || sig == SIGSTOP) &&
314	act->sa_handler != SIG_DFL) {
315	mtx_unlock(&ps->ps_mtx);
316	PROC_UNLOCK(p);
317	return (EINVAL);
318	}
319
320	/*
321	 * Change setting atomically.
322	 */
323
324	ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
325	SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
326	if (act->sa_flags & SA_SIGINFO) {
327	ps->ps_sigact[_SIG_IDX(sig)] =
328	(__sighandler_t *)act->sa_sigaction;
329	SIGADDSET(ps->ps_siginfo, sig);
330	} else {
331	ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
332	SIGDELSET(ps->ps_siginfo, sig);
333	}
334	if (!(act->sa_flags & SA_RESTART))
335	SIGADDSET(ps->ps_sigintr, sig);
336	else
337	SIGDELSET(ps->ps_sigintr, sig);
338	if (act->sa_flags & SA_ONSTACK)
339	SIGADDSET(ps->ps_sigonstack, sig);
340	else
341	SIGDELSET(ps->ps_sigonstack, sig);
342	if (act->sa_flags & SA_RESETHAND)
343	SIGADDSET(ps->ps_sigreset, sig);
344	else
345	SIGDELSET(ps->ps_sigreset, sig);
346	if (act->sa_flags & SA_NODEFER)
347	SIGADDSET(ps->ps_signodefer, sig);
348	else
349	SIGDELSET(ps->ps_signodefer, sig);
	/*
	 * NOTE(review): diff artifact -- the COMPAT_SUNOS block below
	 * (old lines 350-355) is the text DELETED by r130344; it is
	 * not present in the newer revision of this file.
	 */
350#ifdef COMPAT_SUNOS
351	if (act->sa_flags & SA_USERTRAMP)
352	SIGADDSET(ps->ps_usertramp, sig);
353	else
354	SIGDELSET(ps->ps_usertramp, sig);
355#endif
356	if (sig == SIGCHLD) {
357	if (act->sa_flags & SA_NOCLDSTOP)
358	ps->ps_flag |= PS_NOCLDSTOP;
359	else
360	ps->ps_flag &= ~PS_NOCLDSTOP;
361	if (act->sa_flags & SA_NOCLDWAIT) {
362	/*
363	 * Paranoia: since SA_NOCLDWAIT is implemented
364	 * by reparenting the dying child to PID 1 (and
365	 * trust it to reap the zombie), PID 1 itself
366	 * is forbidden to set SA_NOCLDWAIT.
367	 */
368	if (p->p_pid == 1)
369	ps->ps_flag &= ~PS_NOCLDWAIT;
370	else
371	ps->ps_flag |= PS_NOCLDWAIT;
372	} else
373	ps->ps_flag &= ~PS_NOCLDWAIT;
374	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
375	ps->ps_flag |= PS_CLDSIGIGN;
376	else
377	ps->ps_flag &= ~PS_CLDSIGIGN;
378	}
379	/*
380	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
381	 * and for signals set to SIG_DFL where the default is to
382	 * ignore. However, don't put SIGCONT in ps_sigignore, as we
383	 * have to restart the process.
384	 */
385	if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
386	(sigprop(sig) & SA_IGNORE &&
387	ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
	/* Wake an SA process waiting on the pending set we're clearing. */
388	if ((p->p_flag & P_SA) &&
389	SIGISMEMBER(p->p_siglist, sig)) {
390	p->p_flag |= P_SIGEVENT;
391	wakeup(&p->p_siglist);
392	}
393	/* never to be seen again */
394	SIGDELSET(p->p_siglist, sig);
395	mtx_lock_spin(&sched_lock);
396	FOREACH_THREAD_IN_PROC(p, td0)
397	SIGDELSET(td0->td_siglist, sig);
398	mtx_unlock_spin(&sched_lock);
399	if (sig != SIGCONT)
400	/* easier in psignal */
401	SIGADDSET(ps->ps_sigignore, sig);
402	SIGDELSET(ps->ps_sigcatch, sig);
403	} else {
404	SIGDELSET(ps->ps_sigignore, sig);
405	if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
406	SIGDELSET(ps->ps_sigcatch, sig);
407	else
408	SIGADDSET(ps->ps_sigcatch, sig);
409	}
	/* Track which compat ABI installed this (caught) handler. */
410#ifdef COMPAT_FREEBSD4
411	if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
412	ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
413	(flags & KSA_FREEBSD4) == 0)
414	SIGDELSET(ps->ps_freebsd4, sig);
415	else
416	SIGADDSET(ps->ps_freebsd4, sig);
417#endif
418#ifdef COMPAT_43
419	if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
420	ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
421	(flags & KSA_OSIGSET) == 0)
422	SIGDELSET(ps->ps_osigset, sig);
423	else
424	SIGADDSET(ps->ps_osigset, sig);
425#endif
426	}
427	mtx_unlock(&ps->ps_mtx);
428	PROC_UNLOCK(p);
429	return (0);
430}
431
432#ifndef _SYS_SYSPROTO_H_
433struct sigaction_args {
434 int sig;
435 struct sigaction *act;
436 struct sigaction *oact;
437};
438#endif
439/*
440 * MPSAFE
441 */
/*
 * sigaction(2) syscall: copy the user's struct sigaction in, apply
 * it via kern_sigaction() (flags == 0, native ABI), and copy the
 * previous action back out when requested.  NULL act/oact pointers
 * mean "don't set" / "don't query" respectively.
 */
442int
443sigaction(td, uap)
444	struct thread *td;
445	register struct sigaction_args *uap;
446{
447	struct sigaction act, oact;
448	register struct sigaction *actp, *oactp;
449	int error;
450
451	actp = (uap->act != NULL) ? &act : NULL;
452	oactp = (uap->oact != NULL) ? &oact : NULL;
453	if (actp) {
454	error = copyin(uap->act, actp, sizeof(act));
455	if (error)
456	return (error);
457	}
458	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
459	if (oactp && !error)
460	error = copyout(oactp, uap->oact, sizeof(oact));
461	return (error);
462}
463
464#ifdef COMPAT_FREEBSD4
465#ifndef _SYS_SYSPROTO_H_
466struct freebsd4_sigaction_args {
467 int sig;
468 struct sigaction *act;
469 struct sigaction *oact;
470};
471#endif
472/*
473 * MPSAFE
474 */
/*
 * FreeBSD 4.x-compatible sigaction(2).  Identical to sigaction()
 * except it passes KSA_FREEBSD4 so kern_sigaction() records the
 * handler in ps_freebsd4 (old signal-return ABI).
 */
475int
476freebsd4_sigaction(td, uap)
477	struct thread *td;
478	register struct freebsd4_sigaction_args *uap;
479{
480	struct sigaction act, oact;
481	register struct sigaction *actp, *oactp;
482	int error;
483
484
485	actp = (uap->act != NULL) ? &act : NULL;
486	oactp = (uap->oact != NULL) ? &oact : NULL;
487	if (actp) {
488	error = copyin(uap->act, actp, sizeof(act));
489	if (error)
490	return (error);
491	}
492	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
493	if (oactp && !error)
494	error = copyout(oactp, uap->oact, sizeof(oact));
495	return (error);
496}
497#endif /* COMAPT_FREEBSD4 */
498
499#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
500#ifndef _SYS_SYSPROTO_H_
501struct osigaction_args {
502 int signum;
503 struct osigaction *nsa;
504 struct osigaction *osa;
505};
506#endif
507/*
508 * MPSAFE
509 */
/*
 * 4.3BSD-compatible sigaction.  Translates between the old
 * struct osigaction (integer osigset_t mask, converted with
 * OSIG2SIG/SIG2OSIG) and the modern struct sigaction, then defers
 * to kern_sigaction() with KSA_OSIGSET.  Signal numbers are limited
 * to the old range (1..ONSIG-1).
 */
510int
511osigaction(td, uap)
512	struct thread *td;
513	register struct osigaction_args *uap;
514{
515	struct osigaction sa;
516	struct sigaction nsa, osa;
517	register struct sigaction *nsap, *osap;
518	int error;
519
520	if (uap->signum <= 0 || uap->signum >= ONSIG)
521	return (EINVAL);
522
523	nsap = (uap->nsa != NULL) ? &nsa : NULL;
524	osap = (uap->osa != NULL) ? &osa : NULL;
525
526	if (nsap) {
527	error = copyin(uap->nsa, &sa, sizeof(sa));
528	if (error)
529	return (error);
530	nsap->sa_handler = sa.sa_handler;
531	nsap->sa_flags = sa.sa_flags;
532	OSIG2SIG(sa.sa_mask, nsap->sa_mask);
533	}
534	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
535	if (osap && !error) {
536	sa.sa_handler = osap->sa_handler;
537	sa.sa_flags = osap->sa_flags;
538	SIG2OSIG(osap->sa_mask, sa.sa_mask);
539	error = copyout(&sa, uap->osa, sizeof(sa));
540	}
541	return (error);
542}
543
544#if !defined(__i386__) && !defined(__alpha__)
545/* Avoid replicating the same stub everywhere */
/*
 * Generic osigreturn() stub for platforms without a native
 * implementation (see the surrounding #if: i386 and alpha provide
 * their own).  Simply fails the call through nosys().
 */
546int
547osigreturn(td, uap)
548	struct thread *td;
549	struct osigreturn_args *uap;
550{
551
552	return (nosys(td, (struct nosys_args *)uap));
553}
554#endif
555#endif /* COMPAT_43 */
556
557/*
558 * Initialize signal state for process 0;
559 * set to ignore signals that are ignored by default.
560 */
561void
562siginit(p)
563	struct proc *p;
564{
565	register int i;
566	struct sigacts *ps;
567
568	PROC_LOCK(p);
569	ps = p->p_sigacts;
570	mtx_lock(&ps->ps_mtx);
	/*
	 * Pre-mark the by-default-ignored signals (except SIGCONT,
	 * which must still resume a stopped process).  Note: sigprop()
	 * returns 0 for i == NSIG, so the final iteration of this
	 * "<=" loop is a harmless no-op.
	 */
571	for (i = 1; i <= NSIG; i++)
572	if (sigprop(i) & SA_IGNORE && i != SIGCONT)
573	SIGADDSET(ps->ps_sigignore, i);
574	mtx_unlock(&ps->ps_mtx);
575	PROC_UNLOCK(p);
576}
577
578/*
579 * Reset signals for an exec of the specified process.
580 */
/*
 * Reset signal state across exec: drop every caught handler back to
 * SIG_DFL (discarding pending instances of signals whose default is
 * to ignore), disable the alternate signal stack, and clear the
 * SA_NOCLDWAIT / ignored-SIGCHLD state.  Caller holds the proc lock;
 * the process is single-threaded at this point.
 */
581void
582execsigs(struct proc *p)
583{
584	struct sigacts *ps;
585	int sig;
586	struct thread *td;
587
588	/*
589	 * Reset caught signals. Held signals remain held
590	 * through td_sigmask (unless they were caught,
591	 * and are now ignored by default).
592	 */
593	PROC_LOCK_ASSERT(p, MA_OWNED);
594	td = FIRST_THREAD_IN_PROC(p);
595	ps = p->p_sigacts;
596	mtx_lock(&ps->ps_mtx);
597	while (SIGNOTEMPTY(ps->ps_sigcatch)) {
598	sig = sig_ffs(&ps->ps_sigcatch);
599	SIGDELSET(ps->ps_sigcatch, sig);
600	if (sigprop(sig) & SA_IGNORE) {
601	if (sig != SIGCONT)
602	SIGADDSET(ps->ps_sigignore, sig);
603	SIGDELSET(p->p_siglist, sig);
604	/*
605	 * There is only one thread at this point.
606	 */
607	SIGDELSET(td->td_siglist, sig);
608	}
609	ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
610	}
611	/*
612	 * Reset stack state to the user stack.
613	 * Clear set of signals caught on the signal stack.
614	 */
615	td->td_sigstk.ss_flags = SS_DISABLE;
616	td->td_sigstk.ss_size = 0;
617	td->td_sigstk.ss_sp = 0;
618	td->td_pflags &= ~TDP_ALTSTACK;
619	/*
620	 * Reset no zombies if child dies flag as Solaris does.
621	 */
622	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
623	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
624	ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
625	mtx_unlock(&ps->ps_mtx);
626}
627
628/*
629 * kern_sigprocmask()
630 *
631 * Manipulate signal mask.
632 */
/*
 * Backend for sigprocmask(2) and osigprocmask().  Applies 'how'
 * (SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK) to td's signal mask, returning
 * the previous mask through *oset when non-NULL.  'old' selects
 * 4.3BSD-compat behavior for SIG_SETMASK (SIGSETLO -- presumably it
 * replaces only the low, osigset-sized word; confirm in signalvar.h).
 * SIGKILL/SIGSTOP are stripped via SIG_CANTMASK.  Returns 0 or
 * EINVAL for a bad 'how'.
 */
633int
634kern_sigprocmask(td, how, set, oset, old)
635	struct thread *td;
636	int how;
637	sigset_t *set, *oset;
638	int old;
639{
640	int error;
641
642	PROC_LOCK(td->td_proc);
643	if (oset != NULL)
644	*oset = td->td_sigmask;
645
646	error = 0;
647	if (set != NULL) {
648	switch (how) {
649	case SIG_BLOCK:
650	SIG_CANTMASK(*set);
651	SIGSETOR(td->td_sigmask, *set);
652	break;
653	case SIG_UNBLOCK:
654	SIGSETNAND(td->td_sigmask, *set);
	/* Newly unmasked signals may now be deliverable. */
655	signotify(td);
656	break;
657	case SIG_SETMASK:
658	SIG_CANTMASK(*set);
659	if (old)
660	SIGSETLO(td->td_sigmask, *set);
661	else
662	td->td_sigmask = *set;
663	signotify(td);
664	break;
665	default:
666	error = EINVAL;
667	break;
668	}
669	}
670	PROC_UNLOCK(td->td_proc);
671	return (error);
672}
673
674/*
675 * sigprocmask() - MP SAFE
676 */
677
678#ifndef _SYS_SYSPROTO_H_
679struct sigprocmask_args {
680 int how;
681 const sigset_t *set;
682 sigset_t *oset;
683};
684#endif
/*
 * sigprocmask(2) syscall: copy the user's new mask in (if any),
 * apply it via kern_sigprocmask() with old == 0 (modern semantics),
 * and copy the previous mask out when requested.
 */
685int
686sigprocmask(td, uap)
687	register struct thread *td;
688	struct sigprocmask_args *uap;
689{
690	sigset_t set, oset;
691	sigset_t *setp, *osetp;
692	int error;
693
694	setp = (uap->set != NULL) ? &set : NULL;
695	osetp = (uap->oset != NULL) ? &oset : NULL;
696	if (setp) {
697	error = copyin(uap->set, setp, sizeof(set));
698	if (error)
699	return (error);
700	}
701	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
702	if (osetp && !error) {
703	error = copyout(osetp, uap->oset, sizeof(oset));
704	}
705	return (error);
706}
707
708#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
709/*
710 * osigprocmask() - MP SAFE
711 */
712#ifndef _SYS_SYSPROTO_H_
713struct osigprocmask_args {
714 int how;
715 osigset_t mask;
716};
717#endif
/*
 * 4.3BSD-compatible sigprocmask.  The mask is passed by value as an
 * integer osigset_t; the previous mask is returned as the syscall's
 * return value (td_retval[0]) rather than through a pointer.
 */
718int
719osigprocmask(td, uap)
720	register struct thread *td;
721	struct osigprocmask_args *uap;
722{
723	sigset_t set, oset;
724	int error;
725
726	OSIG2SIG(uap->mask, set);
727	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
728	SIG2OSIG(oset, td->td_retval[0]);
729	return (error);
730}
731#endif /* COMPAT_43 */
732
733#ifndef _SYS_SYSPROTO_H_
734struct sigpending_args {
735 sigset_t *set;
736};
737#endif
738/*
739 * MPSAFE
740 */
/*
 * sigwait(2): wait (without timeout) for any signal in 'set' and
 * store its number at uap->sig.  Per POSIX, errors are reported as
 * the function's return value, so failures are placed in
 * td_retval[0] and 0 is returned -- except ERESTART, which must
 * propagate so the syscall is transparently restarted.  If the
 * copyout of the signal number fails, the signal is re-posted to
 * this thread so it is not lost.
 */
741int
742sigwait(struct thread *td, struct sigwait_args *uap)
743{
744	siginfo_t info;
745	sigset_t set;
746	int error;
747
748	error = copyin(uap->set, &set, sizeof(set));
749	if (error) {
750	td->td_retval[0] = error;
751	return (0);
752	}
753
754	error = kern_sigtimedwait(td, set, &info, NULL);
755	if (error) {
756	if (error == ERESTART)
757	return (error);
758	td->td_retval[0] = error;
759	return (0);
760	}
761
762	error = copyout(&info.si_signo, uap->sig, sizeof(info.si_signo));
763	/* Repost if we got an error. */
764	if (error && info.si_signo) {
765	PROC_LOCK(td->td_proc);
766	tdsignal(td, info.si_signo, SIGTARGET_TD);
767	PROC_UNLOCK(td->td_proc);
768	}
769	td->td_retval[0] = error;
770	return (0);
771}
772/*
773 * MPSAFE
774 */
775int
776sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
777{
778 struct timespec ts;
779 struct timespec *timeout;
780 sigset_t set;
781 siginfo_t info;
782 int error;
783
784 if (uap->timeout) {
785 error = copyin(uap->timeout, &ts, sizeof(ts));
786 if (error)
787 return (error);
788
789 timeout = &ts;
790 } else
791 timeout = NULL;
792
793 error = copyin(uap->set, &set, sizeof(set));
794 if (error)
795 return (error);
796
797 error = kern_sigtimedwait(td, set, &info, timeout);
798 if (error)
799 return (error);
800
801 if (uap->info)
802 error = copyout(&info, uap->info, sizeof(info));
803 /* Repost if we got an error. */
804 if (error && info.si_signo) {
805 PROC_LOCK(td->td_proc);
806 tdsignal(td, info.si_signo, SIGTARGET_TD);
807 PROC_UNLOCK(td->td_proc);
808 } else {
809 td->td_retval[0] = info.si_signo;
810 }
811 return (error);
812}
813
814/*
815 * MPSAFE
816 */
817int
818sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
819{
820 siginfo_t info;
821 sigset_t set;
822 int error;
823
824 error = copyin(uap->set, &set, sizeof(set));
825 if (error)
826 return (error);
827
828 error = kern_sigtimedwait(td, set, &info, NULL);
829 if (error)
830 return (error);
831
832 if (uap->info)
833 error = copyout(&info, uap->info, sizeof(info));
834 /* Repost if we got an error. */
835 if (error && info.si_signo) {
836 PROC_LOCK(td->td_proc);
837 tdsignal(td, info.si_signo, SIGTARGET_TD);
838 PROC_UNLOCK(td->td_proc);
839 } else {
840 td->td_retval[0] = info.si_signo;
841 }
842 return (error);
843}
844
845static int
846kern_sigtimedwait(struct thread *td, sigset_t waitset, siginfo_t *info,
847 struct timespec *timeout)
848{
849 struct sigacts *ps;
850 sigset_t savedmask, sigset;
851 struct proc *p;
852 int error;
853 int sig;
854 int hz;
855 int i;
856
857 p = td->td_proc;
858 error = 0;
859 sig = 0;
860 SIG_CANTMASK(waitset);
861
862 PROC_LOCK(p);
863 ps = p->p_sigacts;
864 savedmask = td->td_sigmask;
865
866again:
867 for (i = 1; i <= _SIG_MAXSIG; ++i) {
868 if (!SIGISMEMBER(waitset, i))
869 continue;
870 if (SIGISMEMBER(td->td_siglist, i)) {
871 SIGFILLSET(td->td_sigmask);
872 SIG_CANTMASK(td->td_sigmask);
873 SIGDELSET(td->td_sigmask, i);
874 mtx_lock(&ps->ps_mtx);
875 sig = cursig(td);
876 i = 0;
877 mtx_unlock(&ps->ps_mtx);
878 } else if (SIGISMEMBER(p->p_siglist, i)) {
879 if (p->p_flag & P_SA) {
880 p->p_flag |= P_SIGEVENT;
881 wakeup(&p->p_siglist);
882 }
883 SIGDELSET(p->p_siglist, i);
884 SIGADDSET(td->td_siglist, i);
885 SIGFILLSET(td->td_sigmask);
886 SIG_CANTMASK(td->td_sigmask);
887 SIGDELSET(td->td_sigmask, i);
888 mtx_lock(&ps->ps_mtx);
889 sig = cursig(td);
890 i = 0;
891 mtx_unlock(&ps->ps_mtx);
892 }
893 if (sig) {
894 td->td_sigmask = savedmask;
895 signotify(td);
896 goto out;
897 }
898 }
899 if (error)
900 goto out;
901
902 td->td_sigmask = savedmask;
903 signotify(td);
904 sigset = td->td_siglist;
905 SIGSETOR(sigset, p->p_siglist);
906 SIGSETAND(sigset, waitset);
907 if (!SIGISEMPTY(sigset))
908 goto again;
909
910 /*
911 * POSIX says this must be checked after looking for pending
912 * signals.
913 */
914 if (timeout) {
915 struct timeval tv;
916
917 if (timeout->tv_nsec < 0 || timeout->tv_nsec > 1000000000) {
918 error = EINVAL;
919 goto out;
920 }
921 if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
922 error = EAGAIN;
923 goto out;
924 }
925 TIMESPEC_TO_TIMEVAL(&tv, timeout);
926 hz = tvtohz(&tv);
927 } else
928 hz = 0;
929
930 td->td_waitset = &waitset;
931 error = msleep(&ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", hz);
932 td->td_waitset = NULL;
933 if (error == 0) /* surplus wakeup ? */
934 error = EINTR;
935 goto again;
936
937out:
938 if (sig) {
939 sig_t action;
940
941 error = 0;
942 mtx_lock(&ps->ps_mtx);
943 action = ps->ps_sigact[_SIG_IDX(sig)];
944 mtx_unlock(&ps->ps_mtx);
945#ifdef KTRACE
946 if (KTRPOINT(td, KTR_PSIG))
947 ktrpsig(sig, action, &td->td_sigmask, 0);
948#endif
949 _STOPEVENT(p, S_SIG, sig);
950
951 SIGDELSET(td->td_siglist, sig);
952 info->si_signo = sig;
953 info->si_code = 0;
954 }
955 PROC_UNLOCK(p);
956 return (error);
957}
958
959/*
960 * MPSAFE
961 */
962int
963sigpending(td, uap)
964 struct thread *td;
965 struct sigpending_args *uap;
966{
967 struct proc *p = td->td_proc;
968 sigset_t siglist;
969
970 PROC_LOCK(p);
971 siglist = p->p_siglist;
972 SIGSETOR(siglist, td->td_siglist);
973 PROC_UNLOCK(p);
974 return (copyout(&siglist, uap->set, sizeof(sigset_t)));
975}
976
977#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
978#ifndef _SYS_SYSPROTO_H_
979struct osigpending_args {
980 int dummy;
981};
982#endif
983/*
984 * MPSAFE
985 */
986int
987osigpending(td, uap)
988 struct thread *td;
989 struct osigpending_args *uap;
990{
991 struct proc *p = td->td_proc;
992 sigset_t siglist;
993
994 PROC_LOCK(p);
995 siglist = p->p_siglist;
996 SIGSETOR(siglist, td->td_siglist);
997 PROC_UNLOCK(p);
998 SIG2OSIG(siglist, td->td_retval[0]);
999 return (0);
1000}
1001#endif /* COMPAT_43 */
1002
350 if (sig == SIGCHLD) {
351 if (act->sa_flags & SA_NOCLDSTOP)
352 ps->ps_flag |= PS_NOCLDSTOP;
353 else
354 ps->ps_flag &= ~PS_NOCLDSTOP;
355 if (act->sa_flags & SA_NOCLDWAIT) {
356 /*
357 * Paranoia: since SA_NOCLDWAIT is implemented
358 * by reparenting the dying child to PID 1 (and
359 * trust it to reap the zombie), PID 1 itself
360 * is forbidden to set SA_NOCLDWAIT.
361 */
362 if (p->p_pid == 1)
363 ps->ps_flag &= ~PS_NOCLDWAIT;
364 else
365 ps->ps_flag |= PS_NOCLDWAIT;
366 } else
367 ps->ps_flag &= ~PS_NOCLDWAIT;
368 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
369 ps->ps_flag |= PS_CLDSIGIGN;
370 else
371 ps->ps_flag &= ~PS_CLDSIGIGN;
372 }
373 /*
374 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
375 * and for signals set to SIG_DFL where the default is to
376 * ignore. However, don't put SIGCONT in ps_sigignore, as we
377 * have to restart the process.
378 */
379 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
380 (sigprop(sig) & SA_IGNORE &&
381 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
382 if ((p->p_flag & P_SA) &&
383 SIGISMEMBER(p->p_siglist, sig)) {
384 p->p_flag |= P_SIGEVENT;
385 wakeup(&p->p_siglist);
386 }
387 /* never to be seen again */
388 SIGDELSET(p->p_siglist, sig);
389 mtx_lock_spin(&sched_lock);
390 FOREACH_THREAD_IN_PROC(p, td0)
391 SIGDELSET(td0->td_siglist, sig);
392 mtx_unlock_spin(&sched_lock);
393 if (sig != SIGCONT)
394 /* easier in psignal */
395 SIGADDSET(ps->ps_sigignore, sig);
396 SIGDELSET(ps->ps_sigcatch, sig);
397 } else {
398 SIGDELSET(ps->ps_sigignore, sig);
399 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
400 SIGDELSET(ps->ps_sigcatch, sig);
401 else
402 SIGADDSET(ps->ps_sigcatch, sig);
403 }
404#ifdef COMPAT_FREEBSD4
405 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
406 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
407 (flags & KSA_FREEBSD4) == 0)
408 SIGDELSET(ps->ps_freebsd4, sig);
409 else
410 SIGADDSET(ps->ps_freebsd4, sig);
411#endif
412#ifdef COMPAT_43
413 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
414 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
415 (flags & KSA_OSIGSET) == 0)
416 SIGDELSET(ps->ps_osigset, sig);
417 else
418 SIGADDSET(ps->ps_osigset, sig);
419#endif
420 }
421 mtx_unlock(&ps->ps_mtx);
422 PROC_UNLOCK(p);
423 return (0);
424}
425
426#ifndef _SYS_SYSPROTO_H_
427struct sigaction_args {
428 int sig;
429 struct sigaction *act;
430 struct sigaction *oact;
431};
432#endif
433/*
434 * MPSAFE
435 */
436int
437sigaction(td, uap)
438 struct thread *td;
439 register struct sigaction_args *uap;
440{
441 struct sigaction act, oact;
442 register struct sigaction *actp, *oactp;
443 int error;
444
445 actp = (uap->act != NULL) ? &act : NULL;
446 oactp = (uap->oact != NULL) ? &oact : NULL;
447 if (actp) {
448 error = copyin(uap->act, actp, sizeof(act));
449 if (error)
450 return (error);
451 }
452 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
453 if (oactp && !error)
454 error = copyout(oactp, uap->oact, sizeof(oact));
455 return (error);
456}
457
458#ifdef COMPAT_FREEBSD4
459#ifndef _SYS_SYSPROTO_H_
460struct freebsd4_sigaction_args {
461 int sig;
462 struct sigaction *act;
463 struct sigaction *oact;
464};
465#endif
466/*
467 * MPSAFE
468 */
469int
470freebsd4_sigaction(td, uap)
471 struct thread *td;
472 register struct freebsd4_sigaction_args *uap;
473{
474 struct sigaction act, oact;
475 register struct sigaction *actp, *oactp;
476 int error;
477
478
479 actp = (uap->act != NULL) ? &act : NULL;
480 oactp = (uap->oact != NULL) ? &oact : NULL;
481 if (actp) {
482 error = copyin(uap->act, actp, sizeof(act));
483 if (error)
484 return (error);
485 }
486 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
487 if (oactp && !error)
488 error = copyout(oactp, uap->oact, sizeof(oact));
489 return (error);
490}
491#endif /* COMAPT_FREEBSD4 */
492
493#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
494#ifndef _SYS_SYSPROTO_H_
495struct osigaction_args {
496 int signum;
497 struct osigaction *nsa;
498 struct osigaction *osa;
499};
500#endif
501/*
502 * MPSAFE
503 */
504int
505osigaction(td, uap)
506 struct thread *td;
507 register struct osigaction_args *uap;
508{
509 struct osigaction sa;
510 struct sigaction nsa, osa;
511 register struct sigaction *nsap, *osap;
512 int error;
513
514 if (uap->signum <= 0 || uap->signum >= ONSIG)
515 return (EINVAL);
516
517 nsap = (uap->nsa != NULL) ? &nsa : NULL;
518 osap = (uap->osa != NULL) ? &osa : NULL;
519
520 if (nsap) {
521 error = copyin(uap->nsa, &sa, sizeof(sa));
522 if (error)
523 return (error);
524 nsap->sa_handler = sa.sa_handler;
525 nsap->sa_flags = sa.sa_flags;
526 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
527 }
528 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
529 if (osap && !error) {
530 sa.sa_handler = osap->sa_handler;
531 sa.sa_flags = osap->sa_flags;
532 SIG2OSIG(osap->sa_mask, sa.sa_mask);
533 error = copyout(&sa, uap->osa, sizeof(sa));
534 }
535 return (error);
536}
537
538#if !defined(__i386__) && !defined(__alpha__)
539/* Avoid replicating the same stub everywhere */
540int
541osigreturn(td, uap)
542 struct thread *td;
543 struct osigreturn_args *uap;
544{
545
546 return (nosys(td, (struct nosys_args *)uap));
547}
548#endif
549#endif /* COMPAT_43 */
550
551/*
552 * Initialize signal state for process 0;
553 * set to ignore signals that are ignored by default.
554 */
555void
556siginit(p)
557 struct proc *p;
558{
559 register int i;
560 struct sigacts *ps;
561
562 PROC_LOCK(p);
563 ps = p->p_sigacts;
564 mtx_lock(&ps->ps_mtx);
565 for (i = 1; i <= NSIG; i++)
566 if (sigprop(i) & SA_IGNORE && i != SIGCONT)
567 SIGADDSET(ps->ps_sigignore, i);
568 mtx_unlock(&ps->ps_mtx);
569 PROC_UNLOCK(p);
570}
571
572/*
573 * Reset signals for an exec of the specified process.
574 */
575void
576execsigs(struct proc *p)
577{
578 struct sigacts *ps;
579 int sig;
580 struct thread *td;
581
582 /*
583 * Reset caught signals. Held signals remain held
584 * through td_sigmask (unless they were caught,
585 * and are now ignored by default).
586 */
587 PROC_LOCK_ASSERT(p, MA_OWNED);
588 td = FIRST_THREAD_IN_PROC(p);
589 ps = p->p_sigacts;
590 mtx_lock(&ps->ps_mtx);
591 while (SIGNOTEMPTY(ps->ps_sigcatch)) {
592 sig = sig_ffs(&ps->ps_sigcatch);
593 SIGDELSET(ps->ps_sigcatch, sig);
594 if (sigprop(sig) & SA_IGNORE) {
595 if (sig != SIGCONT)
596 SIGADDSET(ps->ps_sigignore, sig);
597 SIGDELSET(p->p_siglist, sig);
598 /*
599 * There is only one thread at this point.
600 */
601 SIGDELSET(td->td_siglist, sig);
602 }
603 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
604 }
605 /*
606 * Reset stack state to the user stack.
607 * Clear set of signals caught on the signal stack.
608 */
609 td->td_sigstk.ss_flags = SS_DISABLE;
610 td->td_sigstk.ss_size = 0;
611 td->td_sigstk.ss_sp = 0;
612 td->td_pflags &= ~TDP_ALTSTACK;
613 /*
614 * Reset no zombies if child dies flag as Solaris does.
615 */
616 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
617 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
618 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
619 mtx_unlock(&ps->ps_mtx);
620}
621
622/*
623 * kern_sigprocmask()
624 *
625 * Manipulate signal mask.
626 */
627int
628kern_sigprocmask(td, how, set, oset, old)
629 struct thread *td;
630 int how;
631 sigset_t *set, *oset;
632 int old;
633{
634 int error;
635
636 PROC_LOCK(td->td_proc);
637 if (oset != NULL)
638 *oset = td->td_sigmask;
639
640 error = 0;
641 if (set != NULL) {
642 switch (how) {
643 case SIG_BLOCK:
644 SIG_CANTMASK(*set);
645 SIGSETOR(td->td_sigmask, *set);
646 break;
647 case SIG_UNBLOCK:
648 SIGSETNAND(td->td_sigmask, *set);
649 signotify(td);
650 break;
651 case SIG_SETMASK:
652 SIG_CANTMASK(*set);
653 if (old)
654 SIGSETLO(td->td_sigmask, *set);
655 else
656 td->td_sigmask = *set;
657 signotify(td);
658 break;
659 default:
660 error = EINVAL;
661 break;
662 }
663 }
664 PROC_UNLOCK(td->td_proc);
665 return (error);
666}
667
668/*
669 * sigprocmask() - MP SAFE
670 */
671
672#ifndef _SYS_SYSPROTO_H_
673struct sigprocmask_args {
674 int how;
675 const sigset_t *set;
676 sigset_t *oset;
677};
678#endif
679int
680sigprocmask(td, uap)
681 register struct thread *td;
682 struct sigprocmask_args *uap;
683{
684 sigset_t set, oset;
685 sigset_t *setp, *osetp;
686 int error;
687
688 setp = (uap->set != NULL) ? &set : NULL;
689 osetp = (uap->oset != NULL) ? &oset : NULL;
690 if (setp) {
691 error = copyin(uap->set, setp, sizeof(set));
692 if (error)
693 return (error);
694 }
695 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
696 if (osetp && !error) {
697 error = copyout(osetp, uap->oset, sizeof(oset));
698 }
699 return (error);
700}
701
702#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
703/*
704 * osigprocmask() - MP SAFE
705 */
706#ifndef _SYS_SYSPROTO_H_
707struct osigprocmask_args {
708 int how;
709 osigset_t mask;
710};
711#endif
712int
713osigprocmask(td, uap)
714 register struct thread *td;
715 struct osigprocmask_args *uap;
716{
717 sigset_t set, oset;
718 int error;
719
720 OSIG2SIG(uap->mask, set);
721 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
722 SIG2OSIG(oset, td->td_retval[0]);
723 return (error);
724}
725#endif /* COMPAT_43 */
726
727#ifndef _SYS_SYSPROTO_H_
728struct sigpending_args {
729 sigset_t *set;
730};
731#endif
732/*
733 * MPSAFE
734 */
735int
736sigwait(struct thread *td, struct sigwait_args *uap)
737{
738 siginfo_t info;
739 sigset_t set;
740 int error;
741
742 error = copyin(uap->set, &set, sizeof(set));
743 if (error) {
744 td->td_retval[0] = error;
745 return (0);
746 }
747
748 error = kern_sigtimedwait(td, set, &info, NULL);
749 if (error) {
750 if (error == ERESTART)
751 return (error);
752 td->td_retval[0] = error;
753 return (0);
754 }
755
756 error = copyout(&info.si_signo, uap->sig, sizeof(info.si_signo));
757 /* Repost if we got an error. */
758 if (error && info.si_signo) {
759 PROC_LOCK(td->td_proc);
760 tdsignal(td, info.si_signo, SIGTARGET_TD);
761 PROC_UNLOCK(td->td_proc);
762 }
763 td->td_retval[0] = error;
764 return (0);
765}
766/*
767 * MPSAFE
768 */
769int
770sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
771{
772 struct timespec ts;
773 struct timespec *timeout;
774 sigset_t set;
775 siginfo_t info;
776 int error;
777
778 if (uap->timeout) {
779 error = copyin(uap->timeout, &ts, sizeof(ts));
780 if (error)
781 return (error);
782
783 timeout = &ts;
784 } else
785 timeout = NULL;
786
787 error = copyin(uap->set, &set, sizeof(set));
788 if (error)
789 return (error);
790
791 error = kern_sigtimedwait(td, set, &info, timeout);
792 if (error)
793 return (error);
794
795 if (uap->info)
796 error = copyout(&info, uap->info, sizeof(info));
797 /* Repost if we got an error. */
798 if (error && info.si_signo) {
799 PROC_LOCK(td->td_proc);
800 tdsignal(td, info.si_signo, SIGTARGET_TD);
801 PROC_UNLOCK(td->td_proc);
802 } else {
803 td->td_retval[0] = info.si_signo;
804 }
805 return (error);
806}
807
808/*
809 * MPSAFE
810 */
811int
812sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
813{
814 siginfo_t info;
815 sigset_t set;
816 int error;
817
818 error = copyin(uap->set, &set, sizeof(set));
819 if (error)
820 return (error);
821
822 error = kern_sigtimedwait(td, set, &info, NULL);
823 if (error)
824 return (error);
825
826 if (uap->info)
827 error = copyout(&info, uap->info, sizeof(info));
828 /* Repost if we got an error. */
829 if (error && info.si_signo) {
830 PROC_LOCK(td->td_proc);
831 tdsignal(td, info.si_signo, SIGTARGET_TD);
832 PROC_UNLOCK(td->td_proc);
833 } else {
834 td->td_retval[0] = info.si_signo;
835 }
836 return (error);
837}
838
839static int
840kern_sigtimedwait(struct thread *td, sigset_t waitset, siginfo_t *info,
841 struct timespec *timeout)
842{
843 struct sigacts *ps;
844 sigset_t savedmask, sigset;
845 struct proc *p;
846 int error;
847 int sig;
848 int hz;
849 int i;
850
851 p = td->td_proc;
852 error = 0;
853 sig = 0;
854 SIG_CANTMASK(waitset);
855
856 PROC_LOCK(p);
857 ps = p->p_sigacts;
858 savedmask = td->td_sigmask;
859
860again:
861 for (i = 1; i <= _SIG_MAXSIG; ++i) {
862 if (!SIGISMEMBER(waitset, i))
863 continue;
864 if (SIGISMEMBER(td->td_siglist, i)) {
865 SIGFILLSET(td->td_sigmask);
866 SIG_CANTMASK(td->td_sigmask);
867 SIGDELSET(td->td_sigmask, i);
868 mtx_lock(&ps->ps_mtx);
869 sig = cursig(td);
870 i = 0;
871 mtx_unlock(&ps->ps_mtx);
872 } else if (SIGISMEMBER(p->p_siglist, i)) {
873 if (p->p_flag & P_SA) {
874 p->p_flag |= P_SIGEVENT;
875 wakeup(&p->p_siglist);
876 }
877 SIGDELSET(p->p_siglist, i);
878 SIGADDSET(td->td_siglist, i);
879 SIGFILLSET(td->td_sigmask);
880 SIG_CANTMASK(td->td_sigmask);
881 SIGDELSET(td->td_sigmask, i);
882 mtx_lock(&ps->ps_mtx);
883 sig = cursig(td);
884 i = 0;
885 mtx_unlock(&ps->ps_mtx);
886 }
887 if (sig) {
888 td->td_sigmask = savedmask;
889 signotify(td);
890 goto out;
891 }
892 }
893 if (error)
894 goto out;
895
896 td->td_sigmask = savedmask;
897 signotify(td);
898 sigset = td->td_siglist;
899 SIGSETOR(sigset, p->p_siglist);
900 SIGSETAND(sigset, waitset);
901 if (!SIGISEMPTY(sigset))
902 goto again;
903
904 /*
905 * POSIX says this must be checked after looking for pending
906 * signals.
907 */
908 if (timeout) {
909 struct timeval tv;
910
911 if (timeout->tv_nsec < 0 || timeout->tv_nsec > 1000000000) {
912 error = EINVAL;
913 goto out;
914 }
915 if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
916 error = EAGAIN;
917 goto out;
918 }
919 TIMESPEC_TO_TIMEVAL(&tv, timeout);
920 hz = tvtohz(&tv);
921 } else
922 hz = 0;
923
924 td->td_waitset = &waitset;
925 error = msleep(&ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", hz);
926 td->td_waitset = NULL;
927 if (error == 0) /* surplus wakeup ? */
928 error = EINTR;
929 goto again;
930
931out:
932 if (sig) {
933 sig_t action;
934
935 error = 0;
936 mtx_lock(&ps->ps_mtx);
937 action = ps->ps_sigact[_SIG_IDX(sig)];
938 mtx_unlock(&ps->ps_mtx);
939#ifdef KTRACE
940 if (KTRPOINT(td, KTR_PSIG))
941 ktrpsig(sig, action, &td->td_sigmask, 0);
942#endif
943 _STOPEVENT(p, S_SIG, sig);
944
945 SIGDELSET(td->td_siglist, sig);
946 info->si_signo = sig;
947 info->si_code = 0;
948 }
949 PROC_UNLOCK(p);
950 return (error);
951}
952
953/*
954 * MPSAFE
955 */
956int
957sigpending(td, uap)
958 struct thread *td;
959 struct sigpending_args *uap;
960{
961 struct proc *p = td->td_proc;
962 sigset_t siglist;
963
964 PROC_LOCK(p);
965 siglist = p->p_siglist;
966 SIGSETOR(siglist, td->td_siglist);
967 PROC_UNLOCK(p);
968 return (copyout(&siglist, uap->set, sizeof(sigset_t)));
969}
970
971#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
972#ifndef _SYS_SYSPROTO_H_
973struct osigpending_args {
974 int dummy;
975};
976#endif
977/*
978 * MPSAFE
979 */
980int
981osigpending(td, uap)
982 struct thread *td;
983 struct osigpending_args *uap;
984{
985 struct proc *p = td->td_proc;
986 sigset_t siglist;
987
988 PROC_LOCK(p);
989 siglist = p->p_siglist;
990 SIGSETOR(siglist, td->td_siglist);
991 PROC_UNLOCK(p);
992 SIG2OSIG(siglist, td->td_retval[0]);
993 return (0);
994}
995#endif /* COMPAT_43 */
996
1003#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
997#if defined(COMPAT_43)
1004/*
1005 * Generalized interface signal handler, 4.3-compatible.
1006 */
1007#ifndef _SYS_SYSPROTO_H_
1008struct osigvec_args {
1009 int signum;
1010 struct sigvec *nsv;
1011 struct sigvec *osv;
1012};
1013#endif
1014/*
1015 * MPSAFE
1016 */
1017/* ARGSUSED */
1018int
1019osigvec(td, uap)
1020 struct thread *td;
1021 register struct osigvec_args *uap;
1022{
1023 struct sigvec vec;
1024 struct sigaction nsa, osa;
1025 register struct sigaction *nsap, *osap;
1026 int error;
1027
1028 if (uap->signum <= 0 || uap->signum >= ONSIG)
1029 return (EINVAL);
1030 nsap = (uap->nsv != NULL) ? &nsa : NULL;
1031 osap = (uap->osv != NULL) ? &osa : NULL;
1032 if (nsap) {
1033 error = copyin(uap->nsv, &vec, sizeof(vec));
1034 if (error)
1035 return (error);
1036 nsap->sa_handler = vec.sv_handler;
1037 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1038 nsap->sa_flags = vec.sv_flags;
1039 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
998/*
999 * Generalized interface signal handler, 4.3-compatible.
1000 */
1001#ifndef _SYS_SYSPROTO_H_
1002struct osigvec_args {
1003 int signum;
1004 struct sigvec *nsv;
1005 struct sigvec *osv;
1006};
1007#endif
1008/*
1009 * MPSAFE
1010 */
1011/* ARGSUSED */
1012int
1013osigvec(td, uap)
1014 struct thread *td;
1015 register struct osigvec_args *uap;
1016{
1017 struct sigvec vec;
1018 struct sigaction nsa, osa;
1019 register struct sigaction *nsap, *osap;
1020 int error;
1021
1022 if (uap->signum <= 0 || uap->signum >= ONSIG)
1023 return (EINVAL);
1024 nsap = (uap->nsv != NULL) ? &nsa : NULL;
1025 osap = (uap->osv != NULL) ? &osa : NULL;
1026 if (nsap) {
1027 error = copyin(uap->nsv, &vec, sizeof(vec));
1028 if (error)
1029 return (error);
1030 nsap->sa_handler = vec.sv_handler;
1031 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1032 nsap->sa_flags = vec.sv_flags;
1033 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
1040#ifdef COMPAT_SUNOS
1041 nsap->sa_flags |= SA_USERTRAMP;
1042#endif
1043 }
1044 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1045 if (osap && !error) {
1046 vec.sv_handler = osap->sa_handler;
1047 SIG2OSIG(osap->sa_mask, vec.sv_mask);
1048 vec.sv_flags = osap->sa_flags;
1049 vec.sv_flags &= ~SA_NOCLDWAIT;
1050 vec.sv_flags ^= SA_RESTART;
1034 }
1035 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1036 if (osap && !error) {
1037 vec.sv_handler = osap->sa_handler;
1038 SIG2OSIG(osap->sa_mask, vec.sv_mask);
1039 vec.sv_flags = osap->sa_flags;
1040 vec.sv_flags &= ~SA_NOCLDWAIT;
1041 vec.sv_flags ^= SA_RESTART;
1051#ifdef COMPAT_SUNOS
1052 vec.sv_flags &= ~SA_NOCLDSTOP;
1053#endif
1054 error = copyout(&vec, uap->osv, sizeof(vec));
1055 }
1056 return (error);
1057}
1058
1059#ifndef _SYS_SYSPROTO_H_
1060struct osigblock_args {
1061 int mask;
1062};
1063#endif
1064/*
1065 * MPSAFE
1066 */
1067int
1068osigblock(td, uap)
1069 register struct thread *td;
1070 struct osigblock_args *uap;
1071{
1072 struct proc *p = td->td_proc;
1073 sigset_t set;
1074
1075 OSIG2SIG(uap->mask, set);
1076 SIG_CANTMASK(set);
1077 PROC_LOCK(p);
1078 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1079 SIGSETOR(td->td_sigmask, set);
1080 PROC_UNLOCK(p);
1081 return (0);
1082}
1083
1084#ifndef _SYS_SYSPROTO_H_
1085struct osigsetmask_args {
1086 int mask;
1087};
1088#endif
1089/*
1090 * MPSAFE
1091 */
1092int
1093osigsetmask(td, uap)
1094 struct thread *td;
1095 struct osigsetmask_args *uap;
1096{
1097 struct proc *p = td->td_proc;
1098 sigset_t set;
1099
1100 OSIG2SIG(uap->mask, set);
1101 SIG_CANTMASK(set);
1102 PROC_LOCK(p);
1103 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1104 SIGSETLO(td->td_sigmask, set);
1105 signotify(td);
1106 PROC_UNLOCK(p);
1107 return (0);
1108}
1042 error = copyout(&vec, uap->osv, sizeof(vec));
1043 }
1044 return (error);
1045}
1046
1047#ifndef _SYS_SYSPROTO_H_
1048struct osigblock_args {
1049 int mask;
1050};
1051#endif
1052/*
1053 * MPSAFE
1054 */
1055int
1056osigblock(td, uap)
1057 register struct thread *td;
1058 struct osigblock_args *uap;
1059{
1060 struct proc *p = td->td_proc;
1061 sigset_t set;
1062
1063 OSIG2SIG(uap->mask, set);
1064 SIG_CANTMASK(set);
1065 PROC_LOCK(p);
1066 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1067 SIGSETOR(td->td_sigmask, set);
1068 PROC_UNLOCK(p);
1069 return (0);
1070}
1071
1072#ifndef _SYS_SYSPROTO_H_
1073struct osigsetmask_args {
1074 int mask;
1075};
1076#endif
1077/*
1078 * MPSAFE
1079 */
1080int
1081osigsetmask(td, uap)
1082 struct thread *td;
1083 struct osigsetmask_args *uap;
1084{
1085 struct proc *p = td->td_proc;
1086 sigset_t set;
1087
1088 OSIG2SIG(uap->mask, set);
1089 SIG_CANTMASK(set);
1090 PROC_LOCK(p);
1091 SIG2OSIG(td->td_sigmask, td->td_retval[0]);
1092 SIGSETLO(td->td_sigmask, set);
1093 signotify(td);
1094 PROC_UNLOCK(p);
1095 return (0);
1096}
1109#endif /* COMPAT_43 || COMPAT_SUNOS */
1097#endif /* COMPAT_43 */
1110
1111/*
1112 * Suspend process until signal, providing mask to be set
1113 * in the meantime.
1114 ***** XXXKSE this doesn't make sense under KSE.
1115 ***** Do we suspend the thread or all threads in the process?
1116 ***** How do we suspend threads running NOW on another processor?
1117 */
1118#ifndef _SYS_SYSPROTO_H_
1119struct sigsuspend_args {
1120 const sigset_t *sigmask;
1121};
1122#endif
1123/*
1124 * MPSAFE
1125 */
1126/* ARGSUSED */
1127int
1128sigsuspend(td, uap)
1129 struct thread *td;
1130 struct sigsuspend_args *uap;
1131{
1132 sigset_t mask;
1133 int error;
1134
1135 error = copyin(uap->sigmask, &mask, sizeof(mask));
1136 if (error)
1137 return (error);
1138 return (kern_sigsuspend(td, mask));
1139}
1140
1141int
1142kern_sigsuspend(struct thread *td, sigset_t mask)
1143{
1144 struct proc *p = td->td_proc;
1145
1146 /*
1147 * When returning from sigsuspend, we want
1148 * the old mask to be restored after the
1149 * signal handler has finished. Thus, we
1150 * save it here and mark the sigacts structure
1151 * to indicate this.
1152 */
1153 PROC_LOCK(p);
1154 td->td_oldsigmask = td->td_sigmask;
1155 td->td_pflags |= TDP_OLDMASK;
1156 SIG_CANTMASK(mask);
1157 td->td_sigmask = mask;
1158 signotify(td);
1159 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
1160 /* void */;
1161 PROC_UNLOCK(p);
1162 /* always return EINTR rather than ERESTART... */
1163 return (EINTR);
1164}
1165
1166#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1167/*
1168 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1169 * convention: libc stub passes mask, not pointer, to save a copyin.
1170 */
1171#ifndef _SYS_SYSPROTO_H_
1172struct osigsuspend_args {
1173 osigset_t mask;
1174};
1175#endif
1176/*
1177 * MPSAFE
1178 */
1179/* ARGSUSED */
1180int
1181osigsuspend(td, uap)
1182 struct thread *td;
1183 struct osigsuspend_args *uap;
1184{
1185 struct proc *p = td->td_proc;
1186 sigset_t mask;
1187
1188 PROC_LOCK(p);
1189 td->td_oldsigmask = td->td_sigmask;
1190 td->td_pflags |= TDP_OLDMASK;
1191 OSIG2SIG(uap->mask, mask);
1192 SIG_CANTMASK(mask);
1193 SIGSETLO(td->td_sigmask, mask);
1194 signotify(td);
1195 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
1196 /* void */;
1197 PROC_UNLOCK(p);
1198 /* always return EINTR rather than ERESTART... */
1199 return (EINTR);
1200}
1201#endif /* COMPAT_43 */
1202
1098
1099/*
1100 * Suspend process until signal, providing mask to be set
1101 * in the meantime.
1102 ***** XXXKSE this doesn't make sense under KSE.
1103 ***** Do we suspend the thread or all threads in the process?
1104 ***** How do we suspend threads running NOW on another processor?
1105 */
1106#ifndef _SYS_SYSPROTO_H_
1107struct sigsuspend_args {
1108 const sigset_t *sigmask;
1109};
1110#endif
1111/*
1112 * MPSAFE
1113 */
1114/* ARGSUSED */
1115int
1116sigsuspend(td, uap)
1117 struct thread *td;
1118 struct sigsuspend_args *uap;
1119{
1120 sigset_t mask;
1121 int error;
1122
1123 error = copyin(uap->sigmask, &mask, sizeof(mask));
1124 if (error)
1125 return (error);
1126 return (kern_sigsuspend(td, mask));
1127}
1128
1129int
1130kern_sigsuspend(struct thread *td, sigset_t mask)
1131{
1132 struct proc *p = td->td_proc;
1133
1134 /*
1135 * When returning from sigsuspend, we want
1136 * the old mask to be restored after the
1137 * signal handler has finished. Thus, we
1138 * save it here and mark the sigacts structure
1139 * to indicate this.
1140 */
1141 PROC_LOCK(p);
1142 td->td_oldsigmask = td->td_sigmask;
1143 td->td_pflags |= TDP_OLDMASK;
1144 SIG_CANTMASK(mask);
1145 td->td_sigmask = mask;
1146 signotify(td);
1147 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0)
1148 /* void */;
1149 PROC_UNLOCK(p);
1150 /* always return EINTR rather than ERESTART... */
1151 return (EINTR);
1152}
1153
1154#ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1155/*
1156 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1157 * convention: libc stub passes mask, not pointer, to save a copyin.
1158 */
1159#ifndef _SYS_SYSPROTO_H_
1160struct osigsuspend_args {
1161 osigset_t mask;
1162};
1163#endif
1164/*
1165 * MPSAFE
1166 */
1167/* ARGSUSED */
1168int
1169osigsuspend(td, uap)
1170 struct thread *td;
1171 struct osigsuspend_args *uap;
1172{
1173 struct proc *p = td->td_proc;
1174 sigset_t mask;
1175
1176 PROC_LOCK(p);
1177 td->td_oldsigmask = td->td_sigmask;
1178 td->td_pflags |= TDP_OLDMASK;
1179 OSIG2SIG(uap->mask, mask);
1180 SIG_CANTMASK(mask);
1181 SIGSETLO(td->td_sigmask, mask);
1182 signotify(td);
1183 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0)
1184 /* void */;
1185 PROC_UNLOCK(p);
1186 /* always return EINTR rather than ERESTART... */
1187 return (EINTR);
1188}
1189#endif /* COMPAT_43 */
1190
1203#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1191#if defined(COMPAT_43)
1204#ifndef _SYS_SYSPROTO_H_
1205struct osigstack_args {
1206 struct sigstack *nss;
1207 struct sigstack *oss;
1208};
1209#endif
1210/*
1211 * MPSAFE
1212 */
1213/* ARGSUSED */
1214int
1215osigstack(td, uap)
1216 struct thread *td;
1217 register struct osigstack_args *uap;
1218{
1219 struct sigstack nss, oss;
1220 int error = 0;
1221
1222 if (uap->nss != NULL) {
1223 error = copyin(uap->nss, &nss, sizeof(nss));
1224 if (error)
1225 return (error);
1226 }
1227 oss.ss_sp = td->td_sigstk.ss_sp;
1228 oss.ss_onstack = sigonstack(cpu_getstack(td));
1229 if (uap->nss != NULL) {
1230 td->td_sigstk.ss_sp = nss.ss_sp;
1231 td->td_sigstk.ss_size = 0;
1232 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1233 td->td_pflags |= TDP_ALTSTACK;
1234 }
1235 if (uap->oss != NULL)
1236 error = copyout(&oss, uap->oss, sizeof(oss));
1237
1238 return (error);
1239}
#ifndef _SYS_SYSPROTO_H_
struct osigstack_args {
	struct sigstack *nss;	/* new stack descriptor (may be NULL) */
	struct sigstack *oss;	/* where to report old stack (may be NULL) */
};
#endif
/*
 * osigstack: old 4.3BSD sigstack(2) syscall.
 *
 * Optionally installs a new alternate signal stack pointer on the
 * current thread and/or reports the previous one.  The old interface
 * carries no stack size, so ss_size is recorded as zero.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
osigstack(td, uap)
	struct thread *td;
	register struct osigstack_args *uap;
{
	struct sigstack nss, oss;
	int error = 0;

	/* Copy in the new descriptor first so a fault changes nothing. */
	if (uap->nss != NULL) {
		error = copyin(uap->nss, &nss, sizeof(nss));
		if (error)
			return (error);
	}
	/* Snapshot current state for the optional out-copy below. */
	oss.ss_sp = td->td_sigstk.ss_sp;
	oss.ss_onstack = sigonstack(cpu_getstack(td));
	if (uap->nss != NULL) {
		td->td_sigstk.ss_sp = nss.ss_sp;
		td->td_sigstk.ss_size = 0;
		/* Only the SS_ONSTACK bit is meaningful in the old ABI. */
		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
		td->td_pflags |= TDP_ALTSTACK;
	}
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(oss));

	return (error);
}
1240#endif /* COMPAT_43 || COMPAT_SUNOS */
1228#endif /* COMPAT_43 */
1241
#ifndef _SYS_SYSPROTO_H_
struct sigaltstack_args {
	stack_t *ss;	/* new alternate stack (may be NULL) */
	stack_t *oss;	/* where to report old stack (may be NULL) */
};
#endif
/*
 * sigaltstack(2) syscall wrapper: performs the user<->kernel copies
 * and delegates the actual state change to kern_sigaltstack().
 *
 * MPSAFE
 */
/* ARGSUSED */
int
sigaltstack(td, uap)
	struct thread *td;
	register struct sigaltstack_args *uap;
{
	stack_t ss, oss;
	int error;

	if (uap->ss != NULL) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}
	/* NULL in either slot means "don't set" / "don't report". */
	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
	    (uap->oss != NULL) ? &oss : NULL);
	if (error)
		return (error);
	/* Only copy out the old stack if the kernel call succeeded. */
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(stack_t));
	return (error);
}
1273
/*
 * Kernel entry point for sigaltstack(2).
 *
 * If oss is non-NULL, reports the thread's current alternate stack,
 * synthesizing ss_flags from TDP_ALTSTACK and whether the thread is
 * currently executing on the alternate stack.  If ss is non-NULL,
 * installs (or disables, via SS_DISABLE) the alternate stack.
 *
 * Returns 0, or EPERM if currently on the alternate stack, EINVAL for
 * bad flags, ENOMEM if the requested stack is below the ABI minimum.
 */
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
	struct proc *p = td->td_proc;
	int oonstack;

	oonstack = sigonstack(cpu_getstack(td));

	if (oss != NULL) {
		*oss = td->td_sigstk;
		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	}

	if (ss != NULL) {
		/* Can't change the stack we are currently running on. */
		if (oonstack)
			return (EPERM);
		if ((ss->ss_flags & ~SS_DISABLE) != 0)
			return (EINVAL);
		if (!(ss->ss_flags & SS_DISABLE)) {
			/* Enforce the ABI's minimum signal stack size. */
			if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
				return (ENOMEM);
			}
			td->td_sigstk = *ss;
			td->td_pflags |= TDP_ALTSTACK;
		} else {
			td->td_pflags &= ~TDP_ALTSTACK;
		}
	}
	return (0);
}
1305
/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 *
 * If all is nonzero, signal every process except system processes,
 * init, and the caller itself; otherwise signal the process group
 * named by pgid (0 meaning the caller's own group).  A sig of 0
 * performs only the permission checks (standard kill(2) probing).
 *
 * Returns 0 if at least one process was found and signalled/probed,
 * ESRCH otherwise.
 */
static int
killpg1(td, sig, pgid, all)
	register struct thread *td;
	int sig, pgid, all;
{
	register struct proc *p;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all) {
		/*
		 * broadcast
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			/* Skip init, system processes, and ourselves. */
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    p == td->td_proc) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p_cansignal(td, p, sig) == 0) {
				nfound++;
				if (sig)
					psignal(p, sig);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
	} else {
		sx_slock(&proctree_lock);
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = td->td_proc->p_pgrp;
			PGRP_LOCK(pgrp);
		} else {
			/* NOTE(review): pgfind() appears to return the
			 * group locked — the PGRP_UNLOCK below covers
			 * both branches. */
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				sx_sunlock(&proctree_lock);
				return (ESRCH);
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p->p_state == PRS_ZOMBIE) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p_cansignal(td, p, sig) == 0) {
				nfound++;
				if (sig)
					psignal(p, sig);
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pgrp);
	}
	return (nfound ? 0 : ESRCH);
}
1376
#ifndef _SYS_SYSPROTO_H_
struct kill_args {
	int pid;	/* target: >0 pid, 0 own pgrp, -1 broadcast, <-1 pgrp */
	int signum;	/* signal number; 0 probes permissions only */
};
#endif
/*
 * kill(2) syscall.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
kill(td, uap)
	register struct thread *td;
	register struct kill_args *uap;
{
	register struct proc *p;
	int error;

	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	if (uap->pid > 0) {
		/* kill single process */
		/* pfind() returns the process locked. */
		if ((p = pfind(uap->pid)) == NULL)
			return (ESRCH);
		error = p_cansignal(td, p, uap->signum);
		if (error == 0 && uap->signum)
			psignal(p, uap->signum);
		PROC_UNLOCK(p);
		return (error);
	}
	switch (uap->pid) {
	case -1:		/* broadcast signal */
		return (killpg1(td, uap->signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(td, uap->signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(td, uap->signum, -uap->pid, 0));
	}
	/* NOTREACHED */
}
1418
1229
#ifndef _SYS_SYSPROTO_H_
struct sigaltstack_args {
	stack_t *ss;	/* new alternate stack (may be NULL) */
	stack_t *oss;	/* where to report old stack (may be NULL) */
};
#endif
/*
 * sigaltstack(2) syscall wrapper: performs the user<->kernel copies
 * and delegates the actual state change to kern_sigaltstack().
 *
 * MPSAFE
 */
/* ARGSUSED */
int
sigaltstack(td, uap)
	struct thread *td;
	register struct sigaltstack_args *uap;
{
	stack_t ss, oss;
	int error;

	if (uap->ss != NULL) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}
	/* NULL in either slot means "don't set" / "don't report". */
	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
	    (uap->oss != NULL) ? &oss : NULL);
	if (error)
		return (error);
	/* Only copy out the old stack if the kernel call succeeded. */
	if (uap->oss != NULL)
		error = copyout(&oss, uap->oss, sizeof(stack_t));
	return (error);
}
1261
/*
 * Kernel entry point for sigaltstack(2).
 *
 * If oss is non-NULL, reports the thread's current alternate stack,
 * synthesizing ss_flags from TDP_ALTSTACK and whether the thread is
 * currently executing on the alternate stack.  If ss is non-NULL,
 * installs (or disables, via SS_DISABLE) the alternate stack.
 *
 * Returns 0, or EPERM if currently on the alternate stack, EINVAL for
 * bad flags, ENOMEM if the requested stack is below the ABI minimum.
 */
int
kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
{
	struct proc *p = td->td_proc;
	int oonstack;

	oonstack = sigonstack(cpu_getstack(td));

	if (oss != NULL) {
		*oss = td->td_sigstk;
		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	}

	if (ss != NULL) {
		/* Can't change the stack we are currently running on. */
		if (oonstack)
			return (EPERM);
		if ((ss->ss_flags & ~SS_DISABLE) != 0)
			return (EINVAL);
		if (!(ss->ss_flags & SS_DISABLE)) {
			/* Enforce the ABI's minimum signal stack size. */
			if (ss->ss_size < p->p_sysent->sv_minsigstksz) {
				return (ENOMEM);
			}
			td->td_sigstk = *ss;
			td->td_pflags |= TDP_ALTSTACK;
		} else {
			td->td_pflags &= ~TDP_ALTSTACK;
		}
	}
	return (0);
}
1293
/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 *
 * If all is nonzero, signal every process except system processes,
 * init, and the caller itself; otherwise signal the process group
 * named by pgid (0 meaning the caller's own group).  A sig of 0
 * performs only the permission checks (standard kill(2) probing).
 *
 * Returns 0 if at least one process was found and signalled/probed,
 * ESRCH otherwise.
 */
static int
killpg1(td, sig, pgid, all)
	register struct thread *td;
	int sig, pgid, all;
{
	register struct proc *p;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all) {
		/*
		 * broadcast
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			/* Skip init, system processes, and ourselves. */
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
			    p == td->td_proc) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p_cansignal(td, p, sig) == 0) {
				nfound++;
				if (sig)
					psignal(p, sig);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
	} else {
		sx_slock(&proctree_lock);
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = td->td_proc->p_pgrp;
			PGRP_LOCK(pgrp);
		} else {
			/* NOTE(review): pgfind() appears to return the
			 * group locked — the PGRP_UNLOCK below covers
			 * both branches. */
			pgrp = pgfind(pgid);
			if (pgrp == NULL) {
				sx_sunlock(&proctree_lock);
				return (ESRCH);
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p->p_state == PRS_ZOMBIE) {
				PROC_UNLOCK(p);
				continue;
			}
			if (p_cansignal(td, p, sig) == 0) {
				nfound++;
				if (sig)
					psignal(p, sig);
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pgrp);
	}
	return (nfound ? 0 : ESRCH);
}
1364
#ifndef _SYS_SYSPROTO_H_
struct kill_args {
	int pid;	/* target: >0 pid, 0 own pgrp, -1 broadcast, <-1 pgrp */
	int signum;	/* signal number; 0 probes permissions only */
};
#endif
/*
 * kill(2) syscall.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
kill(td, uap)
	register struct thread *td;
	register struct kill_args *uap;
{
	register struct proc *p;
	int error;

	if ((u_int)uap->signum > _SIG_MAXSIG)
		return (EINVAL);

	if (uap->pid > 0) {
		/* kill single process */
		/* pfind() returns the process locked. */
		if ((p = pfind(uap->pid)) == NULL)
			return (ESRCH);
		error = p_cansignal(td, p, uap->signum);
		if (error == 0 && uap->signum)
			psignal(p, uap->signum);
		PROC_UNLOCK(p);
		return (error);
	}
	switch (uap->pid) {
	case -1:		/* broadcast signal */
		return (killpg1(td, uap->signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(td, uap->signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(td, uap->signum, -uap->pid, 0));
	}
	/* NOTREACHED */
}
1406
1419#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1407#if defined(COMPAT_43)
1420#ifndef _SYS_SYSPROTO_H_
1421struct okillpg_args {
1422 int pgid;
1423 int signum;
1424};
1425#endif
1426/*
1427 * MPSAFE
1428 */
1429/* ARGSUSED */
1430int
1431okillpg(td, uap)
1432 struct thread *td;
1433 register struct okillpg_args *uap;
1434{
1435
1436 if ((u_int)uap->signum > _SIG_MAXSIG)
1437 return (EINVAL);
1438 return (killpg1(td, uap->signum, uap->pgid, 0));
1439}
1408#ifndef _SYS_SYSPROTO_H_
1409struct okillpg_args {
1410 int pgid;
1411 int signum;
1412};
1413#endif
1414/*
1415 * MPSAFE
1416 */
1417/* ARGSUSED */
1418int
1419okillpg(td, uap)
1420 struct thread *td;
1421 register struct okillpg_args *uap;
1422{
1423
1424 if ((u_int)uap->signum > _SIG_MAXSIG)
1425 return (EINVAL);
1426 return (killpg1(td, uap->signum, uap->pgid, 0));
1427}
1440#endif /* COMPAT_43 || COMPAT_SUNOS */
1428#endif /* COMPAT_43 */
1441
1442/*
1443 * Send a signal to a process group.
1444 */
1445void
1446gsignal(pgid, sig)
1447 int pgid, sig;
1448{
1449 struct pgrp *pgrp;
1450
1451 if (pgid != 0) {
1452 sx_slock(&proctree_lock);
1453 pgrp = pgfind(pgid);
1454 sx_sunlock(&proctree_lock);
1455 if (pgrp != NULL) {
1456 pgsignal(pgrp, sig, 0);
1457 PGRP_UNLOCK(pgrp);
1458 }
1459 }
1460}
1461
1462/*
1463 * Send a signal to a process group. If checktty is 1,
1464 * limit to members which have a controlling terminal.
1465 */
1466void
1467pgsignal(pgrp, sig, checkctty)
1468 struct pgrp *pgrp;
1469 int sig, checkctty;
1470{
1471 register struct proc *p;
1472
1473 if (pgrp) {
1474 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1475 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1476 PROC_LOCK(p);
1477 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1478 psignal(p, sig);
1479 PROC_UNLOCK(p);
1480 }
1481 }
1482}
1483
/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 *
 * For KSE (TDP_SA) threads the synchronous signal is instead copied
 * into the thread mailbox so the userland thread scheduler (UTS) can
 * dispatch it; an upcall is forced so the UTS runs before the thread
 * returns to userland.
 *
 * MPSAFE
 */
void
trapsignal(struct thread *td, int sig, u_long code)
{
	struct sigacts *ps;
	struct proc *p;
	siginfo_t siginfo;
	int error;

	p = td->td_proc;
	if (td->td_pflags & TDP_SA) {
		if (td->td_mailbox == NULL)
			thread_user_enter(p, td);
		PROC_LOCK(p);
		if (td->td_mailbox) {
			SIGDELSET(td->td_sigmask, sig);
			mtx_lock_spin(&sched_lock);
			/*
			 * Force scheduling an upcall, so UTS has chance to
			 * process the signal before thread runs again in
			 * userland.
			 */
			if (td->td_upcall)
				td->td_upcall->ku_flags |= KUF_DOUPCALL;
			mtx_unlock_spin(&sched_lock);
		} else {
			/* UTS caused a sync signal */
			p->p_code = code;	/* XXX for core dump/debugger */
			p->p_sig = sig;		/* XXX to verify code */
			sigexit(td, sig);
		}
	} else {
		PROC_LOCK(p);
	}
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/* Deliver immediately only if caught, unmasked, and untraced. */
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(td->td_sigmask, sig)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, code);
#endif
		if (!(td->td_pflags & TDP_SA))
			(*p->p_sysent->sv_sendsig)(
				ps->ps_sigact[_SIG_IDX(sig)], sig,
				&td->td_sigmask, code);
		else {
			/*
			 * KSE: copy the siginfo into the thread mailbox;
			 * locks must be dropped around the copyout.
			 */
			cpu_thread_siginfo(sig, code, &siginfo);
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			error = copyout(&siginfo, &td->td_mailbox->tm_syncsig,
			    sizeof(siginfo));
			PROC_LOCK(p);
			/* UTS memory corrupted */
			if (error)
				sigexit(td, SIGILL);
			SIGADDSET(td->td_sigmask, sig);
			mtx_lock(&ps->ps_mtx);
		}
		/* Apply the handler's catchmask and SA_NODEFER/SA_RESETHAND. */
		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		mtx_unlock(&ps->ps_mtx);
	} else {
		/* Not deliverable now: post it through the normal path. */
		mtx_unlock(&ps->ps_mtx);
		p->p_code = code;	/* XXX for core dump/debugger */
		p->p_sig = sig;		/* XXX to verify code */
		tdsignal(td, sig, SIGTARGET_TD);
	}
	PROC_UNLOCK(p);
}
1573
/*
 * Choose the thread within p that should receive signal sig.
 *
 * Preference order: (1) a thread sigwait()ing on the signal,
 * (2) the current thread if it belongs to p and doesn't mask the
 * signal, (3) the first thread found that doesn't mask it,
 * (4) as a last resort, the process's first thread.
 */
static struct thread *
sigtd(struct proc *p, int sig, int prop)
{
	struct thread *td, *signal_td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * First find a thread in sigwait state and signal belongs to
	 * its wait set. POSIX's arguments is that speed of delivering signal
	 * to sigwait thread is faster than delivering signal to user stack.
	 * If we can not find sigwait thread, then find the first thread in
	 * the proc that doesn't have this signal masked, an exception is
	 * if current thread is sending signal to its process, and it does not
	 * mask the signal, it should get the signal, this is another fast
	 * way to deliver signal.
	 */
	signal_td = NULL;
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_waitset != NULL &&
		    SIGISMEMBER(*(td->td_waitset), sig)) {
			mtx_unlock_spin(&sched_lock);
			return (td);
		}
		if (!SIGISMEMBER(td->td_sigmask, sig)) {
			if (td == curthread)
				signal_td = curthread;
			else if (signal_td == NULL)
				signal_td = td;
		}
	}
	if (signal_td == NULL)
		signal_td = FIRST_THREAD_IN_PROC(p);
	mtx_unlock_spin(&sched_lock);
	return (signal_td);
}
1611
/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * Caller must hold the proc lock.
 *
 * MPSAFE
 */
void
psignal(struct proc *p, int sig)
{
	struct thread *td;
	int prop;

	if (!_SIG_VALID(sig))
		panic("psignal(): invalid signal");

	PROC_LOCK_ASSERT(p, MA_OWNED);
	prop = sigprop(sig);

	/*
	 * Find a thread to deliver the signal to.
	 */
	td = sigtd(p, sig, prop);

	/* SIGTARGET_P: the signal is aimed at the process as a whole. */
	tdsignal(td, sig, SIGTARGET_P);
}
1646
/*
 * Post a signal to a specific thread (or to its process, depending on
 * target).  Wraps do_tdsignal() with KSE bookkeeping: if the process-
 * wide pending set changed, flag P_SIGEVENT and wake any UTS sleeping
 * in kse_thr_interrupt()/sigwait on p_siglist.
 *
 * MPSAFE
 */
void
tdsignal(struct thread *td, int sig, sigtarget_t target)
{
	sigset_t saved;
	struct proc *p = td->td_proc;

	if (p->p_flag & P_SA)
		saved = p->p_siglist;
	do_tdsignal(td, sig, target);
	if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
		if (SIGSETEQ(saved, p->p_siglist))
			return;
		else {
			/* pending set changed */
			p->p_flag |= P_SIGEVENT;
			wakeup(&p->p_siglist);
		}
	}
}
1669
/*
 * Core signal-posting state machine.
 *
 * Decides which pending-signal list (thread or process) receives sig,
 * computes the effective action (SIG_DFL / SIG_CATCH / SIG_HOLD),
 * clears mutually-exclusive stop/continue signals, and then performs
 * the process-wide side effects: continuing a stopped process,
 * stopping a running one, or waking the target thread.  Caller holds
 * the proc lock; sched_lock is taken internally as needed.
 */
static void
do_tdsignal(struct thread *td, int sig, sigtarget_t target)
{
	struct proc *p;
	register sig_t action;
	sigset_t *siglist;
	struct thread *td0;
	register int prop;
	struct sigacts *ps;

	if (!_SIG_VALID(sig))
		panic("do_tdsignal(): invalid signal");

	p = td->td_proc;
	ps = p->p_sigacts;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KNOTE(&p->p_klist, NOTE_SIGNAL | sig);

	prop = sigprop(sig);

	/*
	 * If the signal is blocked and not destined for this thread, then
	 * assign it to the process so that we can find it later in the first
	 * thread that unblocks it.  Otherwise, assign it to this thread now.
	 */
	if (target == SIGTARGET_TD) {
		siglist = &td->td_siglist;
	} else {
		if (!SIGISMEMBER(td->td_sigmask, sig))
			siglist = &td->td_siglist;
		else if (td->td_waitset != NULL &&
			SIGISMEMBER(*(td->td_waitset), sig))
			siglist = &td->td_siglist;
		else
			siglist = &p->p_siglist;
	}

	/*
	 * If proc is traced, always give parent a chance;
	 * if signal event is tracked by procfs, give *that*
	 * a chance, as well.
	 */
	if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
		action = SIG_DFL;
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		mtx_lock(&ps->ps_mtx);
		if (SIGISMEMBER(ps->ps_sigignore, sig) ||
		    (p->p_flag & P_WEXIT)) {
			mtx_unlock(&ps->ps_mtx);
			return;
		}
		/* Masked and not in the thread's sigwait set => hold. */
		if (((td->td_waitset == NULL) &&
		     SIGISMEMBER(td->td_sigmask, sig)) ||
		    ((td->td_waitset != NULL) &&
		     SIGISMEMBER(td->td_sigmask, sig) &&
		     !SIGISMEMBER(*(td->td_waitset), sig)))
			action = SIG_HOLD;
		else if (SIGISMEMBER(ps->ps_sigcatch, sig))
			action = SIG_CATCH;
		else
			action = SIG_DFL;
		mtx_unlock(&ps->ps_mtx);
	}

	/* A continue signal cancels any pending stop signals. */
	if (prop & SA_CONT) {
		SIG_STOPSIGMASK(p->p_siglist);
		/*
		 * XXX Should investigate leaving STOP and CONT sigs only in
		 * the proc's siglist.
		 */
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td0)
			SIG_STOPSIGMASK(td0->td_siglist);
		mtx_unlock_spin(&sched_lock);
	}

	if (prop & SA_STOP) {
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		if ((prop & SA_TTYSTOP) &&
		    (p->p_pgrp->pg_jobc == 0) &&
		    (action == SIG_DFL))
		        return;
		/* A stop signal cancels any pending continue signals. */
		SIG_CONTSIGMASK(p->p_siglist);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td0)
			SIG_CONTSIGMASK(td0->td_siglist);
		mtx_unlock_spin(&sched_lock);
		p->p_flag &= ~P_CONTINUED;
	}

	SIGADDSET(*siglist, sig);
	signotify(td);			/* uses schedlock */
	/* The sigwait is satisfied; clear the thread's wait set. */
	if (siglist == &td->td_siglist && (td->td_waitset != NULL) &&
	    action != SIG_HOLD) {
		td->td_waitset = NULL;
	}

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD &&
	    !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
		return;
	/*
	 * Some signals have a process-wide effect and a per-thread
	 * component.  Most processing occurs when the process next
	 * tries to cross the user boundary, however there are some
	 * times when processing needs to be done immediatly, such as
	 * waking up threads so that they can cross the user boundary.
	 * We try do the per-process part here.
	 */
	if (P_SHOULDSTOP(p)) {
		/*
		 * The process is in stopped mode. All the threads should be
		 * either winding down or already on the suspended queue.
		 */
		if (p->p_flag & P_TRACED) {
			/*
			 * The traced process is already stopped,
			 * so no further action is necessary.
			 * No signal can restart us.
			 */
			goto out;
		}

		if (sig == SIGKILL) {
			/*
			 * SIGKILL sets process running.
			 * It will die elsewhere.
			 * All threads must be restarted.
			 */
			p->p_flag &= ~P_STOPPED;
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in siglist as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			p->p_flag &= ~P_STOPPED_SIG;
			p->p_flag |= P_CONTINUED;
			if (action == SIG_DFL) {
				SIGDELSET(*siglist, sig);
			} else if (action == SIG_CATCH) {
				/*
				 * The process wants to catch it so it needs
				 * to run at least one thread, but which one?
				 * It would seem that the answer would be to
				 * run an upcall in the next KSE to run, and
				 * deliver the signal that way. In a NON KSE
				 * process, we need to make sure that the
				 * single thread is runnable asap.
				 * XXXKSE for now however, make them all run.
				 */
				goto runfast;
			}
			/*
			 * The signal is not ignored or caught.
			 */
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again
			 * (If we did the shell could get confused).
			 * Just make sure the signal STOP bit set.
			 */
			p->p_flag |= P_STOPPED_SIG;
			SIGDELSET(*siglist, sig);
			goto out;
		}

		/*
		 * All other kinds of signals:
		 * If a thread is sleeping interruptibly, simulate a
		 * wakeup so that when it is continued it will be made
		 * runnable and can look at the signal.  However, don't make
		 * the PROCESS runnable, leave it stopped.
		 * It may run a bit until it hits a thread_suspend_check().
		 */
		mtx_lock_spin(&sched_lock);
		if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
			sleepq_abort(td);
		mtx_unlock_spin(&sched_lock);
		goto out;
		/*
		 * Mutexes are short lived. Threads waiting on them will
		 * hit thread_suspend_check() soon.
		 */
	} else if (p->p_state == PRS_NORMAL) {
		if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
			!(prop & SA_STOP)) {
			mtx_lock_spin(&sched_lock);
			tdsigwakeup(td, sig, action);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}
		if (prop & SA_STOP) {
			/* Don't stop while the parent vfork()-waits on us. */
			if (p->p_flag & P_PPWAIT)
				goto out;
			p->p_flag |= P_STOPPED_SIG;
			p->p_xstat = sig;
			mtx_lock_spin(&sched_lock);
			/* Suspend sleepers; poke runners via AST. */
			FOREACH_THREAD_IN_PROC(p, td0) {
				if (TD_IS_SLEEPING(td0) &&
				    (td0->td_flags & TDF_SINTR) &&
				    !TD_IS_SUSPENDED(td0)) {
					thread_suspend_one(td0);
				} else if (td != td0) {
					td0->td_flags |= TDF_ASTPENDING;
				}
			}
			thread_stopped(p);
			/* Fully stopped: the stop signal is consumed. */
			if (p->p_numthreads == p->p_suspcount) {
				SIGDELSET(p->p_siglist, p->p_xstat);
				FOREACH_THREAD_IN_PROC(p, td0)
					SIGDELSET(td0->td_siglist, p->p_xstat);
			}
			mtx_unlock_spin(&sched_lock);
			goto out;
		}
		else
			goto runfast;
		/* NOTREACHED */
	} else {
		/* Not in "NORMAL" state. discard the signal. */
		SIGDELSET(*siglist, sig);
		goto out;
	}

	/*
	 * The process is not stopped so we need to apply the signal to all the
	 * running threads.
	 */

runfast:
	mtx_lock_spin(&sched_lock);
	tdsigwakeup(td, sig, action);
	thread_unsuspend(p);
	mtx_unlock_spin(&sched_lock);
out:
	/* If we jump here, sched_lock should not be owned. */
	mtx_assert(&sched_lock, MA_NOTOWNED);
}
1939
/*
 * The force of a signal has been directed against a single
 * thread.  We need to see what we can do about knocking it
 * out of any sleep it may be in etc.
 *
 * Called with both the proc lock and sched_lock held.
 */
static void
tdsigwakeup(struct thread *td, int sig, sig_t action)
{
	struct proc *p = td->td_proc;
	register int prop;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	prop = sigprop(sig);

	/*
	 * Bring the priority of a thread up if we want it to get
	 * killed in this lifetime.
	 */
	if (action == SIG_DFL && (prop & SA_KILL)) {
		if (td->td_priority > PUSER)
			td->td_priority = PUSER;
	}

	if (TD_ON_SLEEPQ(td)) {
		/*
		 * If thread is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((td->td_flags & TDF_SINTR) == 0)
			return;
		/*
		 * Process is sleeping and traced.  Make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for its parent.
		 */
		if (p->p_flag & P_TRACED) {
			p->p_flag &= ~P_STOPPED_TRACE;
		} else {
			/*
			 * If SIGCONT is default (or ignored) and process is
			 * asleep, we are finished; the process should not
			 * be awakened.
			 */
			if ((prop & SA_CONT) && action == SIG_DFL) {
				SIGDELSET(p->p_siglist, sig);
				/*
				 * It may be on either list in this state.
				 * Remove from both for now.
				 */
				SIGDELSET(td->td_siglist, sig);
				return;
			}

			/*
			 * Give low priority threads a better chance to run.
			 */
			if (td->td_priority > PUSER)
				td->td_priority = PUSER;
		}
		/* Break the thread out of its interruptible sleep. */
		sleepq_abort(td);
	} else {
		/*
		 * Other states do nothing with the signal immediately,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
#ifdef SMP
		/* Running on another CPU: IPI it so it sees the signal. */
		if (TD_IS_RUNNING(td) && td != curthread)
			forward_signal(td);
#endif
	}
}
2015
/*
 * Stop the current thread for a traced-signal event: record the
 * signal in p_xstat, notify the tracing parent with SIGCHLD, mark the
 * process stopped, and context-switch away until the debugger resumes
 * us.  Called (and returns) with the proc lock held... the proc lock
 * is dropped across the switch.
 */
void
ptracestop(struct thread *td, int sig)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
	    &p->p_mtx.mtx_object, "Stopping for traced signal");

	p->p_xstat = sig;
	PROC_LOCK(p->p_pptr);
	psignal(p->p_pptr, SIGCHLD);
	PROC_UNLOCK(p->p_pptr);
	stop(p);
	mtx_lock_spin(&sched_lock);
	thread_suspend_one(td);
	PROC_UNLOCK(p);
	DROP_GIANT();
	/* Involuntary switch; we stay suspended until resumed. */
	mi_switch(SW_INVOL);
	mtx_unlock_spin(&sched_lock);
	PICKUP_GIANT();
}
2038
/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in cursig.) The normal call
 * sequence is
 *
 *	while (sig = cursig(curthread))
 *		postsig(sig);
 *
 * Called with both the proc lock and the sigacts mutex held; both may
 * be dropped and reacquired internally (ptrace stop, SIGSTOP switch).
 */
static int
issignal(td)
	struct thread *td;
{
	struct proc *p;
	struct sigacts *ps;
	sigset_t sigpending;
	int sig, prop;
	struct thread *td0;

	p = td->td_proc;
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	for (;;) {
		int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);

		/* Deliverable = pending on this thread and not masked. */
		sigpending = td->td_siglist;
		SIGSETNAND(sigpending, td->td_sigmask);

		/* While the parent vfork()-waits, ignore stop signals. */
		if (p->p_flag & P_PPWAIT)
			SIG_STOPSIGMASK(sigpending);
		if (SIGISEMPTY(sigpending))	/* no signal to send */
			return (0);
		sig = sig_ffs(&sigpending);

		if (p->p_stops & S_SIG) {
			mtx_unlock(&ps->ps_mtx);
			stopevent(p, S_SIG, sig);
			mtx_lock(&ps->ps_mtx);
		}

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
			SIGDELSET(td->td_siglist, sig);
			continue;
		}
		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop.
			 */
			mtx_unlock(&ps->ps_mtx);
			ptracestop(td, sig);
			PROC_LOCK(p);
			mtx_lock(&ps->ps_mtx);

			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			SIGDELSET(td->td_siglist, sig);	/* clear old signal */
			sig = p->p_xstat;
			if (sig == 0)
				continue;

			/*
			 * If the traced bit got turned off, go back up
			 * to the top to rescan signals.  This ensures
			 * that p_sig* and p_sigact are consistent.
			 */
			if ((p->p_flag & P_TRACED) == 0)
				continue;

			/*
			 * Put the new signal into td_siglist.  If the
			 * signal is being masked, look for other signals.
			 */
			SIGADDSET(td->td_siglist, sig);
			if (SIGISMEMBER(td->td_sigmask, sig))
				continue;
			signotify(td);
		}

		prop = sigprop(sig);

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {

		case (intptr_t)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %lu) got signal %d\n",
					(u_long)p->p_pid, sig);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
		    		    (p->p_pgrp->pg_jobc == 0 &&
				     prop & SA_TTYSTOP))
					break;	/* == ignore */
				mtx_unlock(&ps->ps_mtx);
				WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
				    &p->p_mtx.mtx_object, "Catching SIGSTOP");
				p->p_flag |= P_STOPPED_SIG;
				p->p_xstat = sig;
				mtx_lock_spin(&sched_lock);
				/* Suspend sleepers; poke runners via AST. */
				FOREACH_THREAD_IN_PROC(p, td0) {
					if (TD_IS_SLEEPING(td0) &&
					    (td0->td_flags & TDF_SINTR) &&
					    !TD_IS_SUSPENDED(td0)) {
						thread_suspend_one(td0);
					} else if (td != td0) {
						td0->td_flags |= TDF_ASTPENDING;
					}
				}
				thread_stopped(p);
				thread_suspend_one(td);
				PROC_UNLOCK(p);
				DROP_GIANT();
				mi_switch(SW_INVOL);
				mtx_unlock_spin(&sched_lock);
				PICKUP_GIANT();
				PROC_LOCK(p);
				mtx_lock(&ps->ps_mtx);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				return (sig);
			/*NOTREACHED*/

		case (intptr_t)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			return (sig);
		}
		SIGDELSET(td->td_siglist, sig);		/* take the signal! */
	}
	/* NOTREACHED */
}
2221
/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.  Must be called with the proc p locked.
 */
static void
stop(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag |= P_STOPPED_SIG;
	/* Clear P_WAITED so wait(2) reports this stop exactly once. */
	p->p_flag &= ~P_WAITED;
	/* The parent sleeps on its own proc pointer in wait(2). */
	wakeup(p->p_pptr);
}
2236
/*
 * Called when a thread suspends: if every thread of p is now stopped
 * (counting the caller, which hasn't incremented p_suspcount yet),
 * mark the process stopped and notify the parent with SIGCHLD unless
 * it set SA_NOCLDSTOP.  Called (and returns) with the proc lock and
 * sched_lock held; sched_lock is dropped internally.
 *
 * MPSAFE
 */
void
thread_stopped(struct proc *p)
{
	struct proc *p1 = curthread->td_proc;
	struct sigacts *ps;
	int n;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	n = p->p_suspcount;
	/* Count the calling thread as suspended if it belongs to p. */
	if (p == p1)
		n++;
	if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
		mtx_unlock_spin(&sched_lock);
		stop(p);
		PROC_LOCK(p->p_pptr);
		ps = p->p_pptr->p_sigacts;
		mtx_lock(&ps->ps_mtx);
		if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
			mtx_unlock(&ps->ps_mtx);
			psignal(p->p_pptr, SIGCHLD);
		} else
			mtx_unlock(&ps->ps_mtx);
		PROC_UNLOCK(p->p_pptr);
		mtx_lock_spin(&sched_lock);
	}
}
2267
2268/*
2269 * Take the action for the specified signal
2270 * from the current set of pending signals.
2271 */
void
postsig(sig)
	register int sig;
{
	struct thread *td = curthread;
	register struct proc *p = td->td_proc;
	struct sigacts *ps;
	sig_t action;
	sigset_t returnmask;
	int code;

	KASSERT(sig != 0, ("postsig"));

	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	/* Consume the signal from this thread's pending set. */
	SIGDELSET(td->td_siglist, sig);
	action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
	if (KTRPOINT(td, KTR_PSIG))
		ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
		    &td->td_oldsigmask : &td->td_sigmask, 0);
#endif
	/* Let a procfs debugger observe the delivery, if it asked to. */
	if (p->p_stops & S_SIG) {
		mtx_unlock(&ps->ps_mtx);
		stopevent(p, S_SIG, sig);
		mtx_lock(&ps->ps_mtx);
	}

	if (!(td->td_pflags & TDP_SA && td->td_mailbox) &&
	    action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		mtx_unlock(&ps->ps_mtx);
		sigexit(td, sig);
		/* NOTREACHED */
	} else {
		/* A KSE thread with a mailbox still dies on SIGKILL here. */
		if (td->td_pflags & TDP_SA && td->td_mailbox) {
			if (sig == SIGKILL) {
				mtx_unlock(&ps->ps_mtx);
				sigexit(td, sig);
			}
		}

		/*
		 * If we get here, the signal must be caught.
		 */
		KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
		    ("postsig action"));
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigsuspend.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigsuspend is what we want
		 * restored after the signal processing is completed.
		 */
		if (td->td_pflags & TDP_OLDMASK) {
			returnmask = td->td_oldsigmask;
			td->td_pflags &= ~TDP_OLDMASK;
		} else
			returnmask = td->td_sigmask;

		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);

		/* SA_RESETHAND semantics: restore default disposition. */
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		p->p_stats->p_ru.ru_nsignals++;
		/* Hand over the trap code only if it belongs to this signal. */
		if (p->p_sig != sig) {
			code = 0;
		} else {
			code = p->p_code;
			p->p_code = 0;
			p->p_sig = 0;
		}
		if (td->td_pflags & TDP_SA && td->td_mailbox)
			thread_signal_add(curthread, sig);
		else
			(*p->p_sysent->sv_sendsig)(action, sig,
			    &returnmask, code);
	}
}
2367
2368/*
2369 * Kill the current process for stated reason.
2370 */
void
killproc(p, why)
	struct proc *p;
	char *why;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
		p, p->p_pid, p->p_comm);
	/* Record the reason in the system log before posting SIGKILL. */
	log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
		p->p_ucred ? p->p_ucred->cr_uid : -1, why);
	psignal(p, SIGKILL);
}
2384
2385/*
2386 * Force the current process to exit with the specified signal, dumping core
2387 * if appropriate. We bypass the normal tests for masked and caught signals,
2388 * allowing unrecoverable failures to terminate the process without changing
2389 * signal state. Mark the accounting record with the signal termination.
2390 * If dumping core, save the signal number for the debugger. Calls exit and
2391 * does not return.
2392 *
2393 * MPSAFE
2394 */
void
sigexit(td, sig)
	struct thread *td;
	int sig;
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/* Flag the accounting record as a signal exit. */
	p->p_acflag |= AXSIG;
	if (sigprop(sig) & SA_CORE) {
		p->p_sig = sig;
		/*
		 * Log signals which would cause core dumps
		 * (Log as LOG_INFO to appease those who don't want
		 * these messages.)
		 * XXX : Todo, as well as euid, write out ruid too
		 * Note that coredump() drops proc lock.
		 */
		if (coredump(td) == 0)
			sig |= WCOREFLAG;
		if (kern_logsigexit)
			log(LOG_INFO,
			    "pid %d (%s), uid %d: exited on signal %d%s\n",
			    p->p_pid, p->p_comm,
			    td->td_ucred ? td->td_ucred->cr_uid : -1,
			    sig &~ WCOREFLAG,
			    sig & WCOREFLAG ? " (core dumped)" : "");
	} else
		PROC_UNLOCK(p);
	/* On both paths the proc lock has been dropped before exit1(). */
	exit1(td, W_EXITCODE(0, sig));
	/* NOTREACHED */
}
2427
/* Core file name template; expand_name() below interprets %N/%P/%U. */
static char corefilename[MAXPATHLEN+1] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
	      sizeof(corefilename), "process corefile name format string");
2431
2432/*
2433 * expand_name(name, uid, pid)
2434 * Expand the name described in corefilename, using name, uid, and pid.
2435 * corefilename is a printf-like string, with three format specifiers:
2436 * %N name of process ("name")
2437 * %P process id (pid)
2438 * %U user id (uid)
2439 * For example, "%N.core" is the default; they can be disabled completely
2440 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2441 * This is controlled by the sysctl variable kern.corefile (see above).
2442 */
2443
2444static char *
2445expand_name(name, uid, pid)
2446 const char *name;
2447 uid_t uid;
2448 pid_t pid;
2449{
2450 const char *format, *appendstr;
2451 char *temp;
2452 char buf[11]; /* Buffer for pid/uid -- max 4B */
2453 size_t i, l, n;
2454
2455 format = corefilename;
2456 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
2457 if (temp == NULL)
2458 return (NULL);
2459 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
2460 switch (format[i]) {
2461 case '%': /* Format character */
2462 i++;
2463 switch (format[i]) {
2464 case '%':
2465 appendstr = "%";
2466 break;
2467 case 'N': /* process name */
2468 appendstr = name;
2469 break;
2470 case 'P': /* process id */
2471 sprintf(buf, "%u", pid);
2472 appendstr = buf;
2473 break;
2474 case 'U': /* user id */
2475 sprintf(buf, "%u", uid);
2476 appendstr = buf;
2477 break;
2478 default:
2479 appendstr = "";
2480 log(LOG_ERR,
2481 "Unknown format character %c in `%s'\n",
2482 format[i], format);
2483 }
2484 l = strlen(appendstr);
2485 if ((n + l) >= MAXPATHLEN)
2486 goto toolong;
2487 memcpy(temp + n, appendstr, l);
2488 n += l;
2489 break;
2490 default:
2491 temp[n++] = format[i];
2492 }
2493 }
2494 if (format[i] != '\0')
2495 goto toolong;
2496 return (temp);
2497toolong:
2498 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
2499 (long)pid, name, (u_long)uid);
2500 free(temp, M_TEMP);
2501 return (NULL);
2502}
2503
2504/*
2505 * Dump a process' core. The main routine does some
2506 * policy checking, and creates the name of the coredump;
2507 * then it passes on a vnode and a size limit to the process-specific
2508 * coredump routine if there is one; if there _is not_ one, it returns
2509 * ENOSYS; otherwise it returns the error from the process-specific routine.
2510 */
2511
2512static int
2513coredump(struct thread *td)
2514{
2515 struct proc *p = td->td_proc;
2516 register struct vnode *vp;
2517 register struct ucred *cred = td->td_ucred;
2518 struct flock lf;
2519 struct nameidata nd;
2520 struct vattr vattr;
2521 int error, error1, flags, locked;
2522 struct mount *mp;
2523 char *name; /* name of corefile */
2524 off_t limit;
2525
2526 PROC_LOCK_ASSERT(p, MA_OWNED);
2527 _STOPEVENT(p, S_CORE, 0);
2528
2529 if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
2530 PROC_UNLOCK(p);
2531 return (EFAULT);
2532 }
2533
2534 /*
2535 * Note that the bulk of limit checking is done after
2536 * the corefile is created. The exception is if the limit
2537 * for corefiles is 0, in which case we don't bother
2538 * creating the corefile at all. This layout means that
2539 * a corefile is truncated instead of not being created,
2540 * if it is larger than the limit.
2541 */
2542 limit = (off_t)lim_cur(p, RLIMIT_CORE);
2543 PROC_UNLOCK(p);
2544 if (limit == 0)
2545 return (EFBIG);
2546
2547 mtx_lock(&Giant);
2548restart:
2549 name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
2550 if (name == NULL) {
2551 mtx_unlock(&Giant);
2552 return (EINVAL);
2553 }
2554 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
2555 flags = O_CREAT | FWRITE | O_NOFOLLOW;
2556 error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR, -1);
2557 free(name, M_TEMP);
2558 if (error) {
2559 mtx_unlock(&Giant);
2560 return (error);
2561 }
2562 NDFREE(&nd, NDF_ONLY_PNBUF);
2563 vp = nd.ni_vp;
2564
2565 /* Don't dump to non-regular files or files with links. */
2566 if (vp->v_type != VREG ||
2567 VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
2568 VOP_UNLOCK(vp, 0, td);
2569 error = EFAULT;
2570 goto out;
2571 }
2572
2573 VOP_UNLOCK(vp, 0, td);
2574 lf.l_whence = SEEK_SET;
2575 lf.l_start = 0;
2576 lf.l_len = 0;
2577 lf.l_type = F_WRLCK;
2578 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
2579
2580 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2581 lf.l_type = F_UNLCK;
2582 if (locked)
2583 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2584 if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
2585 return (error);
2586 if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
2587 return (error);
2588 goto restart;
2589 }
2590
2591 VATTR_NULL(&vattr);
2592 vattr.va_size = 0;
2593 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2594 VOP_LEASE(vp, td, cred, LEASE_WRITE);
2595 VOP_SETATTR(vp, &vattr, cred, td);
2596 VOP_UNLOCK(vp, 0, td);
2597 PROC_LOCK(p);
2598 p->p_acflag |= ACORE;
2599 PROC_UNLOCK(p);
2600
2601 error = p->p_sysent->sv_coredump ?
2602 p->p_sysent->sv_coredump(td, vp, limit) :
2603 ENOSYS;
2604
2605 if (locked) {
2606 lf.l_type = F_UNLCK;
2607 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2608 }
2609 vn_finished_write(mp);
2610out:
2611 error1 = vn_close(vp, FWRITE, cred, td);
2612 mtx_unlock(&Giant);
2613 if (error == 0)
2614 error = error1;
2615 return (error);
2616}
2617
2618/*
2619 * Nonexistent system call-- signal process (may want to handle it).
2620 * Flag error in case process won't see signal immediately (blocked or ignored).
2621 */
#ifndef _SYS_SYSPROTO_H_
/* Argument structure for nosys(); normally generated into sysproto.h. */
struct nosys_args {
	int	dummy;
};
#endif
2627/*
2628 * MPSAFE
2629 */
2630/* ARGSUSED */
2631int
2632nosys(td, args)
2633 struct thread *td;
2634 struct nosys_args *args;
2635{
2636 struct proc *p = td->td_proc;
2637
2638 PROC_LOCK(p);
2639 psignal(p, SIGSYS);
2640 PROC_UNLOCK(p);
2641 return (ENOSYS);
2642}
2643
2644/*
2645 * Send a SIGIO or SIGURG signal to a process or process group using
2646 * stored credentials rather than those of the current process.
2647 */
void
pgsigio(sigiop, sig, checkctty)
	struct sigio **sigiop;
	int sig, checkctty;
{
	struct sigio *sigio;

	/* Snapshot *sigiop under the SIGIO lock; it may be cleared racily. */
	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	if (sigio->sio_pgid > 0) {
		/* Positive pgid: target is a single process. */
		PROC_LOCK(sigio->sio_proc);
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
			psignal(sigio->sio_proc, sig);
		PROC_UNLOCK(sigio->sio_proc);
	} else if (sigio->sio_pgid < 0) {
		/* Negative pgid: target is a whole process group. */
		struct proc *p;

		PGRP_LOCK(sigio->sio_pgrp);
		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			/*
			 * Use the stored credentials, not the sender's, and
			 * optionally skip members without a controlling tty.
			 */
			if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(sigio->sio_pgrp);
	}
	SIGIO_UNLOCK();
}
2681
2682static int
2683filt_sigattach(struct knote *kn)
2684{
2685 struct proc *p = curproc;
2686
2687 kn->kn_ptr.p_proc = p;
2688 kn->kn_flags |= EV_CLEAR; /* automatically set */
2689
2690 PROC_LOCK(p);
2691 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2692 PROC_UNLOCK(p);
2693
2694 return (0);
2695}
2696
2697static void
2698filt_sigdetach(struct knote *kn)
2699{
2700 struct proc *p = kn->kn_ptr.p_proc;
2701
2702 PROC_LOCK(p);
2703 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2704 PROC_UNLOCK(p);
2705}
2706
2707/*
2708 * signal knotes are shared with proc knotes, so we apply a mask to
2709 * the hint in order to differentiate them from process hints. This
2710 * could be avoided by using a signal-specific knote list, but probably
2711 * isn't worth the trouble.
2712 */
2713static int
2714filt_signal(struct knote *kn, long hint)
2715{
2716
2717 if (hint & NOTE_SIGNAL) {
2718 hint &= ~NOTE_SIGNAL;
2719
2720 if (kn->kn_id == hint)
2721 kn->kn_data++;
2722 }
2723 return (kn->kn_data != 0);
2724}
2725
2726struct sigacts *
2727sigacts_alloc(void)
2728{
2729 struct sigacts *ps;
2730
2731 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
2732 ps->ps_refcnt = 1;
2733 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
2734 return (ps);
2735}
2736
2737void
2738sigacts_free(struct sigacts *ps)
2739{
2740
2741 mtx_lock(&ps->ps_mtx);
2742 ps->ps_refcnt--;
2743 if (ps->ps_refcnt == 0) {
2744 mtx_destroy(&ps->ps_mtx);
2745 free(ps, M_SUBPROC);
2746 } else
2747 mtx_unlock(&ps->ps_mtx);
2748}
2749
2750struct sigacts *
2751sigacts_hold(struct sigacts *ps)
2752{
2753 mtx_lock(&ps->ps_mtx);
2754 ps->ps_refcnt++;
2755 mtx_unlock(&ps->ps_mtx);
2756 return (ps);
2757}
2758
void
sigacts_copy(struct sigacts *dest, struct sigacts *src)
{

	/* Only safe when nobody else can see dest's signal state yet. */
	KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
	mtx_lock(&src->ps_mtx);
	/*
	 * Copy everything laid out before ps_refcnt, so dest keeps its own
	 * reference count (and any fields following it in the struct).
	 */
	bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
	mtx_unlock(&src->ps_mtx);
}
2768
2769int
2770sigacts_shared(struct sigacts *ps)
2771{
2772 int shared;
2773
2774 mtx_lock(&ps->ps_mtx);
2775 shared = ps->ps_refcnt > 1;
2776 mtx_unlock(&ps->ps_mtx);
2777 return (shared);
2778}
1429
1430/*
1431 * Send a signal to a process group.
1432 */
void
gsignal(pgid, sig)
	int pgid, sig;
{
	struct pgrp *pgrp;

	/* pgid 0 would mean "my own group"; this interface ignores it. */
	if (pgid != 0) {
		sx_slock(&proctree_lock);
		pgrp = pgfind(pgid);
		sx_sunlock(&proctree_lock);
		if (pgrp != NULL) {
			/*
			 * pgsignal() asserts the pgrp lock, which pgfind()
			 * acquired for us; release it when done.
			 */
			pgsignal(pgrp, sig, 0);
			PGRP_UNLOCK(pgrp);
		}
	}
}
1449
1450/*
1451 * Send a signal to a process group. If checktty is 1,
1452 * limit to members which have a controlling terminal.
1453 */
1454void
1455pgsignal(pgrp, sig, checkctty)
1456 struct pgrp *pgrp;
1457 int sig, checkctty;
1458{
1459 register struct proc *p;
1460
1461 if (pgrp) {
1462 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1463 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1464 PROC_LOCK(p);
1465 if (checkctty == 0 || p->p_flag & P_CONTROLT)
1466 psignal(p, sig);
1467 PROC_UNLOCK(p);
1468 }
1469 }
1470}
1471
1472/*
1473 * Send a signal caused by a trap to the current thread.
1474 * If it will be caught immediately, deliver it with correct code.
1475 * Otherwise, post it normally.
1476 *
1477 * MPSAFE
1478 */
void
trapsignal(struct thread *td, int sig, u_long code)
{
	struct sigacts *ps;
	struct proc *p;
	siginfo_t siginfo;
	int error;

	p = td->td_proc;
	if (td->td_pflags & TDP_SA) {
		/* KSE thread: make sure the mailbox pointer is current. */
		if (td->td_mailbox == NULL)
			thread_user_enter(p, td);
		PROC_LOCK(p);
		if (td->td_mailbox) {
			SIGDELSET(td->td_sigmask, sig);
			mtx_lock_spin(&sched_lock);
			/*
			 * Force scheduling an upcall, so UTS has chance to
			 * process the signal before thread runs again in
			 * userland.
			 */
			if (td->td_upcall)
				td->td_upcall->ku_flags |= KUF_DOUPCALL;
			mtx_unlock_spin(&sched_lock);
		} else {
			/* UTS caused a sync signal */
			p->p_code = code;	/* XXX for core dump/debugger */
			p->p_sig = sig;	/* XXX to verify code */
			sigexit(td, sig);
		}
	} else {
		PROC_LOCK(p);
	}
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	/*
	 * Fast path: signal is caught, unmasked and the process is not
	 * traced, so deliver it directly without a trip through tdsignal().
	 */
	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
	    !SIGISMEMBER(td->td_sigmask, sig)) {
		p->p_stats->p_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_PSIG))
			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &td->td_sigmask, code);
#endif
		if (!(td->td_pflags & TDP_SA))
			(*p->p_sysent->sv_sendsig)(
				ps->ps_sigact[_SIG_IDX(sig)], sig,
				&td->td_sigmask, code);
		else {
			/*
			 * Copy the siginfo into the thread's mailbox; locks
			 * must be dropped around the copyout.
			 */
			cpu_thread_siginfo(sig, code, &siginfo);
			mtx_unlock(&ps->ps_mtx);
			PROC_UNLOCK(p);
			error = copyout(&siginfo, &td->td_mailbox->tm_syncsig,
			    sizeof(siginfo));
			PROC_LOCK(p);
			/* UTS memory corrupted */
			if (error)
				sigexit(td, SIGILL);
			SIGADDSET(td->td_sigmask, sig);
			mtx_lock(&ps->ps_mtx);
		}
		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		mtx_unlock(&ps->ps_mtx);
	} else {
		/* Slow path: post the signal normally to this thread. */
		mtx_unlock(&ps->ps_mtx);
		p->p_code = code;	/* XXX for core dump/debugger */
		p->p_sig = sig;	/* XXX to verify code */
		tdsignal(td, sig, SIGTARGET_TD);
	}
	PROC_UNLOCK(p);
}
1561
static struct thread *
sigtd(struct proc *p, int sig, int prop)
{
	struct thread *td, *signal_td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * First find a thread in sigwait state and signal belongs to
	 * its wait set.  POSIX's arguments is that speed of delivering signal
	 * to sigwait thread is faster than delivering signal to user stack.
	 * If we can not find sigwait thread, then find the first thread in
	 * the proc that doesn't have this signal masked, an exception is
	 * if current thread is sending signal to its process, and it does not
	 * mask the signal, it should get the signal, this is another fast
	 * way to deliver signal.
	 */
	signal_td = NULL;
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_waitset != NULL &&
		    SIGISMEMBER(*(td->td_waitset), sig)) {
			mtx_unlock_spin(&sched_lock);
			return (td);
		}
		if (!SIGISMEMBER(td->td_sigmask, sig)) {
			/* Prefer curthread if it is in this process. */
			if (td == curthread)
				signal_td = curthread;
			else if (signal_td == NULL)
				signal_td = td;
		}
	}
	/* Everyone has the signal masked: fall back to the first thread. */
	if (signal_td == NULL)
		signal_td = FIRST_THREAD_IN_PROC(p);
	mtx_unlock_spin(&sched_lock);
	return (signal_td);
}
1599
1600/*
1601 * Send the signal to the process. If the signal has an action, the action
1602 * is usually performed by the target process rather than the caller; we add
1603 * the signal to the set of pending signals for the process.
1604 *
1605 * Exceptions:
1606 * o When a stop signal is sent to a sleeping process that takes the
1607 * default action, the process is stopped without awakening it.
1608 * o SIGCONT restarts stopped processes (or puts them back to sleep)
1609 * regardless of the signal action (eg, blocked or ignored).
1610 *
1611 * Other ignored signals are discarded immediately.
1612 *
1613 * MPSAFE
1614 */
1615void
1616psignal(struct proc *p, int sig)
1617{
1618 struct thread *td;
1619 int prop;
1620
1621 if (!_SIG_VALID(sig))
1622 panic("psignal(): invalid signal");
1623
1624 PROC_LOCK_ASSERT(p, MA_OWNED);
1625 prop = sigprop(sig);
1626
1627 /*
1628 * Find a thread to deliver the signal to.
1629 */
1630 td = sigtd(p, sig, prop);
1631
1632 tdsignal(td, sig, SIGTARGET_P);
1633}
1634
1635/*
1636 * MPSAFE
1637 */
void
tdsignal(struct thread *td, int sig, sigtarget_t target)
{
	sigset_t saved;
	struct proc *p = td->td_proc;

	/* For KSE processes, remember the pending set to detect changes. */
	if (p->p_flag & P_SA)
		saved = p->p_siglist;
	do_tdsignal(td, sig, target);
	if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
		if (SIGSETEQ(saved, p->p_siglist))
			return;
		else {
			/* pending set changed: flag it and wake any waiter */
			p->p_flag |= P_SIGEVENT;
			wakeup(&p->p_siglist);
		}
	}
}
1657
static void
do_tdsignal(struct thread *td, int sig, sigtarget_t target)
{
	struct proc *p;
	register sig_t action;
	sigset_t *siglist;
	struct thread *td0;
	register int prop;
	struct sigacts *ps;

	if (!_SIG_VALID(sig))
		panic("do_tdsignal(): invalid signal");

	p = td->td_proc;
	ps = p->p_sigacts;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/* Let any kqueue EVFILT_SIGNAL watchers see this signal. */
	KNOTE(&p->p_klist, NOTE_SIGNAL | sig);

	prop = sigprop(sig);

	/*
	 * If the signal is blocked and not destined for this thread, then
	 * assign it to the process so that we can find it later in the first
	 * thread that unblocks it.  Otherwise, assign it to this thread now.
	 */
	if (target == SIGTARGET_TD) {
		siglist = &td->td_siglist;
	} else {
		if (!SIGISMEMBER(td->td_sigmask, sig))
			siglist = &td->td_siglist;
		else if (td->td_waitset != NULL &&
			SIGISMEMBER(*(td->td_waitset), sig))
			siglist = &td->td_siglist;
		else
			siglist = &p->p_siglist;
	}

	/*
	 * If proc is traced, always give parent a chance;
	 * if signal event is tracked by procfs, give *that*
	 * a chance, as well.
	 */
	if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
		action = SIG_DFL;
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		mtx_lock(&ps->ps_mtx);
		if (SIGISMEMBER(ps->ps_sigignore, sig) ||
		    (p->p_flag & P_WEXIT)) {
			mtx_unlock(&ps->ps_mtx);
			return;
		}
		/* Masked and not in the sigwait set: hold for later. */
		if (((td->td_waitset == NULL) &&
		    SIGISMEMBER(td->td_sigmask, sig)) ||
		    ((td->td_waitset != NULL) &&
		    SIGISMEMBER(td->td_sigmask, sig) &&
		    !SIGISMEMBER(*(td->td_waitset), sig)))
			action = SIG_HOLD;
		else if (SIGISMEMBER(ps->ps_sigcatch, sig))
			action = SIG_CATCH;
		else
			action = SIG_DFL;
		mtx_unlock(&ps->ps_mtx);
	}

	/* A continue signal cancels all pending stop signals, and vice versa. */
	if (prop & SA_CONT) {
		SIG_STOPSIGMASK(p->p_siglist);
		/*
		 * XXX Should investigate leaving STOP and CONT sigs only in
		 * the proc's siglist.
		 */
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td0)
			SIG_STOPSIGMASK(td0->td_siglist);
		mtx_unlock_spin(&sched_lock);
	}

	if (prop & SA_STOP) {
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		if ((prop & SA_TTYSTOP) &&
		    (p->p_pgrp->pg_jobc == 0) &&
		    (action == SIG_DFL))
			return;
		SIG_CONTSIGMASK(p->p_siglist);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td0)
			SIG_CONTSIGMASK(td0->td_siglist);
		mtx_unlock_spin(&sched_lock);
		p->p_flag &= ~P_CONTINUED;
	}

	SIGADDSET(*siglist, sig);
	signotify(td);			/* uses schedlock */
	/* Delivery of a non-held signal to this thread ends its sigwait. */
	if (siglist == &td->td_siglist && (td->td_waitset != NULL) &&
	    action != SIG_HOLD) {
		td->td_waitset = NULL;
	}

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD &&
	    !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
		return;
	/*
	 * Some signals have a process-wide effect and a per-thread
	 * component.  Most processing occurs when the process next
	 * tries to cross the user boundary, however there are some
	 * times when processing needs to be done immediately, such as
	 * waking up threads so that they can cross the user boundary.
	 * We try do the per-process part here.
	 */
	if (P_SHOULDSTOP(p)) {
		/*
		 * The process is in stopped mode. All the threads should be
		 * either winding down or already on the suspended queue.
		 */
		if (p->p_flag & P_TRACED) {
			/*
			 * The traced process is already stopped,
			 * so no further action is necessary.
			 * No signal can restart us.
			 */
			goto out;
		}

		if (sig == SIGKILL) {
			/*
			 * SIGKILL sets process running.
			 * It will die elsewhere.
			 * All threads must be restarted.
			 */
			p->p_flag &= ~P_STOPPED;
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in siglist as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			p->p_flag &= ~P_STOPPED_SIG;
			p->p_flag |= P_CONTINUED;
			if (action == SIG_DFL) {
				SIGDELSET(*siglist, sig);
			} else if (action == SIG_CATCH) {
				/*
				 * The process wants to catch it so it needs
				 * to run at least one thread, but which one?
				 * It would seem that the answer would be to
				 * run an upcall in the next KSE to run, and
				 * deliver the signal that way. In a NON KSE
				 * process, we need to make sure that the
				 * single thread is runnable asap.
				 * XXXKSE for now however, make them all run.
				 */
				goto runfast;
			}
			/*
			 * The signal is not ignored or caught.
			 */
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again
			 * (If we did the shell could get confused).
			 * Just make sure the signal STOP bit set.
			 */
			p->p_flag |= P_STOPPED_SIG;
			SIGDELSET(*siglist, sig);
			goto out;
		}

		/*
		 * All other kinds of signals:
		 * If a thread is sleeping interruptibly, simulate a
		 * wakeup so that when it is continued it will be made
		 * runnable and can look at the signal.  However, don't make
		 * the PROCESS runnable, leave it stopped.
		 * It may run a bit until it hits a thread_suspend_check().
		 */
		mtx_lock_spin(&sched_lock);
		if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
			sleepq_abort(td);
		mtx_unlock_spin(&sched_lock);
		goto out;
		/*
		 * Mutexes are short lived. Threads waiting on them will
		 * hit thread_suspend_check() soon.
		 */
	} else if (p->p_state == PRS_NORMAL) {
		if ((p->p_flag & P_TRACED) || (action != SIG_DFL) ||
			!(prop & SA_STOP)) {
			/* Just kick the chosen thread; no stop is needed. */
			mtx_lock_spin(&sched_lock);
			tdsigwakeup(td, sig, action);
			mtx_unlock_spin(&sched_lock);
			goto out;
		}
		if (prop & SA_STOP) {
			/* Don't stop while the parent waits in vfork(). */
			if (p->p_flag & P_PPWAIT)
				goto out;
			p->p_flag |= P_STOPPED_SIG;
			p->p_xstat = sig;
			mtx_lock_spin(&sched_lock);
			FOREACH_THREAD_IN_PROC(p, td0) {
				if (TD_IS_SLEEPING(td0) &&
				    (td0->td_flags & TDF_SINTR) &&
				    !TD_IS_SUSPENDED(td0)) {
					thread_suspend_one(td0);
				} else if (td != td0) {
					td0->td_flags |= TDF_ASTPENDING;
				}
			}
			thread_stopped(p);
			/*
			 * Everybody is stopped: the stop signal has served
			 * its purpose, clear it from all pending sets.
			 */
			if (p->p_numthreads == p->p_suspcount) {
				SIGDELSET(p->p_siglist, p->p_xstat);
				FOREACH_THREAD_IN_PROC(p, td0)
					SIGDELSET(td0->td_siglist, p->p_xstat);
			}
			mtx_unlock_spin(&sched_lock);
			goto out;
		}
		else
			goto runfast;
		/* NOTREACHED */
	} else {
		/* Not in "NORMAL" state. discard the signal. */
		SIGDELSET(*siglist, sig);
		goto out;
	}

	/*
	 * The process is not stopped so we need to apply the signal to all the
	 * running threads.
	 */

runfast:
	mtx_lock_spin(&sched_lock);
	tdsigwakeup(td, sig, action);
	thread_unsuspend(p);
	mtx_unlock_spin(&sched_lock);
out:
	/* If we jump here, sched_lock should not be owned. */
	mtx_assert(&sched_lock, MA_NOTOWNED);
}
1927
1928/*
1929 * The force of a signal has been directed against a single
1930 * thread. We need to see what we can do about knocking it
1931 * out of any sleep it may be in etc.
1932 */
static void
tdsigwakeup(struct thread *td, int sig, sig_t action)
{
	struct proc *p = td->td_proc;
	register int prop;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	prop = sigprop(sig);

	/*
	 * Bring the priority of a thread up if we want it to get
	 * killed in this lifetime.
	 */
	if (action == SIG_DFL && (prop & SA_KILL)) {
		if (td->td_priority > PUSER)
			td->td_priority = PUSER;
	}

	if (TD_ON_SLEEPQ(td)) {
		/*
		 * If thread is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((td->td_flags & TDF_SINTR) == 0)
			return;
		/*
		 * Process is sleeping and traced.  Make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for its parent.
		 */
		if (p->p_flag & P_TRACED) {
			p->p_flag &= ~P_STOPPED_TRACE;
		} else {
			/*
			 * If SIGCONT is default (or ignored) and process is
			 * asleep, we are finished; the process should not
			 * be awakened.
			 */
			if ((prop & SA_CONT) && action == SIG_DFL) {
				SIGDELSET(p->p_siglist, sig);
				/*
				 * It may be on either list in this state.
				 * Remove from both for now.
				 */
				SIGDELSET(td->td_siglist, sig);
				return;
			}

			/*
			 * Give low priority threads a better chance to run.
			 */
			if (td->td_priority > PUSER)
				td->td_priority = PUSER;
		}
		/* Knock the thread off its sleep queue so it sees the signal. */
		sleepq_abort(td);
	} else {
		/*
		 * Other states do nothing with the signal immediately,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
#ifdef SMP
		if (TD_IS_RUNNING(td) && td != curthread)
			forward_signal(td);
#endif
	}
}
2003
void
ptracestop(struct thread *td, int sig)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
	    &p->p_mtx.mtx_object, "Stopping for traced signal");

	/* Publish the stopping signal where the debugger/wait() finds it. */
	p->p_xstat = sig;
	PROC_LOCK(p->p_pptr);
	psignal(p->p_pptr, SIGCHLD);
	PROC_UNLOCK(p->p_pptr);
	stop(p);
	/*
	 * Suspend this thread and switch away; sched_lock is held across
	 * mi_switch() and released afterwards, per the mi_switch protocol.
	 */
	mtx_lock_spin(&sched_lock);
	thread_suspend_one(td);
	PROC_UNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_INVOL);
	mtx_unlock_spin(&sched_lock);
	PICKUP_GIANT();
}
2026
2027/*
2028 * If the current process has received a signal (should be caught or cause
2029 * termination, should interrupt current syscall), return the signal number.
2030 * Stop signals with default action are processed immediately, then cleared;
2031 * they aren't returned. This is checked after each entry to the system for
2032 * a syscall or trap (though this can usually be done without calling issignal
2033 * by checking the pending signal masks in cursig.) The normal call
2034 * sequence is
2035 *
2036 * while (sig = cursig(curthread))
2037 * postsig(sig);
2038 */
static int
issignal(td)
	struct thread *td;
{
	struct proc *p;
	struct sigacts *ps;
	sigset_t sigpending;
	int sig, prop;
	struct thread *td0;

	p = td->td_proc;
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	for (;;) {
		int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);

		/* Deliverable set: pending signals not blocked by the mask. */
		sigpending = td->td_siglist;
		SIGSETNAND(sigpending, td->td_sigmask);

		/*
		 * While the parent is waiting in vfork() (P_PPWAIT), do not
		 * act on stop signals.
		 */
		if (p->p_flag & P_PPWAIT)
			SIG_STOPSIGMASK(sigpending);
		if (SIGISEMPTY(sigpending))	/* no signal to send */
			return (0);
		sig = sig_ffs(&sigpending);

		/* Report the signal to a stopevent()-based watcher, if any. */
		if (p->p_stops & S_SIG) {
			mtx_unlock(&ps->ps_mtx);
			stopevent(p, S_SIG, sig);
			mtx_lock(&ps->ps_mtx);
		}

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
			SIGDELSET(td->td_siglist, sig);
			continue;
		}
		if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop.
			 * ptracestop() drops the proc lock and the sigacts
			 * lock is released around it; both are re-taken here.
			 */
			mtx_unlock(&ps->ps_mtx);
			ptracestop(td, sig);
			PROC_LOCK(p);
			mtx_lock(&ps->ps_mtx);

			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			SIGDELSET(td->td_siglist, sig);	/* clear old signal */
			sig = p->p_xstat;
			if (sig == 0)
				continue;

			/*
			 * If the traced bit got turned off, go back up
			 * to the top to rescan signals. This ensures
			 * that p_sig* and p_sigact are consistent.
			 */
			if ((p->p_flag & P_TRACED) == 0)
				continue;

			/*
			 * Put the new signal into td_siglist. If the
			 * signal is being masked, look for other signals.
			 */
			SIGADDSET(td->td_siglist, sig);
			if (SIGISMEMBER(td->td_sigmask, sig))
				continue;
			signotify(td);
		}

		prop = sigprop(sig);

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {

		case (intptr_t)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %lu) got signal %d\n",
				    (u_long)p->p_pid, sig);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal. However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flag & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				mtx_unlock(&ps->ps_mtx);
				WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
				    &p->p_mtx.mtx_object, "Catching SIGSTOP");
				p->p_flag |= P_STOPPED_SIG;
				p->p_xstat = sig;
				/*
				 * Suspend interruptibly-sleeping siblings now;
				 * other running threads are asked to stop via
				 * a pending AST.
				 */
				mtx_lock_spin(&sched_lock);
				FOREACH_THREAD_IN_PROC(p, td0) {
					if (TD_IS_SLEEPING(td0) &&
					    (td0->td_flags & TDF_SINTR) &&
					    !TD_IS_SUSPENDED(td0)) {
						thread_suspend_one(td0);
					} else if (td != td0) {
						td0->td_flags |= TDF_ASTPENDING;
					}
				}
				/*
				 * thread_stopped() may notify the parent;
				 * then suspend ourselves and switch away.
				 */
				thread_stopped(p);
				thread_suspend_one(td);
				PROC_UNLOCK(p);
				DROP_GIANT();
				mi_switch(SW_INVOL);
				mtx_unlock_spin(&sched_lock);
				PICKUP_GIANT();
				PROC_LOCK(p);
				mtx_lock(&ps->ps_mtx);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				return (sig);
			/*NOTREACHED*/

		case (intptr_t)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flag & P_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			return (sig);
		}
		SIGDELSET(td->td_siglist, sig);		/* take the signal! */
	}
	/* NOTREACHED */
}
2209
2210/*
2211 * Put the argument process into the stopped state and notify the parent
2212 * via wakeup. Signals are handled elsewhere. The process must not be
2213 * on the run queue. Must be called with the proc p locked.
2214 */
2215static void
2216stop(struct proc *p)
2217{
2218
2219 PROC_LOCK_ASSERT(p, MA_OWNED);
2220 p->p_flag |= P_STOPPED_SIG;
2221 p->p_flag &= ~P_WAITED;
2222 wakeup(p->p_pptr);
2223}
2224
2225/*
2226 * MPSAFE
2227 */
void
thread_stopped(struct proc *p)
{
	struct proc *p1 = curthread->td_proc;
	struct sigacts *ps;
	int n;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * Count suspended threads; if the caller belongs to p it is about
	 * to suspend itself, so count it too.
	 */
	n = p->p_suspcount;
	if (p == p1)
		n++;
	if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
		/*
		 * Every thread is (about to be) stopped: record the stop
		 * and notify the parent with SIGCHLD unless it requested
		 * PS_NOCLDSTOP.  sched_lock (a spin lock) must be dropped
		 * around the sleepable proc/sigacts locks and is re-taken
		 * before returning, as the caller expects.
		 */
		mtx_unlock_spin(&sched_lock);
		stop(p);
		PROC_LOCK(p->p_pptr);
		ps = p->p_pptr->p_sigacts;
		mtx_lock(&ps->ps_mtx);
		if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
			mtx_unlock(&ps->ps_mtx);
			psignal(p->p_pptr, SIGCHLD);
		} else
			mtx_unlock(&ps->ps_mtx);
		PROC_UNLOCK(p->p_pptr);
		mtx_lock_spin(&sched_lock);
	}
}
2255
2256/*
2257 * Take the action for the specified signal
2258 * from the current set of pending signals.
2259 */
void
postsig(sig)
	register int sig;
{
	struct thread *td = curthread;
	register struct proc *p = td->td_proc;
	struct sigacts *ps;
	sig_t action;
	sigset_t returnmask;
	int code;

	KASSERT(sig != 0, ("postsig"));

	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);
	/* Consume the signal from this thread's pending set. */
	SIGDELSET(td->td_siglist, sig);
	action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
	if (KTRPOINT(td, KTR_PSIG))
		ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
		    &td->td_oldsigmask : &td->td_sigmask, 0);
#endif
	if (p->p_stops & S_SIG) {
		mtx_unlock(&ps->ps_mtx);
		stopevent(p, S_SIG, sig);
		mtx_lock(&ps->ps_mtx);
	}

	/*
	 * Threads flagged TDP_SA with a mailbox deliver signals through
	 * thread_signal_add() below rather than taking the default action
	 * here (except for SIGKILL, handled in the else branch).
	 */
	if (!(td->td_pflags & TDP_SA && td->td_mailbox) &&
	    action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process. (Other cases were ignored above.)
		 */
		mtx_unlock(&ps->ps_mtx);
		sigexit(td, sig);
		/* NOTREACHED */
	} else {
		if (td->td_pflags & TDP_SA && td->td_mailbox) {
			/* SIGKILL cannot be deferred to the mailbox. */
			if (sig == SIGKILL) {
				mtx_unlock(&ps->ps_mtx);
				sigexit(td, sig);
			}
		}

		/*
		 * If we get here, the signal must be caught.
		 */
		KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
		    ("postsig action"));
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigsuspend. Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigsuspend is what we want
		 * restored after the signal processing is completed.
		 */
		if (td->td_pflags & TDP_OLDMASK) {
			returnmask = td->td_oldsigmask;
			td->td_pflags &= ~TDP_OLDMASK;
		} else
			returnmask = td->td_sigmask;

		/* Block the catchmask signals plus (usually) this one. */
		SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(td->td_sigmask, sig);

		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * SA_RESETHAND: restore the default disposition.
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(ps->ps_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(ps->ps_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
		p->p_stats->p_ru.ru_nsignals++;
		/* Forward the trap code only if it belongs to this signal. */
		if (p->p_sig != sig) {
			code = 0;
		} else {
			code = p->p_code;
			p->p_code = 0;
			p->p_sig = 0;
		}
		if (td->td_pflags & TDP_SA && td->td_mailbox)
			thread_signal_add(curthread, sig);
		else
			(*p->p_sysent->sv_sendsig)(action, sig,
			    &returnmask, code);
	}
}
2355
2356/*
2357 * Kill the current process for stated reason.
2358 */
2359void
2360killproc(p, why)
2361 struct proc *p;
2362 char *why;
2363{
2364
2365 PROC_LOCK_ASSERT(p, MA_OWNED);
2366 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
2367 p, p->p_pid, p->p_comm);
2368 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
2369 p->p_ucred ? p->p_ucred->cr_uid : -1, why);
2370 psignal(p, SIGKILL);
2371}
2372
2373/*
2374 * Force the current process to exit with the specified signal, dumping core
2375 * if appropriate. We bypass the normal tests for masked and caught signals,
2376 * allowing unrecoverable failures to terminate the process without changing
2377 * signal state. Mark the accounting record with the signal termination.
2378 * If dumping core, save the signal number for the debugger. Calls exit and
2379 * does not return.
2380 *
2381 * MPSAFE
2382 */
void
sigexit(td, sig)
	struct thread *td;
	int sig;
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/* Mark the accounting record as a signal termination. */
	p->p_acflag |= AXSIG;
	if (sigprop(sig) & SA_CORE) {
		/* Save the signal number for the debugger. */
		p->p_sig = sig;
		/*
		 * Log signals which would cause core dumps
		 * (Log as LOG_INFO to appease those who don't want
		 * these messages.)
		 * XXX : Todo, as well as euid, write out ruid too
		 * Note that coredump() drops proc lock.
		 */
		if (coredump(td) == 0)
			sig |= WCOREFLAG;
		if (kern_logsigexit)
			log(LOG_INFO,
			    "pid %d (%s), uid %d: exited on signal %d%s\n",
			    p->p_pid, p->p_comm,
			    td->td_ucred ? td->td_ucred->cr_uid : -1,
			    sig &~ WCOREFLAG,
			    sig & WCOREFLAG ? " (core dumped)" : "");
	} else
		PROC_UNLOCK(p);
	/*
	 * The proc lock is released on both paths above (coredump()
	 * drops it on the SA_CORE path).  exit1() does not return.
	 */
	exit1(td, W_EXITCODE(0, sig));
	/* NOTREACHED */
}
2415
/* Core file name template, expanded by expand_name(); settable via sysctl. */
static char corefilename[MAXPATHLEN+1] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
	      sizeof(corefilename), "process corefile name format string");
2419
2420/*
2421 * expand_name(name, uid, pid)
2422 * Expand the name described in corefilename, using name, uid, and pid.
2423 * corefilename is a printf-like string, with three format specifiers:
2424 * %N name of process ("name")
2425 * %P process id (pid)
2426 * %U user id (uid)
2427 * For example, "%N.core" is the default; they can be disabled completely
2428 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
2429 * This is controlled by the sysctl variable kern.corefile (see above).
2430 */
2431
2432static char *
2433expand_name(name, uid, pid)
2434 const char *name;
2435 uid_t uid;
2436 pid_t pid;
2437{
2438 const char *format, *appendstr;
2439 char *temp;
2440 char buf[11]; /* Buffer for pid/uid -- max 4B */
2441 size_t i, l, n;
2442
2443 format = corefilename;
2444 temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
2445 if (temp == NULL)
2446 return (NULL);
2447 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
2448 switch (format[i]) {
2449 case '%': /* Format character */
2450 i++;
2451 switch (format[i]) {
2452 case '%':
2453 appendstr = "%";
2454 break;
2455 case 'N': /* process name */
2456 appendstr = name;
2457 break;
2458 case 'P': /* process id */
2459 sprintf(buf, "%u", pid);
2460 appendstr = buf;
2461 break;
2462 case 'U': /* user id */
2463 sprintf(buf, "%u", uid);
2464 appendstr = buf;
2465 break;
2466 default:
2467 appendstr = "";
2468 log(LOG_ERR,
2469 "Unknown format character %c in `%s'\n",
2470 format[i], format);
2471 }
2472 l = strlen(appendstr);
2473 if ((n + l) >= MAXPATHLEN)
2474 goto toolong;
2475 memcpy(temp + n, appendstr, l);
2476 n += l;
2477 break;
2478 default:
2479 temp[n++] = format[i];
2480 }
2481 }
2482 if (format[i] != '\0')
2483 goto toolong;
2484 return (temp);
2485toolong:
2486 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n",
2487 (long)pid, name, (u_long)uid);
2488 free(temp, M_TEMP);
2489 return (NULL);
2490}
2491
2492/*
2493 * Dump a process' core. The main routine does some
2494 * policy checking, and creates the name of the coredump;
2495 * then it passes on a vnode and a size limit to the process-specific
2496 * coredump routine if there is one; if there _is not_ one, it returns
2497 * ENOSYS; otherwise it returns the error from the process-specific routine.
2498 */
2499
2500static int
2501coredump(struct thread *td)
2502{
2503 struct proc *p = td->td_proc;
2504 register struct vnode *vp;
2505 register struct ucred *cred = td->td_ucred;
2506 struct flock lf;
2507 struct nameidata nd;
2508 struct vattr vattr;
2509 int error, error1, flags, locked;
2510 struct mount *mp;
2511 char *name; /* name of corefile */
2512 off_t limit;
2513
2514 PROC_LOCK_ASSERT(p, MA_OWNED);
2515 _STOPEVENT(p, S_CORE, 0);
2516
2517 if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
2518 PROC_UNLOCK(p);
2519 return (EFAULT);
2520 }
2521
2522 /*
2523 * Note that the bulk of limit checking is done after
2524 * the corefile is created. The exception is if the limit
2525 * for corefiles is 0, in which case we don't bother
2526 * creating the corefile at all. This layout means that
2527 * a corefile is truncated instead of not being created,
2528 * if it is larger than the limit.
2529 */
2530 limit = (off_t)lim_cur(p, RLIMIT_CORE);
2531 PROC_UNLOCK(p);
2532 if (limit == 0)
2533 return (EFBIG);
2534
2535 mtx_lock(&Giant);
2536restart:
2537 name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid);
2538 if (name == NULL) {
2539 mtx_unlock(&Giant);
2540 return (EINVAL);
2541 }
2542 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
2543 flags = O_CREAT | FWRITE | O_NOFOLLOW;
2544 error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR, -1);
2545 free(name, M_TEMP);
2546 if (error) {
2547 mtx_unlock(&Giant);
2548 return (error);
2549 }
2550 NDFREE(&nd, NDF_ONLY_PNBUF);
2551 vp = nd.ni_vp;
2552
2553 /* Don't dump to non-regular files or files with links. */
2554 if (vp->v_type != VREG ||
2555 VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
2556 VOP_UNLOCK(vp, 0, td);
2557 error = EFAULT;
2558 goto out;
2559 }
2560
2561 VOP_UNLOCK(vp, 0, td);
2562 lf.l_whence = SEEK_SET;
2563 lf.l_start = 0;
2564 lf.l_len = 0;
2565 lf.l_type = F_WRLCK;
2566 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
2567
2568 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2569 lf.l_type = F_UNLCK;
2570 if (locked)
2571 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2572 if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
2573 return (error);
2574 if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
2575 return (error);
2576 goto restart;
2577 }
2578
2579 VATTR_NULL(&vattr);
2580 vattr.va_size = 0;
2581 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2582 VOP_LEASE(vp, td, cred, LEASE_WRITE);
2583 VOP_SETATTR(vp, &vattr, cred, td);
2584 VOP_UNLOCK(vp, 0, td);
2585 PROC_LOCK(p);
2586 p->p_acflag |= ACORE;
2587 PROC_UNLOCK(p);
2588
2589 error = p->p_sysent->sv_coredump ?
2590 p->p_sysent->sv_coredump(td, vp, limit) :
2591 ENOSYS;
2592
2593 if (locked) {
2594 lf.l_type = F_UNLCK;
2595 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
2596 }
2597 vn_finished_write(mp);
2598out:
2599 error1 = vn_close(vp, FWRITE, cred, td);
2600 mtx_unlock(&Giant);
2601 if (error == 0)
2602 error = error1;
2603 return (error);
2604}
2605
2606/*
2607 * Nonexistent system call-- signal process (may want to handle it).
2608 * Flag error in case process won't see signal immediately (blocked or ignored).
2609 */
2610#ifndef _SYS_SYSPROTO_H_
2611struct nosys_args {
2612 int dummy;
2613};
2614#endif
2615/*
2616 * MPSAFE
2617 */
2618/* ARGSUSED */
2619int
2620nosys(td, args)
2621 struct thread *td;
2622 struct nosys_args *args;
2623{
2624 struct proc *p = td->td_proc;
2625
2626 PROC_LOCK(p);
2627 psignal(p, SIGSYS);
2628 PROC_UNLOCK(p);
2629 return (ENOSYS);
2630}
2631
2632/*
2633 * Send a SIGIO or SIGURG signal to a process or process group using
2634 * stored credentials rather than those of the current process.
2635 */
void
pgsigio(sigiop, sig, checkctty)
	struct sigio **sigiop;
	int sig, checkctty;
{
	struct sigio *sigio;

	/* The SIGIO lock stabilizes *sigiop and the sigio it points to. */
	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	/*
	 * Positive sio_pgid targets a single process; negative targets
	 * every member of a process group.  CANSIGIO() checks the stored
	 * credentials against each recipient, and with checkctty set a
	 * group member is signaled only if it has a controlling terminal.
	 */
	if (sigio->sio_pgid > 0) {
		PROC_LOCK(sigio->sio_proc);
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
			psignal(sigio->sio_proc, sig);
		PROC_UNLOCK(sigio->sio_proc);
	} else if (sigio->sio_pgid < 0) {
		struct proc *p;

		PGRP_LOCK(sigio->sio_pgrp);
		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
				psignal(p, sig);
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(sigio->sio_pgrp);
	}
	SIGIO_UNLOCK();
}
2669
2670static int
2671filt_sigattach(struct knote *kn)
2672{
2673 struct proc *p = curproc;
2674
2675 kn->kn_ptr.p_proc = p;
2676 kn->kn_flags |= EV_CLEAR; /* automatically set */
2677
2678 PROC_LOCK(p);
2679 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2680 PROC_UNLOCK(p);
2681
2682 return (0);
2683}
2684
2685static void
2686filt_sigdetach(struct knote *kn)
2687{
2688 struct proc *p = kn->kn_ptr.p_proc;
2689
2690 PROC_LOCK(p);
2691 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2692 PROC_UNLOCK(p);
2693}
2694
2695/*
2696 * signal knotes are shared with proc knotes, so we apply a mask to
2697 * the hint in order to differentiate them from process hints. This
2698 * could be avoided by using a signal-specific knote list, but probably
2699 * isn't worth the trouble.
2700 */
2701static int
2702filt_signal(struct knote *kn, long hint)
2703{
2704
2705 if (hint & NOTE_SIGNAL) {
2706 hint &= ~NOTE_SIGNAL;
2707
2708 if (kn->kn_id == hint)
2709 kn->kn_data++;
2710 }
2711 return (kn->kn_data != 0);
2712}
2713
2714struct sigacts *
2715sigacts_alloc(void)
2716{
2717 struct sigacts *ps;
2718
2719 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
2720 ps->ps_refcnt = 1;
2721 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
2722 return (ps);
2723}
2724
2725void
2726sigacts_free(struct sigacts *ps)
2727{
2728
2729 mtx_lock(&ps->ps_mtx);
2730 ps->ps_refcnt--;
2731 if (ps->ps_refcnt == 0) {
2732 mtx_destroy(&ps->ps_mtx);
2733 free(ps, M_SUBPROC);
2734 } else
2735 mtx_unlock(&ps->ps_mtx);
2736}
2737
2738struct sigacts *
2739sigacts_hold(struct sigacts *ps)
2740{
2741 mtx_lock(&ps->ps_mtx);
2742 ps->ps_refcnt++;
2743 mtx_unlock(&ps->ps_mtx);
2744 return (ps);
2745}
2746
2747void
2748sigacts_copy(struct sigacts *dest, struct sigacts *src)
2749{
2750
2751 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
2752 mtx_lock(&src->ps_mtx);
2753 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
2754 mtx_unlock(&src->ps_mtx);
2755}
2756
2757int
2758sigacts_shared(struct sigacts *ps)
2759{
2760 int shared;
2761
2762 mtx_lock(&ps->ps_mtx);
2763 shared = ps->ps_refcnt > 1;
2764 mtx_unlock(&ps->ps_mtx);
2765 return (shared);
2766}