1/*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD$");
39
40#include "opt_compat.h"
41#include "opt_kdtrace.h"
42#include "opt_ktrace.h"
43#include "opt_core.h"
44#include "opt_procdesc.h"
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/signalvar.h>
49#include <sys/vnode.h>
50#include <sys/acct.h>
51#include <sys/capability.h>
52#include <sys/condvar.h>
53#include <sys/event.h>
54#include <sys/fcntl.h>
55#include <sys/imgact.h>
56#include <sys/kernel.h>
57#include <sys/ktr.h>
58#include <sys/ktrace.h>
59#include <sys/lock.h>
60#include <sys/malloc.h>
61#include <sys/mutex.h>
62#include <sys/namei.h>
63#include <sys/proc.h>
64#include <sys/procdesc.h>
65#include <sys/posix4.h>
66#include <sys/pioctl.h>
67#include <sys/racct.h>
68#include <sys/resourcevar.h>
69#include <sys/sdt.h>
70#include <sys/sbuf.h>
71#include <sys/sleepqueue.h>
72#include <sys/smp.h>
73#include <sys/stat.h>
74#include <sys/sx.h>
75#include <sys/syscallsubr.h>
76#include <sys/sysctl.h>
77#include <sys/sysent.h>
78#include <sys/syslog.h>
79#include <sys/sysproto.h>
80#include <sys/timers.h>
81#include <sys/unistd.h>
82#include <sys/wait.h>
83#include <vm/vm.h>
84#include <vm/vm_extern.h>
85#include <vm/uma.h>
86
87#include <sys/jail.h>
88
89#include <machine/cpu.h>
90
91#include <security/audit/audit.h>
92
93#define	ONSIG	32		/* NSIG for osig* syscalls.  XXX. */
94
95SDT_PROVIDER_DECLARE(proc);
96SDT_PROBE_DEFINE3(proc, kernel, , signal__send, "struct thread *",
97    "struct proc *", "int");
98SDT_PROBE_DEFINE2(proc, kernel, , signal__clear, "int",
99    "ksiginfo_t *");
100SDT_PROBE_DEFINE3(proc, kernel, , signal__discard,
101    "struct thread *", "struct proc *", "int");
102
103static int	coredump(struct thread *);
104static char	*expand_name(const char *, uid_t, pid_t, struct thread *, int);
105static int	killpg1(struct thread *td, int sig, int pgid, int all,
106		    ksiginfo_t *ksi);
107static int	issignal(struct thread *td, int stop_allowed);
108static int	sigprop(int sig);
109static void	tdsigwakeup(struct thread *, int, sig_t, int);
110static void	sig_suspend_threads(struct thread *, struct proc *, int);
111static int	filt_sigattach(struct knote *kn);
112static void	filt_sigdetach(struct knote *kn);
113static int	filt_signal(struct knote *kn, long hint);
114static struct thread *sigtd(struct proc *p, int sig, int prop);
115static void	sigqueue_start(void);
116
117static uma_zone_t	ksiginfo_zone = NULL;
118struct filterops sig_filtops = {
119	.f_isfd = 0,
120	.f_attach = filt_sigattach,
121	.f_detach = filt_sigdetach,
122	.f_event = filt_signal,
123};
124
125static int	kern_logsigexit = 1;
126SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
127    &kern_logsigexit, 0,
128    "Log processes quitting on abnormal signals to syslog(3)");
129
130static int	kern_forcesigexit = 1;
131SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
132    &kern_forcesigexit, 0, "Force trap signal to be handled");
133
134static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0,
135    "POSIX real time signal");
136
137static int	max_pending_per_proc = 128;
138SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
139    &max_pending_per_proc, 0, "Max pending signals per proc");
140
141static int	preallocate_siginfo = 1024;
142TUNABLE_INT("kern.sigqueue.preallocate", &preallocate_siginfo);
143SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RD,
144    &preallocate_siginfo, 0, "Preallocated signal memory size");
145
146static int	signal_overflow = 0;
147SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
148    &signal_overflow, 0, "Number of signals overflew");
149
150static int	signal_alloc_fail = 0;
151SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
152    &signal_alloc_fail, 0, "signals failed to be allocated");
153
154SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
155
156/*
157 * Policy -- Can ucred cr1 send SIGIO to process cr2?
158 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
159 * in the right situations.
160 */
161#define CANSIGIO(cr1, cr2) \
162	((cr1)->cr_uid == 0 || \
163	    (cr1)->cr_ruid == (cr2)->cr_ruid || \
164	    (cr1)->cr_uid == (cr2)->cr_ruid || \
165	    (cr1)->cr_ruid == (cr2)->cr_uid || \
166	    (cr1)->cr_uid == (cr2)->cr_uid)
167
168static int	sugid_coredump;
169SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
170    &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
171
172static int	do_coredump = 1;
173SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
174	&do_coredump, 0, "Enable/Disable coredumps");
175
176static int	set_core_nodump_flag = 0;
177SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
178	0, "Enable setting the NODUMP flag on coredump files");
179
180/*
181 * Signal properties and actions.
182 * The array below categorizes the signals and their default actions
183 * according to the following properties:
184 */
185#define	SA_KILL		0x01		/* terminates process by default */
186#define	SA_CORE		0x02		/* ditto and coredumps */
187#define	SA_STOP		0x04		/* suspend process */
188#define	SA_TTYSTOP	0x08		/* ditto, from tty */
189#define	SA_IGNORE	0x10		/* ignore by default */
190#define	SA_CONT		0x20		/* continue if suspended */
191#define	SA_CANTMASK	0x40		/* non-maskable, catchable */
192#define	SA_PROC		0x80		/* deliverable to any thread */
193
194static int sigproptbl[NSIG] = {
195        SA_KILL|SA_PROC,		/* SIGHUP */
196        SA_KILL|SA_PROC,		/* SIGINT */
197        SA_KILL|SA_CORE|SA_PROC,	/* SIGQUIT */
198        SA_KILL|SA_CORE,		/* SIGILL */
199        SA_KILL|SA_CORE,		/* SIGTRAP */
200        SA_KILL|SA_CORE,		/* SIGABRT */
201        SA_KILL|SA_CORE|SA_PROC,	/* SIGEMT */
202        SA_KILL|SA_CORE,		/* SIGFPE */
203        SA_KILL|SA_PROC,		/* SIGKILL */
204        SA_KILL|SA_CORE,		/* SIGBUS */
205        SA_KILL|SA_CORE,		/* SIGSEGV */
206        SA_KILL|SA_CORE,		/* SIGSYS */
207        SA_KILL|SA_PROC,		/* SIGPIPE */
208        SA_KILL|SA_PROC,		/* SIGALRM */
209        SA_KILL|SA_PROC,		/* SIGTERM */
210        SA_IGNORE|SA_PROC,		/* SIGURG */
211        SA_STOP|SA_PROC,		/* SIGSTOP */
212        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTSTP */
213        SA_IGNORE|SA_CONT|SA_PROC,	/* SIGCONT */
214        SA_IGNORE|SA_PROC,		/* SIGCHLD */
215        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTIN */
216        SA_STOP|SA_TTYSTOP|SA_PROC,	/* SIGTTOU */
217        SA_IGNORE|SA_PROC,		/* SIGIO */
218        SA_KILL,			/* SIGXCPU */
219        SA_KILL,			/* SIGXFSZ */
220        SA_KILL|SA_PROC,		/* SIGVTALRM */
221        SA_KILL|SA_PROC,		/* SIGPROF */
222        SA_IGNORE|SA_PROC,		/* SIGWINCH  */
223        SA_IGNORE|SA_PROC,		/* SIGINFO */
224        SA_KILL|SA_PROC,		/* SIGUSR1 */
225        SA_KILL|SA_PROC,		/* SIGUSR2 */
226};
227
228static void reschedule_signals(struct proc *p, sigset_t block, int flags);
229
230static void
231sigqueue_start(void)
232{
233	ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
234		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
235	uma_prealloc(ksiginfo_zone, preallocate_siginfo);
236	p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
237	p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
238	p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
239}
240
241ksiginfo_t *
242ksiginfo_alloc(int wait)
243{
244	int flags;
245
246	flags = M_ZERO;
247	if (! wait)
248		flags |= M_NOWAIT;
249	if (ksiginfo_zone != NULL)
250		return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, flags));
251	return (NULL);
252}
253
254void
255ksiginfo_free(ksiginfo_t *ksi)
256{
257	uma_zfree(ksiginfo_zone, ksi);
258}
259
260static __inline int
261ksiginfo_tryfree(ksiginfo_t *ksi)
262{
263	if (!(ksi->ksi_flags & KSI_EXT)) {
264		uma_zfree(ksiginfo_zone, ksi);
265		return (1);
266	}
267	return (0);
268}
269
270void
271sigqueue_init(sigqueue_t *list, struct proc *p)
272{
273	SIGEMPTYSET(list->sq_signals);
274	SIGEMPTYSET(list->sq_kill);
275	TAILQ_INIT(&list->sq_list);
276	list->sq_proc = p;
277	list->sq_flags = SQ_INIT;
278}
279
280/*
281 * Get a signal's ksiginfo.
282 * Return:
283 * 	0	-	signal not found
284 *	others	-	signal number
285 */
286static int
287sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
288{
289	struct proc *p = sq->sq_proc;
290	struct ksiginfo *ksi, *next;
291	int count = 0;
292
293	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
294
295	if (!SIGISMEMBER(sq->sq_signals, signo))
296		return (0);
297
298	if (SIGISMEMBER(sq->sq_kill, signo)) {
299		count++;
300		SIGDELSET(sq->sq_kill, signo);
301	}
302
303	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
304		if (ksi->ksi_signo == signo) {
305			if (count == 0) {
306				TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
307				ksi->ksi_sigq = NULL;
308				ksiginfo_copy(ksi, si);
309				if (ksiginfo_tryfree(ksi) && p != NULL)
310					p->p_pendingcnt--;
311			}
312			if (++count > 1)
313				break;
314		}
315	}
316
317	if (count <= 1)
318		SIGDELSET(sq->sq_signals, signo);
319	si->ksi_signo = signo;
320	return (signo);
321}
322
323void
324sigqueue_take(ksiginfo_t *ksi)
325{
326	struct ksiginfo *kp;
327	struct proc	*p;
328	sigqueue_t	*sq;
329
330	if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
331		return;
332
333	p = sq->sq_proc;
334	TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
335	ksi->ksi_sigq = NULL;
336	if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
337		p->p_pendingcnt--;
338
339	for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
340	     kp = TAILQ_NEXT(kp, ksi_link)) {
341		if (kp->ksi_signo == ksi->ksi_signo)
342			break;
343	}
344	if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo))
345		SIGDELSET(sq->sq_signals, ksi->ksi_signo);
346}
347
348static int
349sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
350{
351	struct proc *p = sq->sq_proc;
352	struct ksiginfo *ksi;
353	int ret = 0;
354
355	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
356
357	if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
358		SIGADDSET(sq->sq_kill, signo);
359		goto out_set_bit;
360	}
361
362	/* directly insert the ksi, don't copy it */
363	if (si->ksi_flags & KSI_INS) {
364		if (si->ksi_flags & KSI_HEAD)
365			TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
366		else
367			TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
368		si->ksi_sigq = sq;
369		goto out_set_bit;
370	}
371
372	if (__predict_false(ksiginfo_zone == NULL)) {
373		SIGADDSET(sq->sq_kill, signo);
374		goto out_set_bit;
375	}
376
377	if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
378		signal_overflow++;
379		ret = EAGAIN;
380	} else if ((ksi = ksiginfo_alloc(0)) == NULL) {
381		signal_alloc_fail++;
382		ret = EAGAIN;
383	} else {
384		if (p != NULL)
385			p->p_pendingcnt++;
386		ksiginfo_copy(si, ksi);
387		ksi->ksi_signo = signo;
388		if (si->ksi_flags & KSI_HEAD)
389			TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
390		else
391			TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
392		ksi->ksi_sigq = sq;
393	}
394
395	if ((si->ksi_flags & KSI_TRAP) != 0 ||
396	    (si->ksi_flags & KSI_SIGQ) == 0) {
397		if (ret != 0)
398			SIGADDSET(sq->sq_kill, signo);
399		ret = 0;
400		goto out_set_bit;
401	}
402
403	if (ret != 0)
404		return (ret);
405
406out_set_bit:
407	SIGADDSET(sq->sq_signals, signo);
408	return (ret);
409}
410
411void
412sigqueue_flush(sigqueue_t *sq)
413{
414	struct proc *p = sq->sq_proc;
415	ksiginfo_t *ksi;
416
417	KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
418
419	if (p != NULL)
420		PROC_LOCK_ASSERT(p, MA_OWNED);
421
422	while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
423		TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
424		ksi->ksi_sigq = NULL;
425		if (ksiginfo_tryfree(ksi) && p != NULL)
426			p->p_pendingcnt--;
427	}
428
429	SIGEMPTYSET(sq->sq_signals);
430	SIGEMPTYSET(sq->sq_kill);
431}
432
433static void
434sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
435{
436	sigset_t tmp;
437	struct proc *p1, *p2;
438	ksiginfo_t *ksi, *next;
439
440	KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
441	KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
442	p1 = src->sq_proc;
443	p2 = dst->sq_proc;
444	/* Move siginfo to target list */
445	TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
446		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
447			TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
448			if (p1 != NULL)
449				p1->p_pendingcnt--;
450			TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
451			ksi->ksi_sigq = dst;
452			if (p2 != NULL)
453				p2->p_pendingcnt++;
454		}
455	}
456
457	/* Move pending bits to target list */
458	tmp = src->sq_kill;
459	SIGSETAND(tmp, *set);
460	SIGSETOR(dst->sq_kill, tmp);
461	SIGSETNAND(src->sq_kill, tmp);
462
463	tmp = src->sq_signals;
464	SIGSETAND(tmp, *set);
465	SIGSETOR(dst->sq_signals, tmp);
466	SIGSETNAND(src->sq_signals, tmp);
467}
468
469#if 0
470static void
471sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
472{
473	sigset_t set;
474
475	SIGEMPTYSET(set);
476	SIGADDSET(set, signo);
477	sigqueue_move_set(src, dst, &set);
478}
479#endif
480
481static void
482sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
483{
484	struct proc *p = sq->sq_proc;
485	ksiginfo_t *ksi, *next;
486
487	KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
488
489	/* Remove siginfo queue */
490	TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
491		if (SIGISMEMBER(*set, ksi->ksi_signo)) {
492			TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
493			ksi->ksi_sigq = NULL;
494			if (ksiginfo_tryfree(ksi) && p != NULL)
495				p->p_pendingcnt--;
496		}
497	}
498	SIGSETNAND(sq->sq_kill, *set);
499	SIGSETNAND(sq->sq_signals, *set);
500}
501
502void
503sigqueue_delete(sigqueue_t *sq, int signo)
504{
505	sigset_t set;
506
507	SIGEMPTYSET(set);
508	SIGADDSET(set, signo);
509	sigqueue_delete_set(sq, &set);
510}
511
512/* Remove a set of signals for a process */
513static void
514sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
515{
516	sigqueue_t worklist;
517	struct thread *td0;
518
519	PROC_LOCK_ASSERT(p, MA_OWNED);
520
521	sigqueue_init(&worklist, NULL);
522	sigqueue_move_set(&p->p_sigqueue, &worklist, set);
523
524	FOREACH_THREAD_IN_PROC(p, td0)
525		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
526
527	sigqueue_flush(&worklist);
528}
529
530void
531sigqueue_delete_proc(struct proc *p, int signo)
532{
533	sigset_t set;
534
535	SIGEMPTYSET(set);
536	SIGADDSET(set, signo);
537	sigqueue_delete_set_proc(p, &set);
538}
539
540static void
541sigqueue_delete_stopmask_proc(struct proc *p)
542{
543	sigset_t set;
544
545	SIGEMPTYSET(set);
546	SIGADDSET(set, SIGSTOP);
547	SIGADDSET(set, SIGTSTP);
548	SIGADDSET(set, SIGTTIN);
549	SIGADDSET(set, SIGTTOU);
550	sigqueue_delete_set_proc(p, &set);
551}
552
553/*
554 * Determine signal that should be delivered to process p, the current
555 * process, 0 if none.  If there is a pending stop signal with default
556 * action, the process stops in issignal().
557 */
558int
559cursig(struct thread *td, int stop_allowed)
560{
561	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
562	KASSERT(stop_allowed == SIG_STOP_ALLOWED ||
563	    stop_allowed == SIG_STOP_NOT_ALLOWED, ("cursig: stop_allowed"));
564	mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
565	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
566	return (SIGPENDING(td) ? issignal(td, stop_allowed) : 0);
567}
568
569/*
570 * Arrange for ast() to handle unmasked pending signals on return to user
571 * mode.  This must be called whenever a signal is added to td_sigqueue or
572 * unmasked in td_sigmask.
573 */
574void
575signotify(struct thread *td)
576{
577	struct proc *p;
578
579	p = td->td_proc;
580
581	PROC_LOCK_ASSERT(p, MA_OWNED);
582
583	if (SIGPENDING(td)) {
584		thread_lock(td);
585		td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
586		thread_unlock(td);
587	}
588}
589
590int
591sigonstack(size_t sp)
592{
593	struct thread *td = curthread;
594
595	return ((td->td_pflags & TDP_ALTSTACK) ?
596#if defined(COMPAT_43)
597	    ((td->td_sigstk.ss_size == 0) ?
598		(td->td_sigstk.ss_flags & SS_ONSTACK) :
599		((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size))
600#else
601	    ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)
602#endif
603	    : 0);
604}
605
606static __inline int
607sigprop(int sig)
608{
609
610	if (sig > 0 && sig < NSIG)
611		return (sigproptbl[_SIG_IDX(sig)]);
612	return (0);
613}
614
615int
616sig_ffs(sigset_t *set)
617{
618	int i;
619
620	for (i = 0; i < _SIG_WORDS; i++)
621		if (set->__bits[i])
622			return (ffs(set->__bits[i]) + (i * 32));
623	return (0);
624}
625
626/*
627 * kern_sigaction
628 * sigaction
629 * freebsd4_sigaction
630 * osigaction
631 */
632int
633kern_sigaction(td, sig, act, oact, flags)
634	struct thread *td;
635	register int sig;
636	struct sigaction *act, *oact;
637	int flags;
638{
639	struct sigacts *ps;
640	struct proc *p = td->td_proc;
641
642	if (!_SIG_VALID(sig))
643		return (EINVAL);
644
645	PROC_LOCK(p);
646	ps = p->p_sigacts;
647	mtx_lock(&ps->ps_mtx);
648	if (oact) {
649		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
650		oact->sa_flags = 0;
651		if (SIGISMEMBER(ps->ps_sigonstack, sig))
652			oact->sa_flags |= SA_ONSTACK;
653		if (!SIGISMEMBER(ps->ps_sigintr, sig))
654			oact->sa_flags |= SA_RESTART;
655		if (SIGISMEMBER(ps->ps_sigreset, sig))
656			oact->sa_flags |= SA_RESETHAND;
657		if (SIGISMEMBER(ps->ps_signodefer, sig))
658			oact->sa_flags |= SA_NODEFER;
659		if (SIGISMEMBER(ps->ps_siginfo, sig)) {
660			oact->sa_flags |= SA_SIGINFO;
661			oact->sa_sigaction =
662			    (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
663		} else
664			oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
665		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
666			oact->sa_flags |= SA_NOCLDSTOP;
667		if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
668			oact->sa_flags |= SA_NOCLDWAIT;
669	}
670	if (act) {
671		if ((sig == SIGKILL || sig == SIGSTOP) &&
672		    act->sa_handler != SIG_DFL) {
673			mtx_unlock(&ps->ps_mtx);
674			PROC_UNLOCK(p);
675			return (EINVAL);
676		}
677
678		/*
679		 * Change setting atomically.
680		 */
681
682		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
683		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
684		if (act->sa_flags & SA_SIGINFO) {
685			ps->ps_sigact[_SIG_IDX(sig)] =
686			    (__sighandler_t *)act->sa_sigaction;
687			SIGADDSET(ps->ps_siginfo, sig);
688		} else {
689			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
690			SIGDELSET(ps->ps_siginfo, sig);
691		}
692		if (!(act->sa_flags & SA_RESTART))
693			SIGADDSET(ps->ps_sigintr, sig);
694		else
695			SIGDELSET(ps->ps_sigintr, sig);
696		if (act->sa_flags & SA_ONSTACK)
697			SIGADDSET(ps->ps_sigonstack, sig);
698		else
699			SIGDELSET(ps->ps_sigonstack, sig);
700		if (act->sa_flags & SA_RESETHAND)
701			SIGADDSET(ps->ps_sigreset, sig);
702		else
703			SIGDELSET(ps->ps_sigreset, sig);
704		if (act->sa_flags & SA_NODEFER)
705			SIGADDSET(ps->ps_signodefer, sig);
706		else
707			SIGDELSET(ps->ps_signodefer, sig);
708		if (sig == SIGCHLD) {
709			if (act->sa_flags & SA_NOCLDSTOP)
710				ps->ps_flag |= PS_NOCLDSTOP;
711			else
712				ps->ps_flag &= ~PS_NOCLDSTOP;
713			if (act->sa_flags & SA_NOCLDWAIT) {
714				/*
715				 * Paranoia: since SA_NOCLDWAIT is implemented
716				 * by reparenting the dying child to PID 1 (and
717				 * trust it to reap the zombie), PID 1 itself
718				 * is forbidden to set SA_NOCLDWAIT.
719				 */
720				if (p->p_pid == 1)
721					ps->ps_flag &= ~PS_NOCLDWAIT;
722				else
723					ps->ps_flag |= PS_NOCLDWAIT;
724			} else
725				ps->ps_flag &= ~PS_NOCLDWAIT;
726			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
727				ps->ps_flag |= PS_CLDSIGIGN;
728			else
729				ps->ps_flag &= ~PS_CLDSIGIGN;
730		}
731		/*
732		 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
733		 * and for signals set to SIG_DFL where the default is to
734		 * ignore. However, don't put SIGCONT in ps_sigignore, as we
735		 * have to restart the process.
736		 */
737		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
738		    (sigprop(sig) & SA_IGNORE &&
739		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
740			/* never to be seen again */
741			sigqueue_delete_proc(p, sig);
742			if (sig != SIGCONT)
743				/* easier in psignal */
744				SIGADDSET(ps->ps_sigignore, sig);
745			SIGDELSET(ps->ps_sigcatch, sig);
746		} else {
747			SIGDELSET(ps->ps_sigignore, sig);
748			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
749				SIGDELSET(ps->ps_sigcatch, sig);
750			else
751				SIGADDSET(ps->ps_sigcatch, sig);
752		}
753#ifdef COMPAT_FREEBSD4
754		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
755		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
756		    (flags & KSA_FREEBSD4) == 0)
757			SIGDELSET(ps->ps_freebsd4, sig);
758		else
759			SIGADDSET(ps->ps_freebsd4, sig);
760#endif
761#ifdef COMPAT_43
762		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
763		    ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
764		    (flags & KSA_OSIGSET) == 0)
765			SIGDELSET(ps->ps_osigset, sig);
766		else
767			SIGADDSET(ps->ps_osigset, sig);
768#endif
769	}
770	mtx_unlock(&ps->ps_mtx);
771	PROC_UNLOCK(p);
772	return (0);
773}
774
775#ifndef _SYS_SYSPROTO_H_
776struct sigaction_args {
777	int	sig;
778	struct	sigaction *act;
779	struct	sigaction *oact;
780};
781#endif
782int
783sys_sigaction(td, uap)
784	struct thread *td;
785	register struct sigaction_args *uap;
786{
787	struct sigaction act, oact;
788	register struct sigaction *actp, *oactp;
789	int error;
790
791	actp = (uap->act != NULL) ? &act : NULL;
792	oactp = (uap->oact != NULL) ? &oact : NULL;
793	if (actp) {
794		error = copyin(uap->act, actp, sizeof(act));
795		if (error)
796			return (error);
797	}
798	error = kern_sigaction(td, uap->sig, actp, oactp, 0);
799	if (oactp && !error)
800		error = copyout(oactp, uap->oact, sizeof(oact));
801	return (error);
802}
803
804#ifdef COMPAT_FREEBSD4
805#ifndef _SYS_SYSPROTO_H_
806struct freebsd4_sigaction_args {
807	int	sig;
808	struct	sigaction *act;
809	struct	sigaction *oact;
810};
811#endif
812int
813freebsd4_sigaction(td, uap)
814	struct thread *td;
815	register struct freebsd4_sigaction_args *uap;
816{
817	struct sigaction act, oact;
818	register struct sigaction *actp, *oactp;
819	int error;
820
821
822	actp = (uap->act != NULL) ? &act : NULL;
823	oactp = (uap->oact != NULL) ? &oact : NULL;
824	if (actp) {
825		error = copyin(uap->act, actp, sizeof(act));
826		if (error)
827			return (error);
828	}
829	error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
830	if (oactp && !error)
831		error = copyout(oactp, uap->oact, sizeof(oact));
832	return (error);
833}
834#endif	/* COMAPT_FREEBSD4 */
835
836#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
837#ifndef _SYS_SYSPROTO_H_
838struct osigaction_args {
839	int	signum;
840	struct	osigaction *nsa;
841	struct	osigaction *osa;
842};
843#endif
844int
845osigaction(td, uap)
846	struct thread *td;
847	register struct osigaction_args *uap;
848{
849	struct osigaction sa;
850	struct sigaction nsa, osa;
851	register struct sigaction *nsap, *osap;
852	int error;
853
854	if (uap->signum <= 0 || uap->signum >= ONSIG)
855		return (EINVAL);
856
857	nsap = (uap->nsa != NULL) ? &nsa : NULL;
858	osap = (uap->osa != NULL) ? &osa : NULL;
859
860	if (nsap) {
861		error = copyin(uap->nsa, &sa, sizeof(sa));
862		if (error)
863			return (error);
864		nsap->sa_handler = sa.sa_handler;
865		nsap->sa_flags = sa.sa_flags;
866		OSIG2SIG(sa.sa_mask, nsap->sa_mask);
867	}
868	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
869	if (osap && !error) {
870		sa.sa_handler = osap->sa_handler;
871		sa.sa_flags = osap->sa_flags;
872		SIG2OSIG(osap->sa_mask, sa.sa_mask);
873		error = copyout(&sa, uap->osa, sizeof(sa));
874	}
875	return (error);
876}
877
878#if !defined(__i386__)
879/* Avoid replicating the same stub everywhere */
880int
881osigreturn(td, uap)
882	struct thread *td;
883	struct osigreturn_args *uap;
884{
885
886	return (nosys(td, (struct nosys_args *)uap));
887}
888#endif
889#endif /* COMPAT_43 */
890
891/*
892 * Initialize signal state for process 0;
893 * set to ignore signals that are ignored by default.
894 */
895void
896siginit(p)
897	struct proc *p;
898{
899	register int i;
900	struct sigacts *ps;
901
902	PROC_LOCK(p);
903	ps = p->p_sigacts;
904	mtx_lock(&ps->ps_mtx);
905	for (i = 1; i <= NSIG; i++)
906		if (sigprop(i) & SA_IGNORE && i != SIGCONT)
907			SIGADDSET(ps->ps_sigignore, i);
908	mtx_unlock(&ps->ps_mtx);
909	PROC_UNLOCK(p);
910}
911
912/*
913 * Reset signals for an exec of the specified process.
914 */
915void
916execsigs(struct proc *p)
917{
918	struct sigacts *ps;
919	int sig;
920	struct thread *td;
921
922	/*
923	 * Reset caught signals.  Held signals remain held
924	 * through td_sigmask (unless they were caught,
925	 * and are now ignored by default).
926	 */
927	PROC_LOCK_ASSERT(p, MA_OWNED);
928	td = FIRST_THREAD_IN_PROC(p);
929	ps = p->p_sigacts;
930	mtx_lock(&ps->ps_mtx);
931	while (SIGNOTEMPTY(ps->ps_sigcatch)) {
932		sig = sig_ffs(&ps->ps_sigcatch);
933		SIGDELSET(ps->ps_sigcatch, sig);
934		if (sigprop(sig) & SA_IGNORE) {
935			if (sig != SIGCONT)
936				SIGADDSET(ps->ps_sigignore, sig);
937			sigqueue_delete_proc(p, sig);
938		}
939		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
940	}
941	/*
942	 * Reset stack state to the user stack.
943	 * Clear set of signals caught on the signal stack.
944	 */
945	td->td_sigstk.ss_flags = SS_DISABLE;
946	td->td_sigstk.ss_size = 0;
947	td->td_sigstk.ss_sp = 0;
948	td->td_pflags &= ~TDP_ALTSTACK;
949	/*
950	 * Reset no zombies if child dies flag as Solaris does.
951	 */
952	ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
953	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
954		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
955	mtx_unlock(&ps->ps_mtx);
956}
957
958/*
959 * kern_sigprocmask()
960 *
961 *	Manipulate signal mask.
962 */
963int
964kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
965    int flags)
966{
967	sigset_t new_block, oset1;
968	struct proc *p;
969	int error;
970
971	p = td->td_proc;
972	if (!(flags & SIGPROCMASK_PROC_LOCKED))
973		PROC_LOCK(p);
974	if (oset != NULL)
975		*oset = td->td_sigmask;
976
977	error = 0;
978	if (set != NULL) {
979		switch (how) {
980		case SIG_BLOCK:
981			SIG_CANTMASK(*set);
982			oset1 = td->td_sigmask;
983			SIGSETOR(td->td_sigmask, *set);
984			new_block = td->td_sigmask;
985			SIGSETNAND(new_block, oset1);
986			break;
987		case SIG_UNBLOCK:
988			SIGSETNAND(td->td_sigmask, *set);
989			signotify(td);
990			goto out;
991		case SIG_SETMASK:
992			SIG_CANTMASK(*set);
993			oset1 = td->td_sigmask;
994			if (flags & SIGPROCMASK_OLD)
995				SIGSETLO(td->td_sigmask, *set);
996			else
997				td->td_sigmask = *set;
998			new_block = td->td_sigmask;
999			SIGSETNAND(new_block, oset1);
1000			signotify(td);
1001			break;
1002		default:
1003			error = EINVAL;
1004			goto out;
1005		}
1006
1007		/*
1008		 * The new_block set contains signals that were not previously
1009		 * blocked, but are blocked now.
1010		 *
1011		 * In case we block any signal that was not previously blocked
1012		 * for td, and process has the signal pending, try to schedule
1013		 * signal delivery to some thread that does not block the
1014		 * signal, possibly waking it up.
1015		 */
1016		if (p->p_numthreads != 1)
1017			reschedule_signals(p, new_block, flags);
1018	}
1019
1020out:
1021	if (!(flags & SIGPROCMASK_PROC_LOCKED))
1022		PROC_UNLOCK(p);
1023	return (error);
1024}
1025
1026#ifndef _SYS_SYSPROTO_H_
1027struct sigprocmask_args {
1028	int	how;
1029	const sigset_t *set;
1030	sigset_t *oset;
1031};
1032#endif
1033int
1034sys_sigprocmask(td, uap)
1035	register struct thread *td;
1036	struct sigprocmask_args *uap;
1037{
1038	sigset_t set, oset;
1039	sigset_t *setp, *osetp;
1040	int error;
1041
1042	setp = (uap->set != NULL) ? &set : NULL;
1043	osetp = (uap->oset != NULL) ? &oset : NULL;
1044	if (setp) {
1045		error = copyin(uap->set, setp, sizeof(set));
1046		if (error)
1047			return (error);
1048	}
1049	error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1050	if (osetp && !error) {
1051		error = copyout(osetp, uap->oset, sizeof(oset));
1052	}
1053	return (error);
1054}
1055
1056#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1057#ifndef _SYS_SYSPROTO_H_
1058struct osigprocmask_args {
1059	int	how;
1060	osigset_t mask;
1061};
1062#endif
1063int
1064osigprocmask(td, uap)
1065	register struct thread *td;
1066	struct osigprocmask_args *uap;
1067{
1068	sigset_t set, oset;
1069	int error;
1070
1071	OSIG2SIG(uap->mask, set);
1072	error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1073	SIG2OSIG(oset, td->td_retval[0]);
1074	return (error);
1075}
1076#endif /* COMPAT_43 */
1077
1078int
1079sys_sigwait(struct thread *td, struct sigwait_args *uap)
1080{
1081	ksiginfo_t ksi;
1082	sigset_t set;
1083	int error;
1084
1085	error = copyin(uap->set, &set, sizeof(set));
1086	if (error) {
1087		td->td_retval[0] = error;
1088		return (0);
1089	}
1090
1091	error = kern_sigtimedwait(td, set, &ksi, NULL);
1092	if (error) {
1093		if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
1094			error = ERESTART;
1095		if (error == ERESTART)
1096			return (error);
1097		td->td_retval[0] = error;
1098		return (0);
1099	}
1100
1101	error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
1102	td->td_retval[0] = error;
1103	return (0);
1104}
1105
1106int
1107sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1108{
1109	struct timespec ts;
1110	struct timespec *timeout;
1111	sigset_t set;
1112	ksiginfo_t ksi;
1113	int error;
1114
1115	if (uap->timeout) {
1116		error = copyin(uap->timeout, &ts, sizeof(ts));
1117		if (error)
1118			return (error);
1119
1120		timeout = &ts;
1121	} else
1122		timeout = NULL;
1123
1124	error = copyin(uap->set, &set, sizeof(set));
1125	if (error)
1126		return (error);
1127
1128	error = kern_sigtimedwait(td, set, &ksi, timeout);
1129	if (error)
1130		return (error);
1131
1132	if (uap->info)
1133		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1134
1135	if (error == 0)
1136		td->td_retval[0] = ksi.ksi_signo;
1137	return (error);
1138}
1139
1140int
1141sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1142{
1143	ksiginfo_t ksi;
1144	sigset_t set;
1145	int error;
1146
1147	error = copyin(uap->set, &set, sizeof(set));
1148	if (error)
1149		return (error);
1150
1151	error = kern_sigtimedwait(td, set, &ksi, NULL);
1152	if (error)
1153		return (error);
1154
1155	if (uap->info)
1156		error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1157
1158	if (error == 0)
1159		td->td_retval[0] = ksi.ksi_signo;
1160	return (error);
1161}
1162
1163int
1164kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1165	struct timespec *timeout)
1166{
1167	struct sigacts *ps;
1168	sigset_t saved_mask, new_block;
1169	struct proc *p;
1170	int error, sig, timo, timevalid = 0;
1171	struct timespec rts, ets, ts;
1172	struct timeval tv;
1173
1174	p = td->td_proc;
1175	error = 0;
1176	ets.tv_sec = 0;
1177	ets.tv_nsec = 0;
1178
1179	if (timeout != NULL) {
1180		if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1181			timevalid = 1;
1182			getnanouptime(&rts);
1183		 	ets = rts;
1184			timespecadd(&ets, timeout);
1185		}
1186	}
1187	ksiginfo_init(ksi);
1188	/* Some signals can not be waited for. */
1189	SIG_CANTMASK(waitset);
1190	ps = p->p_sigacts;
1191	PROC_LOCK(p);
1192	saved_mask = td->td_sigmask;
1193	SIGSETNAND(td->td_sigmask, waitset);
1194	for (;;) {
1195		mtx_lock(&ps->ps_mtx);
1196		sig = cursig(td, SIG_STOP_ALLOWED);
1197		mtx_unlock(&ps->ps_mtx);
1198		if (sig != 0 && SIGISMEMBER(waitset, sig)) {
1199			if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1200		    	    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1201				error = 0;
1202				break;
1203			}
1204		}
1205
1206		if (error != 0)
1207			break;
1208
1209		/*
1210		 * POSIX says this must be checked after looking for pending
1211		 * signals.
1212		 */
1213		if (timeout != NULL) {
1214			if (!timevalid) {
1215				error = EINVAL;
1216				break;
1217			}
1218			getnanouptime(&rts);
1219			if (timespeccmp(&rts, &ets, >=)) {
1220				error = EAGAIN;
1221				break;
1222			}
1223			ts = ets;
1224			timespecsub(&ts, &rts);
1225			TIMESPEC_TO_TIMEVAL(&tv, &ts);
1226			timo = tvtohz(&tv);
1227		} else {
1228			timo = 0;
1229		}
1230
1231		error = msleep(ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", timo);
1232
1233		if (timeout != NULL) {
1234			if (error == ERESTART) {
1235				/* Timeout can not be restarted. */
1236				error = EINTR;
1237			} else if (error == EAGAIN) {
1238				/* We will calculate timeout by ourself. */
1239				error = 0;
1240			}
1241		}
1242	}
1243
1244	new_block = saved_mask;
1245	SIGSETNAND(new_block, td->td_sigmask);
1246	td->td_sigmask = saved_mask;
1247	/*
1248	 * Fewer signals can be delivered to us, reschedule signal
1249	 * notification.
1250	 */
1251	if (p->p_numthreads != 1)
1252		reschedule_signals(p, new_block, 0);
1253
1254	if (error == 0) {
1255		SDT_PROBE(proc, kernel, , signal__clear, sig, ksi, 0, 0, 0);
1256
1257		if (ksi->ksi_code == SI_TIMER)
1258			itimer_accept(p, ksi->ksi_timerid, ksi);
1259
1260#ifdef KTRACE
1261		if (KTRPOINT(td, KTR_PSIG)) {
1262			sig_t action;
1263
1264			mtx_lock(&ps->ps_mtx);
1265			action = ps->ps_sigact[_SIG_IDX(sig)];
1266			mtx_unlock(&ps->ps_mtx);
1267			ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
1268		}
1269#endif
1270		if (sig == SIGKILL)
1271			sigexit(td, sig);
1272	}
1273	PROC_UNLOCK(p);
1274	return (error);
1275}
1276
1277#ifndef _SYS_SYSPROTO_H_
1278struct sigpending_args {
1279	sigset_t	*set;
1280};
1281#endif
1282int
1283sys_sigpending(td, uap)
1284	struct thread *td;
1285	struct sigpending_args *uap;
1286{
1287	struct proc *p = td->td_proc;
1288	sigset_t pending;
1289
1290	PROC_LOCK(p);
1291	pending = p->p_sigqueue.sq_signals;
1292	SIGSETOR(pending, td->td_sigqueue.sq_signals);
1293	PROC_UNLOCK(p);
1294	return (copyout(&pending, uap->set, sizeof(sigset_t)));
1295}
1296
1297#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1298#ifndef _SYS_SYSPROTO_H_
1299struct osigpending_args {
1300	int	dummy;
1301};
1302#endif
1303int
1304osigpending(td, uap)
1305	struct thread *td;
1306	struct osigpending_args *uap;
1307{
1308	struct proc *p = td->td_proc;
1309	sigset_t pending;
1310
1311	PROC_LOCK(p);
1312	pending = p->p_sigqueue.sq_signals;
1313	SIGSETOR(pending, td->td_sigqueue.sq_signals);
1314	PROC_UNLOCK(p);
1315	SIG2OSIG(pending, td->td_retval[0]);
1316	return (0);
1317}
1318#endif /* COMPAT_43 */
1319
1320#if defined(COMPAT_43)
1321/*
1322 * Generalized interface signal handler, 4.3-compatible.
1323 */
1324#ifndef _SYS_SYSPROTO_H_
1325struct osigvec_args {
1326	int	signum;
1327	struct	sigvec *nsv;
1328	struct	sigvec *osv;
1329};
1330#endif
1331/* ARGSUSED */
1332int
1333osigvec(td, uap)
1334	struct thread *td;
1335	register struct osigvec_args *uap;
1336{
1337	struct sigvec vec;
1338	struct sigaction nsa, osa;
1339	register struct sigaction *nsap, *osap;
1340	int error;
1341
1342	if (uap->signum <= 0 || uap->signum >= ONSIG)
1343		return (EINVAL);
1344	nsap = (uap->nsv != NULL) ? &nsa : NULL;
1345	osap = (uap->osv != NULL) ? &osa : NULL;
1346	if (nsap) {
1347		error = copyin(uap->nsv, &vec, sizeof(vec));
1348		if (error)
1349			return (error);
1350		nsap->sa_handler = vec.sv_handler;
1351		OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1352		nsap->sa_flags = vec.sv_flags;
1353		nsap->sa_flags ^= SA_RESTART;	/* opposite of SV_INTERRUPT */
1354	}
1355	error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1356	if (osap && !error) {
1357		vec.sv_handler = osap->sa_handler;
1358		SIG2OSIG(osap->sa_mask, vec.sv_mask);
1359		vec.sv_flags = osap->sa_flags;
1360		vec.sv_flags &= ~SA_NOCLDWAIT;
1361		vec.sv_flags ^= SA_RESTART;
1362		error = copyout(&vec, uap->osv, sizeof(vec));
1363	}
1364	return (error);
1365}
1366
1367#ifndef _SYS_SYSPROTO_H_
1368struct osigblock_args {
1369	int	mask;
1370};
1371#endif
1372int
1373osigblock(td, uap)
1374	register struct thread *td;
1375	struct osigblock_args *uap;
1376{
1377	sigset_t set, oset;
1378
1379	OSIG2SIG(uap->mask, set);
1380	kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
1381	SIG2OSIG(oset, td->td_retval[0]);
1382	return (0);
1383}
1384
1385#ifndef _SYS_SYSPROTO_H_
1386struct osigsetmask_args {
1387	int	mask;
1388};
1389#endif
1390int
1391osigsetmask(td, uap)
1392	struct thread *td;
1393	struct osigsetmask_args *uap;
1394{
1395	sigset_t set, oset;
1396
1397	OSIG2SIG(uap->mask, set);
1398	kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
1399	SIG2OSIG(oset, td->td_retval[0]);
1400	return (0);
1401}
1402#endif /* COMPAT_43 */
1403
1404/*
1405 * Suspend calling thread until signal, providing mask to be set in the
1406 * meantime.
1407 */
1408#ifndef _SYS_SYSPROTO_H_
1409struct sigsuspend_args {
1410	const sigset_t *sigmask;
1411};
1412#endif
1413/* ARGSUSED */
1414int
1415sys_sigsuspend(td, uap)
1416	struct thread *td;
1417	struct sigsuspend_args *uap;
1418{
1419	sigset_t mask;
1420	int error;
1421
1422	error = copyin(uap->sigmask, &mask, sizeof(mask));
1423	if (error)
1424		return (error);
1425	return (kern_sigsuspend(td, mask));
1426}
1427
1428int
1429kern_sigsuspend(struct thread *td, sigset_t mask)
1430{
1431	struct proc *p = td->td_proc;
1432	int has_sig, sig;
1433
1434	/*
1435	 * When returning from sigsuspend, we want
1436	 * the old mask to be restored after the
1437	 * signal handler has finished.  Thus, we
1438	 * save it here and mark the sigacts structure
1439	 * to indicate this.
1440	 */
1441	PROC_LOCK(p);
1442	kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
1443	    SIGPROCMASK_PROC_LOCKED);
1444	td->td_pflags |= TDP_OLDMASK;
1445
1446	/*
1447	 * Process signals now. Otherwise, we can get spurious wakeup
1448	 * due to signal entered process queue, but delivered to other
1449	 * thread. But sigsuspend should return only on signal
1450	 * delivery.
1451	 */
1452	(p->p_sysent->sv_set_syscall_retval)(td, EINTR);
1453	for (has_sig = 0; !has_sig;) {
1454		while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
1455			0) == 0)
1456			/* void */;
1457		thread_suspend_check(0);
1458		mtx_lock(&p->p_sigacts->ps_mtx);
1459		while ((sig = cursig(td, SIG_STOP_ALLOWED)) != 0)
1460			has_sig += postsig(sig);
1461		mtx_unlock(&p->p_sigacts->ps_mtx);
1462	}
1463	PROC_UNLOCK(p);
1464	td->td_errno = EINTR;
1465	td->td_pflags |= TDP_NERRNO;
1466	return (EJUSTRETURN);
1467}
1468
1469#ifdef COMPAT_43	/* XXX - COMPAT_FBSD3 */
1470/*
1471 * Compatibility sigsuspend call for old binaries.  Note nonstandard calling
1472 * convention: libc stub passes mask, not pointer, to save a copyin.
1473 */
1474#ifndef _SYS_SYSPROTO_H_
1475struct osigsuspend_args {
1476	osigset_t mask;
1477};
1478#endif
1479/* ARGSUSED */
1480int
1481osigsuspend(td, uap)
1482	struct thread *td;
1483	struct osigsuspend_args *uap;
1484{
1485	sigset_t mask;
1486
1487	OSIG2SIG(uap->mask, mask);
1488	return (kern_sigsuspend(td, mask));
1489}
1490#endif /* COMPAT_43 */
1491
1492#if defined(COMPAT_43)
1493#ifndef _SYS_SYSPROTO_H_
1494struct osigstack_args {
1495	struct	sigstack *nss;
1496	struct	sigstack *oss;
1497};
1498#endif
1499/* ARGSUSED */
1500int
1501osigstack(td, uap)
1502	struct thread *td;
1503	register struct osigstack_args *uap;
1504{
1505	struct sigstack nss, oss;
1506	int error = 0;
1507
1508	if (uap->nss != NULL) {
1509		error = copyin(uap->nss, &nss, sizeof(nss));
1510		if (error)
1511			return (error);
1512	}
1513	oss.ss_sp = td->td_sigstk.ss_sp;
1514	oss.ss_onstack = sigonstack(cpu_getstack(td));
1515	if (uap->nss != NULL) {
1516		td->td_sigstk.ss_sp = nss.ss_sp;
1517		td->td_sigstk.ss_size = 0;
1518		td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1519		td->td_pflags |= TDP_ALTSTACK;
1520	}
1521	if (uap->oss != NULL)
1522		error = copyout(&oss, uap->oss, sizeof(oss));
1523
1524	return (error);
1525}
1526#endif /* COMPAT_43 */
1527
1528#ifndef _SYS_SYSPROTO_H_
1529struct sigaltstack_args {
1530	stack_t	*ss;
1531	stack_t	*oss;
1532};
1533#endif
1534/* ARGSUSED */
1535int
1536sys_sigaltstack(td, uap)
1537	struct thread *td;
1538	register struct sigaltstack_args *uap;
1539{
1540	stack_t ss, oss;
1541	int error;
1542
1543	if (uap->ss != NULL) {
1544		error = copyin(uap->ss, &ss, sizeof(ss));
1545		if (error)
1546			return (error);
1547	}
1548	error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1549	    (uap->oss != NULL) ? &oss : NULL);
1550	if (error)
1551		return (error);
1552	if (uap->oss != NULL)
1553		error = copyout(&oss, uap->oss, sizeof(stack_t));
1554	return (error);
1555}
1556
1557int
1558kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1559{
1560	struct proc *p = td->td_proc;
1561	int oonstack;
1562
1563	oonstack = sigonstack(cpu_getstack(td));
1564
1565	if (oss != NULL) {
1566		*oss = td->td_sigstk;
1567		oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1568		    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1569	}
1570
1571	if (ss != NULL) {
1572		if (oonstack)
1573			return (EPERM);
1574		if ((ss->ss_flags & ~SS_DISABLE) != 0)
1575			return (EINVAL);
1576		if (!(ss->ss_flags & SS_DISABLE)) {
1577			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1578				return (ENOMEM);
1579
1580			td->td_sigstk = *ss;
1581			td->td_pflags |= TDP_ALTSTACK;
1582		} else {
1583			td->td_pflags &= ~TDP_ALTSTACK;
1584		}
1585	}
1586	return (0);
1587}
1588
1589/*
1590 * Common code for kill process group/broadcast kill.
1591 * cp is calling process.
1592 */
1593static int
1594killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1595{
1596	struct proc *p;
1597	struct pgrp *pgrp;
1598	int err;
1599	int ret;
1600
1601	ret = ESRCH;
1602	if (all) {
1603		/*
1604		 * broadcast
1605		 */
1606		sx_slock(&allproc_lock);
1607		FOREACH_PROC_IN_SYSTEM(p) {
1608			PROC_LOCK(p);
1609			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1610			    p == td->td_proc || p->p_state == PRS_NEW) {
1611				PROC_UNLOCK(p);
1612				continue;
1613			}
1614			err = p_cansignal(td, p, sig);
1615			if (err == 0) {
1616				if (sig)
1617					pksignal(p, sig, ksi);
1618				ret = err;
1619			}
1620			else if (ret == ESRCH)
1621				ret = err;
1622			PROC_UNLOCK(p);
1623		}
1624		sx_sunlock(&allproc_lock);
1625	} else {
1626		sx_slock(&proctree_lock);
1627		if (pgid == 0) {
1628			/*
1629			 * zero pgid means send to my process group.
1630			 */
1631			pgrp = td->td_proc->p_pgrp;
1632			PGRP_LOCK(pgrp);
1633		} else {
1634			pgrp = pgfind(pgid);
1635			if (pgrp == NULL) {
1636				sx_sunlock(&proctree_lock);
1637				return (ESRCH);
1638			}
1639		}
1640		sx_sunlock(&proctree_lock);
1641		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1642			PROC_LOCK(p);
1643			if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
1644			    p->p_state == PRS_NEW) {
1645				PROC_UNLOCK(p);
1646				continue;
1647			}
1648			err = p_cansignal(td, p, sig);
1649			if (err == 0) {
1650				if (sig)
1651					pksignal(p, sig, ksi);
1652				ret = err;
1653			}
1654			else if (ret == ESRCH)
1655				ret = err;
1656			PROC_UNLOCK(p);
1657		}
1658		PGRP_UNLOCK(pgrp);
1659	}
1660	return (ret);
1661}
1662
1663#ifndef _SYS_SYSPROTO_H_
1664struct kill_args {
1665	int	pid;
1666	int	signum;
1667};
1668#endif
1669/* ARGSUSED */
1670int
1671sys_kill(struct thread *td, struct kill_args *uap)
1672{
1673	ksiginfo_t ksi;
1674	struct proc *p;
1675	int error;
1676
1677	AUDIT_ARG_SIGNUM(uap->signum);
1678	AUDIT_ARG_PID(uap->pid);
1679	if ((u_int)uap->signum > _SIG_MAXSIG)
1680		return (EINVAL);
1681
1682	ksiginfo_init(&ksi);
1683	ksi.ksi_signo = uap->signum;
1684	ksi.ksi_code = SI_USER;
1685	ksi.ksi_pid = td->td_proc->p_pid;
1686	ksi.ksi_uid = td->td_ucred->cr_ruid;
1687
1688	if (uap->pid > 0) {
1689		/* kill single process */
1690		if ((p = pfind(uap->pid)) == NULL) {
1691			if ((p = zpfind(uap->pid)) == NULL)
1692				return (ESRCH);
1693		}
1694		AUDIT_ARG_PROCESS(p);
1695		error = p_cansignal(td, p, uap->signum);
1696		if (error == 0 && uap->signum)
1697			pksignal(p, uap->signum, &ksi);
1698		PROC_UNLOCK(p);
1699		return (error);
1700	}
1701	switch (uap->pid) {
1702	case -1:		/* broadcast signal */
1703		return (killpg1(td, uap->signum, 0, 1, &ksi));
1704	case 0:			/* signal own process group */
1705		return (killpg1(td, uap->signum, 0, 0, &ksi));
1706	default:		/* negative explicit process group */
1707		return (killpg1(td, uap->signum, -uap->pid, 0, &ksi));
1708	}
1709	/* NOTREACHED */
1710}
1711
1712int
1713sys_pdkill(td, uap)
1714	struct thread *td;
1715	struct pdkill_args *uap;
1716{
1717#ifdef PROCDESC
1718	struct proc *p;
1719	int error;
1720
1721	AUDIT_ARG_SIGNUM(uap->signum);
1722	AUDIT_ARG_FD(uap->fd);
1723	if ((u_int)uap->signum > _SIG_MAXSIG)
1724		return (EINVAL);
1725
1726	error = procdesc_find(td, uap->fd, CAP_PDKILL, &p);
1727	if (error)
1728		return (error);
1729	AUDIT_ARG_PROCESS(p);
1730	error = p_cansignal(td, p, uap->signum);
1731	if (error == 0 && uap->signum)
1732		kern_psignal(p, uap->signum);
1733	PROC_UNLOCK(p);
1734	return (error);
1735#else
1736	return (ENOSYS);
1737#endif
1738}
1739
1740#if defined(COMPAT_43)
1741#ifndef _SYS_SYSPROTO_H_
1742struct okillpg_args {
1743	int	pgid;
1744	int	signum;
1745};
1746#endif
1747/* ARGSUSED */
1748int
1749okillpg(struct thread *td, struct okillpg_args *uap)
1750{
1751	ksiginfo_t ksi;
1752
1753	AUDIT_ARG_SIGNUM(uap->signum);
1754	AUDIT_ARG_PID(uap->pgid);
1755	if ((u_int)uap->signum > _SIG_MAXSIG)
1756		return (EINVAL);
1757
1758	ksiginfo_init(&ksi);
1759	ksi.ksi_signo = uap->signum;
1760	ksi.ksi_code = SI_USER;
1761	ksi.ksi_pid = td->td_proc->p_pid;
1762	ksi.ksi_uid = td->td_ucred->cr_ruid;
1763	return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
1764}
1765#endif /* COMPAT_43 */
1766
1767#ifndef _SYS_SYSPROTO_H_
1768struct sigqueue_args {
1769	pid_t pid;
1770	int signum;
1771	/* union sigval */ void *value;
1772};
1773#endif
1774int
1775sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
1776{
1777	ksiginfo_t ksi;
1778	struct proc *p;
1779	int error;
1780
1781	if ((u_int)uap->signum > _SIG_MAXSIG)
1782		return (EINVAL);
1783
1784	/*
1785	 * Specification says sigqueue can only send signal to
1786	 * single process.
1787	 */
1788	if (uap->pid <= 0)
1789		return (EINVAL);
1790
1791	if ((p = pfind(uap->pid)) == NULL) {
1792		if ((p = zpfind(uap->pid)) == NULL)
1793			return (ESRCH);
1794	}
1795	error = p_cansignal(td, p, uap->signum);
1796	if (error == 0 && uap->signum != 0) {
1797		ksiginfo_init(&ksi);
1798		ksi.ksi_flags = KSI_SIGQ;
1799		ksi.ksi_signo = uap->signum;
1800		ksi.ksi_code = SI_QUEUE;
1801		ksi.ksi_pid = td->td_proc->p_pid;
1802		ksi.ksi_uid = td->td_ucred->cr_ruid;
1803		ksi.ksi_value.sival_ptr = uap->value;
1804		error = pksignal(p, ksi.ksi_signo, &ksi);
1805	}
1806	PROC_UNLOCK(p);
1807	return (error);
1808}
1809
1810/*
1811 * Send a signal to a process group.
1812 */
1813void
1814gsignal(int pgid, int sig, ksiginfo_t *ksi)
1815{
1816	struct pgrp *pgrp;
1817
1818	if (pgid != 0) {
1819		sx_slock(&proctree_lock);
1820		pgrp = pgfind(pgid);
1821		sx_sunlock(&proctree_lock);
1822		if (pgrp != NULL) {
1823			pgsignal(pgrp, sig, 0, ksi);
1824			PGRP_UNLOCK(pgrp);
1825		}
1826	}
1827}
1828
1829/*
1830 * Send a signal to a process group.  If checktty is 1,
1831 * limit to members which have a controlling terminal.
1832 */
1833void
1834pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
1835{
1836	struct proc *p;
1837
1838	if (pgrp) {
1839		PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
1840		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1841			PROC_LOCK(p);
1842			if (p->p_state == PRS_NORMAL &&
1843			    (checkctty == 0 || p->p_flag & P_CONTROLT))
1844				pksignal(p, sig, ksi);
1845			PROC_UNLOCK(p);
1846		}
1847	}
1848}
1849
1850/*
1851 * Send a signal caused by a trap to the current thread.  If it will be
1852 * caught immediately, deliver it with correct code.  Otherwise, post it
1853 * normally.
1854 */
1855void
1856trapsignal(struct thread *td, ksiginfo_t *ksi)
1857{
1858	struct sigacts *ps;
1859	sigset_t mask;
1860	struct proc *p;
1861	int sig;
1862	int code;
1863
1864	p = td->td_proc;
1865	sig = ksi->ksi_signo;
1866	code = ksi->ksi_code;
1867	KASSERT(_SIG_VALID(sig), ("invalid signal"));
1868
1869	PROC_LOCK(p);
1870	ps = p->p_sigacts;
1871	mtx_lock(&ps->ps_mtx);
1872	if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
1873	    !SIGISMEMBER(td->td_sigmask, sig)) {
1874		td->td_ru.ru_nsignals++;
1875#ifdef KTRACE
1876		if (KTRPOINT(curthread, KTR_PSIG))
1877			ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
1878			    &td->td_sigmask, code);
1879#endif
1880		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
1881				ksi, &td->td_sigmask);
1882		mask = ps->ps_catchmask[_SIG_IDX(sig)];
1883		if (!SIGISMEMBER(ps->ps_signodefer, sig))
1884			SIGADDSET(mask, sig);
1885		kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
1886		    SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
1887		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
1888			/*
1889			 * See kern_sigaction() for origin of this code.
1890			 */
1891			SIGDELSET(ps->ps_sigcatch, sig);
1892			if (sig != SIGCONT &&
1893			    sigprop(sig) & SA_IGNORE)
1894				SIGADDSET(ps->ps_sigignore, sig);
1895			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1896		}
1897		mtx_unlock(&ps->ps_mtx);
1898	} else {
1899		/*
1900		 * Avoid a possible infinite loop if the thread
1901		 * masking the signal or process is ignoring the
1902		 * signal.
1903		 */
1904		if (kern_forcesigexit &&
1905		    (SIGISMEMBER(td->td_sigmask, sig) ||
1906		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
1907			SIGDELSET(td->td_sigmask, sig);
1908			SIGDELSET(ps->ps_sigcatch, sig);
1909			SIGDELSET(ps->ps_sigignore, sig);
1910			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1911		}
1912		mtx_unlock(&ps->ps_mtx);
1913		p->p_code = code;	/* XXX for core dump/debugger */
1914		p->p_sig = sig;		/* XXX to verify code */
1915		tdsendsignal(p, td, sig, ksi);
1916	}
1917	PROC_UNLOCK(p);
1918}
1919
1920static struct thread *
1921sigtd(struct proc *p, int sig, int prop)
1922{
1923	struct thread *td, *signal_td;
1924
1925	PROC_LOCK_ASSERT(p, MA_OWNED);
1926
1927	/*
1928	 * Check if current thread can handle the signal without
1929	 * switching context to another thread.
1930	 */
1931	if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
1932		return (curthread);
1933	signal_td = NULL;
1934	FOREACH_THREAD_IN_PROC(p, td) {
1935		if (!SIGISMEMBER(td->td_sigmask, sig)) {
1936			signal_td = td;
1937			break;
1938		}
1939	}
1940	if (signal_td == NULL)
1941		signal_td = FIRST_THREAD_IN_PROC(p);
1942	return (signal_td);
1943}
1944
1945/*
1946 * Send the signal to the process.  If the signal has an action, the action
1947 * is usually performed by the target process rather than the caller; we add
1948 * the signal to the set of pending signals for the process.
1949 *
1950 * Exceptions:
1951 *   o When a stop signal is sent to a sleeping process that takes the
1952 *     default action, the process is stopped without awakening it.
1953 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
1954 *     regardless of the signal action (eg, blocked or ignored).
1955 *
1956 * Other ignored signals are discarded immediately.
1957 *
1958 * NB: This function may be entered from the debugger via the "kill" DDB
1959 * command.  There is little that can be done to mitigate the possibly messy
1960 * side effects of this unwise possibility.
1961 */
1962void
1963kern_psignal(struct proc *p, int sig)
1964{
1965	ksiginfo_t ksi;
1966
1967	ksiginfo_init(&ksi);
1968	ksi.ksi_signo = sig;
1969	ksi.ksi_code = SI_KERNEL;
1970	(void) tdsendsignal(p, NULL, sig, &ksi);
1971}
1972
1973int
1974pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
1975{
1976
1977	return (tdsendsignal(p, NULL, sig, ksi));
1978}
1979
1980/* Utility function for finding a thread to send signal event to. */
1981int
1982sigev_findtd(struct proc *p ,struct sigevent *sigev, struct thread **ttd)
1983{
1984	struct thread *td;
1985
1986	if (sigev->sigev_notify == SIGEV_THREAD_ID) {
1987		td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
1988		if (td == NULL)
1989			return (ESRCH);
1990		*ttd = td;
1991	} else {
1992		*ttd = NULL;
1993		PROC_LOCK(p);
1994	}
1995	return (0);
1996}
1997
1998void
1999tdsignal(struct thread *td, int sig)
2000{
2001	ksiginfo_t ksi;
2002
2003	ksiginfo_init(&ksi);
2004	ksi.ksi_signo = sig;
2005	ksi.ksi_code = SI_KERNEL;
2006	(void) tdsendsignal(td->td_proc, td, sig, &ksi);
2007}
2008
2009void
2010tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2011{
2012
2013	(void) tdsendsignal(td->td_proc, td, sig, ksi);
2014}
2015
2016int
2017tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2018{
2019	sig_t action;
2020	sigqueue_t *sigqueue;
2021	int prop;
2022	struct sigacts *ps;
2023	int intrval;
2024	int ret = 0;
2025	int wakeup_swapper;
2026
2027	MPASS(td == NULL || p == td->td_proc);
2028	PROC_LOCK_ASSERT(p, MA_OWNED);
2029
2030	if (!_SIG_VALID(sig))
2031		panic("%s(): invalid signal %d", __func__, sig);
2032
2033	KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2034
2035	/*
2036	 * IEEE Std 1003.1-2001: return success when killing a zombie.
2037	 */
2038	if (p->p_state == PRS_ZOMBIE) {
2039		if (ksi && (ksi->ksi_flags & KSI_INS))
2040			ksiginfo_tryfree(ksi);
2041		return (ret);
2042	}
2043
2044	ps = p->p_sigacts;
2045	KNOTE_LOCKED(&p->p_klist, NOTE_SIGNAL | sig);
2046	prop = sigprop(sig);
2047
2048	if (td == NULL) {
2049		td = sigtd(p, sig, prop);
2050		sigqueue = &p->p_sigqueue;
2051	} else {
2052		KASSERT(td->td_proc == p, ("invalid thread"));
2053		sigqueue = &td->td_sigqueue;
2054	}
2055
2056	SDT_PROBE(proc, kernel, , signal__send, td, p, sig, 0, 0 );
2057
2058	/*
2059	 * If the signal is being ignored,
2060	 * then we forget about it immediately.
2061	 * (Note: we don't set SIGCONT in ps_sigignore,
2062	 * and if it is set to SIG_IGN,
2063	 * action will be SIG_DFL here.)
2064	 */
2065	mtx_lock(&ps->ps_mtx);
2066	if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2067		SDT_PROBE(proc, kernel, , signal__discard, td, p, sig, 0, 0 );
2068
2069		mtx_unlock(&ps->ps_mtx);
2070		if (ksi && (ksi->ksi_flags & KSI_INS))
2071			ksiginfo_tryfree(ksi);
2072		return (ret);
2073	}
2074	if (SIGISMEMBER(td->td_sigmask, sig))
2075		action = SIG_HOLD;
2076	else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2077		action = SIG_CATCH;
2078	else
2079		action = SIG_DFL;
2080	if (SIGISMEMBER(ps->ps_sigintr, sig))
2081		intrval = EINTR;
2082	else
2083		intrval = ERESTART;
2084	mtx_unlock(&ps->ps_mtx);
2085
2086	if (prop & SA_CONT)
2087		sigqueue_delete_stopmask_proc(p);
2088	else if (prop & SA_STOP) {
2089		/*
2090		 * If sending a tty stop signal to a member of an orphaned
2091		 * process group, discard the signal here if the action
2092		 * is default; don't stop the process below if sleeping,
2093		 * and don't clear any pending SIGCONT.
2094		 */
2095		if ((prop & SA_TTYSTOP) &&
2096		    (p->p_pgrp->pg_jobc == 0) &&
2097		    (action == SIG_DFL)) {
2098			if (ksi && (ksi->ksi_flags & KSI_INS))
2099				ksiginfo_tryfree(ksi);
2100			return (ret);
2101		}
2102		sigqueue_delete_proc(p, SIGCONT);
2103		if (p->p_flag & P_CONTINUED) {
2104			p->p_flag &= ~P_CONTINUED;
2105			PROC_LOCK(p->p_pptr);
2106			sigqueue_take(p->p_ksi);
2107			PROC_UNLOCK(p->p_pptr);
2108		}
2109	}
2110
2111	ret = sigqueue_add(sigqueue, sig, ksi);
2112	if (ret != 0)
2113		return (ret);
2114	signotify(td);
2115	/*
2116	 * Defer further processing for signals which are held,
2117	 * except that stopped processes must be continued by SIGCONT.
2118	 */
2119	if (action == SIG_HOLD &&
2120	    !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG)))
2121		return (ret);
2122	/*
2123	 * SIGKILL: Remove procfs STOPEVENTs.
2124	 */
2125	if (sig == SIGKILL) {
2126		/* from procfs_ioctl.c: PIOCBIC */
2127		p->p_stops = 0;
2128		/* from procfs_ioctl.c: PIOCCONT */
2129		p->p_step = 0;
2130		wakeup(&p->p_step);
2131	}
2132	/*
2133	 * Some signals have a process-wide effect and a per-thread
2134	 * component.  Most processing occurs when the process next
2135	 * tries to cross the user boundary, however there are some
2136	 * times when processing needs to be done immediatly, such as
2137	 * waking up threads so that they can cross the user boundary.
2138	 * We try do the per-process part here.
2139	 */
2140	if (P_SHOULDSTOP(p)) {
2141		KASSERT(!(p->p_flag & P_WEXIT),
2142		    ("signal to stopped but exiting process"));
2143		if (sig == SIGKILL) {
2144			/*
2145			 * If traced process is already stopped,
2146			 * then no further action is necessary.
2147			 */
2148			if (p->p_flag & P_TRACED)
2149				goto out;
2150			/*
2151			 * SIGKILL sets process running.
2152			 * It will die elsewhere.
2153			 * All threads must be restarted.
2154			 */
2155			p->p_flag &= ~P_STOPPED_SIG;
2156			goto runfast;
2157		}
2158
2159		if (prop & SA_CONT) {
2160			/*
2161			 * If traced process is already stopped,
2162			 * then no further action is necessary.
2163			 */
2164			if (p->p_flag & P_TRACED)
2165				goto out;
2166			/*
2167			 * If SIGCONT is default (or ignored), we continue the
2168			 * process but don't leave the signal in sigqueue as
2169			 * it has no further action.  If SIGCONT is held, we
2170			 * continue the process and leave the signal in
2171			 * sigqueue.  If the process catches SIGCONT, let it
2172			 * handle the signal itself.  If it isn't waiting on
2173			 * an event, it goes back to run state.
2174			 * Otherwise, process goes back to sleep state.
2175			 */
2176			p->p_flag &= ~P_STOPPED_SIG;
2177			PROC_SLOCK(p);
2178			if (p->p_numthreads == p->p_suspcount) {
2179				PROC_SUNLOCK(p);
2180				p->p_flag |= P_CONTINUED;
2181				p->p_xstat = SIGCONT;
2182				PROC_LOCK(p->p_pptr);
2183				childproc_continued(p);
2184				PROC_UNLOCK(p->p_pptr);
2185				PROC_SLOCK(p);
2186			}
2187			if (action == SIG_DFL) {
2188				thread_unsuspend(p);
2189				PROC_SUNLOCK(p);
2190				sigqueue_delete(sigqueue, sig);
2191				goto out;
2192			}
2193			if (action == SIG_CATCH) {
2194				/*
2195				 * The process wants to catch it so it needs
2196				 * to run at least one thread, but which one?
2197				 */
2198				PROC_SUNLOCK(p);
2199				goto runfast;
2200			}
2201			/*
2202			 * The signal is not ignored or caught.
2203			 */
2204			thread_unsuspend(p);
2205			PROC_SUNLOCK(p);
2206			goto out;
2207		}
2208
2209		if (prop & SA_STOP) {
2210			/*
2211			 * If traced process is already stopped,
2212			 * then no further action is necessary.
2213			 */
2214			if (p->p_flag & P_TRACED)
2215				goto out;
2216			/*
2217			 * Already stopped, don't need to stop again
2218			 * (If we did the shell could get confused).
2219			 * Just make sure the signal STOP bit set.
2220			 */
2221			p->p_flag |= P_STOPPED_SIG;
2222			sigqueue_delete(sigqueue, sig);
2223			goto out;
2224		}
2225
2226		/*
2227		 * All other kinds of signals:
2228		 * If a thread is sleeping interruptibly, simulate a
2229		 * wakeup so that when it is continued it will be made
2230		 * runnable and can look at the signal.  However, don't make
2231		 * the PROCESS runnable, leave it stopped.
2232		 * It may run a bit until it hits a thread_suspend_check().
2233		 */
2234		wakeup_swapper = 0;
2235		PROC_SLOCK(p);
2236		thread_lock(td);
2237		if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
2238			wakeup_swapper = sleepq_abort(td, intrval);
2239		thread_unlock(td);
2240		PROC_SUNLOCK(p);
2241		if (wakeup_swapper)
2242			kick_proc0();
2243		goto out;
2244		/*
2245		 * Mutexes are short lived. Threads waiting on them will
2246		 * hit thread_suspend_check() soon.
2247		 */
2248	} else if (p->p_state == PRS_NORMAL) {
2249		if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2250			tdsigwakeup(td, sig, action, intrval);
2251			goto out;
2252		}
2253
2254		MPASS(action == SIG_DFL);
2255
2256		if (prop & SA_STOP) {
2257			if (p->p_flag & (P_PPWAIT|P_WEXIT))
2258				goto out;
2259			p->p_flag |= P_STOPPED_SIG;
2260			p->p_xstat = sig;
2261			PROC_SLOCK(p);
2262			sig_suspend_threads(td, p, 1);
2263			if (p->p_numthreads == p->p_suspcount) {
2264				/*
2265				 * only thread sending signal to another
2266				 * process can reach here, if thread is sending
2267				 * signal to its process, because thread does
2268				 * not suspend itself here, p_numthreads
2269				 * should never be equal to p_suspcount.
2270				 */
2271				thread_stopped(p);
2272				PROC_SUNLOCK(p);
2273				sigqueue_delete_proc(p, p->p_xstat);
2274			} else
2275				PROC_SUNLOCK(p);
2276			goto out;
2277		}
2278	} else {
2279		/* Not in "NORMAL" state. discard the signal. */
2280		sigqueue_delete(sigqueue, sig);
2281		goto out;
2282	}
2283
2284	/*
2285	 * The process is not stopped so we need to apply the signal to all the
2286	 * running threads.
2287	 */
2288runfast:
2289	tdsigwakeup(td, sig, action, intrval);
2290	PROC_SLOCK(p);
2291	thread_unsuspend(p);
2292	PROC_SUNLOCK(p);
2293out:
2294	/* If we jump here, proc slock should not be owned. */
2295	PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
2296	return (ret);
2297}
2298
2299/*
2300 * The force of a signal has been directed against a single
2301 * thread.  We need to see what we can do about knocking it
2302 * out of any sleep it may be in etc.
2303 */
2304static void
2305tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2306{
2307	struct proc *p = td->td_proc;
2308	register int prop;
2309	int wakeup_swapper;
2310
2311	wakeup_swapper = 0;
2312	PROC_LOCK_ASSERT(p, MA_OWNED);
2313	prop = sigprop(sig);
2314
2315	PROC_SLOCK(p);
2316	thread_lock(td);
2317	/*
2318	 * Bring the priority of a thread up if we want it to get
2319	 * killed in this lifetime.
2320	 */
2321	if (action == SIG_DFL && (prop & SA_KILL) && td->td_priority > PUSER)
2322		sched_prio(td, PUSER);
2323	if (TD_ON_SLEEPQ(td)) {
2324		/*
2325		 * If thread is sleeping uninterruptibly
2326		 * we can't interrupt the sleep... the signal will
2327		 * be noticed when the process returns through
2328		 * trap() or syscall().
2329		 */
2330		if ((td->td_flags & TDF_SINTR) == 0)
2331			goto out;
2332		/*
2333		 * If SIGCONT is default (or ignored) and process is
2334		 * asleep, we are finished; the process should not
2335		 * be awakened.
2336		 */
2337		if ((prop & SA_CONT) && action == SIG_DFL) {
2338			thread_unlock(td);
2339			PROC_SUNLOCK(p);
2340			sigqueue_delete(&p->p_sigqueue, sig);
2341			/*
2342			 * It may be on either list in this state.
2343			 * Remove from both for now.
2344			 */
2345			sigqueue_delete(&td->td_sigqueue, sig);
2346			return;
2347		}
2348
2349		/*
2350		 * Don't awaken a sleeping thread for SIGSTOP if the
2351		 * STOP signal is deferred.
2352		 */
2353		if ((prop & SA_STOP) && (td->td_flags & TDF_SBDRY))
2354			goto out;
2355
2356		/*
2357		 * Give low priority threads a better chance to run.
2358		 */
2359		if (td->td_priority > PUSER)
2360			sched_prio(td, PUSER);
2361
2362		wakeup_swapper = sleepq_abort(td, intrval);
2363	} else {
2364		/*
2365		 * Other states do nothing with the signal immediately,
2366		 * other than kicking ourselves if we are running.
2367		 * It will either never be noticed, or noticed very soon.
2368		 */
2369#ifdef SMP
2370		if (TD_IS_RUNNING(td) && td != curthread)
2371			forward_signal(td);
2372#endif
2373	}
2374out:
2375	PROC_SUNLOCK(p);
2376	thread_unlock(td);
2377	if (wakeup_swapper)
2378		kick_proc0();
2379}
2380
2381static void
2382sig_suspend_threads(struct thread *td, struct proc *p, int sending)
2383{
2384	struct thread *td2;
2385	int wakeup_swapper;
2386
2387	PROC_LOCK_ASSERT(p, MA_OWNED);
2388	PROC_SLOCK_ASSERT(p, MA_OWNED);
2389
2390	wakeup_swapper = 0;
2391	FOREACH_THREAD_IN_PROC(p, td2) {
2392		thread_lock(td2);
2393		td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
2394		if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
2395		    (td2->td_flags & TDF_SINTR)) {
2396			if (td2->td_flags & TDF_SBDRY) {
2397				/*
2398				 * Once a thread is asleep with
2399				 * TDF_SBDRY set, it should never
2400				 * become suspended due to this check.
2401				 */
2402				KASSERT(!TD_IS_SUSPENDED(td2),
2403				    ("thread with deferred stops suspended"));
2404			} else if (!TD_IS_SUSPENDED(td2)) {
2405				thread_suspend_one(td2);
2406			}
2407		} else if (!TD_IS_SUSPENDED(td2)) {
2408			if (sending || td != td2)
2409				td2->td_flags |= TDF_ASTPENDING;
2410#ifdef SMP
2411			if (TD_IS_RUNNING(td2) && td2 != td)
2412				forward_signal(td2);
2413#endif
2414		}
2415		thread_unlock(td2);
2416	}
2417	if (wakeup_swapper)
2418		kick_proc0();
2419}
2420
2421int
2422ptracestop(struct thread *td, int sig)
2423{
2424	struct proc *p = td->td_proc;
2425
2426	PROC_LOCK_ASSERT(p, MA_OWNED);
2427	KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2428	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2429	    &p->p_mtx.lock_object, "Stopping for traced signal");
2430
2431	td->td_dbgflags |= TDB_XSIG;
2432	td->td_xsig = sig;
2433	PROC_SLOCK(p);
2434	while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2435		if (p->p_flag & P_SINGLE_EXIT) {
2436			td->td_dbgflags &= ~TDB_XSIG;
2437			PROC_SUNLOCK(p);
2438			return (sig);
2439		}
2440		/*
2441		 * Just make wait() to work, the last stopped thread
2442		 * will win.
2443		 */
2444		p->p_xstat = sig;
2445		p->p_xthread = td;
2446		p->p_flag |= (P_STOPPED_SIG|P_STOPPED_TRACE);
2447		sig_suspend_threads(td, p, 0);
2448		if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2449			td->td_dbgflags &= ~TDB_STOPATFORK;
2450			cv_broadcast(&p->p_dbgwait);
2451		}
2452stopme:
2453		thread_suspend_switch(td);
2454		if (!(p->p_flag & P_TRACED)) {
2455			break;
2456		}
2457		if (td->td_dbgflags & TDB_SUSPEND) {
2458			if (p->p_flag & P_SINGLE_EXIT)
2459				break;
2460			goto stopme;
2461		}
2462	}
2463	PROC_SUNLOCK(p);
2464	return (td->td_xsig);
2465}
2466
2467static void
2468reschedule_signals(struct proc *p, sigset_t block, int flags)
2469{
2470	struct sigacts *ps;
2471	struct thread *td;
2472	int sig;
2473
2474	PROC_LOCK_ASSERT(p, MA_OWNED);
2475	if (SIGISEMPTY(p->p_siglist))
2476		return;
2477	ps = p->p_sigacts;
2478	SIGSETAND(block, p->p_siglist);
2479	while ((sig = sig_ffs(&block)) != 0) {
2480		SIGDELSET(block, sig);
2481		td = sigtd(p, sig, 0);
2482		signotify(td);
2483		if (!(flags & SIGPROCMASK_PS_LOCKED))
2484			mtx_lock(&ps->ps_mtx);
2485		if (p->p_flag & P_TRACED || SIGISMEMBER(ps->ps_sigcatch, sig))
2486			tdsigwakeup(td, sig, SIG_CATCH,
2487			    (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2488			     ERESTART));
2489		if (!(flags & SIGPROCMASK_PS_LOCKED))
2490			mtx_unlock(&ps->ps_mtx);
2491	}
2492}
2493
2494void
2495tdsigcleanup(struct thread *td)
2496{
2497	struct proc *p;
2498	sigset_t unblocked;
2499
2500	p = td->td_proc;
2501	PROC_LOCK_ASSERT(p, MA_OWNED);
2502
2503	sigqueue_flush(&td->td_sigqueue);
2504	if (p->p_numthreads == 1)
2505		return;
2506
2507	/*
2508	 * Since we cannot handle signals, notify signal post code
2509	 * about this by filling the sigmask.
2510	 *
2511	 * Also, if needed, wake up thread(s) that do not block the
2512	 * same signals as the exiting thread, since the thread might
2513	 * have been selected for delivery and woken up.
2514	 */
2515	SIGFILLSET(unblocked);
2516	SIGSETNAND(unblocked, td->td_sigmask);
2517	SIGFILLSET(td->td_sigmask);
2518	reschedule_signals(p, unblocked, 0);
2519
2520}
2521
2522/*
2523 * Defer the delivery of SIGSTOP for the current thread.  Returns true
2524 * if stops were deferred and false if they were already deferred.
2525 */
2526int
2527sigdeferstop(void)
2528{
2529	struct thread *td;
2530
2531	td = curthread;
2532	if (td->td_flags & TDF_SBDRY)
2533		return (0);
2534	thread_lock(td);
2535	td->td_flags |= TDF_SBDRY;
2536	thread_unlock(td);
2537	return (1);
2538}
2539
2540/*
2541 * Permit the delivery of SIGSTOP for the current thread.  This does
2542 * not immediately suspend if a stop was posted.  Instead, the thread
2543 * will suspend either via ast() or a subsequent interruptible sleep.
2544 */
2545void
2546sigallowstop()
2547{
2548	struct thread *td;
2549
2550	td = curthread;
2551	thread_lock(td);
2552	td->td_flags &= ~TDF_SBDRY;
2553	thread_unlock(td);
2554}
2555
2556/*
2557 * If the current process has received a signal (should be caught or cause
2558 * termination, should interrupt current syscall), return the signal number.
2559 * Stop signals with default action are processed immediately, then cleared;
2560 * they aren't returned.  This is checked after each entry to the system for
2561 * a syscall or trap (though this can usually be done without calling issignal
2562 * by checking the pending signal masks in cursig.) The normal call
2563 * sequence is
2564 *
2565 *	while (sig = cursig(curthread))
2566 *		postsig(sig);
2567 */
2568static int
2569issignal(struct thread *td, int stop_allowed)
2570{
2571	struct proc *p;
2572	struct sigacts *ps;
2573	struct sigqueue *queue;
2574	sigset_t sigpending;
2575	int sig, prop, newsig;
2576
2577	p = td->td_proc;
2578	ps = p->p_sigacts;
2579	mtx_assert(&ps->ps_mtx, MA_OWNED);
2580	PROC_LOCK_ASSERT(p, MA_OWNED);
2581	for (;;) {
2582		int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
2583
2584		sigpending = td->td_sigqueue.sq_signals;
2585		SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
2586		SIGSETNAND(sigpending, td->td_sigmask);
2587
2588		if (p->p_flag & P_PPWAIT || td->td_flags & TDF_SBDRY)
2589			SIG_STOPSIGMASK(sigpending);
2590		if (SIGISEMPTY(sigpending))	/* no signal to send */
2591			return (0);
2592		sig = sig_ffs(&sigpending);
2593
2594		if (p->p_stops & S_SIG) {
2595			mtx_unlock(&ps->ps_mtx);
2596			stopevent(p, S_SIG, sig);
2597			mtx_lock(&ps->ps_mtx);
2598		}
2599
2600		/*
2601		 * We should see pending but ignored signals
2602		 * only if P_TRACED was on when they were posted.
2603		 */
2604		if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
2605			sigqueue_delete(&td->td_sigqueue, sig);
2606			sigqueue_delete(&p->p_sigqueue, sig);
2607			continue;
2608		}
2609		if (p->p_flag & P_TRACED && (p->p_flag & P_PPTRACE) == 0) {
2610			/*
2611			 * If traced, always stop.
2612			 * Remove old signal from queue before the stop.
2613			 * XXX shrug off debugger, it causes siginfo to
2614			 * be thrown away.
2615			 */
2616			queue = &td->td_sigqueue;
2617			td->td_dbgksi.ksi_signo = 0;
2618			if (sigqueue_get(queue, sig, &td->td_dbgksi) == 0) {
2619				queue = &p->p_sigqueue;
2620				sigqueue_get(queue, sig, &td->td_dbgksi);
2621			}
2622
2623			mtx_unlock(&ps->ps_mtx);
2624			newsig = ptracestop(td, sig);
2625			mtx_lock(&ps->ps_mtx);
2626
2627			if (sig != newsig) {
2628
2629				/*
2630				 * If parent wants us to take the signal,
2631				 * then it will leave it in p->p_xstat;
2632				 * otherwise we just look for signals again.
2633			 	*/
2634				if (newsig == 0)
2635					continue;
2636				sig = newsig;
2637
2638				/*
2639				 * Put the new signal into td_sigqueue. If the
2640				 * signal is being masked, look for other signals.
2641				 */
2642				sigqueue_add(queue, sig, NULL);
2643				if (SIGISMEMBER(td->td_sigmask, sig))
2644					continue;
2645				signotify(td);
2646			} else {
2647				if (td->td_dbgksi.ksi_signo != 0) {
2648					td->td_dbgksi.ksi_flags |= KSI_HEAD;
2649					if (sigqueue_add(&td->td_sigqueue, sig,
2650					    &td->td_dbgksi) != 0)
2651						td->td_dbgksi.ksi_signo = 0;
2652				}
2653				if (td->td_dbgksi.ksi_signo == 0)
2654					sigqueue_add(&td->td_sigqueue, sig,
2655					    NULL);
2656			}
2657
2658			/*
2659			 * If the traced bit got turned off, go back up
2660			 * to the top to rescan signals.  This ensures
2661			 * that p_sig* and p_sigact are consistent.
2662			 */
2663			if ((p->p_flag & P_TRACED) == 0)
2664				continue;
2665		}
2666
2667		prop = sigprop(sig);
2668
2669		/*
2670		 * Decide whether the signal should be returned.
2671		 * Return the signal's number, or fall through
2672		 * to clear it from the pending mask.
2673		 */
2674		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
2675
2676		case (intptr_t)SIG_DFL:
2677			/*
2678			 * Don't take default actions on system processes.
2679			 */
2680			if (p->p_pid <= 1) {
2681#ifdef DIAGNOSTIC
2682				/*
2683				 * Are you sure you want to ignore SIGSEGV
2684				 * in init? XXX
2685				 */
2686				printf("Process (pid %lu) got signal %d\n",
2687					(u_long)p->p_pid, sig);
2688#endif
2689				break;		/* == ignore */
2690			}
2691			/*
2692			 * If there is a pending stop signal to process
2693			 * with default action, stop here,
2694			 * then clear the signal.  However,
2695			 * if process is member of an orphaned
2696			 * process group, ignore tty stop signals.
2697			 */
2698			if (prop & SA_STOP) {
2699				if (p->p_flag & (P_TRACED|P_WEXIT) ||
2700		    		    (p->p_pgrp->pg_jobc == 0 &&
2701				     prop & SA_TTYSTOP))
2702					break;	/* == ignore */
2703				mtx_unlock(&ps->ps_mtx);
2704				WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2705				    &p->p_mtx.lock_object, "Catching SIGSTOP");
2706				p->p_flag |= P_STOPPED_SIG;
2707				p->p_xstat = sig;
2708				PROC_SLOCK(p);
2709				sig_suspend_threads(td, p, 0);
2710				thread_suspend_switch(td);
2711				PROC_SUNLOCK(p);
2712				mtx_lock(&ps->ps_mtx);
2713				break;
2714			} else if (prop & SA_IGNORE) {
2715				/*
2716				 * Except for SIGCONT, shouldn't get here.
2717				 * Default action is to ignore; drop it.
2718				 */
2719				break;		/* == ignore */
2720			} else
2721				return (sig);
2722			/*NOTREACHED*/
2723
2724		case (intptr_t)SIG_IGN:
2725			/*
2726			 * Masking above should prevent us ever trying
2727			 * to take action on an ignored signal other
2728			 * than SIGCONT, unless process is traced.
2729			 */
2730			if ((prop & SA_CONT) == 0 &&
2731			    (p->p_flag & P_TRACED) == 0)
2732				printf("issignal\n");
2733			break;		/* == ignore */
2734
2735		default:
2736			/*
2737			 * This signal has an action, let
2738			 * postsig() process it.
2739			 */
2740			return (sig);
2741		}
2742		sigqueue_delete(&td->td_sigqueue, sig);		/* take the signal! */
2743		sigqueue_delete(&p->p_sigqueue, sig);
2744	}
2745	/* NOTREACHED */
2746}
2747
2748void
2749thread_stopped(struct proc *p)
2750{
2751	int n;
2752
2753	PROC_LOCK_ASSERT(p, MA_OWNED);
2754	PROC_SLOCK_ASSERT(p, MA_OWNED);
2755	n = p->p_suspcount;
2756	if (p == curproc)
2757		n++;
2758	if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
2759		PROC_SUNLOCK(p);
2760		p->p_flag &= ~P_WAITED;
2761		PROC_LOCK(p->p_pptr);
2762		childproc_stopped(p, (p->p_flag & P_TRACED) ?
2763			CLD_TRAPPED : CLD_STOPPED);
2764		PROC_UNLOCK(p->p_pptr);
2765		PROC_SLOCK(p);
2766	}
2767}
2768
2769/*
2770 * Take the action for the specified signal
2771 * from the current set of pending signals.
2772 */
2773int
2774postsig(sig)
2775	register int sig;
2776{
2777	struct thread *td = curthread;
2778	register struct proc *p = td->td_proc;
2779	struct sigacts *ps;
2780	sig_t action;
2781	ksiginfo_t ksi;
2782	sigset_t returnmask, mask;
2783
2784	KASSERT(sig != 0, ("postsig"));
2785
2786	PROC_LOCK_ASSERT(p, MA_OWNED);
2787	ps = p->p_sigacts;
2788	mtx_assert(&ps->ps_mtx, MA_OWNED);
2789	ksiginfo_init(&ksi);
2790	if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
2791	    sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
2792		return (0);
2793	ksi.ksi_signo = sig;
2794	if (ksi.ksi_code == SI_TIMER)
2795		itimer_accept(p, ksi.ksi_timerid, &ksi);
2796	action = ps->ps_sigact[_SIG_IDX(sig)];
2797#ifdef KTRACE
2798	if (KTRPOINT(td, KTR_PSIG))
2799		ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
2800		    &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
2801#endif
2802	if (p->p_stops & S_SIG) {
2803		mtx_unlock(&ps->ps_mtx);
2804		stopevent(p, S_SIG, sig);
2805		mtx_lock(&ps->ps_mtx);
2806	}
2807
2808	if (action == SIG_DFL) {
2809		/*
2810		 * Default action, where the default is to kill
2811		 * the process.  (Other cases were ignored above.)
2812		 */
2813		mtx_unlock(&ps->ps_mtx);
2814		sigexit(td, sig);
2815		/* NOTREACHED */
2816	} else {
2817		/*
2818		 * If we get here, the signal must be caught.
2819		 */
2820		KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig),
2821		    ("postsig action"));
2822		/*
2823		 * Set the new mask value and also defer further
2824		 * occurrences of this signal.
2825		 *
2826		 * Special case: user has done a sigsuspend.  Here the
2827		 * current mask is not of interest, but rather the
2828		 * mask from before the sigsuspend is what we want
2829		 * restored after the signal processing is completed.
2830		 */
2831		if (td->td_pflags & TDP_OLDMASK) {
2832			returnmask = td->td_oldsigmask;
2833			td->td_pflags &= ~TDP_OLDMASK;
2834		} else
2835			returnmask = td->td_sigmask;
2836
2837		mask = ps->ps_catchmask[_SIG_IDX(sig)];
2838		if (!SIGISMEMBER(ps->ps_signodefer, sig))
2839			SIGADDSET(mask, sig);
2840		kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
2841		    SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
2842
2843		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
2844			/*
2845			 * See kern_sigaction() for origin of this code.
2846			 */
2847			SIGDELSET(ps->ps_sigcatch, sig);
2848			if (sig != SIGCONT &&
2849			    sigprop(sig) & SA_IGNORE)
2850				SIGADDSET(ps->ps_sigignore, sig);
2851			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2852		}
2853		td->td_ru.ru_nsignals++;
2854		if (p->p_sig == sig) {
2855			p->p_code = 0;
2856			p->p_sig = 0;
2857		}
2858		(*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
2859	}
2860	return (1);
2861}
2862
2863/*
2864 * Kill the current process for stated reason.
2865 */
2866void
2867killproc(p, why)
2868	struct proc *p;
2869	char *why;
2870{
2871
2872	PROC_LOCK_ASSERT(p, MA_OWNED);
2873	CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)",
2874		p, p->p_pid, p->p_comm);
2875	log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm,
2876		p->p_ucred ? p->p_ucred->cr_uid : -1, why);
2877	p->p_flag |= P_WKILLED;
2878	kern_psignal(p, SIGKILL);
2879}
2880
2881/*
2882 * Force the current process to exit with the specified signal, dumping core
2883 * if appropriate.  We bypass the normal tests for masked and caught signals,
2884 * allowing unrecoverable failures to terminate the process without changing
2885 * signal state.  Mark the accounting record with the signal termination.
2886 * If dumping core, save the signal number for the debugger.  Calls exit and
2887 * does not return.
2888 */
2889void
2890sigexit(td, sig)
2891	struct thread *td;
2892	int sig;
2893{
2894	struct proc *p = td->td_proc;
2895
2896	PROC_LOCK_ASSERT(p, MA_OWNED);
2897	p->p_acflag |= AXSIG;
2898	/*
2899	 * We must be single-threading to generate a core dump.  This
2900	 * ensures that the registers in the core file are up-to-date.
2901	 * Also, the ELF dump handler assumes that the thread list doesn't
2902	 * change out from under it.
2903	 *
2904	 * XXX If another thread attempts to single-thread before us
2905	 *     (e.g. via fork()), we won't get a dump at all.
2906	 */
2907	if ((sigprop(sig) & SA_CORE) && (thread_single(SINGLE_NO_EXIT) == 0)) {
2908		p->p_sig = sig;
2909		/*
2910		 * Log signals which would cause core dumps
2911		 * (Log as LOG_INFO to appease those who don't want
2912		 * these messages.)
2913		 * XXX : Todo, as well as euid, write out ruid too
2914		 * Note that coredump() drops proc lock.
2915		 */
2916		if (coredump(td) == 0)
2917			sig |= WCOREFLAG;
2918		if (kern_logsigexit)
2919			log(LOG_INFO,
2920			    "pid %d (%s), uid %d: exited on signal %d%s\n",
2921			    p->p_pid, p->p_comm,
2922			    td->td_ucred ? td->td_ucred->cr_uid : -1,
2923			    sig &~ WCOREFLAG,
2924			    sig & WCOREFLAG ? " (core dumped)" : "");
2925	} else
2926		PROC_UNLOCK(p);
2927	exit1(td, W_EXITCODE(0, sig));
2928	/* NOTREACHED */
2929}
2930
2931/*
2932 * Send queued SIGCHLD to parent when child process's state
2933 * is changed.
2934 */
2935static void
2936sigparent(struct proc *p, int reason, int status)
2937{
2938	PROC_LOCK_ASSERT(p, MA_OWNED);
2939	PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
2940
2941	if (p->p_ksi != NULL) {
2942		p->p_ksi->ksi_signo  = SIGCHLD;
2943		p->p_ksi->ksi_code   = reason;
2944		p->p_ksi->ksi_status = status;
2945		p->p_ksi->ksi_pid    = p->p_pid;
2946		p->p_ksi->ksi_uid    = p->p_ucred->cr_ruid;
2947		if (KSI_ONQ(p->p_ksi))
2948			return;
2949	}
2950	pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
2951}
2952
2953static void
2954childproc_jobstate(struct proc *p, int reason, int sig)
2955{
2956	struct sigacts *ps;
2957
2958	PROC_LOCK_ASSERT(p, MA_OWNED);
2959	PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
2960
2961	/*
2962	 * Wake up parent sleeping in kern_wait(), also send
2963	 * SIGCHLD to parent, but SIGCHLD does not guarantee
2964	 * that parent will awake, because parent may masked
2965	 * the signal.
2966	 */
2967	p->p_pptr->p_flag |= P_STATCHILD;
2968	wakeup(p->p_pptr);
2969
2970	ps = p->p_pptr->p_sigacts;
2971	mtx_lock(&ps->ps_mtx);
2972	if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
2973		mtx_unlock(&ps->ps_mtx);
2974		sigparent(p, reason, sig);
2975	} else
2976		mtx_unlock(&ps->ps_mtx);
2977}
2978
2979void
2980childproc_stopped(struct proc *p, int reason)
2981{
2982	/* p_xstat is a plain signal number, not a full wait() status here. */
2983	childproc_jobstate(p, reason, p->p_xstat);
2984}
2985
2986void
2987childproc_continued(struct proc *p)
2988{
2989	childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
2990}
2991
2992void
2993childproc_exited(struct proc *p)
2994{
2995	int reason;
2996	int xstat = p->p_xstat; /* convert to int */
2997	int status;
2998
2999	if (WCOREDUMP(xstat))
3000		reason = CLD_DUMPED, status = WTERMSIG(xstat);
3001	else if (WIFSIGNALED(xstat))
3002		reason = CLD_KILLED, status = WTERMSIG(xstat);
3003	else
3004		reason = CLD_EXITED, status = WEXITSTATUS(xstat);
3005	/*
3006	 * XXX avoid calling wakeup(p->p_pptr), the work is
3007	 * done in exit1().
3008	 */
3009	sigparent(p, reason, status);
3010}
3011
3012/*
3013 * We only have 1 character for the core count in the format
3014 * string, so the range will be 0-9
3015 */
3016#define MAX_NUM_CORES 10
3017static int num_cores = 5;
3018
3019static int
3020sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS)
3021{
3022	int error;
3023	int new_val;
3024
3025	new_val = num_cores;
3026	error = sysctl_handle_int(oidp, &new_val, 0, req);
3027	if (error != 0 || req->newptr == NULL)
3028		return (error);
3029	if (new_val > MAX_NUM_CORES)
3030		new_val = MAX_NUM_CORES;
3031	if (new_val < 0)
3032		new_val = 0;
3033	num_cores = new_val;
3034	return (0);
3035}
3036SYSCTL_PROC(_debug, OID_AUTO, ncores, CTLTYPE_INT|CTLFLAG_RW,
3037	    0, sizeof(int), sysctl_debug_num_cores_check, "I", "");
3038
3039#if defined(COMPRESS_USER_CORES)
3040int compress_user_cores = 1;
3041SYSCTL_INT(_kern, OID_AUTO, compress_user_cores, CTLFLAG_RW,
3042        &compress_user_cores, 0, "");
3043
3044int compress_user_cores_gzlevel = -1; /* default level */
3045SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_gzlevel, CTLFLAG_RW,
3046    &compress_user_cores_gzlevel, -1, "user core gz compression level");
3047
3048#define GZ_SUFFIX	".gz"
3049#define GZ_SUFFIX_LEN	3
3050#endif
3051
3052static char corefilename[MAXPATHLEN] = {"%N.core"};
3053SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
3054	      sizeof(corefilename), "process corefile name format string");
3055
3056/*
3057 * expand_name(name, uid, pid, td, compress)
3058 * Expand the name described in corefilename, using name, uid, and pid.
3059 * corefilename is a printf-like string, with three format specifiers:
3060 *	%N	name of process ("name")
3061 *	%P	process id (pid)
3062 *	%U	user id (uid)
3063 * For example, "%N.core" is the default; they can be disabled completely
3064 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
3065 * This is controlled by the sysctl variable kern.corefile (see above).
3066 */
3067static char *
3068expand_name(const char *name, uid_t uid, pid_t pid, struct thread *td,
3069    int compress)
3070{
3071	struct sbuf sb;
3072	const char *format;
3073	char *temp;
3074	size_t i;
3075	int indexpos;
3076	char *hostname;
3077
3078	hostname = NULL;
3079	format = corefilename;
3080	temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO);
3081	if (temp == NULL)
3082		return (NULL);
3083	indexpos = -1;
3084	(void)sbuf_new(&sb, temp, MAXPATHLEN, SBUF_FIXEDLEN);
3085	for (i = 0; format[i]; i++) {
3086		switch (format[i]) {
3087		case '%':	/* Format character */
3088			i++;
3089			switch (format[i]) {
3090			case '%':
3091				sbuf_putc(&sb, '%');
3092				break;
3093			case 'H':	/* hostname */
3094				if (hostname == NULL) {
3095					hostname = malloc(MAXHOSTNAMELEN,
3096					    M_TEMP, M_NOWAIT);
3097					if (hostname == NULL) {
3098						log(LOG_ERR,
3099						    "pid %ld (%s), uid (%lu): "
3100						    "unable to alloc memory "
3101						    "for corefile hostname\n",
3102						    (long)pid, name,
3103						    (u_long)uid);
3104                                                goto nomem;
3105                                        }
3106                                }
3107				getcredhostname(td->td_ucred, hostname,
3108				    MAXHOSTNAMELEN);
3109				sbuf_printf(&sb, "%s", hostname);
3110				break;
3111			case 'I':       /* autoincrementing index */
3112				sbuf_printf(&sb, "0");
3113				indexpos = sbuf_len(&sb) - 1;
3114				break;
3115			case 'N':	/* process name */
3116				sbuf_printf(&sb, "%s", name);
3117				break;
3118			case 'P':	/* process id */
3119				sbuf_printf(&sb, "%u", pid);
3120				break;
3121			case 'U':	/* user id */
3122				sbuf_printf(&sb, "%u", uid);
3123				break;
3124			default:
3125			  	log(LOG_ERR,
3126				    "Unknown format character %c in "
3127				    "corename `%s'\n", format[i], format);
3128			}
3129			break;
3130		default:
3131			sbuf_putc(&sb, format[i]);
3132		}
3133	}
3134	free(hostname, M_TEMP);
3135#ifdef COMPRESS_USER_CORES
3136	if (compress) {
3137		sbuf_printf(&sb, GZ_SUFFIX);
3138	}
3139#endif
3140	if (sbuf_error(&sb) != 0) {
3141		log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
3142		    "long\n", (long)pid, name, (u_long)uid);
3143nomem:
3144		sbuf_delete(&sb);
3145		free(temp, M_TEMP);
3146		return (NULL);
3147	}
3148	sbuf_finish(&sb);
3149	sbuf_delete(&sb);
3150
3151	/*
3152	 * If the core format has a %I in it, then we need to check
3153	 * for existing corefiles before returning a name.
3154	 * To do this we iterate over 0..num_cores to find a
3155	 * non-existing core file name to use.
3156	 */
3157	if (indexpos != -1) {
3158		struct nameidata nd;
3159		int error, n;
3160		int flags = O_CREAT | O_EXCL | FWRITE | O_NOFOLLOW;
3161		int cmode = S_IRUSR | S_IWUSR;
3162		int vfslocked;
3163
3164		for (n = 0; n < num_cores; n++) {
3165			temp[indexpos] = '0' + n;
3166			NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_SYSSPACE,
3167			    temp, td);
3168			error = vn_open(&nd, &flags, cmode, NULL);
3169			if (error) {
3170				if (error == EEXIST) {
3171					continue;
3172				}
3173				log(LOG_ERR,
3174				    "pid %d (%s), uid (%u):  Path `%s' failed "
3175                                    "on initial open test, error = %d\n",
3176				    pid, name, uid, temp, error);
3177				free(temp, M_TEMP);
3178				return (NULL);
3179			}
3180			vfslocked = NDHASGIANT(&nd);
3181			NDFREE(&nd, NDF_ONLY_PNBUF);
3182			VOP_UNLOCK(nd.ni_vp, 0);
3183			error = vn_close(nd.ni_vp, FWRITE, td->td_ucred, td);
3184			VFS_UNLOCK_GIANT(vfslocked);
3185			if (error) {
3186				log(LOG_ERR,
3187				    "pid %d (%s), uid (%u):  Path `%s' failed "
3188                                    "on close after initial open test, "
3189                                    "error = %d\n",
3190				    pid, name, uid, temp, error);
3191				free(temp, M_TEMP);
3192				return (NULL);
3193			}
3194			break;
3195		}
3196	}
3197	return (temp);
3198}
3199
3200/*
3201 * Dump a process' core.  The main routine does some
3202 * policy checking, and creates the name of the coredump;
3203 * then it passes on a vnode and a size limit to the process-specific
3204 * coredump routine if there is one; if there _is not_ one, it returns
3205 * ENOSYS; otherwise it returns the error from the process-specific routine.
3206 */
3207
3208static int
3209coredump(struct thread *td)
3210{
3211	struct proc *p = td->td_proc;
3212	register struct vnode *vp;
3213	register struct ucred *cred = td->td_ucred;
3214	struct flock lf;
3215	struct nameidata nd;
3216	struct vattr vattr;
3217	int error, error1, flags, locked;
3218	struct mount *mp;
3219	char *name;			/* name of corefile */
3220	off_t limit;
3221	int vfslocked;
3222	int compress;
3223
3224#ifdef COMPRESS_USER_CORES
3225	compress = compress_user_cores;
3226#else
3227	compress = 0;
3228#endif
3229	PROC_LOCK_ASSERT(p, MA_OWNED);
3230	MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
3231	_STOPEVENT(p, S_CORE, 0);
3232
3233	name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid, td,
3234	    compress);
3235	if (name == NULL) {
3236		PROC_UNLOCK(p);
3237#ifdef AUDIT
3238		audit_proc_coredump(td, NULL, EINVAL);
3239#endif
3240		return (EINVAL);
3241	}
3242	if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) {
3243		PROC_UNLOCK(p);
3244#ifdef AUDIT
3245		audit_proc_coredump(td, name, EFAULT);
3246#endif
3247		free(name, M_TEMP);
3248		return (EFAULT);
3249	}
3250
3251	/*
3252	 * Note that the bulk of limit checking is done after
3253	 * the corefile is created.  The exception is if the limit
3254	 * for corefiles is 0, in which case we don't bother
3255	 * creating the corefile at all.  This layout means that
3256	 * a corefile is truncated instead of not being created,
3257	 * if it is larger than the limit.
3258	 */
3259	limit = (off_t)lim_cur(p, RLIMIT_CORE);
3260	if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
3261		PROC_UNLOCK(p);
3262#ifdef AUDIT
3263		audit_proc_coredump(td, name, EFBIG);
3264#endif
3265		free(name, M_TEMP);
3266		return (EFBIG);
3267	}
3268	PROC_UNLOCK(p);
3269
3270restart:
3271	NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_SYSSPACE, name, td);
3272	flags = O_CREAT | FWRITE | O_NOFOLLOW;
3273	error = vn_open_cred(&nd, &flags, S_IRUSR | S_IWUSR, VN_OPEN_NOAUDIT,
3274	    cred, NULL);
3275	if (error) {
3276#ifdef AUDIT
3277		audit_proc_coredump(td, name, error);
3278#endif
3279		free(name, M_TEMP);
3280		return (error);
3281	}
3282	vfslocked = NDHASGIANT(&nd);
3283	NDFREE(&nd, NDF_ONLY_PNBUF);
3284	vp = nd.ni_vp;
3285
3286	/* Don't dump to non-regular files or files with links. */
3287	if (vp->v_type != VREG ||
3288	    VOP_GETATTR(vp, &vattr, cred) || vattr.va_nlink != 1) {
3289		VOP_UNLOCK(vp, 0);
3290		error = EFAULT;
3291		goto close;
3292	}
3293
3294	VOP_UNLOCK(vp, 0);
3295	lf.l_whence = SEEK_SET;
3296	lf.l_start = 0;
3297	lf.l_len = 0;
3298	lf.l_type = F_WRLCK;
3299	locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
3300
3301	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3302		lf.l_type = F_UNLCK;
3303		if (locked)
3304			VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
3305		if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
3306			goto out;
3307		if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
3308			goto out;
3309		VFS_UNLOCK_GIANT(vfslocked);
3310		goto restart;
3311	}
3312
3313	VATTR_NULL(&vattr);
3314	vattr.va_size = 0;
3315	if (set_core_nodump_flag)
3316		vattr.va_flags = UF_NODUMP;
3317	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3318	VOP_SETATTR(vp, &vattr, cred);
3319	VOP_UNLOCK(vp, 0);
3320	vn_finished_write(mp);
3321	PROC_LOCK(p);
3322	p->p_acflag |= ACORE;
3323	PROC_UNLOCK(p);
3324
3325	error = p->p_sysent->sv_coredump ?
3326	  p->p_sysent->sv_coredump(td, vp, limit, compress ? IMGACT_CORE_COMPRESS : 0) :
3327	  ENOSYS;
3328
3329	if (locked) {
3330		lf.l_type = F_UNLCK;
3331		VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
3332	}
3333close:
3334	error1 = vn_close(vp, FWRITE, cred, td);
3335	if (error == 0)
3336		error = error1;
3337out:
3338#ifdef AUDIT
3339	audit_proc_coredump(td, name, error);
3340#endif
3341	free(name, M_TEMP);
3342	VFS_UNLOCK_GIANT(vfslocked);
3343	return (error);
3344}
3345
3346/*
3347 * Nonexistent system call-- signal process (may want to handle it).  Flag
3348 * error in case process won't see signal immediately (blocked or ignored).
3349 */
3350#ifndef _SYS_SYSPROTO_H_
3351struct nosys_args {
3352	int	dummy;
3353};
3354#endif
3355/* ARGSUSED */
3356int
3357nosys(td, args)
3358	struct thread *td;
3359	struct nosys_args *args;
3360{
3361	struct proc *p = td->td_proc;
3362
3363	PROC_LOCK(p);
3364	tdsignal(td, SIGSYS);
3365	PROC_UNLOCK(p);
3366	return (ENOSYS);
3367}
3368
3369/*
3370 * Send a SIGIO or SIGURG signal to a process or process group using stored
3371 * credentials rather than those of the current process.
3372 */
3373void
3374pgsigio(sigiop, sig, checkctty)
3375	struct sigio **sigiop;
3376	int sig, checkctty;
3377{
3378	ksiginfo_t ksi;
3379	struct sigio *sigio;
3380
3381	ksiginfo_init(&ksi);
3382	ksi.ksi_signo = sig;
3383	ksi.ksi_code = SI_KERNEL;
3384
3385	SIGIO_LOCK();
3386	sigio = *sigiop;
3387	if (sigio == NULL) {
3388		SIGIO_UNLOCK();
3389		return;
3390	}
3391	if (sigio->sio_pgid > 0) {
3392		PROC_LOCK(sigio->sio_proc);
3393		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
3394			kern_psignal(sigio->sio_proc, sig);
3395		PROC_UNLOCK(sigio->sio_proc);
3396	} else if (sigio->sio_pgid < 0) {
3397		struct proc *p;
3398
3399		PGRP_LOCK(sigio->sio_pgrp);
3400		LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
3401			PROC_LOCK(p);
3402			if (p->p_state == PRS_NORMAL &&
3403			    CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
3404			    (checkctty == 0 || (p->p_flag & P_CONTROLT)))
3405				kern_psignal(p, sig);
3406			PROC_UNLOCK(p);
3407		}
3408		PGRP_UNLOCK(sigio->sio_pgrp);
3409	}
3410	SIGIO_UNLOCK();
3411}
3412
3413static int
3414filt_sigattach(struct knote *kn)
3415{
3416	struct proc *p = curproc;
3417
3418	kn->kn_ptr.p_proc = p;
3419	kn->kn_flags |= EV_CLEAR;		/* automatically set */
3420
3421	knlist_add(&p->p_klist, kn, 0);
3422
3423	return (0);
3424}
3425
3426static void
3427filt_sigdetach(struct knote *kn)
3428{
3429	struct proc *p = kn->kn_ptr.p_proc;
3430
3431	knlist_remove(&p->p_klist, kn, 0);
3432}
3433
3434/*
3435 * signal knotes are shared with proc knotes, so we apply a mask to
3436 * the hint in order to differentiate them from process hints.  This
3437 * could be avoided by using a signal-specific knote list, but probably
3438 * isn't worth the trouble.
3439 */
3440static int
3441filt_signal(struct knote *kn, long hint)
3442{
3443
3444	if (hint & NOTE_SIGNAL) {
3445		hint &= ~NOTE_SIGNAL;
3446
3447		if (kn->kn_id == hint)
3448			kn->kn_data++;
3449	}
3450	return (kn->kn_data != 0);
3451}
3452
3453struct sigacts *
3454sigacts_alloc(void)
3455{
3456	struct sigacts *ps;
3457
3458	ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
3459	ps->ps_refcnt = 1;
3460	mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
3461	return (ps);
3462}
3463
3464void
3465sigacts_free(struct sigacts *ps)
3466{
3467
3468	mtx_lock(&ps->ps_mtx);
3469	ps->ps_refcnt--;
3470	if (ps->ps_refcnt == 0) {
3471		mtx_destroy(&ps->ps_mtx);
3472		free(ps, M_SUBPROC);
3473	} else
3474		mtx_unlock(&ps->ps_mtx);
3475}
3476
3477struct sigacts *
3478sigacts_hold(struct sigacts *ps)
3479{
3480	mtx_lock(&ps->ps_mtx);
3481	ps->ps_refcnt++;
3482	mtx_unlock(&ps->ps_mtx);
3483	return (ps);
3484}
3485
3486void
3487sigacts_copy(struct sigacts *dest, struct sigacts *src)
3488{
3489
3490	KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
3491	mtx_lock(&src->ps_mtx);
3492	bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
3493	mtx_unlock(&src->ps_mtx);
3494}
3495
3496int
3497sigacts_shared(struct sigacts *ps)
3498{
3499	int shared;
3500
3501	mtx_lock(&ps->ps_mtx);
3502	shared = ps->ps_refcnt > 1;
3503	mtx_unlock(&ps->ps_mtx);
3504	return (shared);
3505}
3506