1/*
2 *  linux/kernel/signal.c
3 *
4 *  Copyright (C) 1991, 1992  Linus Torvalds
5 *
6 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7 *
8 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9 *		Changes to use preallocated sigqueue structures
10 *		to allow signals to be sent reliably.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/security.h>
21#include <linux/syscalls.h>
22#include <linux/ptrace.h>
23#include <linux/signal.h>
24#include <linux/signalfd.h>
25#include <linux/capability.h>
26#include <linux/freezer.h>
27#include <linux/pid_namespace.h>
28#include <linux/nsproxy.h>
29
30#include <asm/param.h>
31#include <asm/uaccess.h>
32#include <asm/unistd.h>
33#include <asm/siginfo.h>
34#include "audit.h"	/* audit_signal_info() */
35
36/*
37 * SLAB caches for signal bits.
38 */
39
40static struct kmem_cache *sigqueue_cachep;
41
42
43static int sig_ignored(struct task_struct *t, int sig)
44{
45	void __user * handler;
46
47	/*
48	 * Tracers always want to know about signals..
49	 */
50	if (t->ptrace & PT_PTRACED)
51		return 0;
52
53	/*
54	 * Blocked signals are never ignored, since the
55	 * signal handler may change by the time it is
56	 * unblocked.
57	 */
58	if (sigismember(&t->blocked, sig))
59		return 0;
60
61	/* Is it explicitly or implicitly ignored? */
62	handler = t->sighand->action[sig-1].sa.sa_handler;
63	return   handler == SIG_IGN ||
64		(handler == SIG_DFL && sig_kernel_ignore(sig));
65}
66
67/*
68 * Re-calculate pending state from the set of locally pending
69 * signals, globally pending signals, and blocked signals.
70 */
71static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
72{
73	unsigned long ready;
74	long i;
75
76	switch (_NSIG_WORDS) {
77	default:
78		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
79			ready |= signal->sig[i] &~ blocked->sig[i];
80		break;
81
82	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
83		ready |= signal->sig[2] &~ blocked->sig[2];
84		ready |= signal->sig[1] &~ blocked->sig[1];
85		ready |= signal->sig[0] &~ blocked->sig[0];
86		break;
87
88	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
89		ready |= signal->sig[0] &~ blocked->sig[0];
90		break;
91
92	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
93	}
94	return ready !=	0;
95}
96
97#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
98
99static int recalc_sigpending_tsk(struct task_struct *t)
100{
101	if (t->signal->group_stop_count > 0 ||
102	    (freezing(t)) ||
103	    PENDING(&t->pending, &t->blocked) ||
104	    PENDING(&t->signal->shared_pending, &t->blocked)) {
105		set_tsk_thread_flag(t, TIF_SIGPENDING);
106		return 1;
107	}
108	/*
109	 * We must never clear the flag in another thread, or in current
110	 * when it's possible the current syscall is returning -ERESTART*.
111	 * So we don't clear it here, and only callers who know they should do.
112	 */
113	return 0;
114}
115
116/*
117 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
118 * This is superfluous when called on current, the wakeup is a harmless no-op.
119 */
120void recalc_sigpending_and_wake(struct task_struct *t)
121{
122	if (recalc_sigpending_tsk(t))
123		signal_wake_up(t, 0);
124}
125
126void recalc_sigpending(void)
127{
128	if (!recalc_sigpending_tsk(current))
129		clear_thread_flag(TIF_SIGPENDING);
130
131}
132
133/* Given the mask, find the first available signal that should be serviced. */
134
135int next_signal(struct sigpending *pending, sigset_t *mask)
136{
137	unsigned long i, *s, *m, x;
138	int sig = 0;
139
140	s = pending->signal.sig;
141	m = mask->sig;
142	switch (_NSIG_WORDS) {
143	default:
144		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
145			if ((x = *s &~ *m) != 0) {
146				sig = ffz(~x) + i*_NSIG_BPW + 1;
147				break;
148			}
149		break;
150
151	case 2: if ((x = s[0] &~ m[0]) != 0)
152			sig = 1;
153		else if ((x = s[1] &~ m[1]) != 0)
154			sig = _NSIG_BPW + 1;
155		else
156			break;
157		sig += ffz(~x);
158		break;
159
160	case 1: if ((x = *s &~ *m) != 0)
161			sig = ffz(~x) + 1;
162		break;
163	}
164
165	return sig;
166}
167
168static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
169					 int override_rlimit)
170{
171	struct sigqueue *q = NULL;
172	struct user_struct *user;
173
174	/*
175	 * In order to avoid problems with "switch_user()", we want to make
176	 * sure that the compiler doesn't re-load "t->user"
177	 */
178	user = t->user;
179	barrier();
180	atomic_inc(&user->sigpending);
181	if (override_rlimit ||
182	    atomic_read(&user->sigpending) <=
183			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
184		q = kmem_cache_alloc(sigqueue_cachep, flags);
185	if (unlikely(q == NULL)) {
186		atomic_dec(&user->sigpending);
187	} else {
188		INIT_LIST_HEAD(&q->list);
189		q->flags = 0;
190		q->user = get_uid(user);
191	}
192	return(q);
193}
194
195static void __sigqueue_free(struct sigqueue *q)
196{
197	if (q->flags & SIGQUEUE_PREALLOC)
198		return;
199	atomic_dec(&q->user->sigpending);
200	free_uid(q->user);
201	kmem_cache_free(sigqueue_cachep, q);
202}
203
204void flush_sigqueue(struct sigpending *queue)
205{
206	struct sigqueue *q;
207
208	sigemptyset(&queue->signal);
209	while (!list_empty(&queue->list)) {
210		q = list_entry(queue->list.next, struct sigqueue , list);
211		list_del_init(&q->list);
212		__sigqueue_free(q);
213	}
214}
215
216/*
217 * Flush all pending signals for a task.
218 */
219void flush_signals(struct task_struct *t)
220{
221	unsigned long flags;
222
223	spin_lock_irqsave(&t->sighand->siglock, flags);
224	clear_tsk_thread_flag(t,TIF_SIGPENDING);
225	flush_sigqueue(&t->pending);
226	flush_sigqueue(&t->signal->shared_pending);
227	spin_unlock_irqrestore(&t->sighand->siglock, flags);
228}
229
230void ignore_signals(struct task_struct *t)
231{
232	int i;
233
234	for (i = 0; i < _NSIG; ++i)
235		t->sighand->action[i].sa.sa_handler = SIG_IGN;
236
237	flush_signals(t);
238}
239
240/*
241 * Flush all handlers for a task.
242 */
243
244void
245flush_signal_handlers(struct task_struct *t, int force_default)
246{
247	int i;
248	struct k_sigaction *ka = &t->sighand->action[0];
249	for (i = _NSIG ; i != 0 ; i--) {
250		if (force_default || ka->sa.sa_handler != SIG_IGN)
251			ka->sa.sa_handler = SIG_DFL;
252		ka->sa.sa_flags = 0;
253		sigemptyset(&ka->sa.sa_mask);
254		ka++;
255	}
256}
257
258
259/* Notify the system that a driver wants to block all signals for this
260 * process, and wants to be notified if any signals at all were to be
261 * sent/acted upon.  If the notifier routine returns non-zero, then the
262 * signal will be acted upon after all.  If the notifier routine returns 0,
263 * then then signal will be blocked.  Only one block per process is
264 * allowed.  priv is a pointer to private data that the notifier routine
265 * can use to determine if the signal should be blocked or not.  */
266
267void
268block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
269{
270	unsigned long flags;
271
272	spin_lock_irqsave(&current->sighand->siglock, flags);
273	current->notifier_mask = mask;
274	current->notifier_data = priv;
275	current->notifier = notifier;
276	spin_unlock_irqrestore(&current->sighand->siglock, flags);
277}
278
279/* Notify the system that blocking has ended. */
280
281void
282unblock_all_signals(void)
283{
284	unsigned long flags;
285
286	spin_lock_irqsave(&current->sighand->siglock, flags);
287	current->notifier = NULL;
288	current->notifier_data = NULL;
289	recalc_sigpending();
290	spin_unlock_irqrestore(&current->sighand->siglock, flags);
291}
292
293static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
294{
295	struct sigqueue *q, *first = NULL;
296	int still_pending = 0;
297
298	if (unlikely(!sigismember(&list->signal, sig)))
299		return 0;
300
301	/*
302	 * Collect the siginfo appropriate to this signal.  Check if
303	 * there is another siginfo for the same signal.
304	*/
305	list_for_each_entry(q, &list->list, list) {
306		if (q->info.si_signo == sig) {
307			if (first) {
308				still_pending = 1;
309				break;
310			}
311			first = q;
312		}
313	}
314	if (first) {
315		list_del_init(&first->list);
316		copy_siginfo(info, &first->info);
317		__sigqueue_free(first);
318		if (!still_pending)
319			sigdelset(&list->signal, sig);
320	} else {
321
322		/* Ok, it wasn't in the queue.  This must be
323		   a fast-pathed signal or we must have been
324		   out of queue space.  So zero out the info.
325		 */
326		sigdelset(&list->signal, sig);
327		info->si_signo = sig;
328		info->si_errno = 0;
329		info->si_code = 0;
330		info->si_pid = 0;
331		info->si_uid = 0;
332	}
333	return 1;
334}
335
336static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
337			siginfo_t *info)
338{
339	int sig = next_signal(pending, mask);
340
341	if (sig) {
342		if (current->notifier) {
343			if (sigismember(current->notifier_mask, sig)) {
344				if (!(current->notifier)(current->notifier_data)) {
345					clear_thread_flag(TIF_SIGPENDING);
346					return 0;
347				}
348			}
349		}
350
351		if (!collect_signal(sig, pending, info))
352			sig = 0;
353	}
354
355	return sig;
356}
357
358/*
359 * Dequeue a signal and return the element to the caller, which is
360 * expected to free it.
361 *
362 * All callers have to hold the siglock.
363 */
364int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
365{
366	int signr = 0;
367
368	/* We only dequeue private signals from ourselves, we don't let
369	 * signalfd steal them
370	 */
371	if (tsk == current)
372		signr = __dequeue_signal(&tsk->pending, mask, info);
373	if (!signr) {
374		signr = __dequeue_signal(&tsk->signal->shared_pending,
375					 mask, info);
376		/*
377		 * itimer signal ?
378		 *
379		 * itimers are process shared and we restart periodic
380		 * itimers in the signal delivery path to prevent DoS
381		 * attacks in the high resolution timer case. This is
382		 * compliant with the old way of self restarting
383		 * itimers, as the SIGALRM is a legacy signal and only
384		 * queued once. Changing the restart behaviour to
385		 * restart the timer in the signal dequeue path is
386		 * reducing the timer noise on heavy loaded !highres
387		 * systems too.
388		 */
389		if (unlikely(signr == SIGALRM)) {
390			struct hrtimer *tmr = &tsk->signal->real_timer;
391
392			if (!hrtimer_is_queued(tmr) &&
393			    tsk->signal->it_real_incr.tv64 != 0) {
394				hrtimer_forward(tmr, tmr->base->get_time(),
395						tsk->signal->it_real_incr);
396				hrtimer_restart(tmr);
397			}
398		}
399	}
400	if (likely(tsk == current))
401		recalc_sigpending();
402	if (signr && unlikely(sig_kernel_stop(signr))) {
403		/*
404		 * Set a marker that we have dequeued a stop signal.  Our
405		 * caller might release the siglock and then the pending
406		 * stop signal it is about to process is no longer in the
407		 * pending bitmasks, but must still be cleared by a SIGCONT
408		 * (and overruled by a SIGKILL).  So those cases clear this
409		 * shared flag after we've set it.  Note that this flag may
410		 * remain set after the signal we return is ignored or
411		 * handled.  That doesn't matter because its only purpose
412		 * is to alert stop-signal processing code when another
413		 * processor has come along and cleared the flag.
414		 */
415		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
416			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
417	}
418	if ( signr &&
419	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
420	     info->si_sys_private){
421		/*
422		 * Release the siglock to ensure proper locking order
423		 * of timer locks outside of siglocks.  Note, we leave
424		 * irqs disabled here, since the posix-timers code is
425		 * about to disable them again anyway.
426		 */
427		spin_unlock(&tsk->sighand->siglock);
428		do_schedule_next_timer(info);
429		spin_lock(&tsk->sighand->siglock);
430	}
431	return signr;
432}
433
434/*
435 * Tell a process that it has a new active signal..
436 *
437 * NOTE! we rely on the previous spin_lock to
438 * lock interrupts for us! We can only be called with
439 * "siglock" held, and the local interrupt must
440 * have been disabled when that got acquired!
441 *
442 * No need to set need_resched since signal event passing
443 * goes through ->blocked
444 */
445void signal_wake_up(struct task_struct *t, int resume)
446{
447	unsigned int mask;
448
449	set_tsk_thread_flag(t, TIF_SIGPENDING);
450
451	/*
452	 * For SIGKILL, we want to wake it up in the stopped/traced case.
453	 * We don't check t->state here because there is a race with it
454	 * executing another processor and just now entering stopped state.
455	 * By using wake_up_state, we ensure the process will wake up and
456	 * handle its death signal.
457	 */
458	mask = TASK_INTERRUPTIBLE;
459	if (resume)
460		mask |= TASK_STOPPED | TASK_TRACED;
461	if (!wake_up_state(t, mask))
462		kick_process(t);
463}
464
465/*
466 * Remove signals in mask from the pending set and queue.
467 * Returns 1 if any signals were found.
468 *
469 * All callers must be holding the siglock.
470 *
471 * This version takes a sigset mask and looks at all signals,
472 * not just those in the first mask word.
473 */
474static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
475{
476	struct sigqueue *q, *n;
477	sigset_t m;
478
479	sigandsets(&m, mask, &s->signal);
480	if (sigisemptyset(&m))
481		return 0;
482
483	signandsets(&s->signal, &s->signal, mask);
484	list_for_each_entry_safe(q, n, &s->list, list) {
485		if (sigismember(mask, q->info.si_signo)) {
486			list_del_init(&q->list);
487			__sigqueue_free(q);
488		}
489	}
490	return 1;
491}
492/*
493 * Remove signals in mask from the pending set and queue.
494 * Returns 1 if any signals were found.
495 *
496 * All callers must be holding the siglock.
497 */
498static int rm_from_queue(unsigned long mask, struct sigpending *s)
499{
500	struct sigqueue *q, *n;
501
502	if (!sigtestsetmask(&s->signal, mask))
503		return 0;
504
505	sigdelsetmask(&s->signal, mask);
506	list_for_each_entry_safe(q, n, &s->list, list) {
507		if (q->info.si_signo < SIGRTMIN &&
508		    (mask & sigmask(q->info.si_signo))) {
509			list_del_init(&q->list);
510			__sigqueue_free(q);
511		}
512	}
513	return 1;
514}
515
516/*
517 * Bad permissions for sending the signal
518 */
519static int check_kill_permission(int sig, struct siginfo *info,
520				 struct task_struct *t)
521{
522	int error = -EINVAL;
523	if (!valid_signal(sig))
524		return error;
525
526	error = audit_signal_info(sig, t); /* Let audit system see the signal */
527	if (error)
528		return error;
529
530	error = -EPERM;
531	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
532	    && ((sig != SIGCONT) ||
533		(process_session(current) != process_session(t)))
534	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
535	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
536	    && !capable(CAP_KILL))
537		return error;
538
539	return security_task_kill(t, info, sig, 0);
540}
541
542/* forward decl */
543static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
544
545/*
546 * Handle magic process-wide effects of stop/continue signals.
547 * Unlike the signal actions, these happen immediately at signal-generation
548 * time regardless of blocking, ignoring, or handling.  This does the
549 * actual continuing for SIGCONT, but not the actual stopping for stop
550 * signals.  The process stop is done as a signal action for SIG_DFL.
551 */
552static void handle_stop_signal(int sig, struct task_struct *p)
553{
554	struct task_struct *t;
555
556	if (p->signal->flags & SIGNAL_GROUP_EXIT)
557		/*
558		 * The process is in the middle of dying already.
559		 */
560		return;
561
562	if (sig_kernel_stop(sig)) {
563		/*
564		 * This is a stop signal.  Remove SIGCONT from all queues.
565		 */
566		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
567		t = p;
568		do {
569			rm_from_queue(sigmask(SIGCONT), &t->pending);
570			t = next_thread(t);
571		} while (t != p);
572	} else if (sig == SIGCONT) {
573		/*
574		 * Remove all stop signals from all queues,
575		 * and wake all threads.
576		 */
577		if (unlikely(p->signal->group_stop_count > 0)) {
578			/*
579			 * There was a group stop in progress.  We'll
580			 * pretend it finished before we got here.  We are
581			 * obliged to report it to the parent: if the
582			 * SIGSTOP happened "after" this SIGCONT, then it
583			 * would have cleared this pending SIGCONT.  If it
584			 * happened "before" this SIGCONT, then the parent
585			 * got the SIGCHLD about the stop finishing before
586			 * the continue happened.  We do the notification
587			 * now, and it's as if the stop had finished and
588			 * the SIGCHLD was pending on entry to this kill.
589			 */
590			p->signal->group_stop_count = 0;
591			p->signal->flags = SIGNAL_STOP_CONTINUED;
592			spin_unlock(&p->sighand->siglock);
593			do_notify_parent_cldstop(p, CLD_STOPPED);
594			spin_lock(&p->sighand->siglock);
595		}
596		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
597		t = p;
598		do {
599			unsigned int state;
600			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
601
602			/*
603			 * If there is a handler for SIGCONT, we must make
604			 * sure that no thread returns to user mode before
605			 * we post the signal, in case it was the only
606			 * thread eligible to run the signal handler--then
607			 * it must not do anything between resuming and
608			 * running the handler.  With the TIF_SIGPENDING
609			 * flag set, the thread will pause and acquire the
610			 * siglock that we hold now and until we've queued
611			 * the pending signal.
612			 *
613			 * Wake up the stopped thread _after_ setting
614			 * TIF_SIGPENDING
615			 */
616			state = TASK_STOPPED;
617			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
618				set_tsk_thread_flag(t, TIF_SIGPENDING);
619				state |= TASK_INTERRUPTIBLE;
620			}
621			wake_up_state(t, state);
622
623			t = next_thread(t);
624		} while (t != p);
625
626		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
627			/*
628			 * We were in fact stopped, and are now continued.
629			 * Notify the parent with CLD_CONTINUED.
630			 */
631			p->signal->flags = SIGNAL_STOP_CONTINUED;
632			p->signal->group_exit_code = 0;
633			spin_unlock(&p->sighand->siglock);
634			do_notify_parent_cldstop(p, CLD_CONTINUED);
635			spin_lock(&p->sighand->siglock);
636		} else {
637			/*
638			 * We are not stopped, but there could be a stop
639			 * signal in the middle of being processed after
640			 * being removed from the queue.  Clear that too.
641			 */
642			p->signal->flags = 0;
643		}
644	} else if (sig == SIGKILL) {
645		/*
646		 * Make sure that any pending stop signal already dequeued
647		 * is undone by the wakeup for SIGKILL.
648		 */
649		p->signal->flags = 0;
650	}
651}
652
653static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
654			struct sigpending *signals)
655{
656	struct sigqueue * q = NULL;
657	int ret = 0;
658
659	/*
660	 * Deliver the signal to listening signalfds. This must be called
661	 * with the sighand lock held.
662	 */
663	signalfd_notify(t, sig);
664
665	/*
666	 * fast-pathed signals for kernel-internal things like SIGSTOP
667	 * or SIGKILL.
668	 */
669	if (info == SEND_SIG_FORCED)
670		goto out_set;
671
672	/* Real-time signals must be queued if sent by sigqueue, or
673	   some other real-time mechanism.  It is implementation
674	   defined whether kill() does so.  We attempt to do so, on
675	   the principle of least surprise, but since kill is not
676	   allowed to fail with EAGAIN when low on memory we just
677	   make sure at least one signal gets delivered and don't
678	   pass on the info struct.  */
679
680	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
681					     (is_si_special(info) ||
682					      info->si_code >= 0)));
683	if (q) {
684		list_add_tail(&q->list, &signals->list);
685		switch ((unsigned long) info) {
686		case (unsigned long) SEND_SIG_NOINFO:
687			q->info.si_signo = sig;
688			q->info.si_errno = 0;
689			q->info.si_code = SI_USER;
690			q->info.si_pid = current->pid;
691			q->info.si_uid = current->uid;
692			break;
693		case (unsigned long) SEND_SIG_PRIV:
694			q->info.si_signo = sig;
695			q->info.si_errno = 0;
696			q->info.si_code = SI_KERNEL;
697			q->info.si_pid = 0;
698			q->info.si_uid = 0;
699			break;
700		default:
701			copy_siginfo(&q->info, info);
702			break;
703		}
704	} else if (!is_si_special(info)) {
705		if (sig >= SIGRTMIN && info->si_code != SI_USER)
706		/*
707		 * Queue overflow, abort.  We may abort if the signal was rt
708		 * and sent by user using something other than kill().
709		 */
710			return -EAGAIN;
711	}
712
713out_set:
714	sigaddset(&signals->signal, sig);
715	return ret;
716}
717
718#define LEGACY_QUEUE(sigptr, sig) \
719	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
720
721
722static int
723specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
724{
725	int ret = 0;
726
727	BUG_ON(!irqs_disabled());
728	assert_spin_locked(&t->sighand->siglock);
729
730	/* Short-circuit ignored signals.  */
731	if (sig_ignored(t, sig))
732		goto out;
733
734	/* Support queueing exactly one non-rt signal, so that we
735	   can get more detailed information about the cause of
736	   the signal. */
737	if (LEGACY_QUEUE(&t->pending, sig))
738		goto out;
739
740	ret = send_signal(sig, info, t, &t->pending);
741	if (!ret && !sigismember(&t->blocked, sig))
742		signal_wake_up(t, sig == SIGKILL);
743out:
744	return ret;
745}
746
747/*
748 * Force a signal that the process can't ignore: if necessary
749 * we unblock the signal and change any SIG_IGN to SIG_DFL.
750 *
751 * Note: If we unblock the signal, we always reset it to SIG_DFL,
752 * since we do not want to have a signal handler that was blocked
753 * be invoked when user space had explicitly blocked it.
754 *
755 * We don't want to have recursive SIGSEGV's etc, for example.
756 */
757int
758force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
759{
760	unsigned long int flags;
761	int ret, blocked, ignored;
762	struct k_sigaction *action;
763
764	spin_lock_irqsave(&t->sighand->siglock, flags);
765	action = &t->sighand->action[sig-1];
766	ignored = action->sa.sa_handler == SIG_IGN;
767	blocked = sigismember(&t->blocked, sig);
768	if (blocked || ignored) {
769		action->sa.sa_handler = SIG_DFL;
770		if (blocked) {
771			sigdelset(&t->blocked, sig);
772			recalc_sigpending_and_wake(t);
773		}
774	}
775	ret = specific_send_sig_info(sig, info, t);
776	spin_unlock_irqrestore(&t->sighand->siglock, flags);
777
778	return ret;
779}
780
781void
782force_sig_specific(int sig, struct task_struct *t)
783{
784	force_sig_info(sig, SEND_SIG_FORCED, t);
785}
786
787/*
788 * Test if P wants to take SIG.  After we've checked all threads with this,
789 * it's equivalent to finding no threads not blocking SIG.  Any threads not
790 * blocking SIG were ruled out because they are not running and already
791 * have pending signals.  Such threads will dequeue from the shared queue
792 * as soon as they're available, so putting the signal on the shared queue
793 * will be equivalent to sending it to one such thread.
794 */
795static inline int wants_signal(int sig, struct task_struct *p)
796{
797	if (sigismember(&p->blocked, sig))
798		return 0;
799	if (p->flags & PF_EXITING)
800		return 0;
801	if (sig == SIGKILL)
802		return 1;
803	if (p->state & (TASK_STOPPED | TASK_TRACED))
804		return 0;
805	return task_curr(p) || !signal_pending(p);
806}
807
808static void
809__group_complete_signal(int sig, struct task_struct *p)
810{
811	struct task_struct *t;
812
813	/*
814	 * Now find a thread we can wake up to take the signal off the queue.
815	 *
816	 * If the main thread wants the signal, it gets first crack.
817	 * Probably the least surprising to the average bear.
818	 */
819	if (wants_signal(sig, p))
820		t = p;
821	else if (thread_group_empty(p))
822		/*
823		 * There is just one thread and it does not need to be woken.
824		 * It will dequeue unblocked signals before it runs again.
825		 */
826		return;
827	else {
828		/*
829		 * Otherwise try to find a suitable thread.
830		 */
831		t = p->signal->curr_target;
832		if (t == NULL)
833			/* restart balancing at this thread */
834			t = p->signal->curr_target = p;
835
836		while (!wants_signal(sig, t)) {
837			t = next_thread(t);
838			if (t == p->signal->curr_target)
839				/*
840				 * No thread needs to be woken.
841				 * Any eligible threads will see
842				 * the signal in the queue soon.
843				 */
844				return;
845		}
846		p->signal->curr_target = t;
847	}
848
849	/*
850	 * Found a killable thread.  If the signal will be fatal,
851	 * then start taking the whole group down immediately.
852	 */
853	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
854	    !sigismember(&t->real_blocked, sig) &&
855	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
856		/*
857		 * This signal will be fatal to the whole group.
858		 */
859		if (!sig_kernel_coredump(sig)) {
860			/*
861			 * Start a group exit and wake everybody up.
862			 * This way we don't have other threads
863			 * running and doing things after a slower
864			 * thread has the fatal signal pending.
865			 */
866			p->signal->flags = SIGNAL_GROUP_EXIT;
867			p->signal->group_exit_code = sig;
868			p->signal->group_stop_count = 0;
869			t = p;
870			do {
871				sigaddset(&t->pending.signal, SIGKILL);
872				signal_wake_up(t, 1);
873				t = next_thread(t);
874			} while (t != p);
875			return;
876		}
877
878		/*
879		 * There will be a core dump.  We make all threads other
880		 * than the chosen one go into a group stop so that nothing
881		 * happens until it gets scheduled, takes the signal off
882		 * the shared queue, and does the core dump.  This is a
883		 * little more complicated than strictly necessary, but it
884		 * keeps the signal state that winds up in the core dump
885		 * unchanged from the death state, e.g. which thread had
886		 * the core-dump signal unblocked.
887		 */
888		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
889		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
890		p->signal->group_stop_count = 0;
891		p->signal->group_exit_task = t;
892		t = p;
893		do {
894			p->signal->group_stop_count++;
895			signal_wake_up(t, 0);
896			t = next_thread(t);
897		} while (t != p);
898		wake_up_process(p->signal->group_exit_task);
899		return;
900	}
901
902	/*
903	 * The signal is already in the shared-pending queue.
904	 * Tell the chosen thread to wake up and dequeue it.
905	 */
906	signal_wake_up(t, sig == SIGKILL);
907	return;
908}
909
910int
911__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
912{
913	int ret = 0;
914
915	assert_spin_locked(&p->sighand->siglock);
916	handle_stop_signal(sig, p);
917
918	/* Short-circuit ignored signals.  */
919	if (sig_ignored(p, sig))
920		return ret;
921
922	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
923		/* This is a non-RT signal and we already have one queued.  */
924		return ret;
925
926	/*
927	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
928	 * We always use the shared queue for process-wide signals,
929	 * to avoid several races.
930	 */
931	ret = send_signal(sig, info, p, &p->signal->shared_pending);
932	if (unlikely(ret))
933		return ret;
934
935	__group_complete_signal(sig, p);
936	return 0;
937}
938
939/*
940 * Nuke all other threads in the group.
941 */
942void zap_other_threads(struct task_struct *p)
943{
944	struct task_struct *t;
945
946	p->signal->flags = SIGNAL_GROUP_EXIT;
947	p->signal->group_stop_count = 0;
948
949	if (thread_group_empty(p))
950		return;
951
952	for (t = next_thread(p); t != p; t = next_thread(t)) {
953		/*
954		 * Don't bother with already dead threads
955		 */
956		if (t->exit_state)
957			continue;
958
959		/* SIGKILL will be handled before any pending SIGSTOP */
960		sigaddset(&t->pending.signal, SIGKILL);
961		signal_wake_up(t, 1);
962	}
963}
964
965/*
966 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
967 */
968struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
969{
970	struct sighand_struct *sighand;
971
972	for (;;) {
973		sighand = rcu_dereference(tsk->sighand);
974		if (unlikely(sighand == NULL))
975			break;
976
977		spin_lock_irqsave(&sighand->siglock, *flags);
978		if (likely(sighand == tsk->sighand))
979			break;
980		spin_unlock_irqrestore(&sighand->siglock, *flags);
981	}
982
983	return sighand;
984}
985
986int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
987{
988	unsigned long flags;
989	int ret;
990
991	ret = check_kill_permission(sig, info, p);
992
993	if (!ret && sig) {
994		ret = -ESRCH;
995		if (lock_task_sighand(p, &flags)) {
996			ret = __group_send_sig_info(sig, info, p);
997			unlock_task_sighand(p, &flags);
998		}
999	}
1000
1001	return ret;
1002}
1003
1004/*
1005 * kill_pgrp_info() sends a signal to a process group: this is what the tty
1006 * control characters do (^C, ^Z etc)
1007 */
1008
1009int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1010{
1011	struct task_struct *p = NULL;
1012	int retval, success;
1013
1014	success = 0;
1015	retval = -ESRCH;
1016	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1017		int err = group_send_sig_info(sig, info, p);
1018		success |= !err;
1019		retval = err;
1020	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1021	return success ? 0 : retval;
1022}
1023
1024int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1025{
1026	int retval;
1027
1028	read_lock(&tasklist_lock);
1029	retval = __kill_pgrp_info(sig, info, pgrp);
1030	read_unlock(&tasklist_lock);
1031
1032	return retval;
1033}
1034
1035int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1036{
1037	int error;
1038	struct task_struct *p;
1039
1040	rcu_read_lock();
1041	if (unlikely(sig_needs_tasklist(sig)))
1042		read_lock(&tasklist_lock);
1043
1044	p = pid_task(pid, PIDTYPE_PID);
1045	error = -ESRCH;
1046	if (p)
1047		error = group_send_sig_info(sig, info, p);
1048
1049	if (unlikely(sig_needs_tasklist(sig)))
1050		read_unlock(&tasklist_lock);
1051	rcu_read_unlock();
1052	return error;
1053}
1054
1055int
1056kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1057{
1058	int error;
1059	rcu_read_lock();
1060	error = kill_pid_info(sig, info, find_pid(pid));
1061	rcu_read_unlock();
1062	return error;
1063}
1064
1065/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1066int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1067		      uid_t uid, uid_t euid, u32 secid)
1068{
1069	int ret = -EINVAL;
1070	struct task_struct *p;
1071
1072	if (!valid_signal(sig))
1073		return ret;
1074
1075	read_lock(&tasklist_lock);
1076	p = pid_task(pid, PIDTYPE_PID);
1077	if (!p) {
1078		ret = -ESRCH;
1079		goto out_unlock;
1080	}
1081	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1082	    && (euid != p->suid) && (euid != p->uid)
1083	    && (uid != p->suid) && (uid != p->uid)) {
1084		ret = -EPERM;
1085		goto out_unlock;
1086	}
1087	ret = security_task_kill(p, info, sig, secid);
1088	if (ret)
1089		goto out_unlock;
1090	if (sig && p->sighand) {
1091		unsigned long flags;
1092		spin_lock_irqsave(&p->sighand->siglock, flags);
1093		ret = __group_send_sig_info(sig, info, p);
1094		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1095	}
1096out_unlock:
1097	read_unlock(&tasklist_lock);
1098	return ret;
1099}
1100EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1101
1102/*
1103 * kill_something_info() interprets pid in interesting ways just like kill(2).
1104 *
1105 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1106 * is probably wrong.  Should make it like BSD or SYSV.
1107 */
1108
1109static int kill_something_info(int sig, struct siginfo *info, int pid)
1110{
1111	int ret;
1112	rcu_read_lock();
1113	if (!pid) {
1114		ret = kill_pgrp_info(sig, info, task_pgrp(current));
1115	} else if (pid == -1) {
1116		int retval = 0, count = 0;
1117		struct task_struct * p;
1118
1119		read_lock(&tasklist_lock);
1120		for_each_process(p) {
1121			if (p->pid > 1 && p->tgid != current->tgid) {
1122				int err = group_send_sig_info(sig, info, p);
1123				++count;
1124				if (err != -EPERM)
1125					retval = err;
1126			}
1127		}
1128		read_unlock(&tasklist_lock);
1129		ret = count ? retval : -ESRCH;
1130	} else if (pid < 0) {
1131		ret = kill_pgrp_info(sig, info, find_pid(-pid));
1132	} else {
1133		ret = kill_pid_info(sig, info, find_pid(pid));
1134	}
1135	rcu_read_unlock();
1136	return ret;
1137}
1138
1139/*
1140 * These are for backward compatibility with the rest of the kernel source.
1141 */
1142
1143/*
1144 * These two are the most common entry points.  They send a signal
1145 * just to the specific thread.
1146 */
1147int
1148send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1149{
1150	int ret;
1151	unsigned long flags;
1152
1153	/*
1154	 * Make sure legacy kernel users don't send in bad values
1155	 * (normal paths check this in check_kill_permission).
1156	 */
1157	if (!valid_signal(sig))
1158		return -EINVAL;
1159
1160	/*
1161	 * We need the tasklist lock even for the specific
1162	 * thread case (when we don't need to follow the group
1163	 * lists) in order to avoid races with "p->sighand"
1164	 * going away or changing from under us.
1165	 */
1166	read_lock(&tasklist_lock);
1167	spin_lock_irqsave(&p->sighand->siglock, flags);
1168	ret = specific_send_sig_info(sig, info, p);
1169	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1170	read_unlock(&tasklist_lock);
1171	return ret;
1172}
1173
1174#define __si_special(priv) \
1175	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1176
1177int
1178send_sig(int sig, struct task_struct *p, int priv)
1179{
1180	return send_sig_info(sig, __si_special(priv), p);
1181}
1182
1183/*
1184 * This is the entry point for "process-wide" signals.
1185 * They will go to an appropriate thread in the thread group.
1186 */
1187int
1188send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1189{
1190	int ret;
1191	read_lock(&tasklist_lock);
1192	ret = group_send_sig_info(sig, info, p);
1193	read_unlock(&tasklist_lock);
1194	return ret;
1195}
1196
1197void
1198force_sig(int sig, struct task_struct *p)
1199{
1200	force_sig_info(sig, SEND_SIG_PRIV, p);
1201}
1202
1203/*
1204 * When things go south during signal handling, we
1205 * will force a SIGSEGV. And if the signal that caused
1206 * the problem was already a SIGSEGV, we'll want to
1207 * make sure we don't even try to deliver the signal..
1208 */
1209int
1210force_sigsegv(int sig, struct task_struct *p)
1211{
1212	if (sig == SIGSEGV) {
1213		unsigned long flags;
1214		spin_lock_irqsave(&p->sighand->siglock, flags);
1215		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1216		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1217	}
1218	force_sig(SIGSEGV, p);
1219	return 0;
1220}
1221
1222int kill_pgrp(struct pid *pid, int sig, int priv)
1223{
1224	return kill_pgrp_info(sig, __si_special(priv), pid);
1225}
1226EXPORT_SYMBOL(kill_pgrp);
1227
1228int kill_pid(struct pid *pid, int sig, int priv)
1229{
1230	return kill_pid_info(sig, __si_special(priv), pid);
1231}
1232EXPORT_SYMBOL(kill_pid);
1233
1234int
1235kill_proc(pid_t pid, int sig, int priv)
1236{
1237	return kill_proc_info(sig, __si_special(priv), pid);
1238}
1239
1240/*
1241 * These functions support sending signals using preallocated sigqueue
1242 * structures.  This is needed "because realtime applications cannot
1243 * afford to lose notifications of asynchronous events, like timer
1244 * expirations or I/O completions".  In the case of Posix Timers
1245 * we allocate the sigqueue structure from the timer_create.  If this
1246 * allocation fails we are able to report the failure to the application
1247 * with an EAGAIN error.
1248 */
1249
1250struct sigqueue *sigqueue_alloc(void)
1251{
1252	struct sigqueue *q;
1253
1254	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1255		q->flags |= SIGQUEUE_PREALLOC;
1256	return(q);
1257}
1258
1259void sigqueue_free(struct sigqueue *q)
1260{
1261	unsigned long flags;
1262	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1263	/*
1264	 * If the signal is still pending remove it from the
1265	 * pending queue.
1266	 */
1267	if (unlikely(!list_empty(&q->list))) {
1268		spinlock_t *lock = &current->sighand->siglock;
1269		read_lock(&tasklist_lock);
1270		spin_lock_irqsave(lock, flags);
1271		if (!list_empty(&q->list))
1272			list_del_init(&q->list);
1273		spin_unlock_irqrestore(lock, flags);
1274		read_unlock(&tasklist_lock);
1275	}
1276	q->flags &= ~SIGQUEUE_PREALLOC;
1277	__sigqueue_free(q);
1278}
1279
1280int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1281{
1282	unsigned long flags;
1283	int ret = 0;
1284
1285	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1286
1287	/*
1288	 * The rcu based delayed sighand destroy makes it possible to
1289	 * run this without tasklist lock held. The task struct itself
1290	 * cannot go away as create_timer did get_task_struct().
1291	 *
1292	 * We return -1, when the task is marked exiting, so
1293	 * posix_timer_event can redirect it to the group leader
1294	 */
1295	rcu_read_lock();
1296
1297	if (!likely(lock_task_sighand(p, &flags))) {
1298		ret = -1;
1299		goto out_err;
1300	}
1301
1302	if (unlikely(!list_empty(&q->list))) {
1303		/*
1304		 * If an SI_TIMER entry is already queue just increment
1305		 * the overrun count.
1306		 */
1307		BUG_ON(q->info.si_code != SI_TIMER);
1308		q->info.si_overrun++;
1309		goto out;
1310	}
1311	/* Short-circuit ignored signals.  */
1312	if (sig_ignored(p, sig)) {
1313		ret = 1;
1314		goto out;
1315	}
1316	/*
1317	 * Deliver the signal to listening signalfds. This must be called
1318	 * with the sighand lock held.
1319	 */
1320	signalfd_notify(p, sig);
1321
1322	list_add_tail(&q->list, &p->pending.list);
1323	sigaddset(&p->pending.signal, sig);
1324	if (!sigismember(&p->blocked, sig))
1325		signal_wake_up(p, sig == SIGKILL);
1326
1327out:
1328	unlock_task_sighand(p, &flags);
1329out_err:
1330	rcu_read_unlock();
1331
1332	return ret;
1333}
1334
1335int
1336send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1337{
1338	unsigned long flags;
1339	int ret = 0;
1340
1341	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1342
1343	read_lock(&tasklist_lock);
1344	/* Since it_lock is held, p->sighand cannot be NULL. */
1345	spin_lock_irqsave(&p->sighand->siglock, flags);
1346	handle_stop_signal(sig, p);
1347
1348	/* Short-circuit ignored signals.  */
1349	if (sig_ignored(p, sig)) {
1350		ret = 1;
1351		goto out;
1352	}
1353
1354	if (unlikely(!list_empty(&q->list))) {
1355		/*
1356		 * If an SI_TIMER entry is already queue just increment
1357		 * the overrun count.  Other uses should not try to
1358		 * send the signal multiple times.
1359		 */
1360		BUG_ON(q->info.si_code != SI_TIMER);
1361		q->info.si_overrun++;
1362		goto out;
1363	}
1364	/*
1365	 * Deliver the signal to listening signalfds. This must be called
1366	 * with the sighand lock held.
1367	 */
1368	signalfd_notify(p, sig);
1369
1370	/*
1371	 * Put this signal on the shared-pending queue.
1372	 * We always use the shared queue for process-wide signals,
1373	 * to avoid several races.
1374	 */
1375	list_add_tail(&q->list, &p->signal->shared_pending.list);
1376	sigaddset(&p->signal->shared_pending.signal, sig);
1377
1378	__group_complete_signal(sig, p);
1379out:
1380	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1381	read_unlock(&tasklist_lock);
1382	return ret;
1383}
1384
1385/*
1386 * Wake up any threads in the parent blocked in wait* syscalls.
1387 */
1388static inline void __wake_up_parent(struct task_struct *p,
1389				    struct task_struct *parent)
1390{
1391	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1392}
1393
1394/*
1395 * Let a parent know about the death of a child.
1396 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1397 */
1398
1399void do_notify_parent(struct task_struct *tsk, int sig)
1400{
1401	struct siginfo info;
1402	unsigned long flags;
1403	struct sighand_struct *psig;
1404
1405	BUG_ON(sig == -1);
1406
1407 	/* do_notify_parent_cldstop should have been called instead.  */
1408 	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1409
1410	BUG_ON(!tsk->ptrace &&
1411	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1412
1413	info.si_signo = sig;
1414	info.si_errno = 0;
1415	info.si_pid = tsk->pid;
1416	info.si_uid = tsk->uid;
1417
1418	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1419						       tsk->signal->utime));
1420	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1421						       tsk->signal->stime));
1422
1423	info.si_status = tsk->exit_code & 0x7f;
1424	if (tsk->exit_code & 0x80)
1425		info.si_code = CLD_DUMPED;
1426	else if (tsk->exit_code & 0x7f)
1427		info.si_code = CLD_KILLED;
1428	else {
1429		info.si_code = CLD_EXITED;
1430		info.si_status = tsk->exit_code >> 8;
1431	}
1432
1433	psig = tsk->parent->sighand;
1434	spin_lock_irqsave(&psig->siglock, flags);
1435	if (!tsk->ptrace && sig == SIGCHLD &&
1436	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1437	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1438		/*
1439		 * We are exiting and our parent doesn't care.  POSIX.1
1440		 * defines special semantics for setting SIGCHLD to SIG_IGN
1441		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1442		 * automatically and not left for our parent's wait4 call.
1443		 * Rather than having the parent do it as a magic kind of
1444		 * signal handler, we just set this to tell do_exit that we
1445		 * can be cleaned up without becoming a zombie.  Note that
1446		 * we still call __wake_up_parent in this case, because a
1447		 * blocked sys_wait4 might now return -ECHILD.
1448		 *
1449		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1450		 * is implementation-defined: we do (if you don't want
1451		 * it, just use SIG_IGN instead).
1452		 */
1453		tsk->exit_signal = -1;
1454		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1455			sig = 0;
1456	}
1457	if (valid_signal(sig) && sig > 0)
1458		__group_send_sig_info(sig, &info, tsk->parent);
1459	__wake_up_parent(tsk, tsk->parent);
1460	spin_unlock_irqrestore(&psig->siglock, flags);
1461}
1462
1463static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1464{
1465	struct siginfo info;
1466	unsigned long flags;
1467	struct task_struct *parent;
1468	struct sighand_struct *sighand;
1469
1470	if (tsk->ptrace & PT_PTRACED)
1471		parent = tsk->parent;
1472	else {
1473		tsk = tsk->group_leader;
1474		parent = tsk->real_parent;
1475	}
1476
1477	info.si_signo = SIGCHLD;
1478	info.si_errno = 0;
1479	info.si_pid = tsk->pid;
1480	info.si_uid = tsk->uid;
1481
1482	info.si_utime = cputime_to_jiffies(tsk->utime);
1483	info.si_stime = cputime_to_jiffies(tsk->stime);
1484
1485 	info.si_code = why;
1486 	switch (why) {
1487 	case CLD_CONTINUED:
1488 		info.si_status = SIGCONT;
1489 		break;
1490 	case CLD_STOPPED:
1491 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1492 		break;
1493 	case CLD_TRAPPED:
1494 		info.si_status = tsk->exit_code & 0x7f;
1495 		break;
1496 	default:
1497 		BUG();
1498 	}
1499
1500	sighand = parent->sighand;
1501	spin_lock_irqsave(&sighand->siglock, flags);
1502	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1503	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1504		__group_send_sig_info(SIGCHLD, &info, parent);
1505	/*
1506	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1507	 */
1508	__wake_up_parent(tsk, parent);
1509	spin_unlock_irqrestore(&sighand->siglock, flags);
1510}
1511
1512static inline int may_ptrace_stop(void)
1513{
1514	if (!likely(current->ptrace & PT_PTRACED))
1515		return 0;
1516
1517	if (unlikely(current->parent == current->real_parent &&
1518		    (current->ptrace & PT_ATTACHED)))
1519		return 0;
1520
1521	if (unlikely(current->signal == current->parent->signal) &&
1522	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
1523		return 0;
1524
1525	/*
1526	 * Are we in the middle of do_coredump?
1527	 * If so and our tracer is also part of the coredump stopping
1528	 * is a deadlock situation, and pointless because our tracer
1529	 * is dead so don't allow us to stop.
1530	 * If SIGKILL was already sent before the caller unlocked
1531	 * ->siglock we must see ->core_waiters != 0. Otherwise it
1532	 * is safe to enter schedule().
1533	 */
1534	if (unlikely(current->mm->core_waiters) &&
1535	    unlikely(current->mm == current->parent->mm))
1536		return 0;
1537
1538	return 1;
1539}
1540
1541/*
1542 * This must be called with current->sighand->siglock held.
1543 *
1544 * This should be the path for all ptrace stops.
1545 * We always set current->last_siginfo while stopped here.
1546 * That makes it a way to test a stopped process for
1547 * being ptrace-stopped vs being job-control-stopped.
1548 *
1549 * If we actually decide not to stop at all because the tracer is gone,
1550 * we leave nostop_code in current->exit_code.
1551 */
1552static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1553{
1554	/*
1555	 * If there is a group stop in progress,
1556	 * we must participate in the bookkeeping.
1557	 */
1558	if (current->signal->group_stop_count > 0)
1559		--current->signal->group_stop_count;
1560
1561	current->last_siginfo = info;
1562	current->exit_code = exit_code;
1563
1564	/* Let the debugger run.  */
1565	set_current_state(TASK_TRACED);
1566	spin_unlock_irq(&current->sighand->siglock);
1567	try_to_freeze();
1568	read_lock(&tasklist_lock);
1569	if (may_ptrace_stop()) {
1570		do_notify_parent_cldstop(current, CLD_TRAPPED);
1571		read_unlock(&tasklist_lock);
1572		schedule();
1573	} else {
1574		/*
1575		 * By the time we got the lock, our tracer went away.
1576		 * Don't stop here.
1577		 */
1578		read_unlock(&tasklist_lock);
1579		set_current_state(TASK_RUNNING);
1580		current->exit_code = nostop_code;
1581	}
1582
1583	/*
1584	 * We are back.  Now reacquire the siglock before touching
1585	 * last_siginfo, so that we are sure to have synchronized with
1586	 * any signal-sending on another CPU that wants to examine it.
1587	 */
1588	spin_lock_irq(&current->sighand->siglock);
1589	current->last_siginfo = NULL;
1590
1591	/*
1592	 * Queued signals ignored us while we were stopped for tracing.
1593	 * So check for any that we should take before resuming user mode.
1594	 * This sets TIF_SIGPENDING, but never clears it.
1595	 */
1596	recalc_sigpending_tsk(current);
1597}
1598
1599void ptrace_notify(int exit_code)
1600{
1601	siginfo_t info;
1602
1603	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1604
1605	memset(&info, 0, sizeof info);
1606	info.si_signo = SIGTRAP;
1607	info.si_code = exit_code;
1608	info.si_pid = current->pid;
1609	info.si_uid = current->uid;
1610
1611	/* Let the debugger run.  */
1612	spin_lock_irq(&current->sighand->siglock);
1613	ptrace_stop(exit_code, 0, &info);
1614	spin_unlock_irq(&current->sighand->siglock);
1615}
1616
1617static void
1618finish_stop(int stop_count)
1619{
1620	/*
1621	 * If there are no other threads in the group, or if there is
1622	 * a group stop in progress and we are the last to stop,
1623	 * report to the parent.  When ptraced, every thread reports itself.
1624	 */
1625	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1626		read_lock(&tasklist_lock);
1627		do_notify_parent_cldstop(current, CLD_STOPPED);
1628		read_unlock(&tasklist_lock);
1629	}
1630
1631	do {
1632		schedule();
1633	} while (try_to_freeze());
1634	/*
1635	 * Now we don't run again until continued.
1636	 */
1637	current->exit_code = 0;
1638}
1639
1640/*
1641 * This performs the stopping for SIGSTOP and other stop signals.
1642 * We have to stop all threads in the thread group.
1643 * Returns nonzero if we've actually stopped and released the siglock.
1644 * Returns zero if we didn't stop and still hold the siglock.
1645 */
1646static int do_signal_stop(int signr)
1647{
1648	struct signal_struct *sig = current->signal;
1649	int stop_count;
1650
1651	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1652		return 0;
1653
1654	if (sig->group_stop_count > 0) {
1655		/*
1656		 * There is a group stop in progress.  We don't need to
1657		 * start another one.
1658		 */
1659		stop_count = --sig->group_stop_count;
1660	} else {
1661		/*
1662		 * There is no group stop already in progress.
1663		 * We must initiate one now.
1664		 */
1665		struct task_struct *t;
1666
1667		sig->group_exit_code = signr;
1668
1669		stop_count = 0;
1670		for (t = next_thread(current); t != current; t = next_thread(t))
1671			/*
1672			 * Setting state to TASK_STOPPED for a group
1673			 * stop is always done with the siglock held,
1674			 * so this check has no races.
1675			 */
1676			if (!t->exit_state &&
1677			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1678				stop_count++;
1679				signal_wake_up(t, 0);
1680			}
1681		sig->group_stop_count = stop_count;
1682	}
1683
1684	if (stop_count == 0)
1685		sig->flags = SIGNAL_STOP_STOPPED;
1686	current->exit_code = sig->group_exit_code;
1687	__set_current_state(TASK_STOPPED);
1688
1689	spin_unlock_irq(&current->sighand->siglock);
1690	finish_stop(stop_count);
1691	return 1;
1692}
1693
1694/*
1695 * Do appropriate magic when group_stop_count > 0.
1696 * We return nonzero if we stopped, after releasing the siglock.
1697 * We return zero if we still hold the siglock and should look
1698 * for another signal without checking group_stop_count again.
1699 */
1700static int handle_group_stop(void)
1701{
1702	int stop_count;
1703
1704	if (current->signal->group_exit_task == current) {
1705		/*
1706		 * Group stop is so we can do a core dump,
1707		 * We are the initiating thread, so get on with it.
1708		 */
1709		current->signal->group_exit_task = NULL;
1710		return 0;
1711	}
1712
1713	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1714		/*
1715		 * Group stop is so another thread can do a core dump,
1716		 * or else we are racing against a death signal.
1717		 * Just punt the stop so we can get the next signal.
1718		 */
1719		return 0;
1720
1721	/*
1722	 * There is a group stop in progress.  We stop
1723	 * without any associated signal being in our queue.
1724	 */
1725	stop_count = --current->signal->group_stop_count;
1726	if (stop_count == 0)
1727		current->signal->flags = SIGNAL_STOP_STOPPED;
1728	current->exit_code = current->signal->group_exit_code;
1729	set_current_state(TASK_STOPPED);
1730	spin_unlock_irq(&current->sighand->siglock);
1731	finish_stop(stop_count);
1732	return 1;
1733}
1734
1735int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1736			  struct pt_regs *regs, void *cookie)
1737{
1738	sigset_t *mask = &current->blocked;
1739	int signr = 0;
1740
1741	try_to_freeze();
1742
1743relock:
1744	spin_lock_irq(&current->sighand->siglock);
1745	for (;;) {
1746		struct k_sigaction *ka;
1747
1748		if (unlikely(current->signal->group_stop_count > 0) &&
1749		    handle_group_stop())
1750			goto relock;
1751
1752		signr = dequeue_signal(current, mask, info);
1753
1754		if (!signr)
1755			break; /* will return 0 */
1756
1757		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1758			ptrace_signal_deliver(regs, cookie);
1759
1760			/* Let the debugger run.  */
1761			ptrace_stop(signr, signr, info);
1762
1763			/* We're back.  Did the debugger cancel the sig?  */
1764			signr = current->exit_code;
1765			if (signr == 0)
1766				continue;
1767
1768			current->exit_code = 0;
1769
1770			/* Update the siginfo structure if the signal has
1771			   changed.  If the debugger wanted something
1772			   specific in the siginfo structure then it should
1773			   have updated *info via PTRACE_SETSIGINFO.  */
1774			if (signr != info->si_signo) {
1775				info->si_signo = signr;
1776				info->si_errno = 0;
1777				info->si_code = SI_USER;
1778				info->si_pid = current->parent->pid;
1779				info->si_uid = current->parent->uid;
1780			}
1781
1782			/* If the (new) signal is now blocked, requeue it.  */
1783			if (sigismember(&current->blocked, signr)) {
1784				specific_send_sig_info(signr, info, current);
1785				continue;
1786			}
1787		}
1788
1789		ka = &current->sighand->action[signr-1];
1790		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1791			continue;
1792		if (ka->sa.sa_handler != SIG_DFL) {
1793			/* Run the handler.  */
1794			*return_ka = *ka;
1795
1796			if (ka->sa.sa_flags & SA_ONESHOT)
1797				ka->sa.sa_handler = SIG_DFL;
1798
1799			break; /* will return non-zero "signr" value */
1800		}
1801
1802		/*
1803		 * Now we are doing the default action for this signal.
1804		 */
1805		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1806			continue;
1807
1808		/*
1809		 * Init of a pid space gets no signals it doesn't want from
1810		 * within that pid space. It can of course get signals from
1811		 * its parent pid space.
1812		 */
1813		if (current == child_reaper(current))
1814			continue;
1815
1816		if (sig_kernel_stop(signr)) {
1817			/*
1818			 * The default action is to stop all threads in
1819			 * the thread group.  The job control signals
1820			 * do nothing in an orphaned pgrp, but SIGSTOP
1821			 * always works.  Note that siglock needs to be
1822			 * dropped during the call to is_orphaned_pgrp()
1823			 * because of lock ordering with tasklist_lock.
1824			 * This allows an intervening SIGCONT to be posted.
1825			 * We need to check for that and bail out if necessary.
1826			 */
1827			if (signr != SIGSTOP) {
1828				spin_unlock_irq(&current->sighand->siglock);
1829
1830				/* signals can be posted during this window */
1831
1832				if (is_current_pgrp_orphaned())
1833					goto relock;
1834
1835				spin_lock_irq(&current->sighand->siglock);
1836			}
1837
1838			if (likely(do_signal_stop(signr))) {
1839				/* It released the siglock.  */
1840				goto relock;
1841			}
1842
1843			/*
1844			 * We didn't actually stop, due to a race
1845			 * with SIGCONT or something like that.
1846			 */
1847			continue;
1848		}
1849
1850		spin_unlock_irq(&current->sighand->siglock);
1851
1852		/*
1853		 * Anything else is fatal, maybe with a core dump.
1854		 */
1855		current->flags |= PF_SIGNALED;
1856		if (sig_kernel_coredump(signr)) {
1857			/*
1858			 * If it was able to dump core, this kills all
1859			 * other threads in the group and synchronizes with
1860			 * their demise.  If we lost the race with another
1861			 * thread getting here, it set group_exit_code
1862			 * first and our do_group_exit call below will use
1863			 * that value and ignore the one we pass it.
1864			 */
1865			do_coredump((long)signr, signr, regs);
1866		}
1867
1868		/*
1869		 * Death signals, no core dump.
1870		 */
1871		do_group_exit(signr);
1872		/* NOTREACHED */
1873	}
1874	spin_unlock_irq(&current->sighand->siglock);
1875	return signr;
1876}
1877
1878EXPORT_SYMBOL(recalc_sigpending);
1879EXPORT_SYMBOL_GPL(dequeue_signal);
1880EXPORT_SYMBOL(flush_signals);
1881EXPORT_SYMBOL(force_sig);
1882EXPORT_SYMBOL(kill_proc);
1883EXPORT_SYMBOL(ptrace_notify);
1884EXPORT_SYMBOL(send_sig);
1885EXPORT_SYMBOL(send_sig_info);
1886EXPORT_SYMBOL(sigprocmask);
1887EXPORT_SYMBOL(block_all_signals);
1888EXPORT_SYMBOL(unblock_all_signals);
1889
1890
1891/*
1892 * System call entry points.
1893 */
1894
1895asmlinkage long sys_restart_syscall(void)
1896{
1897	struct restart_block *restart = &current_thread_info()->restart_block;
1898	return restart->fn(restart);
1899}
1900
1901long do_no_restart_syscall(struct restart_block *param)
1902{
1903	return -EINTR;
1904}
1905
1906/*
1907 * We don't need to get the kernel lock - this is all local to this
1908 * particular thread.. (and that's good, because this is _heavily_
1909 * used by various programs)
1910 */
1911
1912/*
1913 * This is also useful for kernel threads that want to temporarily
1914 * (or permanently) block certain signals.
1915 *
1916 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1917 * interface happily blocks "unblockable" signals like SIGKILL
1918 * and friends.
1919 */
1920int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1921{
1922	int error;
1923
1924	spin_lock_irq(&current->sighand->siglock);
1925	if (oldset)
1926		*oldset = current->blocked;
1927
1928	error = 0;
1929	switch (how) {
1930	case SIG_BLOCK:
1931		sigorsets(&current->blocked, &current->blocked, set);
1932		break;
1933	case SIG_UNBLOCK:
1934		signandsets(&current->blocked, &current->blocked, set);
1935		break;
1936	case SIG_SETMASK:
1937		current->blocked = *set;
1938		break;
1939	default:
1940		error = -EINVAL;
1941	}
1942	recalc_sigpending();
1943	spin_unlock_irq(&current->sighand->siglock);
1944
1945	return error;
1946}
1947
1948asmlinkage long
1949sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1950{
1951	int error = -EINVAL;
1952	sigset_t old_set, new_set;
1953
1954	if (sigsetsize != sizeof(sigset_t))
1955		goto out;
1956
1957	if (set) {
1958		error = -EFAULT;
1959		if (copy_from_user(&new_set, set, sizeof(*set)))
1960			goto out;
1961		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1962
1963		error = sigprocmask(how, &new_set, &old_set);
1964		if (error)
1965			goto out;
1966		if (oset)
1967			goto set_old;
1968	} else if (oset) {
1969		spin_lock_irq(&current->sighand->siglock);
1970		old_set = current->blocked;
1971		spin_unlock_irq(&current->sighand->siglock);
1972
1973	set_old:
1974		error = -EFAULT;
1975		if (copy_to_user(oset, &old_set, sizeof(*oset)))
1976			goto out;
1977	}
1978	error = 0;
1979out:
1980	return error;
1981}
1982
1983long do_sigpending(void __user *set, unsigned long sigsetsize)
1984{
1985	long error = -EINVAL;
1986	sigset_t pending;
1987
1988	if (sigsetsize > sizeof(sigset_t))
1989		goto out;
1990
1991	spin_lock_irq(&current->sighand->siglock);
1992	sigorsets(&pending, &current->pending.signal,
1993		  &current->signal->shared_pending.signal);
1994	spin_unlock_irq(&current->sighand->siglock);
1995
1996	/* Outside the lock because only this thread touches it.  */
1997	sigandsets(&pending, &current->blocked, &pending);
1998
1999	error = -EFAULT;
2000	if (!copy_to_user(set, &pending, sigsetsize))
2001		error = 0;
2002
2003out:
2004	return error;
2005}
2006
2007asmlinkage long
2008sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2009{
2010	return do_sigpending(set, sigsetsize);
2011}
2012
2013#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2014
2015int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2016{
2017	int err;
2018
2019	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2020		return -EFAULT;
2021	if (from->si_code < 0)
2022		return __copy_to_user(to, from, sizeof(siginfo_t))
2023			? -EFAULT : 0;
2024	/*
2025	 * If you change siginfo_t structure, please be sure
2026	 * this code is fixed accordingly.
2027	 * Please remember to update the signalfd_copyinfo() function
2028	 * inside fs/signalfd.c too, in case siginfo_t changes.
2029	 * It should never copy any pad contained in the structure
2030	 * to avoid security leaks, but must copy the generic
2031	 * 3 ints plus the relevant union member.
2032	 */
2033	err = __put_user(from->si_signo, &to->si_signo);
2034	err |= __put_user(from->si_errno, &to->si_errno);
2035	err |= __put_user((short)from->si_code, &to->si_code);
2036	switch (from->si_code & __SI_MASK) {
2037	case __SI_KILL:
2038		err |= __put_user(from->si_pid, &to->si_pid);
2039		err |= __put_user(from->si_uid, &to->si_uid);
2040		break;
2041	case __SI_TIMER:
2042		 err |= __put_user(from->si_tid, &to->si_tid);
2043		 err |= __put_user(from->si_overrun, &to->si_overrun);
2044		 err |= __put_user(from->si_ptr, &to->si_ptr);
2045		break;
2046	case __SI_POLL:
2047		err |= __put_user(from->si_band, &to->si_band);
2048		err |= __put_user(from->si_fd, &to->si_fd);
2049		break;
2050	case __SI_FAULT:
2051		err |= __put_user(from->si_addr, &to->si_addr);
2052#ifdef __ARCH_SI_TRAPNO
2053		err |= __put_user(from->si_trapno, &to->si_trapno);
2054#endif
2055		break;
2056	case __SI_CHLD:
2057		err |= __put_user(from->si_pid, &to->si_pid);
2058		err |= __put_user(from->si_uid, &to->si_uid);
2059		err |= __put_user(from->si_status, &to->si_status);
2060		err |= __put_user(from->si_utime, &to->si_utime);
2061		err |= __put_user(from->si_stime, &to->si_stime);
2062		break;
2063	case __SI_RT: /* This is not generated by the kernel as of now. */
2064	case __SI_MESGQ: /* But this is */
2065		err |= __put_user(from->si_pid, &to->si_pid);
2066		err |= __put_user(from->si_uid, &to->si_uid);
2067		err |= __put_user(from->si_ptr, &to->si_ptr);
2068		break;
2069	default: /* this is just in case for now ... */
2070		err |= __put_user(from->si_pid, &to->si_pid);
2071		err |= __put_user(from->si_uid, &to->si_uid);
2072		break;
2073	}
2074	return err;
2075}
2076
2077#endif
2078
2079asmlinkage long
2080sys_rt_sigtimedwait(const sigset_t __user *uthese,
2081		    siginfo_t __user *uinfo,
2082		    const struct timespec __user *uts,
2083		    size_t sigsetsize)
2084{
2085	int ret, sig;
2086	sigset_t these;
2087	struct timespec ts;
2088	siginfo_t info;
2089	long timeout = 0;
2090
2091	if (sigsetsize != sizeof(sigset_t))
2092		return -EINVAL;
2093
2094	if (copy_from_user(&these, uthese, sizeof(these)))
2095		return -EFAULT;
2096
2097	/*
2098	 * Invert the set of allowed signals to get those we
2099	 * want to block.
2100	 */
2101	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2102	signotset(&these);
2103
2104	if (uts) {
2105		if (copy_from_user(&ts, uts, sizeof(ts)))
2106			return -EFAULT;
2107		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2108		    || ts.tv_sec < 0)
2109			return -EINVAL;
2110	}
2111
2112	spin_lock_irq(&current->sighand->siglock);
2113	sig = dequeue_signal(current, &these, &info);
2114	if (!sig) {
2115		timeout = MAX_SCHEDULE_TIMEOUT;
2116		if (uts)
2117			timeout = (timespec_to_jiffies(&ts)
2118				   + (ts.tv_sec || ts.tv_nsec));
2119
2120		if (timeout) {
2121			/* None ready -- temporarily unblock those we're
2122			 * interested while we are sleeping in so that we'll
2123			 * be awakened when they arrive.  */
2124			current->real_blocked = current->blocked;
2125			sigandsets(&current->blocked, &current->blocked, &these);
2126			recalc_sigpending();
2127			spin_unlock_irq(&current->sighand->siglock);
2128
2129			timeout = schedule_timeout_interruptible(timeout);
2130
2131			spin_lock_irq(&current->sighand->siglock);
2132			sig = dequeue_signal(current, &these, &info);
2133			current->blocked = current->real_blocked;
2134			siginitset(&current->real_blocked, 0);
2135			recalc_sigpending();
2136		}
2137	}
2138	spin_unlock_irq(&current->sighand->siglock);
2139
2140	if (sig) {
2141		ret = sig;
2142		if (uinfo) {
2143			if (copy_siginfo_to_user(uinfo, &info))
2144				ret = -EFAULT;
2145		}
2146	} else {
2147		ret = -EAGAIN;
2148		if (timeout)
2149			ret = -EINTR;
2150	}
2151
2152	return ret;
2153}
2154
2155asmlinkage long
2156sys_kill(int pid, int sig)
2157{
2158	struct siginfo info;
2159
2160	info.si_signo = sig;
2161	info.si_errno = 0;
2162	info.si_code = SI_USER;
2163	info.si_pid = current->tgid;
2164	info.si_uid = current->uid;
2165
2166	return kill_something_info(sig, &info, pid);
2167}
2168
2169static int do_tkill(int tgid, int pid, int sig)
2170{
2171	int error;
2172	struct siginfo info;
2173	struct task_struct *p;
2174
2175	error = -ESRCH;
2176	info.si_signo = sig;
2177	info.si_errno = 0;
2178	info.si_code = SI_TKILL;
2179	info.si_pid = current->tgid;
2180	info.si_uid = current->uid;
2181
2182	read_lock(&tasklist_lock);
2183	p = find_task_by_pid(pid);
2184	if (p && (tgid <= 0 || p->tgid == tgid)) {
2185		error = check_kill_permission(sig, &info, p);
2186		/*
2187		 * The null signal is a permissions and process existence
2188		 * probe.  No signal is actually delivered.
2189		 */
2190		if (!error && sig && p->sighand) {
2191			spin_lock_irq(&p->sighand->siglock);
2192			handle_stop_signal(sig, p);
2193			error = specific_send_sig_info(sig, &info, p);
2194			spin_unlock_irq(&p->sighand->siglock);
2195		}
2196	}
2197	read_unlock(&tasklist_lock);
2198
2199	return error;
2200}
2201
2202/**
2203 *  sys_tgkill - send signal to one specific thread
2204 *  @tgid: the thread group ID of the thread
2205 *  @pid: the PID of the thread
2206 *  @sig: signal to be sent
2207 *
2208 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2209 *  exists but it's not belonging to the target process anymore. This
2210 *  method solves the problem of threads exiting and PIDs getting reused.
2211 */
2212asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2213{
2214	/* This is only valid for single tasks */
2215	if (pid <= 0 || tgid <= 0)
2216		return -EINVAL;
2217
2218	return do_tkill(tgid, pid, sig);
2219}
2220
2221/*
2222 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2223 */
2224asmlinkage long
2225sys_tkill(int pid, int sig)
2226{
2227	/* This is only valid for single tasks */
2228	if (pid <= 0)
2229		return -EINVAL;
2230
2231	return do_tkill(0, pid, sig);
2232}
2233
2234asmlinkage long
2235sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2236{
2237	siginfo_t info;
2238
2239	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2240		return -EFAULT;
2241
2242	/* Not even root can pretend to send signals from the kernel.
2243	   Nor can they impersonate a kill(), which adds source info.  */
2244	if (info.si_code >= 0)
2245		return -EPERM;
2246	info.si_signo = sig;
2247
2248	/* POSIX.1b doesn't mention process groups.  */
2249	return kill_proc_info(sig, &info, pid);
2250}
2251
2252int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2253{
2254	struct k_sigaction *k;
2255	sigset_t mask;
2256
2257	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2258		return -EINVAL;
2259
2260	k = &current->sighand->action[sig-1];
2261
2262	spin_lock_irq(&current->sighand->siglock);
2263	if (signal_pending(current)) {
2264		/*
2265		 * If there might be a fatal signal pending on multiple
2266		 * threads, make sure we take it before changing the action.
2267		 */
2268		spin_unlock_irq(&current->sighand->siglock);
2269		return -ERESTARTNOINTR;
2270	}
2271
2272	if (oact)
2273		*oact = *k;
2274
2275	if (act) {
2276		sigdelsetmask(&act->sa.sa_mask,
2277			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2278		*k = *act;
2279		/*
2280		 * POSIX 3.3.1.3:
2281		 *  "Setting a signal action to SIG_IGN for a signal that is
2282		 *   pending shall cause the pending signal to be discarded,
2283		 *   whether or not it is blocked."
2284		 *
2285		 *  "Setting a signal action to SIG_DFL for a signal that is
2286		 *   pending and whose default action is to ignore the signal
2287		 *   (for example, SIGCHLD), shall cause the pending signal to
2288		 *   be discarded, whether or not it is blocked"
2289		 */
2290		if (act->sa.sa_handler == SIG_IGN ||
2291		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2292			struct task_struct *t = current;
2293			sigemptyset(&mask);
2294			sigaddset(&mask, sig);
2295			rm_from_queue_full(&mask, &t->signal->shared_pending);
2296			do {
2297				rm_from_queue_full(&mask, &t->pending);
2298				recalc_sigpending_and_wake(t);
2299				t = next_thread(t);
2300			} while (t != current);
2301		}
2302	}
2303
2304	spin_unlock_irq(&current->sighand->siglock);
2305	return 0;
2306}
2307
2308int
2309do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2310{
2311	stack_t oss;
2312	int error;
2313
2314	if (uoss) {
2315		oss.ss_sp = (void __user *) current->sas_ss_sp;
2316		oss.ss_size = current->sas_ss_size;
2317		oss.ss_flags = sas_ss_flags(sp);
2318	}
2319
2320	if (uss) {
2321		void __user *ss_sp;
2322		size_t ss_size;
2323		int ss_flags;
2324
2325		error = -EFAULT;
2326		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2327		    || __get_user(ss_sp, &uss->ss_sp)
2328		    || __get_user(ss_flags, &uss->ss_flags)
2329		    || __get_user(ss_size, &uss->ss_size))
2330			goto out;
2331
2332		error = -EPERM;
2333		if (on_sig_stack(sp))
2334			goto out;
2335
2336		error = -EINVAL;
2337		/*
2338		 *
2339		 * Note - this code used to test ss_flags incorrectly
2340		 *  	  old code may have been written using ss_flags==0
2341		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
2342		 *	  way that worked) - this fix preserves that older
2343		 *	  mechanism
2344		 */
2345		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2346			goto out;
2347
2348		if (ss_flags == SS_DISABLE) {
2349			ss_size = 0;
2350			ss_sp = NULL;
2351		} else {
2352			error = -ENOMEM;
2353			if (ss_size < MINSIGSTKSZ)
2354				goto out;
2355		}
2356
2357		current->sas_ss_sp = (unsigned long) ss_sp;
2358		current->sas_ss_size = ss_size;
2359	}
2360
2361	if (uoss) {
2362		error = -EFAULT;
2363		if (copy_to_user(uoss, &oss, sizeof(oss)))
2364			goto out;
2365	}
2366
2367	error = 0;
2368out:
2369	return error;
2370}
2371
2372#ifdef __ARCH_WANT_SYS_SIGPENDING
2373
2374asmlinkage long
2375sys_sigpending(old_sigset_t __user *set)
2376{
2377	return do_sigpending(set, sizeof(*set));
2378}
2379
2380#endif
2381
2382#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2383/* Some platforms have their own version with special arguments others
2384   support only sys_rt_sigprocmask.  */
2385
2386asmlinkage long
2387sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2388{
2389	int error;
2390	old_sigset_t old_set, new_set;
2391
2392	if (set) {
2393		error = -EFAULT;
2394		if (copy_from_user(&new_set, set, sizeof(*set)))
2395			goto out;
2396		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2397
2398		spin_lock_irq(&current->sighand->siglock);
2399		old_set = current->blocked.sig[0];
2400
2401		error = 0;
2402		switch (how) {
2403		default:
2404			error = -EINVAL;
2405			break;
2406		case SIG_BLOCK:
2407			sigaddsetmask(&current->blocked, new_set);
2408			break;
2409		case SIG_UNBLOCK:
2410			sigdelsetmask(&current->blocked, new_set);
2411			break;
2412		case SIG_SETMASK:
2413			current->blocked.sig[0] = new_set;
2414			break;
2415		}
2416
2417		recalc_sigpending();
2418		spin_unlock_irq(&current->sighand->siglock);
2419		if (error)
2420			goto out;
2421		if (oset)
2422			goto set_old;
2423	} else if (oset) {
2424		old_set = current->blocked.sig[0];
2425	set_old:
2426		error = -EFAULT;
2427		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2428			goto out;
2429	}
2430	error = 0;
2431out:
2432	return error;
2433}
2434#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2435
2436#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2437asmlinkage long
2438sys_rt_sigaction(int sig,
2439		 const struct sigaction __user *act,
2440		 struct sigaction __user *oact,
2441		 size_t sigsetsize)
2442{
2443	struct k_sigaction new_sa, old_sa;
2444	int ret = -EINVAL;
2445
2446	if (sigsetsize != sizeof(sigset_t))
2447		goto out;
2448
2449	if (act) {
2450		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2451			return -EFAULT;
2452	}
2453
2454	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2455
2456	if (!ret && oact) {
2457		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2458			return -EFAULT;
2459	}
2460out:
2461	return ret;
2462}
2463#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2464
2465#ifdef __ARCH_WANT_SYS_SGETMASK
2466
2467/*
2468 * For backwards compatibility.  Functionality superseded by sigprocmask.
2469 */
2470asmlinkage long
2471sys_sgetmask(void)
2472{
2473	/* SMP safe */
2474	return current->blocked.sig[0];
2475}
2476
2477asmlinkage long
2478sys_ssetmask(int newmask)
2479{
2480	int old;
2481
2482	spin_lock_irq(&current->sighand->siglock);
2483	old = current->blocked.sig[0];
2484
2485	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2486						  sigmask(SIGSTOP)));
2487	recalc_sigpending();
2488	spin_unlock_irq(&current->sighand->siglock);
2489
2490	return old;
2491}
2492#endif /* __ARCH_WANT_SGETMASK */
2493
2494#ifdef __ARCH_WANT_SYS_SIGNAL
2495/*
2496 * For backwards compatibility.  Functionality superseded by sigaction.
2497 */
2498asmlinkage unsigned long
2499sys_signal(int sig, __sighandler_t handler)
2500{
2501	struct k_sigaction new_sa, old_sa;
2502	int ret;
2503
2504	new_sa.sa.sa_handler = handler;
2505	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2506	sigemptyset(&new_sa.sa.sa_mask);
2507
2508	ret = do_sigaction(sig, &new_sa, &old_sa);
2509
2510	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2511}
2512#endif /* __ARCH_WANT_SYS_SIGNAL */
2513
2514#ifdef __ARCH_WANT_SYS_PAUSE
2515
2516asmlinkage long
2517sys_pause(void)
2518{
2519	current->state = TASK_INTERRUPTIBLE;
2520	schedule();
2521	return -ERESTARTNOHAND;
2522}
2523
2524#endif
2525
2526#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2527asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2528{
2529	sigset_t newset;
2530
2531	if (sigsetsize != sizeof(sigset_t))
2532		return -EINVAL;
2533
2534	if (copy_from_user(&newset, unewset, sizeof(newset)))
2535		return -EFAULT;
2536	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2537
2538	spin_lock_irq(&current->sighand->siglock);
2539	current->saved_sigmask = current->blocked;
2540	current->blocked = newset;
2541	recalc_sigpending();
2542	spin_unlock_irq(&current->sighand->siglock);
2543
2544	current->state = TASK_INTERRUPTIBLE;
2545	schedule();
2546	set_thread_flag(TIF_RESTORE_SIGMASK);
2547	return -ERESTARTNOHAND;
2548}
2549#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2550
2551__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2552{
2553	return NULL;
2554}
2555
2556void __init signals_init(void)
2557{
2558	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2559}
2560