1/*
2 *  linux/kernel/signal.c
3 *
4 *  Copyright (C) 1991, 1992  Linus Torvalds
5 *
6 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7 *
8 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9 *		Changes to use preallocated sigqueue structures
10 *		to allow signals to be sent reliably.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/security.h>
21#include <linux/syscalls.h>
22#include <linux/ptrace.h>
23#include <linux/signal.h>
24#include <linux/signalfd.h>
25#include <linux/ratelimit.h>
26#include <linux/tracehook.h>
27#include <linux/capability.h>
28#include <linux/freezer.h>
29#include <linux/pid_namespace.h>
30#include <linux/nsproxy.h>
31#define CREATE_TRACE_POINTS
32#include <trace/events/signal.h>
33
34#include <asm/param.h>
35#include <asm/uaccess.h>
36#include <asm/unistd.h>
37#include <asm/siginfo.h>
38#include "audit.h"	/* audit_signal_info() */
39
40/*
41 * SLAB caches for signal bits.
42 */
43
44static struct kmem_cache *sigqueue_cachep;
45
46int print_fatal_signals __read_mostly;
47
48static void __user *sig_handler(struct task_struct *t, int sig)
49{
50	return t->sighand->action[sig - 1].sa.sa_handler;
51}
52
53static int sig_handler_ignored(void __user *handler, int sig)
54{
55	/* Is it explicitly or implicitly ignored? */
56	return handler == SIG_IGN ||
57		(handler == SIG_DFL && sig_kernel_ignore(sig));
58}
59
60static int sig_task_ignored(struct task_struct *t, int sig,
61		int from_ancestor_ns)
62{
63	void __user *handler;
64
65	handler = sig_handler(t, sig);
66
67	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
68			handler == SIG_DFL && !from_ancestor_ns)
69		return 1;
70
71	return sig_handler_ignored(handler, sig);
72}
73
74static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
75{
76	/*
77	 * Blocked signals are never ignored, since the
78	 * signal handler may change by the time it is
79	 * unblocked.
80	 */
81	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
82		return 0;
83
84	if (!sig_task_ignored(t, sig, from_ancestor_ns))
85		return 0;
86
87	/*
88	 * Tracers may want to know about even ignored signals.
89	 */
90	return !tracehook_consider_ignored_signal(t, sig);
91}
92
93/*
94 * Re-calculate pending state from the set of locally pending
95 * signals, globally pending signals, and blocked signals.
96 */
97static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98{
99	unsigned long ready;
100	long i;
101
102	switch (_NSIG_WORDS) {
103	default:
104		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105			ready |= signal->sig[i] &~ blocked->sig[i];
106		break;
107
108	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
109		ready |= signal->sig[2] &~ blocked->sig[2];
110		ready |= signal->sig[1] &~ blocked->sig[1];
111		ready |= signal->sig[0] &~ blocked->sig[0];
112		break;
113
114	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
115		ready |= signal->sig[0] &~ blocked->sig[0];
116		break;
117
118	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
119	}
	return ready != 0;
121}
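
/*
 * Worked example (added for illustration, not in the original source):
 * if SIGINT is set in signal->sig[0] but is also set in blocked->sig[0],
 * the word-wise "signal & ~blocked" is zero for it, so a signal that is
 * pending but blocked never by itself makes has_pending_signals() return
 * nonzero.
 */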
122
123#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124
125static int recalc_sigpending_tsk(struct task_struct *t)
126{
127	if (t->signal->group_stop_count > 0 ||
128	    PENDING(&t->pending, &t->blocked) ||
129	    PENDING(&t->signal->shared_pending, &t->blocked)) {
130		set_tsk_thread_flag(t, TIF_SIGPENDING);
131		return 1;
132	}
133	/*
134	 * We must never clear the flag in another thread, or in current
135	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they may clear it do so.
137	 */
138	return 0;
139}
140
141/*
142 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
144 */
145void recalc_sigpending_and_wake(struct task_struct *t)
146{
147	if (recalc_sigpending_tsk(t))
148		signal_wake_up(t, 0);
149}
150
151void recalc_sigpending(void)
152{
153	if (unlikely(tracehook_force_sigpending()))
154		set_thread_flag(TIF_SIGPENDING);
155	else if (!recalc_sigpending_tsk(current) && !freezing(current))
156		clear_thread_flag(TIF_SIGPENDING);
157
158}
159
160/* Given the mask, find the first available signal that should be serviced. */
161
162#define SYNCHRONOUS_MASK \
163	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164	 sigmask(SIGTRAP) | sigmask(SIGFPE))
165
166int next_signal(struct sigpending *pending, sigset_t *mask)
167{
168	unsigned long i, *s, *m, x;
169	int sig = 0;
170
171	s = pending->signal.sig;
172	m = mask->sig;
173
174	/*
175	 * Handle the first word specially: it contains the
176	 * synchronous signals that need to be dequeued first.
177	 */
178	x = *s &~ *m;
179	if (x) {
180		if (x & SYNCHRONOUS_MASK)
181			x &= SYNCHRONOUS_MASK;
182		sig = ffz(~x) + 1;
183		return sig;
184	}
185
186	switch (_NSIG_WORDS) {
187	default:
188		for (i = 1; i < _NSIG_WORDS; ++i) {
189			x = *++s &~ *++m;
190			if (!x)
191				continue;
192			sig = ffz(~x) + i*_NSIG_BPW + 1;
193			break;
194		}
195		break;
196
197	case 2:
198		x = s[1] &~ m[1];
199		if (!x)
200			break;
201		sig = ffz(~x) + _NSIG_BPW + 1;
202		break;
203
204	case 1:
205		/* Nothing to do */
206		break;
207	}
208
209	return sig;
210}
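
/*
 * Worked example (added for illustration): with both SIGSEGV and SIGUSR1
 * pending in the first word and neither blocked, the SYNCHRONOUS_MASK
 * filtering above makes next_signal() return SIGSEGV first; SIGUSR1 is
 * only returned by a later call, once the synchronous signal is gone.
 */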
211
212static inline void print_dropped_signal(int sig)
213{
214	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215
216	if (!print_fatal_signals)
217		return;
218
219	if (!__ratelimit(&ratelimit_state))
220		return;
221
222	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223				current->comm, current->pid, sig);
224}
225
226/*
227 * allocate a new signal queue record
228 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
230 */
231static struct sigqueue *
232__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
233{
234	struct sigqueue *q = NULL;
235	struct user_struct *user;
236
237	/*
238	 * Protect access to @t credentials. This can go away when all
239	 * callers hold rcu read lock.
240	 */
241	rcu_read_lock();
242	user = get_uid(__task_cred(t)->user);
243	atomic_inc(&user->sigpending);
244	rcu_read_unlock();
245
246	if (override_rlimit ||
247	    atomic_read(&user->sigpending) <=
248			task_rlimit(t, RLIMIT_SIGPENDING)) {
249		q = kmem_cache_alloc(sigqueue_cachep, flags);
250	} else {
251		print_dropped_signal(sig);
252	}
253
254	if (unlikely(q == NULL)) {
255		atomic_dec(&user->sigpending);
256		free_uid(user);
257	} else {
258		INIT_LIST_HEAD(&q->list);
259		q->flags = 0;
260		q->user = user;
261	}
262
263	return q;
264}
265
266static void __sigqueue_free(struct sigqueue *q)
267{
268	if (q->flags & SIGQUEUE_PREALLOC)
269		return;
270	atomic_dec(&q->user->sigpending);
271	free_uid(q->user);
272	kmem_cache_free(sigqueue_cachep, q);
273}
274
275void flush_sigqueue(struct sigpending *queue)
276{
277	struct sigqueue *q;
278
279	sigemptyset(&queue->signal);
280	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
282		list_del_init(&q->list);
283		__sigqueue_free(q);
284	}
285}
286
287/*
288 * Flush all pending signals for a task.
289 */
290void __flush_signals(struct task_struct *t)
291{
292	clear_tsk_thread_flag(t, TIF_SIGPENDING);
293	flush_sigqueue(&t->pending);
294	flush_sigqueue(&t->signal->shared_pending);
295}
296
297void flush_signals(struct task_struct *t)
298{
299	unsigned long flags;
300
301	spin_lock_irqsave(&t->sighand->siglock, flags);
302	__flush_signals(t);
303	spin_unlock_irqrestore(&t->sighand->siglock, flags);
304}
305
306static void __flush_itimer_signals(struct sigpending *pending)
307{
308	sigset_t signal, retain;
309	struct sigqueue *q, *n;
310
311	signal = pending->signal;
312	sigemptyset(&retain);
313
314	list_for_each_entry_safe(q, n, &pending->list, list) {
315		int sig = q->info.si_signo;
316
317		if (likely(q->info.si_code != SI_TIMER)) {
318			sigaddset(&retain, sig);
319		} else {
320			sigdelset(&signal, sig);
321			list_del_init(&q->list);
322			__sigqueue_free(q);
323		}
324	}
325
326	sigorsets(&pending->signal, &signal, &retain);
327}
328
329void flush_itimer_signals(void)
330{
331	struct task_struct *tsk = current;
332	unsigned long flags;
333
334	spin_lock_irqsave(&tsk->sighand->siglock, flags);
335	__flush_itimer_signals(&tsk->pending);
336	__flush_itimer_signals(&tsk->signal->shared_pending);
337	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
338}
339
340void ignore_signals(struct task_struct *t)
341{
342	int i;
343
344	for (i = 0; i < _NSIG; ++i)
345		t->sighand->action[i].sa.sa_handler = SIG_IGN;
346
347	flush_signals(t);
348}
349
350/*
351 * Flush all handlers for a task.
352 */
353
354void
355flush_signal_handlers(struct task_struct *t, int force_default)
356{
357	int i;
358	struct k_sigaction *ka = &t->sighand->action[0];
359	for (i = _NSIG ; i != 0 ; i--) {
360		if (force_default || ka->sa.sa_handler != SIG_IGN)
361			ka->sa.sa_handler = SIG_DFL;
362		ka->sa.sa_flags = 0;
363		sigemptyset(&ka->sa.sa_mask);
364		ka++;
365	}
366}
367
368int unhandled_signal(struct task_struct *tsk, int sig)
369{
370	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
371	if (is_global_init(tsk))
372		return 1;
373	if (handler != SIG_IGN && handler != SIG_DFL)
374		return 0;
375	return !tracehook_consider_fatal_signal(tsk, sig);
376}
377
378
379/* Notify the system that a driver wants to block all signals for this
380 * process, and wants to be notified if any signals at all were to be
381 * sent/acted upon.  If the notifier routine returns non-zero, then the
382 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
384 * allowed.  priv is a pointer to private data that the notifier routine
385 * can use to determine if the signal should be blocked or not.  */
386
387void
388block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
389{
390	unsigned long flags;
391
392	spin_lock_irqsave(&current->sighand->siglock, flags);
393	current->notifier_mask = mask;
394	current->notifier_data = priv;
395	current->notifier = notifier;
396	spin_unlock_irqrestore(&current->sighand->siglock, flags);
397}
398
399/* Notify the system that blocking has ended. */
400
401void
402unblock_all_signals(void)
403{
404	unsigned long flags;
405
406	spin_lock_irqsave(&current->sighand->siglock, flags);
407	current->notifier = NULL;
408	current->notifier_data = NULL;
409	recalc_sigpending();
410	spin_unlock_irqrestore(&current->sighand->siglock, flags);
411}
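
/*
 * Usage sketch (illustrative only; my_notifier and my_dev are hypothetical
 * names, not part of this file): a driver that must not be disturbed while
 * it owns some hardware state could pair the two calls like
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->allow_signal;	(nonzero: deliver the signal)
 *	}
 *
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	... touch the hardware ...
 *	unblock_all_signals();
 */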
412
413static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
414{
415	struct sigqueue *q, *first = NULL;
416
417	/*
418	 * Collect the siginfo appropriate to this signal.  Check if
419	 * there is another siginfo for the same signal.
	 */
421	list_for_each_entry(q, &list->list, list) {
422		if (q->info.si_signo == sig) {
423			if (first)
424				goto still_pending;
425			first = q;
426		}
427	}
428
429	sigdelset(&list->signal, sig);
430
431	if (first) {
432still_pending:
433		list_del_init(&first->list);
434		copy_siginfo(info, &first->info);
435		__sigqueue_free(first);
436	} else {
437		/* Ok, it wasn't in the queue.  This must be
438		   a fast-pathed signal or we must have been
439		   out of queue space.  So zero out the info.
440		 */
441		info->si_signo = sig;
442		info->si_errno = 0;
443		info->si_code = SI_USER;
444		info->si_pid = 0;
445		info->si_uid = 0;
446	}
447}
448
449static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
450			siginfo_t *info)
451{
452	int sig = next_signal(pending, mask);
453
454	if (sig) {
455		if (current->notifier) {
456			if (sigismember(current->notifier_mask, sig)) {
457				if (!(current->notifier)(current->notifier_data)) {
458					clear_thread_flag(TIF_SIGPENDING);
459					return 0;
460				}
461			}
462		}
463
464		collect_signal(sig, pending, info);
465	}
466
467	return sig;
468}
469
470/*
471 * Dequeue a signal and return the element to the caller, which is
472 * expected to free it.
473 *
474 * All callers have to hold the siglock.
475 */
476int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
477{
478	int signr;
479
480	/* We only dequeue private signals from ourselves, we don't let
481	 * signalfd steal them
482	 */
483	signr = __dequeue_signal(&tsk->pending, mask, info);
484	if (!signr) {
485		signr = __dequeue_signal(&tsk->signal->shared_pending,
486					 mask, info);
487		/*
488		 * itimer signal ?
489		 *
490		 * itimers are process shared and we restart periodic
491		 * itimers in the signal delivery path to prevent DoS
492		 * attacks in the high resolution timer case. This is
493		 * compliant with the old way of self restarting
494		 * itimers, as the SIGALRM is a legacy signal and only
495		 * queued once. Changing the restart behaviour to
496		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
498		 * systems too.
499		 */
500		if (unlikely(signr == SIGALRM)) {
501			struct hrtimer *tmr = &tsk->signal->real_timer;
502
503			if (!hrtimer_is_queued(tmr) &&
504			    tsk->signal->it_real_incr.tv64 != 0) {
505				hrtimer_forward(tmr, tmr->base->get_time(),
506						tsk->signal->it_real_incr);
507				hrtimer_restart(tmr);
508			}
509		}
510	}
511
512	recalc_sigpending();
513	if (!signr)
514		return 0;
515
516	if (unlikely(sig_kernel_stop(signr))) {
517		/*
518		 * Set a marker that we have dequeued a stop signal.  Our
519		 * caller might release the siglock and then the pending
520		 * stop signal it is about to process is no longer in the
521		 * pending bitmasks, but must still be cleared by a SIGCONT
522		 * (and overruled by a SIGKILL).  So those cases clear this
523		 * shared flag after we've set it.  Note that this flag may
524		 * remain set after the signal we return is ignored or
525		 * handled.  That doesn't matter because its only purpose
526		 * is to alert stop-signal processing code when another
527		 * processor has come along and cleared the flag.
528		 */
529		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
530	}
531	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
532		/*
533		 * Release the siglock to ensure proper locking order
534		 * of timer locks outside of siglocks.  Note, we leave
535		 * irqs disabled here, since the posix-timers code is
536		 * about to disable them again anyway.
537		 */
538		spin_unlock(&tsk->sighand->siglock);
539		do_schedule_next_timer(info);
540		spin_lock(&tsk->sighand->siglock);
541	}
542	return signr;
543}
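
/*
 * Caller pattern (a sketch based only on the functions in this file):
 * dequeue_signal() must run with ->siglock held, e.g.
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * get_signal_to_deliver() below uses exactly this pattern.
 */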
544
545/*
 * Tell a process that it has a new active signal.
547 *
548 * NOTE! we rely on the previous spin_lock to
549 * lock interrupts for us! We can only be called with
550 * "siglock" held, and the local interrupt must
551 * have been disabled when that got acquired!
552 *
553 * No need to set need_resched since signal event passing
554 * goes through ->blocked
555 */
556void signal_wake_up(struct task_struct *t, int resume)
557{
558	unsigned int mask;
559
560	set_tsk_thread_flag(t, TIF_SIGPENDING);
561
562	/*
563	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
564	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
566	 * By using wake_up_state, we ensure the process will wake up and
567	 * handle its death signal.
568	 */
569	mask = TASK_INTERRUPTIBLE;
570	if (resume)
571		mask |= TASK_WAKEKILL;
572	if (!wake_up_state(t, mask))
573		kick_process(t);
574}
575
576/*
577 * Remove signals in mask from the pending set and queue.
578 * Returns 1 if any signals were found.
579 *
580 * All callers must be holding the siglock.
581 *
582 * This version takes a sigset mask and looks at all signals,
583 * not just those in the first mask word.
584 */
585static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
586{
587	struct sigqueue *q, *n;
588	sigset_t m;
589
590	sigandsets(&m, mask, &s->signal);
591	if (sigisemptyset(&m))
592		return 0;
593
594	signandsets(&s->signal, &s->signal, mask);
595	list_for_each_entry_safe(q, n, &s->list, list) {
596		if (sigismember(mask, q->info.si_signo)) {
597			list_del_init(&q->list);
598			__sigqueue_free(q);
599		}
600	}
601	return 1;
602}
603/*
604 * Remove signals in mask from the pending set and queue.
605 * Returns 1 if any signals were found.
606 *
607 * All callers must be holding the siglock.
608 */
609static int rm_from_queue(unsigned long mask, struct sigpending *s)
610{
611	struct sigqueue *q, *n;
612
613	if (!sigtestsetmask(&s->signal, mask))
614		return 0;
615
616	sigdelsetmask(&s->signal, mask);
617	list_for_each_entry_safe(q, n, &s->list, list) {
618		if (q->info.si_signo < SIGRTMIN &&
619		    (mask & sigmask(q->info.si_signo))) {
620			list_del_init(&q->list);
621			__sigqueue_free(q);
622		}
623	}
624	return 1;
625}
626
627static inline int is_si_special(const struct siginfo *info)
628{
629	return info <= SEND_SIG_FORCED;
630}
631
632static inline bool si_fromuser(const struct siginfo *info)
633{
634	return info == SEND_SIG_NOINFO ||
635		(!is_si_special(info) && SI_FROMUSER(info));
636}
637
638/*
639 * Bad permissions for sending the signal
640 * - the caller must hold the RCU read lock
641 */
642static int check_kill_permission(int sig, struct siginfo *info,
643				 struct task_struct *t)
644{
645	const struct cred *cred, *tcred;
646	struct pid *sid;
647	int error;
648
649	if (!valid_signal(sig))
650		return -EINVAL;
651
652	if (!si_fromuser(info))
653		return 0;
654
655	error = audit_signal_info(sig, t); /* Let audit system see the signal */
656	if (error)
657		return error;
658
659	cred = current_cred();
660	tcred = __task_cred(t);
661	if (!same_thread_group(current, t) &&
662	    (cred->euid ^ tcred->suid) &&
663	    (cred->euid ^ tcred->uid) &&
664	    (cred->uid  ^ tcred->suid) &&
665	    (cred->uid  ^ tcred->uid) &&
666	    !capable(CAP_KILL)) {
667		switch (sig) {
668		case SIGCONT:
669			sid = task_session(t);
670			/*
671			 * We don't return the error if sid == NULL. The
672			 * task was unhashed, the caller must notice this.
673			 */
674			if (!sid || sid == task_session(current))
675				break;
676		default:
677			return -EPERM;
678		}
679	}
680
681	return security_task_kill(t, info, sig, 0);
682}
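
/*
 * Note on the credential test above (added for clarity): "a ^ b" is zero
 * only when a == b, so the chain of XORs simply means "the sender's
 * uid/euid matches none of the target's uid/suid".  For example, euid 1000
 * signalling a task whose uid is 1000 makes (cred->euid ^ tcred->uid) == 0,
 * the && chain fails, and the permission check falls through to
 * security_task_kill().
 */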
683
684/*
685 * Handle magic process-wide effects of stop/continue signals. Unlike
686 * the signal actions, these happen immediately at signal-generation
687 * time regardless of blocking, ignoring, or handling.  This does the
688 * actual continuing for SIGCONT, but not the actual stopping for stop
689 * signals. The process stop is done as a signal action for SIG_DFL.
690 *
691 * Returns true if the signal should be actually delivered, otherwise
692 * it should be dropped.
693 */
694static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
695{
696	struct signal_struct *signal = p->signal;
697	struct task_struct *t;
698
699	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
700		/*
701		 * The process is in the middle of dying, nothing to do.
702		 */
703	} else if (sig_kernel_stop(sig)) {
704		/*
705		 * This is a stop signal.  Remove SIGCONT from all queues.
706		 */
707		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
708		t = p;
709		do {
710			rm_from_queue(sigmask(SIGCONT), &t->pending);
711		} while_each_thread(p, t);
712	} else if (sig == SIGCONT) {
713		unsigned int why;
714		/*
715		 * Remove all stop signals from all queues,
716		 * and wake all threads.
717		 */
718		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
719		t = p;
720		do {
721			unsigned int state;
722			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
723			/*
724			 * If there is a handler for SIGCONT, we must make
725			 * sure that no thread returns to user mode before
726			 * we post the signal, in case it was the only
727			 * thread eligible to run the signal handler--then
728			 * it must not do anything between resuming and
729			 * running the handler.  With the TIF_SIGPENDING
730			 * flag set, the thread will pause and acquire the
731			 * siglock that we hold now and until we've queued
732			 * the pending signal.
733			 *
734			 * Wake up the stopped thread _after_ setting
735			 * TIF_SIGPENDING
736			 */
737			state = __TASK_STOPPED;
738			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
739				set_tsk_thread_flag(t, TIF_SIGPENDING);
740				state |= TASK_INTERRUPTIBLE;
741			}
742			wake_up_state(t, state);
743		} while_each_thread(p, t);
744
745		/*
746		 * Notify the parent with CLD_CONTINUED if we were stopped.
747		 *
748		 * If we were in the middle of a group stop, we pretend it
749		 * was already finished, and then continued. Since SIGCHLD
750		 * doesn't queue we report only CLD_STOPPED, as if the next
751		 * CLD_CONTINUED was dropped.
752		 */
753		why = 0;
754		if (signal->flags & SIGNAL_STOP_STOPPED)
755			why |= SIGNAL_CLD_CONTINUED;
756		else if (signal->group_stop_count)
757			why |= SIGNAL_CLD_STOPPED;
758
759		if (why) {
760			/*
761			 * The first thread which returns from do_signal_stop()
762			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
763			 * notify its parent. See get_signal_to_deliver().
764			 */
765			signal->flags = why | SIGNAL_STOP_CONTINUED;
766			signal->group_stop_count = 0;
767			signal->group_exit_code = 0;
768		} else {
769			/*
770			 * We are not stopped, but there could be a stop
771			 * signal in the middle of being processed after
772			 * being removed from the queue.  Clear that too.
773			 */
774			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
775		}
776	}
777
778	return !sig_ignored(p, sig, from_ancestor_ns);
779}
780
781/*
782 * Test if P wants to take SIG.  After we've checked all threads with this,
783 * it's equivalent to finding no threads not blocking SIG.  Any threads not
784 * blocking SIG were ruled out because they are not running and already
785 * have pending signals.  Such threads will dequeue from the shared queue
786 * as soon as they're available, so putting the signal on the shared queue
787 * will be equivalent to sending it to one such thread.
788 */
789static inline int wants_signal(int sig, struct task_struct *p)
790{
791	if (sigismember(&p->blocked, sig))
792		return 0;
793	if (p->flags & PF_EXITING)
794		return 0;
795	if (sig == SIGKILL)
796		return 1;
797	if (task_is_stopped_or_traced(p))
798		return 0;
799	return task_curr(p) || !signal_pending(p);
800}
801
802static void complete_signal(int sig, struct task_struct *p, int group)
803{
804	struct signal_struct *signal = p->signal;
805	struct task_struct *t;
806
807	/*
808	 * Now find a thread we can wake up to take the signal off the queue.
809	 *
810	 * If the main thread wants the signal, it gets first crack.
811	 * Probably the least surprising to the average bear.
812	 */
813	if (wants_signal(sig, p))
814		t = p;
815	else if (!group || thread_group_empty(p))
816		/*
817		 * There is just one thread and it does not need to be woken.
818		 * It will dequeue unblocked signals before it runs again.
819		 */
820		return;
821	else {
822		/*
823		 * Otherwise try to find a suitable thread.
824		 */
825		t = signal->curr_target;
826		while (!wants_signal(sig, t)) {
827			t = next_thread(t);
828			if (t == signal->curr_target)
829				/*
830				 * No thread needs to be woken.
831				 * Any eligible threads will see
832				 * the signal in the queue soon.
833				 */
834				return;
835		}
836		signal->curr_target = t;
837	}
838
839	/*
840	 * Found a killable thread.  If the signal will be fatal,
841	 * then start taking the whole group down immediately.
842	 */
843	if (sig_fatal(p, sig) &&
844	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
845	    !sigismember(&t->real_blocked, sig) &&
846	    (sig == SIGKILL ||
847	     !tracehook_consider_fatal_signal(t, sig))) {
848		/*
849		 * This signal will be fatal to the whole group.
850		 */
851		if (!sig_kernel_coredump(sig)) {
852			/*
853			 * Start a group exit and wake everybody up.
854			 * This way we don't have other threads
855			 * running and doing things after a slower
856			 * thread has the fatal signal pending.
857			 */
858			signal->flags = SIGNAL_GROUP_EXIT;
859			signal->group_exit_code = sig;
860			signal->group_stop_count = 0;
861			t = p;
862			do {
863				sigaddset(&t->pending.signal, SIGKILL);
864				signal_wake_up(t, 1);
865			} while_each_thread(p, t);
866			return;
867		}
868	}
869
870	/*
871	 * The signal is already in the shared-pending queue.
872	 * Tell the chosen thread to wake up and dequeue it.
873	 */
874	signal_wake_up(t, sig == SIGKILL);
875	return;
876}
877
878static inline int legacy_queue(struct sigpending *signals, int sig)
879{
880	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
881}
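
/*
 * Worked example (added for illustration): legacy_queue() is what makes
 * classic signals coalesce.  If SIGUSR1 is already pending for the target,
 * a second kill(pid, SIGUSR1) hits the legacy_queue() test in
 * __send_signal() below and returns 0 without queueing anything, so the
 * receiver observes the signal once.  Real-time signals (>= SIGRTMIN)
 * never take this shortcut and every send is queued separately.
 */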
882
883static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
884			int group, int from_ancestor_ns)
885{
886	struct sigpending *pending;
887	struct sigqueue *q;
888	int override_rlimit;
889
890	trace_signal_generate(sig, info, t);
891
892	assert_spin_locked(&t->sighand->siglock);
893
894	if (!prepare_signal(sig, t, from_ancestor_ns))
895		return 0;
896
897	pending = group ? &t->signal->shared_pending : &t->pending;
898	/*
899	 * Short-circuit ignored signals and support queuing
900	 * exactly one non-rt signal, so that we can get more
901	 * detailed information about the cause of the signal.
902	 */
903	if (legacy_queue(pending, sig))
904		return 0;
905	/*
906	 * fast-pathed signals for kernel-internal things like SIGSTOP
907	 * or SIGKILL.
908	 */
909	if (info == SEND_SIG_FORCED)
910		goto out_set;
911
912	/* Real-time signals must be queued if sent by sigqueue, or
913	   some other real-time mechanism.  It is implementation
914	   defined whether kill() does so.  We attempt to do so, on
915	   the principle of least surprise, but since kill is not
916	   allowed to fail with EAGAIN when low on memory we just
917	   make sure at least one signal gets delivered and don't
918	   pass on the info struct.  */
919
920	if (sig < SIGRTMIN)
921		override_rlimit = (is_si_special(info) || info->si_code >= 0);
922	else
923		override_rlimit = 0;
924
925	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
926		override_rlimit);
927	if (q) {
928		list_add_tail(&q->list, &pending->list);
929		switch ((unsigned long) info) {
930		case (unsigned long) SEND_SIG_NOINFO:
931			q->info.si_signo = sig;
932			q->info.si_errno = 0;
933			q->info.si_code = SI_USER;
934			q->info.si_pid = task_tgid_nr_ns(current,
935							task_active_pid_ns(t));
936			q->info.si_uid = current_uid();
937			break;
938		case (unsigned long) SEND_SIG_PRIV:
939			q->info.si_signo = sig;
940			q->info.si_errno = 0;
941			q->info.si_code = SI_KERNEL;
942			q->info.si_pid = 0;
943			q->info.si_uid = 0;
944			break;
945		default:
946			copy_siginfo(&q->info, info);
947			if (from_ancestor_ns)
948				q->info.si_pid = 0;
949			break;
950		}
951	} else if (!is_si_special(info)) {
952		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
953			/*
954			 * Queue overflow, abort.  We may abort if the
955			 * signal was rt and sent by user using something
956			 * other than kill().
957			 */
958			trace_signal_overflow_fail(sig, group, info);
959			return -EAGAIN;
960		} else {
961			/*
962			 * This is a silent loss of information.  We still
963			 * send the signal, but the *info bits are lost.
964			 */
965			trace_signal_lose_info(sig, group, info);
966		}
967	}
968
969out_set:
970	signalfd_notify(t, sig);
971	sigaddset(&pending->signal, sig);
972	complete_signal(sig, t, group);
973	return 0;
974}
975
976static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
977			int group)
978{
979	int from_ancestor_ns = 0;
980
981#ifdef CONFIG_PID_NS
982	from_ancestor_ns = si_fromuser(info) &&
983			   !task_pid_nr_ns(current, task_active_pid_ns(t));
984#endif
985
986	return __send_signal(sig, info, t, group, from_ancestor_ns);
987}
988
989static void print_fatal_signal(struct pt_regs *regs, int signr)
990{
991	printk("%s/%d: potentially unexpected fatal signal %d.\n",
992		current->comm, task_pid_nr(current), signr);
993
994#if defined(__i386__) && !defined(__arch_um__)
995	printk("code at %08lx: ", regs->ip);
996	{
997		int i;
998		for (i = 0; i < 16; i++) {
999			unsigned char insn;
1000
1001			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1002				break;
1003			printk("%02x ", insn);
1004		}
1005	}
1006#endif
1007	printk("\n");
1008	preempt_disable();
1009	show_regs(regs);
1010	preempt_enable();
1011}
1012
1013static int __init setup_print_fatal_signals(char *str)
1014{
	get_option(&str, &print_fatal_signals);
1016
1017	return 1;
1018}
1019
1020__setup("print-fatal-signals=", setup_print_fatal_signals);
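
/*
 * Usage note (illustrative): print_fatal_signals defaults to 0; booting
 * with "print-fatal-signals=1" on the kernel command line enables the
 * diagnostics emitted by print_fatal_signal() and print_dropped_signal().
 */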
1021
1022int
1023__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1024{
1025	return send_signal(sig, info, p, 1);
1026}
1027
1028static int
1029specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1030{
1031	return send_signal(sig, info, t, 0);
1032}
1033
1034int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1035			bool group)
1036{
1037	unsigned long flags;
1038	int ret = -ESRCH;
1039
1040	if (lock_task_sighand(p, &flags)) {
1041		ret = send_signal(sig, info, p, group);
1042		unlock_task_sighand(p, &flags);
1043	}
1044
1045	return ret;
1046}
1047
1048/*
1049 * Force a signal that the process can't ignore: if necessary
1050 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1051 *
1052 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1053 * since we do not want to have a signal handler that was blocked
1054 * be invoked when user space had explicitly blocked it.
1055 *
1056 * We don't want to have recursive SIGSEGV's etc, for example,
1057 * that is why we also clear SIGNAL_UNKILLABLE.
1058 */
1059int
1060force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1061{
1062	unsigned long int flags;
1063	int ret, blocked, ignored;
1064	struct k_sigaction *action;
1065
1066	spin_lock_irqsave(&t->sighand->siglock, flags);
1067	action = &t->sighand->action[sig-1];
1068	ignored = action->sa.sa_handler == SIG_IGN;
1069	blocked = sigismember(&t->blocked, sig);
1070	if (blocked || ignored) {
1071		action->sa.sa_handler = SIG_DFL;
1072		if (blocked) {
1073			sigdelset(&t->blocked, sig);
1074			recalc_sigpending_and_wake(t);
1075		}
1076	}
1077	if (action->sa.sa_handler == SIG_DFL)
1078		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1079	ret = specific_send_sig_info(sig, info, t);
1080	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1081
1082	return ret;
1083}
1084
1085/*
1086 * Nuke all other threads in the group.
1087 */
1088int zap_other_threads(struct task_struct *p)
1089{
1090	struct task_struct *t = p;
1091	int count = 0;
1092
1093	p->signal->group_stop_count = 0;
1094
1095	while_each_thread(p, t) {
1096		count++;
1097
1098		/* Don't bother with already dead threads */
1099		if (t->exit_state)
1100			continue;
1101		sigaddset(&t->pending.signal, SIGKILL);
1102		signal_wake_up(t, 1);
1103	}
1104
1105	return count;
1106}
1107
1108struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1109{
1110	struct sighand_struct *sighand;
1111
1112	rcu_read_lock();
1113	for (;;) {
1114		sighand = rcu_dereference(tsk->sighand);
1115		if (unlikely(sighand == NULL))
1116			break;
1117
1118		spin_lock_irqsave(&sighand->siglock, *flags);
1119		if (likely(sighand == tsk->sighand))
1120			break;
1121		spin_unlock_irqrestore(&sighand->siglock, *flags);
1122	}
1123	rcu_read_unlock();
1124
1125	return sighand;
1126}
1127
1128/*
1129 * send signal info to all the members of a group
1130 */
1131int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1132{
1133	int ret;
1134
1135	rcu_read_lock();
1136	ret = check_kill_permission(sig, info, p);
1137	rcu_read_unlock();
1138
1139	if (!ret && sig)
1140		ret = do_send_sig_info(sig, info, p, true);
1141
1142	return ret;
1143}
1144
1145/*
1146 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1147 * control characters do (^C, ^Z etc)
1148 * - the caller must hold at least a readlock on tasklist_lock
1149 */
1150int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1151{
1152	struct task_struct *p = NULL;
1153	int retval, success;
1154
1155	success = 0;
1156	retval = -ESRCH;
1157	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1158		int err = group_send_sig_info(sig, info, p);
1159		success |= !err;
1160		retval = err;
1161	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1162	return success ? 0 : retval;
1163}
1164
1165int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1166{
1167	int error = -ESRCH;
1168	struct task_struct *p;
1169
1170	rcu_read_lock();
1171retry:
1172	p = pid_task(pid, PIDTYPE_PID);
1173	if (p) {
1174		error = group_send_sig_info(sig, info, p);
1175		if (unlikely(error == -ESRCH))
1176			/*
1177			 * The task was unhashed in between, try again.
1178			 * If it is dead, pid_task() will return NULL,
1179			 * if we race with de_thread() it will find the
1180			 * new leader.
1181			 */
1182			goto retry;
1183	}
1184	rcu_read_unlock();
1185
1186	return error;
1187}
1188
1189int
1190kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1191{
1192	int error;
1193	rcu_read_lock();
1194	error = kill_pid_info(sig, info, find_vpid(pid));
1195	rcu_read_unlock();
1196	return error;
1197}
1198
/* Foxconn modification start by Hank, 08/10/2012 */
/* Add a helper for sending a signal to a process by pid. */
1201#define __si_special(priv) \
1202	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1203
1204int
1205kill_proc(pid_t pid, int sig, int priv)
1206{
1207	return kill_proc_info(sig, __si_special(priv), pid);
1208}
1209EXPORT_SYMBOL(kill_proc);
/* Foxconn modification end by Hank, 08/10/2012 */
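
/*
 * Usage sketch for the exported helper above (illustrative; daemon_pid is
 * a hypothetical variable): a kernel module that wants to poke a known
 * user-space daemon by numeric pid can do
 *
 *	kill_proc(daemon_pid, SIGUSR1, 1);
 *
 * where priv == 1 sends the signal as SEND_SIG_PRIV (kernel-originated,
 * si_code SI_KERNEL) and priv == 0 sends it as SEND_SIG_NOINFO.
 */
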
1211/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1212int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1213		      uid_t uid, uid_t euid, u32 secid)
1214{
1215	int ret = -EINVAL;
1216	struct task_struct *p;
1217	const struct cred *pcred;
1218	unsigned long flags;
1219
1220	if (!valid_signal(sig))
1221		return ret;
1222
1223	rcu_read_lock();
1224	p = pid_task(pid, PIDTYPE_PID);
1225	if (!p) {
1226		ret = -ESRCH;
1227		goto out_unlock;
1228	}
1229	pcred = __task_cred(p);
1230	if (si_fromuser(info) &&
1231	    euid != pcred->suid && euid != pcred->uid &&
1232	    uid  != pcred->suid && uid  != pcred->uid) {
1233		ret = -EPERM;
1234		goto out_unlock;
1235	}
1236	ret = security_task_kill(p, info, sig, secid);
1237	if (ret)
1238		goto out_unlock;
1239
1240	if (sig) {
1241		if (lock_task_sighand(p, &flags)) {
1242			ret = __send_signal(sig, info, p, 1, 0);
1243			unlock_task_sighand(p, &flags);
1244		} else
1245			ret = -ESRCH;
1246	}
1247out_unlock:
1248	rcu_read_unlock();
1249	return ret;
1250}
1251EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1252
1253/*
1254 * kill_something_info() interprets pid in interesting ways just like kill(2).
1255 *
1256 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1257 * is probably wrong.  Should make it like BSD or SYSV.
1258 */
1259
1260static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1261{
1262	int ret;
1263
1264	if (pid > 0) {
1265		rcu_read_lock();
1266		ret = kill_pid_info(sig, info, find_vpid(pid));
1267		rcu_read_unlock();
1268		return ret;
1269	}
1270
1271	read_lock(&tasklist_lock);
1272	if (pid != -1) {
1273		ret = __kill_pgrp_info(sig, info,
1274				pid ? find_vpid(-pid) : task_pgrp(current));
1275	} else {
1276		int retval = 0, count = 0;
1277		struct task_struct * p;
1278
1279		for_each_process(p) {
1280			if (task_pid_vnr(p) > 1 &&
1281					!same_thread_group(p, current)) {
1282				int err = group_send_sig_info(sig, info, p);
1283				++count;
1284				if (err != -EPERM)
1285					retval = err;
1286			}
1287		}
1288		ret = count ? retval : -ESRCH;
1289	}
1290	read_unlock(&tasklist_lock);
1291
1292	return ret;
1293}
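
/*
 * Summary of the pid conventions handled above (mirrors kill(2)):
 *
 *	pid > 0   signal the single process with that pid
 *	pid == 0  signal every process in the caller's process group
 *	pid == -1 signal every process the caller may signal, except
 *		  pid 1 and the caller's own thread group
 *	pid < -1  signal every process in the process group -pid
 */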
1294
1295/*
1296 * These are for backward compatibility with the rest of the kernel source.
1297 */
1298
1299int
1300send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1301{
1302	/*
1303	 * Make sure legacy kernel users don't send in bad values
1304	 * (normal paths check this in check_kill_permission).
1305	 */
1306	if (!valid_signal(sig))
1307		return -EINVAL;
1308
1309	return do_send_sig_info(sig, info, p, false);
1310}
1311
1312int
1313send_sig(int sig, struct task_struct *p, int priv)
1314{
1315	return send_sig_info(sig, __si_special(priv), p);
1316}
1317
1318void
1319force_sig(int sig, struct task_struct *p)
1320{
1321	force_sig_info(sig, SEND_SIG_PRIV, p);
1322}
1323
1324/*
1325 * When things go south during signal handling, we
1326 * will force a SIGSEGV. And if the signal that caused
1327 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
1329 */
1330int
1331force_sigsegv(int sig, struct task_struct *p)
1332{
1333	if (sig == SIGSEGV) {
1334		unsigned long flags;
1335		spin_lock_irqsave(&p->sighand->siglock, flags);
1336		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1337		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1338	}
1339	force_sig(SIGSEGV, p);
1340	return 0;
1341}
1342
1343int kill_pgrp(struct pid *pid, int sig, int priv)
1344{
1345	int ret;
1346
1347	read_lock(&tasklist_lock);
1348	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1349	read_unlock(&tasklist_lock);
1350
1351	return ret;
1352}
1353EXPORT_SYMBOL(kill_pgrp);
1354
1355int kill_pid(struct pid *pid, int sig, int priv)
1356{
1357	return kill_pid_info(sig, __si_special(priv), pid);
1358}
1359EXPORT_SYMBOL(kill_pid);
1360
1361/*
1362 * These functions support sending signals using preallocated sigqueue
1363 * structures.  This is needed "because realtime applications cannot
1364 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create.  If this
1367 * allocation fails we are able to report the failure to the application
1368 * with an EAGAIN error.
1369 */
1370struct sigqueue *sigqueue_alloc(void)
1371{
1372	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1373
1374	if (q)
1375		q->flags |= SIGQUEUE_PREALLOC;
1376
1377	return q;
1378}
1379
1380void sigqueue_free(struct sigqueue *q)
1381{
1382	unsigned long flags;
1383	spinlock_t *lock = &current->sighand->siglock;
1384
1385	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1386	/*
1387	 * We must hold ->siglock while testing q->list
1388	 * to serialize with collect_signal() or with
1389	 * __exit_signal()->flush_sigqueue().
1390	 */
1391	spin_lock_irqsave(lock, flags);
1392	q->flags &= ~SIGQUEUE_PREALLOC;
1393	/*
1394	 * If it is queued it will be freed when dequeued,
1395	 * like the "regular" sigqueue.
1396	 */
1397	if (!list_empty(&q->list))
1398		q = NULL;
1399	spin_unlock_irqrestore(lock, flags);
1400
1401	if (q)
1402		__sigqueue_free(q);
1403}
1404
1405int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1406{
1407	int sig = q->info.si_signo;
1408	struct sigpending *pending;
1409	unsigned long flags;
1410	int ret;
1411
1412	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1413
1414	ret = -1;
1415	if (!likely(lock_task_sighand(t, &flags)))
1416		goto ret;
1417
1418	ret = 1; /* the signal is ignored */
1419	if (!prepare_signal(sig, t, 0))
1420		goto out;
1421
1422	ret = 0;
1423	if (unlikely(!list_empty(&q->list))) {
1424		/*
		 * If an SI_TIMER entry is already queued, just increment
1426		 * the overrun count.
1427		 */
1428		BUG_ON(q->info.si_code != SI_TIMER);
1429		q->info.si_overrun++;
1430		goto out;
1431	}
1432	q->info.si_overrun = 0;
1433
1434	signalfd_notify(t, sig);
1435	pending = group ? &t->signal->shared_pending : &t->pending;
1436	list_add_tail(&q->list, &pending->list);
1437	sigaddset(&pending->signal, sig);
1438	complete_signal(sig, t, group);
1439out:
1440	unlock_task_sighand(t, &flags);
1441ret:
1442	return ret;
1443}
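
/*
 * Typical life cycle of a preallocated sigqueue (a sketch based only on
 * the functions above; the posix-timers code is the in-tree user):
 *
 *	q = sigqueue_alloc();            at timer_create time, may fail
 *	...
 *	q->info.si_signo = sig;
 *	q->info.si_code  = SI_TIMER;
 *	send_sigqueue(q, task, group);   at each expiry, never allocates
 *	...
 *	sigqueue_free(q);                at timer_delete time
 */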
1444
1445/*
1446 * Let a parent know about the death of a child.
1447 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1448 *
1449 * Returns -1 if our parent ignored us and so we've switched to
1450 * self-reaping, or else @sig.
1451 */
1452int do_notify_parent(struct task_struct *tsk, int sig)
1453{
1454	struct siginfo info;
1455	unsigned long flags;
1456	struct sighand_struct *psig;
1457	int ret = sig;
1458
1459	BUG_ON(sig == -1);
1460
1461 	/* do_notify_parent_cldstop should have been called instead.  */
1462 	BUG_ON(task_is_stopped_or_traced(tsk));
1463
1464	BUG_ON(!task_ptrace(tsk) &&
1465	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1466
1467	info.si_signo = sig;
1468	info.si_errno = 0;
1469	/*
	 * We are under tasklist_lock here, so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it can do is switch its nsproxy with sys_unshare(),
	 * but unsharing pid namespaces is not allowed, so we will always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
1481	rcu_read_lock();
1482	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1483	info.si_uid = __task_cred(tsk)->uid;
1484	rcu_read_unlock();
1485
1486	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1487				tsk->signal->utime));
1488	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1489				tsk->signal->stime));
1490
1491	info.si_status = tsk->exit_code & 0x7f;
1492	if (tsk->exit_code & 0x80)
1493		info.si_code = CLD_DUMPED;
1494	else if (tsk->exit_code & 0x7f)
1495		info.si_code = CLD_KILLED;
1496	else {
1497		info.si_code = CLD_EXITED;
1498		info.si_status = tsk->exit_code >> 8;
1499	}
1500
1501	psig = tsk->parent->sighand;
1502	spin_lock_irqsave(&psig->siglock, flags);
1503	if (!task_ptrace(tsk) && sig == SIGCHLD &&
1504	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1505	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1506		/*
1507		 * We are exiting and our parent doesn't care.  POSIX.1
1508		 * defines special semantics for setting SIGCHLD to SIG_IGN
1509		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1510		 * automatically and not left for our parent's wait4 call.
1511		 * Rather than having the parent do it as a magic kind of
1512		 * signal handler, we just set this to tell do_exit that we
1513		 * can be cleaned up without becoming a zombie.  Note that
1514		 * we still call __wake_up_parent in this case, because a
1515		 * blocked sys_wait4 might now return -ECHILD.
1516		 *
1517		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1518		 * is implementation-defined: we do (if you don't want
1519		 * it, just use SIG_IGN instead).
1520		 */
1521		ret = tsk->exit_signal = -1;
1522		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1523			sig = -1;
1524	}
1525	if (valid_signal(sig) && sig > 0)
1526		__group_send_sig_info(sig, &info, tsk->parent);
1527	__wake_up_parent(tsk, tsk->parent);
1528	spin_unlock_irqrestore(&psig->siglock, flags);
1529
1530	return ret;
1531}
1532
1533static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1534{
1535	struct siginfo info;
1536	unsigned long flags;
1537	struct task_struct *parent;
1538	struct sighand_struct *sighand;
1539
1540	if (task_ptrace(tsk))
1541		parent = tsk->parent;
1542	else {
1543		tsk = tsk->group_leader;
1544		parent = tsk->real_parent;
1545	}
1546
1547	info.si_signo = SIGCHLD;
1548	info.si_errno = 0;
1549	/*
	 * see the comment in do_notify_parent() about the following 3 lines
1551	 */
1552	rcu_read_lock();
1553	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1554	info.si_uid = __task_cred(tsk)->uid;
1555	rcu_read_unlock();
1556
1557	info.si_utime = cputime_to_clock_t(tsk->utime);
1558	info.si_stime = cputime_to_clock_t(tsk->stime);
1559
1560 	info.si_code = why;
1561 	switch (why) {
1562 	case CLD_CONTINUED:
1563 		info.si_status = SIGCONT;
1564 		break;
1565 	case CLD_STOPPED:
1566 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1567 		break;
1568 	case CLD_TRAPPED:
1569 		info.si_status = tsk->exit_code & 0x7f;
1570 		break;
1571 	default:
1572 		BUG();
1573 	}
1574
1575	sighand = parent->sighand;
1576	spin_lock_irqsave(&sighand->siglock, flags);
1577	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1578	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1579		__group_send_sig_info(SIGCHLD, &info, parent);
1580	/*
1581	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1582	 */
1583	__wake_up_parent(tsk, parent);
1584	spin_unlock_irqrestore(&sighand->siglock, flags);
1585}
1586
1587static inline int may_ptrace_stop(void)
1588{
1589	if (!likely(task_ptrace(current)))
1590		return 0;
1591	/*
1592	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
1596	 * If SIGKILL was already sent before the caller unlocked
1597	 * ->siglock we must see ->core_state != NULL. Otherwise it
1598	 * is safe to enter schedule().
1599	 */
1600	if (unlikely(current->mm->core_state) &&
1601	    unlikely(current->mm == current->parent->mm))
1602		return 0;
1603
1604	return 1;
1605}
1606
1607/*
1608 * Return nonzero if there is a SIGKILL that should be waking us up.
1609 * Called with the siglock held.
1610 */
1611static int sigkill_pending(struct task_struct *tsk)
1612{
1613	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1614		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1615}
1616
1617/*
1618 * This must be called with current->sighand->siglock held.
1619 *
1620 * This should be the path for all ptrace stops.
1621 * We always set current->last_siginfo while stopped here.
1622 * That makes it a way to test a stopped process for
1623 * being ptrace-stopped vs being job-control-stopped.
1624 *
1625 * If we actually decide not to stop at all because the tracer
1626 * is gone, we keep current->exit_code unless clear_code.
1627 */
1628static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1629{
1630	if (arch_ptrace_stop_needed(exit_code, info)) {
1631		/*
1632		 * The arch code has something special to do before a
1633		 * ptrace stop.  This is allowed to block, e.g. for faults
1634		 * on user stack pages.  We can't keep the siglock while
1635		 * calling arch_ptrace_stop, so we must release it now.
1636		 * To preserve proper semantics, we must do this before
1637		 * any signal bookkeeping like checking group_stop_count.
1638		 * Meanwhile, a SIGKILL could come in before we retake the
1639		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1640		 * So after regaining the lock, we must check for SIGKILL.
1641		 */
1642		spin_unlock_irq(&current->sighand->siglock);
1643		arch_ptrace_stop(exit_code, info);
1644		spin_lock_irq(&current->sighand->siglock);
1645		if (sigkill_pending(current))
1646			return;
1647	}
1648
1649	/*
1650	 * If there is a group stop in progress,
1651	 * we must participate in the bookkeeping.
1652	 */
1653	if (current->signal->group_stop_count > 0)
1654		--current->signal->group_stop_count;
1655
1656	current->last_siginfo = info;
1657	current->exit_code = exit_code;
1658
1659	/* Let the debugger run.  */
1660	__set_current_state(TASK_TRACED);
1661	spin_unlock_irq(&current->sighand->siglock);
1662	read_lock(&tasklist_lock);
1663	if (may_ptrace_stop()) {
1664		do_notify_parent_cldstop(current, CLD_TRAPPED);
1665		preempt_disable();
1666		read_unlock(&tasklist_lock);
1667		preempt_enable_no_resched();
1668		schedule();
1669	} else {
1670		/*
1671		 * By the time we got the lock, our tracer went away.
1672		 * Don't drop the lock yet, another tracer may come.
1673		 */
1674		__set_current_state(TASK_RUNNING);
1675		if (clear_code)
1676			current->exit_code = 0;
1677		read_unlock(&tasklist_lock);
1678	}
1679
1680	/*
1681	 * While in TASK_TRACED, we were considered "frozen enough".
1682	 * Now that we woke up, it's crucial if we're supposed to be
1683	 * frozen that we freeze now before running anything substantial.
1684	 */
1685	try_to_freeze();
1686
1687	/*
1688	 * We are back.  Now reacquire the siglock before touching
1689	 * last_siginfo, so that we are sure to have synchronized with
1690	 * any signal-sending on another CPU that wants to examine it.
1691	 */
1692	spin_lock_irq(&current->sighand->siglock);
1693	current->last_siginfo = NULL;
1694
1695	/*
1696	 * Queued signals ignored us while we were stopped for tracing.
1697	 * So check for any that we should take before resuming user mode.
1698	 * This sets TIF_SIGPENDING, but never clears it.
1699	 */
1700	recalc_sigpending_tsk(current);
1701}
1702
1703void ptrace_notify(int exit_code)
1704{
1705	siginfo_t info;
1706
1707	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1708
1709	memset(&info, 0, sizeof info);
1710	info.si_signo = SIGTRAP;
1711	info.si_code = exit_code;
1712	info.si_pid = task_pid_vnr(current);
1713	info.si_uid = current_uid();
1714
1715	/* Let the debugger run.  */
1716	spin_lock_irq(&current->sighand->siglock);
1717	ptrace_stop(exit_code, 1, &info);
1718	spin_unlock_irq(&current->sighand->siglock);
1719}
1720
1721/*
1722 * This performs the stopping for SIGSTOP and other stop signals.
1723 * We have to stop all threads in the thread group.
1724 * Returns nonzero if we've actually stopped and released the siglock.
1725 * Returns zero if we didn't stop and still hold the siglock.
1726 */
1727static int do_signal_stop(int signr)
1728{
1729	struct signal_struct *sig = current->signal;
1730	int notify;
1731
1732	if (!sig->group_stop_count) {
1733		struct task_struct *t;
1734
1735		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1736		    unlikely(signal_group_exit(sig)))
1737			return 0;
1738		/*
1739		 * There is no group stop already in progress.
1740		 * We must initiate one now.
1741		 */
1742		sig->group_exit_code = signr;
1743
1744		sig->group_stop_count = 1;
1745		for (t = next_thread(current); t != current; t = next_thread(t))
1746			/*
1747			 * Setting state to TASK_STOPPED for a group
1748			 * stop is always done with the siglock held,
1749			 * so this check has no races.
1750			 */
1751			if (!(t->flags & PF_EXITING) &&
1752			    !task_is_stopped_or_traced(t)) {
1753				sig->group_stop_count++;
1754				signal_wake_up(t, 0);
1755			}
1756	}
1757	/*
1758	 * If there are no other threads in the group, or if there is
1759	 * a group stop in progress and we are the last to stop, report
1760	 * to the parent.  When ptraced, every thread reports itself.
1761	 */
1762	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
1763	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
1764	/*
1765	 * tracehook_notify_jctl() can drop and reacquire siglock, so
1766	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
	 * or SIGKILL comes in between, ->group_stop_count == 0.
1768	 */
1769	if (sig->group_stop_count) {
1770		if (!--sig->group_stop_count)
1771			sig->flags = SIGNAL_STOP_STOPPED;
1772		current->exit_code = sig->group_exit_code;
1773		__set_current_state(TASK_STOPPED);
1774	}
1775	spin_unlock_irq(&current->sighand->siglock);
1776
1777	if (notify) {
1778		read_lock(&tasklist_lock);
1779		do_notify_parent_cldstop(current, notify);
1780		read_unlock(&tasklist_lock);
1781	}
1782
1783	/* Now we don't run again until woken by SIGCONT or SIGKILL */
1784	do {
1785		schedule();
1786	} while (try_to_freeze());
1787
1788	tracehook_finish_jctl();
1789	current->exit_code = 0;
1790
1791	return 1;
1792}
1793
1794static int ptrace_signal(int signr, siginfo_t *info,
1795			 struct pt_regs *regs, void *cookie)
1796{
1797	if (!task_ptrace(current))
1798		return signr;
1799
1800	ptrace_signal_deliver(regs, cookie);
1801
1802	/* Let the debugger run.  */
1803	ptrace_stop(signr, 0, info);
1804
1805	/* We're back.  Did the debugger cancel the sig?  */
1806	signr = current->exit_code;
1807	if (signr == 0)
1808		return signr;
1809
1810	current->exit_code = 0;
1811
1812	/* Update the siginfo structure if the signal has
1813	   changed.  If the debugger wanted something
1814	   specific in the siginfo structure then it should
1815	   have updated *info via PTRACE_SETSIGINFO.  */
1816	if (signr != info->si_signo) {
1817		info->si_signo = signr;
1818		info->si_errno = 0;
1819		info->si_code = SI_USER;
1820		info->si_pid = task_pid_vnr(current->parent);
1821		info->si_uid = task_uid(current->parent);
1822	}
1823
1824	/* If the (new) signal is now blocked, requeue it.  */
1825	if (sigismember(&current->blocked, signr)) {
1826		specific_send_sig_info(signr, info, current);
1827		signr = 0;
1828	}
1829
1830	return signr;
1831}
1832
1833int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1834			  struct pt_regs *regs, void *cookie)
1835{
1836	struct sighand_struct *sighand = current->sighand;
1837	struct signal_struct *signal = current->signal;
1838	int signr;
1839
1840relock:
1841	/*
1842	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1843	 * While in TASK_STOPPED, we were considered "frozen enough".
1844	 * Now that we woke up, it's crucial if we're supposed to be
1845	 * frozen that we freeze now before running anything substantial.
1846	 */
1847	try_to_freeze();
1848
1849	spin_lock_irq(&sighand->siglock);
1850	/*
1851	 * Every stopped thread goes here after wakeup. Check to see if
1852	 * we should notify the parent, prepare_signal(SIGCONT) encodes
1853	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1854	 */
1855	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1856		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1857				? CLD_CONTINUED : CLD_STOPPED;
1858		signal->flags &= ~SIGNAL_CLD_MASK;
1859
1860		why = tracehook_notify_jctl(why, CLD_CONTINUED);
1861		spin_unlock_irq(&sighand->siglock);
1862
1863		if (why) {
1864			read_lock(&tasklist_lock);
1865			do_notify_parent_cldstop(current->group_leader, why);
1866			read_unlock(&tasklist_lock);
1867		}
1868		goto relock;
1869	}
1870
1871	for (;;) {
1872		struct k_sigaction *ka;
1873		/*
		 * Tracing can induce an artificial signal and choose the sigaction.
1875		 * The return value in @signr determines the default action,
1876		 * but @info->si_signo is the signal number we will report.
1877		 */
1878		signr = tracehook_get_signal(current, regs, info, return_ka);
1879		if (unlikely(signr < 0))
1880			goto relock;
1881		if (unlikely(signr != 0))
1882			ka = return_ka;
1883		else {
1884			if (unlikely(signal->group_stop_count > 0) &&
1885			    do_signal_stop(0))
1886				goto relock;
1887
1888			signr = dequeue_signal(current, &current->blocked,
1889					       info);
1890
1891			if (!signr)
1892				break; /* will return 0 */
1893
1894			if (signr != SIGKILL) {
1895				signr = ptrace_signal(signr, info,
1896						      regs, cookie);
1897				if (!signr)
1898					continue;
1899			}
1900
1901			ka = &sighand->action[signr-1];
1902		}
1903
1904		/* Trace actually delivered signals. */
1905		trace_signal_deliver(signr, info, ka);
1906
1907		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1908			continue;
1909		if (ka->sa.sa_handler != SIG_DFL) {
1910			/* Run the handler.  */
1911			*return_ka = *ka;
1912
1913			if (ka->sa.sa_flags & SA_ONESHOT)
1914				ka->sa.sa_handler = SIG_DFL;
1915
1916			break; /* will return non-zero "signr" value */
1917		}
1918
1919		/*
1920		 * Now we are doing the default action for this signal.
1921		 */
1922		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1923			continue;
1924
1925		/*
1926		 * Global init gets no signals it doesn't want.
1927		 * Container-init gets no signals it doesn't want from same
1928		 * container.
1929		 *
1930		 * Note that if global/container-init sees a sig_kernel_only()
1931		 * signal here, the signal must have been generated internally
1932		 * or must have come from an ancestor namespace. In either
1933		 * case, the signal cannot be dropped.
1934		 */
1935		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1936				!sig_kernel_only(signr))
1937			continue;
1938
1939		if (sig_kernel_stop(signr)) {
1940			/*
1941			 * The default action is to stop all threads in
1942			 * the thread group.  The job control signals
1943			 * do nothing in an orphaned pgrp, but SIGSTOP
1944			 * always works.  Note that siglock needs to be
1945			 * dropped during the call to is_orphaned_pgrp()
1946			 * because of lock ordering with tasklist_lock.
1947			 * This allows an intervening SIGCONT to be posted.
1948			 * We need to check for that and bail out if necessary.
1949			 */
1950			if (signr != SIGSTOP) {
1951				spin_unlock_irq(&sighand->siglock);
1952
1953				/* signals can be posted during this window */
1954
1955				if (is_current_pgrp_orphaned())
1956					goto relock;
1957
1958				spin_lock_irq(&sighand->siglock);
1959			}
1960
1961			if (likely(do_signal_stop(info->si_signo))) {
1962				/* It released the siglock.  */
1963				goto relock;
1964			}
1965
1966			/*
1967			 * We didn't actually stop, due to a race
1968			 * with SIGCONT or something like that.
1969			 */
1970			continue;
1971		}
1972
1973		spin_unlock_irq(&sighand->siglock);
1974
1975		/*
1976		 * Anything else is fatal, maybe with a core dump.
1977		 */
1978		current->flags |= PF_SIGNALED;
1979
1980		if (sig_kernel_coredump(signr)) {
1981			if (print_fatal_signals)
1982				print_fatal_signal(regs, info->si_signo);
1983			/*
1984			 * If it was able to dump core, this kills all
1985			 * other threads in the group and synchronizes with
1986			 * their demise.  If we lost the race with another
1987			 * thread getting here, it set group_exit_code
1988			 * first and our do_group_exit call below will use
1989			 * that value and ignore the one we pass it.
1990			 */
1991			do_coredump(info->si_signo, info->si_signo, regs);
1992		}
1993
1994		/*
1995		 * Death signals, no core dump.
1996		 */
1997		do_group_exit(info->si_signo);
1998		/* NOTREACHED */
1999	}
2000	spin_unlock_irq(&sighand->siglock);
2001	return signr;
2002}
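
/*
 * Illustrative sketch (not part of this file): roughly how an
 * architecture's return-to-user path drives get_signal_to_deliver().
 * example_arch_do_signal() and handle_signal() stand in for the real,
 * per-arch functions and are hypothetical names here.
 */
#if 0
static void example_arch_do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	sigset_t *oldset = &current->blocked;	/* arches may use saved_sigmask */
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* A handler was chosen: build the user-mode signal frame. */
		handle_signal(signr, &ka, &info, oldset, regs);
		return;
	}
	/* signr == 0: nothing to deliver; restart the syscall if needed. */
}
#endif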
2003
2004void exit_signals(struct task_struct *tsk)
2005{
2006	int group_stop = 0;
2007	struct task_struct *t;
2008
2009	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2010		tsk->flags |= PF_EXITING;
2011		return;
2012	}
2013
2014	spin_lock_irq(&tsk->sighand->siglock);
2015	/*
2016	 * From now this task is not visible for group-wide signals,
2017	 * see wants_signal(), do_signal_stop().
2018	 */
2019	tsk->flags |= PF_EXITING;
2020	if (!signal_pending(tsk))
2021		goto out;
2022
2023	/* It could be that __group_complete_signal() chose us to
2024	 * notify about a group-wide signal. Another thread should be
2025	 * woken now to take the signal, since we will not.
2026	 */
2027	for (t = tsk; (t = next_thread(t)) != tsk; )
2028		if (!signal_pending(t) && !(t->flags & PF_EXITING))
2029			recalc_sigpending_and_wake(t);
2030
2031	if (unlikely(tsk->signal->group_stop_count) &&
2032			!--tsk->signal->group_stop_count) {
2033		tsk->signal->flags = SIGNAL_STOP_STOPPED;
2034		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
2035	}
2036out:
2037	spin_unlock_irq(&tsk->sighand->siglock);
2038
2039	if (unlikely(group_stop)) {
2040		read_lock(&tasklist_lock);
2041		do_notify_parent_cldstop(tsk, group_stop);
2042		read_unlock(&tasklist_lock);
2043	}
2044}
2045
2046EXPORT_SYMBOL(recalc_sigpending);
2047EXPORT_SYMBOL_GPL(dequeue_signal);
2048EXPORT_SYMBOL(flush_signals);
2049EXPORT_SYMBOL(force_sig);
2050EXPORT_SYMBOL(send_sig);
2051EXPORT_SYMBOL(send_sig_info);
2052EXPORT_SYMBOL(sigprocmask);
2053EXPORT_SYMBOL(block_all_signals);
2054EXPORT_SYMBOL(unblock_all_signals);
2055
2056
2057/*
2058 * System call entry points.
2059 */
2060
2061SYSCALL_DEFINE0(restart_syscall)
2062{
2063	struct restart_block *restart = &current_thread_info()->restart_block;
2064	return restart->fn(restart);
2065}
2066
2067long do_no_restart_syscall(struct restart_block *param)
2068{
2069	return -EINTR;
2070}
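
/*
 * Sketch of how a syscall arranges to be resumed through
 * sys_restart_syscall() above.  example_restart_fn()/example_wait()
 * are hypothetical; real users (nanosleep, futex, poll) stash their
 * continuation state in the union inside struct restart_block.
 */
#if 0
static long example_restart_fn(struct restart_block *restart)
{
	/* Pick up the stashed state and continue the interrupted wait. */
	return -EINTR;
}

static long example_wait(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;

	/*
	 * Interrupted by a signal with work still to do: ask to be
	 * re-entered via restart_syscall() once the handler returns.
	 */
	restart->fn = example_restart_fn;
	return -ERESTART_RESTARTBLOCK;
}
#endif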
2071
2072/*
2073 * We don't need to get the kernel lock - this is all local to this
2074 * particular thread. (And that's good, because this is _heavily_
2075 * used by various programs.)
2076 */
2077
2078/*
2079 * This is also useful for kernel threads that want to temporarily
2080 * (or permanently) block certain signals.
2081 *
2082 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2083 * interface happily blocks "unblockable" signals like SIGKILL
2084 * and friends.
2085 */
2086int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2087{
2088	int error;
2089
2090	spin_lock_irq(&current->sighand->siglock);
2091	if (oldset)
2092		*oldset = current->blocked;
2093
2094	error = 0;
2095	switch (how) {
2096	case SIG_BLOCK:
2097		sigorsets(&current->blocked, &current->blocked, set);
2098		break;
2099	case SIG_UNBLOCK:
2100		signandsets(&current->blocked, &current->blocked, set);
2101		break;
2102	case SIG_SETMASK:
2103		current->blocked = *set;
2104		break;
2105	default:
2106		error = -EINVAL;
2107	}
2108	recalc_sigpending();
2109	spin_unlock_irq(&current->sighand->siglock);
2110
2111	return error;
2112}
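
/*
 * Sketch of a kernel thread using the in-kernel sigprocmask() above to
 * block a signal around a critical region.  example_kthread_fn() is a
 * hypothetical name.
 */
#if 0
static int example_kthread_fn(void *unused)
{
	sigset_t newset, oldset;

	siginitset(&newset, sigmask(SIGHUP));
	sigprocmask(SIG_BLOCK, &newset, &oldset);	/* block SIGHUP */

	/* ... work that must not see SIGHUP ... */

	sigprocmask(SIG_SETMASK, &oldset, NULL);	/* restore old mask */
	return 0;
}
#endif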
2113
2114SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2115		sigset_t __user *, oset, size_t, sigsetsize)
2116{
2117	int error = -EINVAL;
2118	sigset_t old_set, new_set;
2119
2120	if (sigsetsize != sizeof(sigset_t))
2121		goto out;
2122
2123	if (set) {
2124		error = -EFAULT;
2125		if (copy_from_user(&new_set, set, sizeof(*set)))
2126			goto out;
2127		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2128
2129		error = sigprocmask(how, &new_set, &old_set);
2130		if (error)
2131			goto out;
2132		if (oset)
2133			goto set_old;
2134	} else if (oset) {
2135		spin_lock_irq(&current->sighand->siglock);
2136		old_set = current->blocked;
2137		spin_unlock_irq(&current->sighand->siglock);
2138
2139	set_old:
2140		error = -EFAULT;
2141		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2142			goto out;
2143	}
2144	error = 0;
2145out:
2146	return error;
2147}
2148
2149long do_sigpending(void __user *set, unsigned long sigsetsize)
2150{
2151	long error = -EINVAL;
2152	sigset_t pending;
2153
2154	if (sigsetsize > sizeof(sigset_t))
2155		goto out;
2156
2157	spin_lock_irq(&current->sighand->siglock);
2158	sigorsets(&pending, &current->pending.signal,
2159		  &current->signal->shared_pending.signal);
2160	spin_unlock_irq(&current->sighand->siglock);
2161
2162	/* Outside the lock because only this thread touches it.  */
2163	sigandsets(&pending, &current->blocked, &pending);
2164
2165	error = -EFAULT;
2166	if (!copy_to_user(set, &pending, sigsetsize))
2167		error = 0;
2168
2169out:
2170	return error;
2171}
2172
2173SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2174{
2175	return do_sigpending(set, sigsetsize);
2176}
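
/*
 * Illustrative user-space sketch (glibc API, <signal.h> and <stdio.h>
 * assumed) of what rt_sigpending reports: a signal raised while
 * blocked shows up in the pending set.
 */
#if 0
static void example_show_pending(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);	/* user-space sigprocmask(2) */

	raise(SIGUSR1);				/* queued, not delivered */

	sigpending(&pending);
	if (sigismember(&pending, SIGUSR1))
		printf("SIGUSR1 is pending\n");
}
#endif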
2177
2178#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2179
2180int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2181{
2182	int err;
2183
2184	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2185		return -EFAULT;
2186	if (from->si_code < 0)
2187		return __copy_to_user(to, from, sizeof(siginfo_t))
2188			? -EFAULT : 0;
2189	/*
2190	 * If you change the siginfo_t structure, please make sure
2191	 * this code is fixed accordingly.
2192	 * Please remember to update the signalfd_copyinfo() function
2193	 * inside fs/signalfd.c too, in case siginfo_t changes.
2194	 * To avoid security leaks it must never copy any padding
2195	 * contained in the structure, but it must copy the generic
2196	 * three ints plus the relevant union member.
2197	 */
2198	err = __put_user(from->si_signo, &to->si_signo);
2199	err |= __put_user(from->si_errno, &to->si_errno);
2200	err |= __put_user((short)from->si_code, &to->si_code);
2201	switch (from->si_code & __SI_MASK) {
2202	case __SI_KILL:
2203		err |= __put_user(from->si_pid, &to->si_pid);
2204		err |= __put_user(from->si_uid, &to->si_uid);
2205		break;
2206	case __SI_TIMER:
2207		err |= __put_user(from->si_tid, &to->si_tid);
2208		err |= __put_user(from->si_overrun, &to->si_overrun);
2209		err |= __put_user(from->si_ptr, &to->si_ptr);
2210		break;
2211	case __SI_POLL:
2212		err |= __put_user(from->si_band, &to->si_band);
2213		err |= __put_user(from->si_fd, &to->si_fd);
2214		break;
2215	case __SI_FAULT:
2216		err |= __put_user(from->si_addr, &to->si_addr);
2217#ifdef __ARCH_SI_TRAPNO
2218		err |= __put_user(from->si_trapno, &to->si_trapno);
2219#endif
2220#ifdef BUS_MCEERR_AO
2221		/*
2222		 * Other callers might not initialize the si_lsb field,
2223		 * so check explicitly for the right codes here.
2224		 */
2225		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2226			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2227#endif
2228		break;
2229	case __SI_CHLD:
2230		err |= __put_user(from->si_pid, &to->si_pid);
2231		err |= __put_user(from->si_uid, &to->si_uid);
2232		err |= __put_user(from->si_status, &to->si_status);
2233		err |= __put_user(from->si_utime, &to->si_utime);
2234		err |= __put_user(from->si_stime, &to->si_stime);
2235		break;
2236	case __SI_RT: /* This is not generated by the kernel as of now. */
2237	case __SI_MESGQ: /* But this is */
2238		err |= __put_user(from->si_pid, &to->si_pid);
2239		err |= __put_user(from->si_uid, &to->si_uid);
2240		err |= __put_user(from->si_ptr, &to->si_ptr);
2241		break;
2242	default: /* this is just in case for now ... */
2243		err |= __put_user(from->si_pid, &to->si_pid);
2244		err |= __put_user(from->si_uid, &to->si_uid);
2245		break;
2246	}
2247	return err;
2248}
2249
2250#endif
2251
2252SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2253		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2254		size_t, sigsetsize)
2255{
2256	int ret, sig;
2257	sigset_t these;
2258	struct timespec ts;
2259	siginfo_t info;
2260	long timeout = 0;
2261
2262	if (sigsetsize != sizeof(sigset_t))
2263		return -EINVAL;
2264
2265	if (copy_from_user(&these, uthese, sizeof(these)))
2266		return -EFAULT;
2267
2268	/*
2269	 * Invert the set of allowed signals to get those we
2270	 * want to block.
2271	 */
2272	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2273	signotset(&these);
2274
2275	if (uts) {
2276		if (copy_from_user(&ts, uts, sizeof(ts)))
2277			return -EFAULT;
2278		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2279		    || ts.tv_sec < 0)
2280			return -EINVAL;
2281	}
2282
2283	spin_lock_irq(&current->sighand->siglock);
2284	sig = dequeue_signal(current, &these, &info);
2285	if (!sig) {
2286		timeout = MAX_SCHEDULE_TIMEOUT;
2287		if (uts)
2288			timeout = (timespec_to_jiffies(&ts)
2289				   + (ts.tv_sec || ts.tv_nsec));
2290
2291		if (timeout) {
2292			/* None ready -- temporarily unblock those we're
2293			 * interested in while we sleep, so that we'll
2294			 * be awakened when they arrive.  */
2295			current->real_blocked = current->blocked;
2296			sigandsets(&current->blocked, &current->blocked, &these);
2297			recalc_sigpending();
2298			spin_unlock_irq(&current->sighand->siglock);
2299
2300			timeout = schedule_timeout_interruptible(timeout);
2301
2302			spin_lock_irq(&current->sighand->siglock);
2303			sig = dequeue_signal(current, &these, &info);
2304			current->blocked = current->real_blocked;
2305			siginitset(&current->real_blocked, 0);
2306			recalc_sigpending();
2307		}
2308	}
2309	spin_unlock_irq(&current->sighand->siglock);
2310
2311	if (sig) {
2312		ret = sig;
2313		if (uinfo) {
2314			if (copy_siginfo_to_user(uinfo, &info))
2315				ret = -EFAULT;
2316		}
2317	} else {
2318		ret = -EAGAIN;
2319		if (timeout)
2320			ret = -EINTR;
2321	}
2322
2323	return ret;
2324}
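
/*
 * Illustrative user-space sketch (glibc sigtimedwait(); <signal.h>,
 * <stdio.h> and <errno.h> assumed) of the intended usage: block the
 * signal first, then receive it synchronously with a timeout.
 */
#if 0
static void example_wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t si;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */

	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", si.si_pid);
	else if (errno == EAGAIN)
		printf("timed out\n");
}
#endif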
2325
2326SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2327{
2328	struct siginfo info;
2329
2330	info.si_signo = sig;
2331	info.si_errno = 0;
2332	info.si_code = SI_USER;
2333	info.si_pid = task_tgid_vnr(current);
2334	info.si_uid = current_uid();
2335
2336	return kill_something_info(sig, &info, pid);
2337}
2338
2339static int
2340do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2341{
2342	struct task_struct *p;
2343	int error = -ESRCH;
2344
2345	rcu_read_lock();
2346	p = find_task_by_vpid(pid);
2347	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2348		error = check_kill_permission(sig, info, p);
2349		/*
2350		 * The null signal is a permissions and process existence
2351		 * probe.  No signal is actually delivered.
2352		 */
2353		if (!error && sig) {
2354			error = do_send_sig_info(sig, info, p, false);
2355			/*
2356			 * If lock_task_sighand() failed we pretend the task
2357			 * dies after receiving the signal. The window is tiny,
2358			 * and the signal is private anyway.
2359			 */
2360			if (unlikely(error == -ESRCH))
2361				error = 0;
2362		}
2363	}
2364	rcu_read_unlock();
2365
2366	return error;
2367}
2368
2369static int do_tkill(pid_t tgid, pid_t pid, int sig)
2370{
2371	struct siginfo info;
2372
2373	info.si_signo = sig;
2374	info.si_errno = 0;
2375	info.si_code = SI_TKILL;
2376	info.si_pid = task_tgid_vnr(current);
2377	info.si_uid = current_uid();
2378
2379	return do_send_specific(tgid, pid, sig, &info);
2380}
2381
2382/**
2383 *  sys_tgkill - send signal to one specific thread
2384 *  @tgid: the thread group ID of the thread
2385 *  @pid: the PID of the thread
2386 *  @sig: signal to be sent
2387 *
2388 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2389 *  exists but no longer belongs to the target process. This
2390 *  method solves the problem of threads exiting and PIDs being reused.
2391 */
2392SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2393{
2394	/* This is only valid for single tasks */
2395	if (pid <= 0 || tgid <= 0)
2396		return -EINVAL;
2397
2398	return do_tkill(tgid, pid, sig);
2399}
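
/*
 * Illustrative user-space sketch: older glibc has no tgkill() wrapper,
 * so the call is usually made via syscall(2) with <sys/syscall.h> and
 * <unistd.h>.  The values shown are examples only.
 */
#if 0
static void example_tgkill_self(void)
{
	pid_t tgid = getpid();
	pid_t tid  = syscall(SYS_gettid);

	/*
	 * Directed at one specific thread; fails with ESRCH if the
	 * tid has since been reused outside this thread group.
	 */
	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
}
#endif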
2400
2401/*
2402 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2403 */
2404SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2405{
2406	/* This is only valid for single tasks */
2407	if (pid <= 0)
2408		return -EINVAL;
2409
2410	return do_tkill(0, pid, sig);
2411}
2412
2413SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2414		siginfo_t __user *, uinfo)
2415{
2416	siginfo_t info;
2417
2418	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2419		return -EFAULT;
2420
2421	/* Not even root can pretend to send signals from the kernel.
2422	   Nor can they impersonate a kill(), which adds source info.  */
2423	if (info.si_code >= 0)
2424		return -EPERM;
2425	info.si_signo = sig;
2426
2427	/* POSIX.1b doesn't mention process groups.  */
2428	return kill_proc_info(sig, &info, pid);
2429}
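
/*
 * Illustrative user-space sketch: glibc's sigqueue() reaches this
 * syscall with si_code == SI_QUEUE (a negative value), so it passes
 * the si_code >= 0 check above; a hand-built siginfo with a
 * non-negative si_code is rejected with EPERM.  target_pid is a
 * hypothetical parameter.
 */
#if 0
static void example_sigqueue(pid_t target_pid)
{
	union sigval value = { .sival_int = 42 };

	sigqueue(target_pid, SIGUSR1, value);
}
#endif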
2430
2431long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2432{
2433	/* This is only valid for single tasks */
2434	if (pid <= 0 || tgid <= 0)
2435		return -EINVAL;
2436
2437	/* Not even root can pretend to send signals from the kernel.
2438	   Nor can they impersonate a kill(), which adds source info.  */
2439	if (info->si_code >= 0)
2440		return -EPERM;
2441	info->si_signo = sig;
2442
2443	return do_send_specific(tgid, pid, sig, info);
2444}
2445
2446SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2447		siginfo_t __user *, uinfo)
2448{
2449	siginfo_t info;
2450
2451	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2452		return -EFAULT;
2453
2454	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2455}
2456
2457int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2458{
2459	struct task_struct *t = current;
2460	struct k_sigaction *k;
2461	sigset_t mask;
2462
2463	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2464		return -EINVAL;
2465
2466	k = &t->sighand->action[sig-1];
2467
2468	spin_lock_irq(&current->sighand->siglock);
2469	if (oact)
2470		*oact = *k;
2471
2472	if (act) {
2473		sigdelsetmask(&act->sa.sa_mask,
2474			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2475		*k = *act;
2476		/*
2477		 * POSIX 3.3.1.3:
2478		 *  "Setting a signal action to SIG_IGN for a signal that is
2479		 *   pending shall cause the pending signal to be discarded,
2480		 *   whether or not it is blocked."
2481		 *
2482		 *  "Setting a signal action to SIG_DFL for a signal that is
2483		 *   pending and whose default action is to ignore the signal
2484		 *   (for example, SIGCHLD), shall cause the pending signal to
2485		 *   be discarded, whether or not it is blocked"
2486		 */
2487		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2488			sigemptyset(&mask);
2489			sigaddset(&mask, sig);
2490			rm_from_queue_full(&mask, &t->signal->shared_pending);
2491			do {
2492				rm_from_queue_full(&mask, &t->pending);
2493				t = next_thread(t);
2494			} while (t != current);
2495		}
2496	}
2497
2498	spin_unlock_irq(&current->sighand->siglock);
2499	return 0;
2500}
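
/*
 * Illustrative user-space sketch of the POSIX rule quoted above:
 * installing SIG_IGN discards an already-pending instance of the
 * signal, even while it is blocked.  glibc API, <signal.h> assumed.
 */
#if 0
static void example_ign_discards_pending(void)
{
	sigset_t block, pending;
	struct sigaction sa = { .sa_handler = SIG_IGN };

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);
	raise(SIGUSR1);			/* now pending and blocked */

	sigaction(SIGUSR1, &sa, NULL);	/* discards the pending SIGUSR1 */

	sigpending(&pending);		/* SIGUSR1 is no longer a member */
}
#endif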
2501
2502int
2503do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2504{
2505	stack_t oss;
2506	int error;
2507
2508	oss.ss_sp = (void __user *) current->sas_ss_sp;
2509	oss.ss_size = current->sas_ss_size;
2510	oss.ss_flags = sas_ss_flags(sp);
2511
2512	if (uss) {
2513		void __user *ss_sp;
2514		size_t ss_size;
2515		int ss_flags;
2516
2517		error = -EFAULT;
2518		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2519			goto out;
2520		error = __get_user(ss_sp, &uss->ss_sp) |
2521			__get_user(ss_flags, &uss->ss_flags) |
2522			__get_user(ss_size, &uss->ss_size);
2523		if (error)
2524			goto out;
2525
2526		error = -EPERM;
2527		if (on_sig_stack(sp))
2528			goto out;
2529
2530		error = -EINVAL;
2531		/*
2532		 * Note: this code used to test ss_flags incorrectly.
2533		 * Old code may have been written using ss_flags == 0
2534		 * to mean ss_flags == SS_ONSTACK, as that was the only
2535		 * way that worked at the time.  This fix preserves that
2536		 * older mechanism, so ss_flags == 0 is still accepted
2537		 * below.
2538		 */
2539		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2540			goto out;
2541
2542		if (ss_flags == SS_DISABLE) {
2543			ss_size = 0;
2544			ss_sp = NULL;
2545		} else {
2546			error = -ENOMEM;
2547			if (ss_size < MINSIGSTKSZ)
2548				goto out;
2549		}
2550
2551		current->sas_ss_sp = (unsigned long) ss_sp;
2552		current->sas_ss_size = ss_size;
2553	}
2554
2555	error = 0;
2556	if (uoss) {
2557		error = -EFAULT;
2558		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2559			goto out;
2560		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2561			__put_user(oss.ss_size, &uoss->ss_size) |
2562			__put_user(oss.ss_flags, &uoss->ss_flags);
2563	}
2564
2565out:
2566	return error;
2567}
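
/*
 * Illustrative user-space sketch: setting up an alternate stack and
 * asking for a handler to run on it (SA_ONSTACK), which is what the
 * sas_ss_flags() bookkeeping above tracks.  segv_handler and the use
 * of malloc() are hypothetical; <signal.h> and <stdlib.h> assumed.
 */
#if 0
static void example_setup_altstack(void (*segv_handler)(int))
{
	stack_t ss = {
		.ss_sp    = malloc(SIGSTKSZ),
		.ss_size  = SIGSTKSZ,
		.ss_flags = 0,
	};
	struct sigaction sa = {
		.sa_handler = segv_handler,
		.sa_flags   = SA_ONSTACK,
	};

	sigaltstack(&ss, NULL);
	sigaction(SIGSEGV, &sa, NULL);
}
#endif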
2568
2569#ifdef __ARCH_WANT_SYS_SIGPENDING
2570
2571SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2572{
2573	return do_sigpending(set, sizeof(*set));
2574}
2575
2576#endif
2577
2578#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2579/* Some platforms have their own version with special arguments; others
2580   support only sys_rt_sigprocmask.  */
2581
2582SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2583		old_sigset_t __user *, oset)
2584{
2585	int error;
2586	old_sigset_t old_set, new_set;
2587
2588	if (set) {
2589		error = -EFAULT;
2590		if (copy_from_user(&new_set, set, sizeof(*set)))
2591			goto out;
2592		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2593
2594		spin_lock_irq(&current->sighand->siglock);
2595		old_set = current->blocked.sig[0];
2596
2597		error = 0;
2598		switch (how) {
2599		default:
2600			error = -EINVAL;
2601			break;
2602		case SIG_BLOCK:
2603			sigaddsetmask(&current->blocked, new_set);
2604			break;
2605		case SIG_UNBLOCK:
2606			sigdelsetmask(&current->blocked, new_set);
2607			break;
2608		case SIG_SETMASK:
2609			current->blocked.sig[0] = new_set;
2610			break;
2611		}
2612
2613		recalc_sigpending();
2614		spin_unlock_irq(&current->sighand->siglock);
2615		if (error)
2616			goto out;
2617		if (oset)
2618			goto set_old;
2619	} else if (oset) {
2620		old_set = current->blocked.sig[0];
2621	set_old:
2622		error = -EFAULT;
2623		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2624			goto out;
2625	}
2626	error = 0;
2627out:
2628	return error;
2629}
2630#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2631
2632#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2633SYSCALL_DEFINE4(rt_sigaction, int, sig,
2634		const struct sigaction __user *, act,
2635		struct sigaction __user *, oact,
2636		size_t, sigsetsize)
2637{
2638	struct k_sigaction new_sa, old_sa;
2639	int ret = -EINVAL;
2640
2641	if (sigsetsize != sizeof(sigset_t))
2642		goto out;
2643
2644	if (act) {
2645		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2646			return -EFAULT;
2647	}
2648
2649	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2650
2651	if (!ret && oact) {
2652		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2653			return -EFAULT;
2654	}
2655out:
2656	return ret;
2657}
2658#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2659
2660#ifdef __ARCH_WANT_SYS_SGETMASK
2661
2662/*
2663 * For backwards compatibility.  Functionality superseded by sigprocmask.
2664 */
2665SYSCALL_DEFINE0(sgetmask)
2666{
2667	/* SMP safe */
2668	return current->blocked.sig[0];
2669}
2670
2671SYSCALL_DEFINE1(ssetmask, int, newmask)
2672{
2673	int old;
2674
2675	spin_lock_irq(&current->sighand->siglock);
2676	old = current->blocked.sig[0];
2677
2678	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2679						  sigmask(SIGSTOP)));
2680	recalc_sigpending();
2681	spin_unlock_irq(&current->sighand->siglock);
2682
2683	return old;
2684}
2685#endif /* __ARCH_WANT_SYS_SGETMASK */
2686
2687#ifdef __ARCH_WANT_SYS_SIGNAL
2688/*
2689 * For backwards compatibility.  Functionality superseded by sigaction.
2690 */
2691SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2692{
2693	struct k_sigaction new_sa, old_sa;
2694	int ret;
2695
2696	new_sa.sa.sa_handler = handler;
2697	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2698	sigemptyset(&new_sa.sa.sa_mask);
2699
2700	ret = do_sigaction(sig, &new_sa, &old_sa);
2701
2702	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2703}
2704#endif /* __ARCH_WANT_SYS_SIGNAL */
2705
2706#ifdef __ARCH_WANT_SYS_PAUSE
2707
2708SYSCALL_DEFINE0(pause)
2709{
2710	current->state = TASK_INTERRUPTIBLE;
2711	schedule();
2712	return -ERESTARTNOHAND;
2713}
2714
2715#endif
2716
2717#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2718SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2719{
2720	sigset_t newset;
2721
2722	if (sigsetsize != sizeof(sigset_t))
2723		return -EINVAL;
2724
2725	if (copy_from_user(&newset, unewset, sizeof(newset)))
2726		return -EFAULT;
2727	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2728
2729	spin_lock_irq(&current->sighand->siglock);
2730	current->saved_sigmask = current->blocked;
2731	current->blocked = newset;
2732	recalc_sigpending();
2733	spin_unlock_irq(&current->sighand->siglock);
2734
2735	current->state = TASK_INTERRUPTIBLE;
2736	schedule();
2737	set_restore_sigmask();
2738	return -ERESTARTNOHAND;
2739}
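
/*
 * Illustrative user-space sketch of the race-free wait that
 * rt_sigsuspend implements: test the condition with the signal
 * blocked, then atomically restore the old mask and sleep.
 * "got_usr1" is a hypothetical flag set by a SIGUSR1 handler.
 */
#if 0
static volatile sig_atomic_t got_usr1;

static void example_wait_for_flag(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	while (!got_usr1)
		sigsuspend(&old);	/* unblock and wait, atomically */

	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif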
2740#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2741
2742__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2743{
2744	return NULL;
2745}
2746
2747void __init signals_init(void)
2748{
2749	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2750}
2751
2752#ifdef CONFIG_KGDB_KDB
2753#include <linux/kdb.h>
2754/*
2755 * kdb_send_sig_info - Allows kdb to send signals without exposing
2756 * signal internals.  This function checks if the required locks are
2757 * available before calling the main signal code, to avoid kdb
2758 * deadlocks.
2759 */
2760void
2761kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
2762{
2763	static struct task_struct *kdb_prev_t;
2764	int sig, new_t;
2765	if (!spin_trylock(&t->sighand->siglock)) {
2766		kdb_printf("Can't do kill command now.\n"
2767			   "The sigmask lock is held somewhere else in "
2768			   "the kernel; try again later\n");
2769		return;
2770	}
2771	spin_unlock(&t->sighand->siglock);
2772	new_t = kdb_prev_t != t;
2773	kdb_prev_t = t;
2774	if (t->state != TASK_RUNNING && new_t) {
2775		kdb_printf("Process is not RUNNING; sending a signal from "
2776			   "kdb risks deadlock\n"
2777			   "on the run queue locks. "
2778			   "The signal has _not_ been sent.\n"
2779			   "Reissue the kill command if you want to risk "
2780			   "the deadlock.\n");
2781		return;
2782	}
2783	sig = info->si_signo;
2784	if (send_sig_info(sig, info, t))
2785		kdb_printf("Failed to deliver signal %d to process %d.\n",
2786			   sig, t->pid);
2787	else
2788		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
2789}
2790#endif	/* CONFIG_KGDB_KDB */
2791