/*
 *  linux/arch/m68k/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
 *
 * mathemu support by Roman Zippel
 *  (Note: fpstate in the signal context is completely ignored for the emulator
 *         and the internal floating point format is put on stack)
 */

/*
 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
 * Atari :-) Current limitation: Only one sigstack can be active at one time.
 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
 * signal handlers!
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/extable.h>
#include <linux/resume_user_mode.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/cacheflush.h>

#include "signal.h"

#ifdef CONFIG_MMU

/*
 * Handle the slight differences in classic 68k and ColdFire trap frames.
 */
#ifdef CONFIG_COLDFIRE
#define	FORMAT		4
#define	FMT4SIZE	0
#else
#define	FORMAT		0
#define	FMT4SIZE	sizeof_field(struct frame, un.fmt4)
#endif

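/*
 * Extra bytes of exception frame state that each frame format carries
 * beyond the basic four-word frame; -1 marks the formats that a signal
 * return is never expected to present.
 */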
static const int frame_size_change[16] = {
  [1]	= -1, /* sizeof_field(struct frame, un.fmt1), */
  [2]	= sizeof_field(struct frame, un.fmt2),
  [3]	= sizeof_field(struct frame, un.fmt3),
  [4]	= FMT4SIZE,
  [5]	= -1, /* sizeof_field(struct frame, un.fmt5), */
  [6]	= -1, /* sizeof_field(struct frame, un.fmt6), */
  [7]	= sizeof_field(struct frame, un.fmt7),
  [8]	= -1, /* sizeof_field(struct frame, un.fmt8), */
  [9]	= sizeof_field(struct frame, un.fmt9),
  [10]	= sizeof_field(struct frame, un.fmta),
  [11]	= sizeof_field(struct frame, un.fmtb),
  [12]	= -1, /* sizeof_field(struct frame, un.fmtc), */
  [13]	= -1, /* sizeof_field(struct frame, un.fmtd), */
  [14]	= -1, /* sizeof_field(struct frame, un.fmte), */
  [15]	= -1, /* sizeof_field(struct frame, un.fmtf), */
};

static inline int frame_extra_sizes(int f)
{
	return frame_size_change[f];
}

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	struct pt_regs *tregs;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (!fixup)
		return 0;

	/* Create a new four word stack frame, discarding the old one. */
	regs->stkadj = frame_extra_sizes(regs->format);
	tregs =	(struct pt_regs *)((long)regs + regs->stkadj);
	tregs->vector = regs->vector;
	tregs->format = FORMAT;
	tregs->pc = fixup->fixup;
	tregs->sr = regs->sr;

	return 1;
}

static inline void push_cache (unsigned long vaddr)
{
	/*
	 * Using the old cache_push_v() was really a big waste.
	 *
	 * What we are trying to do is to flush 8 bytes to ram.
	 * Flushing 2 cache lines of 16 bytes is much cheaper than
	 * flushing 1 or 2 pages, as previously done in
	 * cache_push_v().
	 *                                                     Jes
	 */
	if (CPU_IS_040) {
		unsigned long temp;

		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "ptestr (%1)\n\t"
				      "movec %%mmusr,%0\n\t"
				      ".chip 68k"
				      : "=r" (temp)
				      : "a" (vaddr));

		temp &= PAGE_MASK;
		temp |= vaddr & ~PAGE_MASK;

		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	}
	else if (CPU_IS_060) {
		unsigned long temp;
		__asm__ __volatile__ (".chip 68060\n\t"
				      "plpar (%0)\n\t"
				      ".chip 68k"
				      : "=a" (temp)
				      : "0" (vaddr));
		__asm__ __volatile__ (".chip 68060\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	} else if (!CPU_IS_COLDFIRE) {
		/*
		 * 68030/68020 have no writeback cache;
		 * still need to clear icache.
		 * Note that vaddr is guaranteed to be long word aligned.
		 */
		unsigned long temp;
		asm volatile ("movec %%cacr,%0" : "=r" (temp));
		temp += 4;
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr), "r" (temp));
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr + 4), "r" (temp));
	} else {
		/* CPU_IS_COLDFIRE */
#if defined(CONFIG_CACHE_COPYBACK)
		flush_cf_dcache(0, DCACHE_MAX_ADDR);
#endif
		/* Invalidate instruction cache for the pushed bytes */
		clear_cf_icache(vaddr, vaddr + 8);
	}
}

static inline void adjustformat(struct pt_regs *regs)
{
}

static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
}

#else /* CONFIG_MMU */

void ret_from_user_signal(void);
void ret_from_user_rt_signal(void);

static inline int frame_extra_sizes(int f)
{
	/* No frame size adjustments required on non-MMU CPUs */
	return 0;
}

static inline void adjustformat(struct pt_regs *regs)
{
	/*
	 * set format byte to make stack appear modulo 4, which it will
	 * be when doing the rte
	 */
	regs->format = 0x4;
}

static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
	sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
}

static inline void push_cache(unsigned long vaddr)
{
}

#endif /* CONFIG_MMU */

/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 */

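/*
 * Layout of the signal frames built on the user stack.  pretcode points at
 * the sigreturn trampoline held in retcode, and the saved context follows.
 */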
struct sigframe
{
	char __user *pretcode;
	int sig;
	int code;
	struct sigcontext __user *psc;
	char retcode[8];
	unsigned long extramask[_NSIG_WORDS-1];
	struct sigcontext sc;
};

struct rt_sigframe
{
	char __user *pretcode;
	int sig;
	struct siginfo __user *pinfo;
	void __user *puc;
	char retcode[8];
	struct siginfo info;
	struct ucontext uc;
};

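/*
 * The FPU state, the exception frame's format/vector word and any extra
 * hardware frame data are stashed in the uc_filler area of the ucontext.
 */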
#define FPCONTEXT_SIZE	216
#define uc_fpstate	uc_filler[0]
#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]

#ifdef CONFIG_FPU

static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */

static inline int restore_fpu_state(struct sigcontext *sc)
{
	int err = 1;

	if (FPU_IS_EMU) {
	    /* restore registers */
	    memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
	    memcpy(current->thread.fp, sc->sc_fpregs, 24);
	    return 0;
	}

	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
	    /* Verify the frame format.  */
	    if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
		 (sc->sc_fpstate[0] != fpu_version))
		goto out;
	    if (CPU_IS_020_OR_030) {
		if (m68k_fputype & FPU_68881 &&
		    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
		    goto out;
		if (m68k_fputype & FPU_68882 &&
		    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
		    goto out;
	    } else if (CPU_IS_040) {
		if (!(sc->sc_fpstate[1] == 0x00 ||
                      sc->sc_fpstate[1] == 0x28 ||
                      sc->sc_fpstate[1] == 0x60))
		    goto out;
	    } else if (CPU_IS_060) {
		if (!(sc->sc_fpstate[3] == 0x00 ||
                      sc->sc_fpstate[3] == 0x60 ||
		      sc->sc_fpstate[3] == 0xe0))
		    goto out;
	    } else if (CPU_IS_COLDFIRE) {
		if (!(sc->sc_fpstate[0] == 0x00 ||
		      sc->sc_fpstate[0] == 0x05 ||
		      sc->sc_fpstate[0] == 0xe5))
		    goto out;
	    } else
		goto out;

	    if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
				  "fmovel %1,%%fpcr\n\t"
				  "fmovel %2,%%fpsr\n\t"
				  "fmovel %3,%%fpiar"
				  : /* no outputs */
				  : "m" (sc->sc_fpregs[0]),
				    "m" (sc->sc_fpcntl[0]),
				    "m" (sc->sc_fpcntl[1]),
				    "m" (sc->sc_fpcntl[2]));
	    } else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %0,%%fp0-%%fp1\n\t"
				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (*sc->sc_fpregs),
				    "m" (*sc->sc_fpcntl));
	    }
	}

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "frestore %0\n\t"
				  ".chip 68k"
				  : : "m" (*sc->sc_fpstate));
	}
	err = 0;

out:
	return err;
}

static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
	fpregset_t fpregs;
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore fpu control register */
		if (__copy_from_user(current->thread.fpcntl,
				uc->uc_mcontext.fpregs.f_fpcntl, 12))
			goto out;
		/* restore all other fpu register */
		if (__copy_from_user(current->thread.fp,
				uc->uc_mcontext.fpregs.f_fpregs, 96))
			goto out;
		return 0;
	}

	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
		goto out;
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
			context_size = fpstate[1];
		/* Verify the frame format.  */
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
		     (fpstate[0] != fpu_version))
			goto out;
		if (CPU_IS_020_OR_030) {
			if (m68k_fputype & FPU_68881 &&
			    !(context_size == 0x18 || context_size == 0xb4))
				goto out;
			if (m68k_fputype & FPU_68882 &&
			    !(context_size == 0x38 || context_size == 0xd4))
				goto out;
		} else if (CPU_IS_040) {
			if (!(context_size == 0x00 ||
			      context_size == 0x28 ||
			      context_size == 0x60))
				goto out;
		} else if (CPU_IS_060) {
			if (!(fpstate[3] == 0x00 ||
			      fpstate[3] == 0x60 ||
			      fpstate[3] == 0xe0))
				goto out;
		} else if (CPU_IS_COLDFIRE) {
			if (!(fpstate[3] == 0x00 ||
			      fpstate[3] == 0x05 ||
			      fpstate[3] == 0xe5))
				goto out;
		} else
			goto out;
		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
				     sizeof(fpregs)))
			goto out;

		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
					  "fmovel %1,%%fpcr\n\t"
					  "fmovel %2,%%fpsr\n\t"
					  "fmovel %3,%%fpiar"
					  : /* no outputs */
					  : "m" (fpregs.f_fpregs[0]),
					    "m" (fpregs.f_fpcntl[0]),
					    "m" (fpregs.f_fpcntl[1]),
					    "m" (fpregs.f_fpcntl[2]));
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %0,%%fp0-%%fp7\n\t"
					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
					  ".chip 68k"
					  : /* no outputs */
					  : "m" (*fpregs.f_fpregs),
					    "m" (*fpregs.f_fpcntl));
		}
	}
	if (context_size &&
	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
			     context_size))
		goto out;

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "frestore %0\n\t"
				  ".chip 68k"
				  : : "m" (*fpstate));
	}
	err = 0;

out:
	return err;
}

/*
 * Set up a signal frame.
 */
static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
	if (FPU_IS_EMU) {
		/* save registers */
		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
		memcpy(sc->sc_fpregs, current->thread.fp, 24);
		return;
	}

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("fsave %0"
				  : : "m" (*sc->sc_fpstate) : "memory");
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fsave %0\n\t"
				  ".chip 68k"
				  : : "m" (*sc->sc_fpstate) : "memory");
	}

	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
		fpu_version = sc->sc_fpstate[0];
		if (CPU_IS_020_OR_030 && !regs->stkadj &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
				sc->sc_fpstate[0x38] |= 1 << 3;
		}

		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
					  "fmovel %%fpcr,%1\n\t"
					  "fmovel %%fpsr,%2\n\t"
					  "fmovel %%fpiar,%3"
					  : "=m" (sc->sc_fpregs[0]),
					    "=m" (sc->sc_fpcntl[0]),
					    "=m" (sc->sc_fpcntl[1]),
					    "=m" (sc->sc_fpcntl[2])
					  : /* no inputs */
					  : "memory");
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %%fp0-%%fp1,%0\n\t"
					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
					  ".chip 68k"
					  : "=m" (*sc->sc_fpregs),
					    "=m" (*sc->sc_fpcntl)
					  : /* no inputs */
					  : "memory");
		}
	}
}

static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
	int err = 0;

	if (FPU_IS_EMU) {
		/* save fpu control register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
				current->thread.fpcntl, 12);
		/* save all other fpu register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
				current->thread.fp, 96);
		return err;
	}

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fsave %0\n\t"
				  ".chip 68k"
				  : : "m" (*fpstate) : "memory");
	}

	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		fpregset_t fpregs;
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
			context_size = fpstate[1];
		fpu_version = fpstate[0];
		if (CPU_IS_020_OR_030 && !regs->stkadj &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) fpstate == 0x1f38)
				fpstate[0x38] |= 1 << 3;
		}
		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
					  "fmovel %%fpcr,%1\n\t"
					  "fmovel %%fpsr,%2\n\t"
					  "fmovel %%fpiar,%3"
					  : "=m" (fpregs.f_fpregs[0]),
					    "=m" (fpregs.f_fpcntl[0]),
					    "=m" (fpregs.f_fpcntl[1]),
					    "=m" (fpregs.f_fpcntl[2])
					  : /* no inputs */
					  : "memory");
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %%fp0-%%fp7,%0\n\t"
					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
					  ".chip 68k"
					  : "=m" (*fpregs.f_fpregs),
					    "=m" (*fpregs.f_fpcntl)
					  : /* no inputs */
					  : "memory");
		}
		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
				    sizeof(fpregs));
	}
	if (context_size)
		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
				    context_size);
	return err;
}

#else /* CONFIG_FPU */

/*
 * For the case with no FPU configured these all do nothing.
 */
static inline int restore_fpu_state(struct sigcontext *sc)
{
	return 0;
}

static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
	return 0;
}

static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
}

static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_FPU */

static inline void siginfo_build_tests(void)
{
	/*
	 * This needs to be tested on m68k as it has a lesser
	 * alignment requirement than x86 and that can cause surprises.
	 */

	/* This is part of the ABI and can never change in size: */
	BUILD_BUG_ON(sizeof(siginfo_t) != 128);

	/* Ensure the known fields never change in location */
	BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
	BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
	BUILD_BUG_ON(offsetof(siginfo_t, si_code)  != 8);

	/* _kill */
	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0c);
	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x10);

	/* _timer */
	BUILD_BUG_ON(offsetof(siginfo_t, si_tid)     != 0x0c);
	BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x10);
	BUILD_BUG_ON(offsetof(siginfo_t, si_value)   != 0x14);

	/* _rt */
	BUILD_BUG_ON(offsetof(siginfo_t, si_pid)   != 0x0c);
	BUILD_BUG_ON(offsetof(siginfo_t, si_uid)   != 0x10);
	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x14);

	/* _sigchld */
	BUILD_BUG_ON(offsetof(siginfo_t, si_pid)    != 0x0c);
	BUILD_BUG_ON(offsetof(siginfo_t, si_uid)    != 0x10);
	BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x14);
	BUILD_BUG_ON(offsetof(siginfo_t, si_utime)  != 0x18);
	BUILD_BUG_ON(offsetof(siginfo_t, si_stime)  != 0x1c);

	/* _sigfault */
	BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x0c);

	/* _sigfault._mcerr */
	BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x10);

	/* _sigfault._addr_bnd */
	BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x12);
	BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x16);

	/* _sigfault._addr_pkey */
	BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);

	/* _sigfault._perf */
	BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
	BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
	BUILD_BUG_ON(offsetof(siginfo_t, si_perf_flags) != 0x18);

	/* _sigpoll */
	BUILD_BUG_ON(offsetof(siginfo_t, si_band)   != 0x0c);
	BUILD_BUG_ON(offsetof(siginfo_t, si_fd)     != 0x10);

	/* _sigsys */
	BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x0c);
	BUILD_BUG_ON(offsetof(siginfo_t, si_syscall)   != 0x10);
	BUILD_BUG_ON(offsetof(siginfo_t, si_arch)      != 0x14);

	/* any new si_fields should be added here */
}

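/*
 * Rebuild the kernel exception frame from the format/vector word and any
 * extra frame data saved in the user signal frame, shifting pt_regs and
 * switch_stack down to make room for the extra words.  Returns the number
 * of extra bytes on success, or -1 if the saved frame format is invalid.
 */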
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
			       void __user *fp)
{
	int extra = frame_extra_sizes(formatvec >> 12);
	char buf[sizeof_field(struct frame, un)];

	if (extra < 0) {
		/*
		 * user process trying to return with weird frame format
		 */
		pr_debug("user process returning with weird frame format\n");
		return -1;
	}
	if (extra && copy_from_user(buf, fp, extra))
		return -1;
	regs->format = formatvec >> 12;
	regs->vector = formatvec & 0xfff;
	if (extra) {
		void *p = (struct switch_stack *)regs - 1;
		struct frame *new = (void *)regs - extra;
		int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);

		memmove(p - extra, p, size);
		memcpy(p - extra + size, buf, extra);
		current->thread.esp0 = (unsigned long)&new->ptregs;
#ifdef CONFIG_M68040
		/* on 68040 complete pending writebacks if any */
		if (new->ptregs.format == 7) // bus error frame
			berr_040cleanup(new);
#endif
	}
	return extra;
}

static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
	int formatvec;
	struct sigcontext context;

	siginfo_build_tests();

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* get previous context */
	if (copy_from_user(&context, usc, sizeof(context)))
		return -1;

	/* restore passed registers */
	regs->d0 = context.sc_d0;
	regs->d1 = context.sc_d1;
	regs->a0 = context.sc_a0;
	regs->a1 = context.sc_a1;
	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
	regs->pc = context.sc_pc;
	regs->orig_d0 = -1;		/* disable syscall checks */
	wrusp(context.sc_usp);
	formatvec = context.sc_formatvec;

	if (restore_fpu_state(&context))
		return -1;

	return mangle_kernel_stack(regs, formatvec, fp);
}

static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
		    struct ucontext __user *uc)
{
	int temp;
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	unsigned long usp;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err = __get_user(temp, &uc->uc_mcontext.version);
	if (temp != MCONTEXT_VERSION)
		return -1;
	/* restore passed registers */
	err |= __get_user(regs->d0, &gregs[0]);
	err |= __get_user(regs->d1, &gregs[1]);
	err |= __get_user(regs->d2, &gregs[2]);
	err |= __get_user(regs->d3, &gregs[3]);
	err |= __get_user(regs->d4, &gregs[4]);
	err |= __get_user(regs->d5, &gregs[5]);
	err |= __get_user(sw->d6, &gregs[6]);
	err |= __get_user(sw->d7, &gregs[7]);
	err |= __get_user(regs->a0, &gregs[8]);
	err |= __get_user(regs->a1, &gregs[9]);
	err |= __get_user(regs->a2, &gregs[10]);
	err |= __get_user(sw->a3, &gregs[11]);
	err |= __get_user(sw->a4, &gregs[12]);
	err |= __get_user(sw->a5, &gregs[13]);
	err |= __get_user(sw->a6, &gregs[14]);
	err |= __get_user(usp, &gregs[15]);
	wrusp(usp);
	err |= __get_user(regs->pc, &gregs[16]);
	err |= __get_user(temp, &gregs[17]);
	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
	regs->orig_d0 = -1;		/* disable syscall checks */
	err |= __get_user(temp, &uc->uc_formatvec);

	err |= rt_restore_fpu_state(uc);
	err |= restore_altstack(&uc->uc_stack);

	if (err)
		return -1;

	return mangle_kernel_stack(regs, temp, &uc->uc_extra);
}

asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
	unsigned long usp = rdusp();
	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
	sigset_t set;
	int size;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
	    (_NSIG_WORDS > 1 &&
	     __copy_from_user(&set.sig[1], &frame->extramask,
			      sizeof(frame->extramask))))
		goto badframe;

	set_current_blocked(&set);

	size = restore_sigcontext(regs, &frame->sc, frame + 1);
	if (size < 0)
		goto badframe;
	return (void *)sw - size;

badframe:
	force_sig(SIGSEGV);
	return sw;
}

asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
	unsigned long usp = rdusp();
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
	sigset_t set;
	int size;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	size = rt_restore_ucontext(regs, sw, &frame->uc);
	if (size < 0)
		goto badframe;
	return (void *)sw - size;

badframe:
	force_sig(SIGSEGV);
	return sw;
}

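/*
 * Return the location of the live exception frame: when stkadj is non-zero
 * the frame the rte instruction will use sits that many bytes above regs.
 */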
static inline struct pt_regs *rte_regs(struct pt_regs *regs)
{
	return (void *)regs + regs->stkadj;
}

static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
			     unsigned long mask)
{
	struct pt_regs *tregs = rte_regs(regs);
	sc->sc_mask = mask;
	sc->sc_usp = rdusp();
	sc->sc_d0 = regs->d0;
	sc->sc_d1 = regs->d1;
	sc->sc_a0 = regs->a0;
	sc->sc_a1 = regs->a1;
	sc->sc_sr = tregs->sr;
	sc->sc_pc = tregs->pc;
	sc->sc_formatvec = tregs->format << 12 | tregs->vector;
	save_a5_state(sc, regs);
	save_fpu_state(sc, regs);
}

static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *)regs - 1;
	struct pt_regs *tregs = rte_regs(regs);
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	int err = 0;

	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
	err |= __put_user(regs->d0, &gregs[0]);
	err |= __put_user(regs->d1, &gregs[1]);
	err |= __put_user(regs->d2, &gregs[2]);
	err |= __put_user(regs->d3, &gregs[3]);
	err |= __put_user(regs->d4, &gregs[4]);
	err |= __put_user(regs->d5, &gregs[5]);
	err |= __put_user(sw->d6, &gregs[6]);
	err |= __put_user(sw->d7, &gregs[7]);
	err |= __put_user(regs->a0, &gregs[8]);
	err |= __put_user(regs->a1, &gregs[9]);
	err |= __put_user(regs->a2, &gregs[10]);
	err |= __put_user(sw->a3, &gregs[11]);
	err |= __put_user(sw->a4, &gregs[12]);
	err |= __put_user(sw->a5, &gregs[13]);
	err |= __put_user(sw->a6, &gregs[14]);
	err |= __put_user(rdusp(), &gregs[15]);
	err |= __put_user(tregs->pc, &gregs[16]);
	err |= __put_user(tregs->sr, &gregs[17]);
	err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
	err |= rt_save_fpu_state(uc, regs);
	return err;
}

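/*
 * Pick the user or alternate signal stack and carve out an 8-byte aligned
 * frame, leaving a safety gap below the (unreliable) USP of format 0xb
 * frames on 68020/030.
 */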
static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
{
	unsigned long usp = sigsp(rdusp(), ksig);
	unsigned long gap = 0;

	if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
		/* USP is unreliable so use worst-case value */
		gap = 256;
	}

	return (void __user *)((usp - gap - frame_size) & -8UL);
}

static int setup_frame(struct ksignal *ksig, sigset_t *set,
			struct pt_regs *regs)
{
	struct sigframe __user *frame;
	struct pt_regs *tregs = rte_regs(regs);
	int fsize = frame_extra_sizes(tregs->format);
	struct sigcontext context;
	int err = 0, sig = ksig->sig;

	if (fsize < 0) {
		pr_debug("setup_frame: Unknown frame format %#x\n",
			 tregs->format);
		return -EFAULT;
	}

	frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);

	if (fsize)
		err |= copy_to_user (frame + 1, regs + 1, fsize);

	err |= __put_user(sig, &frame->sig);

	err |= __put_user(tregs->vector, &frame->code);
	err |= __put_user(&frame->sc, &frame->psc);

	if (_NSIG_WORDS > 1)
		err |= copy_to_user(frame->extramask, &set->sig[1],
				    sizeof(frame->extramask));

	setup_sigcontext(&context, regs, set->sig[0]);
	err |= copy_to_user (&frame->sc, &context, sizeof(context));

	/* Set up to return from userspace.  */
#ifdef CONFIG_MMU
	err |= __put_user(frame->retcode, &frame->pretcode);
	/* moveq #,d0; trap #0 */
	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
			  (long __user *)(frame->retcode));
#else
	err |= __put_user((long) ret_from_user_signal,
			  (long __user *) &frame->pretcode);
#endif

	if (err)
		return -EFAULT;

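	/*
	 * Flush the just-written trampoline out of the data cache and
	 * invalidate the icache so the CPU executes the new code.
	 */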
	push_cache ((unsigned long) &frame->retcode);

	/*
	 * This is subtle; if we build more than one sigframe, all but the
	 * first one will see frame format 0 and have fsize == 0, so we won't
	 * screw stkadj.
	 */
	if (fsize) {
		regs->stkadj = fsize;
		tregs = rte_regs(regs);
		pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
		tregs->vector = 0;
		tregs->format = 0;
		tregs->sr = regs->sr;
	}

	/*
	 * Set up registers for signal handler.  All the state we are about
	 * to destroy is successfully copied to sigframe.
	 */
	wrusp ((unsigned long) frame);
	tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
	adjustformat(regs);

	return 0;
}

static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
			   struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	struct pt_regs *tregs = rte_regs(regs);
	int fsize = frame_extra_sizes(tregs->format);
	int err = 0, sig = ksig->sig;

	if (fsize < 0) {
		pr_debug("setup_rt_frame: Unknown frame format %#x\n",
			 tregs->format);
		return -EFAULT;
	}

	frame = get_sigframe(ksig, tregs, sizeof(*frame));

	if (fsize)
		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);

	err |= __put_user(sig, &frame->sig);
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, rdusp());
	err |= rt_setup_ucontext(&frame->uc, regs);
	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace.  */
#ifdef CONFIG_MMU
	err |= __put_user(frame->retcode, &frame->pretcode);
#ifdef __mcoldfire__
	/* movel #__NR_rt_sigreturn,d0; trap #0 */
	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
			  (long __user *)(frame->retcode + 4));
#else
	/* moveq #,d0; notb d0; trap #0 */
	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
			  (long __user *)(frame->retcode + 0));
	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
#endif
#else
	err |= __put_user((long) ret_from_user_rt_signal,
			  (long __user *) &frame->pretcode);
#endif /* CONFIG_MMU */

	if (err)
		return -EFAULT;

	push_cache ((unsigned long) &frame->retcode);

	/*
	 * This is subtle; if we build more than one sigframe, all but the
	 * first one will see frame format 0 and have fsize == 0, so we won't
	 * screw stkadj.
	 */
	if (fsize) {
		regs->stkadj = fsize;
		tregs = rte_regs(regs);
		pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
		tregs->vector = 0;
		tregs->format = 0;
		tregs->sr = regs->sr;
	}

	/*
	 * Set up registers for signal handler.  All the state we are about
	 * to destroy is successfully copied to sigframe.
	 */
	wrusp ((unsigned long) frame);
	tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
	adjustformat(regs);
	return 0;
}

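/*
 * Fix up the user d0/pc so that an interrupted system call either returns
 * -EINTR to the handler or is re-executed once the handler has run.
 */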
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
	switch (regs->d0) {
	case -ERESTARTNOHAND:
		if (!has_handler)
			goto do_restart;
		regs->d0 = -EINTR;
		break;

	case -ERESTART_RESTARTBLOCK:
		if (!has_handler) {
			regs->d0 = __NR_restart_syscall;
			regs->pc -= 2;
			break;
		}
		regs->d0 = -EINTR;
		break;

	case -ERESTARTSYS:
		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
			regs->d0 = -EINTR;
			break;
		}
		fallthrough;
	case -ERESTARTNOINTR:
	do_restart:
		regs->d0 = regs->orig_d0;
		regs->pc -= 2;
		break;
	}
}

/*
 * OK, we're invoking a handler
 */
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int err;
	/* are we from a system call? */
	if (regs->orig_d0 >= 0)
		/* If so, check system call restarting.. */
		handle_restart(regs, &ksig->ka, 1);

	/* set up the stack frame */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		err = setup_rt_frame(ksig, oldset, regs);
	else
		err = setup_frame(ksig, oldset, regs);

	signal_setup_done(err, ksig, 0);

	if (test_thread_flag(TIF_DELAYED_TRACE)) {
		regs->sr &= ~0x8000;
		send_sig(SIGTRAP, current, 1);
	}
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	current->thread.esp0 = (unsigned long) regs;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.  */
		handle_signal(&ksig, regs);
		return;
	}

	/* Did we come from a system call? */
	if (regs->orig_d0 >= 0)
		/* Restart the system call - no handlers present */
		handle_restart(regs, NULL, 0);

	/* If there's no signal to deliver, we just restore the saved mask.  */
	restore_saved_sigmask();
}

asmlinkage void do_notify_resume(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL) ||
	    test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);

	if (test_thread_flag(TIF_NOTIFY_RESUME))
		resume_user_mode_work(regs);
}
