/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1992 NeXT, Inc.
 *
 * HISTORY
 * 13 May 1992 ? at NeXT
 *	Created.
 */

#include <mach/mach_types.h>
#include <mach/exception.h>

#include <kern/thread.h>

#include <sys/systm.h>
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/sysproto.h>
#include <sys/sysent.h>
#include <sys/ucontext.h>
#include <sys/wait.h>
#include <mach/thread_act.h>	/* for thread_abort_safely */
#include <mach/thread_status.h>

#include <i386/eflags.h>
#include <i386/psl.h>
#include <i386/machine_routines.h>
#include <i386/seg.h>

#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <sys/sdt.h>


/* Forward: */
extern boolean_t machine_exception(int, mach_exception_code_t,
		mach_exception_subcode_t, int *, mach_exception_subcode_t *);
extern kern_return_t thread_getstatus(thread_t act, int flavor,
			thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
			thread_state_t tstate, mach_msg_type_number_t count);

/* Signal handler flavors supported */
/* These definitions should match the Libc implementation */
#define UC_TRAD			1
#define UC_FLAVOR		30
#define	UC_SET_ALT_STACK	0x40000000
#define	UC_RESET_ALT_STACK	0x80000000

#define	C_32_STK_ALIGN		16
#define	C_64_STK_ALIGN		16
#define	C_64_REDZONE_LEN	128
#define TRUNC_DOWN32(a,c)	((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a,c)	((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
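
/*
 * A worked example of the alignment macros (illustrative value only):
 * TRUNC_DOWN64(0x7fff5fbff7c9, 16) first subtracts 16, then masks with
 * ~15, yielding 0x7fff5fbff7b0 -- i.e. the address is moved strictly
 * below its starting point and rounded down to a 16-byte boundary.
 */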

/*
 * Send an interrupt (signal) to a process.
 *
 * The stack is set up so that the signal trampoline (the sigcode
 * stored in u.) calls the handler and then traps back into the
 * kernel via the sigreturn routine below.  After sigreturn resets
 * the signal mask, the stack, the frame pointer, and the argument
 * pointer, it returns to the user-specified pc and psl.
 */
struct sigframe32 {
	int		retaddr;
	user32_addr_t	catcher; /* sig_t */
	int		sigstyle;
	int		sig;
	user32_addr_t	sinfo;	/* siginfo32_t* */
	user32_addr_t	uctx;	/* struct ucontext32 */
};
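
/*
 * Sketch of the 32-bit user stack as built by sendsig() below, from
 * higher to lower addresses (offsets are illustrative, not exact):
 *
 *	struct user_ucontext32		<- frame32.uctx
 *	user32_siginfo_t		<- frame32.sinfo
 *	struct mcontext_avx32		<- uctx32.uc_mcontext
 *	struct sigframe32		<- esp at _sigtramp entry
 *					   (16-byte aligned, minus the
 *					   retaddr slot)
 */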

/*
 * NOTE: Source and target may *NOT* overlap!
 * XXX: Unify with bsd/kern/kern_exit.c
 */
static void
siginfo_user_to_user32_x86(user_siginfo_t *in, user32_siginfo_t *out)
{
	out->si_signo	= in->si_signo;
	out->si_errno	= in->si_errno;
	out->si_code	= in->si_code;
	out->si_pid	= in->si_pid;
	out->si_uid	= in->si_uid;
	out->si_status	= in->si_status;
	out->si_addr	= CAST_DOWN_EXPLICIT(user32_addr_t,in->si_addr);
	/* following cast works for sival_int because of padding */
	out->si_value.sival_ptr	= CAST_DOWN_EXPLICIT(user32_addr_t,in->si_value.sival_ptr);
	out->si_band	= in->si_band;			/* range reduction */
	out->__pad[0]	= in->pad[0];			/* on x86 this carries the interrupted stack pointer */
}

static void
siginfo_user_to_user64_x86(user_siginfo_t *in, user64_siginfo_t *out)
{
	out->si_signo	= in->si_signo;
	out->si_errno	= in->si_errno;
	out->si_code	= in->si_code;
	out->si_pid	= in->si_pid;
	out->si_uid	= in->si_uid;
	out->si_status	= in->si_status;
	out->si_addr	= in->si_addr;
	out->si_value.sival_ptr	= in->si_value.sival_ptr;
	out->si_band	= in->si_band;			/* range reduction */
	out->__pad[0]	= in->pad[0];			/* on x86 this carries the interrupted stack pointer */
}

void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
{
	union {
		struct mcontext_avx32	mctx_avx32;
		struct mcontext_avx64	mctx_avx64;
	} mctx_store, *mctxp = &mctx_store;

	user_addr_t	ua_sp;
	user_addr_t	ua_fp;
	user_addr_t	ua_cr2;
	user_addr_t	ua_sip;
	user_addr_t	ua_uctxp;
	user_addr_t	ua_mctxp;
	user_siginfo_t	sinfo64;

	struct sigacts *ps = p->p_sigacts;
	int oonstack, flavor;
	user_addr_t trampact;
	int sigonstack;
	void * state;
	mach_msg_type_number_t state_count;

	thread_t thread;
	struct uthread * ut;
	int stack_size = 0;
	int infostyle = UC_TRAD;
	boolean_t	sig_avx;

	thread = current_thread();
	ut = get_bsdthread_info(thread);

	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;

	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
	trampact = ps->ps_trampact[sig];
	sigonstack = (ps->ps_sigonstack & sigmask(sig));

	/*
	 * init siginfo
	 */
	proc_unlock(p);

	bzero((caddr_t)&sinfo64, sizeof(sinfo64));
	sinfo64.si_signo = sig;

	bzero(mctxp, sizeof(*mctxp));
	sig_avx = ml_fpu_avx_enabled();

	if (proc_is64bit(p)) {
		x86_thread_state64_t	*tstate64;
		struct user_ucontext64	uctx64;

		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctxp->mctx_avx64.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		if (sig_avx) {
			flavor = x86_AVX_STATE64;
			state_count = x86_AVX_STATE64_COUNT;
		}
		else {
			flavor = x86_FLOAT_STATE64;
			state_count = x86_FLOAT_STATE64_COUNT;
		}
		state = (void *)&mctxp->mctx_avx64.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE64;
		state_count = x86_EXCEPTION_STATE64_COUNT;
		state = (void *)&mctxp->mctx_avx64.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate64 = &mctxp->mctx_avx64.ss;

		/* figure out where our new stack lives */
		if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
		    (sigonstack)) {
			ua_sp = ut->uu_sigstk.ss_sp;
			stack_size = ut->uu_sigstk.ss_size;
			ua_sp += stack_size;
			ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		} else {
			ua_sp = tstate64->rsp;
		}
		ua_cr2 = mctxp->mctx_avx64.es.faultvaddr;

		/* The x86_64 ABI defines a 128-byte red zone. */
		ua_sp -= C_64_REDZONE_LEN;

		ua_sp -= sizeof (struct user_ucontext64);
		ua_uctxp = ua_sp;			 /* someone tramples the first word! */

		ua_sp -= sizeof (user64_siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext_avx64);
		ua_mctxp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'ua_fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(user_addr_t);
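
		/*
		 * (The SysV AMD64 ABI expects (rsp + 8) to be 16-byte
		 * aligned at function entry, as if a CALL had just pushed
		 * the return address; subtracting the return-address slot
		 * from the aligned ua_fp reproduces that state.)
		 */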

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx64, sizeof(uctx64));

		uctx64.uc_onstack = oonstack;
		uctx64.uc_sigmask = mask;
		uctx64.uc_stack.ss_sp = ua_fp;
		uctx64.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx64.uc_stack.ss_flags |= SS_ONSTACK;
		uctx64.uc_link = 0;

		uctx64.uc_mcsize = sig_avx ? sizeof(struct mcontext_avx64) : sizeof(struct mcontext64);
		uctx64.uc_mcontext64 = ua_mctxp;

		if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
			goto bad;

		if (copyout((caddr_t)&mctxp->mctx_avx64, ua_mctxp, sizeof (struct mcontext_avx64)))
			goto bad;

		sinfo64.pad[0]  = tstate64->rsp;
		sinfo64.si_addr = tstate64->rip;

		tstate64->rip = trampact;
		tstate64->rsp = ua_fp;
		tstate64->rflags = get_eflags_exportmask();
		/*
		 * JOE - might not need to set these
		 */
		tstate64->cs = USER64_CS;
		tstate64->fs = NULL_SEG;
		tstate64->gs = USER_CTHREAD;

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 */
		tstate64->rdi = ua_catcher;
		tstate64->rsi = infostyle;
		tstate64->rdx = sig;
		tstate64->rcx = ua_sip;
		tstate64->r8  = ua_uctxp;
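
		/*
		 * Viewed from user space, the five registers above are the
		 * SysV AMD64 arguments for a trampoline of roughly this
		 * (assumed) shape:
		 *
		 *	void _sigtramp(sig_t catcher, int infostyle, int sig,
		 *	    siginfo_t *sinfo, ucontext_t *uctx);
		 */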

	} else {
		x86_thread_state32_t	*tstate32;
		struct user_ucontext32	uctx32;
		struct sigframe32	frame32;

		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)&mctxp->mctx_avx32.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		if (sig_avx) {
			flavor = x86_AVX_STATE32;
			state_count = x86_AVX_STATE32_COUNT;
		}
		else {
			flavor = x86_FLOAT_STATE32;
			state_count = x86_FLOAT_STATE32_COUNT;
		}

		state = (void *)&mctxp->mctx_avx32.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE32;
		state_count = x86_EXCEPTION_STATE32_COUNT;
		state = (void *)&mctxp->mctx_avx32.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate32 = &mctxp->mctx_avx32.ss;

		/* figure out where our new stack lives */
		if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
		    (sigonstack)) {
			ua_sp = ut->uu_sigstk.ss_sp;
			stack_size = ut->uu_sigstk.ss_size;
			ua_sp += stack_size;
			ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		} else {
			ua_sp = tstate32->esp;
		}
		ua_cr2 = mctxp->mctx_avx32.es.faultvaddr;

		ua_sp -= sizeof (struct user_ucontext32);
		ua_uctxp = ua_sp;			 /* someone tramples the first word! */

		ua_sp -= sizeof (user32_siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext_avx32);
		ua_mctxp = ua_sp;

		ua_sp -= sizeof (struct sigframe32);
		ua_fp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'ua_fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(frame32.retaddr);

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 */
		frame32.retaddr = -1;
		frame32.sigstyle = infostyle;
		frame32.sig = sig;
		frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher);
		frame32.sinfo = CAST_DOWN_EXPLICIT(user32_addr_t, ua_sip);
		frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp);

		if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
			goto bad;

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx32, sizeof(uctx32));

		uctx32.uc_onstack = oonstack;
		uctx32.uc_sigmask = mask;
		uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);
		uctx32.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx32.uc_stack.ss_flags |= SS_ONSTACK;
		uctx32.uc_link = 0;

		uctx32.uc_mcsize = sig_avx ? sizeof(struct mcontext_avx32) : sizeof(struct mcontext32);

		uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp);

		if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
			goto bad;

		if (copyout((caddr_t)&mctxp->mctx_avx32, ua_mctxp, sizeof (struct mcontext_avx32)))
			goto bad;

		sinfo64.pad[0]  = tstate32->esp;
		sinfo64.si_addr = tstate32->eip;
	}

	switch (sig) {
		case SIGILL:
			switch (ut->uu_code) {
				case EXC_I386_INVOP:
					sinfo64.si_code = ILL_ILLOPC;
					break;
				default:
					sinfo64.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
#define FP_IE 0 /* Invalid operation */
#define FP_DE 1 /* Denormalized operand */
#define FP_ZE 2 /* Zero divide */
#define FP_OE 3 /* Overflow */
#define FP_UE 4 /* Underflow */
#define FP_PE 5 /* Precision */
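/*
 * These FP_* bit numbers mirror the x87/SSE status-word exception
 * flags as recorded in uu_subcode; e.g. a floating divide-by-zero
 * sets bit FP_ZE ((1 << 2) == 0x4) and maps to FPE_FLTDIV below.
 */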
			if (ut->uu_code == EXC_I386_DIV) {
				sinfo64.si_code = FPE_INTDIV;
			}
			else if (ut->uu_code == EXC_I386_INTO) {
				sinfo64.si_code = FPE_INTOVF;
			}
			else if (ut->uu_subcode & (1 << FP_ZE)) {
				sinfo64.si_code = FPE_FLTDIV;
			} else if (ut->uu_subcode & (1 << FP_OE)) {
				sinfo64.si_code = FPE_FLTOVF;
			} else if (ut->uu_subcode & (1 << FP_UE)) {
				sinfo64.si_code = FPE_FLTUND;
			} else if (ut->uu_subcode & (1 << FP_PE)) {
				sinfo64.si_code = FPE_FLTRES;
			} else if (ut->uu_subcode & (1 << FP_IE)) {
				sinfo64.si_code = FPE_FLTINV;
			} else {
				sinfo64.si_code = FPE_NOOP;
			}
			break;
		case SIGBUS:
			sinfo64.si_code = BUS_ADRERR;
			sinfo64.si_addr = ua_cr2;
			break;
		case SIGTRAP:
			sinfo64.si_code = TRAP_BRKPT;
			break;
		case SIGSEGV:
			sinfo64.si_addr = ua_cr2;

			switch (ut->uu_code) {
				case EXC_I386_GPFLT:
					/* CR2 is meaningless after GP fault */
					/* XXX namespace clash! */
					sinfo64.si_addr = 0ULL;
					sinfo64.si_code = 0;
					break;
				case KERN_PROTECTION_FAILURE:
					sinfo64.si_code = SEGV_ACCERR;
					break;
				case KERN_INVALID_ADDRESS:
					sinfo64.si_code = SEGV_MAPERR;
					break;
				default:
					sinfo64.si_code = SEGV_NOOP;
			}
			break;
		default:
		{
			int status_and_exitcode;

			/*
			 * All other signals need to fill out a minimum set of
			 * information for the siginfo structure passed into
			 * the signal handler, if SA_SIGINFO was specified.
			 *
			 * p->si_status actually contains both the status and
			 * the exit code; we save it off in its own variable
			 * for later breakdown.
			 */
			proc_lock(p);
			sinfo64.si_pid = p->si_pid;
			p->si_pid = 0;
			status_and_exitcode = p->si_status;
			p->si_status = 0;
			sinfo64.si_uid = p->si_uid;
			p->si_uid = 0;
			sinfo64.si_code = p->si_code;
			p->si_code = 0;
			proc_unlock(p);
			if (sinfo64.si_code == CLD_EXITED) {
				if (WIFEXITED(status_and_exitcode))
					sinfo64.si_code = CLD_EXITED;
				else if (WIFSIGNALED(status_and_exitcode)) {
					if (WCOREDUMP(status_and_exitcode)) {
						sinfo64.si_code = CLD_DUMPED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
					} else {
						sinfo64.si_code = CLD_KILLED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
					}
				}
			}
			/*
			 * The recorded status contains the exit code and the
			 * signal information, but the information to be passed
			 * in the siginfo to the handler is supposed to only
			 * contain the status, so we have to shift it out.
			 */
			sinfo64.si_status = WEXITSTATUS(status_and_exitcode);
			break;
		}
	}
	if (proc_is64bit(p)) {
		user64_siginfo_t sinfo64_user64;

		bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64));

		siginfo_user_to_user64_x86(&sinfo64, &sinfo64_user64);

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo;
		ut->t_dtrace_siginfo.si_code = sinfo64.si_code;
		ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid;
		ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid;
		ut->t_dtrace_siginfo.si_status = sinfo64.si_status;
		/* XXX truncates faulting address to void * on K32 */
		ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (sig) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}

		/* XXX truncates catcher address to uintptr_t */
		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
			void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */

		if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof (sinfo64_user64)))
			goto bad;

		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctxp->mctx_avx64.ss;
	} else {
		x86_thread_state32_t	*tstate32;
		user32_siginfo_t sinfo32;

		bzero((caddr_t)&sinfo32, sizeof(sinfo32));

		siginfo_user_to_user32_x86(&sinfo64, &sinfo32);

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo;
		ut->t_dtrace_siginfo.si_code = sinfo32.si_code;
		ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid;
		ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid;
		ut->t_dtrace_siginfo.si_status = sinfo32.si_status;
		ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (sig) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}

		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
			void (*)(void), CAST_DOWN(sig_t, ua_catcher));
#endif /* CONFIG_DTRACE */

		if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
			goto bad;

		tstate32 = &mctxp->mctx_avx32.ss;

		tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact);
		tstate32->esp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp);

		tstate32->eflags = get_eflags_exportmask();

		tstate32->cs = USER_CS;
		tstate32->ss = USER_DS;
		tstate32->ds = USER_DS;
		tstate32->es = USER_DS;
		tstate32->fs = NULL_SEG;
		tstate32->gs = USER_CTHREAD;

		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)tstate32;
	}
	if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
		goto bad;
	ml_fp_setvalid(FALSE);

	/* Tell the PAL layer about the signal */
	pal_set_signal_delivery(thread);

	proc_lock(p);

	return;

bad:

	proc_lock(p);
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
	return;
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset the signal mask and stack
 * state from the context left by sendsig (above).
 * Return to the previous pc and psl as specified by
 * the context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */

int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	union {
		struct mcontext_avx32	mctx_avx32;
		struct mcontext_avx64	mctx_avx64;
	} mctx_store, *mctxp = &mctx_store;

	thread_t thread = current_thread();
	struct uthread * ut;
	int	error;
	int	onstack = 0;

	mach_msg_type_number_t ts_count;
	unsigned int           ts_flavor;
	void		    *  ts;
	mach_msg_type_number_t fs_count;
	unsigned int           fs_flavor;
	void		    *  fs;
	int	rval = EJUSTRETURN;
	boolean_t	sig_avx;

	ut = (struct uthread *)get_bsdthread_info(thread);

	/*
	 * If we are being asked to change the altstack flag on the thread, we
	 * just set/reset it and return (the uap->uctx is not used).
	 */
	if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		return (0);
	} else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		return (0);
	}
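
	/*
	 * (The UC_{SET,RESET}_ALT_STACK values live in the high bits so
	 * they cannot collide with the real infostyles UC_TRAD and
	 * UC_FLAVOR; user space is assumed to use this side door as
	 * e.g. sigreturn(NULL, UC_SET_ALT_STACK) to flip the flag.)
	 */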

	bzero(mctxp, sizeof(*mctxp));
	sig_avx = ml_fpu_avx_enabled();

	if (proc_is64bit(p)) {
		struct user_ucontext64	uctx64;

		if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
			return(error);

		if ((error = copyin(uctx64.uc_mcontext64, (void *)&mctxp->mctx_avx64, sizeof (struct mcontext_avx64))))
			return(error);

		onstack = uctx64.uc_onstack & 01;
		ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE64;
		ts_count  = x86_THREAD_STATE64_COUNT;
		ts = (void *)&mctxp->mctx_avx64.ss;

		if (sig_avx) {
			fs_flavor = x86_AVX_STATE64;
			fs_count = x86_AVX_STATE64_COUNT;
		}
		else {
			fs_flavor = x86_FLOAT_STATE64;
			fs_count = x86_FLOAT_STATE64_COUNT;
		}

		fs = (void *)&mctxp->mctx_avx64.fs;

	} else {
		struct user_ucontext32	uctx32;

		if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
			return(error);

		if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)&mctxp->mctx_avx32, sizeof (struct mcontext_avx32))))
			return(error);

		onstack = uctx32.uc_onstack & 01;
		ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE32;
		ts_count  = x86_THREAD_STATE32_COUNT;
		ts = (void *)&mctxp->mctx_avx32.ss;

		if (sig_avx) {
			fs_flavor = x86_AVX_STATE32;
			fs_count = x86_AVX_STATE32_COUNT;
		}
		else {
			fs_flavor = x86_FLOAT_STATE32;
			fs_count = x86_FLOAT_STATE32_COUNT;
		}

		fs = (void *)&mctxp->mctx_avx32.fs;
	}

	if (onstack)
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	else
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;

	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(thread);
	/*
	 * thread_set_state() does all the needed checks for the passed in
	 * content
	 */
	if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS) {
		rval = EINVAL;
		goto error_ret;
	}

	ml_fp_setvalid(TRUE);

	if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) {
		rval = EINVAL;
		goto error_ret;
	}
error_ret:
	return rval;
}


/*
 * machine_exception() performs MD translation
 * of a Mach exception to a Unix signal and code.
 */

boolean_t
machine_exception(
	int				exception,
	mach_exception_code_t		code,
	__unused mach_exception_subcode_t subcode,
	int 				*unix_signal,
	mach_exception_code_t		*unix_code)
{

	switch(exception) {

	case EXC_BAD_ACCESS:
		/* Map GP fault to SIGSEGV, otherwise defer to caller */
		if (code == EXC_I386_GPFLT) {
			*unix_signal = SIGSEGV;
			*unix_code = code;
			break;
		}
		return(FALSE);

	case EXC_BAD_INSTRUCTION:
		*unix_signal = SIGILL;
		*unix_code = code;
		break;

	case EXC_ARITHMETIC:
		*unix_signal = SIGFPE;
		*unix_code = code;
		break;

	case EXC_SOFTWARE:
		if (code == EXC_I386_BOUND) {
			/*
			 * Map #BR, the Bound Range Exceeded exception, to
			 * SIGTRAP.
			 */
			*unix_signal = SIGTRAP;
			*unix_code = code;
			break;
		}
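		/* FALLTHROUGH: any other EXC_SOFTWARE code is unhandled */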

	default:
		return(FALSE);
	}

	return(TRUE);
}
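
/*
 * For example (values taken from the cases above): EXC_BAD_INSTRUCTION
 * with code EXC_I386_INVOP yields (*unix_signal, *unix_code) ==
 * (SIGILL, EXC_I386_INVOP) and returns TRUE, while EXC_BAD_ACCESS with
 * code KERN_INVALID_ADDRESS returns FALSE, leaving the translation to
 * the machine-independent caller.
 */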