1/*	$NetBSD$	*/
2
3/*
4 * Copyright (c) 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Frank van der Linden for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *      This product includes software developed for the NetBSD Project by
20 *      Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <sys/cdefs.h>
39__KERNEL_RCSID(0, "$NetBSD$");
40
41#ifdef _KERNEL_OPT
42#include "opt_compat_netbsd.h"
43#include "opt_coredump.h"
44#include "opt_execfmt.h"
45#include "opt_user_ldt.h"
46#include "opt_mtrr.h"
47#endif
48
49#include <sys/param.h>
50#include <sys/exec.h>
51#include <sys/exec_aout.h>
52#include <sys/kmem.h>
53#include <sys/proc.h>
54#include <sys/signalvar.h>
55#include <sys/systm.h>
56#include <sys/sa.h>
57#include <sys/savar.h>
58#include <sys/core.h>
59#include <sys/mount.h>
60#include <sys/buf.h>
61#include <sys/vnode.h>
62#include <sys/ras.h>
63#include <sys/ptrace.h>
64#include <sys/kauth.h>
65
66#include <machine/fpu.h>
67#include <machine/frame.h>
68#include <machine/reg.h>
69#include <machine/vmparam.h>
70#ifdef MTRR
71#include <machine/mtrr.h>
72#endif
73#include <machine/netbsd32_machdep.h>
74#include <machine/sysarch.h>
75#include <machine/userret.h>
76
77#include <compat/netbsd32/netbsd32.h>
78#include <compat/netbsd32/netbsd32_exec.h>
79#include <compat/netbsd32/netbsd32_syscallargs.h>
80
81#include <compat/sys/signal.h>
82#include <compat/sys/signalvar.h>
83
/* Provide the name of the architecture we're emulating */
85const char	machine32[] = "i386";
86const char	machine_arch32[] = "i386";
87
88extern void (osyscall_return)(void);
89
90#ifdef MTRR
91static int x86_64_get_mtrr32(struct lwp *, void *, register_t *);
92static int x86_64_set_mtrr32(struct lwp *, void *, register_t *);
93#else
94#define x86_64_get_mtrr32(x, y, z)	ENOSYS
95#define x86_64_set_mtrr32(x, y, z)	ENOSYS
96#endif
97
98static int check_sigcontext32(struct lwp *, const struct netbsd32_sigcontext *);
99
100#ifdef EXEC_AOUT
101/*
102 * There is no native a.out -- this function is required
103 * for i386 a.out emulation (COMPAT_NETBSD32+EXEC_AOUT).
104 */
/*
 * Always refuse the image: native amd64 has no a.out support, so only
 * the netbsd32 (i386) a.out exec path may claim a.out binaries.
 */
int
cpu_exec_aout_makecmds(struct lwp *p, struct exec_package *e)
{

	return ENOEXEC;
}
111#endif
112
113#ifdef COMPAT_16
114/*
115 * There is no NetBSD-1.6 compatibility for native code.
116 * COMPAT_16 is useful for i386 emulation (COMPAT_NETBSD32) only.
117 */
int
compat_16_sys___sigreturn14(struct lwp *l, const struct compat_16_sys___sigreturn14_args *uap, register_t *retval)
{

	/* Native NetBSD-1.6 sigreturn is not supported on amd64. */
	return ENOSYS;
}
124#endif
125
/*
 * Set up the initial register state for a freshly exec'd 32-bit (i386)
 * process: reset FPU state, mark the lwp/proc as 32-bit, install the
 * 32-bit user segment selectors and the program entry point / stack.
 */
void
netbsd32_setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct pcb *pcb;
	struct trapframe *tf;
	struct proc *p = l->l_proc;
	void **retaddr;

	pcb = lwp_getpcb(l);

	/* If we were using the FPU, forget about it. */
	if (pcb->pcb_fpcpu != NULL) {
		fpusave_lwp(l, false);
	}

#if defined(USER_LDT) && 0
	/* NOTE(review): deliberately compiled out ("&& 0"). */
	pmap_ldt_cleanup(p);
#endif

	netbsd32_adjust_limits(p);

	/* Fresh FPU control state: i386 default control word + MXCSR. */
	l->l_md.md_flags &= ~MDP_USEDFPU;
	pcb->pcb_flags = PCB_COMPAT32;
        pcb->pcb_savefpu.fp_fxsave.fx_fcw = __NetBSD_NPXCW__;
        pcb->pcb_savefpu.fp_fxsave.fx_mxcsr = __INITIAL_MXCSR__;
	pcb->pcb_savefpu.fp_fxsave.fx_mxcsr_mask = __INITIAL_MXCSR_MASK__;

	/* Mark the process as 32-bit for the rest of the kernel. */
	p->p_flag |= PK_32;

	tf = l->l_md.md_regs;
	tf->tf_ds = LSEL(LUDATA32_SEL, SEL_UPL);
	tf->tf_es = LSEL(LUDATA32_SEL, SEL_UPL);
	cpu_fsgs_zero(l);
	cpu_fsgs_reload(l, tf->tf_ds, tf->tf_es);
	tf->tf_rdi = 0;
	tf->tf_rsi = 0;
	tf->tf_rbp = 0;
	/* %ebx carries the ps_strings address to the 32-bit startup code. */
	tf->tf_rbx = (uint32_t)p->p_psstrp;
	tf->tf_rdx = 0;
	tf->tf_rcx = 0;
	tf->tf_rax = 0;
	tf->tf_rip = pack->ep_entry;
	tf->tf_cs = LSEL(LUCODE32_SEL, SEL_UPL);
	tf->tf_rflags = PSL_USERSET;
	tf->tf_rsp = stack;
	tf->tf_ss = LSEL(LUDATA32_SEL, SEL_UPL);

	/* XXX frob return address to return via old iret method, not sysret */
	retaddr = (void **)tf - 1;
	*retaddr = (void *)osyscall_return;
}
177
178#ifdef COMPAT_16
/*
 * Deliver a signal to a 32-bit process using the old (pre-siginfo)
 * struct sigcontext frame layout, for signal trampoline versions 0/1.
 * Builds the frame on the user stack (or the alternate signal stack),
 * copies it out, then rewrites the trapframe to enter the handler.
 */
static void
netbsd32_sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct trapframe *tf;
	int sig = ksi->ksi_signo;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct netbsd32_sigframe_sigcontext *fp, frame;
	int onstack, error;
	struct sigacts *ps = p->p_sigacts;

	tf = l->l_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct netbsd32_sigframe_sigcontext *)
		    ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
	else
		fp = (struct netbsd32_sigframe_sigcontext *)tf->tf_rsp;
	fp--;

	/* Build stack frame for signal trampoline. */
	switch (ps->sa_sigdesc[sig].sd_vers) {
	case 0:
		/* Legacy: trampoline code copied out into the process. */
		frame.sf_ra = (uint32_t)(u_long)p->p_sigctx.ps_sigcode;
		break;
	case 1:
		/* Trampoline provided by libc. */
		frame.sf_ra = (uint32_t)(u_long)ps->sa_sigdesc[sig].sd_tramp;
		break;
	default:
		/* Don't know what trampoline version; kill it. */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}
	frame.sf_signum = sig;
	frame.sf_code = ksi->ksi_trap;
	frame.sf_scp = (uint32_t)(u_long)&fp->sf_sc;

	/* Save the (truncated to 32 bits) register state. */
	frame.sf_sc.sc_ds = tf->tf_ds;
	frame.sf_sc.sc_es = tf->tf_es;
	frame.sf_sc.sc_fs = tf->tf_fs;
	frame.sf_sc.sc_gs = tf->tf_gs;

	frame.sf_sc.sc_eflags = tf->tf_rflags;
	frame.sf_sc.sc_edi = tf->tf_rdi;
	frame.sf_sc.sc_esi = tf->tf_rsi;
	frame.sf_sc.sc_ebp = tf->tf_rbp;
	frame.sf_sc.sc_ebx = tf->tf_rbx;
	frame.sf_sc.sc_edx = tf->tf_rdx;
	frame.sf_sc.sc_ecx = tf->tf_rcx;
	frame.sf_sc.sc_eax = tf->tf_rax;
	frame.sf_sc.sc_eip = tf->tf_rip;
	frame.sf_sc.sc_cs = tf->tf_cs;
	frame.sf_sc.sc_esp = tf->tf_rsp;
	frame.sf_sc.sc_ss = tf->tf_ss;
	frame.sf_sc.sc_trapno = tf->tf_trapno;
	frame.sf_sc.sc_err = tf->tf_err;

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = l->l_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

	sendsig_reset(l, sig);

	/* Drop p_lock across the user copyout. */
	mutex_exit(p->p_lock);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.
	 */
	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_gs = GSEL(GUDATA32_SEL, SEL_UPL);

	/* Ensure FP state is reset, if FP is used. */
	l->l_md.md_flags &= ~MDP_USEDFPU;

	tf->tf_rip = (uint64_t)catcher;
	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
	tf->tf_rflags &= ~PSL_CLEARSIG;
	tf->tf_rsp = (uint64_t)fp;
	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

	if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS32) {
		/*
		 * process has given an invalid address for the
		 * handler. Stop it, but do not do it before so
		 * we can return the right info to userland (or in core dump)
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}
}
294#endif
295
/*
 * Deliver a signal to a 32-bit process using the siginfo/ucontext frame
 * layout (trampoline version 2).  Builds the frame on the user stack
 * (or alternate signal stack), copies it out, then rewrites the
 * trapframe to enter the handler.
 */
static void
netbsd32_sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack, error;
	int sig = ksi->ksi_signo;
	struct netbsd32_sigframe_siginfo *fp, frame;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct trapframe *tf = l->l_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct netbsd32_sigframe_siginfo *)
		    ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
	else
		fp = (struct netbsd32_sigframe_siginfo *)tf->tf_rsp;

	fp--;

	/* Build stack frame for signal trampoline. */
	switch (ps->sa_sigdesc[sig].sd_vers) {
	case 0:		/* handled by sendsig_sigcontext */
	case 1:		/* handled by sendsig_sigcontext */
	default:	/* unknown version */
		printf("nsendsig: bad version %d\n",
		    ps->sa_sigdesc[sig].sd_vers);
		sigexit(l, SIGILL);
		/* NOTREACHED */
	case 2:
		break;
	}

	/* All pointers are narrowed to 32 bits for the i386 process. */
	frame.sf_ra = (uint32_t)(uintptr_t)ps->sa_sigdesc[sig].sd_tramp;
	frame.sf_signum = sig;
	frame.sf_sip = (uint32_t)(uintptr_t)&fp->sf_si;
	frame.sf_ucp = (uint32_t)(uintptr_t)&fp->sf_uc;
	netbsd32_si_to_si32(&frame.sf_si, (const siginfo_t *)&ksi->ksi_info);
	frame.sf_uc.uc_flags = _UC_SIGMASK;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_link = (uint32_t)(uintptr_t)l->l_ctxlink;
	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
	    ? _UC_SETSTACK : _UC_CLRSTACK;
	memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));
	sendsig_reset(l, sig);

	/* Drop p_lock across the mcontext capture and user copyout. */
	mutex_exit(p->p_lock);
	cpu_getmcontext32(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.
	 */
	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_gs = GSEL(GUDATA32_SEL, SEL_UPL);

	tf->tf_rip = (uint64_t)catcher;
	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
	tf->tf_rflags &= ~PSL_CLEARSIG;
	tf->tf_rsp = (uint64_t)fp;
	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);

	/* Ensure FP state is reset, if FP is used. */
	l->l_md.md_flags &= ~MDP_USEDFPU;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

	if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS32) {
		/*
		 * process has given an invalid address for the
		 * handler. Stop it, but do not do it before so
		 * we can return the right info to userland (or in core dump)
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}
}
392
393void
394netbsd32_sendsig(const ksiginfo_t *ksi, const sigset_t *mask)
395{
396#ifdef COMPAT_16
397	if (curproc->p_sigacts->sa_sigdesc[ksi->ksi_signo].sd_vers < 2)
398		netbsd32_sendsig_sigcontext(ksi, mask);
399	else
400#endif
401		netbsd32_sendsig_siginfo(ksi, mask);
402}
403
/*
 * sigreturn(2) for 32-bit processes using the old sigcontext layout:
 * validate the user-supplied context, then restore register state,
 * signal stack status and signal mask from it.
 */
int
compat_16_netbsd32___sigreturn14(struct lwp *l, const struct compat_16_netbsd32___sigreturn14_args *uap, register_t *retval)
{
	/* {
		syscallarg(netbsd32_sigcontextp_t) sigcntxp;
	} */
	struct netbsd32_sigcontext *scp, context;
	struct proc *p = l->l_proc;
	struct trapframe *tf;
	int error;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = NETBSD32PTR64(SCARG(uap, sigcntxp));
	if (copyin(scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/*
	 * Check for security violations.
	 */
	error = check_sigcontext32(l, &context);
	if (error != 0)
		return error;

	/* Restore register context. */
	tf = l->l_md.md_regs;
	tf->tf_ds = context.sc_ds;
	tf->tf_es = context.sc_es;
	/* %fs/%gs need the special reload path on amd64. */
	cpu_fsgs_reload(l, context.sc_fs, context.sc_gs);
	tf->tf_rflags = context.sc_eflags;
	tf->tf_rdi = context.sc_edi;
	tf->tf_rsi = context.sc_esi;
	tf->tf_rbp = context.sc_ebp;
	tf->tf_rbx = context.sc_ebx;
	tf->tf_rdx = context.sc_edx;
	tf->tf_rcx = context.sc_ecx;
	tf->tf_rax = context.sc_eax;

	tf->tf_rip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_rsp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

	mutex_enter(p->p_lock);
	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	(void) sigprocmask1(l, SIG_SETMASK, &context.sc_mask, 0);
	mutex_exit(p->p_lock);

	/* Registers were modified directly; skip normal syscall return. */
	return (EJUSTRETURN);
}
462
463
464#ifdef COREDUMP
465/*
466 * Dump the machine specific segment at the start of a core dump.
467 */
468struct md_core32 {
469	struct reg32 intreg;
470	struct fpreg32 freg;
471};
472
473int
474cpu_coredump32(struct lwp *l, void *iocookie, struct core32 *chdr)
475{
476	struct md_core32 md_core;
477	struct coreseg cseg;
478	int error;
479
480	if (iocookie == NULL) {
481		CORE_SETMAGIC(*chdr, COREMAGIC, MID_I386, 0);
482		chdr->c_hdrsize = ALIGN32(sizeof(*chdr));
483		chdr->c_seghdrsize = ALIGN32(sizeof(cseg));
484		chdr->c_cpusize = sizeof(md_core);
485		chdr->c_nseg++;
486		return 0;
487	}
488
489	/* Save integer registers. */
490	error = netbsd32_process_read_regs(l, &md_core.intreg);
491	if (error)
492		return error;
493
494	/* Save floating point registers. */
495	error = netbsd32_process_read_fpregs(l, &md_core.freg);
496	if (error)
497		return error;
498
499	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_I386, CORE_CPU);
500	cseg.c_addr = 0;
501	cseg.c_size = chdr->c_cpusize;
502
503	error = coredump_write(iocookie, UIO_SYSSPACE, &cseg,
504	    chdr->c_seghdrsize);
505	if (error)
506		return error;
507
508	return coredump_write(iocookie, UIO_SYSSPACE, &md_core,
509	    sizeof(md_core));
510}
511#endif
512
513int
514netbsd32_process_read_regs(struct lwp *l, struct reg32 *regs)
515{
516	struct trapframe *tf = l->l_md.md_regs;
517
518	regs->r_gs = LSEL(LUCODE32_SEL, SEL_UPL);
519	regs->r_fs = LSEL(LUCODE32_SEL, SEL_UPL);
520	regs->r_es = LSEL(LUCODE32_SEL, SEL_UPL);
521	regs->r_ds = LSEL(LUCODE32_SEL, SEL_UPL);
522	regs->r_eflags = tf->tf_rflags;
523	/* XXX avoid sign extension problems with unknown upper bits? */
524	regs->r_edi = tf->tf_rdi & 0xffffffff;
525	regs->r_esi = tf->tf_rsi & 0xffffffff;
526	regs->r_ebp = tf->tf_rbp & 0xffffffff;
527	regs->r_ebx = tf->tf_rbx & 0xffffffff;
528	regs->r_edx = tf->tf_rdx & 0xffffffff;
529	regs->r_ecx = tf->tf_rcx & 0xffffffff;
530	regs->r_eax = tf->tf_rax & 0xffffffff;
531	regs->r_eip = tf->tf_rip & 0xffffffff;
532	regs->r_cs = tf->tf_cs;
533	regs->r_esp = tf->tf_rsp & 0xffffffff;
534	regs->r_ss = tf->tf_ss;
535
536	return (0);
537}
538
539/*
540 * XXX-cube (20060311):  This doesn't seem to work fine.
541 */
542static int
543xmm_to_s87_tag(const uint8_t *fpac, int regno, uint8_t tw)
544{
545	static const uint8_t empty_significand[8] = { 0 };
546	int tag;
547	uint16_t exponent;
548
549	if (tw & (1U << regno)) {
550		exponent = fpac[8] | (fpac[9] << 8);
551		switch (exponent) {
552		case 0x7fff:
553			tag = 2;
554			break;
555
556		case 0x0000:
557			if (memcmp(empty_significand, fpac,
558				   sizeof(empty_significand)) == 0)
559				tag = 1;
560			else
561				tag = 2;
562			break;
563
564		default:
565			if ((fpac[7] & 0x80) == 0)
566				tag = 2;
567			else
568				tag = 0;
569			break;
570		}
571	} else
572		tag = 3;
573
574	return (tag);
575}
576
/*
 * Convert the lwp's 64-bit FXSAVE FP state into the i386 `struct
 * save87` layout expected by 32-bit ptrace/core consumers: repack the
 * environment words, rebuild the full 2-bit-per-register tag word from
 * the abridged FXSAVE tag, and copy the ten-byte register images.
 */
int
netbsd32_process_read_fpregs(struct lwp *l, struct fpreg32 *regs)
{
	struct pcb *pcb = lwp_getpcb(l);
	struct savefpu *sf = &pcb->pcb_savefpu;
	struct fpreg regs64;
	/*
	 * NOTE(review): fpreg32 is accessed through a save87 overlay —
	 * assumes the two layouts are compatible; confirm against the
	 * struct definitions.
	 */
	struct save87 *s87 = (struct save87 *)regs;
	int error, i;

	/*
	 * All that stuff makes no sense in i386 code :(
	 */

	error = process_read_fpregs(l, &regs64);
	if (error)
		return error;

	s87->sv_env.en_cw = regs64.fxstate.fx_fcw;
	s87->sv_env.en_sw = regs64.fxstate.fx_fsw;
	s87->sv_env.en_fip = regs64.fxstate.fx_rip >> 16; /* XXX Order? */
	s87->sv_env.en_fcs = regs64.fxstate.fx_rip & 0xffff;
	s87->sv_env.en_opcode = regs64.fxstate.fx_fop;
	s87->sv_env.en_foo = regs64.fxstate.fx_rdp >> 16; /* XXX See above */
	s87->sv_env.en_fos = regs64.fxstate.fx_rdp & 0xffff;

	/* Expand the abridged tag words: 2 bits per x87 register. */
	s87->sv_env.en_tw = 0;
	s87->sv_ex_tw = 0;
	for (i = 0; i < 8; i++) {
		s87->sv_env.en_tw |=
		    (xmm_to_s87_tag((uint8_t *)&regs64.fxstate.fx_st[i][0], i,
		     regs64.fxstate.fx_ftw) << (i * 2));

		s87->sv_ex_tw |=
		    (xmm_to_s87_tag((uint8_t *)&regs64.fxstate.fx_st[i][0], i,
		     sf->fp_ex_tw) << (i * 2));

		memcpy(&s87->sv_ac[i].fp_bytes, &regs64.fxstate.fx_st[i][0],
		    sizeof(s87->sv_ac[i].fp_bytes));
	}

	s87->sv_ex_sw = sf->fp_ex_sw;

	return (0);
}
621
622int
623netbsd32_sysarch(struct lwp *l, const struct netbsd32_sysarch_args *uap, register_t *retval)
624{
625	/* {
626		syscallarg(int) op;
627		syscallarg(netbsd32_voidp) parms;
628	} */
629	int error;
630
631	switch (SCARG(uap, op)) {
632	case X86_IOPL:
633		error = x86_iopl(l,
634		    NETBSD32PTR64(SCARG(uap, parms)), retval);
635		break;
636	case X86_GET_MTRR:
637		error = x86_64_get_mtrr32(l,
638		    NETBSD32PTR64(SCARG(uap, parms)), retval);
639		break;
640	case X86_SET_MTRR:
641		error = x86_64_set_mtrr32(l,
642		    NETBSD32PTR64(SCARG(uap, parms)), retval);
643		break;
644	default:
645		error = EINVAL;
646		break;
647	}
648	return error;
649}
650
651#ifdef MTRR
/*
 * 32-bit wrapper for X86_GET_MTRR: copy in the 32-bit argument
 * structure, fetch the MTRR set through mtrr_get(), and copy each
 * entry back out in its 32-bit form.  If the caller passes a NULL
 * mtrrp it only wants the supported entry count.
 */
static int
x86_64_get_mtrr32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_64_get_mtrr_args32 args32;
	int error, i;
	int32_t n;
	struct mtrr32 *m32p, m32;
	struct mtrr *m64p, *mp;
	size_t size;

	m64p = NULL;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &args32, sizeof args32);
	if (error != 0)
		return error;

	/* NULL output array: just report how many entries exist. */
	if (args32.mtrrp == 0) {
		n = (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX);
		return copyout(&n, (void *)(uintptr_t)args32.n, sizeof n);
	}

	error = copyin((void *)(uintptr_t)args32.n, &n, sizeof n);
	if (error != 0)
		return error;

	/* Clamp before using n to size the kernel allocation. */
	if (n <= 0 || n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX))
		return EINVAL;

	size = n * sizeof(struct mtrr);
	/* NOTE(review): KM_SLEEP allocations shouldn't fail; the NULL
	 * check below is belt-and-braces. */
	m64p = kmem_zalloc(size, KM_SLEEP);
	if (m64p == NULL) {
		error = ENOMEM;
		goto fail;
	}
	error = mtrr_get(m64p, &n, l->l_proc, 0);
	if (error != 0)
		goto fail;
	/* Narrow each 64-bit entry to its 32-bit user representation. */
	m32p = (struct mtrr32 *)(uintptr_t)args32.mtrrp;
	mp = m64p;
	for (i = 0; i < n; i++) {
		m32.base = mp->base;
		m32.len = mp->len;
		m32.type = mp->type;
		m32.flags = mp->flags;
		m32.owner = mp->owner;
		error = copyout(&m32, m32p, sizeof m32);
		if (error != 0)
			break;
		mp++;
		m32p++;
	}
fail:
	if (m64p != NULL)
		kmem_free(m64p, size);
	if (error != 0)
		n = 0;
	/* Report the number of entries actually returned (0 on error). */
	copyout(&n, (void *)(uintptr_t)args32.n, sizeof n);
	return error;
}
719
/*
 * 32-bit wrapper for X86_SET_MTRR: copy in the 32-bit argument
 * structure and entry array, widen each entry to the native struct
 * mtrr, and apply them via mtrr_set().
 */
static int
x86_64_set_mtrr32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_64_set_mtrr_args32 args32;
	struct mtrr32 *m32p, m32;
	struct mtrr *m64p, *mp;
	int error, i;
	int32_t n;
	size_t size;

	m64p = NULL;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &args32, sizeof args32);
	if (error != 0)
		return error;

	error = copyin((void *)(uintptr_t)args32.n, &n, sizeof n);
	if (error != 0)
		return error;

	/* Clamp before using n to size the kernel allocation. */
	if (n <= 0 || n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX)) {
		error = EINVAL;
		goto fail;
	}

	size = n * sizeof(struct mtrr);
	m64p = kmem_zalloc(size, KM_SLEEP);
	if (m64p == NULL) {
		error = ENOMEM;
		goto fail;
	}
	/* Widen each 32-bit user entry into the native representation. */
	m32p = (struct mtrr32 *)(uintptr_t)args32.mtrrp;
	mp = m64p;
	for (i = 0; i < n; i++) {
		error = copyin(m32p, &m32, sizeof m32);
		if (error != 0)
			goto fail;
		mp->base = m32.base;
		mp->len = m32.len;
		mp->type = m32.type;
		mp->flags = m32.flags;
		mp->owner = m32.owner;
		m32p++;
		mp++;
	}

	error = mtrr_set(m64p, &n, l->l_proc, 0);
fail:
	if (m64p != NULL)
		kmem_free(m64p, size);
	if (error != 0)
		n = 0;
	/* Report the number of entries actually set (0 on error). */
	copyout(&n, (void *)(uintptr_t)args32.n, sizeof n);
	return error;
}
783#endif
784
785#if 0
/*
 * Currently compiled out (#if 0): bidirectional conversion between the
 * native amd64 mcontext and the 32-bit mcontext32 register layouts.
 */
void
netbsd32_mcontext_to_mcontext32(mcontext32_t *m32, mcontext_t *m, int flags)
{
	if ((flags & _UC_CPU) != 0) {
		m32->__gregs[_REG32_GS] = m->__gregs[_REG_GS] & 0xffffffff;
		m32->__gregs[_REG32_FS] = m->__gregs[_REG_FS] & 0xffffffff;
		m32->__gregs[_REG32_ES] = m->__gregs[_REG_ES] & 0xffffffff;
		m32->__gregs[_REG32_DS] = m->__gregs[_REG_DS] & 0xffffffff;
		m32->__gregs[_REG32_EDI] = m->__gregs[_REG_RDI] & 0xffffffff;
		m32->__gregs[_REG32_ESI] = m->__gregs[_REG_RSI] & 0xffffffff;
		m32->__gregs[_REG32_EBP] = m->__gregs[_REG_RBP] & 0xffffffff;
		m32->__gregs[_REG32_ESP] = m->__gregs[_REG_URSP] & 0xffffffff;
		m32->__gregs[_REG32_EBX] = m->__gregs[_REG_RBX] & 0xffffffff;
		m32->__gregs[_REG32_EDX] = m->__gregs[_REG_RDX] & 0xffffffff;
		m32->__gregs[_REG32_ECX] = m->__gregs[_REG_RCX] & 0xffffffff;
		m32->__gregs[_REG32_EAX] = m->__gregs[_REG_RAX] & 0xffffffff;
		m32->__gregs[_REG32_TRAPNO] =
		    m->__gregs[_REG_TRAPNO] & 0xffffffff;
		m32->__gregs[_REG32_ERR] = m->__gregs[_REG_ERR] & 0xffffffff;
		m32->__gregs[_REG32_EIP] = m->__gregs[_REG_RIP] & 0xffffffff;
		m32->__gregs[_REG32_CS] = m->__gregs[_REG_CS] & 0xffffffff;
		m32->__gregs[_REG32_EFL] = m->__gregs[_REG_RFL] & 0xffffffff;
		m32->__gregs[_REG32_UESP] = m->__gregs[_REG_URSP] & 0xffffffff;
		m32->__gregs[_REG32_SS] = m->__gregs[_REG_SS] & 0xffffffff;
	}
	if ((flags & _UC_FPU) != 0)
		memcpy(&m32->__fpregs, &m->__fpregs, sizeof (m32->__fpregs));
}

void
netbsd32_mcontext32_to_mcontext(mcontext_t *m, mcontext32_t *m32, int flags)
{
	if ((flags & _UC_CPU) != 0) {
		m->__gregs[_REG_GS] = m32->__gregs[_REG32_GS];
		m->__gregs[_REG_FS] = m32->__gregs[_REG32_FS];
		m->__gregs[_REG_ES] = m32->__gregs[_REG32_ES];
		m->__gregs[_REG_DS] = m32->__gregs[_REG32_DS];
		m->__gregs[_REG_RDI] = m32->__gregs[_REG32_EDI];
		m->__gregs[_REG_RSI] = m32->__gregs[_REG32_ESI];
		m->__gregs[_REG_RBP] = m32->__gregs[_REG32_EBP];
		/* NOTE(review): _REG_URSP is written twice (ESP here,
		 * UESP below); the UESP value wins. */
		m->__gregs[_REG_URSP] = m32->__gregs[_REG32_ESP];
		m->__gregs[_REG_RBX] = m32->__gregs[_REG32_EBX];
		m->__gregs[_REG_RDX] = m32->__gregs[_REG32_EDX];
		m->__gregs[_REG_RCX] = m32->__gregs[_REG32_ECX];
		m->__gregs[_REG_RAX] = m32->__gregs[_REG32_EAX];
		m->__gregs[_REG_TRAPNO] = m32->__gregs[_REG32_TRAPNO];
		m->__gregs[_REG_ERR] = m32->__gregs[_REG32_ERR];
		m->__gregs[_REG_RIP] = m32->__gregs[_REG32_EIP];
		m->__gregs[_REG_CS] = m32->__gregs[_REG32_CS];
		m->__gregs[_REG_RFL] = m32->__gregs[_REG32_EFL];
		m->__gregs[_REG_URSP] = m32->__gregs[_REG32_UESP];
		m->__gregs[_REG_SS] = m32->__gregs[_REG32_SS];
	}
	if (flags & _UC_FPU)
		memcpy(&m->__fpregs, &m32->__fpregs, sizeof (m->__fpregs));
}
842#endif
843
844
/*
 * Install a 32-bit machine context into the lwp: validate and restore
 * the CPU registers (if _UC_CPU), the TLS base (if _UC_TLSBASE), the
 * FP state (if _UC_FPU) and the signal-stack flags.
 */
int
cpu_setmcontext32(struct lwp *l, const mcontext32_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	const __greg32_t *gr = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;

	/* Restore register context, if any. */
	if ((flags & _UC_CPU) != 0) {
		/*
		 * Check for security violations.
		 */
		error = cpu_mcontext32_validate(l, mcp);
		if (error != 0)
			return error;

		/* %fs/%gs need the special reload path on amd64. */
		cpu_fsgs_reload(l, gr[_REG32_FS], gr[_REG32_GS]);
		tf->tf_es = gr[_REG32_ES];
		tf->tf_ds = gr[_REG32_DS];
		/* Only change the user-alterable part of eflags */
		tf->tf_rflags &= ~PSL_USER;
		tf->tf_rflags |= (gr[_REG32_EFL] & PSL_USER);
		tf->tf_rdi    = gr[_REG32_EDI];
		tf->tf_rsi    = gr[_REG32_ESI];
		tf->tf_rbp    = gr[_REG32_EBP];
		tf->tf_rbx    = gr[_REG32_EBX];
		tf->tf_rdx    = gr[_REG32_EDX];
		tf->tf_rcx    = gr[_REG32_ECX];
		tf->tf_rax    = gr[_REG32_EAX];
		tf->tf_rip    = gr[_REG32_EIP];
		tf->tf_cs     = gr[_REG32_CS];
		tf->tf_rsp    = gr[_REG32_UESP];
		tf->tf_ss     = gr[_REG32_SS];
	}

	if ((flags & _UC_TLSBASE) != 0)
		lwp_setprivate(l, (void *)(uintptr_t)mcp->_mc_tlsbase);

	/* Restore floating point register context, if any. */
	if ((flags & _UC_FPU) != 0) {
		struct pcb *pcb = lwp_getpcb(l);

		/*
		 * If we were using the FPU, forget that we were.
		 */
		if (pcb->pcb_fpcpu != NULL) {
			fpusave_lwp(l, false);
		}
		memcpy(&pcb->pcb_savefpu.fp_fxsave, &mcp->__fpregs,
		    sizeof (pcb->pcb_savefpu.fp_fxsave));
		/* If not set already. */
		l->l_md.md_flags |= MDP_USEDFPU;
	}

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}
909
/*
 * Capture the lwp's current state as a 32-bit machine context: CPU
 * registers (always), TLS base (always), and FP state if the lwp has
 * used the FPU.  Sets the corresponding _UC_* bits in *flags.
 */
void
cpu_getmcontext32(struct lwp *l, mcontext32_t *mcp, unsigned int *flags)
{
	const struct trapframe *tf = l->l_md.md_regs;
	__greg32_t *gr = mcp->__gregs;
	__greg32_t ras_eip;

	/* Save register context. */
	gr[_REG32_GS]  = tf->tf_gs;
	gr[_REG32_FS]  = tf->tf_fs;
	gr[_REG32_ES]  = tf->tf_es;
	gr[_REG32_DS]  = tf->tf_ds;
	gr[_REG32_EFL] = tf->tf_rflags;
	gr[_REG32_EDI]    = tf->tf_rdi;
	gr[_REG32_ESI]    = tf->tf_rsi;
	gr[_REG32_EBP]    = tf->tf_rbp;
	gr[_REG32_EBX]    = tf->tf_rbx;
	gr[_REG32_EDX]    = tf->tf_rdx;
	gr[_REG32_ECX]    = tf->tf_rcx;
	gr[_REG32_EAX]    = tf->tf_rax;
	gr[_REG32_EIP]    = tf->tf_rip;
	gr[_REG32_CS]     = tf->tf_cs;
	gr[_REG32_ESP]    = tf->tf_rsp;
	gr[_REG32_UESP]   = tf->tf_rsp;
	gr[_REG32_SS]     = tf->tf_ss;
	gr[_REG32_TRAPNO] = tf->tf_trapno;
	gr[_REG32_ERR]    = tf->tf_err;

	/*
	 * If %eip is inside a restartable atomic sequence, report the
	 * sequence restart address instead (ras_lookup returns -1 when
	 * there is no match).
	 */
	if ((ras_eip = (__greg32_t)(uintptr_t)ras_lookup(l->l_proc,
	    (void *) (uintptr_t)gr[_REG32_EIP])) != -1)
		gr[_REG32_EIP] = ras_eip;

	*flags |= _UC_CPU;

	mcp->_mc_tlsbase = (uint32_t)(uintptr_t)l->l_private;
	*flags |= _UC_TLSBASE;

	/* Save floating point register context, if any. */
	if ((l->l_md.md_flags & MDP_USEDFPU) != 0) {
		struct pcb *pcb = lwp_getpcb(l);

		/* Flush live FPU state into the pcb before copying. */
		if (pcb->pcb_fpcpu) {
			fpusave_lwp(l, true);
		}
		memcpy(&mcp->__fpregs, &pcb->pcb_savefpu.fp_fxsave,
		    sizeof (pcb->pcb_savefpu.fp_fxsave));
		*flags |= _UC_FPU;
	}
}
959
/*
 * Entry point for a newly created 32-bit lwp: install the ucontext
 * passed by _lwp_create(2), free it, and return to userland.
 */
void
startlwp32(void *arg)
{
	ucontext32_t *uc = arg;
	lwp_t *l = curlwp;
	int error;

	/* Context was validated at _lwp_create() time; must not fail. */
	error = cpu_setmcontext32(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	/* Note: we are freeing ucontext_t, not ucontext32_t. */
	kmem_free(uc, sizeof(ucontext_t));
	userret(l);
}
974
975/*
976 * For various reasons, the amd64 port can't do what the i386 port does,
977 * and rely on catching invalid user contexts on exit from the kernel.
978 * These functions perform the needed checks.
979 */
980
981static int
982check_sigcontext32(struct lwp *l, const struct netbsd32_sigcontext *scp)
983{
984	struct trapframe *tf;
985	struct pcb *pcb;
986
987	tf = l->l_md.md_regs;
988	pcb = lwp_getpcb(curlwp);
989
990	if (((scp->sc_eflags ^ tf->tf_rflags) & PSL_USERSTATIC) != 0 ||
991	    !VALID_USER_CSEL32(scp->sc_cs))
992		return EINVAL;
993	if (scp->sc_fs != 0 && !VALID_USER_DSEL32(scp->sc_fs) &&
994	    !(VALID_USER_FSEL32(scp->sc_fs) && pcb->pcb_fs != 0))
995		return EINVAL;
996	if (scp->sc_gs != 0 && !VALID_USER_DSEL32(scp->sc_gs) &&
997	    !(VALID_USER_GSEL32(scp->sc_gs) && pcb->pcb_gs != 0))
998		return EINVAL;
999	if (scp->sc_es != 0 && !VALID_USER_DSEL32(scp->sc_es))
1000		return EINVAL;
1001	if (!VALID_USER_DSEL32(scp->sc_ds) || !VALID_USER_DSEL32(scp->sc_ss))
1002		return EINVAL;
1003	if (scp->sc_eip >= VM_MAXUSER_ADDRESS32)
1004		return EINVAL;
1005	return 0;
1006}
1007
/*
 * Validate a 32-bit mcontext before it is installed in the trapframe:
 * the user may not change privileged rflags bits, load invalid
 * selectors, or point %eip outside the 32-bit user VA range.
 */
int
cpu_mcontext32_validate(struct lwp *l, const mcontext32_t *mcp)
{
	const __greg32_t *gr;
	struct trapframe *tf;
	struct pcb *pcb;

	gr = mcp->__gregs;
	tf = l->l_md.md_regs;
	pcb = lwp_getpcb(l);

	if (((gr[_REG32_EFL] ^ tf->tf_rflags) & PSL_USERSTATIC) != 0 ||
	    !VALID_USER_CSEL32(gr[_REG32_CS]))
		return EINVAL;
	/* %fs/%gs may also be the LDT-based selectors if a base is set. */
	if (gr[_REG32_FS] != 0 && !VALID_USER_DSEL32(gr[_REG32_FS]) &&
	    !(VALID_USER_FSEL32(gr[_REG32_FS]) && pcb->pcb_fs != 0))
		return EINVAL;
	if (gr[_REG32_GS] != 0 && !VALID_USER_DSEL32(gr[_REG32_GS]) &&
	    !(VALID_USER_GSEL32(gr[_REG32_GS]) && pcb->pcb_gs != 0))
		return EINVAL;
	if (gr[_REG32_ES] != 0 && !VALID_USER_DSEL32(gr[_REG32_ES]))
		return EINVAL;
	if (!VALID_USER_DSEL32(gr[_REG32_DS]) ||
	    !VALID_USER_DSEL32(gr[_REG32_SS]))
		return EINVAL;
	if (gr[_REG32_EIP] >= VM_MAXUSER_ADDRESS32)
		return EINVAL;
	return 0;
}
1037
/*
 * Build a scheduler-activations upcall frame on the 32-bit user stack
 * and redirect the lwp to the userland upcall handler.
 */
void
netbsd32_cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted,
    void *sas, void *ap, void *sp, sa_upcall_t upcall)
{
	struct trapframe *tf;
	struct netbsd32_saframe *sf, frame;

	tf = l->l_md.md_regs;

	frame.sa_type = type;
	NETBSD32PTR32(frame.sa_sas, sas);
	frame.sa_events = nevents;
	frame.sa_interrupted = ninterrupted;
	NETBSD32PTR32(frame.sa_arg, ap);
	/* Upcall handlers never return; no valid return address. */
	frame.sa_ra = 0;

	sf = (struct netbsd32_saframe *)sp - 1;
	if (copyout(&frame, sf, sizeof(frame)) != 0) {
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/* Enter the upcall handler with the 32-bit user selectors. */
	tf->tf_rip = (uintptr_t)upcall;
	tf->tf_rsp = (uintptr_t)sf;
	tf->tf_rbp = 0;
	tf->tf_gs = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_rflags &= ~(PSL_T|PSL_VM|PSL_AC);

	/* Force return to userland via iret (not sysret). */
	l->l_md.md_flags |= MDP_IRET;
}
1073
/*
 * Pick the default mmap address for a 32-bit process; `p` is unused,
 * the placement depends only on base and size.
 */
vaddr_t
netbsd32_vm_default_addr(struct proc *p, vaddr_t base, vsize_t size)
{
	return VM_DEFAULT_ADDRESS32(base, size);
}
1079
1080#ifdef COMPAT_13
int
compat_13_sys_sigreturn(struct lwp *l, const struct compat_13_sys_sigreturn_args *uap, register_t *retval)
{
	/* Native NetBSD-1.3 sigreturn is not supported on amd64. */
	return ENOSYS;
}
1086
/*
 * sigreturn(2) for 32-bit processes using the NetBSD-1.3 sigcontext
 * layout (old-style 13-bit signal mask): validate the user-supplied
 * context, restore register state, the signal stack status, and the
 * signal mask (converted to the current sigset_t format).
 */
int
compat_13_netbsd32_sigreturn(struct lwp *l, const struct compat_13_netbsd32_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct netbsd32_sigcontext13 *) sigcntxp;
	} */
	struct proc *p = l->l_proc;
	struct netbsd32_sigcontext13 *scp, context;
	struct trapframe *tf;
	sigset_t mask;
	int error;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = (struct netbsd32_sigcontext13 *)NETBSD32PTR64(SCARG(uap, sigcntxp));
	if (copyin((void *)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore register context. */
	tf = l->l_md.md_regs;

	/*
	 * Check for security violations.
	 * NOTE(review): the sigcontext13 is validated through a cast to
	 * netbsd32_sigcontext — assumes the register fields have the
	 * same layout in both structs; confirm against the headers.
	 */
	error = check_sigcontext32(l, (const struct netbsd32_sigcontext *)&context);
	if (error != 0)
		return error;

	tf->tf_gs = context.sc_gs;
	tf->tf_fs = context.sc_fs;
	tf->tf_es = context.sc_es;
	tf->tf_ds = context.sc_ds;
	tf->tf_rflags = context.sc_eflags;
	tf->tf_rdi = context.sc_edi;
	tf->tf_rsi = context.sc_esi;
	tf->tf_rbp = context.sc_ebp;
	tf->tf_rbx = context.sc_ebx;
	tf->tf_rdx = context.sc_edx;
	tf->tf_rcx = context.sc_ecx;
	tf->tf_rax = context.sc_eax;
	tf->tf_rip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_rsp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

	mutex_enter(p->p_lock);
	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	native_sigset13_to_sigset((sigset13_t *)&context.sc_mask, &mask);
	(void) sigprocmask1(l, SIG_SETMASK, &mask, 0);
	mutex_exit(p->p_lock);

	/* Registers were modified directly; skip normal syscall return. */
	return (EJUSTRETURN);
}
1148#endif
1149