/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/i386/i386/trap.c 333204 2018-05-03 07:47:03Z avg $");

/*
 * 386 Trap and System call handling
 */

#include "opt_clock.h"
#include "opt_cpu.h"
#include "opt_hwpmc_hooks.h"
#include "opt_isa.h"
#include "opt_kdb.h"
#include "opt_stack.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , page_fault, all);
PMC_SOFT_DEFINE( , , page_fault, read);
PMC_SOFT_DEFINE( , , page_fault, write);
#endif
#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/stack.h>
#include <machine/tss.h>
#include <machine/vm86.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

void trap(struct trapframe *frame);
void syscall(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int, vm_offset_t);
static void trap_fatal(struct trapframe *, vm_offset_t);
void dblfault_handler(void);

extern inthand_t IDTVEC(lcall_syscall);

#define MAX_TRAP_MSG		32
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"",					/*  7 unused */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
	"SIMD floating-point exception",	/* 29 T_XMMFLT */
	"reserved (unknown) fault",		/* 30 T_RESERVED */
	"",					/* 31 unused (reserved) */
	"DTrace pid return trap",		/* 32 T_DTRACE_RET */
};

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
int has_f00f_bug = 0;		/* Initialized so that it can be patched. */
#endif

static int prot_fault_translation = 0;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RW,
	&prot_fault_translation, 0, "Select signal to deliver on protection fault");
static int uprintf_signal;
SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RW,
    &uprintf_signal, 0,
    "Print debugging information on trap signal to ctty");

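/*
 * Illustrative sketch only (not part of the original file, kept inside
 * "#if 0" so it is never compiled): a condensed, side-effect-free
 * restatement of how the T_PAGEFLT user path in trap() below turns the
 * machdep.prot_fault_translation knob into a signal/si_code pair for a
 * protection fault.  The function name and parameters are hypothetical;
 * the real logic also handles the SEGV_MAPERR case.
 */
#if 0
static int
prot_fault_signal_sketch(int translation, int abi_is_freebsd, int osrel_new,
    int *ucode)
{

	if (translation == 1 ||
	    (translation == 0 && !(abi_is_freebsd && osrel_new))) {
		/* Compatibility mode: old binaries expect SIGBUS. */
		*ucode = BUS_PAGE_FAULT;
		return (SIGBUS);
	}
	/* Modern behavior: SIGSEGV with SEGV_ACCERR. */
	*ucode = SEGV_ACCERR;
	return (SIGSEGV);
}
#endif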
/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(struct trapframe *frame)
{
	ksiginfo_t ksi;
	struct thread *td;
	struct proc *p;
#ifdef KDB
	register_t dr6;
#endif
	int signo, ucode;
	u_int type;
	register_t addr;
	vm_offset_t eva;
#ifdef POWERFAIL_NMI
	static int lastalert = 0;
#endif

	td = curthread;
	p = td->td_proc;
	signo = 0;
	ucode = 0;
	addr = 0;

	PCPU_INC(cnt.v_trap);
	type = frame->tf_trapno;

#ifdef SMP
	/* Handler for NMI IPIs used for stopping CPUs. */
	if (type == T_NMI && ipi_nmi_handler() == 0)
		return;
#endif /* SMP */

#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		return;
	}
#endif

	if (type == T_RESERVED) {
		trap_fatal(frame, 0);
		return;
	}

	if (type == T_NMI) {
#ifdef HWPMC_HOOKS
		/*
		 * CPU PMCs interrupt using an NMI, so we check for that first.
		 * If the HWPMC module is active, 'pmc_intr' will point to
		 * the function to be called.  A non-zero return value from the
		 * hook means that the NMI was consumed by it and that we can
		 * return immediately.
		 */
		if (pmc_intr != NULL &&
		    (*pmc_intr)(PCPU_GET(cpuid), frame) != 0)
			return;
#endif

#ifdef STACK
		if (stack_nmi_handler(frame) != 0)
			return;
#endif
	}

	if (type == T_MCHK) {
		mca_intr();
		return;
	}

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 */
	if ((type == T_PROTFLT || type == T_PAGEFLT) &&
	    dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
		return;
#endif

	if ((frame->tf_eflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled
		 * interrupts and then trapped.  Enabling interrupts
		 * now is wrong, but it is better than running with
		 * interrupts disabled until they are accidentally
		 * enabled later.
		 */
		if (TRAPF_USERMODE(frame) &&
		    (curpcb->pcb_flags & PCB_VM86CALL) == 0)
			uprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curthread->td_name, type);
		else if (type != T_NMI && type != T_BPTFLT &&
		    type != T_TRCTRAP &&
		    frame->tf_eip != (int)cpu_switch_load_gs) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
			/*
			 * Page faults need interrupts disabled until later,
			 * and we shouldn't enable interrupts while holding
			 * a spin lock.
			 */
			if (type != T_PAGEFLT &&
			    td->td_md.md_spinlock_count == 0)
				enable_intr();
		}
	}
	eva = 0;
	if (type == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by
		 * interrupts.  This problem is worked around by using
		 * an interrupt gate for the pagefault handler.  We
		 * are finally ready to read %cr2 and conditionally
		 * reenable interrupts.  If we hold a spin lock, then
		 * we must not reenable interrupts.  This might be a
		 * spurious page fault.
		 */
		eva = rcr2();
		if (td->td_md.md_spinlock_count == 0)
			enable_intr();
	}

	if (TRAPF_USERMODE(frame) && (curpcb->pcb_flags & PCB_VM86CALL) == 0) {
		/* user trap */

		td->td_pticks = 0;
		td->td_frame = frame;
		addr = frame->tf_eip;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			signo = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			enable_intr();
#ifdef KDTRACE_HOOKS
			if (type == T_BPTFLT) {
				if (dtrace_pid_probe_ptr != NULL &&
				    dtrace_pid_probe_ptr(frame) == 0)
					return;
			}
#endif
user_trctrap_out:
			frame->tf_eflags &= ~PSL_T;
			signo = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = npxtrap_x87();
			if (ucode == -1)
				return;
			signo = SIGFPE;
			break;

		/*
		 * The following two traps can happen in vm86 mode,
		 * and, if so, we want to handle them specially.
		 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame->tf_eflags & PSL_VM) {
				signo = vm86_emulate((struct vm86frame *)frame);
				if (signo == SIGTRAP) {
					type = T_TRCTRAP;
					load_dr6(rdr6() | 0x4000);
					goto user_trctrap_out;
				}
				if (signo == 0)
					goto user;
				break;
			}
			signo = SIGBUS;
			ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
			break;
		case T_SEGNPFLT:	/* segment not present fault */
			signo = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
			signo = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_ALIGNFLT:
			signo = SIGBUS;
			ucode = BUS_ADRALN;
			break;
		case T_DOUBLEFLT:	/* double fault */
		default:
			signo = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			signo = trap_pfault(frame, TRUE, eva);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (signo == -2) {
				/*
				 * The f00f hack workaround has triggered, so
				 * treat the fault as an illegal instruction
				 * (T_PRIVINFLT) instead of a page fault.
				 */
				type = frame->tf_trapno = T_PRIVINFLT;

				/* Proceed as in that case. */
				ucode = ILL_PRVOPC;
				signo = SIGILL;
				break;
			}
#endif
			if (signo == -1)
				return;
			if (signo == 0)
				goto user;

			if (signo == SIGSEGV)
				ucode = SEGV_MAPERR;
			else if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    p->p_osrel >= P_OSREL_SIGSEGV) {
					signo = SIGSEGV;
					ucode = SEGV_ACCERR;
				} else {
					signo = SIGBUS;
					ucode = BUS_PAGE_FAULT;
				}
			} else if (prot_fault_translation == 1) {
				/*
				 * Always compat mode.
				 */
				signo = SIGBUS;
				ucode = BUS_PAGE_FAULT;
			} else {
				/*
				 * Always SIGSEGV mode.
				 */
				signo = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			addr = eva;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			signo = SIGFPE;
			break;

#ifdef DEV_ISA
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#  define TIMER_FREQ 1193182
#endif
			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(880, hz);
				lastalert = time_second;
			}
			return;
#else /* !POWERFAIL_NMI */
			nmi_handle_intr(type, frame);
			return;
#endif /* POWERFAIL_NMI */
#endif /* DEV_ISA */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			signo = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			signo = SIGFPE;
			break;

		case T_DNA:
			KASSERT(PCB_USER_FPU(td->td_pcb),
			    ("kernel FPU ctx has leaked"));
			/* transparent fault (due to context switch "late") */
			if (npxdna())
				return;
			uprintf("pid %d killed due to lack of floating point\n",
				p->p_pid);
			signo = SIGKILL;
			ucode = 0;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			signo = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = npxtrap_sse();
			if (ucode == -1)
				return;
			signo = SIGFPE;
			break;
#ifdef KDTRACE_HOOKS
		case T_DTRACE_RET:
			enable_intr();
			if (dtrace_return_probe_ptr != NULL)
				dtrace_return_probe_ptr(frame);
			return;
#endif
		}
	} else {
		/* kernel trap */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case T_PAGEFLT:			/* page fault */
			(void) trap_pfault(frame, FALSE, eva);
			return;

		case T_DNA:
			if (PCB_USER_FPU(td->td_pcb))
				panic("Unregistered use of FPU in kernel");
			if (npxdna())
				return;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
		case T_XMMFLT:		/* SIMD floating-point exception */
		case T_FPOPFLT:		/* FPU operand fetch fault */
			/*
			 * XXXKIB: for now, disable any FPU traps in the
			 * kernel; handler registration seems to be overkill.
			 */
			trap_fatal(frame, 0);
			return;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame->tf_eflags & PSL_VM) {
				signo = vm86_emulate((struct vm86frame *)frame);
				if (signo == SIGTRAP) {
					type = T_TRCTRAP;
					load_dr6(rdr6() | 0x4000);
					goto kernel_trctrap;
				}
				if (signo != 0)
					/*
					 * returns to original process
					 */
					vm86_trap((struct vm86frame *)frame);
				return;
			}
			/* FALL THROUGH */
		case T_SEGNPFLT:	/* segment not present fault */
			if (curpcb->pcb_flags & PCB_VM86CALL)
				break;

			/*
			 * Invalid %fs's and %gs's can be created using
			 * procfs or PT_SETREGS or by invalidating the
			 * underlying LDT entry.  This causes a fault
			 * in kernel mode when the kernel attempts to
			 * switch contexts.  Lose the bad context
			 * (XXX) so that we can continue, and generate
			 * a signal.
			 */
			if (frame->tf_eip == (int)cpu_switch_load_gs) {
				curpcb->pcb_gs = 0;
#if 0
				PROC_LOCK(p);
				kern_psignal(p, SIGBUS);
				PROC_UNLOCK(p);
#endif
				return;
			}

			if (td->td_intr_nesting_level != 0)
				break;

			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (frame->tf_eip == (int)doreti_iret) {
				frame->tf_eip = (int)doreti_iret_fault;
				return;
			}
			if (type == T_STKFLT)
				break;

			if (frame->tf_eip == (int)doreti_popl_ds) {
				frame->tf_eip = (int)doreti_popl_ds_fault;
				return;
			}
			if (frame->tf_eip == (int)doreti_popl_es) {
				frame->tf_eip = (int)doreti_popl_es_fault;
				return;
			}
			if (frame->tf_eip == (int)doreti_popl_fs) {
				frame->tf_eip = (int)doreti_popl_fs_fault;
				return;
			}
			if (curpcb->pcb_onfault != NULL) {
				frame->tf_eip = (int)curpcb->pcb_onfault;
				return;
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_eflags & PSL_NT) {
				frame->tf_eflags &= ~PSL_NT;
				return;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
kernel_trctrap:
			if (frame->tf_eip == (int)IDTVEC(lcall_syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				return;
			}
			if (frame->tf_eip == (int)IDTVEC(lcall_syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_eflags &= ~PSL_T;
				return;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap() &&
			   !(curpcb->pcb_flags & PCB_VM86CALL)) {
				/*
				 * Reset the breakpoint bits because the
				 * processor does not do it for us.
				 */
				load_dr6(rdr6() & ~0xf);
				return;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If KDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef KDB
			/* XXX %dr6 is not quite reentrant. */
			dr6 = rdr6();
			load_dr6(dr6 & ~0x4000);
			if (kdb_trap(type, dr6, frame))
				return;
#endif
			break;

#ifdef DEV_ISA
		case T_NMI:
#ifdef POWERFAIL_NMI
			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(880, hz);
				lastalert = time_second;
			}
			return;
#else /* !POWERFAIL_NMI */
			nmi_handle_intr(type, frame);
			return;
#endif /* POWERFAIL_NMI */
#endif /* DEV_ISA */
		}

		trap_fatal(frame, eva);
		return;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap != NULL)
		signo = (*p->p_sysent->sv_transtrap)(signo, type);

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = ucode;
	ksi.ksi_addr = (void *)addr;
	ksi.ksi_trapno = type;
	if (uprintf_signal) {
		uprintf("pid %d comm %s: signal %d err %x code %d type %d "
		    "addr 0x%x esp 0x%08x eip 0x%08x "
		    "<%02x %02x %02x %02x %02x %02x %02x %02x>\n",
		    p->p_pid, p->p_comm, signo, frame->tf_err, ucode, type,
		    addr, frame->tf_esp, frame->tf_eip,
		    fubyte((void *)(frame->tf_eip + 0)),
		    fubyte((void *)(frame->tf_eip + 1)),
		    fubyte((void *)(frame->tf_eip + 2)),
		    fubyte((void *)(frame->tf_eip + 3)),
		    fubyte((void *)(frame->tf_eip + 4)),
		    fubyte((void *)(frame->tf_eip + 5)),
		    fubyte((void *)(frame->tf_eip + 6)),
		    fubyte((void *)(frame->tf_eip + 7)));
	}
	KASSERT((read_eflags() & PSL_I) != 0, ("interrupts disabled"));
	trapsignal(td, &ksi);

user:
	userret(td, frame);
	KASSERT(PCB_USER_FPU(td->td_pcb),
	    ("Return from trap with kernel FPU ctx leaked"));
}

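/*
 * Illustrative userland sketch (not part of the original file, never
 * compiled here): the signo, ucode and addr values that trap() packs into
 * the ksiginfo above surface in the faulting process as si_signo, si_code
 * and si_addr of an SA_SIGINFO handler.  Build it as an ordinary user
 * program; it only documents the ABI that the code above implements.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
segv_handler(int sig, siginfo_t *si, void *ctx)
{

	(void)ctx;
	/* si_code is SEGV_MAPERR or SEGV_ACCERR, si_addr is the fault eva. */
	printf("signal %d code %d addr %p\n", sig, si->si_code, si->si_addr);
	exit(1);
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	*(volatile int *)0 = 0;		/* force a T_PAGEFLT from user mode */
	return (0);
}
#endif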
static int
trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva)
{
	struct thread *td;
	struct proc *p;
	vm_offset_t va;
	vm_map_t map;
	int rv;
	vm_prot_t ftype;

	td = curthread;
	p = td->td_proc;

	if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
		/*
		 * Due to both processor errata and lazy TLB invalidation when
		 * access restrictions are removed from virtual pages, memory
		 * accesses that are allowed by the physical mapping layer may
		 * nonetheless cause one spurious page fault per virtual page.
		 * When the thread is executing a "no faulting" section that
		 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
		 * every page fault is treated as a spurious page fault,
		 * unless it accesses the same virtual address as the most
		 * recent page fault within the same "no faulting" section.
		 */
		if (td->td_md.md_spurflt_addr != eva ||
		    (td->td_pflags & TDP_RESETSPUR) != 0) {
			/*
			 * Do nothing to the TLB.  A stale TLB entry is
			 * flushed automatically by a page fault.
			 */
			td->td_md.md_spurflt_addr = eva;
			td->td_pflags &= ~TDP_RESETSPUR;
			return (0);
		}
	} else {
		/*
		 * If we get a page fault while in a critical section, then
		 * it is most likely a fatal kernel page fault.  The kernel
		 * is already going to panic trying to get a sleep lock to
		 * do the VM lookup, so just consider it a fatal trap so the
		 * kernel can print out a useful trap message and even get
		 * to the debugger.
		 *
		 * If we get a page fault while holding a non-sleepable
		 * lock, then it is most likely a fatal kernel page fault.
		 * If WITNESS is enabled, then it's going to whine about
		 * bogus LORs with various VM locks, so just skip to the
		 * fatal trap handling directly.
		 */
		if (td->td_critnest != 0 ||
		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
		    "Kernel page fault") != 0) {
			trap_fatal(frame, eva);
			return (-1);
		}
	}
	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception:  if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug)
			return (-2);
#endif
		if (usermode)
			return (SIGSEGV);

		map = kernel_map;
	} else {
		map = &p->p_vmspace->vm_map;

		/*
		 * When accessing a user-space address, kernel must be
		 * ready to accept the page fault, and provide a
		 * handling routine.  Since accessing the address
		 * without the handler is a bug, do not try to handle
		 * it normally, and panic immediately.
		 */
		if (!usermode && (td->td_intr_nesting_level != 0 ||
		    curpcb->pcb_onfault == NULL)) {
			trap_fatal(frame, eva);
			return (-1);
		}
	}

	/*
	 * If the trap was caused by errant bits in the PTE then panic.
	 */
	if (frame->tf_err & PGEX_RSV) {
		trap_fatal(frame, eva);
		return (-1);
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
#if defined(PAE) || defined(PAE_TABLES)
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
#endif
	else
		ftype = VM_PROT_READ;

	/* Fault in the page. */
	rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	if (rv == KERN_SUCCESS) {
#ifdef HWPMC_HOOKS
		if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
			PMC_SOFT_CALL_TF( , , page_fault, all, frame);
			if (ftype == VM_PROT_READ)
				PMC_SOFT_CALL_TF( , , page_fault, read,
				    frame);
			else
				PMC_SOFT_CALL_TF( , , page_fault, write,
				    frame);
		}
#endif
		return (0);
	}
	if (!usermode) {
		if (td->td_intr_nesting_level == 0 &&
		    curpcb->pcb_onfault != NULL) {
			frame->tf_eip = (int)curpcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}
	return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

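/*
 * Illustrative sketch (not part of the original file, never compiled here):
 * the "no faulting" section that the TDP_NOFAULTING handling above refers
 * to is entered and left with vm_fault_disable_pagefaults() and
 * vm_fault_enable_pagefaults() from <vm/vm_extern.h>, the same bracketing
 * pattern the kernel's copyin_nofault() uses.  The helper name below is
 * hypothetical; only the bracketing is the point.
 */
#if 0
static int
nofault_copy_sketch(const void *uaddr, void *kaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	/*
	 * While TDP_NOFAULTING is set, a fault on a non-resident page is
	 * not serviced; the copy fails with EFAULT instead of sleeping.
	 */
	error = copyin(uaddr, kaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}
#endif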
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss, esp;
	u_int type;
	struct soft_segment_descriptor softseg;
	char *msg;
#ifdef KDB
	bool handled;
#endif

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	printf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    frame->tf_eflags & PSL_VM ? "vm86" :
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	printf("cpuid = %d; ", PCPU_GET(cpuid));
	printf("apic id = %02x\n", PCPU_GET(apic_id));
#endif
	if (type == T_PAGEFLT) {
		printf("fault virtual address	= 0x%x\n", eva);
		printf("fault code		= %s %s%s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
#if defined(PAE) || defined(PAE_TABLES)
			pg_nx != 0 ?
			(code & PGEX_I ? " instruction" : " data") :
#endif
			"",
			code & PGEX_RSV ? "reserved bits in PTE" :
			code & PGEX_P ? "protection violation" : "page not present");
	} else {
		printf("error code		= %#x\n", code);
	}
	printf("instruction pointer	= 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if (TF_HAS_STACKREGS(frame)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	printf("stack pointer	        = 0x%x:0x%x\n", ss, esp);
	printf("frame pointer	        = 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment		= base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf("			= DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	       softseg.ssd_gran);
	printf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		printf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		printf("vm86, ");
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process		= %d (%s)\n",
	    curproc->p_pid, curthread->td_name);

#ifdef KDB
	if (debugger_on_panic) {
		kdb_why = KDB_WHY_TRAP;
		frame->tf_err = eva;	/* smuggle fault address to ddb */
		handled = kdb_trap(type, 0, frame);
		frame->tf_err = code;	/* restore error code */
		kdb_why = KDB_WHY_UNSET;
		if (handled)
			return;
	}
#endif
	printf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (as is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred. The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler(void)
{
#ifdef KDTRACE_HOOKS
	if (dtrace_doubletrap_func != NULL)
		(*dtrace_doubletrap_func)();
#endif
	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", PCPU_GET(common_tss.tss_eip));
	printf("esp = 0x%x\n", PCPU_GET(common_tss.tss_esp));
	printf("ebp = 0x%x\n", PCPU_GET(common_tss.tss_ebp));
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	printf("cpuid = %d; ", PCPU_GET(cpuid));
	printf("apic id = %02x\n", PCPU_GET(apic_id));
#endif
	panic("double fault");
}

int
cpu_fetch_syscall_args(struct thread *td)
{
	struct proc *p;
	struct trapframe *frame;
	struct syscall_args *sa;
	caddr_t params;
	long tmp;
	int error;

	p = td->td_proc;
	frame = td->td_frame;
	sa = &td->td_sa;

	params = (caddr_t)frame->tf_esp + sizeof(int);
	sa->code = frame->tf_eax;

	/*
	 * Need to check if this is a 32 bit or 64 bit syscall.
	 */
	if (sa->code == SYS_syscall) {
		/*
		 * Code is first argument, followed by actual args.
		 */
		error = fueword(params, &tmp);
		if (error == -1)
			return (EFAULT);
		sa->code = tmp;
		params += sizeof(int);
	} else if (sa->code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad, so as to maintain
		 * quad alignment for the rest of the arguments.
		 */
		error = fueword(params, &tmp);
		if (error == -1)
			return (EFAULT);
		sa->code = tmp;
		params += sizeof(quad_t);
	}

	if (p->p_sysent->sv_mask)
		sa->code &= p->p_sysent->sv_mask;
	if (sa->code >= p->p_sysent->sv_size)
		sa->callp = &p->p_sysent->sv_table[0];
	else
		sa->callp = &p->p_sysent->sv_table[sa->code];
	sa->narg = sa->callp->sy_narg;

	if (params != NULL && sa->narg != 0)
		error = copyin(params, (caddr_t)sa->args,
		    (u_int)(sa->narg * sizeof(int)));
	else
		error = 0;

	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = frame->tf_edx;
	}

	return (error);
}

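/*
 * Illustrative userland sketch (not part of the original file, never
 * compiled here): the two indirect forms decoded above.  syscall(2) passes
 * the real syscall code as the first int on the stack (SYS_syscall), while
 * __syscall(2) passes it as a 64-bit quantity so the remaining arguments
 * stay quad-aligned (SYS___syscall).  Build it as an ordinary user program.
 */
#if 0
#include <sys/syscall.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{

	/* Equivalent to getpid(); the kernel sees code SYS_syscall first. */
	printf("pid via syscall():   %ld\n", (long)syscall(SYS_getpid));
	/* The same request through the quad-aligned indirect entry point. */
	printf("pid via __syscall(): %ld\n", (long)__syscall(SYS_getpid));
	return (0);
}
#endif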
#include "../../kern/subr_syscall.c"

/*
 * syscall - system call request C handler.  A system call is
 * essentially treated as a trap by reusing the frame layout.
 */
void
syscall(struct trapframe *frame)
{
	struct thread *td;
	register_t orig_tf_eflags;
	int error;
	ksiginfo_t ksi;

#ifdef DIAGNOSTIC
	if (!(TRAPF_USERMODE(frame) &&
	    (curpcb->pcb_flags & PCB_VM86CALL) == 0)) {
		panic("syscall");
		/* NOT REACHED */
	}
#endif
	orig_tf_eflags = frame->tf_eflags;

	td = curthread;
	td->td_frame = frame;

	error = syscallenter(td);

	/*
	 * Traced syscall.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		frame->tf_eflags &= ~PSL_T;
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_code = TRAP_TRACE;
		ksi.ksi_addr = (void *)frame->tf_eip;
		trapsignal(td, &ksi);
	}

	KASSERT(PCB_USER_FPU(td->td_pcb),
	    ("System call %s returning with kernel FPU ctx leaked",
	     syscallname(td->td_proc, td->td_sa.code)));
	KASSERT(td->td_pcb->pcb_save == get_pcb_user_save_td(td),
	    ("System call %s returning with mangled pcb_save",
	     syscallname(td->td_proc, td->td_sa.code)));

	syscallret(td, error);
}