/*-
 * Copyright (c) 2001, Jake Burkholder
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
 *	from: FreeBSD: src/sys/i386/i386/trap.c,v 1.197 2001/07/19
 * $FreeBSD: head/sys/sparc64/sparc64/trap.c 99900 2002-07-13 04:36:50Z mini $
 */

#include "opt_ddb.h"
#include "opt_ktr.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/ktr.h>
#include <sys/kse.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/user.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/clock.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/trap.h>
#include <machine/tstate.h>
#include <machine/tte.h>
#include <machine/tlb.h>
#include <machine/tsb.h>
#include <machine/watch.h>

void trap(struct trapframe *tf);
void syscall(struct trapframe *tf);
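
/*
 * trap() and syscall() are entered from the low-level trap entry code with
 * a trapframe describing the interrupted context; they have no C callers.
 */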

static int trap_pfault(struct thread *td, struct trapframe *tf);

extern char fsbail[];

extern char *syscallnames[];

const char *trap_msg[] = {
	"reserved",
	"instruction access exception",
	"instruction access error",
	"instruction access protection",
	"illtrap instruction",
	"illegal instruction",
	"privileged opcode",
	"floating point disabled",
	"floating point exception ieee 754",
	"floating point exception other",
	"tag overflow",
	"division by zero",
	"data access exception",
	"data access error",
	"data access protection",
	"memory address not aligned",
	"privileged action",
	"async data error",
	"trap instruction 16",
	"trap instruction 17",
	"trap instruction 18",
	"trap instruction 19",
	"trap instruction 20",
	"trap instruction 21",
	"trap instruction 22",
	"trap instruction 23",
	"trap instruction 24",
	"trap instruction 25",
	"trap instruction 26",
	"trap instruction 27",
	"trap instruction 28",
	"trap instruction 29",
	"trap instruction 30",
	"trap instruction 31",
	"interrupt",
	"physical address watchpoint",
	"virtual address watchpoint",
	"corrected ecc error",
	"fast instruction access mmu miss",
	"fast data access mmu miss",
	"spill",
	"fill",
	"fill",
	"breakpoint",
	"clean window",
	"range check",
	"fix alignment",
	"integer overflow",
	"syscall",
	"restore physical watchpoint",
	"restore virtual watchpoint",
	"kernel stack fault",
};

int debugger_on_signal = 0;
SYSCTL_INT(_debug, OID_AUTO, debugger_on_signal, CTLFLAG_RW,
    &debugger_on_signal, 0, "");

void
trap(struct trapframe *tf)
{
	struct thread *td;
	struct proc *p;
	u_int sticks;
	int error;
	int ucode;
#ifdef DDB
	int mask;
#endif
	int type;
	int sig;

	KASSERT(PCPU_GET(curthread) != NULL, ("trap: curthread NULL"));
	KASSERT(PCPU_GET(curthread)->td_kse != NULL, ("trap: curkse NULL"));
	KASSERT(PCPU_GET(curthread)->td_proc != NULL, ("trap: curproc NULL"));

	atomic_add_int(&cnt.v_trap, 1);

	td = PCPU_GET(curthread);
	p = td->td_proc;

	error = 0;
	type = tf->tf_type;
	ucode = type;	/* XXX */

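	/*
	 * The low-level trap entry code ors T_KERNEL into tf_type when the
	 * trap was taken from kernel mode, so the base trap type (and the
	 * index into trap_msg[]) is always type & ~T_KERNEL.
	 */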
	CTR4(KTR_TRAP, "trap: %s type=%s (%s) pil=%#lx",
	    p->p_comm, trap_msg[type & ~T_KERNEL],
	    ((type & T_KERNEL) ? "kernel" : "user"),
	    rdpr(pil));

	if ((type & T_KERNEL) == 0) {
		sticks = td->td_kse->ke_sticks;
		td->td_frame = tf;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);
		if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) {
			PROC_LOCK(p);
			mtx_lock_spin(&sched_lock);
			thread_exit();
			/* NOTREACHED */
		}
	} else {
		sticks = 0;
		if ((type & ~T_KERNEL) != T_BREAKPOINT)
			KASSERT(cold || td->td_ucred != NULL,
			    ("kernel trap doesn't have ucred"));
	}

	switch (type) {

	/*
	 * User Mode Traps
	 */
	case T_MEM_ADDRESS_NOT_ALIGNED:
		sig = SIGILL;
		goto trapsig;
#if 0
	case T_ALIGN_LDDF:
	case T_ALIGN_STDF:
		sig = SIGBUS;
		goto trapsig;
#endif
	case T_BREAKPOINT:
		sig = SIGTRAP;
		goto trapsig;
	case T_DIVISION_BY_ZERO:
		sig = SIGFPE;
		goto trapsig;
	case T_FP_DISABLED:
	case T_FP_EXCEPTION_IEEE_754:
	case T_FP_EXCEPTION_OTHER:
		sig = SIGFPE;
		goto trapsig;
	case T_DATA_ERROR:
	case T_DATA_EXCEPTION:
	case T_INSTRUCTION_ERROR:
	case T_INSTRUCTION_EXCEPTION:
		sig = SIGILL;	/* XXX */
		goto trapsig;
	case T_DATA_MISS:
	case T_DATA_PROTECTION:
	case T_INSTRUCTION_MISS:
		error = trap_pfault(td, tf);
		if (error == 0)
			goto user;
		sig = error;
		goto trapsig;
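	/*
	 * Register window traps: T_FILL, T_FILL_RET and T_SPILL copy user
	 * window contents between the pcb and the user stack via
	 * rwindow_load() and rwindow_save().  If the windows cannot be
	 * copied the thread cannot be resumed, so it is terminated with
	 * SIGILL through sigexit().
	 */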
	case T_FILL:
		if (rwindow_load(td, tf, 2)) {
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* Not reached. */
		}
		goto user;
	case T_FILL_RET:
		if (rwindow_load(td, tf, 1)) {
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* Not reached. */
		}
		goto user;
	case T_ILLEGAL_INSTRUCTION:
		sig = SIGILL;
		goto trapsig;
	case T_PRIVILEGED_ACTION:
	case T_PRIVILEGED_OPCODE:
		sig = SIGBUS;
		goto trapsig;
	case T_TRAP_INSTRUCTION_16:
	case T_TRAP_INSTRUCTION_17:
	case T_TRAP_INSTRUCTION_18:
	case T_TRAP_INSTRUCTION_19:
	case T_TRAP_INSTRUCTION_20:
	case T_TRAP_INSTRUCTION_21:
	case T_TRAP_INSTRUCTION_22:
	case T_TRAP_INSTRUCTION_23:
	case T_TRAP_INSTRUCTION_24:
	case T_TRAP_INSTRUCTION_25:
	case T_TRAP_INSTRUCTION_26:
	case T_TRAP_INSTRUCTION_27:
	case T_TRAP_INSTRUCTION_28:
	case T_TRAP_INSTRUCTION_29:
	case T_TRAP_INSTRUCTION_30:
	case T_TRAP_INSTRUCTION_31:
		sig = SIGILL;
		goto trapsig;
	case T_SPILL:
		if (rwindow_save(td)) {
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* Not reached. */
		}
		goto user;
	case T_TAG_OFERFLOW:
		sig = SIGEMT;
		goto trapsig;

	/*
	 * Kernel Mode Traps
	 */
#ifdef DDB
	case T_BREAKPOINT | T_KERNEL:
	case T_KSTACK_FAULT | T_KERNEL:
		if (kdb_trap(tf) != 0)
			goto out;
		break;
#endif
	case T_DATA_MISS | T_KERNEL:
	case T_DATA_PROTECTION | T_KERNEL:
	case T_INSTRUCTION_MISS | T_KERNEL:
		error = trap_pfault(td, tf);
		if (error == 0)
			goto out;
		break;
#ifdef DDB
	case T_PA_WATCHPOINT | T_KERNEL:
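		/*
		 * Physical address watchpoint; handled the same way as the
		 * virtual address watchpoint case below (see the comment
		 * there): save the trapping state, patch a trap instruction
		 * in at %tnpc and clear the watchpoint so that the watched
		 * access can complete, then restore it in T_RSTRWP_PHYS.
		 */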
		TR3("trap: watch phys pa=%#lx tpc=%#lx, tnpc=%#lx",
		    watch_phys_get(&mask), tf->tf_tpc, tf->tf_tnpc);
		PCPU_SET(wp_pstate, (tf->tf_tstate & TSTATE_PSTATE_MASK) >>
		    TSTATE_PSTATE_SHIFT);
		tf->tf_tstate &= ~TSTATE_IE;
		intr_disable();
		PCPU_SET(wp_insn, *((u_int *)tf->tf_tnpc));
		*((u_int *)tf->tf_tnpc) = 0x91d03002;	/* ta %xcc, 2 */
		flush(tf->tf_tnpc);
		PCPU_SET(wp_va, watch_phys_get(&mask));
		PCPU_SET(wp_mask, mask);
		watch_phys_clear();
		goto out;
	case T_VA_WATCHPOINT | T_KERNEL:
		/*
		 * At the moment, just print the information from the trap,
		 * remove the watchpoint and use evil magic to execute the
		 * instruction: we temporarily save the instruction at
		 * %tnpc, write a trap instruction there, resume, and reset
		 * the watchpoint when that trap arrives.
		 * To make sure that no interrupt gets in between and creates
		 * a potentially large window where the watchpoint is inactive,
		 * disable interrupts temporarily.
		 * This is obviously fragile and evilish.
		 */
		TR3("trap: watch virt va=%#lx tpc=%#lx, tnpc=%#lx",
		    watch_virt_get(&mask), tf->tf_tpc, tf->tf_tnpc);
		PCPU_SET(wp_pstate, (tf->tf_tstate & TSTATE_PSTATE_MASK) >>
		    TSTATE_PSTATE_SHIFT);
		tf->tf_tstate &= ~TSTATE_IE;
		/*
		 * This has no matching intr_restore; the PSTATE_IE state of
		 * the trapping code will be restored when the watchpoint is
		 * restored.
		 */
		intr_disable();
		PCPU_SET(wp_insn, *((u_int *)tf->tf_tnpc));
		*((u_int *)tf->tf_tnpc) = 0x91d03003;	/* ta %xcc, 3 */
		flush(tf->tf_tnpc);
		PCPU_SET(wp_va, watch_virt_get(&mask));
		PCPU_SET(wp_mask, mask);
		watch_virt_clear();
		goto out;
	case T_RSTRWP_PHYS | T_KERNEL:
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_PSTATE_MASK) |
		    PCPU_GET(wp_pstate) << TSTATE_PSTATE_SHIFT;
		watch_phys_set_mask(PCPU_GET(wp_va), PCPU_GET(wp_mask));
		*(u_int *)tf->tf_tpc = PCPU_GET(wp_insn);
		flush(tf->tf_tpc);
		goto out;
	case T_RSTRWP_VIRT | T_KERNEL:
		/*
		 * Undo the tweaks done for T_VA_WATCHPOINT, reset the
		 * watchpoint and continue execution.
		 * Note that we run with interrupts enabled here, so there
		 * is a small chance that we will be interrupted before we
		 * can reset the watchpoint.
		 */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_PSTATE_MASK) |
		    PCPU_GET(wp_pstate) << TSTATE_PSTATE_SHIFT;
		watch_virt_set_mask(PCPU_GET(wp_va), PCPU_GET(wp_mask));
		*(u_int *)tf->tf_tpc = PCPU_GET(wp_insn);
		flush(tf->tf_tpc);
		goto out;
#endif
	default:
		break;
	}
	panic("trap: %s", trap_msg[type & ~T_KERNEL]);

trapsig:
	/* Translate fault for emulators. */
	if (p->p_sysent->sv_transtrap != NULL)
		sig = (p->p_sysent->sv_transtrap)(sig, type);
	if (debugger_on_signal &&
	    (sig == SIGILL || sig == SIGBUS || sig == SIGSEGV))
		Debugger("trapsig");
	trapsignal(p, sig, ucode);
user:
	userret(td, tf, sticks);
	mtx_assert(&Giant, MA_NOTOWNED);
#ifdef DIAGNOSTIC
	cred_free_thread(td);
#endif
out:
	CTR1(KTR_TRAP, "trap: td=%p return", td);
	return;
}

static int
trap_pfault(struct thread *td, struct trapframe *tf)
{
	struct vmspace *vm;
	struct pcb *pcb;
	struct proc *p;
	vm_offset_t va;
	vm_prot_t prot;
	u_long ctx;
	int flags;
	int type;
	int rv;

	p = td->td_proc;
	KASSERT(td->td_pcb != NULL, ("trap_pfault: pcb NULL"));
	KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));

	rv = KERN_SUCCESS;
	ctx = TLB_TAR_CTX(tf->tf_tar);
	pcb = td->td_pcb;
	type = tf->tf_type & ~T_KERNEL;
	va = TLB_TAR_VA(tf->tf_tar);

	CTR4(KTR_TRAP, "trap_pfault: td=%p pm_ctx=%#lx va=%#lx ctx=%#lx",
	    td, p->p_vmspace->vm_pmap.pm_context[PCPU_GET(cpuid)], va, ctx);

	if (type == T_DATA_PROTECTION) {
		prot = VM_PROT_WRITE;
		flags = VM_FAULT_DIRTY;
	} else {
		if (type == T_DATA_MISS)
			prot = VM_PROT_READ;
		else
			prot = VM_PROT_READ | VM_PROT_EXECUTE;
		flags = VM_FAULT_NORMAL;
	}
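
	/*
	 * The context encoded in the TLB tag access register distinguishes
	 * faults on user address space from faults on the kernel (nucleus,
	 * TLB_CTX_KERNEL) context.  A kernel-mode fault on a user address
	 * is only resolved here when it comes from code that has registered
	 * pcb_onfault (copyin()/copyout() style accessors) and not from
	 * interrupt context; otherwise -1 is returned and the caller treats
	 * the trap as fatal.  fsbail appears to mark accessors that want to
	 * bail out immediately rather than have the fault resolved.
	 */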
	if (ctx != TLB_CTX_KERNEL) {
		if ((tf->tf_tstate & TSTATE_PRIV) != 0 &&
		    (td->td_intr_nesting_level != 0 ||
		    pcb->pcb_onfault == NULL || pcb->pcb_onfault == fsbail))
			return (-1);

		/*
		 * This is a fault on non-kernel virtual memory.
		 */
		vm = p->p_vmspace;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/* Fault in the user page. */
		rv = vm_fault(&vm->vm_map, va, prot, flags);

		/*
		 * Now the process can be swapped again.
		 */
		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
	} else {
		/*
		 * This is a fault on kernel virtual memory.  Attempts to
		 * access kernel memory from user mode cause privileged
		 * action traps, not page faults.
		 */
		KASSERT(tf->tf_tstate & TSTATE_PRIV,
		    ("trap_pfault: fault on nucleus context from user mode"));

		/*
		 * We don't have to worry about process locking or stacks in
		 * the kernel.
		 */
		rv = vm_fault(kernel_map, va, prot, VM_FAULT_NORMAL);
	}

	CTR3(KTR_TRAP, "trap_pfault: return td=%p va=%#lx rv=%d",
	    td, va, rv);
	if (rv == KERN_SUCCESS)
		return (0);
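	/*
	 * The fault could not be resolved.  For kernel-mode faults, transfer
	 * control to the registered onfault handler (if any) so that
	 * copyin()/copyout() style code can return an error instead of
	 * panicking the machine; %tnpc is simply set to the instruction
	 * following the handler entry point.
	 */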
	if ((tf->tf_tstate & TSTATE_PRIV) != 0) {
		if (td->td_intr_nesting_level == 0 &&
		    pcb->pcb_onfault != NULL) {
			tf->tf_tpc = (u_long)pcb->pcb_onfault;
			tf->tf_tnpc = tf->tf_tpc + 4;
			return (0);
		}
	}
	return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

/* Maximum number of arguments that can be passed via the out registers. */
#define	REG_MAXARGS	6

/*
 * Syscall handler. The arguments to the syscall are passed in the out
 * registers by the caller, and are saved in the trap frame. The syscall
 * number is passed in %g1 (and also saved in the trap frame).
 */
void
syscall(struct trapframe *tf)
{
	struct sysent *callp;
	struct thread *td;
	register_t args[8];
	register_t *argp;
	struct proc *p;
	u_int sticks;
	u_long code;
	u_long tpc;
	int reg;
	int regcnt;
	int narg;
	int error;

	KASSERT(PCPU_GET(curthread) != NULL, ("syscall: curthread NULL"));
	KASSERT(PCPU_GET(curthread)->td_kse != NULL, ("syscall: curkse NULL"));
	KASSERT(PCPU_GET(curthread)->td_proc != NULL, ("syscall: curproc NULL"));

	atomic_add_int(&cnt.v_syscall, 1);

	td = PCPU_GET(curthread);
	p = td->td_proc;

	narg = 0;
	error = 0;
	reg = 0;
	regcnt = REG_MAXARGS;

	sticks = td->td_kse->ke_sticks;
	td->td_frame = tf;
	if (td->td_ucred != p->p_ucred)
		cred_update_thread(td);
	if (p->p_flag & P_KSES) {
		/*
		 * If we are doing a syscall in a KSE environment,
		 * note where our mailbox is. There is always the
		 * possibility that we could do this lazily (in sleep()),
		 * but for now do it every time.
		 */
		td->td_mailbox = (void *)fuword((caddr_t)td->td_kse->ke_mailbox
		    + offsetof(struct kse_mailbox, kmbx_current_thread));
		if ((td->td_mailbox == NULL) ||
		    (td->td_mailbox == (void *)-1)) {
			td->td_mailbox = NULL;	/* single thread it.. */
			td->td_flags &= ~TDF_UNBOUND;
		} else {
			td->td_flags |= TDF_UNBOUND;
		}
	}
	code = tf->tf_global[1];

	/*
	 * For syscalls, we don't want to retry the faulting instruction
	 * (usually); instead we need to advance one instruction.
	 */
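	/*
	 * TF_DONE() advances past the trap instruction (roughly tf_tpc =
	 * tf_tnpc, tf_tnpc += 4); the ERESTART case below undoes this when
	 * the system call has to be restarted.
	 */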
	tpc = tf->tf_tpc;
	TF_DONE(tf);

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is MP aware.
		 */
#if 0
		(*p->p_sysent->sv_prepsyscall)(tf, args, &code, &params);
#endif
	} else if (code == SYS_syscall || code == SYS___syscall) {
		code = tf->tf_out[reg++];
		regcnt--;
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

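	/*
	 * Marshal the syscall arguments.  If they all fit in the out
	 * registers they are used directly from the trap frame; otherwise
	 * the register arguments are copied into args[] and the rest are
	 * fetched from the caller's stack frame: %o6 (tf_out[6]) is the
	 * user stack pointer, SPOFF is the 64-bit stack bias, and the
	 * seventh and later arguments start just past the six fr_pad slots
	 * (presumably the home slots for the register arguments).
	 */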
	if (narg <= regcnt) {
		argp = &tf->tf_out[reg];
		error = 0;
	} else {
		KASSERT(narg <= sizeof(args) / sizeof(args[0]),
		    ("Too many syscall arguments!"));
		argp = args;
		bcopy(&tf->tf_out[reg], args, sizeof(args[0]) * regcnt);
		error = copyin((void *)(tf->tf_out[6] + SPOFF +
		    offsetof(struct frame, fr_pad[6])),
		    &args[regcnt], (narg - regcnt) * sizeof(args[0]));
	}

	CTR5(KTR_SYSC, "syscall: td=%p %s(%#lx, %#lx, %#lx)", td,
	    syscallnames[code], argp[0], argp[1], argp[2]);

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_lock(&Giant);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL))
		ktrsyscall(code, narg, argp);
#endif
	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = 0;

		STOPEVENT(p, S_SCE, narg);	/* MP aware */

		error = (*callp->sy_call)(td, argp);

		CTR5(KTR_SYSC, "syscall: p=%p error=%d %s return %#lx %#lx ", p,
		    error, syscallnames[code], td->td_retval[0],
		    td->td_retval[1]);
	}

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
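	/*
	 * Return convention: userland syscall stubs check the carry bit of
	 * the condition codes to see whether the call failed, so clear
	 * TSTATE_XCC_C and pass back the return values in %o0/%o1 on
	 * success, or set it and pass back the (translated) error number in
	 * %o0 on failure.
	 */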
	switch (error) {
	case 0:
		tf->tf_out[0] = td->td_retval[0];
		tf->tf_out[1] = td->td_retval[1];
		tf->tf_tstate &= ~TSTATE_XCC_C;
		break;

	case ERESTART:
		/*
		 * Undo the tpc advancement we have done above; we want to
		 * reexecute the system call.
		 */
		tf->tf_tpc = tpc;
		tf->tf_tnpc -= 4;
		break;

	case EJUSTRETURN:
		break;

	default:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		tf->tf_out[0] = error;
		tf->tf_tstate |= TSTATE_XCC_C;
		break;
	}

	/*
	 * Release Giant if we had to get it.  Don't use mtx_owned();
	 * we want to catch broken syscalls.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_unlock(&Giant);

	/*
	 * Handle reschedule and other end-of-syscall issues.
	 */
	userret(td, tf, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(code, error, td->td_retval[0]);
#endif
	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

#ifdef DIAGNOSTIC
	cred_free_thread(td);
#endif
#ifdef WITNESS
	if (witness_list(td)) {
		panic("system call %s returning with mutex(s) held\n",
		    syscallnames[code]);
	}
#endif
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}