/* trap.c, FreeBSD revision 91475 */
/*-
 * Copyright (c) 2001, Jake Burkholder
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
 * 	from: FreeBSD: src/sys/i386/i386/trap.c,v 1.197 2001/07/19
 * $FreeBSD: head/sys/sparc64/sparc64/trap.c 91475 2002-02-28 08:28:14Z arr $
 */
42
43#include "opt_ddb.h"
44#include "opt_ktr.h"
45
46#include <sys/param.h>
47#include <sys/bus.h>
48#include <sys/interrupt.h>
49#include <sys/ktr.h>
50#include <sys/lock.h>
51#include <sys/mutex.h>
52#include <sys/systm.h>
53#include <sys/pioctl.h>
54#include <sys/proc.h>
55#include <sys/syscall.h>
56#include <sys/sysent.h>
57#include <sys/user.h>
58#include <sys/vmmeter.h>
59
60#include <vm/vm.h>
61#include <vm/pmap.h>
62#include <vm/vm_extern.h>
63#include <vm/vm_param.h>
64#include <vm/vm_kern.h>
65#include <vm/vm_map.h>
66#include <vm/vm_page.h>
67#include <vm/vm_zone.h>
68
69#include <machine/clock.h>
70#include <machine/frame.h>
71#include <machine/intr_machdep.h>
72#include <machine/pcb.h>
73#include <machine/pv.h>
74#include <machine/trap.h>
75#include <machine/tstate.h>
76#include <machine/tte.h>
77#include <machine/tlb.h>
78#include <machine/tsb.h>
79#include <machine/watch.h>
80
81void trap(struct trapframe *tf);
82void syscall(struct trapframe *tf);
83
84static int trap_pfault(struct thread *td, struct trapframe *tf);
85
86extern char fsbail[];
87
88extern char *syscallnames[];
89
/*
 * Human-readable names for the trap types, used by the KTR trace in trap()
 * and by the panic message for unhandled traps.  The table is indexed by
 * the trap type number (tf_type with T_KERNEL masked off), so the order
 * must match the T_* constants in <machine/trap.h>.  Note that "fill"
 * appears twice because T_FILL and T_FILL_RET are distinct trap types.
 */
const char *trap_msg[] = {
	"reserved",
	"instruction access exception",
	"instruction access error",
	"instruction access protection",
	"illtrap instruction",
	"illegal instruction",
	"privileged opcode",
	"floating point disabled",
	"floating point exception ieee 754",
	"floating point exception other",
	"tag overflow",
	"division by zero",
	"data access exception",
	"data access error",
	"data access protection",
	"memory address not aligned",
	"privileged action",
	"async data error",
	"trap instruction 16",
	"trap instruction 17",
	"trap instruction 18",
	"trap instruction 19",
	"trap instruction 20",
	"trap instruction 21",
	"trap instruction 22",
	"trap instruction 23",
	"trap instruction 24",
	"trap instruction 25",
	"trap instruction 26",
	"trap instruction 27",
	"trap instruction 28",
	"trap instruction 29",
	"trap instruction 30",
	"trap instruction 31",
	"interrupt",
	"physical address watchpoint",
	"virtual address watchpoint",
	"corrected ecc error",
	"fast instruction access mmu miss",
	"fast data access mmu miss",
	"spill",
	"fill",
	"fill",
	"breakpoint",
	"clean window",
	"range check",
	"fix alignment",
	"integer overflow",
	"syscall",
	"restore physical watchpoint",
	"restore virtual watchpoint",
	"kernel stack fault",
};
144
/*
 * C trap handler, called from the trap table for traps the assembler
 * fast paths could not resolve.  The trap type is in tf->tf_type;
 * T_KERNEL is or'ed in for traps taken while in kernel mode.  User
 * traps are either fixed up (unaligned access emulation, register
 * window spill/fill, page faults, FP enable) or end in a signal being
 * posted via trapsignal(); unhandled kernel traps panic.
 */
void
trap(struct trapframe *tf)
{
	struct thread *td;
	struct proc *p;
	u_int sticks;	/* snapshot of profiling ticks, passed to userret() */
	int error;
	int ucode;	/* signal code handed to trapsignal() */
	int mask;	/* out-parameter for watch_{phys,virt}_get() */
	int type;
	int sig;

	KASSERT(PCPU_GET(curthread) != NULL, ("trap: curthread NULL"));
	KASSERT(PCPU_GET(curthread)->td_kse != NULL, ("trap: curkse NULL"));
	KASSERT(PCPU_GET(curthread)->td_proc != NULL, ("trap: curproc NULL"));

	atomic_add_int(&cnt.v_trap, 1);

	td = PCPU_GET(curthread);
	p = td->td_proc;

	error = 0;
	type = tf->tf_type;
	ucode = type;	/* XXX */

	CTR4(KTR_TRAP, "trap: %s type=%s (%s) pil=%#lx",
	    p->p_comm, trap_msg[type & ~T_KERNEL],
	    ((type & T_KERNEL) ? "kernel" : "user"),
	    rdpr(pil));

	if ((type & T_KERNEL) == 0) {
		/* Trap from user mode: record ticks and the trap frame. */
		sticks = td->td_kse->ke_sticks;
		td->td_frame = tf;
#ifdef DIAGNOSTIC 			/* see the comment in ast() */
		if (td->td_ucred != NULL)
			panic("trap(): thread got a ucred while in userspace");
		td->td_ucred = td->td_ucred_cache;
		td->td_ucred_cache = NULL;
#endif
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);
 	} else {
 		sticks = 0;
		/*
		 * Kernel breakpoint traps may arrive very early (e.g. a
		 * debugger entry during boot), before the thread has a
		 * ucred, so they are exempt from the assertion below.
		 */
if ((type & ~T_KERNEL) != T_BREAKPOINT)
		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
	}

	switch (type) {

	/*
	 * User Mode Traps
	 */
	case T_MEM_ADDRESS_NOT_ALIGNED:
		/* Try to emulate the unaligned access; signal on failure. */
		if ((sig = unaligned_fixup(td, tf)) == 0) {
			TF_DONE(tf);
			goto user;
		}
		goto trapsig;
#if 0
	case T_ALIGN_LDDF:
	case T_ALIGN_STDF:
		sig = SIGBUS;
		goto trapsig;
#endif
	case T_BREAKPOINT:
		sig = SIGTRAP;
		goto trapsig;
	case T_DIVISION_BY_ZERO:
		sig = SIGFPE;
		goto trapsig;
	case T_FP_DISABLED:
		/* Give the thread the FPU; if that fails, treat as an FP trap. */
		if (fp_enable_thread(td, tf))
			goto user;
		/* Fallthrough. */
	case T_FP_EXCEPTION_IEEE_754:
	case T_FP_EXCEPTION_OTHER:
		mtx_lock(&Giant);
		if ((sig = fp_exception_other(td, tf, &ucode)) == 0) {
			mtx_unlock(&Giant);
			TF_DONE(tf);
			goto user;
		}
		mtx_unlock(&Giant);
		goto trapsig;
	case T_DATA_ERROR:
	case T_DATA_EXCEPTION:
	case T_INSTRUCTION_ERROR:
	case T_INSTRUCTION_EXCEPTION:
		sig = SIGILL;	/* XXX */
		goto trapsig;
	case T_DATA_MISS:
	case T_DATA_PROTECTION:
	case T_INSTRUCTION_MISS:
		/* Page fault; trap_pfault() returns 0 or a signal number. */
		error = trap_pfault(td, tf);
		if (error == 0)
			goto user;
		sig = error;
		goto trapsig;
	case T_FILL:
		/* Refill a user register window; kill the process on failure. */
		if (rwindow_load(td, tf, 2)) {
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* Not reached. */
		}
		goto user;
	case T_FILL_RET:
		if (rwindow_load(td, tf, 1)) {
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* Not reached. */
		}
		goto user;
	case T_ILLEGAL_INSTRUCTION:
		/* Attempt instruction emulation before signalling. */
		if ((sig = emul_insn(td, tf)) == 0) {
			TF_DONE(tf);
			goto user;
		}
		goto trapsig;
	case T_PRIVILEGED_ACTION:
	case T_PRIVILEGED_OPCODE:
		sig = SIGBUS;
		goto trapsig;
	case T_TRAP_INSTRUCTION_16:
	case T_TRAP_INSTRUCTION_17:
	case T_TRAP_INSTRUCTION_18:
	case T_TRAP_INSTRUCTION_19:
	case T_TRAP_INSTRUCTION_20:
	case T_TRAP_INSTRUCTION_21:
	case T_TRAP_INSTRUCTION_22:
	case T_TRAP_INSTRUCTION_23:
	case T_TRAP_INSTRUCTION_24:
	case T_TRAP_INSTRUCTION_25:
	case T_TRAP_INSTRUCTION_26:
	case T_TRAP_INSTRUCTION_27:
	case T_TRAP_INSTRUCTION_28:
	case T_TRAP_INSTRUCTION_29:
	case T_TRAP_INSTRUCTION_30:
	case T_TRAP_INSTRUCTION_31:
		sig = SIGILL;
		goto trapsig;
	case T_SPILL:
		/* Save user register windows to the stack. */
		if (rwindow_save(td)) {
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* Not reached. */
		}
		goto user;
	case T_TAG_OFERFLOW:	/* sic: spelled this way in <machine/trap.h> */
		sig = SIGEMT;
		goto trapsig;

	/*
	 * Kernel Mode Traps
	 */
#ifdef DDB
	case T_BREAKPOINT | T_KERNEL:
	case T_KSTACK_FAULT | T_KERNEL:
		/* Enter the kernel debugger; panic below if it declines. */
		if (kdb_trap(tf) != 0)
			goto out;
		break;
#endif
	case T_DATA_MISS | T_KERNEL:
	case T_DATA_PROTECTION | T_KERNEL:
	case T_INSTRUCTION_MISS | T_KERNEL:
		error = trap_pfault(td, tf);
		if (error == 0)
			goto out;
		break;
	case T_PA_WATCHPOINT | T_KERNEL:
		/*
		 * Same single-step-via-trap technique as described for
		 * T_VA_WATCHPOINT below, for physical address watchpoints.
		 */
		TR3("trap: watch phys pa=%#lx tpc=%#lx, tnpc=%#lx",
		    watch_phys_get(&mask), tf->tf_tpc, tf->tf_tnpc);
		PCPU_SET(wp_pstate, (tf->tf_tstate & TSTATE_PSTATE_MASK) >>
		    TSTATE_PSTATE_SHIFT);
		tf->tf_tstate &= ~TSTATE_IE;
		wrpr(pstate, rdpr(pstate), PSTATE_IE);
		PCPU_SET(wp_insn, *((u_int *)tf->tf_tnpc));
		*((u_int *)tf->tf_tnpc) = 0x91d03002;	/* ta %xcc, 2 */
		flush(tf->tf_tnpc);
		PCPU_SET(wp_va, watch_phys_get(&mask));
		PCPU_SET(wp_mask, mask);
		watch_phys_clear();
		goto out;
	case T_VA_WATCHPOINT | T_KERNEL:
		/*
		 * At the moment, just print the information from the trap,
		 * remove the watchpoint, use evil magic to execute the
		 * instruction (we temporarily save the instruction at
		 * %tnpc, write a trap instruction, resume, and reset the
		 * watch point when the trap arrives).
		 * To make sure that no interrupt gets in between and creates
		 * a potentially large window where the watchpoint is inactive,
		 * disable interrupts temporarily.
		 * This is obviously fragile and evilish.
		 */
		TR3("trap: watch virt pa=%#lx tpc=%#lx, tnpc=%#lx",
		    watch_virt_get(&mask), tf->tf_tpc, tf->tf_tnpc);
		PCPU_SET(wp_pstate, (tf->tf_tstate & TSTATE_PSTATE_MASK) >>
		    TSTATE_PSTATE_SHIFT);
		tf->tf_tstate &= ~TSTATE_IE;
		wrpr(pstate, rdpr(pstate), PSTATE_IE);
		PCPU_SET(wp_insn, *((u_int *)tf->tf_tnpc));
		*((u_int *)tf->tf_tnpc) = 0x91d03003;	/* ta %xcc, 3 */
		flush(tf->tf_tnpc);
		PCPU_SET(wp_va, watch_virt_get(&mask));
		PCPU_SET(wp_mask, mask);
		watch_virt_clear();
		goto out;
	case T_RSTRWP_PHYS | T_KERNEL:
		/* Restore pstate and the saved instruction, re-arm the watchpoint. */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_PSTATE_MASK) |
		    PCPU_GET(wp_pstate) << TSTATE_PSTATE_SHIFT;
		watch_phys_set_mask(PCPU_GET(wp_va), PCPU_GET(wp_mask));
		*(u_int *)tf->tf_tpc = PCPU_GET(wp_insn);
		flush(tf->tf_tpc);
		goto out;
	case T_RSTRWP_VIRT | T_KERNEL:
		/*
		 * Undo the tweaks done for T_WATCH, reset the watch point and
		 * continue execution.
		 * Note that here, we run with interrupts enabled, so there
		 * is a small chance that we will be interrupted before we
		 * could reset the watch point.
		 */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_PSTATE_MASK) |
		    PCPU_GET(wp_pstate) << TSTATE_PSTATE_SHIFT;
		watch_virt_set_mask(PCPU_GET(wp_va), PCPU_GET(wp_mask));
		*(u_int *)tf->tf_tpc = PCPU_GET(wp_insn);
		flush(tf->tf_tpc);
		goto out;
	default:
		break;
	}
	/* Any trap not resolved above is fatal. */
	panic("trap: %s", trap_msg[type & ~T_KERNEL]);

trapsig:
	/* Translate fault for emulators. */
	if (p->p_sysent->sv_transtrap != NULL)
		sig = (p->p_sysent->sv_transtrap)(sig, type);
	trapsignal(p, sig, ucode);
user:
	/* Common exit path for user mode traps. */
	userret(td, tf, sticks);
	mtx_assert(&Giant, MA_NOTOWNED);
#ifdef DIAGNOSTIC 			/* see the comment in ast() */
	if (td->td_ucred_cache)
		panic("trap:thread already has cached ucred");
	td->td_ucred_cache = td->td_ucred;
       	td->td_ucred = NULL;
#endif /* DIAGNOSTIC */
out:
	CTR1(KTR_TRAP, "trap: td=%p return", td);
	return;
}
397
/*
 * Resolve a page fault trap (T_DATA_MISS, T_DATA_PROTECTION or
 * T_INSTRUCTION_MISS, from user or kernel mode).  The faulting virtual
 * address and MMU context are decoded from the tag access register
 * image saved in tf->tf_tar.
 *
 * Returns:
 *   0  - the fault was resolved (or recovered via pcb_onfault),
 *  -1  - unrecoverable kernel-mode fault on a user context (caller panics),
 *   or the signal number (SIGBUS/SIGSEGV) to post for a user fault
 *   that could not be satisfied.
 */
static int
trap_pfault(struct thread *td, struct trapframe *tf)
{
	struct vmspace *vm;
	struct pcb *pcb;
	struct proc *p;
	vm_offset_t va;		/* faulting virtual address */
	vm_prot_t prot;		/* access type needed to satisfy the fault */
	u_long ctx;		/* MMU context the fault was taken in */
	int flags;
	int type;
	int rv;

	p = td->td_proc;
	KASSERT(td->td_pcb != NULL, ("trap_pfault: pcb NULL"));
	KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));

	rv = KERN_SUCCESS;
	ctx = TLB_TAR_CTX(tf->tf_tar);
	pcb = td->td_pcb;
	type = tf->tf_type & ~T_KERNEL;
	va = TLB_TAR_VA(tf->tf_tar);

	CTR4(KTR_TRAP, "trap_pfault: td=%p pm_ctx=%#lx va=%#lx ctx=%#lx",
	    td, p->p_vmspace->vm_pmap.pm_context[PCPU_GET(cpuid)], va, ctx);

	/* Map the trap type to the protection required and fault flags. */
	if (type == T_DATA_PROTECTION) {
		prot = VM_PROT_WRITE;
		flags = VM_FAULT_DIRTY;
	} else {
		if (type == T_DATA_MISS)
			prot = VM_PROT_READ;
		else
			prot = VM_PROT_READ | VM_PROT_EXECUTE;
		flags = VM_FAULT_NORMAL;
	}

	if (ctx != TLB_CTX_KERNEL) {
		/*
		 * Kernel-mode fault on a user context: only recoverable if
		 * we are not in an interrupt handler and a valid onfault
		 * handler is registered (fsbail means "fail immediately").
		 */
		if ((tf->tf_tstate & TSTATE_PRIV) != 0 &&
		    (td->td_intr_nesting_level != 0 ||
		    pcb->pcb_onfault == NULL || pcb->pcb_onfault == fsbail))
			return (-1);

		/*
		 * This is a fault on non-kernel virtual memory.
		 */
		vm = p->p_vmspace;

		mtx_lock(&Giant);

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/*
		 * Grow the stack if necessary.  vm_map_growstack only
		 * fails if the va falls into a growable stack region
		 * and the stack growth fails.  If it succeeds, or the
		 * va was not within a growable stack region, fault in
		 * the user page.
		 */
		if (vm_map_growstack(p, va) != KERN_SUCCESS)
			rv = KERN_FAILURE;
		else
			rv = vm_fault(&vm->vm_map, va, prot, flags);

		/*
		 * Now the process can be swapped again.
		 */
		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
	} else {
		/*
		 * This is a fault on kernel virtual memory.  Attempts to access
		 * kernel memory from user mode cause priviledged action traps,
		 * not page fault.
		 */
		KASSERT(tf->tf_tstate & TSTATE_PRIV,
		    ("trap_pfault: fault on nucleus context from user mode"));

		mtx_lock(&Giant);

		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(kernel_map, va, prot, VM_FAULT_NORMAL);
	}
	mtx_unlock(&Giant);

	CTR3(KTR_TRAP, "trap_pfault: return td=%p va=%#lx rv=%d",
	    td, va, rv);
	if (rv == KERN_SUCCESS)
		return (0);
	if ((tf->tf_tstate & TSTATE_PRIV) != 0) {
		/*
		 * Unresolved kernel fault: vector to the registered onfault
		 * recovery point, if any, instead of panicking.
		 */
		if (td->td_intr_nesting_level == 0 &&
		    pcb->pcb_onfault != NULL) {
			tf->tf_tpc = (u_long)pcb->pcb_onfault;
			tf->tf_tnpc = tf->tf_tpc + 4;
			return (0);
		}
	}
	return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
507
/* Maximum number of arguments that can be passed via the out registers. */
#define	REG_MAXARGS	6

/*
 * Syscall handler. The arguments to the syscall are passed in the o registers
 * by the caller, and are saved in the trap frame. The syscall number is passed
 * in %g1 (and also saved in the trap frame).  Any arguments beyond the first
 * REG_MAXARGS are copied in from the caller's stack frame.  On return, the
 * results are placed in %o0/%o1 with the xcc carry bit cleared on success;
 * on error, the (possibly translated) errno goes in %o0 and carry is set.
 */
void
syscall(struct trapframe *tf)
{
	struct sysent *callp;	/* syscall table entry for this call */
	struct thread *td;
	register_t args[8];	/* staging buffer when args spill to the stack */
	register_t *argp;	/* points at the argument vector actually used */
	struct proc *p;
	u_int sticks;		/* tick snapshot for userret() */
	u_long code;		/* syscall number */
	u_long tpc;		/* saved %tpc, for ERESTART rollback */
	int reg;		/* first out register holding an argument */
	int regcnt;		/* number of args available in registers */
	int narg;		/* number of args the syscall takes */
	int error;

	KASSERT(PCPU_GET(curthread) != NULL, ("trap: curthread NULL"));
	KASSERT(PCPU_GET(curthread)->td_kse != NULL, ("trap: curkse NULL"));
	KASSERT(PCPU_GET(curthread)->td_proc != NULL, ("trap: curproc NULL"));

	atomic_add_int(&cnt.v_syscall, 1);

	td = PCPU_GET(curthread);
	p = td->td_proc;

	narg = 0;
	error = 0;
	reg = 0;
	regcnt = REG_MAXARGS;

	sticks = td->td_kse->ke_sticks;
	td->td_frame = tf;
#ifdef DIAGNOSTIC 			/* see the comment in ast() */
	if (td->td_ucred)
		panic("syscall:thread got a cred while userspace");
	td->td_ucred = td->td_ucred_cache;
	td->td_ucred_cache = NULL;
#endif /* DIAGNOSTIC */
	if (td->td_ucred != p->p_ucred)
		cred_update_thread(td);
	/* The syscall number was passed in %g1. */
	code = tf->tf_global[1];

	/*
	 * For syscalls, we don't want to retry the faulting instruction
	 * (usually), instead we need to advance one instruction.
	 */
	tpc = tf->tf_tpc;
	TF_DONE(tf);

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is MP aware.
		 */
#if 0
		(*p->p_sysent->sv_prepsyscall)(tf, args, &code, &params);
#endif
	} else 	if (code == SYS_syscall || code == SYS___syscall) {
		/*
		 * Indirect syscall: the real syscall number is the first
		 * argument, so shift the argument registers by one.
		 */
		code = tf->tf_out[reg++];
		regcnt--;
	}

 	if (p->p_sysent->sv_mask)
 		code &= p->p_sysent->sv_mask;

	/* Out-of-range numbers map to entry 0 (the "nosys" slot). */
 	if (code >= p->p_sysent->sv_size)
 		callp = &p->p_sysent->sv_table[0];
  	else
 		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	if (narg <= regcnt)
		argp = &tf->tf_out[reg];
	else {
		/*
		 * More arguments than fit in the out registers: gather the
		 * register args into args[] and copy the rest in from the
		 * caller's stack frame (%sp is in tf_out[6], biased by SPOFF).
		 */
		KASSERT(narg <= sizeof(args) / sizeof(args[0]),
		    ("Too many syscall arguments!"));
		argp = args;
		bcopy(&tf->tf_out[reg], args, sizeof(args[0]) * regcnt);
		error = copyin((void *)(tf->tf_out[6] + SPOFF +
		    offsetof(struct frame, f_pad[6])),
		    &args[regcnt], (narg - regcnt) * sizeof(args[0]));
		if (error != 0)
			goto bad;
	}

	CTR5(KTR_SYSC, "syscall: td=%p %s(%#lx, %#lx, %#lx)", td,
	    syscallnames[code], argp[0], argp[1], argp[2]);

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_lock(&Giant);

#ifdef KTRACE
	/*
	 * We have to obtain the MP lock no matter what if
	 * we are ktracing
	 */
	if (KTRPOINT(p, KTR_SYSCALL)) {
		ktrsyscall(p->p_tracep, code, narg, args);
	}
#endif
	td->td_retval[0] = 0;
	td->td_retval[1] = 0;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(td, argp);

	CTR5(KTR_SYSC, "syscall: p=%p error=%d %s return %#lx %#lx ", p,
	    error, syscallnames[code], td->td_retval[0], td->td_retval[1]);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/* Success: return values in %o0/%o1, carry cleared. */
		tf->tf_out[0] = td->td_retval[0];
		tf->tf_out[1] = td->td_retval[1];
		tf->tf_tstate &= ~TSTATE_XCC_C;
		break;

	case ERESTART:
		/*
		 * Undo the tpc advancement we have done above, we want to
		 * reexecute the system call.
		 */
		tf->tf_tpc = tpc;
		tf->tf_tnpc -= 4;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		/* Translate the error through the ABI's errno table, if any. */
 		if (p->p_sysent->sv_errsize) {
 			if (error >= p->p_sysent->sv_errsize)
  				error = -1;	/* XXX */
   			else
  				error = p->p_sysent->sv_errtbl[error];
		}
		/* Failure: errno in %o0, carry set. */
		tf->tf_out[0] = error;
		tf->tf_tstate |= TSTATE_XCC_C;
		break;
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(td, tf, sticks);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		ktrsysret(p->p_tracep, code, error, td->td_retval[0]);
	}
#endif

	/*
	 * Release Giant if we had to get it.  Don't use mtx_owned(),
	 * we want to catch broken syscalls.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_unlock(&Giant);

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

#ifdef DIAGNOSTIC 			/* see the comment in ast() */
	if (td->td_ucred_cache)
		panic("syscall:thread already has cached ucred");
	td->td_ucred_cache = td->td_ucred;
       	td->td_ucred = NULL;
#endif /* DIAGNOSTIC */
#ifdef WITNESS
	if (witness_list(td)) {
		panic("system call %s returning with mutex(s) held\n",
		    syscallnames[code]);
	}
#endif
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}
705