/* trap.c revision 94257 */
1/*-
2 * Copyright (c) 2001, Jake Burkholder
3 * Copyright (C) 1994, David Greenman
4 * Copyright (c) 1990, 1993
5 *      The Regents of the University of California.  All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the University of Utah, and William Jolitz.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *      This product includes software developed by the University of
21 *      California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
39 * 	from: FreeBSD: src/sys/i386/i386/trap.c,v 1.197 2001/07/19
40 * $FreeBSD: head/sys/sparc64/sparc64/trap.c 94257 2002-04-09 01:43:07Z jake $
41 */
42
43#include "opt_ddb.h"
44#include "opt_ktr.h"
45#include "opt_ktrace.h"
46
47#include <sys/param.h>
48#include <sys/bus.h>
49#include <sys/interrupt.h>
50#include <sys/ktr.h>
51#include <sys/lock.h>
52#include <sys/mutex.h>
53#include <sys/systm.h>
54#include <sys/pioctl.h>
55#include <sys/proc.h>
56#include <sys/smp.h>
57#include <sys/syscall.h>
58#include <sys/sysent.h>
59#include <sys/user.h>
60#include <sys/vmmeter.h>
61#ifdef KTRACE
62#include <sys/uio.h>
63#include <sys/ktrace.h>
64#endif
65
66#include <vm/vm.h>
67#include <vm/pmap.h>
68#include <vm/vm_extern.h>
69#include <vm/vm_param.h>
70#include <vm/vm_kern.h>
71#include <vm/vm_map.h>
72#include <vm/vm_page.h>
73
74#include <machine/clock.h>
75#include <machine/emul.h>
76#include <machine/frame.h>
77#include <machine/intr_machdep.h>
78#include <machine/pcb.h>
79#include <machine/pv.h>
80#include <machine/smp.h>
81#include <machine/trap.h>
82#include <machine/tstate.h>
83#include <machine/tte.h>
84#include <machine/tlb.h>
85#include <machine/tsb.h>
86#include <machine/watch.h>
87
/*
 * Entry points invoked from the assembler trap vector code with the
 * current trap frame.
 */
void trap(struct trapframe *tf);
void syscall(struct trapframe *tf);

/* Resolve a page fault; returns 0 on success or a signal number/-1. */
static int trap_pfault(struct thread *td, struct trapframe *tf);

/*
 * Label in the copy/fetch support code; when pcb_onfault points here the
 * fault must not be serviced by vm_fault (see trap_pfault()).
 * NOTE(review): exact semantics defined in the assembler support code —
 * not visible in this file.
 */
extern char fsbail[];

/* System call name table, used for KTR tracing and panic messages. */
extern char *syscallnames[];
96
/*
 * Human-readable trap descriptions, indexed by the trap type with the
 * T_KERNEL flag masked off (see trap_msg[type & ~T_KERNEL] below).  The
 * order must match the T_* definitions in <machine/trap.h>.
 */
const char *trap_msg[] = {
	"reserved",
	"instruction access exception",
	"instruction access error",
	"instruction access protection",
	"illtrap instruction",
	"illegal instruction",
	"privileged opcode",
	"floating point disabled",
	"floating point exception ieee 754",
	"floating point exception other",
	"tag overflow",
	"division by zero",
	"data access exception",
	"data access error",
	"data access protection",
	"memory address not aligned",
	"privileged action",
	"async data error",
	"trap instruction 16",
	"trap instruction 17",
	"trap instruction 18",
	"trap instruction 19",
	"trap instruction 20",
	"trap instruction 21",
	"trap instruction 22",
	"trap instruction 23",
	"trap instruction 24",
	"trap instruction 25",
	"trap instruction 26",
	"trap instruction 27",
	"trap instruction 28",
	"trap instruction 29",
	"trap instruction 30",
	"trap instruction 31",
	"interrupt",
	"physical address watchpoint",
	"virtual address watchpoint",
	"corrected ecc error",
	"fast instruction access mmu miss",
	"fast data access mmu miss",
	"spill",
	"fill",
	"fill",
	"breakpoint",
	"clean window",
	"range check",
	"fix alignment",
	"integer overflow",
	"syscall",
	"restore physical watchpoint",
	"restore virtual watchpoint",
	"kernel stack fault",
};
151
/*
 * Generic trap handler, called from the trap vector code for all traps
 * that are not handled entirely in assembler (tlb misses, interrupts,
 * window spills/fills take fast paths elsewhere).  The T_KERNEL flag in
 * tf->tf_type distinguishes kernel-mode from user-mode traps.  User traps
 * either succeed and return to userland via userret(), or post a signal;
 * unhandled kernel traps panic.
 */
void
trap(struct trapframe *tf)
{
	struct thread *td;
	struct proc *p;
	u_int sticks;		/* statclock tick count at trap entry */
	int error;
	int ucode;		/* signal code passed to trapsignal() */
#ifdef DDB
	int mask;		/* watchpoint mask, filled by watch_*_get() */
#endif
	int type;
	int sig;

	KASSERT(PCPU_GET(curthread) != NULL, ("trap: curthread NULL"));
	KASSERT(PCPU_GET(curthread)->td_kse != NULL, ("trap: curkse NULL"));
	KASSERT(PCPU_GET(curthread)->td_proc != NULL, ("trap: curproc NULL"));

	atomic_add_int(&cnt.v_trap, 1);

	td = PCPU_GET(curthread);
	p = td->td_proc;

	error = 0;
	type = tf->tf_type;
	ucode = type;	/* XXX */

	CTR4(KTR_TRAP, "trap: %s type=%s (%s) pil=%#lx",
	    p->p_comm, trap_msg[type & ~T_KERNEL],
	    ((type & T_KERNEL) ? "kernel" : "user"),
	    rdpr(pil));

	if ((type & T_KERNEL) == 0) {
		/* User-mode trap: record ticks and publish the frame. */
		sticks = td->td_kse->ke_sticks;
		td->td_frame = tf;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);
	} else {
		sticks = 0;
		/*
		 * Kernel breakpoint traps may arrive (e.g. from the
		 * debugger) before the thread's credentials are set up;
		 * skip the sanity check for those.
		 */
		if ((type & ~T_KERNEL) != T_BREAKPOINT)
			KASSERT(cold || td->td_ucred != NULL,
			    ("kernel trap doesn't have ucred"));
	}

	switch (type) {

	/*
	 * User Mode Traps
	 */
	case T_MEM_ADDRESS_NOT_ALIGNED:
		/* Try to emulate the unaligned access before signalling. */
		if ((sig = unaligned_fixup(td, tf)) == 0) {
			TF_DONE(tf);
			goto user;
		}
		goto trapsig;
#if 0
	case T_ALIGN_LDDF:
	case T_ALIGN_STDF:
		sig = SIGBUS;
		goto trapsig;
#endif
	case T_BREAKPOINT:
		sig = SIGTRAP;
		goto trapsig;
	case T_DIVISION_BY_ZERO:
		sig = SIGFPE;
		goto trapsig;
	case T_FP_DISABLED:
		/* Lazily enable the FPU for this thread if possible. */
		if (fp_enable_thread(td, tf))
			goto user;
		/* Fallthrough. */
	case T_FP_EXCEPTION_IEEE_754:
	case T_FP_EXCEPTION_OTHER:
		/* Emulation of the faulting FP operation requires Giant. */
		mtx_lock(&Giant);
		if ((sig = fp_exception_other(td, tf, &ucode)) == 0) {
			mtx_unlock(&Giant);
			TF_DONE(tf);
			goto user;
		}
		mtx_unlock(&Giant);
		goto trapsig;
	case T_DATA_ERROR:
	case T_DATA_EXCEPTION:
	case T_INSTRUCTION_ERROR:
	case T_INSTRUCTION_EXCEPTION:
		sig = SIGILL;	/* XXX */
		goto trapsig;
	case T_DATA_MISS:
	case T_DATA_PROTECTION:
	case T_INSTRUCTION_MISS:
		/* Page fault from user mode. */
		error = trap_pfault(td, tf);
		if (error == 0)
			goto user;
		sig = error;
		goto trapsig;
	case T_FILL:
		/* Fault filling a register window from the user stack. */
		if (rwindow_load(td, tf, 2)) {
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* Not reached. */
		}
		goto user;
	case T_FILL_RET:
		if (rwindow_load(td, tf, 1)) {
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* Not reached. */
		}
		goto user;
	case T_ILLEGAL_INSTRUCTION:
		/* Attempt instruction emulation before raising SIGILL. */
		if ((sig = emul_insn(td, tf)) == 0) {
			TF_DONE(tf);
			goto user;
		}
		goto trapsig;
	case T_PRIVILEGED_ACTION:
	case T_PRIVILEGED_OPCODE:
		sig = SIGBUS;
		goto trapsig;
	case T_TRAP_INSTRUCTION_16:
	case T_TRAP_INSTRUCTION_17:
	case T_TRAP_INSTRUCTION_18:
	case T_TRAP_INSTRUCTION_19:
	case T_TRAP_INSTRUCTION_20:
	case T_TRAP_INSTRUCTION_21:
	case T_TRAP_INSTRUCTION_22:
	case T_TRAP_INSTRUCTION_23:
	case T_TRAP_INSTRUCTION_24:
	case T_TRAP_INSTRUCTION_25:
	case T_TRAP_INSTRUCTION_26:
	case T_TRAP_INSTRUCTION_27:
	case T_TRAP_INSTRUCTION_28:
	case T_TRAP_INSTRUCTION_29:
	case T_TRAP_INSTRUCTION_30:
	case T_TRAP_INSTRUCTION_31:
		sig = SIGILL;
		goto trapsig;
	case T_SPILL:
		/* Fault spilling register windows to the user stack. */
		if (rwindow_save(td)) {
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* Not reached. */
		}
		goto user;
	case T_TAG_OFERFLOW:	/* sic - spelling matches <machine/trap.h> */
		sig = SIGEMT;
		goto trapsig;

	/*
	 * Kernel Mode Traps
	 */
#ifdef DDB
	case T_BREAKPOINT | T_KERNEL:
	case T_KSTACK_FAULT | T_KERNEL:
		if (kdb_trap(tf) != 0)
			goto out;
		break;
#endif
	case T_DATA_MISS | T_KERNEL:
	case T_DATA_PROTECTION | T_KERNEL:
	case T_INSTRUCTION_MISS | T_KERNEL:
		/* Kernel page fault; panic below if it cannot be resolved. */
		error = trap_pfault(td, tf);
		if (error == 0)
			goto out;
		break;
#ifdef DDB
	case T_PA_WATCHPOINT | T_KERNEL:
		/*
		 * Physical address watchpoint hit.  Same single-step trick
		 * as the virtual watchpoint case below: patch a trap
		 * instruction at %tnpc, disable interrupts and clear the
		 * watchpoint so the trapping instruction can complete; the
		 * T_RSTRWP_PHYS trap restores everything.
		 */
		TR3("trap: watch phys pa=%#lx tpc=%#lx, tnpc=%#lx",
		    watch_phys_get(&mask), tf->tf_tpc, tf->tf_tnpc);
		PCPU_SET(wp_pstate, (tf->tf_tstate & TSTATE_PSTATE_MASK) >>
		    TSTATE_PSTATE_SHIFT);
		tf->tf_tstate &= ~TSTATE_IE;
		intr_disable();
		PCPU_SET(wp_insn, *((u_int *)tf->tf_tnpc));
		*((u_int *)tf->tf_tnpc) = 0x91d03002;	/* ta %xcc, 2 */
		flush(tf->tf_tnpc);
		PCPU_SET(wp_va, watch_phys_get(&mask));
		PCPU_SET(wp_mask, mask);
		watch_phys_clear();
		goto out;
	case T_VA_WATCHPOINT | T_KERNEL:
		/*
		 * At the moment, just print the information from the trap,
		 * remove the watchpoint, use evil magic to execute the
		 * instruction (we temporarily save the instruction at
		 * %tnpc, write a trap instruction, resume, and reset the
		 * watch point when the trap arrives).
		 * To make sure that no interrupt gets in between and creates
		 * a potentially large window where the watchpoint is inactive,
		 * disable interrupts temporarily.
		 * This is obviously fragile and evilish.
		 */
		TR3("trap: watch virt pa=%#lx tpc=%#lx, tnpc=%#lx",
		    watch_virt_get(&mask), tf->tf_tpc, tf->tf_tnpc);
		PCPU_SET(wp_pstate, (tf->tf_tstate & TSTATE_PSTATE_MASK) >>
		    TSTATE_PSTATE_SHIFT);
		tf->tf_tstate &= ~TSTATE_IE;
		/*
		 * This has no matching intr_restore; the PSTATE_IE state of the
		 * trapping code will be restored when the watch point is
		 * restored.
		 */
		intr_disable();
		PCPU_SET(wp_insn, *((u_int *)tf->tf_tnpc));
		*((u_int *)tf->tf_tnpc) = 0x91d03003;	/* ta %xcc, 3 */
		flush(tf->tf_tnpc);
		PCPU_SET(wp_va, watch_virt_get(&mask));
		PCPU_SET(wp_mask, mask);
		watch_virt_clear();
		goto out;
	case T_RSTRWP_PHYS | T_KERNEL:
		/* Restore PSTATE, the watchpoint and the patched insn. */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_PSTATE_MASK) |
		    PCPU_GET(wp_pstate) << TSTATE_PSTATE_SHIFT;
		watch_phys_set_mask(PCPU_GET(wp_va), PCPU_GET(wp_mask));
		*(u_int *)tf->tf_tpc = PCPU_GET(wp_insn);
		flush(tf->tf_tpc);
		goto out;
	case T_RSTRWP_VIRT | T_KERNEL:
		/*
		 * Undo the tweaks done for T_WATCH, reset the watch point and
		 * continue execution.
		 * Note that here, we run with interrupts enabled, so there
		 * is a small chance that we will be interrupted before we
		 * could reset the watch point.
		 */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_PSTATE_MASK) |
		    PCPU_GET(wp_pstate) << TSTATE_PSTATE_SHIFT;
		watch_virt_set_mask(PCPU_GET(wp_va), PCPU_GET(wp_mask));
		*(u_int *)tf->tf_tpc = PCPU_GET(wp_insn);
		flush(tf->tf_tpc);
		goto out;
#endif
	default:
		break;
	}
	/* Any kernel trap that falls through the switch is fatal. */
	panic("trap: %s", trap_msg[type & ~T_KERNEL]);

trapsig:
	/* Translate fault for emulators. */
	if (p->p_sysent->sv_transtrap != NULL)
		sig = (p->p_sysent->sv_transtrap)(sig, type);
	trapsignal(p, sig, ucode);
user:
	/* Common return path for user-mode traps. */
	userret(td, tf, sticks);
	mtx_assert(&Giant, MA_NOTOWNED);
#ifdef DIAGNOSTIC
	cred_free_thread(td);
#endif
out:
	CTR1(KTR_TRAP, "trap: td=%p return", td);
	return;
}
404
/*
 * Handle a page fault (data miss/protection or instruction miss) taken
 * in either user or kernel mode.  The faulting address and context come
 * from the MMU tag access register saved in tf->tf_tar.
 *
 * Returns 0 if the fault was resolved (including recovery through
 * pcb_onfault), -1 for an unrecoverable kernel-mode fault on user
 * addresses, or a signal number (SIGBUS/SIGSEGV) for the caller to post.
 */
static int
trap_pfault(struct thread *td, struct trapframe *tf)
{
	struct vmspace *vm;
	struct pcb *pcb;
	struct proc *p;
	vm_offset_t va;
	vm_prot_t prot;
	u_long ctx;		/* MMU context of the faulting access */
	int flags;
	int type;
	int rv;

	p = td->td_proc;
	KASSERT(td->td_pcb != NULL, ("trap_pfault: pcb NULL"));
	KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));

	rv = KERN_SUCCESS;
	ctx = TLB_TAR_CTX(tf->tf_tar);
	pcb = td->td_pcb;
	type = tf->tf_type & ~T_KERNEL;
	va = TLB_TAR_VA(tf->tf_tar);

	CTR4(KTR_TRAP, "trap_pfault: td=%p pm_ctx=%#lx va=%#lx ctx=%#lx",
	    td, p->p_vmspace->vm_pmap.pm_context[PCPU_GET(cpuid)], va, ctx);

	/* Derive the required protection and fault flags from the trap type. */
	if (type == T_DATA_PROTECTION) {
		prot = VM_PROT_WRITE;
		flags = VM_FAULT_DIRTY;
	} else {
		if (type == T_DATA_MISS)
			prot = VM_PROT_READ;
		else
			prot = VM_PROT_READ | VM_PROT_EXECUTE;
		flags = VM_FAULT_NORMAL;
	}

	if (ctx != TLB_CTX_KERNEL) {
		/*
		 * Kernel-mode fault on a user address: only recoverable if
		 * we are not in an interrupt handler and a valid onfault
		 * handler (other than fsbail) is installed.
		 */
		if ((tf->tf_tstate & TSTATE_PRIV) != 0 &&
		    (td->td_intr_nesting_level != 0 ||
		    pcb->pcb_onfault == NULL || pcb->pcb_onfault == fsbail))
			return (-1);

		/*
		 * This is a fault on non-kernel virtual memory.
		 */
		vm = p->p_vmspace;

		mtx_lock(&Giant);

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/*
		 * Grow the stack if necessary.  vm_map_growstack only
		 * fails if the va falls into a growable stack region
		 * and the stack growth fails.  If it succeeds, or the
		 * va was not within a growable stack region, fault in
		 * the user page.
		 */
		if (vm_map_growstack(p, va) != KERN_SUCCESS)
			rv = KERN_FAILURE;
		else
			rv = vm_fault(&vm->vm_map, va, prot, flags);

		/*
		 * Now the process can be swapped again.
		 */
		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
	} else {
		/*
		 * This is a fault on kernel virtual memory.  Attempts to access
		 * kernel memory from user mode cause privileged action traps,
		 * not page fault.
		 */
		KASSERT(tf->tf_tstate & TSTATE_PRIV,
		    ("trap_pfault: fault on nucleus context from user mode"));

		mtx_lock(&Giant);

		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(kernel_map, va, prot, VM_FAULT_NORMAL);
	}
	mtx_unlock(&Giant);

	CTR3(KTR_TRAP, "trap_pfault: return td=%p va=%#lx rv=%d",
	    td, va, rv);
	if (rv == KERN_SUCCESS)
		return (0);
	if ((tf->tf_tstate & TSTATE_PRIV) != 0) {
		/*
		 * Unresolved kernel-mode fault: redirect execution to the
		 * onfault handler if one is installed (copyin/copyout
		 * recovery), otherwise fall through and signal/panic.
		 */
		if (td->td_intr_nesting_level == 0 &&
		    pcb->pcb_onfault != NULL) {
			tf->tf_tpc = (u_long)pcb->pcb_onfault;
			tf->tf_tnpc = tf->tf_tpc + 4;
			return (0);
		}
	}
	return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
514
/* Maximum number of arguments that can be passed via the out registers. */
#define	REG_MAXARGS	6

/*
 * Syscall handler. The arguments to the syscall are passed in the o registers
 * by the caller, and are saved in the trap frame. The syscall number is passed
 * in %g1 (and also saved in the trap frame).  Arguments beyond the sixth are
 * fetched from the caller's stack frame with copyin().
 */
void
syscall(struct trapframe *tf)
{
	struct sysent *callp;	/* resolved syscall table entry */
	struct thread *td;
	register_t args[8];	/* staging buffer when args spill to the stack */
	register_t *argp;	/* points at the actual argument vector */
	struct proc *p;
	u_int sticks;		/* statclock ticks at entry, for userret() */
	u_long code;		/* syscall number */
	u_long tpc;		/* saved %tpc, for ERESTART rewind */
	int reg;		/* first out register holding an argument */
	int regcnt;		/* number of arguments passed in registers */
	int narg;
	int error;

	KASSERT(PCPU_GET(curthread) != NULL, ("trap: curthread NULL"));
	KASSERT(PCPU_GET(curthread)->td_kse != NULL, ("trap: curkse NULL"));
	KASSERT(PCPU_GET(curthread)->td_proc != NULL, ("trap: curproc NULL"));

	atomic_add_int(&cnt.v_syscall, 1);

	td = PCPU_GET(curthread);
	p = td->td_proc;

	narg = 0;
	error = 0;
	reg = 0;
	regcnt = REG_MAXARGS;

	sticks = td->td_kse->ke_sticks;
	td->td_frame = tf;
	if (td->td_ucred != p->p_ucred)
		cred_update_thread(td);
	code = tf->tf_global[1];

	/*
	 * For syscalls, we don't want to retry the faulting instruction
	 * (usually), instead we need to advance one instruction.
	 */
	tpc = tf->tf_tpc;
	TF_DONE(tf);

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is MP aware.
		 */
#if 0
		(*p->p_sysent->sv_prepsyscall)(tf, args, &code, &params);
#endif
	} else 	if (code == SYS_syscall || code == SYS___syscall) {
		/*
		 * Indirect syscall: the real syscall number is the first
		 * argument, so shift the argument registers by one.
		 */
		code = tf->tf_out[reg++];
		regcnt--;
	}

	/* Apply the ABI's syscall number mask, if any. */
	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	/* Out-of-range numbers map to entry 0 (the "nosys" slot). */
	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	if (narg <= regcnt)
		argp = &tf->tf_out[reg];
	else {
		/*
		 * More arguments than fit in registers: copy the register
		 * portion and fetch the rest from the caller's stack frame
		 * (biased by SPOFF on sparc64).
		 */
		KASSERT(narg <= sizeof(args) / sizeof(args[0]),
		    ("Too many syscall arguments!"));
		argp = args;
		bcopy(&tf->tf_out[reg], args, sizeof(args[0]) * regcnt);
		error = copyin((void *)(tf->tf_out[6] + SPOFF +
		    offsetof(struct frame, fr_pad[6])),
		    &args[regcnt], (narg - regcnt) * sizeof(args[0]));
		if (error != 0)
			goto bad;
	}

	CTR5(KTR_SYSC, "syscall: td=%p %s(%#lx, %#lx, %#lx)", td,
	    syscallnames[code], argp[0], argp[1], argp[2]);

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_lock(&Giant);

#ifdef KTRACE
	/*
	 * We have to obtain the MP lock no matter what if
	 * we are ktracing
	 */
	if (KTRPOINT(p, KTR_SYSCALL)) {
		ktrsyscall(p->p_tracep, code, narg, argp);
	}
#endif
	td->td_retval[0] = 0;
	td->td_retval[1] = 0;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/* Dispatch to the syscall implementation. */
	error = (*callp->sy_call)(td, argp);

	CTR5(KTR_SYSC, "syscall: p=%p error=%d %s return %#lx %#lx ", p,
	    error, syscallnames[code], td->td_retval[0], td->td_retval[1]);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/* Success: return values in %o0/%o1, clear the carry bit. */
		tf->tf_out[0] = td->td_retval[0];
		tf->tf_out[1] = td->td_retval[1];
		tf->tf_tstate &= ~TSTATE_XCC_C;
		break;

	case ERESTART:
		/*
		 * Undo the tpc advancement we have done above, we want to
		 * reexecute the system call.
		 */
		tf->tf_tpc = tpc;
		tf->tf_tnpc -= 4;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		/* Translate the errno through the ABI's error table. */
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		/* Failure: errno in %o0, carry bit set. */
		tf->tf_out[0] = error;
		tf->tf_tstate |= TSTATE_XCC_C;
		break;
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(td, tf, sticks);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		ktrsysret(p->p_tracep, code, error, td->td_retval[0]);
	}
#endif

	/*
	 * Release Giant if we had to get it.  Don't use mtx_owned(),
	 * we want to catch broken syscalls.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_unlock(&Giant);

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

#ifdef DIAGNOSTIC
	cred_free_thread(td);
#endif
#ifdef WITNESS
	if (witness_list(td)) {
		panic("system call %s returning with mutex(s) held\n",
		    syscallnames[code]);
	}
#endif
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}
703