subr_syscall.c revision 1549
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 *	$Id: trap.c,v 1.22 1994/04/07 10:51:00 davidg Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/proc.h>
#include <sys/user.h>
#include <sys/acct.h>
#include <sys/kernel.h>
#include <sys/syscall.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/cpu.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/eflags.h>

#include <machine/trap.h>

#include "isa.h"
#include "npx.h"
#include "ddb.h"

#ifdef	__GNUC__

/*
 * The "r" constraint could be "rm" except for fatal bugs in gas.  As usual,
 * we omit the size from the mov instruction to avoid nonfatal bugs in gas.
 */
#define	read_gs()	({ u_short gs; __asm("mov %%gs,%0" : "=r" (gs)); gs; })
#define	write_gs(newgs)	__asm("mov %0,%%gs" : : "r" ((u_short) newgs))

#else	/* not __GNUC__ */

u_short	read_gs		__P((void));
void	write_gs	__P((/* promoted u_short */ int gs));

#endif	/* __GNUC__ */
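/*
 * The inline versions above are used by trap() below to detect and
 * repair a user-corrupted %gs before retrying a copyin()/copyout()
 * fault; the non-GNU-C prototypes assume equivalent routines are
 * supplied elsewhere (e.g. in assembler support code).
 */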

extern int grow(struct proc *,u_int);

struct	sysent sysent[];
int	nsysent;

#define MAX_TRAP_MSG		27
char *trap_msg[] = {
	"reserved addressing fault",		/*  0 T_RESADFLT */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"reserved operand fault",		/*  2 T_RESOPFLT */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"system call trap",			/*  5 T_SYSCALL */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"segmentation (limit) fault",		/*  8 T_SEGFLT */
	"protection fault",			/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"page table fault",			/* 13 T_TABLEFLT */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"kernel stack pointer not valid",	/* 15 T_KSPNOTVAL */
	"bus error",				/* 16 T_BUSERR */
	"kernel debugger fault",		/* 17 T_KDBTRAP */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
};

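/*
 * pde_v(v) tests the valid bit of the page directory entry that maps
 * virtual address v (the top 10 address bits index the 1024-entry
 * page directory).  It appears to be unused in this file.
 */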
#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)

/*
 * trap(frame):
 *	Exception, fault, and trap interface to BSD kernel. This
 * common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed. Note that the
 * effect is as if the arguments were passed call by reference.
 */

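/*
 * Concretely, "call by reference" means the trapframe lives on the
 * kernel stack built by the gate stub, so stores into frame.* below
 * (e.g. frame.tf_eip on a copy fault) are what the stub restores to
 * the machine registers when it returns with iret.
 */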
/*ARGSUSED*/
void
trap(frame)
	struct trapframe frame;
{
	register int i;
	register struct proc *p = curproc;
	u_quad_t sticks = 0;
	int ucode, type, code, eva, fault_type;

	frame.tf_eflags &= ~PSL_NT;	/* clear nested trap XXX */
	type = frame.tf_trapno;
#if NDDB > 0
	if (curpcb && curpcb->pcb_onfault) {
		if (frame.tf_trapno == T_BPTFLT
		    || frame.tf_trapno == T_TRCTRAP)
			if (kdb_trap (type, 0, &frame))
				return;
	}
#endif

	if (curpcb == 0 || curproc == 0)
		goto skiptoswitch;
	if (curpcb->pcb_onfault && frame.tf_trapno != T_PAGEFLT) {
		extern int _udatasel;

		if (read_gs() != (u_short) _udatasel)
			/*
			 * Some user has corrupted %gs but we depend on it in
			 * copyout() etc.  Fix it up and retry.
			 *
			 * (We don't preserve %fs or %gs, so users can change
			 * them to either _ucodesel, _udatasel or a not-present
			 * selector, possibly ORed with 0 to 3, making them
			 * volatile for other users.  Not preserving them saves
			 * time and doesn't lose functionality or open security
			 * holes.)
			 */
			write_gs(_udatasel);
		else
copyfault:
			frame.tf_eip = (int)curpcb->pcb_onfault;
		return;
	}

	if (ISPL(frame.tf_cs) == SEL_UPL) {
		type |= T_USER;
		p->p_md.md_regs = (int *)&frame;
		sticks = p->p_sticks;
	}

skiptoswitch:
	ucode=0;
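	/*
	 * %cr2 holds the linear address of the most recent page fault and
	 * tf_err the hardware error code; both are fetched up front but
	 * are only meaningful for the trap types that supply them.
	 */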
	eva = rcr2();
	code = frame.tf_err;

	if ((type & ~T_USER) == T_PAGEFLT)
		goto pfault;

	switch (type) {
	case T_SEGNPFLT|T_USER:
	case T_STKFLT|T_USER:
	case T_PROTFLT|T_USER:		/* protection fault */
		ucode = code + BUS_SEGM_FAULT ;
		i = SIGBUS;
		break;

	case T_PRIVINFLT|T_USER:	/* privileged instruction fault */
	case T_RESADFLT|T_USER:		/* reserved addressing fault */
	case T_RESOPFLT|T_USER:		/* reserved operand fault */
	case T_FPOPFLT|T_USER:		/* coprocessor operand fault */
		ucode = type &~ T_USER;
		i = SIGILL;
		break;

	case T_ASTFLT|T_USER:		/* Allow process switch */
		astoff();
		cnt.v_soft++;
		if ((p->p_flag & P_OWEUPC) && p->p_stats->p_prof.pr_scale) {
			addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
			p->p_flag &= ~P_OWEUPC;
		}
		goto out;

	case T_DNA|T_USER:
#if NNPX > 0
		/* if a transparent fault (due to context switch "late") */
		if (npxdna()) return;
#endif	/* NNPX > 0 */
#if   defined(MATH_EMULATE) || defined(GPL_MATH_EMULATE)
		i = math_emulate(&frame);
		if (i == 0) return;
#else	/* MATH_EMULATE || GPL_MATH_EMULATE */
		panic("trap: math emulation necessary!");
#endif	/* MATH_EMULATE || GPL_MATH_EMULATE */
		ucode = FPE_FPU_NP_TRAP;
		break;

	case T_BOUND|T_USER:
		ucode = FPE_SUBRNG_TRAP;
		i = SIGFPE;
		break;

	case T_OFLOW|T_USER:
		ucode = FPE_INTOVF_TRAP;
		i = SIGFPE;
		break;

	case T_DIVIDE|T_USER:
		ucode = FPE_INTDIV_TRAP;
		i = SIGFPE;
		break;

	case T_ARITHTRAP|T_USER:
		ucode = code;
		i = SIGFPE;
		break;

	pfault:
	case T_PAGEFLT:			/* allow page faults in kernel mode */
	case T_PAGEFLT|T_USER:		/* page fault */
	    {
		vm_offset_t va;
		struct vmspace *vm;
		vm_map_t map = 0;
		int rv = 0, oldflags;
		vm_prot_t ftype;
		unsigned v;
		extern vm_map_t kernel_map;

		va = trunc_page((vm_offset_t)eva);

		/*
		 * Don't allow user-mode faults in kernel address space
		 */
		if ((type == (T_PAGEFLT|T_USER)) && (va >= KERNBASE)) {
			goto nogo;
		}

		if ((p == 0) || (type == T_PAGEFLT && va >= KERNBASE)) {
			vm = 0;
			map = kernel_map;
		} else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		if (code & PGEX_W)
			ftype = VM_PROT_READ | VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

		if (map != kernel_map) {
			vm_offset_t pa;
			vm_offset_t v = (vm_offset_t) vtopte(va);
			vm_page_t ptepg;

			/*
			 * Keep swapout from messing with us during this
			 *	critical time.
			 */
			++p->p_lock;

			/*
			 * Grow the stack if necessary
			 */
			if ((caddr_t)va > vm->vm_maxsaddr
			    && (caddr_t)va < (caddr_t)USRSTACK) {
				if (!grow(p, va)) {
					rv = KERN_FAILURE;
					--p->p_lock;
					goto nogo;
				}
			}

			/*
			 * Check if page table is mapped, if not,
			 *	fault it first
			 */

			/* Fault the pte only if needed: */
			*(volatile char *)v += 0;

			ptepg = (vm_page_t) pmap_pte_vm_page(vm_map_pmap(map), v);
			vm_page_hold(ptepg);
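			/*
			 * The hold keeps the page table page from being
			 * reclaimed while vm_fault() runs; it is dropped
			 * below and the page freed if nothing else holds
			 * or wires it.
			 */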

			/* Fault in the user page: */
			rv = vm_fault(map, va, ftype, FALSE);

			vm_page_unhold(ptepg);

			/*
			 * page table pages don't need to be kept if they
			 * are not held
			 */
			if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
				pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
					VM_PROT_NONE);
				vm_page_free(ptepg);
			}

			--p->p_lock;
		} else {
			/*
			 * Since we know that kernel virtual addresses
			 * always have pte pages mapped, we just have to fault
			 * the page.
			 */
			rv = vm_fault(map, va, ftype, FALSE);
		}

		if (rv == KERN_SUCCESS) {
			if (type == T_PAGEFLT)
				return;
			goto out;
		}
nogo:
		if (type == T_PAGEFLT) {
			if (curpcb->pcb_onfault)
				goto copyfault;

			goto we_re_toast;
		}
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;

		/* kludge to pass faulting virtual address to sendsig */
		ucode = type &~ T_USER;
		frame.tf_err = eva;

		break;
	    }

#if NDDB == 0
	case T_TRCTRAP:	 /* trace trap -- someone single stepping lcall's */
		frame.tf_eflags &= ~PSL_T;

			/* Q: how do we turn it on again? */
		return;
#endif

	case T_BPTFLT|T_USER:		/* bpt instruction fault */
	case T_TRCTRAP|T_USER:		/* trace trap */
		frame.tf_eflags &= ~PSL_T;
		i = SIGTRAP;
		break;

#if NISA > 0
	case T_NMI:
	case T_NMI|T_USER:
#if NDDB > 0
		/* NMI can be hooked up to a pushbutton for debugging */
		printf ("NMI ... going to debugger\n");
		if (kdb_trap (type, 0, &frame))
			return;
#endif
		/* machine/parity/power fail/"kitchen sink" faults */
		if (isa_nmi(code) == 0) return;
		/* FALL THROUGH */
#endif
	default:
	we_re_toast:

		fault_type = type & ~T_USER;
#if NDDB > 0
		if ((fault_type == T_BPTFLT) || (fault_type == T_TRCTRAP)) {
			if (kdb_trap (type, 0, &frame))
				return;
		}
#endif
		if (fault_type <= MAX_TRAP_MSG)
			printf("\n\nFatal trap %d: %s while in %s mode\n",
				fault_type, trap_msg[fault_type],
				ISPL(frame.tf_cs) == SEL_UPL ? "user" : "kernel");
		if (fault_type == T_PAGEFLT) {
			printf("fault virtual address	= 0x%x\n", eva);
			printf("fault code		= %s %s, %s\n",
				code & PGEX_U ? "user" : "supervisor",
				code & PGEX_W ? "write" : "read",
				code & PGEX_P ? "protection violation" : "page not present");
		}
		printf("instruction pointer	= 0x%x\n", frame.tf_eip);
		printf("processor eflags	= ");
		if (frame.tf_eflags & EFL_TF)
			printf("trace/trap, ");
		if (frame.tf_eflags & EFL_IF)
			printf("interrupt enabled, ");
		if (frame.tf_eflags & EFL_NT)
			printf("nested task, ");
		if (frame.tf_eflags & EFL_RF)
			printf("resume, ");
		if (frame.tf_eflags & EFL_VM)
			printf("vm86, ");
		printf("IOPL = %d\n", (frame.tf_eflags & EFL_IOPL) >> 12);
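		/* IOPL occupies bits 12-13 of eflags, hence the shift by 12. */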
		printf("current process		= ");
		if (curproc) {
			printf("%d (%s)\n",
			    curproc->p_pid, curproc->p_comm ?
			    curproc->p_comm : "");
		} else {
			printf("Idle\n");
		}
		printf("interrupt mask		= ");
		if ((cpl & net_imask) == net_imask)
			printf("net ");
		if ((cpl & tty_imask) == tty_imask)
			printf("tty ");
		if ((cpl & bio_imask) == bio_imask)
			printf("bio ");
		if (cpl == 0)
			printf("none");
		printf("\n");

#ifdef KDB
		if (kdb_trap(&psl))
			return;
#endif
#if NDDB > 0
		if (kdb_trap (type, 0, &frame))
			return;
#endif
		if (fault_type <= MAX_TRAP_MSG)
			panic(trap_msg[fault_type]);
		else
			panic("unknown/reserved trap");

		/* NOTREACHED */
	}

	trapsignal(p, i, ucode);
	if ((type & T_USER) == 0)
		return;

#ifdef DIAGNOSTIC
	fault_type = type & ~T_USER;
	if (fault_type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[fault_type]);
		if ((fault_type == T_PAGEFLT) || (fault_type == T_PROTFLT))
			uprintf(", fault VA = 0x%x", eva);
		uprintf("\n");
	}
#endif

out:
	while (i = CURSIG(p))
		postsig(i);
	p->p_priority = p->p_usrpri;
	if (want_resched) {
		int s;
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrunqueue ourselves but before we
		 * mi_switch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		splx(s);
		while (i = CURSIG(p))
			postsig(i);
	}
	if (p->p_stats->p_prof.pr_scale) {
		u_quad_t ticks = p->p_sticks - sticks;

		if (ticks) {
#ifdef PROFTIMER
			extern int profscale;
			addupc(frame.tf_eip, &p->p_stats->p_prof,
			    ticks * profscale);
#else
			addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
#endif
		}
	}
	curpriority = p->p_priority;
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int trapwrite(addr)
	unsigned addr;
{
	struct proc *p;
	vm_offset_t va, v;
	struct vmspace *vm;
	int oldflags;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if ((caddr_t)va >= vm->vm_maxsaddr
	    && (caddr_t)va < (caddr_t)USRSTACK) {
		if (!grow(p, va)) {
			--p->p_lock;
			return (1);
		}
	}

	v = trunc_page(vtopte(va));
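	/*
	 * v is the kernel virtual address of the pte that maps va; the
	 * page holding it is wired below so it stays resident while the
	 * data page is faulted in.
	 */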

	/*
	 * wire the pte page
	 */
	if (va < USRSTACK) {
		vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);

	/*
	 * unwire the pte page
	 */
	if (va < USRSTACK) {
		vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
	}

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}

/*
 * syscall(frame):
 *	System call request from POSIX system call gate interface to kernel.
 * Like trap(), argument is call by reference.
 */
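/*
 * For reference, a sketch of the user-side sequence this entry point
 * assumes (foo and SYS_foo are placeholders; the actual libc stubs
 * may differ in detail):
 *
 *	caller:		pushl arg2 ; pushl arg1 ; call foo
 *	foo (stub):	movl	$SYS_foo,%eax
 *			lcall	$7,$0		# system call gate
 *			jc	error		# carry set: %eax is errno
 *			ret
 *
 * so at entry tf_eax holds the call number and tf_esp points at the
 * stub caller's return address, with the arguments just above it.
 */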
/*ARGSUSED*/
void
syscall(frame)
	volatile struct trapframe frame;
{
	register int *locr0 = ((int *)&frame);
	register caddr_t params;
	register int i;
	register struct sysent *callp;
	register struct proc *p = curproc;
	u_quad_t sticks;
	int error, opc;
	int args[8], rval[2];
	u_int code;

#ifdef lint
	r0 = 0; r0 = r0; r1 = 0; r1 = r1;
#endif
	sticks = p->p_sticks;
	if (ISPL(frame.tf_cs) != SEL_UPL)
		panic("syscall");

	code = frame.tf_eax;
	p->p_md.md_regs = (int *)&frame;
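	/*
	 * The word at the saved user %esp is the return address pushed by
	 * the stub's caller; the system call arguments sit just above it,
	 * hence the sizeof (int) offset below.
	 */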
	params = (caddr_t)frame.tf_esp + sizeof (int) ;

	/*
	 * Reconstruct pc, assuming lcall $X,y is 7 bytes, as it is always.
	 */
	opc = frame.tf_eip - 7;
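	/*
	 * opc is used below to back %eip up over the lcall when a call
	 * returns ERESTART, so it is reissued on return to user mode.
	 */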
	/*
	 * Check for the indirect forms: SYS_syscall passes the call number
	 * as an int argument, SYS___syscall as a quad (so the remaining
	 * arguments stay quad-aligned).
	 */
	if (code == SYS_syscall) {
		/*
		 * Code is first argument, followed by actual args.
		 */
		code = fuword(params);
		params += sizeof (int);
	} else if (code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad, so as to maintain
		 * quad alignment for the rest of the arguments.
		 */
		code = fuword(params + _QUAD_LOWWORD * sizeof(int));
		params += sizeof(quad_t);
	}

	if (code >= nsysent)
		callp = &sysent[0];
	else
		callp = &sysent[code];

	if ((i = callp->sy_narg * sizeof (int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;	/* carry bit */
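		/*
		 * The user-mode stub tests the carry flag on return; when
		 * it is set, %eax holds an errno value rather than a result.
		 */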
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
		goto done;
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
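	/*
	 * rval[1] defaults to the old %edx so single-value calls leave it
	 * unchanged; calls that return two values (e.g. fork, pipe) fill
	 * in both elements.
	 */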
	rval[0] = 0;
	rval[1] = frame.tf_edx;
/*pg("%d. s %d\n", p->p_pid, code);*/
	error = (*callp->sy_call)(p, args, rval);
	if (error == ERESTART)
		frame.tf_eip = opc;
	else if (error != EJUSTRETURN) {
		if (error) {
/*pg("error %d", error);*/
			frame.tf_eax = error;
			frame.tf_eflags |= PSL_C;	/* carry bit */
		} else {
			frame.tf_eax = rval[0];
			frame.tf_edx = rval[1];
			frame.tf_eflags &= ~PSL_C;	/* carry bit */
		}
	}
	/* else if (error == EJUSTRETURN) */
		/* nothing to do */
done:
	/*
	 * Reinitialize proc pointer `p' as it may be different
	 * if this is a child returning from fork syscall.
	 */
	p = curproc;
	while (i = CURSIG(p))
		postsig(i);
	p->p_priority = p->p_usrpri;
	if (want_resched) {
		int s;
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrunqueue ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		splx(s);
		while (i = CURSIG(p))
			postsig(i);
	}
	if (p->p_stats->p_prof.pr_scale) {
		u_quad_t ticks = p->p_sticks - sticks;

		if (ticks) {
#ifdef PROFTIMER
			extern int profscale;
			addupc(frame.tf_eip, &p->p_stats->p_prof,
			    ticks * profscale);
#else
			addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
#endif
		}
	}
	curpriority = p->p_priority;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
}