/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 *	$Id: trap.c,v 1.12 1993/12/19 00:50:09 wollman Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "npx.h"
#include "machine/cpu.h"
#include "machine/psl.h"
#include "machine/reg.h"

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "user.h"
#include "acct.h"
#include "kernel.h"
#ifdef KTRACE
#include "ktrace.h"
#endif

#include "vm/vm_param.h"
#include "vm/pmap.h"
#include "vm/vm_map.h"
#include "vm/vm_user.h"
#include "sys/vmmeter.h"

#include "machine/trap.h"

#ifdef	__GNUC__

/*
 * The "r" constraint could be "rm" except for fatal bugs in gas.  As usual,
 * we omit the size from the mov instruction to avoid nonfatal bugs in gas.
 */
#define	read_gs()	({ u_short gs; __asm("mov %%gs,%0" : "=r" (gs)); gs; })
#define	write_gs(newgs)	__asm("mov %0,%%gs" : : "r" ((u_short) newgs))

#else	/* not __GNUC__ */

u_short	read_gs		__P((void));
void	write_gs	__P((/* promoted u_short */ int gs));

#endif	/* __GNUC__ */

struct	sysent sysent[];
int	nsysent;
extern short cpl;

#define MAX_TRAP_MSG		27
char *trap_msg[] = {
	"reserved addressing fault",		/*  0 T_RESADFLT */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"reserved operand fault",		/*  2 T_RESOPFLT */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"system call trap",			/*  5 T_SYSCALL */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"segmentation (limit) fault",		/*  8 T_SEGFLT */
	"protection fault",			/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"page table fault",			/* 13 T_TABLEFLT */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"kernel stack pointer not valid",	/* 15 T_KSPNOTVAL */
	"bus error",				/* 16 T_BUSERR */
	"kernel debugger fault",		/* 17 T_KDBTRAP */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
};

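/*
 * True iff the page-directory entry that maps virtual address v is
 * valid, i.e. the page table page covering v is present.
 */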
#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)

/*
 * trap(frame):
 *	Exception, fault, and trap interface to BSD kernel. This
 * common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed. Note that the
 * effect is as if the arguments were passed call by reference.
 */

/*ARGSUSED*/
void
trap(frame)
	struct trapframe frame;
{
	register int i;
	register struct proc *p = curproc;
	struct timeval syst;
	int ucode, type, code, eva;

	frame.tf_eflags &= ~PSL_NT;	/* clear nested trap XXX */
	type = frame.tf_trapno;
#include "ddb.h"
#if NDDB > 0
	if (curpcb && curpcb->pcb_onfault) {
		if (frame.tf_trapno == T_BPTFLT
		    || frame.tf_trapno == T_TRCTRAP)
			if (kdb_trap (type, 0, &frame))
				return;
	}
#endif

/*pg("trap type %d code = %x eip = %x cs = %x eva = %x esp %x",
			frame.tf_trapno, frame.tf_err, frame.tf_eip,
			frame.tf_cs, rcr2(), frame.tf_esp);*/
	if (curpcb == 0 || curproc == 0)
		goto we_re_toast;
	if (curpcb->pcb_onfault && frame.tf_trapno != T_PAGEFLT) {
		extern int _udatasel;

		if (read_gs() != (u_short) _udatasel)
			/*
			 * Some user has corrupted %gs but we depend on it in
			 * copyout() etc.  Fix it up and retry.
			 *
			 * (We don't preserve %fs or %gs, so users can change
			 * them to either _ucodesel, _udatasel or a not-present
			 * selector, possibly ORed with 0 to 3, making them
			 * volatile for other users.  Not preserving them saves
			 * time and doesn't lose functionality or open security
			 * holes.)
			 */
			write_gs(_udatasel);
		else
copyfault:
			frame.tf_eip = (int)curpcb->pcb_onfault;
		return;
	}

	syst = p->p_stime;
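	/*
	 * If the saved code segment selector has user privilege, the trap
	 * came from user mode; tag the type with T_USER so the cases below
	 * can distinguish user faults from kernel faults.
	 */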
	if (ISPL(frame.tf_cs) == SEL_UPL) {
		type |= T_USER;
		p->p_regs = (int *)&frame;
	}

	ucode = 0;
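	/* %cr2 holds the faulting linear address of the most recent page fault */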
	eva = rcr2();
	code = frame.tf_err;
	switch (type) {

	default:
	we_re_toast:
#ifdef KDB
		if (kdb_trap(&psl))
			return;
#endif
#if NDDB > 0
		if (kdb_trap (type, 0, &frame))
			return;
#endif

		if ((type & ~T_USER) <= MAX_TRAP_MSG)
			printf("\n\nFatal trap %d: %s while in %s mode\n",
				type & ~T_USER, trap_msg[type & ~T_USER],
				(type & T_USER) ? "user" : "kernel");

		printf("trap type = %d, code = %x\n     eip = %x, cs = %x, eflags = %x, ",
			frame.tf_trapno, frame.tf_err, frame.tf_eip,
			frame.tf_cs, frame.tf_eflags);
		eva = rcr2();
		printf("cr2 = %x, current priority = %x\n", eva, cpl);

		type &= ~T_USER;
		if (type <= MAX_TRAP_MSG)
			panic(trap_msg[type]);
		else
			panic("unknown/reserved trap");

		/*NOTREACHED*/

	case T_SEGNPFLT|T_USER:
	case T_STKFLT|T_USER:
	case T_PROTFLT|T_USER:		/* protection fault */
		ucode = code + BUS_SEGM_FAULT ;
		i = SIGBUS;
		break;

	case T_PRIVINFLT|T_USER:	/* privileged instruction fault */
	case T_RESADFLT|T_USER:		/* reserved addressing fault */
	case T_RESOPFLT|T_USER:		/* reserved operand fault */
	case T_FPOPFLT|T_USER:		/* coprocessor operand fault */
		ucode = type &~ T_USER;
		i = SIGILL;
		break;

	case T_ASTFLT|T_USER:		/* Allow process switch */
		astoff();
		cnt.v_soft++;
		if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
			addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
			p->p_flag &= ~SOWEUPC;
		}
		goto out;

	case T_DNA|T_USER:
#if NNPX > 0
		/* if a transparent fault (due to context switch "late") */
		if (npxdna()) return;
#endif	/* NNPX > 0 */
#ifdef	MATH_EMULATE
		i = math_emulate(&frame);
		if (i == 0) return;
#else	/* MATH_EMULATE */
		panic("trap: math emulation necessary!");
#endif	/* MATH_EMULATE */
		ucode = FPE_FPU_NP_TRAP;
		break;

	case T_BOUND|T_USER:
		ucode = FPE_SUBRNG_TRAP;
		i = SIGFPE;
		break;

	case T_OFLOW|T_USER:
		ucode = FPE_INTOVF_TRAP;
		i = SIGFPE;
		break;

	case T_DIVIDE|T_USER:
		ucode = FPE_INTDIV_TRAP;
		i = SIGFPE;
		break;

	case T_ARITHTRAP|T_USER:
		ucode = code;
		i = SIGFPE;
		break;

	case T_PAGEFLT:			/* allow page faults in kernel mode */
#if 0
		/* XXX - check only applies to 386's and 486's with WP off */
		if (code & PGEX_P) goto we_re_toast;
#endif

		/* fall into */
	case T_PAGEFLT|T_USER:		/* page fault */
	    {
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map;
		int rv = 0;
		vm_prot_t ftype;
		extern vm_map_t kernel_map;
		unsigned nss;
		char *v;

		va = trunc_page((vm_offset_t)eva);
		/*
		 * It is only a kernel address space fault iff:
		 * 	1. (type & T_USER) == 0  and
		 * 	2. pcb_onfault not set or
		 *	3. pcb_onfault set but supervisor space fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
		if (type == T_PAGEFLT && va >= KERNBASE)
			map = kernel_map;
		else
			map = &vm->vm_map;
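		/*
		 * PGEX_W in the hardware error code means the faulting
		 * access was a write, so ask for write permission too.
		 */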
		if (code & PGEX_W)
			ftype = VM_PROT_READ | VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

#ifdef DEBUG
		if (map == kernel_map && va == 0) {
			printf("trap: bad kernel access at %x\n", va);
			goto we_re_toast;
		}
#endif

		/*
		 * XXX: rude hack to make stack limits "work"
		 */
		nss = 0;
		if ((caddr_t)va >= vm->vm_maxsaddr
			&& (caddr_t)va < (caddr_t)USRSTACK
			&& map != kernel_map) {
			nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
			if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) {
				rv = KERN_FAILURE;
				goto nogo;
			}

			if (vm->vm_ssize && roundup(vm->vm_ssize << PGSHIFT,
			    DFLSSIZ) < nss) {
				int grow_amount;
				/*
				 * If necessary, grow the VM that the stack occupies
				 * to allow for the rlimit. This allows us to not have
				 * to allocate all of the VM up-front in execve (which
				 * is expensive).
				 * Grow the VM by the amount requested rounded up to
				 * the nearest DFLSSIZ to provide for some hysteresis.
				 */
				grow_amount = roundup(nss, DFLSSIZ);
				v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT,
				    DFLSSIZ) - grow_amount;
				/*
				 * If there isn't enough room to extend by DFLSSIZ, then
				 * just extend to the maximum size
				 */
				if (v < vm->vm_maxsaddr) {
					v = vm->vm_maxsaddr;
					grow_amount = MAXSSIZ - (vm->vm_ssize << PGSHIFT);
				}
				if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
						grow_amount, FALSE) !=
				    KERN_SUCCESS) {
					goto nogo;
				}
			}
		}

		/* check if page table is mapped, if not, fault it first */
		if (!pde_v(va)) {
			v = (char *)trunc_page(vtopte(va));
			rv = vm_fault(map, (vm_offset_t)v, ftype, FALSE);
			if (rv != KERN_SUCCESS) goto nogo;
			/* check if page table fault, increment wiring */
			vm_map_pageable(map, (vm_offset_t)v,
					round_page(v+1), FALSE);
		} else v=0;
		rv = vm_fault(map, va, ftype, FALSE);
		if (rv == KERN_SUCCESS) {
			/*
			 * XXX: continuation of rude stack hack
			 */
			nss = nss >> PGSHIFT;
			if (nss > vm->vm_ssize)
				vm->vm_ssize = nss;
			/*
			 * va could be a page table address, if the fault
			 * occurred from within copyout.  In that case,
			 * we have to wire it. (EWS 12/11/93)
			 */
			if (ispt(va))
				vm_map_pageable(map, va, round_page(va+1), FALSE);
			va = trunc_page(vtopte(va));
			/*
			 * for page table, increment wiring
			 * as long as not a page table fault as well
			 */
			if (!v && type != T_PAGEFLT)
				vm_map_pageable(map, va, round_page(va+1), FALSE);
			if (type == T_PAGEFLT)
				return;
			goto out;
		}
nogo:
		if (type == T_PAGEFLT) {
			if (curpcb->pcb_onfault)
				goto copyfault;
			printf("vm_fault(%x, %x, %x, 0) -> %x\n",
			       map, va, ftype, rv);
			printf("  type %x, code %x\n",
			       type, code);
			goto we_re_toast;
		}
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;

		/* kludge to pass faulting virtual address to sendsig */
		ucode = type &~ T_USER;
		frame.tf_err = eva;

		break;
	    }

#if NDDB == 0
	case T_TRCTRAP:	 /* trace trap -- someone single stepping lcall's */
		frame.tf_eflags &= ~PSL_T;

			/* Q: how do we turn it on again? */
		return;
#endif

	case T_BPTFLT|T_USER:		/* bpt instruction fault */
	case T_TRCTRAP|T_USER:		/* trace trap */
		frame.tf_eflags &= ~PSL_T;
		i = SIGTRAP;
		break;

#include "isa.h"
#if	NISA > 0
	case T_NMI:
	case T_NMI|T_USER:
#if NDDB > 0
		/* NMI can be hooked up to a pushbutton for debugging */
		printf ("NMI ... going to debugger\n");
		if (kdb_trap (type, 0, &frame))
			return;
#endif
		/* machine/parity/power fail/"kitchen sink" faults */
		if (isa_nmi(code) == 0) return;
		else goto we_re_toast;
#endif
	}

	trapsignal(p, i, ucode);
	if ((type & T_USER) == 0)
		return;
out:
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		int s;
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
	if (p->p_stats->p_prof.pr_scale) {
		int ticks;
		struct timeval *tv = &p->p_stime;

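		/*
		 * Charge the profile buffer with the system time accumulated
		 * during the trap: elapsed milliseconds divided by the length
		 * of a clock tick (`tick' is in microseconds, so tick / 1000
		 * is milliseconds per tick).
		 */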
		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks) {
#ifdef PROFTIMER
			extern int profscale;
			addupc(frame.tf_eip, &p->p_stats->p_prof,
			    ticks * profscale);
#else
			addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
#endif
		}
	}
	curpri = p->p_pri;
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int trapwrite(addr)
	unsigned addr;
{
	unsigned nss;
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	char *v;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);
	/*
	 * XXX: rude stack hack adapted from trap().
	 */
	nss = 0;
	p = curproc;
	vm = p->p_vmspace;
	if ((caddr_t)va >= vm->vm_maxsaddr
	    && (caddr_t)va < (caddr_t)USRSTACK) {
		nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
		if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
			return (1);

		if (vm->vm_ssize && roundup(vm->vm_ssize << PGSHIFT,
			DFLSSIZ) < nss) {
			int grow_amount;
			/*
			 * If necessary, grow the VM that the stack occupies
			 * to allow for the rlimit. This allows us to not have
			 * to allocate all of the VM up-front in execve (which
			 * is expensive).
			 * Grow the VM by the amount requested rounded up to
			 * the nearest DFLSSIZ to provide for some hysteresis.
			 */
			grow_amount = roundup(nss, DFLSSIZ);
			v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT, DFLSSIZ) -
				grow_amount;
			/*
			 * If there isn't enough room to extend by DFLSSIZ, then
			 * just extend to the maximum size
			 */
			if (v < vm->vm_maxsaddr) {
				v = vm->vm_maxsaddr;
				grow_amount = MAXSSIZ - (vm->vm_ssize << PGSHIFT);
			}
			if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
					grow_amount, FALSE)
			    != KERN_SUCCESS) {
				return(1);
			}
		}
	}

	if (vm_fault(&vm->vm_map, va, VM_PROT_READ | VM_PROT_WRITE, FALSE)
	    != KERN_SUCCESS)
		return (1);

	/*
	 * XXX: continuation of rude stack hack
	 */
	nss = nss >> PGSHIFT;
	if (nss > vm->vm_ssize)
		vm->vm_ssize = nss;

	return (0);
}

/*
 * syscall(frame):
 *	System call request from POSIX system call gate interface to kernel.
 * Like trap(), argument is call by reference.
 */
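/*
 * For reference, a typical libc system call stub (a sketch only, not part
 * of this file; SYS_write is just an example) reaches this function via
 * the call gate roughly as:
 *
 *	movl	$SYS_write,%eax		# system call number
 *	lcall	$7,$0			# trap through the LDT call gate
 *	jb	cerror			# carry set: error code is in %eax
 *
 * so the call number arrives in %eax and the arguments remain on the
 * user stack.
 */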
/*ARGSUSED*/
void
syscall(frame)
	volatile struct trapframe frame;
{
	register int *locr0 = ((int *)&frame);
	register caddr_t params;
	register int i;
	register struct sysent *callp;
	register struct proc *p = curproc;
	struct timeval syst;
	int error, opc;
	int args[8], rval[2];
	int code;

#ifdef lint
	r0 = 0; r0 = r0; r1 = 0; r1 = r1;
#endif
	syst = p->p_stime;
	if (ISPL(frame.tf_cs) != SEL_UPL)
		panic("syscall");

	code = frame.tf_eax;
	p->p_regs = (int *)&frame;
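	/*
	 * The arguments sit on the user stack just above the return address
	 * that the caller's call into the libc syscall stub pushed; skip
	 * that word so params points at the first argument.
	 */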
	params = (caddr_t)frame.tf_esp + sizeof (int) ;

	/*
	 * Reconstruct pc, assuming lcall $X,y is 7 bytes, as it always is.
	 */
	opc = frame.tf_eip - 7;
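	/*
	 * Syscall number 0 is the indirect syscall(2): the real call number
	 * is passed as the first word of the argument list.
	 */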
	if (code == 0) {
		code = fuword(params);
		params += sizeof (int);
	}
	if (code < 0 || code >= nsysent)
		callp = &sysent[0];
	else
		callp = &sysent[code];

	if ((i = callp->sy_narg * sizeof (int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;	/* carry bit */
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
		goto done;
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
#endif
	rval[0] = 0;
	rval[1] = frame.tf_edx;
/*pg("%d. s %d\n", p->p_pid, code);*/
	error = (*callp->sy_call)(p, args, rval);
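	/*
	 * Return conventions: ERESTART backs the pc up to the lcall so the
	 * call is reissued; EJUSTRETURN leaves the register state untouched;
	 * any other error is returned in %eax with the carry flag set, while
	 * success clears carry and returns results in %eax and %edx.
	 */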
	if (error == ERESTART)
		frame.tf_eip = opc;
	else if (error != EJUSTRETURN) {
		if (error) {
/*pg("error %d", error);*/
			frame.tf_eax = error;
			frame.tf_eflags |= PSL_C;	/* carry bit */
		} else {
			frame.tf_eax = rval[0];
			frame.tf_edx = rval[1];
			frame.tf_eflags &= ~PSL_C;	/* carry bit */
		}
	}
	/* else if (error == EJUSTRETURN) */
		/* nothing to do */
done:
	/*
	 * Reinitialize proc pointer `p' as it may be different
	 * if this is a child returning from fork syscall.
	 */
	p = curproc;
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		int s;
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
	if (p->p_stats->p_prof.pr_scale) {
		int ticks;
		struct timeval *tv = &p->p_stime;

		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks) {
#ifdef PROFTIMER
			extern int profscale;
			addupc(frame.tf_eip, &p->p_stats->p_prof,
			    ticks * profscale);
#else
			addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
#endif
		}
	}
	curpri = p->p_pri;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
#ifdef	DIAGNOSTICx
{ extern int _udatasel, _ucodesel;
	if (frame.tf_ss != _udatasel)
		printf("ss %x call %d\n", frame.tf_ss, code);
	if ((frame.tf_cs&0xffff) != _ucodesel)
		printf("cs %x call %d\n", frame.tf_cs, code);
	if (frame.tf_eip > VM_MAXUSER_ADDRESS) {
		printf("eip %x call %d\n", frame.tf_eip, code);
		frame.tf_eip = 0;
	}
}
#endif
}