/* subr_syscall.c revision 81493 */
/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/kern/subr_trap.c 81493 2001-08-10 22:53:32Z jhb $
 */

#ifdef __i386__
#include "opt_npx.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <machine/cpu.h>
#include <machine/pcb.h>

/*
 * Common code executed before returning to user mode, for both
 * traps and syscalls.
 */
void
userret(p, frame, oticks)
	struct proc *p;
	struct trapframe *frame;
	u_int oticks;
{
	int sig;

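	/*
	 * Deliver any signals that became pending while we were in the
	 * kernel.  (CURSIG() and postsig() still appear to require Giant
	 * at this point, hence the Giant lock around the loop.)
	 */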
	mtx_lock(&Giant);
	PROC_LOCK(p);
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	mtx_unlock(&Giant);
	PROC_UNLOCK(p);

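	/*
	 * Drop our priority back to the user priority and, if a reschedule
	 * has been requested, put ourselves back on the run queue and
	 * switch away; check for signals again once we are resumed.
	 */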
	mtx_lock_spin(&sched_lock);
	p->p_pri.pri_level = p->p_pri.pri_user;
	if (p->p_sflag & PS_NEEDRESCHED) {
		/*
		 * Since we are curproc, a clock interrupt could
		 * change our priority without changing run queues
		 * (the running process is not kept on a run queue).
		 * If this happened after we setrunqueue ourselves but
		 * before we switch()'ed, we might not be on the queue
		 * indicated by our priority.
		 */
		DROP_GIANT_NOSWITCH();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		mtx_lock(&Giant);
		PROC_LOCK(p);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
		mtx_unlock(&Giant);
		PROC_UNLOCK(p);
	} else
		mtx_unlock_spin(&sched_lock);

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_sflag & PS_PROFIL)
		addupc_task(p, TRAPF_PC(frame),
			    ((u_int)p->p_sticks - oticks) * psratio);
}

/*
 * Process an asynchronous software trap (AST).
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(framep)
	struct trapframe *framep;
{
	struct proc *p = CURPROC;
	u_int prticks, sticks;
	critical_t s;
	int sflag;
#if defined(DEV_NPX) && !defined(SMP)
	int ucode;
#endif

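	/*
	 * ASTs are only processed on the way back out to user mode; check
	 * that, and that we are not about to return to userland while
	 * holding any mutexes.
	 */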
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
#ifdef WITNESS
	if (witness_list(p))
		panic("Returning to user mode with mutex(s) held");
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
	s = critical_enter();
	while ((p->p_sflag & (PS_ASTPENDING | PS_NEEDRESCHED)) != 0) {
		critical_exit(s);
		p->p_frame = framep;
		/*
		 * Snapshot the p_sflag bits for the checks below and turn
		 * off the astpending flag in one "atomic" operation (under
		 * sched_lock).  If another AST is triggered while we are
		 * handling the ASTs saved in sflag, the astpending flag will
		 * be set again and we will loop to handle it.
		 */
		mtx_lock_spin(&sched_lock);
		sticks = p->p_sticks;
		sflag = p->p_sflag;
		p->p_sflag &= ~(PS_OWEUPC | PS_ALRMPEND | PS_PROFPEND |
		    PS_ASTPENDING);
		cnt.v_soft++;
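		/*
		 * PS_OWEUPC: a clock interrupt noted profiling ticks that
		 * could not be charged from interrupt context; credit them
		 * to the saved profiling address now that addupc_task() can
		 * safely be called.
		 */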
		if (sflag & PS_OWEUPC) {
			prticks = p->p_stats->p_prof.pr_ticks;
			p->p_stats->p_prof.pr_ticks = 0;
			mtx_unlock_spin(&sched_lock);
			addupc_task(p, p->p_stats->p_prof.pr_addr, prticks);
		} else
			mtx_unlock_spin(&sched_lock);
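		/* PS_ALRMPEND: a virtual interval timer expired; post SIGVTALRM. */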
		if (sflag & PS_ALRMPEND) {
			PROC_LOCK(p);
			psignal(p, SIGVTALRM);
			PROC_UNLOCK(p);
		}
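		/*
		 * On non-SMP kernels with the npx device, a pending FPU
		 * exception is latched in the PCB (PCB_NPXTRAP) and turned
		 * into a SIGFPE here, on the way back to user mode.
		 */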
#if defined(DEV_NPX) && !defined(SMP)
		if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
			atomic_clear_char(&PCPU_GET(curpcb)->pcb_flags,
			    PCB_NPXTRAP);
			ucode = npxtrap();
			if (ucode != -1) {
				mtx_lock(&Giant);
				trapsignal(p, SIGFPE, ucode);
			}
		}
#endif
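		/* PS_PROFPEND: the profiling interval timer expired; post SIGPROF. */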
		if (sflag & PS_PROFPEND) {
			PROC_LOCK(p);
			psignal(p, SIGPROF);
			PROC_UNLOCK(p);
		}

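		/*
		 * Finish up as for any return to user mode: deliver pending
		 * signals, honor any reschedule request, and charge profiling
		 * time.  Release Giant if the code above left it held.
		 */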
		userret(p, framep, sticks);
		if (mtx_owned(&Giant))
			mtx_unlock(&Giant);
		s = critical_enter();
	}
	mtx_assert(&Giant, MA_NOTOWNED);
	/*
	 * We need to keep interrupts disabled so that if any further ASTs
	 * come in, the interrupt they come in on will be delayed until we
	 * finish returning to userland.  We assume that the return to
	 * userland will perform the equivalent of critical_exit().
	 */
}