/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/kern/subr_trap.c 109877 2003-01-26 11:41:35Z davidxu $
 */

#include "opt_mac.h"
#ifdef __i386__
#include "opt_npx.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <machine/cpu.h>
#include <machine/pcb.h>

/*
 * Define the code needed before returning to user mode, for
 * trap and syscall.
 *
 * MPSAFE
 */
void
userret(td, frame, oticks)
	struct thread *td;
	struct trapframe *frame;
	u_int oticks;
{
	struct proc *p = td->td_proc;
#ifdef INVARIANTS
	struct kse *ke;
#endif

	CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
#ifdef INVARIANTS
	/*
	 * Check that we called signotify() enough.
	 * XXXKSE this checking is bogus for threaded programs.
	 */
	mtx_lock(&Giant);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	ke = td->td_kse;
	if (SIGPENDING(p) && ((p->p_sflag & PS_NEEDSIGCHK) == 0 ||
	    (ke->ke_flags & KEF_ASTPENDING) == 0))
		printf("failed to set signal flags properly for ast()\n");
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	mtx_unlock(&Giant);
#endif

	/*
	 * Let the scheduler adjust our priority etc.
	 */
	sched_userret(td);

	/*
	 * Charge system time if profiling.
	 *
	 * XXX should move PS_PROFIL to a place that can obviously be
	 * accessed safely without sched_lock.
	 */

	if (p->p_sflag & PS_PROFIL) {
		quad_t ticks;

		ticks = td->td_sticks - oticks;
		addupc_task(td, TRAPF_PC(frame), (u_int)ticks * psratio);
	}

	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 * Don't bother doing all the work if the stop bits are not set
	 * at this time; if we miss it, we miss it.  No big deal.
	 */
	if (P_SHOULDSTOP(p)) {
		PROC_LOCK(p);
		thread_suspend_check(0);	/* Can suspend or kill */
		PROC_UNLOCK(p);
	}

	/*
	 * Do special thread processing, e.g. upcall tweaking and such.
	 */
	if (p->p_flag & P_KSES) {
		thread_userret(td, frame);
	}
}

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(struct trapframe *framep)
{
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	struct rlimit *rlim;
	u_int prticks, sticks;
	int sflag;
	int flags;
	int tflags;
	int sig;
#if defined(DEV_NPX) && !defined(SMP)
	int ucode;
#endif

	td = curthread;
	p = td->td_proc;
	kg = td->td_ksegrp;

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
#ifdef WITNESS
	if (witness_list(td))
		panic("Returning to user mode with mutex(s) held");
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	td->td_frame = framep;

	/*
	 * This updates the p_sflag's for the checks below in one
	 * "atomic" operation with turning off the astpending flag.
	 * If another AST is triggered while we are handling the
	 * AST's saved in sflag, the astpending flag will be set and
	 * ast() will be called again.
	 */
	mtx_lock_spin(&sched_lock);
	ke = td->td_kse;
	sticks = td->td_sticks;
	tflags = td->td_flags;
	flags = ke->ke_flags;
	sflag = p->p_sflag;
	p->p_sflag &= ~(PS_ALRMPEND | PS_NEEDSIGCHK | PS_PROFPEND | PS_XCPU);
#ifdef MAC
	p->p_sflag &= ~PS_MACPEND;
#endif
	ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED);
	td->td_flags &= ~(TDF_ASTPENDING | TDF_OWEUPC);
	cnt.v_soft++;
	prticks = 0;
	if (tflags & TDF_OWEUPC && sflag & PS_PROFIL) {
		prticks = td->td_prticks;
		td->td_prticks = 0;
	}
	mtx_unlock_spin(&sched_lock);
	/*
	 * XXXKSE While the fact that we owe a user profiling
	 * tick is stored per KSE in this code, the statistics
	 * themselves are still stored per process.
	 * This should probably change, by which I mean that
	 * possibly the location of both might change.
	 */

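	/*
	 * If the process credential changed while we were in the kernel,
	 * refresh this thread's cached reference to it before we head
	 * back to user mode.
	 */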
	if (td->td_ucred != p->p_ucred)
		cred_update_thread(td);
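	/*
	 * Charge any profiling ticks that the clock interrupt could not
	 * post itself; addupc_task() may have to fault in the profiling
	 * buffer, which cannot be done from interrupt context.
	 */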
	if (tflags & TDF_OWEUPC && sflag & PS_PROFIL) {
		addupc_task(td, td->td_praddr, prticks);
	}
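	/*
	 * The clock interrupt noted an expired virtual interval timer
	 * but cannot take the process lock there, so the SIGVTALRM is
	 * delivered here instead.
	 */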
	if (sflag & PS_ALRMPEND) {
		PROC_LOCK(p);
		psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
	}
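	/*
	 * On i386 UP kernels a pending FPU exception is flagged in the
	 * PCB by the FPU trap/interrupt handler; fetch its exception
	 * code via npxtrap() and deliver it as a SIGFPE here.
	 */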
#if defined(DEV_NPX) && !defined(SMP)
	if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
		atomic_clear_int(&PCPU_GET(curpcb)->pcb_flags,
		    PCB_NPXTRAP);
		ucode = npxtrap();
		if (ucode != -1) {
			trapsignal(p, SIGFPE, ucode);
		}
	}
#endif
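	/*
	 * Likewise, deliver a SIGPROF left pending by an expired
	 * profiling interval timer.
	 */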
	if (sflag & PS_PROFPEND) {
		PROC_LOCK(p);
		psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	}
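	/*
	 * CPU time limit processing: past the hard limit the process is
	 * killed outright; past the soft limit it gets SIGXCPU and the
	 * next check point is pushed ahead so the signal repeats
	 * periodically until the hard limit is reached.
	 */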
	if (sflag & PS_XCPU) {
		PROC_LOCK(p);
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (p->p_runtime.sec >= rlim->rlim_max)
			killproc(p, "exceeded maximum CPU limit");
		else {
			psignal(p, SIGXCPU);
			mtx_lock_spin(&sched_lock);
			if (p->p_cpulimit < rlim->rlim_max)
				p->p_cpulimit += 5;
			mtx_unlock_spin(&sched_lock);
		}
		PROC_UNLOCK(p);
	}
#ifdef MAC
	if (sflag & PS_MACPEND)
		mac_thread_userret(td);
#endif
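	/*
	 * A reschedule was requested while we were in the kernel: drop
	 * back to the KSE group's user priority, account an involuntary
	 * context switch, and let something else run.
	 */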
	if (flags & KEF_NEEDRESCHED) {
		mtx_lock_spin(&sched_lock);
		sched_prio(td, kg->kg_user_pri);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
	}
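	/*
	 * Post any signals that became pending while we were in the
	 * kernel.
	 */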
	if (sflag & PS_NEEDSIGCHK) {
		PROC_LOCK(p);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		PROC_UNLOCK(p);
	}

	userret(td, framep, sticks);
#ifdef DIAGNOSTIC
	cred_free_thread(td);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}