subr_syscall.c revision 132266
/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_trap.c 132266 2004-07-16 21:04:55Z jhb $");

#include "opt_ktrace.h"
#include "opt_mac.h"
#ifdef __i386__
#include "opt_npx.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/pcb.h>

/*
 * Handle the work that must be done before returning to user mode,
 * for both traps and system calls.
 *
 * MPSAFE
 */
void
userret(td, frame, oticks)
	struct thread *td;
	struct trapframe *frame;
	u_int oticks;
{
	struct proc *p = td->td_proc;

	CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
#ifdef DIAGNOSTIC
	/* Check that we called signotify() enough. */
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (SIGPENDING(td) && ((td->td_flags & TDF_NEEDSIGCHK) == 0 ||
	    (td->td_flags & TDF_ASTPENDING) == 0))
		printf("failed to set signal flags properly for ast()\n");
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
#endif

	/*
	 * Let the scheduler adjust our priority etc.
	 */
	sched_userret(td);

	/*
	 * Check whether we have to exit or wait due to a single-threading
	 * requirement or some other STOP condition.  Don't bother doing
	 * all the work if the stop bits are not set at this time; if we
	 * miss one, we miss it, no big deal.
	 */
	if (P_SHOULDSTOP(p)) {
		PROC_LOCK(p);
		thread_suspend_check(0);	/* Can suspend or kill */
		PROC_UNLOCK(p);
	}

	/*
	 * Do special thread processing, e.g. upcall tweaking and such.
	 */
	if (p->p_flag & P_SA)
		thread_userret(td, frame);

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL) {
		quad_t ticks;

		mtx_lock_spin(&sched_lock);
		ticks = td->td_sticks - oticks;
		mtx_unlock_spin(&sched_lock);
		addupc_task(td, TRAPF_PC(frame), (u_int)ticks * psratio);
	}
}
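
/*
 * Illustrative sketch (not part of this file): the machine-dependent
 * trap() and syscall() entry points are expected to snapshot td_sticks
 * on entry and pass that value back in as oticks on the way out,
 * roughly:
 *
 *	sticks = td->td_sticks;
 *	... handle the trap or dispatch the system call ...
 *	userret(td, &frame, sticks);
 *
 * so that the profiling code above charges only the system time
 * accumulated during this particular kernel entry.
 */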

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
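/*
 * Illustrative note (not from this file): ast() is reached from the
 * machine-dependent return-to-user path (e.g. the i386 doreti code),
 * which rechecks TDF_ASTPENDING and TDF_NEEDRESCHED with interrupts
 * disabled and, if either is set, calls:
 *
 *	ast(framep);
 *
 * before retrying the return to user mode, so no pending AST is lost.
 */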
void
ast(struct trapframe *framep)
{
	struct thread *td;
	struct proc *p;
	struct ksegrp *kg;
	struct rlimit rlim;
	u_int sticks;
	int sflag;
	int flags;
	int sig;
#if defined(DEV_NPX) && !defined(SMP)
	int ucode;
#endif

	td = curthread;
	p = td->td_proc;
	kg = td->td_ksegrp;

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
	WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	td->td_frame = framep;

	/*
	 * Snapshot the flags needed for the checks below and clear the
	 * astpending flag in one "atomic" operation (both are done while
	 * holding sched_lock).  If another AST is triggered while we are
	 * handling the ASTs saved in sflag, the astpending flag will be
	 * set again and ast() will be called again.
	 */
	mtx_lock_spin(&sched_lock);
	sticks = td->td_sticks;
	flags = td->td_flags;
	sflag = p->p_sflag;
	p->p_sflag &= ~(PS_ALRMPEND | PS_PROFPEND | PS_XCPU);
#ifdef MAC
	p->p_sflag &= ~PS_MACPEND;
#endif
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
	    TDF_NEEDRESCHED | TDF_INTERRUPT);
	cnt.v_soft++;
	mtx_unlock_spin(&sched_lock);
	/*
	 * XXXKSE While the fact that we owe a user profiling tick is
	 * stored per thread in this code, the statistics themselves are
	 * still stored per process.  This should probably change;
	 * possibly the location of both should change.
	 */

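	/*
	 * If the process credentials changed while we were in the kernel,
	 * refresh this thread's cached reference (td_ucred).
	 */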
	if (td->td_ucred != p->p_ucred)
		cred_update_thread(td);
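	/*
	 * Charge a deferred profiling tick: the user profile buffer cannot
	 * be safely updated from interrupt context, so the clock code notes
	 * the PC and tick count and sets TDP_OWEUPC for us to settle here.
	 */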
	if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
		addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
		td->td_profil_ticks = 0;
		td->td_pflags &= ~TDP_OWEUPC;
	}
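	/*
	 * PS_ALRMPEND: the virtual interval timer (ITIMER_VIRTUAL) expired
	 * in interrupt context; deliver the deferred SIGVTALRM now.
	 */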
	if (sflag & PS_ALRMPEND) {
		PROC_LOCK(p);
		psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
	}
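	/*
	 * On UP i386 with the npx device, a pending FPU exception is
	 * latched in the PCB rather than handled in the trap itself;
	 * npxtrap() decodes it into an FP error code (-1 means none)
	 * and we turn it into a SIGFPE for the current thread.
	 */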
#if defined(DEV_NPX) && !defined(SMP)
	if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
		atomic_clear_int(&PCPU_GET(curpcb)->pcb_flags,
		    PCB_NPXTRAP);
		ucode = npxtrap();
		if (ucode != -1) {
			trapsignal(td, SIGFPE, ucode);
		}
	}
#endif
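	/*
	 * PS_PROFPEND: the profiling interval timer (ITIMER_PROF) expired
	 * in interrupt context; deliver the deferred SIGPROF now.
	 */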
	if (sflag & PS_PROFPEND) {
		PROC_LOCK(p);
		psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	}
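	/*
	 * PS_XCPU: the process has run past its CPU time limit.  Kill it
	 * if the hard limit has been exceeded; otherwise send SIGXCPU and
	 * push the limit checkpoint out by five seconds so the signal
	 * repeats periodically until the hard limit is reached.
	 */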
	if (sflag & PS_XCPU) {
		PROC_LOCK(p);
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		mtx_lock_spin(&sched_lock);
		if (p->p_runtime.sec >= rlim.rlim_max) {
			mtx_unlock_spin(&sched_lock);
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			mtx_unlock_spin(&sched_lock);
			psignal(p, SIGXCPU);
		}
		PROC_UNLOCK(p);
	}
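	/*
	 * PS_MACPEND: the MAC framework has deferred per-thread work to
	 * perform before this thread returns to user mode.
	 */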
#ifdef MAC
	if (sflag & PS_MACPEND)
		mac_thread_userret(td);
#endif
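	/*
	 * A reschedule was requested while we were in the kernel.  Drop
	 * back to this ksegrp's user priority and let mi_switch() pick
	 * the next thread, logging the context switch via ktrace if CSW
	 * tracing is enabled.
	 */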
	if (flags & TDF_NEEDRESCHED) {
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(1, 1);
#endif
		mtx_lock_spin(&sched_lock);
		sched_prio(td, kg->kg_user_pri);
		mi_switch(SW_INVOL, NULL);
		mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(0, 1);
#endif
	}
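	/*
	 * Signals were posted while we were in the kernel; deliver any
	 * that are now deliverable via postsig() before returning.
	 */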
	if (flags & TDF_NEEDSIGCHK) {
		PROC_LOCK(p);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
		PROC_UNLOCK(p);
	}

	userret(td, framep, sticks);
#ifdef DIAGNOSTIC
	cred_free_thread(td);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}