/* subr_syscall.c revision 139324 */
/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29178580Simp * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 38 */ 39 40#include <sys/cdefs.h> 41__FBSDID("$FreeBSD: head/sys/kern/subr_trap.c 139324 2004-12-26 07:30:35Z jeff $"); 42 43#include "opt_ktrace.h" 44#include "opt_mac.h" 45#ifdef __i386__ 46#include "opt_npx.h" 47#endif 48 49#include <sys/param.h> 50#include <sys/bus.h> 51#include <sys/kernel.h> 52#include <sys/lock.h> 53#include <sys/mac.h> 54#include <sys/mutex.h> 55#include <sys/proc.h> 56#include <sys/ktr.h> 57#include <sys/resourcevar.h> 58#include <sys/sched.h> 59#include <sys/signalvar.h> 60#include <sys/systm.h> 61#include <sys/vmmeter.h> 62#ifdef KTRACE 63#include <sys/uio.h> 64#include <sys/ktrace.h> 65#endif 66 67#include <machine/cpu.h> 68#include <machine/pcb.h> 69 70/* 71 * Define the code needed before returning to user mode, for 72 * trap and syscall. 73 * 74 * MPSAFE 75 */ 76void 77userret(td, frame, oticks) 78 struct thread *td; 79 struct trapframe *frame; 80 u_int oticks; 81{ 82 struct proc *p = td->td_proc; 83 84 CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid, 85 p->p_comm); 86#ifdef DIAGNOSTIC 87 /* Check that we called signotify() enough. 
*/ 88 PROC_LOCK(p); 89 mtx_lock_spin(&sched_lock); 90 if (SIGPENDING(td) && ((td->td_flags & TDF_NEEDSIGCHK) == 0 || 91 (td->td_flags & TDF_ASTPENDING) == 0)) 92 printf("failed to set signal flags properly for ast()\n"); 93 mtx_unlock_spin(&sched_lock); 94 PROC_UNLOCK(p); 95#endif 96 97 /* 98 * If this thread tickled GEOM, we need to wait for the giggling to 99 * stop before we return to userland 100 */ 101 if (td->td_pflags & TDP_GEOM) 102 g_waitidle(); 103 104 /* 105 * We need to check to see if we have to exit or wait due to a 106 * single threading requirement or some other STOP condition. 107 * Don't bother doing all the work if the stop bits are not set 108 * at this time.. If we miss it, we miss it.. no big deal. 109 */ 110 if (P_SHOULDSTOP(p)) { 111 PROC_LOCK(p); 112 thread_suspend_check(0); /* Can suspend or kill */ 113 PROC_UNLOCK(p); 114 } 115 116 /* 117 * Do special thread processing, e.g. upcall tweaking and such. 118 */ 119 if (p->p_flag & P_SA) 120 thread_userret(td, frame); 121 122 /* 123 * Charge system time if profiling. 124 */ 125 if (p->p_flag & P_PROFIL) { 126 quad_t ticks; 127 128 ticks = td->td_sticks - oticks; 129 addupc_task(td, TRAPF_PC(frame), (u_int)ticks * psratio); 130 } 131 /* 132 * Let the scheduler adjust our priority etc. 133 */ 134 sched_userret(td); 135} 136 137/* 138 * Process an asynchronous software trap. 139 * This is relatively easy. 140 * This function will return with preemption disabled. 
141 */ 142void 143ast(struct trapframe *framep) 144{ 145 struct thread *td; 146 struct proc *p; 147 struct ksegrp *kg; 148 struct rlimit rlim; 149 u_int sticks; 150 int sflag; 151 int flags; 152 int sig; 153#if defined(DEV_NPX) && !defined(SMP) 154 int ucode; 155#endif 156 157 td = curthread; 158 p = td->td_proc; 159 kg = td->td_ksegrp; 160 161 CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid, 162 p->p_comm); 163 KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode")); 164 WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode"); 165 mtx_assert(&Giant, MA_NOTOWNED); 166 mtx_assert(&sched_lock, MA_NOTOWNED); 167 td->td_frame = framep; 168 sticks = td->td_sticks; 169 170 if ((p->p_flag & P_SA) && (td->td_mailbox == NULL)) 171 thread_user_enter(td); 172 173 /* 174 * This updates the p_sflag's for the checks below in one 175 * "atomic" operation with turning off the astpending flag. 176 * If another AST is triggered while we are handling the 177 * AST's saved in sflag, the astpending flag will be set and 178 * ast() will be called again. 179 */ 180 mtx_lock_spin(&sched_lock); 181 flags = td->td_flags; 182 sflag = p->p_sflag; 183 p->p_sflag &= ~(PS_ALRMPEND | PS_PROFPEND | PS_XCPU); 184#ifdef MAC 185 p->p_sflag &= ~PS_MACPEND; 186#endif 187 td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | 188 TDF_NEEDRESCHED | TDF_INTERRUPT); 189 cnt.v_soft++; 190 mtx_unlock_spin(&sched_lock); 191 192 /* 193 * XXXKSE While the fact that we owe a user profiling 194 * tick is stored per KSE in this code, the statistics 195 * themselves are still stored per process. 196 * This should probably change, by which I mean that 197 * possibly the location of both might change. 
198 */ 199 if (td->td_ucred != p->p_ucred) 200 cred_update_thread(td); 201 if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) { 202 addupc_task(td, td->td_profil_addr, td->td_profil_ticks); 203 td->td_profil_ticks = 0; 204 td->td_pflags &= ~TDP_OWEUPC; 205 } 206 if (sflag & PS_ALRMPEND) { 207 PROC_LOCK(p); 208 psignal(p, SIGVTALRM); 209 PROC_UNLOCK(p); 210 } 211#if defined(DEV_NPX) && !defined(SMP) 212 if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) { 213 atomic_clear_int(&PCPU_GET(curpcb)->pcb_flags, 214 PCB_NPXTRAP); 215 ucode = npxtrap(); 216 if (ucode != -1) { 217 trapsignal(td, SIGFPE, ucode); 218 } 219 } 220#endif 221 if (sflag & PS_PROFPEND) { 222 PROC_LOCK(p); 223 psignal(p, SIGPROF); 224 PROC_UNLOCK(p); 225 } 226 if (sflag & PS_XCPU) { 227 PROC_LOCK(p); 228 lim_rlimit(p, RLIMIT_CPU, &rlim); 229 mtx_lock_spin(&sched_lock); 230 if (p->p_rux.rux_runtime.sec >= rlim.rlim_max) { 231 mtx_unlock_spin(&sched_lock); 232 killproc(p, "exceeded maximum CPU limit"); 233 } else { 234 if (p->p_cpulimit < rlim.rlim_max) 235 p->p_cpulimit += 5; 236 mtx_unlock_spin(&sched_lock); 237 psignal(p, SIGXCPU); 238 } 239 PROC_UNLOCK(p); 240 } 241#ifdef MAC 242 if (sflag & PS_MACPEND) 243 mac_thread_userret(td); 244#endif 245 if (flags & TDF_NEEDRESCHED) { 246#ifdef KTRACE 247 if (KTRPOINT(td, KTR_CSW)) 248 ktrcsw(1, 1); 249#endif 250 mtx_lock_spin(&sched_lock); 251 sched_prio(td, kg->kg_user_pri); 252 mi_switch(SW_INVOL, NULL); 253 mtx_unlock_spin(&sched_lock); 254#ifdef KTRACE 255 if (KTRPOINT(td, KTR_CSW)) 256 ktrcsw(0, 1); 257#endif 258 } 259 if (flags & TDF_NEEDSIGCHK) { 260 PROC_LOCK(p); 261 mtx_lock(&p->p_sigacts->ps_mtx); 262 while ((sig = cursig(td)) != 0) 263 postsig(sig); 264 mtx_unlock(&p->p_sigacts->ps_mtx); 265 PROC_UNLOCK(p); 266 } 267 268 userret(td, framep, sticks); 269 mtx_assert(&Giant, MA_NOTOWNED); 270} 271