/* subr_syscall.c revision 104233 — NOTE(review): the $FreeBSD$ tag below identifies this file as sys/kern/subr_trap.c */
1202283Slulf/*- 2202283Slulf * Copyright (C) 1994, David Greenman 3202283Slulf * Copyright (c) 1990, 1993 4202283Slulf * The Regents of the University of California. All rights reserved. 5202283Slulf * 6202283Slulf * This code is derived from software contributed to Berkeley by 7202283Slulf * the University of Utah, and William Jolitz. 8202283Slulf * 9202283Slulf * Redistribution and use in source and binary forms, with or without 10202283Slulf * modification, are permitted provided that the following conditions 11202283Slulf * are met: 12202283Slulf * 1. Redistributions of source code must retain the above copyright 13202283Slulf * notice, this list of conditions and the following disclaimer. 14202283Slulf * 2. Redistributions in binary form must reproduce the above copyright 15202283Slulf * notice, this list of conditions and the following disclaimer in the 16202283Slulf * documentation and/or other materials provided with the distribution. 17202283Slulf * 3. All advertising materials mentioning features or use of this software 18202283Slulf * must display the following acknowledgement: 19202283Slulf * This product includes software developed by the University of 20202283Slulf * California, Berkeley and its contributors. 21202283Slulf * 4. Neither the name of the University nor the names of its contributors 22202283Slulf * may be used to endorse or promote products derived from this software 23202283Slulf * without specific prior written permission. 24202283Slulf * 25202283Slulf * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26202283Slulf * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27202283Slulf * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28202283Slulf * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29202283Slulf * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30262623Spfg * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31202283Slulf * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32202283Slulf * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33217703Sjhb * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34217703Sjhb * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35217703Sjhb * SUCH DAMAGE. 36217703Sjhb * 37217703Sjhb * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 38217703Sjhb * $FreeBSD: head/sys/kern/subr_trap.c 104233 2002-09-30 20:20:22Z jmallett $ 39217703Sjhb */ 40217703Sjhb 41217703Sjhb#ifdef __i386__ 42217703Sjhb#include "opt_npx.h" 43217703Sjhb#endif 44217703Sjhb 45217703Sjhb#include <sys/param.h> 46217703Sjhb#include <sys/bus.h> 47258904Spfg#include <sys/kernel.h> 48258904Spfg#include <sys/lock.h> 49217703Sjhb#include <sys/mutex.h> 50217703Sjhb#include <sys/proc.h> 51217703Sjhb#include <sys/kse.h> 52202283Slulf#include <sys/ktr.h> 53260988Spfg#include <sys/resourcevar.h> 54294653Spfg#include <sys/signalvar.h> 55260988Spfg#include <sys/systm.h> 56260988Spfg#include <sys/vmmeter.h> 57202283Slulf#include <sys/malloc.h> 58262623Spfg#include <sys/ksiginfo.h> 59262623Spfg#include <machine/cpu.h> 60262623Spfg#include <machine/pcb.h> 61262623Spfg 62262623Spfg/* 63262623Spfg * Define the code needed before returning to user mode, for 64262623Spfg * trap and syscall. 
65262623Spfg * 66294653Spfg * MPSAFE 67262623Spfg */ 68262623Spfgvoid 69262623Spfguserret(td, frame, oticks) 70262623Spfg struct thread *td; 71262623Spfg struct trapframe *frame; 72262623Spfg u_int oticks; 73262623Spfg{ 74262623Spfg struct proc *p = td->td_proc; 75244475Spfg struct kse *ke = td->td_kse; 76232703Spfg struct ksegrp *kg = td->td_ksegrp; 77232703Spfg 78232703Spfg CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid, 79232703Spfg p->p_comm); 80262623Spfg#ifdef INVARIANTS 81262623Spfg /* Check that we called signotify() enough. */ 82262623Spfg mtx_lock(&Giant); 83202283Slulf PROC_LOCK(p); 84262623Spfg mtx_lock_spin(&sched_lock); 85251344Spfg if (signal_pending(p) && ((p->p_sflag & PS_NEEDSIGCHK) == 0 || 86262623Spfg (td->td_kse->ke_flags & KEF_ASTPENDING) == 0)) 87254260Spfg printf("failed to set signal flags properly for ast()\n"); 88232703Spfg mtx_unlock_spin(&sched_lock); 89202283Slulf PROC_UNLOCK(p); 90251809Spfg mtx_unlock(&Giant); 91251809Spfg#endif 92251809Spfg 93251809Spfg /* 94251809Spfg * XXX we cheat slightly on the locking here to avoid locking in 95251809Spfg * the usual case. Setting td_priority here is essentially an 96251809Spfg * incomplete workaround for not setting it properly elsewhere. 97262623Spfg * Now that some interrupt handlers are threads, not setting it 98251809Spfg * properly elsewhere can clobber it in the window between setting 99251809Spfg * it here and returning to user mode, so don't waste time setting 100202283Slulf * it perfectly here. 101202283Slulf */ 102202283Slulf if (td->td_priority != kg->kg_user_pri) { 103221126Sjhb mtx_lock_spin(&sched_lock); 104221126Sjhb td->td_priority = kg->kg_user_pri; 105245121Spfg mtx_unlock_spin(&sched_lock); 106245121Spfg } 107245121Spfg 108245121Spfg /* 109245121Spfg * We need to check to see if we have to exit or wait due to a 110221126Sjhb * single threading requirement or some other STOP condition. 
111221126Sjhb * Don't bother doing all the work if the stop bits are not set 112221126Sjhb * at this time.. If we miss it, we miss it.. no big deal. 113221126Sjhb */ 114232703Spfg if (P_SHOULDSTOP(p)) { 115221126Sjhb PROC_LOCK(p); 116221126Sjhb thread_suspend_check(0); /* Can suspend or kill */ 117245121Spfg PROC_UNLOCK(p); 118245121Spfg } 119245121Spfg 120243652Spfg /* 121245121Spfg * DO special thread processing, e.g. upcall tweaking and such 122221126Sjhb */ 123221126Sjhb if (p->p_flag & P_KSES) { 124262623Spfg thread_userret(td, frame); 125245121Spfg /* printf("KSE thread returned"); */ 126245121Spfg } 127245121Spfg 128262623Spfg /* 129262623Spfg * Charge system time if profiling. 130262623Spfg * 131262623Spfg * XXX should move PS_PROFIL to a place that can obviously be 132262623Spfg * accessed safely without sched_lock. 133262623Spfg */ 134202283Slulf if (p->p_sflag & PS_PROFIL) { 135202283Slulf quad_t ticks; 136217585Sjhb 137202283Slulf mtx_lock_spin(&sched_lock); 138 ticks = ke->ke_sticks - oticks; 139 mtx_unlock_spin(&sched_lock); 140 addupc_task(ke, TRAPF_PC(frame), (u_int)ticks * psratio); 141 } 142} 143 144/* 145 * Process an asynchronous software trap. 146 * This is relatively easy. 147 * This function will return with preemption disabled. 148 */ 149void 150ast(struct trapframe *framep) 151{ 152 struct thread *td = curthread; 153 struct proc *p = td->td_proc; 154 struct kse *ke; 155 struct ksegrp *kg = td->td_ksegrp; 156 u_int prticks, sticks; 157 int sflag; 158 int flags; 159 int sig; 160#if defined(DEV_NPX) && !defined(SMP) 161 int ucode; 162#endif 163 164 CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid, 165 p->p_comm); 166 KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode")); 167#ifdef WITNESS 168 if (witness_list(td)) 169 panic("Returning to user mode with mutex(s) held"); 170#endif 171 mtx_assert(&Giant, MA_NOTOWNED); 172 mtx_assert(&sched_lock, MA_NOTOWNED); 173 prticks = 0; /* XXX: Quiet warning. 
*/ 174 td->td_frame = framep; 175 /* 176 * This updates the p_sflag's for the checks below in one 177 * "atomic" operation with turning off the astpending flag. 178 * If another AST is triggered while we are handling the 179 * AST's saved in sflag, the astpending flag will be set and 180 * ast() will be called again. 181 */ 182 mtx_lock_spin(&sched_lock); 183 ke = td->td_kse; 184 sticks = ke->ke_sticks; 185 sflag = p->p_sflag; 186 flags = ke->ke_flags; 187 p->p_sflag &= ~(PS_ALRMPEND | PS_NEEDSIGCHK | PS_PROFPEND); 188 ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED | KEF_OWEUPC); 189 cnt.v_soft++; 190 if (flags & KEF_OWEUPC && sflag & PS_PROFIL) { 191 prticks = p->p_stats->p_prof.pr_ticks; 192 p->p_stats->p_prof.pr_ticks = 0; 193 } 194 mtx_unlock_spin(&sched_lock); 195 /* 196 * XXXKSE While the fact that we owe a user profiling 197 * tick is stored per KSE in this code, the statistics 198 * themselves are still stored per process. 199 * This should probably change, by which I mean that 200 * possibly the location of both might change. 
201 */ 202 203 if (td->td_ucred != p->p_ucred) 204 cred_update_thread(td); 205 if (flags & KEF_OWEUPC && sflag & PS_PROFIL) 206 addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks); 207 if (sflag & PS_ALRMPEND) { 208 PROC_LOCK(p); 209 psignal(p, SIGVTALRM); 210 PROC_UNLOCK(p); 211 } 212#if defined(DEV_NPX) && !defined(SMP) 213 if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) { 214 atomic_clear_int(&PCPU_GET(curpcb)->pcb_flags, 215 PCB_NPXTRAP); 216 ucode = npxtrap(); 217 if (ucode != -1) { 218 trapsignal(p, SIGFPE, ucode); 219 } 220 } 221#endif 222 if (sflag & PS_PROFPEND) { 223 PROC_LOCK(p); 224 psignal(p, SIGPROF); 225 PROC_UNLOCK(p); 226 } 227 if (flags & KEF_NEEDRESCHED) { 228 mtx_lock_spin(&sched_lock); 229 td->td_priority = kg->kg_user_pri; 230 p->p_stats->p_ru.ru_nivcsw++; 231 mi_switch(); 232 mtx_unlock_spin(&sched_lock); 233 } 234 if (sflag & PS_NEEDSIGCHK) { 235 PROC_LOCK(p); 236 while ((sig = cursig(td)) != 0) 237 postsig(sig); 238 PROC_UNLOCK(p); 239 } 240 241 userret(td, framep, sticks); 242#ifdef DIAGNOSTIC 243 cred_free_thread(td); 244#endif 245 mtx_assert(&Giant, MA_NOTOWNED); 246} 247