subr_trap.c revision 91103
/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 2951673Smdodd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 3051673Smdodd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31117700Smarkm * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32117700Smarkm * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33117700Smarkm * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3451673Smdodd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3552549Smdodd * SUCH DAMAGE. 3651673Smdodd * 3751673Smdodd * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 3852549Smdodd * $FreeBSD: head/sys/kern/subr_trap.c 91103 2002-02-23 01:42:13Z jake $ 3952549Smdodd */ 4052549Smdodd 4152549Smdodd#ifdef __i386__ 4252549Smdodd#include "opt_npx.h" 43117700Smarkm#endif 4452549Smdodd 4551673Smdodd#include <sys/param.h> 4652549Smdodd#include <sys/bus.h> 47117700Smarkm#include <sys/kernel.h> 4851673Smdodd#include <sys/lock.h> 4952549Smdodd#include <sys/mutex.h> 5051673Smdodd#include <sys/proc.h> 5151673Smdodd#include <sys/resourcevar.h> 5251673Smdodd#include <sys/signalvar.h> 53110835Smdodd#include <sys/systm.h> 54110835Smdodd#include <sys/vmmeter.h> 5551673Smdodd#include <machine/cpu.h> 56110835Smdodd#include <machine/pcb.h> 5751673Smdodd 58111292Smarcel/* 59117700Smarkm * Define the code needed before returning to user mode, for 60117700Smarkm * trap and syscall. 
61110835Smdodd * 6251673Smdodd * MPSAFE 63117700Smarkm */ 64117700Smarkmvoid 65117700Smarkmuserret(td, frame, oticks) 66117700Smarkm struct thread *td; 6752549Smdodd struct trapframe *frame; 68117700Smarkm u_int oticks; 69117700Smarkm{ 7051673Smdodd struct proc *p = td->td_proc; 71117700Smarkm struct kse *ke = td->td_kse; 7251673Smdodd struct ksegrp *kg = td->td_ksegrp; 7356428Smdodd int sig; 7452549Smdodd 7552549Smdodd mtx_lock(&Giant); 7652549Smdodd PROC_LOCK(p); 7752549Smdodd while ((sig = CURSIG(p)) != 0) 7856428Smdodd postsig(sig); 7969788Snyan PROC_UNLOCK(p); 8069788Snyan mtx_unlock(&Giant); 8169788Snyan 8269788Snyan mtx_lock_spin(&sched_lock); 8351673Smdodd td->td_priority = kg->kg_user_pri; 84111292Smarcel if (ke->ke_flags & KEF_NEEDRESCHED) { 8552549Smdodd DROP_GIANT(); 86117700Smarkm setrunqueue(td); 87117700Smarkm p->p_stats->p_ru.ru_nivcsw++; 88117700Smarkm mi_switch(); 89117700Smarkm mtx_unlock_spin(&sched_lock); 90117700Smarkm PICKUP_GIANT(); 9169788Snyan mtx_lock(&Giant); 92117700Smarkm PROC_LOCK(p); 93117700Smarkm while ((sig = CURSIG(p)) != 0) 9469788Snyan postsig(sig); 95117700Smarkm mtx_unlock(&Giant); 9652549Smdodd PROC_UNLOCK(p); 97111292Smarcel mtx_lock_spin(&sched_lock); 9851673Smdodd } 9952549Smdodd 100117700Smarkm /* 101117700Smarkm * Charge system time if profiling. 102117700Smarkm */ 103117700Smarkm if (p->p_sflag & PS_PROFIL) { 104117700Smarkm quad_t ticks; 105117700Smarkm 106117700Smarkm ticks = ke->ke_sticks - oticks; 10752549Smdodd mtx_unlock_spin(&sched_lock); 10851673Smdodd addupc_task(ke, TRAPF_PC(frame), (u_int)ticks * psratio); 10952549Smdodd } else 11052549Smdodd mtx_unlock_spin(&sched_lock); 11152549Smdodd} 11252549Smdodd 11352549Smdodd/* 11452549Smdodd * Process an asynchronous software trap. 11552549Smdodd * This is relatively easy. 11652549Smdodd * This function will return with preemption disabled. 
11752549Smdodd */ 11852549Smdoddvoid 11952549Smdoddast(framep) 12052549Smdodd struct trapframe *framep; 121111292Smarcel{ 12254198Smdodd struct thread *td = curthread; 123117700Smarkm struct proc *p = td->td_proc; 12452549Smdodd struct kse *ke = td->td_kse; 125117700Smarkm u_int prticks, sticks; 126117700Smarkm critical_t s; 12754198Smdodd int sflag; 128117700Smarkm int flags; 12952549Smdodd#if defined(DEV_NPX) && !defined(SMP) 13054198Smdodd int ucode; 13154198Smdodd#endif 13254198Smdodd 13354198Smdodd KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode")); 13454198Smdodd#ifdef WITNESS 13552549Smdodd if (witness_list(td)) 136111292Smarcel panic("Returning to user mode with mutex(s) held"); 13751673Smdodd#endif 13852549Smdodd mtx_assert(&Giant, MA_NOTOWNED); 139117700Smarkm prticks = 0; /* XXX: Quiet warning. */ 14052549Smdodd s = cpu_critical_enter(); 141117700Smarkm while ((ke->ke_flags & (KEF_ASTPENDING | KEF_NEEDRESCHED)) != 0) { 142117700Smarkm cpu_critical_exit(s); 143117700Smarkm td->td_frame = framep; 144117700Smarkm /* 145117700Smarkm * This updates the p_sflag's for the checks below in one 146117700Smarkm * "atomic" operation with turning off the astpending flag. 14752549Smdodd * If another AST is triggered while we are handling the 14856428Smdodd * AST's saved in sflag, the astpending flag will be set and 14956428Smdodd * we will loop again. 
15056428Smdodd */ 15156428Smdodd mtx_lock_spin(&sched_lock); 15256428Smdodd sticks = ke->ke_sticks; 153117700Smarkm sflag = p->p_sflag; 15456428Smdodd flags = ke->ke_flags; 15552549Smdodd p->p_sflag &= ~(PS_PROFPEND | PS_ALRMPEND); 15652549Smdodd ke->ke_flags &= ~(KEF_OWEUPC | KEF_ASTPENDING); 15751673Smdodd cnt.v_soft++; 158110835Smdodd if (flags & KEF_OWEUPC && sflag & PS_PROFIL) { 15952549Smdodd prticks = p->p_stats->p_prof.pr_ticks; 160117700Smarkm p->p_stats->p_prof.pr_ticks = 0; 16152549Smdodd } 162117700Smarkm mtx_unlock_spin(&sched_lock); 163117700Smarkm 164117700Smarkm#ifdef DIAGNOSTIC 165117700Smarkm /* 166117700Smarkm * As a diagnostic tool we make sure that td->td_ucred 167117700Smarkm * is NULL while we are in user space. This is 168117700Smarkm * because theoreticaly this field is only defined 169117700Smarkm * while the thread is in the kernel. Making it NULL 170117700Smarkm * will immediatly trap invalid usage of this field. 171117700Smarkm * In practice however we keep the reference to the ucred 17251673Smdodd * because it's almost always going to be the same cred we will 17352549Smdodd * need at the next syscall, and it can be expensive 17452549Smdodd * to keep dropping and reacquiring the reference. 17552549Smdodd * We thus stash it away elsewhere until we return 17652549Smdodd * to the kernel, where we bring it back. If 17751673Smdodd * DIAGNOSTIC is not defined we don't bother with 17852549Smdodd * making it NULL, and just leave it in place. 17951673Smdodd * (don't remove this comment without removing the pointers 18052549Smdodd * to it in sys/proc.h, trap.c, kern/kern_fork.c and here.) 
18151673Smdodd */ 18252549Smdodd if (td->td_ucred) 18352549Smdodd panic("ast:thread got a cred before reaching AST"); 18454196Smdodd td->td_ucred = td->td_ucred_cache; 18554196Smdodd td->td_ucred_cache = NULL; 18651673Smdodd#endif /* DIAGNOSTIC */ 187117700Smarkm if (td->td_ucred != p->p_ucred) 188117700Smarkm cred_update_thread(td); 189117700Smarkm if (flags & KEF_OWEUPC && sflag & PS_PROFIL) 19052549Smdodd addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks); 191117700Smarkm if (sflag & PS_ALRMPEND) { 19252549Smdodd PROC_LOCK(p); 193117700Smarkm psignal(p, SIGVTALRM); 19452549Smdodd PROC_UNLOCK(p); 19551673Smdodd } 19654196Smdodd#if defined(DEV_NPX) && !defined(SMP) 19754196Smdodd if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) { 198117700Smarkm atomic_clear_int(&PCPU_GET(curpcb)->pcb_flags, 19954196Smdodd PCB_NPXTRAP); 20054196Smdodd ucode = npxtrap(); 201117700Smarkm if (ucode != -1) { 20254196Smdodd trapsignal(p, SIGFPE, ucode); 20354196Smdodd } 20452549Smdodd } 20552549Smdodd#endif 20652549Smdodd if (sflag & PS_PROFPEND) { 20752549Smdodd PROC_LOCK(p); 20852549Smdodd psignal(p, SIGPROF); 20952549Smdodd PROC_UNLOCK(p); 21052549Smdodd } 21152549Smdodd 21251673Smdodd userret(td, framep, sticks); 21352549Smdodd#ifdef DIAGNOSTIC /* see comment above */ 21452549Smdodd if (td->td_ucred_cache) 21552549Smdodd panic("ast:thread already has cached ucred"); 216117700Smarkm td->td_ucred_cache = td->td_ucred; 21752549Smdodd td->td_ucred = NULL; 218117700Smarkm#endif /* DIAGNOSTIC */ 21956429Smdodd 22052549Smdodd s = cpu_critical_enter(); 22152549Smdodd } 22252549Smdodd mtx_assert(&Giant, MA_NOTOWNED); 22352549Smdodd /* 22451673Smdodd * We need to keep interrupts disabled so that if any further AST's 22552549Smdodd * come in, the interrupt they come in on will be delayed until we 22652549Smdodd * finish returning to userland. We assume that the return to userland 22752549Smdodd * will perform the equivalent of cpu_critical_exit(). 
22863379Smdodd */ 22952549Smdodd} 23063379Smdodd