1/*- 2 * Copyright (C) 1994, David Greenman 3 * Copyright (c) 1990, 1993 4 * The Regents of the University of California. All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * the University of Utah, and William Jolitz. 8 * --- 21 unchanged lines hidden (view full) --- 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 |
38 * $FreeBSD: head/sys/kern/subr_trap.c 83366 2001-09-12 08:38:13Z julian $ |
39 */ 40 41#ifdef __i386__ 42#include "opt_npx.h" 43#endif 44 45#include <sys/param.h> 46#include <sys/bus.h> --- 10 unchanged lines hidden (view full) --- 57 58/* 59 * Define the code needed before returning to user mode, for 60 * trap and syscall. 61 * 62 * MPSAFE 63 */ 64void |
userret(td, frame, oticks)
	struct thread *td;		/* thread returning to user mode */
	struct trapframe *frame;	/* trap frame; used here only for TRAPF_PC() profiling */
	u_int oticks;			/* ke_sticks value sampled at kernel entry (see ast()) */
{
	struct proc *p = td->td_proc;
	struct kse *ke = td->td_kse;
	struct ksegrp *kg = td->td_ksegrp;
	int sig;

	/* Deliver any signals that became pending while in the kernel. */
	mtx_lock(&Giant);
	PROC_LOCK(p);
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	PROC_UNLOCK(p);
	mtx_unlock(&Giant);

	/* Drop back from kernel priority to the KSE group's user priority. */
	mtx_lock_spin(&sched_lock);
	kg->kg_pri.pri_level = kg->kg_pri.pri_user;
	if (ke->ke_flags & KEF_NEEDRESCHED) {
		/*
		 * Since we are curproc, a clock interrupt could
		 * change our priority without changing run queues
		 * (the running process is not kept on a run queue).
		 * If this happened after we setrunqueue ourselves but
		 * before we switch()'ed, we might not be on the queue
		 * indicated by our priority.
		 */
		DROP_GIANT_NOSWITCH();
		setrunqueue(td);
		p->p_stats->p_ru.ru_nivcsw++;	/* involuntary context switch */
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		/*
		 * Signals may have arrived while we were switched out;
		 * check again before finally returning to user mode.
		 */
		mtx_lock(&Giant);
		PROC_LOCK(p);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
		mtx_unlock(&Giant);
		PROC_UNLOCK(p);
	} else
		mtx_unlock_spin(&sched_lock);

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_sflag & PS_PROFIL) {
		addupc_task(ke, TRAPF_PC(frame),
		    (u_int)(ke->ke_sticks - oticks) * psratio);
	}
}

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(framep)
	struct trapframe *framep;	/* user-mode trap frame for the returning thread */
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct kse *ke = td->td_kse;
	u_int prticks, sticks;
	critical_t s;
	int sflag;
	int flags;
#if defined(DEV_NPX) && !defined(SMP)
	int ucode;
#endif

	/* ASTs are only taken on the way out to user mode. */
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
#ifdef WITNESS
	if (witness_list(td))
		panic("Returning to user mode with mutex(s) held");
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
	/*
	 * Loop until no AST work remains: any AST posted while we are
	 * handling this one re-sets KEF_ASTPENDING, so we go around again.
	 */
	s = critical_enter();
	while ((ke->ke_flags & (KEF_ASTPENDING | KEF_NEEDRESCHED)) != 0) {
		critical_exit(s);
		td->td_frame = framep;
		/*
		 * This updates the p_sflag's for the checks below in one
		 * "atomic" operation with turning off the astpending flag.
		 * If another AST is triggered while we are handling the
		 * AST's saved in sflag, the astpending flag will be set and
		 * we will loop again.
		 * XXXKSE Can't do it atomically in KSE
		 */
		mtx_lock_spin(&sched_lock);
		sticks = ke->ke_sticks;
		sflag = p->p_sflag;
		flags = ke->ke_flags;
		/* Clear the pending-work bits we are about to service. */
		p->p_sflag &= ~(PS_PROFPEND | PS_ALRMPEND);
		ke->ke_flags &= ~(KEF_OWEUPC | KEF_ASTPENDING);
		cnt.v_soft++;		/* statistics: one more soft trap */
		if (flags & KEF_OWEUPC) {
			/*
			 * Post profiling ticks that were deferred because
			 * the profile buffer could not be touched at the
			 * time they accrued.
			 */
			prticks = p->p_stats->p_prof.pr_ticks;
			p->p_stats->p_prof.pr_ticks = 0;
			mtx_unlock_spin(&sched_lock);
			addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks);
		} else
			mtx_unlock_spin(&sched_lock);
		if (sflag & PS_ALRMPEND) {
			/* Pending setitimer(ITIMER_VIRTUAL)-style alarm. */
			PROC_LOCK(p);
			psignal(p, SIGVTALRM);
			PROC_UNLOCK(p);
		}
#if defined(DEV_NPX) && !defined(SMP)
		/*
		 * NOTE(review): seven source lines are elided from this
		 * excerpt at this point (presumably the non-SMP NPX
		 * pending-trap handling that sets `ucode` — confirm
		 * against the full file). The brace below closes that
		 * elided block.
		 */
		}
#endif
		if (sflag & PS_PROFPEND) {
			/* Pending profiling-timer signal. */
			PROC_LOCK(p);
			psignal(p, SIGPROF);
			PROC_UNLOCK(p);
		}

		/* Deliver signals, handle rescheduling, charge profiling time. */
		userret(td, framep, sticks);
		s = critical_enter();
	}
	mtx_assert(&Giant, MA_NOTOWNED);
	/*
	 * We need to keep interrupts disabled so that if any further AST's
	 * come in, the interrupt they come in on will be delayed until we
	 * finish returning to userland. We assume that the return to userland
	 * will perform the equivalent of critical_exit().
	 */
}