--- subr_syscall.c	(79222)
+++ subr_syscall.c	(81493)
 /*-
  * Copyright (C) 1994, David Greenman
  * Copyright (c) 1990, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * the University of Utah, and William Jolitz.
  *
 --- 21 unchanged lines hidden ---
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
- * $FreeBSD: head/sys/kern/subr_trap.c 79222 2001-07-04 15:36:30Z dillon $
+ * $FreeBSD: head/sys/kern/subr_trap.c 81493 2001-08-10 22:53:32Z jhb $
  */

 #ifdef __i386__
 #include "opt_npx.h"
 #endif

 #include <sys/param.h>
 #include <sys/bus.h>
 --- 11 unchanged lines hidden ---
 /*
  * Define the code needed before returning to user mode, for
  * trap and syscall.
  */
 void
 userret(p, frame, oticks)
 	struct proc *p;
 	struct trapframe *frame;
-	u_quad_t oticks;
+	u_int oticks;
 {
 	int sig;

 	mtx_lock(&Giant);
 	PROC_LOCK(p);
 	while ((sig = CURSIG(p)) != 0)
 		postsig(sig);
 	mtx_unlock(&Giant);
+	PROC_UNLOCK(p);

 	mtx_lock_spin(&sched_lock);
-	PROC_UNLOCK_NOSWITCH(p);
 	p->p_pri.pri_level = p->p_pri.pri_user;
-	if (resched_wanted(p)) {
+	if (p->p_sflag & PS_NEEDRESCHED) {
 		/*
 		 * Since we are curproc, a clock interrupt could
 		 * change our priority without changing run queues
 		 * (the running process is not kept on a run queue).
 		 * If this happened after we setrunqueue ourselves but
 		 * before we switch()'ed, we might not be on the queue
 		 * indicated by our priority.
 		 */
 		DROP_GIANT_NOSWITCH();
 		setrunqueue(p);
 		p->p_stats->p_ru.ru_nivcsw++;
 		mi_switch();
 		mtx_unlock_spin(&sched_lock);
 		PICKUP_GIANT();
 		mtx_lock(&Giant);
 		PROC_LOCK(p);
 		while ((sig = CURSIG(p)) != 0)
 			postsig(sig);
 		mtx_unlock(&Giant);
-		mtx_lock_spin(&sched_lock);
-		PROC_UNLOCK_NOSWITCH(p);
-	}
+		PROC_UNLOCK(p);
+	} else
+		mtx_unlock_spin(&sched_lock);

 	/*
 	 * Charge system time if profiling.
 	 */
-	if (p->p_sflag & PS_PROFIL) {
-		mtx_unlock_spin(&sched_lock);
+	if (p->p_sflag & PS_PROFIL)
 		addupc_task(p, TRAPF_PC(frame),
-		    (u_int)(p->p_sticks - oticks) * psratio);
-	} else
-		mtx_unlock_spin(&sched_lock);
+		    ((u_int)p->p_sticks - oticks) * psratio);
 }

 /*
  * Process an asynchronous software trap.
  * This is relatively easy.
+ * This function will return with preemption disabled.
  */
 void
 ast(framep)
 	struct trapframe *framep;
 {
 	struct proc *p = CURPROC;
-	u_quad_t sticks;
+	u_int prticks, sticks;
+	critical_t s;
+	int sflag;
 #if defined(DEV_NPX) && !defined(SMP)
 	int ucode;
 #endif

 	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
-
-	/*
-	 * We check for a pending AST here rather than in the assembly as
-	 * acquiring and releasing mutexes in assembly is not fun.
-	 */
-	mtx_lock_spin(&sched_lock);
-	if (!(astpending(p) || resched_wanted(p))) {
-		mtx_unlock_spin(&sched_lock);
-		return;
-	}
-
-	sticks = p->p_sticks;
-	p->p_frame = framep;
-
-	astoff(p);
-	cnt.v_soft++;
-	mtx_intr_enable(&sched_lock);
-	if (p->p_sflag & PS_OWEUPC) {
-		p->p_sflag &= ~PS_OWEUPC;
-		mtx_unlock_spin(&sched_lock);
-		mtx_lock(&Giant);
-		addupc_task(p, p->p_stats->p_prof.pr_addr,
-		    p->p_stats->p_prof.pr_ticks);
+#ifdef WITNESS
+	if (witness_list(p))
+		panic("Returning to user mode with mutex(s) held");
+#endif
+	mtx_assert(&Giant, MA_NOTOWNED);
+	s = critical_enter();
+	while ((p->p_sflag & (PS_ASTPENDING | PS_NEEDRESCHED)) != 0) {
+		critical_exit(s);
+		p->p_frame = framep;
+		/*
+		 * This updates the p_sflag's for the checks below in one
+		 * "atomic" operation with turning off the astpending flag.
+		 * If another AST is triggered while we are handling the
+		 * AST's saved in sflag, the astpending flag will be set and
+		 * we will loop again.
+		 */
 		mtx_lock_spin(&sched_lock);
-	}
-	if (p->p_sflag & PS_ALRMPEND) {
-		p->p_sflag &= ~PS_ALRMPEND;
-		mtx_unlock_spin(&sched_lock);
-		PROC_LOCK(p);
-		psignal(p, SIGVTALRM);
-		PROC_UNLOCK(p);
-		mtx_lock_spin(&sched_lock);
-	}
+		sticks = p->p_sticks;
+		sflag = p->p_sflag;
+		p->p_sflag &= ~(PS_OWEUPC | PS_ALRMPEND | PS_PROFPEND |
+		    PS_ASTPENDING);
+		cnt.v_soft++;
+		if (sflag & PS_OWEUPC) {
+			prticks = p->p_stats->p_prof.pr_ticks;
+			p->p_stats->p_prof.pr_ticks = 0;
+			mtx_unlock_spin(&sched_lock);
+			addupc_task(p, p->p_stats->p_prof.pr_addr, prticks);
+		} else
+			mtx_unlock_spin(&sched_lock);
+		if (sflag & PS_ALRMPEND) {
+			PROC_LOCK(p);
+			psignal(p, SIGVTALRM);
+			PROC_UNLOCK(p);
+		}
 #if defined(DEV_NPX) && !defined(SMP)
-	if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
-		PCPU_GET(curpcb)->pcb_flags &= ~PCB_NPXTRAP;
-		mtx_unlock_spin(&sched_lock);
-		ucode = npxtrap();
-		if (ucode != -1) {
-			if (!mtx_owned(&Giant))
+		if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
+			atomic_clear_char(&PCPU_GET(curpcb)->pcb_flags,
+			    PCB_NPXTRAP);
+			ucode = npxtrap();
+			if (ucode != -1) {
 				mtx_lock(&Giant);
-			trapsignal(p, SIGFPE, ucode);
-		}
-		mtx_lock_spin(&sched_lock);
-	}
+				trapsignal(p, SIGFPE, ucode);
+			}
+		}
 #endif
-	if (p->p_sflag & PS_PROFPEND) {
-		p->p_sflag &= ~PS_PROFPEND;
-		mtx_unlock_spin(&sched_lock);
-		PROC_LOCK(p);
-		psignal(p, SIGPROF);
-		PROC_UNLOCK(p);
-	} else
-		mtx_unlock_spin(&sched_lock);
+		if (sflag & PS_PROFPEND) {
+			PROC_LOCK(p);
+			psignal(p, SIGPROF);
+			PROC_UNLOCK(p);
+		}

-	userret(p, framep, sticks);
-
-	if (mtx_owned(&Giant))
-		mtx_unlock(&Giant);
+		userret(p, framep, sticks);
+		if (mtx_owned(&Giant))
+			mtx_unlock(&Giant);
+		s = critical_enter();
+	}
+	mtx_assert(&Giant, MA_NOTOWNED);
+	/*
+	 * We need to keep interrupts disabled so that if any further AST's
+	 * come in, the interrupt they come in on will be delayed until we
+	 * finish returning to userland.  We assume that the return to userland
+	 * will perform the equivalent of critical_exit().
+	 */
 }
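
A few notes on what this revision changes, with small illustrative sketches.

The heart of the new ast() is a snapshot-and-loop pattern: the pending-event bits are copied out of p_sflag and cleared in a single step under sched_lock, the snapshot is handled with the lock dropped, and the outer while loop re-tests PS_ASTPENDING so that any event posted during handling is seen on the next pass, exactly as the in-line comment says. Below is a minimal userspace sketch of the same pattern, assuming a pthread mutex as a stand-in for sched_lock and made-up event names (none of this is kernel API); unlike the kernel, the pending test is folded into the locked snapshot so the toy version has no unlocked reads:

#include <pthread.h>
#include <stdio.h>

#define	EV_ALARM	0x1		/* stand-in for PS_ALRMPEND */
#define	EV_PROF		0x2		/* stand-in for PS_PROFPEND */
#define	EV_PENDING	0x4		/* stand-in for PS_ASTPENDING */

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static int event_flags;			/* stand-in for p->p_sflag */

/* Posting side: record the event and mark that something is pending. */
static void
post_event(int ev)
{

	pthread_mutex_lock(&event_lock);
	event_flags |= ev | EV_PENDING;
	pthread_mutex_unlock(&event_lock);
}

/* Handling side: mirrors the loop structure ast() gains in r81493. */
static void
handle_events(void)
{
	int sflag;

	for (;;) {
		/*
		 * Snapshot and clear the flags in one locked step, the
		 * way ast() clears PS_OWEUPC/PS_ALRMPEND/PS_PROFPEND/
		 * PS_ASTPENDING under sched_lock.
		 */
		pthread_mutex_lock(&event_lock);
		sflag = event_flags;
		event_flags = 0;
		pthread_mutex_unlock(&event_lock);

		if ((sflag & EV_PENDING) == 0)
			break;

		/*
		 * Handle the snapshot with the lock dropped.  An event
		 * posted here sets EV_PENDING again, so the next pass
		 * of the loop sees it rather than losing it.
		 */
		if (sflag & EV_ALARM)
			printf("alarm\n");
		if (sflag & EV_PROF)
			printf("profile\n");
	}
}

int
main(void)
{

	post_event(EV_ALARM);
	post_event(EV_PROF);
	handle_events();
	return (0);
}

The point of clearing all the bits in one step is that a poster never has to wonder which pass will consume its event: either its bit was swept into a snapshot that is still being processed, or EV_PENDING is set and another pass is guaranteed.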
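In userret(), oticks narrows from u_quad_t to u_int and the profiling charge becomes ((u_int)p->p_sticks - oticks) * psratio. The narrowing is safe for a difference of tick counts: unsigned subtraction is defined modulo 2^32, so the delta comes out right even if the counter wraps between syscall entry and the return to user mode, provided fewer than 2^32 ticks elapse in between. A small standalone demonstration with hypothetical counter values:

#include <sys/types.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical statclock counts; the counter wraps in between. */
	u_int oticks = 0xfffffffeU;	/* at syscall entry */
	u_int sticks = 0x00000003U;	/* at return to user mode */

	/*
	 * 3 - 4294967294 is taken modulo 2^32, giving the 5 ticks
	 * that actually elapsed.
	 */
	printf("%u\n", sticks - oticks);	/* prints 5 */
	return (0);
}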
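Finally, the contract stated by the new closing comment (ast() returns with preemption disabled) is easier to see with the event handling stripped away: the function re-enters a critical section before every test of the flags and deliberately leaves it held when the loop exits. The sketch below is only that skeleton, with toy stand-ins for the primitives; note it models this era's interface, where critical_enter() returns a critical_t cookie (later FreeBSD changed it to a void critical_enter()/critical_exit() pair):

#include <stdio.h>

typedef int critical_t;		/* toy stand-in for the real type */

static int intr_disabled;	/* toy stand-in for the CPU interrupt state */

/* Toy stand-ins for the kernel's critical section primitives. */
static critical_t
critical_enter(void)
{
	critical_t s = intr_disabled;

	intr_disabled = 1;
	return (s);
}

static void
critical_exit(critical_t s)
{

	intr_disabled = s;
}

static int pending = 2;		/* pretend two ASTs are queued */

int
main(void)
{
	critical_t s;

	s = critical_enter();		/* "interrupts off" */
	while (pending != 0) {
		critical_exit(s);	/* reopen while handling */
		printf("handling one AST\n");
		pending--;
		s = critical_enter();	/* close again before re-testing */
	}
	/*
	 * Exit the loop with the critical section still held, as the
	 * real ast() does.
	 */
	printf("returning with intr_disabled=%d\n", intr_disabled);
	return (0);
}

The real code never performs the matching critical_exit() itself; as the comment in the diff says, it trusts the return-to-userland path to do the equivalent on its way out.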