Deleted Added
full compact
subr_trap.c (1307) subr_trap.c (1321)
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * the University of Utah, and William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * the University of Utah, and William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
37 * $Id: trap.c,v 1.19 1994/03/14 21:54:03 davidg Exp $
37 * $Id: trap.c,v 1.20 1994/03/24 23:12:34 davidg Exp $
38 */
39
40/*
41 * 386 Trap and System call handling
42 */
43
44#include "isa.h"
45#include "npx.h"
46#include "ddb.h"
47#include "machine/cpu.h"
48#include "machine/psl.h"
49#include "machine/reg.h"
50#include "machine/eflags.h"
51
52#include "param.h"
53#include "systm.h"
54#include "proc.h"
55#include "user.h"
56#include "acct.h"
57#include "kernel.h"
58#ifdef KTRACE
59#include "ktrace.h"
60#endif
61
62#include "vm/vm_param.h"
63#include "vm/pmap.h"
64#include "vm/vm_map.h"
65#include "vm/vm_user.h"
66#include "vm/vm_page.h"
67#include "sys/vmmeter.h"
68
69#include "machine/trap.h"
70
71#ifdef __GNUC__
72
73/*
74 * The "r" constraint could be "rm" except for fatal bugs in gas. As usual,
75 * we omit the size from the mov instruction to avoid nonfatal bugs in gas.
76 */
77#define read_gs() ({ u_short gs; __asm("mov %%gs,%0" : "=r" (gs)); gs; })
78#define write_gs(newgs) __asm("mov %0,%%gs" : : "r" ((u_short) newgs))
79
80#else /* not __GNUC__ */
81
82u_short read_gs __P((void));
83void write_gs __P((/* promoted u_short */ int gs));
84
85#endif /* __GNUC__ */
86
87extern int grow(struct proc *,int);
88
89struct sysent sysent[];
90int nsysent;
38 */
39
40/*
41 * 386 Trap and System call handling
42 */
43
44#include "isa.h"
45#include "npx.h"
46#include "ddb.h"
47#include "machine/cpu.h"
48#include "machine/psl.h"
49#include "machine/reg.h"
50#include "machine/eflags.h"
51
52#include "param.h"
53#include "systm.h"
54#include "proc.h"
55#include "user.h"
56#include "acct.h"
57#include "kernel.h"
58#ifdef KTRACE
59#include "ktrace.h"
60#endif
61
62#include "vm/vm_param.h"
63#include "vm/pmap.h"
64#include "vm/vm_map.h"
65#include "vm/vm_user.h"
66#include "vm/vm_page.h"
67#include "sys/vmmeter.h"
68
69#include "machine/trap.h"
70
71#ifdef __GNUC__
72
73/*
74 * The "r" constraint could be "rm" except for fatal bugs in gas. As usual,
75 * we omit the size from the mov instruction to avoid nonfatal bugs in gas.
76 */
77#define read_gs() ({ u_short gs; __asm("mov %%gs,%0" : "=r" (gs)); gs; })
78#define write_gs(newgs) __asm("mov %0,%%gs" : : "r" ((u_short) newgs))
79
80#else /* not __GNUC__ */
81
82u_short read_gs __P((void));
83void write_gs __P((/* promoted u_short */ int gs));
84
85#endif /* __GNUC__ */
86
87extern int grow(struct proc *,int);
88
89struct sysent sysent[];
90int nsysent;
91extern unsigned cpl;
92extern unsigned netmask, ttymask, biomask;
93
94#define MAX_TRAP_MSG 27
95char *trap_msg[] = {
96 "reserved addressing fault", /* 0 T_RESADFLT */
97 "privileged instruction fault", /* 1 T_PRIVINFLT */
98 "reserved operand fault", /* 2 T_RESOPFLT */
99 "breakpoint instruction fault", /* 3 T_BPTFLT */
100 "", /* 4 unused */
101 "system call trap", /* 5 T_SYSCALL */
102 "arithmetic trap", /* 6 T_ARITHTRAP */
103 "system forced exception", /* 7 T_ASTFLT */
104 "segmentation (limit) fault", /* 8 T_SEGFLT */
105 "protection fault", /* 9 T_PROTFLT */
106 "trace trap", /* 10 T_TRCTRAP */
107 "", /* 11 unused */
108 "page fault", /* 12 T_PAGEFLT */
109 "page table fault", /* 13 T_TABLEFLT */
110 "alignment fault", /* 14 T_ALIGNFLT */
111 "kernel stack pointer not valid", /* 15 T_KSPNOTVAL */
112 "bus error", /* 16 T_BUSERR */
113 "kernel debugger fault", /* 17 T_KDBTRAP */
114 "integer divide fault", /* 18 T_DIVIDE */
115 "non-maskable interrupt trap", /* 19 T_NMI */
116 "overflow trap", /* 20 T_OFLOW */
117 "FPU bounds check fault", /* 21 T_BOUND */
118 "FPU device not available", /* 22 T_DNA */
119 "double fault", /* 23 T_DOUBLEFLT */
120 "FPU operand fetch fault", /* 24 T_FPOPFLT */
121 "invalid TSS fault", /* 25 T_TSSFLT */
122 "segment not present fault", /* 26 T_SEGNPFLT */
123 "stack fault", /* 27 T_STKFLT */
124};
125
126#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
127
128/*
129 * trap(frame):
130 * Exception, fault, and trap interface to BSD kernel. This
131 * common code is called from assembly language IDT gate entry
132 * routines that prepare a suitable stack frame, and restore this
133 * frame after the exception has been processed. Note that the
134 * effect is as if the arguments were passed call by reference.
135 */
136
137/*ARGSUSED*/
138void
139trap(frame)
140 struct trapframe frame;
141{
142 register int i;
143 register struct proc *p = curproc;
144 struct timeval syst;
145 int ucode, type, code, eva, fault_type;
146
147 frame.tf_eflags &= ~PSL_NT; /* clear nested trap XXX */
148 type = frame.tf_trapno;
149#if NDDB > 0
150 if (curpcb && curpcb->pcb_onfault) {
151 if (frame.tf_trapno == T_BPTFLT
152 || frame.tf_trapno == T_TRCTRAP)
153 if (kdb_trap (type, 0, &frame))
154 return;
155 }
156#endif
157
158 if (curpcb == 0 || curproc == 0)
159 goto skiptoswitch;
160 if (curpcb->pcb_onfault && frame.tf_trapno != T_PAGEFLT) {
161 extern int _udatasel;
162
163 if (read_gs() != (u_short) _udatasel)
164 /*
165 * Some user has corrupted %gs but we depend on it in
166 * copyout() etc. Fix it up and retry.
167 *
168 * (We don't preserve %fs or %gs, so users can change
169 * them to either _ucodesel, _udatasel or a not-present
170 * selector, possibly ORed with 0 to 3, making them
171 * volatile for other users. Not preserving them saves
172 * time and doesn't lose functionality or open security
173 * holes.)
174 */
175 write_gs(_udatasel);
176 else
177copyfault:
178 frame.tf_eip = (int)curpcb->pcb_onfault;
179 return;
180 }
181
182 syst = p->p_stime;
183 if (ISPL(frame.tf_cs) == SEL_UPL) {
184 type |= T_USER;
185 p->p_regs = (int *)&frame;
186 }
187
188skiptoswitch:
189 ucode=0;
190 eva = rcr2();
191 code = frame.tf_err;
192
193 if ((type & ~T_USER) == T_PAGEFLT)
194 goto pfault;
195
196 switch (type) {
197 case T_SEGNPFLT|T_USER:
198 case T_STKFLT|T_USER:
199 case T_PROTFLT|T_USER: /* protection fault */
200 ucode = code + BUS_SEGM_FAULT ;
201 i = SIGBUS;
202 break;
203
204 case T_PRIVINFLT|T_USER: /* privileged instruction fault */
205 case T_RESADFLT|T_USER: /* reserved addressing fault */
206 case T_RESOPFLT|T_USER: /* reserved operand fault */
207 case T_FPOPFLT|T_USER: /* coprocessor operand fault */
208 ucode = type &~ T_USER;
209 i = SIGILL;
210 break;
211
212 case T_ASTFLT|T_USER: /* Allow process switch */
213 astoff();
214 cnt.v_soft++;
215 if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
216 addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
217 p->p_flag &= ~SOWEUPC;
218 }
219 goto out;
220
221 case T_DNA|T_USER:
222#if NNPX > 0
223 /* if a transparent fault (due to context switch "late") */
224 if (npxdna()) return;
225#endif /* NNPX > 0 */
226#ifdef MATH_EMULATE
227 i = math_emulate(&frame);
228 if (i == 0) return;
91
92#define MAX_TRAP_MSG 27
93char *trap_msg[] = {
94 "reserved addressing fault", /* 0 T_RESADFLT */
95 "privileged instruction fault", /* 1 T_PRIVINFLT */
96 "reserved operand fault", /* 2 T_RESOPFLT */
97 "breakpoint instruction fault", /* 3 T_BPTFLT */
98 "", /* 4 unused */
99 "system call trap", /* 5 T_SYSCALL */
100 "arithmetic trap", /* 6 T_ARITHTRAP */
101 "system forced exception", /* 7 T_ASTFLT */
102 "segmentation (limit) fault", /* 8 T_SEGFLT */
103 "protection fault", /* 9 T_PROTFLT */
104 "trace trap", /* 10 T_TRCTRAP */
105 "", /* 11 unused */
106 "page fault", /* 12 T_PAGEFLT */
107 "page table fault", /* 13 T_TABLEFLT */
108 "alignment fault", /* 14 T_ALIGNFLT */
109 "kernel stack pointer not valid", /* 15 T_KSPNOTVAL */
110 "bus error", /* 16 T_BUSERR */
111 "kernel debugger fault", /* 17 T_KDBTRAP */
112 "integer divide fault", /* 18 T_DIVIDE */
113 "non-maskable interrupt trap", /* 19 T_NMI */
114 "overflow trap", /* 20 T_OFLOW */
115 "FPU bounds check fault", /* 21 T_BOUND */
116 "FPU device not available", /* 22 T_DNA */
117 "double fault", /* 23 T_DOUBLEFLT */
118 "FPU operand fetch fault", /* 24 T_FPOPFLT */
119 "invalid TSS fault", /* 25 T_TSSFLT */
120 "segment not present fault", /* 26 T_SEGNPFLT */
121 "stack fault", /* 27 T_STKFLT */
122};
123
124#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
125
126/*
127 * trap(frame):
128 * Exception, fault, and trap interface to BSD kernel. This
129 * common code is called from assembly language IDT gate entry
130 * routines that prepare a suitable stack frame, and restore this
131 * frame after the exception has been processed. Note that the
132 * effect is as if the arguments were passed call by reference.
133 */
134
135/*ARGSUSED*/
136void
137trap(frame)
138 struct trapframe frame;
139{
140 register int i;
141 register struct proc *p = curproc;
142 struct timeval syst;
143 int ucode, type, code, eva, fault_type;
144
145 frame.tf_eflags &= ~PSL_NT; /* clear nested trap XXX */
146 type = frame.tf_trapno;
147#if NDDB > 0
148 if (curpcb && curpcb->pcb_onfault) {
149 if (frame.tf_trapno == T_BPTFLT
150 || frame.tf_trapno == T_TRCTRAP)
151 if (kdb_trap (type, 0, &frame))
152 return;
153 }
154#endif
155
156 if (curpcb == 0 || curproc == 0)
157 goto skiptoswitch;
158 if (curpcb->pcb_onfault && frame.tf_trapno != T_PAGEFLT) {
159 extern int _udatasel;
160
161 if (read_gs() != (u_short) _udatasel)
162 /*
163 * Some user has corrupted %gs but we depend on it in
164 * copyout() etc. Fix it up and retry.
165 *
166 * (We don't preserve %fs or %gs, so users can change
167 * them to either _ucodesel, _udatasel or a not-present
168 * selector, possibly ORed with 0 to 3, making them
169 * volatile for other users. Not preserving them saves
170 * time and doesn't lose functionality or open security
171 * holes.)
172 */
173 write_gs(_udatasel);
174 else
175copyfault:
176 frame.tf_eip = (int)curpcb->pcb_onfault;
177 return;
178 }
179
180 syst = p->p_stime;
181 if (ISPL(frame.tf_cs) == SEL_UPL) {
182 type |= T_USER;
183 p->p_regs = (int *)&frame;
184 }
185
186skiptoswitch:
187 ucode=0;
188 eva = rcr2();
189 code = frame.tf_err;
190
191 if ((type & ~T_USER) == T_PAGEFLT)
192 goto pfault;
193
194 switch (type) {
195 case T_SEGNPFLT|T_USER:
196 case T_STKFLT|T_USER:
197 case T_PROTFLT|T_USER: /* protection fault */
198 ucode = code + BUS_SEGM_FAULT ;
199 i = SIGBUS;
200 break;
201
202 case T_PRIVINFLT|T_USER: /* privileged instruction fault */
203 case T_RESADFLT|T_USER: /* reserved addressing fault */
204 case T_RESOPFLT|T_USER: /* reserved operand fault */
205 case T_FPOPFLT|T_USER: /* coprocessor operand fault */
206 ucode = type &~ T_USER;
207 i = SIGILL;
208 break;
209
210 case T_ASTFLT|T_USER: /* Allow process switch */
211 astoff();
212 cnt.v_soft++;
213 if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
214 addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
215 p->p_flag &= ~SOWEUPC;
216 }
217 goto out;
218
219 case T_DNA|T_USER:
220#if NNPX > 0
221 /* if a transparent fault (due to context switch "late") */
222 if (npxdna()) return;
223#endif /* NNPX > 0 */
224#ifdef MATH_EMULATE
225 i = math_emulate(&frame);
226 if (i == 0) return;
229#else /* MATH_EMULTATE */
227#else /* MATH_EMULATE */
230 panic("trap: math emulation necessary!");
228 panic("trap: math emulation necessary!");
231#endif /* MATH_EMULTATE */
229#endif /* MATH_EMULATE */
232 ucode = FPE_FPU_NP_TRAP;
233 break;
234
235 case T_BOUND|T_USER:
236 ucode = FPE_SUBRNG_TRAP;
237 i = SIGFPE;
238 break;
239
240 case T_OFLOW|T_USER:
241 ucode = FPE_INTOVF_TRAP;
242 i = SIGFPE;
243 break;
244
245 case T_DIVIDE|T_USER:
246 ucode = FPE_INTDIV_TRAP;
247 i = SIGFPE;
248 break;
249
250 case T_ARITHTRAP|T_USER:
251 ucode = code;
252 i = SIGFPE;
253 break;
254
255 pfault:
256 case T_PAGEFLT: /* allow page faults in kernel mode */
257 case T_PAGEFLT|T_USER: /* page fault */
258 {
259 vm_offset_t va;
260 struct vmspace *vm;
261 vm_map_t map = 0;
262 int rv = 0, oldflags;
263 vm_prot_t ftype;
230 ucode = FPE_FPU_NP_TRAP;
231 break;
232
233 case T_BOUND|T_USER:
234 ucode = FPE_SUBRNG_TRAP;
235 i = SIGFPE;
236 break;
237
238 case T_OFLOW|T_USER:
239 ucode = FPE_INTOVF_TRAP;
240 i = SIGFPE;
241 break;
242
243 case T_DIVIDE|T_USER:
244 ucode = FPE_INTDIV_TRAP;
245 i = SIGFPE;
246 break;
247
248 case T_ARITHTRAP|T_USER:
249 ucode = code;
250 i = SIGFPE;
251 break;
252
253 pfault:
254 case T_PAGEFLT: /* allow page faults in kernel mode */
255 case T_PAGEFLT|T_USER: /* page fault */
256 {
257 vm_offset_t va;
258 struct vmspace *vm;
259 vm_map_t map = 0;
260 int rv = 0, oldflags;
261 vm_prot_t ftype;
264 unsigned nss, v;
262 unsigned v;
265 extern vm_map_t kernel_map;
266
267 va = trunc_page((vm_offset_t)eva);
268
269 /*
270 * Don't allow user-mode faults in kernel address space
271 */
272 if ((type == (T_PAGEFLT|T_USER)) && (va >= KERNBASE)) {
273 goto nogo;
274 }
275
276 if ((p == 0) || (type == T_PAGEFLT && va >= KERNBASE)) {
277 vm = 0;
278 map = kernel_map;
279 } else {
280 vm = p->p_vmspace;
281 map = &vm->vm_map;
282 }
283
284 if (code & PGEX_W)
285 ftype = VM_PROT_READ | VM_PROT_WRITE;
286 else
287 ftype = VM_PROT_READ;
288
289 oldflags = p->p_flag;
290 if (map != kernel_map) {
291 vm_offset_t pa;
292 vm_offset_t v = (vm_offset_t) vtopte(va);
293 vm_page_t ptepg;
294
295 /*
296 * Keep swapout from messing with us during this
297 * critical time.
298 */
299 p->p_flag |= SLOCK;
300
301 /*
302 * Grow the stack if necessary
303 */
304 if ((caddr_t)va > vm->vm_maxsaddr
305 && (caddr_t)va < (caddr_t)USRSTACK) {
306 if (!grow(p, va)) {
307 rv = KERN_FAILURE;
308 p->p_flag &= ~SLOCK;
309 p->p_flag |= (oldflags & SLOCK);
310 goto nogo;
311 }
312 }
313
314 /*
315 * Check if page table is mapped, if not,
316 * fault it first
317 */
318
319 /* Fault the pte only if needed: */
320 *(volatile char *)v += 0;
321
322 ptepg = (vm_page_t) pmap_pte_vm_page(vm_map_pmap(map), v);
323 vm_page_hold(ptepg);
324
325 /* Fault in the user page: */
326 rv = vm_fault(map, va, ftype, FALSE);
327
328 vm_page_unhold(ptepg);
329
330 /*
331 * page table pages don't need to be kept if they
332 * are not held
333 */
334 if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
335 pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
336 VM_PROT_NONE);
337 if( ptepg->flags & PG_CLEAN)
338 vm_page_free(ptepg);
339 }
340
341
342 p->p_flag &= ~SLOCK;
343 p->p_flag |= (oldflags & SLOCK);
344 } else {
345 /*
346 * Since we know that kernel virtual addresses
347 * always have pte pages mapped, we just have to fault
348 * the page.
349 */
350 rv = vm_fault(map, va, ftype, FALSE);
351 }
352
353 if (rv == KERN_SUCCESS) {
354 if (type == T_PAGEFLT)
355 return;
356 goto out;
357 }
358nogo:
359 if (type == T_PAGEFLT) {
360 if (curpcb->pcb_onfault)
361 goto copyfault;
362
363 goto we_re_toast;
364 }
365 i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
366
367 /* kludge to pass faulting virtual address to sendsig */
368 ucode = type &~ T_USER;
369 frame.tf_err = eva;
370
371 break;
372 }
373
374#if NDDB == 0
375 case T_TRCTRAP: /* trace trap -- someone single stepping lcall's */
376 frame.tf_eflags &= ~PSL_T;
377
378 /* Q: how do we turn it on again? */
379 return;
380#endif
381
382 case T_BPTFLT|T_USER: /* bpt instruction fault */
383 case T_TRCTRAP|T_USER: /* trace trap */
384 frame.tf_eflags &= ~PSL_T;
385 i = SIGTRAP;
386 break;
387
388#if NISA > 0
389 case T_NMI:
390 case T_NMI|T_USER:
391#if NDDB > 0
392 /* NMI can be hooked up to a pushbutton for debugging */
393 printf ("NMI ... going to debugger\n");
394 if (kdb_trap (type, 0, &frame))
395 return;
396#endif
397 /* machine/parity/power fail/"kitchen sink" faults */
398 if (isa_nmi(code) == 0) return;
399 /* FALL THROUGH */
400#endif
401 default:
402 we_re_toast:
403
404 fault_type = type & ~T_USER;
405 if (fault_type <= MAX_TRAP_MSG)
406 printf("\n\nFatal trap %d: %s while in %s mode\n",
407 fault_type, trap_msg[fault_type],
408 ISPL(frame.tf_cs) == SEL_UPL ? "user" : "kernel");
409 if (fault_type == T_PAGEFLT) {
410 printf("fault virtual address = 0x%x\n", eva);
411 printf("fault code = %s %s, %s\n",
412 code & PGEX_U ? "user" : "supervisor",
413 code & PGEX_W ? "write" : "read",
414 code & PGEX_P ? "protection violation" : "page not present");
415 }
416 printf("instruction pointer = 0x%x\n", frame.tf_eip);
417 printf("processor eflags = ");
418 if (frame.tf_eflags & EFL_TF)
419 printf("trace/trap, ");
420 if (frame.tf_eflags & EFL_IF)
421 printf("interrupt enabled, ");
422 if (frame.tf_eflags & EFL_NT)
423 printf("nested task, ");
424 if (frame.tf_eflags & EFL_RF)
425 printf("resume, ");
426 if (frame.tf_eflags & EFL_VM)
427 printf("vm86, ");
428 printf("IOPL = %d\n", (frame.tf_eflags & EFL_IOPL) >> 12);
429 printf("current process = ");
430 if (curproc) {
431 printf("%d (%s)\n",
432 curproc->p_pid, curproc->p_comm ?
433 curproc->p_comm : "");
434 } else {
435 printf("Idle\n");
436 }
437 printf("interrupt mask = ");
263 extern vm_map_t kernel_map;
264
265 va = trunc_page((vm_offset_t)eva);
266
267 /*
268 * Don't allow user-mode faults in kernel address space
269 */
270 if ((type == (T_PAGEFLT|T_USER)) && (va >= KERNBASE)) {
271 goto nogo;
272 }
273
274 if ((p == 0) || (type == T_PAGEFLT && va >= KERNBASE)) {
275 vm = 0;
276 map = kernel_map;
277 } else {
278 vm = p->p_vmspace;
279 map = &vm->vm_map;
280 }
281
282 if (code & PGEX_W)
283 ftype = VM_PROT_READ | VM_PROT_WRITE;
284 else
285 ftype = VM_PROT_READ;
286
287 oldflags = p->p_flag;
288 if (map != kernel_map) {
289 vm_offset_t pa;
290 vm_offset_t v = (vm_offset_t) vtopte(va);
291 vm_page_t ptepg;
292
293 /*
294 * Keep swapout from messing with us during this
295 * critical time.
296 */
297 p->p_flag |= SLOCK;
298
299 /*
300 * Grow the stack if necessary
301 */
302 if ((caddr_t)va > vm->vm_maxsaddr
303 && (caddr_t)va < (caddr_t)USRSTACK) {
304 if (!grow(p, va)) {
305 rv = KERN_FAILURE;
306 p->p_flag &= ~SLOCK;
307 p->p_flag |= (oldflags & SLOCK);
308 goto nogo;
309 }
310 }
311
312 /*
313 * Check if page table is mapped, if not,
314 * fault it first
315 */
316
317 /* Fault the pte only if needed: */
318 *(volatile char *)v += 0;
319
320 ptepg = (vm_page_t) pmap_pte_vm_page(vm_map_pmap(map), v);
321 vm_page_hold(ptepg);
322
323 /* Fault in the user page: */
324 rv = vm_fault(map, va, ftype, FALSE);
325
326 vm_page_unhold(ptepg);
327
328 /*
329 * page table pages don't need to be kept if they
330 * are not held
331 */
332 if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
333 pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
334 VM_PROT_NONE);
335 if( ptepg->flags & PG_CLEAN)
336 vm_page_free(ptepg);
337 }
338
339
340 p->p_flag &= ~SLOCK;
341 p->p_flag |= (oldflags & SLOCK);
342 } else {
343 /*
344 * Since we know that kernel virtual addresses
345 * always have pte pages mapped, we just have to fault
346 * the page.
347 */
348 rv = vm_fault(map, va, ftype, FALSE);
349 }
350
351 if (rv == KERN_SUCCESS) {
352 if (type == T_PAGEFLT)
353 return;
354 goto out;
355 }
356nogo:
357 if (type == T_PAGEFLT) {
358 if (curpcb->pcb_onfault)
359 goto copyfault;
360
361 goto we_re_toast;
362 }
363 i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
364
365 /* kludge to pass faulting virtual address to sendsig */
366 ucode = type &~ T_USER;
367 frame.tf_err = eva;
368
369 break;
370 }
371
372#if NDDB == 0
373 case T_TRCTRAP: /* trace trap -- someone single stepping lcall's */
374 frame.tf_eflags &= ~PSL_T;
375
376 /* Q: how do we turn it on again? */
377 return;
378#endif
379
380 case T_BPTFLT|T_USER: /* bpt instruction fault */
381 case T_TRCTRAP|T_USER: /* trace trap */
382 frame.tf_eflags &= ~PSL_T;
383 i = SIGTRAP;
384 break;
385
386#if NISA > 0
387 case T_NMI:
388 case T_NMI|T_USER:
389#if NDDB > 0
390 /* NMI can be hooked up to a pushbutton for debugging */
391 printf ("NMI ... going to debugger\n");
392 if (kdb_trap (type, 0, &frame))
393 return;
394#endif
395 /* machine/parity/power fail/"kitchen sink" faults */
396 if (isa_nmi(code) == 0) return;
397 /* FALL THROUGH */
398#endif
399 default:
400 we_re_toast:
401
402 fault_type = type & ~T_USER;
403 if (fault_type <= MAX_TRAP_MSG)
404 printf("\n\nFatal trap %d: %s while in %s mode\n",
405 fault_type, trap_msg[fault_type],
406 ISPL(frame.tf_cs) == SEL_UPL ? "user" : "kernel");
407 if (fault_type == T_PAGEFLT) {
408 printf("fault virtual address = 0x%x\n", eva);
409 printf("fault code = %s %s, %s\n",
410 code & PGEX_U ? "user" : "supervisor",
411 code & PGEX_W ? "write" : "read",
412 code & PGEX_P ? "protection violation" : "page not present");
413 }
414 printf("instruction pointer = 0x%x\n", frame.tf_eip);
415 printf("processor eflags = ");
416 if (frame.tf_eflags & EFL_TF)
417 printf("trace/trap, ");
418 if (frame.tf_eflags & EFL_IF)
419 printf("interrupt enabled, ");
420 if (frame.tf_eflags & EFL_NT)
421 printf("nested task, ");
422 if (frame.tf_eflags & EFL_RF)
423 printf("resume, ");
424 if (frame.tf_eflags & EFL_VM)
425 printf("vm86, ");
426 printf("IOPL = %d\n", (frame.tf_eflags & EFL_IOPL) >> 12);
427 printf("current process = ");
428 if (curproc) {
429 printf("%d (%s)\n",
430 curproc->p_pid, curproc->p_comm ?
431 curproc->p_comm : "");
432 } else {
433 printf("Idle\n");
434 }
435 printf("interrupt mask = ");
438 if ((cpl & netmask) == netmask)
436 if ((cpl & net_imask) == net_imask)
439 printf("net ");
437 printf("net ");
440 if ((cpl & ttymask) == ttymask)
438 if ((cpl & tty_imask) == tty_imask)
441 printf("tty ");
439 printf("tty ");
442 if ((cpl & biomask) == biomask)
440 if ((cpl & bio_imask) == bio_imask)
443 printf("bio ");
444 if (cpl == 0)
445 printf("none");
446 printf("\n");
447
448#ifdef KDB
449 if (kdb_trap(&psl))
450 return;
451#endif
452#if NDDB > 0
453 if (kdb_trap (type, 0, &frame))
454 return;
455#endif
456 if (fault_type <= MAX_TRAP_MSG)
457 panic(trap_msg[fault_type]);
458 else
459 panic("unknown/reserved trap");
460
461 /* NOTREACHED */
462 }
463
464 trapsignal(p, i, ucode);
465 if ((type & T_USER) == 0)
466 return;
467out:
468 while (i = CURSIG(p))
469 psig(i);
470 p->p_pri = p->p_usrpri;
471 if (want_resched) {
472 int s;
473 /*
474 * Since we are curproc, clock will normally just change
475 * our priority without moving us from one queue to another
476 * (since the running process is not on a queue.)
477 * If that happened after we setrq ourselves but before we
478 * swtch()'ed, we might not be on the queue indicated by
479 * our priority.
480 */
481 s = splclock();
482 setrq(p);
483 p->p_stats->p_ru.ru_nivcsw++;
484 swtch();
485 splx(s);
486 while (i = CURSIG(p))
487 psig(i);
488 }
489 if (p->p_stats->p_prof.pr_scale) {
490 int ticks;
491 struct timeval *tv = &p->p_stime;
492
493 ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
494 (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
495 if (ticks) {
496#ifdef PROFTIMER
497 extern int profscale;
498 addupc(frame.tf_eip, &p->p_stats->p_prof,
499 ticks * profscale);
500#else
501 addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
502#endif
503 }
504 }
505 curpri = p->p_pri;
506}
507
508/*
509 * Compensate for 386 brain damage (missing URKR).
510 * This is a little simpler than the pagefault handler in trap() because
511 * the page tables have already been faulted in and high addresses
512 * are thrown out early for other reasons.
513 */
514int trapwrite(addr)
515 unsigned addr;
516{
441 printf("bio ");
442 if (cpl == 0)
443 printf("none");
444 printf("\n");
445
446#ifdef KDB
447 if (kdb_trap(&psl))
448 return;
449#endif
450#if NDDB > 0
451 if (kdb_trap (type, 0, &frame))
452 return;
453#endif
454 if (fault_type <= MAX_TRAP_MSG)
455 panic(trap_msg[fault_type]);
456 else
457 panic("unknown/reserved trap");
458
459 /* NOTREACHED */
460 }
461
462 trapsignal(p, i, ucode);
463 if ((type & T_USER) == 0)
464 return;
465out:
466 while (i = CURSIG(p))
467 psig(i);
468 p->p_pri = p->p_usrpri;
469 if (want_resched) {
470 int s;
471 /*
472 * Since we are curproc, clock will normally just change
473 * our priority without moving us from one queue to another
474 * (since the running process is not on a queue.)
475 * If that happened after we setrq ourselves but before we
476 * swtch()'ed, we might not be on the queue indicated by
477 * our priority.
478 */
479 s = splclock();
480 setrq(p);
481 p->p_stats->p_ru.ru_nivcsw++;
482 swtch();
483 splx(s);
484 while (i = CURSIG(p))
485 psig(i);
486 }
487 if (p->p_stats->p_prof.pr_scale) {
488 int ticks;
489 struct timeval *tv = &p->p_stime;
490
491 ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
492 (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
493 if (ticks) {
494#ifdef PROFTIMER
495 extern int profscale;
496 addupc(frame.tf_eip, &p->p_stats->p_prof,
497 ticks * profscale);
498#else
499 addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
500#endif
501 }
502 }
503 curpri = p->p_pri;
504}
505
506/*
507 * Compensate for 386 brain damage (missing URKR).
508 * This is a little simpler than the pagefault handler in trap() because
509 * the page tables have already been faulted in and high addresses
510 * are thrown out early for other reasons.
511 */
512int trapwrite(addr)
513 unsigned addr;
514{
517 unsigned nss;
518 struct proc *p;
519 vm_offset_t va, v;
520 struct vmspace *vm;
521 int oldflags;
522 int rv;
523
524 va = trunc_page((vm_offset_t)addr);
525 /*
526 * XXX - MAX is END. Changed > to >= for temp. fix.
527 */
528 if (va >= VM_MAXUSER_ADDRESS)
529 return (1);
530
531 p = curproc;
532 vm = p->p_vmspace;
533
534 oldflags = p->p_flag;
535 p->p_flag |= SLOCK;
536
537 if ((caddr_t)va >= vm->vm_maxsaddr
538 && (caddr_t)va < (caddr_t)USRSTACK) {
539 if (!grow(p, va)) {
540 p->p_flag &= ~SLOCK;
541 p->p_flag |= (oldflags & SLOCK);
542 return (1);
543 }
544 }
545
546 v = trunc_page(vtopte(va));
547
548 /*
549 * wire the pte page
550 */
551 if (va < USRSTACK) {
552 vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
553 }
554
555 /*
556 * fault the data page
557 */
558 rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
559
560 /*
561 * unwire the pte page
562 */
563 if (va < USRSTACK) {
564 vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
565 }
566
567 p->p_flag &= ~SLOCK;
568 p->p_flag |= (oldflags & SLOCK);
569
570 if (rv != KERN_SUCCESS)
571 return 1;
572
573 return (0);
574}
575
576/*
577 * syscall(frame):
578 * System call request from POSIX system call gate interface to kernel.
579 * Like trap(), argument is call by reference.
580 */
581/*ARGSUSED*/
582void
583syscall(frame)
584 volatile struct trapframe frame;
585{
586 register int *locr0 = ((int *)&frame);
587 register caddr_t params;
588 register int i;
589 register struct sysent *callp;
590 register struct proc *p = curproc;
591 struct timeval syst;
592 int error, opc;
593 int args[8], rval[2];
594 int code;
595
596#ifdef lint
597 r0 = 0; r0 = r0; r1 = 0; r1 = r1;
598#endif
599 syst = p->p_stime;
600 if (ISPL(frame.tf_cs) != SEL_UPL)
601 panic("syscall");
602
603 code = frame.tf_eax;
604 p->p_regs = (int *)&frame;
605 params = (caddr_t)frame.tf_esp + sizeof (int) ;
606
607 /*
608 * Reconstruct pc, assuming lcall $X,y is 7 bytes, as it is always.
609 */
610 opc = frame.tf_eip - 7;
611 if (code == 0) {
612 code = fuword(params);
613 params += sizeof (int);
614 }
615 if (code < 0 || code >= nsysent)
616 callp = &sysent[0];
617 else
618 callp = &sysent[code];
619
620 if ((i = callp->sy_narg * sizeof (int)) &&
621 (error = copyin(params, (caddr_t)args, (u_int)i))) {
622 frame.tf_eax = error;
623 frame.tf_eflags |= PSL_C; /* carry bit */
624#ifdef KTRACE
625 if (KTRPOINT(p, KTR_SYSCALL))
626 ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
627#endif
628 goto done;
629 }
630#ifdef KTRACE
631 if (KTRPOINT(p, KTR_SYSCALL))
632 ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
633#endif
634 rval[0] = 0;
635 rval[1] = frame.tf_edx;
636/*pg("%d. s %d\n", p->p_pid, code);*/
637 error = (*callp->sy_call)(p, args, rval);
638 if (error == ERESTART)
639 frame.tf_eip = opc;
640 else if (error != EJUSTRETURN) {
641 if (error) {
642/*pg("error %d", error);*/
643 frame.tf_eax = error;
644 frame.tf_eflags |= PSL_C; /* carry bit */
645 } else {
646 frame.tf_eax = rval[0];
647 frame.tf_edx = rval[1];
648 frame.tf_eflags &= ~PSL_C; /* carry bit */
649 }
650 }
651 /* else if (error == EJUSTRETURN) */
652 /* nothing to do */
653done:
654 /*
655 * Reinitialize proc pointer `p' as it may be different
656 * if this is a child returning from fork syscall.
657 */
658 p = curproc;
659 while (i = CURSIG(p))
660 psig(i);
661 p->p_pri = p->p_usrpri;
662 if (want_resched) {
663 int s;
664 /*
665 * Since we are curproc, clock will normally just change
666 * our priority without moving us from one queue to another
667 * (since the running process is not on a queue.)
668 * If that happened after we setrq ourselves but before we
669 * swtch()'ed, we might not be on the queue indicated by
670 * our priority.
671 */
672 s = splclock();
673 setrq(p);
674 p->p_stats->p_ru.ru_nivcsw++;
675 swtch();
676 splx(s);
677 while (i = CURSIG(p))
678 psig(i);
679 }
680 if (p->p_stats->p_prof.pr_scale) {
681 int ticks;
682 struct timeval *tv = &p->p_stime;
683
684 ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
685 (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
686 if (ticks) {
687#ifdef PROFTIMER
688 extern int profscale;
689 addupc(frame.tf_eip, &p->p_stats->p_prof,
690 ticks * profscale);
691#else
692 addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
693#endif
694 }
695 }
696 curpri = p->p_pri;
697#ifdef KTRACE
698 if (KTRPOINT(p, KTR_SYSRET))
699 ktrsysret(p->p_tracep, code, error, rval[0]);
700#endif
701#ifdef DIAGNOSTICx
702{ extern int _udatasel, _ucodesel;
703 if (frame.tf_ss != _udatasel)
704 printf("ss %x call %d\n", frame.tf_ss, code);
705 if ((frame.tf_cs&0xffff) != _ucodesel)
706 printf("cs %x call %d\n", frame.tf_cs, code);
707 if (frame.tf_eip > VM_MAXUSER_ADDRESS) {
708 printf("eip %x call %d\n", frame.tf_eip, code);
709 frame.tf_eip = 0;
710 }
711}
712#endif
713}
515 struct proc *p;
516 vm_offset_t va, v;
517 struct vmspace *vm;
518 int oldflags;
519 int rv;
520
521 va = trunc_page((vm_offset_t)addr);
522 /*
523 * XXX - MAX is END. Changed > to >= for temp. fix.
524 */
525 if (va >= VM_MAXUSER_ADDRESS)
526 return (1);
527
528 p = curproc;
529 vm = p->p_vmspace;
530
531 oldflags = p->p_flag;
532 p->p_flag |= SLOCK;
533
534 if ((caddr_t)va >= vm->vm_maxsaddr
535 && (caddr_t)va < (caddr_t)USRSTACK) {
536 if (!grow(p, va)) {
537 p->p_flag &= ~SLOCK;
538 p->p_flag |= (oldflags & SLOCK);
539 return (1);
540 }
541 }
542
543 v = trunc_page(vtopte(va));
544
545 /*
546 * wire the pte page
547 */
548 if (va < USRSTACK) {
549 vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
550 }
551
552 /*
553 * fault the data page
554 */
555 rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
556
557 /*
558 * unwire the pte page
559 */
560 if (va < USRSTACK) {
561 vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
562 }
563
564 p->p_flag &= ~SLOCK;
565 p->p_flag |= (oldflags & SLOCK);
566
567 if (rv != KERN_SUCCESS)
568 return 1;
569
570 return (0);
571}
572
573/*
574 * syscall(frame):
575 * System call request from POSIX system call gate interface to kernel.
576 * Like trap(), argument is call by reference.
577 */
578/*ARGSUSED*/
579void
580syscall(frame)
581 volatile struct trapframe frame;
582{
583 register int *locr0 = ((int *)&frame);
584 register caddr_t params;
585 register int i;
586 register struct sysent *callp;
587 register struct proc *p = curproc;
588 struct timeval syst;
589 int error, opc;
590 int args[8], rval[2];
591 int code;
592
593#ifdef lint
594 r0 = 0; r0 = r0; r1 = 0; r1 = r1;
595#endif
596 syst = p->p_stime;
597 if (ISPL(frame.tf_cs) != SEL_UPL)
598 panic("syscall");
599
600 code = frame.tf_eax;
601 p->p_regs = (int *)&frame;
602 params = (caddr_t)frame.tf_esp + sizeof (int) ;
603
604 /*
605 * Reconstruct pc, assuming lcall $X,y is 7 bytes, as it is always.
606 */
607 opc = frame.tf_eip - 7;
608 if (code == 0) {
609 code = fuword(params);
610 params += sizeof (int);
611 }
612 if (code < 0 || code >= nsysent)
613 callp = &sysent[0];
614 else
615 callp = &sysent[code];
616
617 if ((i = callp->sy_narg * sizeof (int)) &&
618 (error = copyin(params, (caddr_t)args, (u_int)i))) {
619 frame.tf_eax = error;
620 frame.tf_eflags |= PSL_C; /* carry bit */
621#ifdef KTRACE
622 if (KTRPOINT(p, KTR_SYSCALL))
623 ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
624#endif
625 goto done;
626 }
627#ifdef KTRACE
628 if (KTRPOINT(p, KTR_SYSCALL))
629 ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
630#endif
631 rval[0] = 0;
632 rval[1] = frame.tf_edx;
633/*pg("%d. s %d\n", p->p_pid, code);*/
634 error = (*callp->sy_call)(p, args, rval);
635 if (error == ERESTART)
636 frame.tf_eip = opc;
637 else if (error != EJUSTRETURN) {
638 if (error) {
639/*pg("error %d", error);*/
640 frame.tf_eax = error;
641 frame.tf_eflags |= PSL_C; /* carry bit */
642 } else {
643 frame.tf_eax = rval[0];
644 frame.tf_edx = rval[1];
645 frame.tf_eflags &= ~PSL_C; /* carry bit */
646 }
647 }
648 /* else if (error == EJUSTRETURN) */
649 /* nothing to do */
650done:
651 /*
652 * Reinitialize proc pointer `p' as it may be different
653 * if this is a child returning from fork syscall.
654 */
655 p = curproc;
656 while (i = CURSIG(p))
657 psig(i);
658 p->p_pri = p->p_usrpri;
659 if (want_resched) {
660 int s;
661 /*
662 * Since we are curproc, clock will normally just change
663 * our priority without moving us from one queue to another
664 * (since the running process is not on a queue.)
665 * If that happened after we setrq ourselves but before we
666 * swtch()'ed, we might not be on the queue indicated by
667 * our priority.
668 */
669 s = splclock();
670 setrq(p);
671 p->p_stats->p_ru.ru_nivcsw++;
672 swtch();
673 splx(s);
674 while (i = CURSIG(p))
675 psig(i);
676 }
677 if (p->p_stats->p_prof.pr_scale) {
678 int ticks;
679 struct timeval *tv = &p->p_stime;
680
681 ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
682 (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
683 if (ticks) {
684#ifdef PROFTIMER
685 extern int profscale;
686 addupc(frame.tf_eip, &p->p_stats->p_prof,
687 ticks * profscale);
688#else
689 addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
690#endif
691 }
692 }
693 curpri = p->p_pri;
694#ifdef KTRACE
695 if (KTRPOINT(p, KTR_SYSRET))
696 ktrsysret(p->p_tracep, code, error, rval[0]);
697#endif
698#ifdef DIAGNOSTICx
699{ extern int _udatasel, _ucodesel;
700 if (frame.tf_ss != _udatasel)
701 printf("ss %x call %d\n", frame.tf_ss, code);
702 if ((frame.tf_cs&0xffff) != _ucodesel)
703 printf("cs %x call %d\n", frame.tf_cs, code);
704 if (frame.tf_eip > VM_MAXUSER_ADDRESS) {
705 printf("eip %x call %d\n", frame.tf_eip, code);
706 frame.tf_eip = 0;
707 }
708}
709#endif
710}