subr_syscall.c revision 1127
1/*- 2 * Copyright (c) 1990 The Regents of the University of California. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * the University of Utah, and William Jolitz. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 *	$Id: trap.c,v 1.16 1994/02/01 23:07:35 davidg Exp $
 */

/*
 * 386 Trap and System call handling
 */

#include "isa.h"
#include "npx.h"
#include "ddb.h"
#include "machine/cpu.h"
#include "machine/psl.h"
#include "machine/reg.h"
#include "machine/eflags.h"

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "user.h"
#include "acct.h"
#include "kernel.h"
#ifdef KTRACE
#include "ktrace.h"
#endif

#include "vm/vm_param.h"
#include "vm/pmap.h"
#include "vm/vm_map.h"
#include "vm/vm_user.h"
#include "vm/vm_page.h"
#include "sys/vmmeter.h"

#include "machine/trap.h"

#ifdef __GNUC__

/*
 * Inline accessors for the %gs segment register.
 *
 * The "r" constraint could be "rm" except for fatal bugs in gas.  As usual,
 * we omit the size from the mov instruction to avoid nonfatal bugs in gas.
 */
#define	read_gs()	({ u_short gs; __asm("mov %%gs,%0" : "=r" (gs)); gs; })
#define	write_gs(newgs)	__asm("mov %0,%%gs" : : "r" ((u_short) newgs))

#else /* not __GNUC__ */

/* Non-GCC builds get these as real functions (defined elsewhere). */
u_short	read_gs __P((void));
void	write_gs __P((/* promoted u_short */ int gs));

#endif /* __GNUC__ */

/* Grow the process's user stack to cover the given address; 0 on failure. */
extern int grow(struct proc *, int);

struct sysent sysent[];		/* system call dispatch table */
int nsysent;			/* number of entries in sysent[] */
extern short cpl;		/* current interrupt priority mask */
extern short netmask, ttymask, biomask;

/*
 * Human-readable names for the trap numbers in machine/trap.h, used by
 * the fatal-trap printout in trap().  Indexed by (type & ~T_USER);
 * MAX_TRAP_MSG is the largest valid index.
 */
#define MAX_TRAP_MSG	27
char *trap_msg[] = {
	"reserved addressing fault",		/*  0 T_RESADFLT */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"reserved operand fault",		/*  2 T_RESOPFLT */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"system call trap",			/*  5 T_SYSCALL */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"segmentation (limit) fault",		/*  8 T_SEGFLT */
	"protection fault",			/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"page table fault",			/* 13 T_TABLEFLT */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"kernel stack pointer not valid",	/* 15 T_KSPNOTVAL */
	"bus error",				/* 16 T_BUSERR */
	"kernel debugger fault",		/* 17 T_KDBTRAP */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
};

/* Is the page-directory entry covering kernel virtual address v valid? */
#define pde_v(v)	(PTD[((v)>>PD_SHIFT)&1023].pd_v)

/*
 * trap(frame):
 *	Exception, fault, and trap interface to BSD kernel. This
 * common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed. Note that the
 * effect is as if the arguments were passed call by reference.
 *
 * User-mode traps normally end in trapsignal(); kernel-mode faults
 * either recover via curpcb->pcb_onfault, are resolved by vm_fault(),
 * or panic via we_re_toast.
 */

/*ARGSUSED*/
void
trap(frame)
	struct trapframe frame;
{
	register int i;				/* signal to deliver, if any */
	register struct proc *p = curproc;
	struct timeval syst;			/* system time at entry, for profiling */
	int ucode, type, code, eva, fault_type;

	frame.tf_eflags &= ~PSL_NT;	/* clear nested trap XXX */
	type = frame.tf_trapno;
#if NDDB > 0
	/* Give the kernel debugger first crack at breakpoints/single-steps
	 * taken while a copyin/copyout fault handler is armed. */
	if (curpcb && curpcb->pcb_onfault) {
		if (frame.tf_trapno == T_BPTFLT
		    || frame.tf_trapno == T_TRCTRAP)
			if (kdb_trap (type, 0, &frame))
				return;
	}
#endif

	/* Very early in boot there may be no pcb/proc yet; skip straight
	 * to fault classification. */
	if (curpcb == 0 || curproc == 0)
		goto skiptoswitch;
	if (curpcb->pcb_onfault && frame.tf_trapno != T_PAGEFLT) {
		extern int _udatasel;

		if (read_gs() != (u_short) _udatasel)
			/*
			 * Some user has corrupted %gs but we depend on it in
			 * copyout() etc. Fix it up and retry.
			 *
			 * (We don't preserve %fs or %gs, so users can change
			 * them to either _ucodesel, _udatasel or a not-present
			 * selector, possibly ORed with 0 to 3, making them
			 * volatile for other users. Not preserving them saves
			 * time and doesn't lose functionality or open security
			 * holes.)
			 */
			write_gs(_udatasel);
		else
copyfault:
			/* Resume at the registered recovery address; also the
			 * target of failed kernel-mode page faults below. */
			frame.tf_eip = (int)curpcb->pcb_onfault;
		return;
	}

	syst = p->p_stime;
	if (ISPL(frame.tf_cs) == SEL_UPL) {
		/* Trap came from user mode: tag the type and publish the
		 * register frame for signal delivery/ptrace. */
		type |= T_USER;
		p->p_regs = (int *)&frame;
	}

skiptoswitch:
	ucode=0;
	/* %cr2 holds the faulting linear address; only meaningful when this
	 * trap is a page fault, but cheap to read unconditionally. */
	eva = rcr2();
	code = frame.tf_err;

	if ((type & ~T_USER) == T_PAGEFLT)
		goto pfault;

	switch (type) {
	case T_SEGNPFLT|T_USER:
	case T_STKFLT|T_USER:
	case T_PROTFLT|T_USER:		/* protection fault */
		ucode = code + BUS_SEGM_FAULT ;
		i = SIGBUS;
		break;

	case T_PRIVINFLT|T_USER:	/* privileged instruction fault */
	case T_RESADFLT|T_USER:		/* reserved addressing fault */
	case T_RESOPFLT|T_USER:		/* reserved operand fault */
	case T_FPOPFLT|T_USER:		/* coprocessor operand fault */
		ucode = type &~ T_USER;
		i = SIGILL;
		break;

	case T_ASTFLT|T_USER:		/* Allow process switch */
		astoff();
		cnt.v_soft++;
		/* Deferred profiling tick requested by statclock. */
		if ((p->p_flag & SOWEUPC) && p->p_stats->p_prof.pr_scale) {
			addupc(frame.tf_eip, &p->p_stats->p_prof, 1);
			p->p_flag &= ~SOWEUPC;
		}
		goto out;

	case T_DNA|T_USER:
#if NNPX > 0
		/* if a transparent fault (due to context switch "late") */
		if (npxdna()) return;
#endif	/* NNPX > 0 */
#ifdef	MATH_EMULATE
		/* math_emulate() returns 0 on success, else a signal number. */
		i = math_emulate(&frame);
		if (i == 0) return;
#else	/* MATH_EMULTATE */
		panic("trap: math emulation necessary!");
#endif	/* MATH_EMULTATE */
		ucode = FPE_FPU_NP_TRAP;
		break;

	case T_BOUND|T_USER:
		ucode = FPE_SUBRNG_TRAP;
		i = SIGFPE;
		break;

	case T_OFLOW|T_USER:
		ucode = FPE_INTOVF_TRAP;
		i = SIGFPE;
		break;

	case T_DIVIDE|T_USER:
		ucode = FPE_INTDIV_TRAP;
		i = SIGFPE;
		break;

	case T_ARITHTRAP|T_USER:
		ucode = code;
		i = SIGFPE;
		break;

	pfault:
	case T_PAGEFLT:			/* allow page faults in kernel mode */
	case T_PAGEFLT|T_USER:		/* page fault */
	    {
		vm_offset_t va;
		struct vmspace *vm;
		vm_map_t map = 0;
		int rv = 0, oldflags;
		vm_prot_t ftype;
		unsigned nss, v;	/* NOTE(review): nss appears unused here */
		extern vm_map_t kernel_map;

		va = trunc_page((vm_offset_t)eva);

		/*
		 * Don't allow user-mode faults in kernel address space
		 */
		if ((type == (T_PAGEFLT|T_USER)) && (va >= KERNBASE)) {
			goto nogo;
		}

		/* Kernel-mode faults above KERNBASE (or faults with no
		 * process) are resolved against the kernel map. */
		if ((p == 0) || (type == T_PAGEFLT && va >= KERNBASE)) {
			vm = 0;
			map = kernel_map;
		} else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		/* PGEX_W in the error code means the access was a write. */
		if (code & PGEX_W)
			ftype = VM_PROT_READ | VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

		oldflags = p->p_flag;
		if (map != kernel_map) {
			vm_offset_t pa;
			vm_offset_t v = (vm_offset_t) vtopte(va);

			/*
			 * Keep swapout from messing with us during this
			 *	critical time.
			 */
			p->p_flag |= SLOCK;

			/*
			 * Grow the stack if necessary
			 */
			if ((caddr_t)va > vm->vm_maxsaddr
			    && (caddr_t)va < (caddr_t)USRSTACK) {
				if (!grow(p, va)) {
					rv = KERN_FAILURE;
					/* restore SLOCK to its entry state */
					p->p_flag &= ~SLOCK;
					p->p_flag |= (oldflags & SLOCK);
					goto nogo;
				}
			}

			/*
			 * Check if page table is mapped, if not,
			 *	fault it first
			 */

			/* Fault the pte only if needed: */
			*(volatile char *)v += 0;

			/* Get the physical address: */
			pa = pmap_extract(vm_map_pmap(map), v);

			/* And wire the pte page at system vm level: */
			vm_page_wire(PHYS_TO_VM_PAGE(pa));

			/* Fault in the user page: */
			rv = vm_fault(map, va, ftype, FALSE);

			/* Unwire the pte page: */
			vm_page_unwire(PHYS_TO_VM_PAGE(pa));

			p->p_flag &= ~SLOCK;
			p->p_flag |= (oldflags & SLOCK);
		} else {
			/*
			 * Since we know that kernel virtual address addresses
			 * always have pte pages mapped, we just have to fault
			 * the page.
			 */
			rv = vm_fault(map, va, ftype, FALSE);
		}

		if (rv == KERN_SUCCESS) {
			if (type == T_PAGEFLT)
				return;
			goto out;
		}
nogo:
		if (type == T_PAGEFLT) {
			/* Unresolvable kernel fault: recover via onfault
			 * handler if armed, otherwise panic. */
			if (curpcb->pcb_onfault)
				goto copyfault;

			goto we_re_toast;
		}
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;

		/* kludge to pass faulting virtual address to sendsig */
		ucode = type &~ T_USER;
		frame.tf_err = eva;

		break;
	    }

#if NDDB == 0
	case T_TRCTRAP:	 /* trace trap -- someone single stepping lcall's */
		frame.tf_eflags &= ~PSL_T;

			/* Q: how do we turn it on again? */
		return;
#endif

	case T_BPTFLT|T_USER:		/* bpt instruction fault */
	case T_TRCTRAP|T_USER:		/* trace trap */
		frame.tf_eflags &= ~PSL_T;
		i = SIGTRAP;
		break;

#if	NISA > 0
	case T_NMI:
	case T_NMI|T_USER:
#if NDDB > 0
		/* NMI can be hooked up to a pushbutton for debugging */
		printf ("NMI ... going to debugger\n");
		if (kdb_trap (type, 0, &frame))
			return;
#endif
		/* machine/parity/power fail/"kitchen sink" faults */
		if (isa_nmi(code) == 0) return;
		/* FALL THROUGH */
#endif
	default:
	we_re_toast:
		/* Fatal trap: dump as much state as we can, give the
		 * debugger a chance, then panic. */
		fault_type = type & ~T_USER;
		if (fault_type <= MAX_TRAP_MSG)
			printf("\n\nFatal trap %d: %s while in %s mode\n",
				fault_type, trap_msg[fault_type],
				ISPL(frame.tf_cs) == SEL_UPL ? "user" : "kernel");
		if (fault_type == T_PAGEFLT) {
			printf("fault virtual address = 0x%x\n", eva);
			printf("fault code = %s %s, %s\n",
				code & PGEX_U ? "user" : "supervisor",
				code & PGEX_W ? "write" : "read",
				code & PGEX_P ? "protection violation" : "page not present");
		}
		printf("instruction pointer = 0x%x\n", frame.tf_eip);
		printf("processor eflags = ");
		if (frame.tf_eflags & EFL_TF)
			printf("trace/trap, ");
		if (frame.tf_eflags & EFL_IF)
			printf("interrupt enabled, ");
		if (frame.tf_eflags & EFL_NT)
			printf("nested task, ");
		if (frame.tf_eflags & EFL_RF)
			printf("resume, ");
		if (frame.tf_eflags & EFL_VM)
			printf("vm86, ");
		printf("IOPL = %d\n", (frame.tf_eflags & EFL_IOPL) >> 12);
		printf("current process = ");
		if (curproc) {
			printf("%d (%s)\n",
			    curproc->p_pid, curproc->p_comm ?
			    curproc->p_comm : "");
		} else {
			printf("Idle\n");
		}
		printf("interrupt mask = ");
		if ((cpl & netmask) == netmask)
			printf("net ");
		if ((cpl & ttymask) == ttymask)
			printf("tty ");
		if ((cpl & biomask) == biomask)
			printf("bio ");
		if (cpl == 0)
			printf("none");
		printf("\n");

#ifdef KDB
		if (kdb_trap(&psl))
			return;
#endif
#if NDDB > 0
		if (kdb_trap (type, 0, &frame))
			return;
#endif
		if (fault_type <= MAX_TRAP_MSG)
			panic(trap_msg[fault_type]);
		else
			panic("unknown/reserved trap");

		/* NOT REACHED */
	}

	/* Deliver the signal chosen above; kernel-mode traps are done. */
	trapsignal(p, i, ucode);
	if ((type & T_USER) == 0)
		return;
out:
	/* Return-to-user processing: pending signals, rescheduling,
	 * and profiling -- mirrors the tail of syscall(). */
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		int s;
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
	if (p->p_stats->p_prof.pr_scale) {
		int ticks;
		struct timeval *tv = &p->p_stime;

		/* Charge system time consumed during the trap to the
		 * profiling buffer at the trapped pc. */
		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks) {
#ifdef PROFTIMER
			extern int profscale;
			addupc(frame.tf_eip, &p->p_stats->p_prof,
			    ticks * profscale);
#else
			addupc(frame.tf_eip, &p->p_stats->p_prof, ticks);
#endif
		}
	}
	curpri = p->p_pri;
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
504 */ 505int trapwrite(addr) 506 unsigned addr; 507{ 508 unsigned nss; 509 struct proc *p; 510 vm_offset_t va, v; 511 struct vmspace *vm; 512 int oldflags; 513 int rv; 514 515 va = trunc_page((vm_offset_t)addr); 516 /* 517 * XXX - MAX is END. Changed > to >= for temp. fix. 518 */ 519 if (va >= VM_MAXUSER_ADDRESS) 520 return (1); 521 522 p = curproc; 523 vm = p->p_vmspace; 524 525 oldflags = p->p_flag; 526 p->p_flag |= SLOCK; 527 528 if ((caddr_t)va >= vm->vm_maxsaddr 529 && (caddr_t)va < (caddr_t)USRSTACK) { 530 if (!grow(p, va)) { 531 p->p_flag &= ~SLOCK; 532 p->p_flag |= (oldflags & SLOCK); 533 return (1); 534 } 535 } 536 537 v = trunc_page(vtopte(va)); 538 539 /* 540 * wire the pte page 541 */ 542 if (va < USRSTACK) { 543 vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE); 544 } 545 546 /* 547 * fault the data page 548 */ 549 rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE); 550 551 /* 552 * unwire the pte page 553 */ 554 if (va < USRSTACK) { 555 vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE); 556 } 557 558 p->p_flag &= ~SLOCK; 559 p->p_flag |= (oldflags & SLOCK); 560 561 if (rv != KERN_SUCCESS) 562 return 1; 563 564 return (0); 565} 566 567/* 568 * syscall(frame): 569 * System call request from POSIX system call gate interface to kernel. 570 * Like trap(), argument is call by reference. 
571 */ 572/*ARGSUSED*/ 573void 574syscall(frame) 575 volatile struct trapframe frame; 576{ 577 register int *locr0 = ((int *)&frame); 578 register caddr_t params; 579 register int i; 580 register struct sysent *callp; 581 register struct proc *p = curproc; 582 struct timeval syst; 583 int error, opc; 584 int args[8], rval[2]; 585 int code; 586 587#ifdef lint 588 r0 = 0; r0 = r0; r1 = 0; r1 = r1; 589#endif 590 syst = p->p_stime; 591 if (ISPL(frame.tf_cs) != SEL_UPL) 592 panic("syscall"); 593 594 code = frame.tf_eax; 595 p->p_regs = (int *)&frame; 596 params = (caddr_t)frame.tf_esp + sizeof (int) ; 597 598 /* 599 * Reconstruct pc, assuming lcall $X,y is 7 bytes, as it is always. 600 */ 601 opc = frame.tf_eip - 7; 602 if (code == 0) { 603 code = fuword(params); 604 params += sizeof (int); 605 } 606 if (code < 0 || code >= nsysent) 607 callp = &sysent[0]; 608 else 609 callp = &sysent[code]; 610 611 if ((i = callp->sy_narg * sizeof (int)) && 612 (error = copyin(params, (caddr_t)args, (u_int)i))) { 613 frame.tf_eax = error; 614 frame.tf_eflags |= PSL_C; /* carry bit */ 615#ifdef KTRACE 616 if (KTRPOINT(p, KTR_SYSCALL)) 617 ktrsyscall(p->p_tracep, code, callp->sy_narg, args); 618#endif 619 goto done; 620 } 621#ifdef KTRACE 622 if (KTRPOINT(p, KTR_SYSCALL)) 623 ktrsyscall(p->p_tracep, code, callp->sy_narg, args); 624#endif 625 rval[0] = 0; 626 rval[1] = frame.tf_edx; 627/*pg("%d. s %d\n", p->p_pid, code);*/ 628 error = (*callp->sy_call)(p, args, rval); 629 if (error == ERESTART) 630 frame.tf_eip = opc; 631 else if (error != EJUSTRETURN) { 632 if (error) { 633/*pg("error %d", error);*/ 634 frame.tf_eax = error; 635 frame.tf_eflags |= PSL_C; /* carry bit */ 636 } else { 637 frame.tf_eax = rval[0]; 638 frame.tf_edx = rval[1]; 639 frame.tf_eflags &= ~PSL_C; /* carry bit */ 640 } 641 } 642 /* else if (error == EJUSTRETURN) */ 643 /* nothing to do */ 644done: 645 /* 646 * Reinitialize proc pointer `p' as it may be different 647 * if this is a child returning from fork syscall. 
648 */ 649 p = curproc; 650 while (i = CURSIG(p)) 651 psig(i); 652 p->p_pri = p->p_usrpri; 653 if (want_resched) { 654 int s; 655 /* 656 * Since we are curproc, clock will normally just change 657 * our priority without moving us from one queue to another 658 * (since the running process is not on a queue.) 659 * If that happened after we setrq ourselves but before we 660 * swtch()'ed, we might not be on the queue indicated by 661 * our priority. 662 */ 663 s = splclock(); 664 setrq(p); 665 p->p_stats->p_ru.ru_nivcsw++; 666 swtch(); 667 splx(s); 668 while (i = CURSIG(p)) 669 psig(i); 670 } 671 if (p->p_stats->p_prof.pr_scale) { 672 int ticks; 673 struct timeval *tv = &p->p_stime; 674 675 ticks = ((tv->tv_sec - syst.tv_sec) * 1000 + 676 (tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000); 677 if (ticks) { 678#ifdef PROFTIMER 679 extern int profscale; 680 addupc(frame.tf_eip, &p->p_stats->p_prof, 681 ticks * profscale); 682#else 683 addupc(frame.tf_eip, &p->p_stats->p_prof, ticks); 684#endif 685 } 686 } 687 curpri = p->p_pri; 688#ifdef KTRACE 689 if (KTRPOINT(p, KTR_SYSRET)) 690 ktrsysret(p->p_tracep, code, error, rval[0]); 691#endif 692#ifdef DIAGNOSTICx 693{ extern int _udatasel, _ucodesel; 694 if (frame.tf_ss != _udatasel) 695 printf("ss %x call %d\n", frame.tf_ss, code); 696 if ((frame.tf_cs&0xffff) != _ucodesel) 697 printf("cs %x call %d\n", frame.tf_cs, code); 698 if (frame.tf_eip > VM_MAXUSER_ADDRESS) { 699 printf("eip %x call %d\n", frame.tf_eip, code); 700 frame.tf_eip = 0; 701 } 702} 703#endif 704} 705