1/* $NetBSD: trap.c,v 1.187 2011/04/18 00:26:12 rmind Exp $ */ 2 3/* 4 * Copyright (c) 1996 5 * The President and Fellows of Harvard College. All rights reserved. 6 * Copyright (c) 1992, 1993 7 * The Regents of the University of California. All rights reserved. 8 * 9 * This software was developed by the Computer Systems Engineering group 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 11 * contributed to Berkeley. 12 * 13 * All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Lawrence Berkeley Laboratory. 17 * This product includes software developed by Harvard University. 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions 21 * are met: 22 * 1. Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * 2. Redistributions in binary form must reproduce the above copyright 25 * notice, this list of conditions and the following disclaimer in the 26 * documentation and/or other materials provided with the distribution. 27 * 3. All advertising materials mentioning features or use of this software 28 * must display the following acknowledgement: 29 * This product includes software developed by the University of 30 * California, Berkeley and its contributors. 31 * This product includes software developed by Harvard University. 32 * 4. Neither the name of the University nor the names of its contributors 33 * may be used to endorse or promote products derived from this software 34 * without specific prior written permission. 
35 * 36 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 39 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 46 * SUCH DAMAGE. 47 * 48 * @(#)trap.c 8.4 (Berkeley) 9/23/93 49 */ 50 51#include <sys/cdefs.h> 52__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.187 2011/04/18 00:26:12 rmind Exp $"); 53 54#include "opt_ddb.h" 55#include "opt_compat_svr4.h" 56#include "opt_compat_sunos.h" 57#include "opt_sparc_arch.h" 58#include "opt_multiprocessor.h" 59 60#include <sys/param.h> 61#include <sys/systm.h> 62#include <sys/proc.h> 63#include <sys/kernel.h> 64#include <sys/kmem.h> 65#include <sys/resource.h> 66#include <sys/signal.h> 67#include <sys/wait.h> 68#include <sys/sa.h> 69#include <sys/savar.h> 70#include <sys/syscall.h> 71#include <sys/syslog.h> 72#include <sys/kauth.h> 73 74#include <uvm/uvm_extern.h> 75 76#include <sparc/sparc/asm.h> 77#include <machine/cpu.h> 78#include <machine/ctlreg.h> 79#include <machine/trap.h> 80#include <machine/instr.h> 81#include <machine/pcb.h> 82#include <machine/pmap.h> 83#include <machine/userret.h> 84 85#ifdef DDB 86#include <machine/db_machdep.h> 87#else 88#include <machine/frame.h> 89#endif 90#ifdef COMPAT_SVR4 91#include <machine/svr4_machdep.h> 92#endif 93#ifdef COMPAT_SUNOS 94extern struct emul emul_sunos; 95#define SUNOS_MAXSADDR_SLOP (32 * 1024) 96#endif 97 98#include 
#include <sparc/fpu/fpu_extern.h>
#include <sparc/sparc/memreg.h>
#include <sparc/sparc/cpuvar.h>

#ifdef DEBUG
/* Set non-zero to trace register-window save/restore traffic. */
int rwindow_debug = 0;
#endif

/*
 * Initial FPU state is all registers == all 1s, everything else == all 0s.
 * This makes every floating point register a signalling NaN, with sign bit
 * set, no matter how it is interpreted.  Appendix N of the Sparc V8 document
 * seems to imply that we should do this, and it does make sense.
 */
struct fpstate initfpstate = {
	{ ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
	  ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0 },
	0, 0,
};

/*
 * There are more than 100 trap types, but most are unused.
 *
 * Trap type 0 is taken over as an `Asynchronous System Trap'.
 * This is left-over Vax emulation crap that should be fixed.
 */
/* Shared placeholder name for all otherwise-unnamed trap vectors. */
static const char T[] = "trap";
const char *trap_type[] = {
	/* non-user vectors */
	"ast",			/* 0 */
	"text fault",		/* 1 */
	"illegal instruction",	/* 2 */
	"privileged instruction",/*3 */
	"fp disabled",		/* 4 */
	"window overflow",	/* 5 */
	"window underflow",	/* 6 */
	"alignment fault",	/* 7 */
	"fp exception",		/* 8 */
	"data fault",		/* 9 */
	"tag overflow",		/* 0a */
	"watchpoint",		/* 0b */
	T, T, T, T, T,		/* 0c..10 */
	"level 1 int",		/* 11 */
	"level 2 int",		/* 12 */
	"level 3 int",		/* 13 */
	"level 4 int",		/* 14 */
	"level 5 int",		/* 15 */
	"level 6 int",		/* 16 */
	"level 7 int",		/* 17 */
	"level 8 int",		/* 18 */
	"level 9 int",		/* 19 */
	"level 10 int",		/* 1a */
	"level 11 int",		/* 1b */
	"level 12 int",		/* 1c */
	"level 13 int",		/* 1d */
	"level 14 int",		/* 1e */
	"level 15 int",		/* 1f */
	"register access error",/* 20 */
	"instruction access error",/* 21 */
	T, T,			/* 22..23 */
	"cp disabled",		/* 24 */
	"unimplemented flush",	/* 25 */
	T, T,			/* 26..27 */
	"cp exception",		/* 28 */
	"data access error",	/* 29 */
	"hw zero divide",	/* 2a */
	"data store error",	/* 2b */
	"data access MMU miss",	/* 2c */
	T, T, T,		/* 2d..2f */
	T, T, T, T, T, T, T, T,	/* 30..37 */
	T, T, T, T,		/* 38..3b */
	"insn access MMU miss",	/* 3c */
	T, T, T,		/* 3d..3f */
	T, T, T, T, T, T, T, T,	/* 40..47 */
	T, T, T, T, T, T, T, T,	/* 48..4f */
	T, T, T, T, T, T, T, T,	/* 50..57 */
	T, T, T, T, T, T, T, T,	/* 58..5f */
	T, T, T, T, T, T, T, T,	/* 60..67 */
	T, T, T, T, T, T, T, T,	/* 68..6f */
	T, T, T, T, T, T, T, T,	/* 70..77 */
	T, T, T, T, T, T, T, T,	/* 78..7f */

	/* user (software trap) vectors */
	"syscall",		/* 80 */
	"breakpoint",		/* 81 */
	"zero divide",		/* 82 */
	"flush windows",	/* 83 */
	"clean windows",	/* 84 */
	"range check",		/* 85 */
	"fix align",		/* 86 */
	"integer overflow",	/* 87 */
	"svr4 syscall",		/* 88 */
	"4.4 syscall",		/* 89 */
	"kgdb exec",		/* 8a */
	T, T, T, T, T,		/* 8b..8f */
	T, T, T, T, T, T, T, T,	/* 90..97 */
	T, T, T, T, T, T, T, T,	/* 98..9f */
	"svr4 getcc",		/* a0 */
	"svr4 setcc",		/* a1 */
	"svr4 getpsr",		/* a2 */
	"svr4 setpsr",		/* a3 */
	"svr4 gethrtime",	/* a4 */
	"svr4 gethrvtime",	/* a5 */
	T,			/* a6 */
	"svr4 gethrestime",	/* a7 */
};

#define	N_TRAP_TYPES	(sizeof trap_type / sizeof *trap_type)

void trap(unsigned, int, int, struct trapframe *);
void mem_access_fault(unsigned, int, u_int, int, int, struct trapframe *);
void mem_access_fault4m(unsigned, u_int, u_int, struct trapframe *);

/*
 * When non-zero, unrecognized traps of type < 0x80 taken from user mode
 * deliver SIGILL to the process instead of panicking the kernel.
 */
int ignore_bogus_traps = 1;

/*
 * Called from locore.s trap handling, for non-MMU-related traps.
 * (MMU-related traps go through mem_access_fault, below.)
216 */ 217void 218trap(unsigned type, int psr, int pc, struct trapframe *tf) 219{ 220 struct proc *p; 221 struct lwp *l; 222 struct pcb *pcb; 223 int n, s; 224 char bits[64]; 225 u_quad_t sticks; 226 ksiginfo_t ksi; 227 int code, sig; 228 229 /* This steps the PC over the trap. */ 230#define ADVANCE (n = tf->tf_npc, tf->tf_pc = n, tf->tf_npc = n + 4) 231 232 curcpu()->ci_data.cpu_ntrap++; 233 /* 234 * Generally, kernel traps cause a panic. Any exceptions are 235 * handled early here. 236 */ 237 if (psr & PSR_PS) { 238#ifdef DDB 239 if (type == T_BREAKPOINT) { 240 write_all_windows(); 241 if (kdb_trap(type, tf)) { 242 return; 243 } 244 } 245#if defined(MULTIPROCESSOR) 246 if (type == T_DBPAUSE) { 247 /* XXX - deal with kgdb too */ 248 extern void ddb_suspend(struct trapframe *); 249 write_all_windows(); 250 ddb_suspend(tf); 251 ADVANCE; 252 return; 253 } 254#endif 255#endif 256#ifdef DIAGNOSTIC 257 /* 258 * Currently, we allow DIAGNOSTIC kernel code to 259 * flush the windows to record stack traces. 260 */ 261 if (type == T_FLUSHWIN) { 262 write_all_windows(); 263 ADVANCE; 264 return; 265 } 266#endif 267 if (type == T_UNIMPLFLUSH) { 268 /* 269 * This should happen only on hypersparc. 270 * It also is a rare event to get this trap 271 * from kernel space. For now, just flush the 272 * entire I-cache. 273 */ 274#if defined(MULTIPROCESSOR) 275 /* Broadcast to all CPUs */ 276 XCALL0(*cpuinfo.pure_vcache_flush, CPUSET_ALL); 277#else 278 (*cpuinfo.pure_vcache_flush)(); 279#endif 280 ADVANCE; 281 return; 282 } 283 284 /* 285 * Storing %fsr in cpu_attach will cause this trap 286 * even though the fpu has been enabled, if and only 287 * if there is no FPU. 
288 */ 289 if (type == T_FPDISABLED && cold) { 290 ADVANCE; 291 return; 292 } 293 dopanic: 294 snprintb(bits, sizeof(bits), PSR_BITS, psr); 295 printf("trap type 0x%x: pc=0x%x npc=0x%x psr=%s\n", 296 type, pc, tf->tf_npc, bits); 297#ifdef DDB 298 write_all_windows(); 299 (void) kdb_trap(type, tf); 300#endif 301 panic(type < N_TRAP_TYPES ? trap_type[type] : T); 302 /* NOTREACHED */ 303 } 304 if ((l = curlwp) == NULL) 305 l = &lwp0; 306 p = l->l_proc; 307 LWP_CACHE_CREDS(l, p); 308 sticks = p->p_sticks; 309 pcb = lwp_getpcb(l); 310 l->l_md.md_tf = tf; /* for ptrace/signals */ 311 312#ifdef FPU_DEBUG 313 if (type != T_FPDISABLED && (tf->tf_psr & PSR_EF) != 0) { 314 if (cpuinfo.fplwp != l) 315 panic("FPU enabled but wrong proc (0) [l=%p, fwlp=%p]", 316 l, cpuinfo.fplwp); 317 savefpstate(l->l_md.md_fpstate); 318 l->l_md.md_fpu = NULL; 319 cpuinfo.fplwp = NULL; 320 tf->tf_psr &= ~PSR_EF; 321 setpsr(getpsr() & ~PSR_EF); 322 } 323#endif 324 325 sig = 0; 326 327 switch (type) { 328 329 default: 330 if (type < 0x80) { 331 if (!ignore_bogus_traps) 332 goto dopanic; 333 snprintb(bits, sizeof(bits), PSR_BITS, psr); 334 printf("trap type 0x%x: pc=0x%x npc=0x%x psr=%s\n", 335 type, pc, tf->tf_npc, bits); 336 sig = SIGILL; 337 KSI_INIT_TRAP(&ksi); 338 ksi.ksi_trap = type; 339 ksi.ksi_code = ILL_ILLTRP; 340 ksi.ksi_addr = (void *)pc; 341 break; 342 } 343#if defined(COMPAT_SVR4) 344badtrap: 345#endif 346#ifdef DIAGNOSTIC 347 if (type < 0x90 || type > 0x9f) { 348 /* the following message is gratuitous */ 349 /* ... 
but leave it in until we find anything */ 350 uprintf("%s[%d]: unimplemented software trap 0x%x\n", 351 p->p_comm, p->p_pid, type); 352 } 353#endif 354 sig = SIGILL; 355 KSI_INIT_TRAP(&ksi); 356 ksi.ksi_trap = type; 357 ksi.ksi_code = ILL_ILLTRP; 358 ksi.ksi_addr = (void *)pc; 359 break; 360 361#ifdef COMPAT_SVR4 362 case T_SVR4_GETCC: 363 case T_SVR4_SETCC: 364 case T_SVR4_GETPSR: 365 case T_SVR4_SETPSR: 366 case T_SVR4_GETHRTIME: 367 case T_SVR4_GETHRVTIME: 368 case T_SVR4_GETHRESTIME: 369 if (!svr4_trap(type, l)) 370 goto badtrap; 371 break; 372#endif 373 374 case T_AST: 375 break; /* the work is all in userret() */ 376 377 case T_UNIMPLFLUSH: 378 /* Invalidate the entire I-cache */ 379#if defined(MULTIPROCESSOR) 380 /* Broadcast to all CPUs */ 381 XCALL0(*cpuinfo.pure_vcache_flush, CPUSET_ALL); 382#else 383 (*cpuinfo.pure_vcache_flush)(); 384#endif 385 ADVANCE; 386 break; 387 388 case T_ILLINST: 389 /* Note: Cypress generates a T_ILLINST on FLUSH instructions */ 390 if ((sig = emulinstr(pc, tf)) == 0) { 391 ADVANCE; 392 break; 393 } 394 KSI_INIT_TRAP(&ksi); 395 ksi.ksi_trap = type; 396 ksi.ksi_code = ILL_ILLOPC; 397 ksi.ksi_addr = (void *)pc; 398 break; 399 400 case T_PRIVINST: 401 sig = SIGILL; 402 KSI_INIT_TRAP(&ksi); 403 ksi.ksi_trap = type; 404 ksi.ksi_code = ILL_PRVOPC; 405 ksi.ksi_addr = (void *)pc; 406 break; 407 408 case T_FPDISABLED: { 409 struct fpstate *fs = l->l_md.md_fpstate; 410 411#ifdef FPU_DEBUG 412 if ((tf->tf_psr & PSR_PS) != 0) { 413 printf("FPU fault from kernel mode, pc=%x\n", pc); 414#ifdef DDB 415 Debugger(); 416#endif 417 } 418#endif 419 420 if (fs == NULL) { 421 fs = kmem_alloc(sizeof(struct fpstate), KM_SLEEP); 422 *fs = initfpstate; 423 l->l_md.md_fpstate = fs; 424 } 425 /* 426 * If we have not found an FPU, we have to emulate it. 
427 */ 428 if (!cpuinfo.fpupresent) { 429#ifdef notyet 430 fpu_emulate(l, tf, fs); 431#else 432 sig = SIGFPE; 433 KSI_INIT_TRAP(&ksi); 434 ksi.ksi_trap = type; 435 ksi.ksi_code = SI_NOINFO; 436 ksi.ksi_addr = (void *)pc; 437#endif 438 break; 439 } 440 /* 441 * We may have more FPEs stored up and/or ops queued. 442 * If they exist, handle them and get out. Otherwise, 443 * resolve the FPU state, turn it on, and try again. 444 */ 445 if (fs->fs_qsize) { 446 if ((code = fpu_cleanup(l, fs)) != 0) { 447 sig = SIGFPE; 448 KSI_INIT_TRAP(&ksi); 449 ksi.ksi_trap = type; 450 ksi.ksi_code = code; 451 ksi.ksi_addr = (void *)pc; 452 } 453 break; 454 } 455 456 /* 457 * If we do not own the FPU state on this CPU, we must 458 * now acquire it. 459 */ 460 if (cpuinfo.fplwp != l) { 461 struct cpu_info *cpi; 462 463 FPU_LOCK(s); 464 if (cpuinfo.fplwp != NULL) { 465 /* someone else had it*/ 466 savefpstate(cpuinfo.fplwp->l_md.md_fpstate); 467 cpuinfo.fplwp->l_md.md_fpu = NULL; 468 } 469 470 /* 471 * On MP machines, some of the other FPUs might 472 * still have our state. Tell the owning processor 473 * to save the process' FPU state. 474 */ 475 if ((cpi = l->l_md.md_fpu) != NULL) { 476 if (cpi->ci_cpuid == cpuinfo.ci_cpuid) 477 panic("FPU(%d): state for %p", 478 cpi->ci_cpuid, l); 479#if defined(MULTIPROCESSOR) 480 XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid); 481#endif 482 cpi->fplwp = NULL; 483 } 484 loadfpstate(fs); 485 486 /* now we do have it */ 487 cpuinfo.fplwp = l; 488 l->l_md.md_fpu = curcpu(); 489 FPU_UNLOCK(s); 490 } 491 492 tf->tf_psr |= PSR_EF; 493 break; 494 } 495 496 case T_WINOF: 497 if (rwindow_save(l)) { 498 mutex_enter(p->p_lock); 499 sigexit(l, SIGILL); 500 } 501 break; 502 503#define read_rw(src, dst) \ 504 copyin((void *)(src), (void *)(dst), sizeof(struct rwindow)) 505 506 case T_RWRET: 507 /* 508 * T_RWRET is a window load needed in order to rett. 509 * It simply needs the window to which tf->tf_out[6] 510 * (%sp) points. 
There are no user or saved windows now. 511 * Copy the one from %sp into pcb->pcb_rw[0] and set 512 * nsaved to -1. If we decide to deliver a signal on 513 * our way out, we will clear nsaved. 514 */ 515 if (pcb->pcb_uw || pcb->pcb_nsaved) 516 panic("trap T_RWRET 1"); 517#ifdef DEBUG 518 if (rwindow_debug) 519 printf("cpu%d:%s[%d]: rwindow: pcb<-stack: 0x%x\n", 520 cpuinfo.ci_cpuid, p->p_comm, p->p_pid, 521 tf->tf_out[6]); 522#endif 523 if (read_rw(tf->tf_out[6], &pcb->pcb_rw[0])) { 524 mutex_enter(p->p_lock); 525 sigexit(l, SIGILL); 526 } 527 if (pcb->pcb_nsaved) 528 panic("trap T_RWRET 2"); 529 pcb->pcb_nsaved = -1; /* mark success */ 530 break; 531 532 case T_WINUF: 533 /* 534 * T_WINUF is a real window underflow, from a restore 535 * instruction. It needs to have the contents of two 536 * windows---the one belonging to the restore instruction 537 * itself, which is at its %sp, and the one belonging to 538 * the window above, which is at its %fp or %i6---both 539 * in the pcb. The restore's window may still be in 540 * the CPU; we need to force it out to the stack. 
541 */ 542#ifdef DEBUG 543 if (rwindow_debug) 544 printf("cpu%d:%s[%d]: rwindow: T_WINUF 0: pcb<-stack: 0x%x\n", 545 cpuinfo.ci_cpuid, p->p_comm, p->p_pid, 546 tf->tf_out[6]); 547#endif 548 write_user_windows(); 549 if (rwindow_save(l) || read_rw(tf->tf_out[6], &pcb->pcb_rw[0])) { 550 mutex_enter(p->p_lock); 551 sigexit(l, SIGILL); 552 } 553#ifdef DEBUG 554 if (rwindow_debug) 555 printf("cpu%d:%s[%d]: rwindow: T_WINUF 1: pcb<-stack: 0x%x\n", 556 cpuinfo.ci_cpuid, p->p_comm, p->p_pid, 557 pcb->pcb_rw[0].rw_in[6]); 558#endif 559 if (read_rw(pcb->pcb_rw[0].rw_in[6], &pcb->pcb_rw[1])) { 560 mutex_enter(p->p_lock); 561 sigexit(l, SIGILL); 562 } 563 if (pcb->pcb_nsaved) 564 panic("trap T_WINUF"); 565 pcb->pcb_nsaved = -1; /* mark success */ 566 break; 567 568 case T_ALIGN: 569 if ((p->p_md.md_flags & MDP_FIXALIGN) != 0) { 570 n = fixalign(l, tf); 571 if (n == 0) { 572 ADVANCE; 573 break; 574 } 575 } 576 sig = SIGBUS; 577 KSI_INIT_TRAP(&ksi); 578 ksi.ksi_trap = type; 579 ksi.ksi_code = BUS_ADRALN; 580 ksi.ksi_addr = (void *)pc; 581 break; 582 583 case T_FPE: 584 /* 585 * Clean up after a floating point exception. 586 * fpu_cleanup can (and usually does) modify the 587 * state we save here, so we must `give up' the FPU 588 * chip context. (The software and hardware states 589 * will not match once fpu_cleanup does its job, so 590 * we must not save again later.) 591 */ 592 if (l != cpuinfo.fplwp) 593 panic("fpe without being the FP user"); 594 FPU_LOCK(s); 595 savefpstate(l->l_md.md_fpstate); 596 cpuinfo.fplwp = NULL; 597 l->l_md.md_fpu = NULL; 598 FPU_UNLOCK(s); 599 /* tf->tf_psr &= ~PSR_EF; */ /* share_fpu will do this */ 600 if ((code = fpu_cleanup(l, l->l_md.md_fpstate)) != 0) { 601 sig = SIGFPE; 602 KSI_INIT_TRAP(&ksi); 603 ksi.ksi_trap = type; 604 ksi.ksi_code = code; 605 ksi.ksi_addr = (void *)pc; 606 } 607#if 0 /* ??? really never??? 
*/ 608 ADVANCE; 609#endif 610 break; 611 612 case T_TAGOF: 613 sig = SIGEMT; 614 KSI_INIT_TRAP(&ksi); 615 ksi.ksi_trap = type; 616 ksi.ksi_code = SI_NOINFO; 617 ksi.ksi_addr = (void *)pc; 618 break; 619 620 case T_CPDISABLED: 621 uprintf("coprocessor instruction\n"); /* XXX */ 622 sig = SIGILL; 623 KSI_INIT_TRAP(&ksi); 624 ksi.ksi_trap = type; 625 ksi.ksi_code = ILL_COPROC; 626 ksi.ksi_addr = (void *)pc; 627 break; 628 629 case T_BREAKPOINT: 630 sig = SIGTRAP; 631 KSI_INIT_TRAP(&ksi); 632 ksi.ksi_trap = type; 633 ksi.ksi_code = TRAP_BRKPT; 634 ksi.ksi_addr = (void *)pc; 635 break; 636 637 case T_DIV0: 638 case T_IDIV0: 639 ADVANCE; 640 sig = SIGFPE; 641 KSI_INIT_TRAP(&ksi); 642 ksi.ksi_trap = type; 643 ksi.ksi_code = FPE_INTDIV; 644 ksi.ksi_addr = (void *)pc; 645 break; 646 647 case T_FLUSHWIN: 648 write_user_windows(); 649#ifdef probably_slower_since_this_is_usually_false 650 if (pcb->pcb_nsaved && rwindow_save(p)) { 651 mutex_enter(p->p_lock); 652 sigexit(l, SIGILL); 653 } 654#endif 655 ADVANCE; 656 break; 657 658 case T_CLEANWIN: 659 uprintf("T_CLEANWIN\n"); /* XXX */ 660 ADVANCE; 661 break; 662 663 case T_RANGECHECK: 664 uprintf("T_RANGECHECK\n"); /* XXX */ 665 ADVANCE; 666 sig = SIGILL; 667 KSI_INIT_TRAP(&ksi); 668 ksi.ksi_trap = type; 669 ksi.ksi_code = ILL_ILLOPN; 670 ksi.ksi_addr = (void *)pc; 671 break; 672 673 case T_FIXALIGN: 674#ifdef DEBUG_ALIGN 675 uprintf("T_FIXALIGN\n"); 676#endif 677 /* User wants us to fix alignment faults */ 678 p->p_md.md_flags |= MDP_FIXALIGN; 679 ADVANCE; 680 break; 681 682 case T_INTOF: 683 uprintf("T_INTOF\n"); /* XXX */ 684 ADVANCE; 685 sig = SIGFPE; 686 KSI_INIT_TRAP(&ksi); 687 ksi.ksi_trap = type; 688 ksi.ksi_code = FPE_INTOVF; 689 ksi.ksi_addr = (void *)pc; 690 break; 691 } 692 if (sig != 0) { 693 ksi.ksi_signo = sig; 694 trapsignal(l, &ksi); 695 } 696 userret(l, pc, sticks); 697 share_fpu(l, tf); 698#undef ADVANCE 699} 700 701/* 702 * Save windows from PCB into user stack, and return 0. 
 * This is used on
 * window overflow pseudo-traps (from locore.s, just before returning to
 * user mode) and when ptrace or sendsig needs a consistent state.
 * As a side effect, rwindow_save() always sets pcb_nsaved to 0,
 * clobbering the `underflow restore' indicator if it was -1.
 *
 * If the windows cannot be saved, pcb_nsaved is restored and we return -1.
 */
int
rwindow_save(struct lwp *l)
{
	struct pcb *pcb = lwp_getpcb(l);
	struct rwindow *rw = &pcb->pcb_rw[0];
	int i;

	i = pcb->pcb_nsaved;
	if (i < 0) {
		/*
		 * -1 is the `window already loaded for rett' marker set
		 * by the T_RWRET/T_WINUF cases in trap(); just clear it.
		 */
		pcb->pcb_nsaved = 0;
		return (0);
	}
	if (i == 0)
		return (0);
#ifdef DEBUG
	if (rwindow_debug)
		printf("cpu%d:%s[%d]: rwindow: pcb->stack:",
			cpuinfo.ci_cpuid, l->l_proc->p_comm, l->l_proc->p_pid);
#endif
	do {
#ifdef DEBUG
		if (rwindow_debug)
			printf(" [%d]0x%x", cpuinfo.ci_cpuid, rw[1].rw_in[6]);
#endif
		/*
		 * Each saved window is written to the user stack address
		 * held in the next window's saved %fp (rw[1].rw_in[6]).
		 * A copyout failure aborts with pcb_nsaved untouched.
		 */
		if (copyout((void *)rw, (void *)rw[1].rw_in[6],
		    sizeof *rw))
			return (-1);
		rw++;
	} while (--i > 0);
#ifdef DEBUG
	if (rwindow_debug)
		printf("\n");
#endif
	pcb->pcb_nsaved = 0;
	return (0);
}

/*
 * Kill user windows (before exec) by writing back to stack or pcb
 * and then erasing any pcb tracks.  Otherwise we might try to write
 * the registers into the new process after the exec.
 */
void
cpu_vmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
{
	/* start/end of the new image are not needed here */
	struct pcb *pcb = lwp_getpcb(l);

	write_user_windows();
	pcb->pcb_nsaved = 0;
}

/*
 * Called from locore.s trap handling, for synchronous memory faults.
 *
 * This duplicates a lot of logic in trap() and perhaps should be
 * moved there; but the bus-error-register parameters are unique to
 * this routine.
 *
 * Since synchronous errors accumulate during prefetch, we can have
 * more than one `cause'.  But we do not care what the cause, here;
 * we just want to page in the page and try again.
 */
/*
 * type    - trap type (T_TEXTFAULT or a data-fault type)
 * ser     - synchronous error register bits (SER_*)
 * v       - faulting virtual address (ignored for text faults; pc is used)
 * pc, psr - pc and %psr at the time of the fault
 * tf      - saved register state
 */
void
mem_access_fault(unsigned type, int ser, u_int v, int pc, int psr,
		 struct trapframe *tf)
{
#if defined(SUN4) || defined(SUN4C)
	struct proc *p;
	struct lwp *l;
	struct pcb *pcb;
	struct vmspace *vm;
	vaddr_t va;
	int rv;
	vm_prot_t atype;
	vaddr_t onfault;
	u_quad_t sticks;
	char bits[64];
	ksiginfo_t ksi;

	curcpu()->ci_data.cpu_ntrap++;
	l = curlwp;
	p = l->l_proc;
	pcb = lwp_getpcb(l);
	onfault = (vaddr_t)pcb->pcb_onfault;

	LWP_CACHE_CREDS(l, p);
	sticks = p->p_sticks;

#ifdef FPU_DEBUG
	if ((tf->tf_psr & PSR_EF) != 0) {
		if (cpuinfo.fplwp != l)
			panic("FPU enabled but wrong proc (1) [l=%p, fwlp=%p]",
				l, cpuinfo.fplwp);
		savefpstate(l->l_md.md_fpstate);
		l->l_md.md_fpu = NULL;
		cpuinfo.fplwp = NULL;
		tf->tf_psr &= ~PSR_EF;
		setpsr(getpsr() & ~PSR_EF);
	}
#endif

	/*
	 * Figure out what to pass the VM code, and ignore the sva register
	 * value in v on text faults (text faults are always at pc).
	 * Kernel faults are somewhat different: text faults are always
	 * illegal, and data faults are extra complex.  User faults must
	 * set p->p_md.md_tf, in case we decide to deliver a signal.  Check
	 * for illegal virtual addresses early since those can induce more
	 * faults.
	 */
	if (type == T_TEXTFAULT)
		v = pc;
	if (VA_INHOLE(v)) {
		rv = EACCES;
		goto fault;
	}
	atype = ser & SER_WRITE ? VM_PROT_WRITE : VM_PROT_READ;
	if ((ser & SER_PROT) && atype == VM_PROT_READ && type != T_TEXTFAULT) {

		/*
		 * The hardware reports faults by the atomic load/store
		 * instructions as read faults, so if the faulting instruction
		 * is one of those, relabel this fault as both read and write.
		 */
		if ((fuword((void *)pc) & 0xc1680000) == 0xc0680000) {
			atype = VM_PROT_READ | VM_PROT_WRITE;
		}
	}
	va = trunc_page(v);
	if (psr & PSR_PS) {
		extern char Lfsbail[];

		if (type == T_TEXTFAULT) {
			(void) splhigh();
			snprintb(bits, sizeof(bits), SER_BITS, ser);
			printf("cpu%d: text fault: pc=0x%x ser=%s\n",
			       cpu_number(), pc, bits);
			panic("kernel fault");
			/* NOTREACHED */
		}
		/*
		 * If this was an access that we shouldn't try to page in,
		 * resume at the fault handler without any action.
		 */
		if (onfault == (vaddr_t)Lfsbail) {
			rv = EFAULT;
			goto kfault;
		}

		/*
		 * During autoconfiguration, faults are never OK unless
		 * pcb_onfault is set.  Once running normally we must allow
		 * exec() to cause copy-on-write faults to kernel addresses.
		 */
		if (cold) {
			rv = EFAULT;
			goto kfault;
		}
		if (va >= KERNBASE) {
			rv = mmu_pagein(pmap_kernel(), va, atype);
			if (rv < 0) {
				rv = EACCES;
				goto kfault;
			}
			if (rv > 0)
				return;
			pcb->pcb_onfault = NULL;
			rv = uvm_fault(kernel_map, va, atype);
			pcb->pcb_onfault = (void *)onfault;
			if (rv == 0)
				return;
			goto kfault;
		}
	} else {
		l->l_md.md_tf = tf;
		/*
		 * WRS: Can drop LP_SA_NOBLOCK test iff can only get
		 * here from a usermode-initiated access. LP_SA_NOBLOCK
		 * should never be set there - it's kernel-only.
		 */
		if ((l->l_flag & LW_SA)
		    && (~l->l_pflag & LP_SA_NOBLOCK)) {
			l->l_savp->savp_faultaddr = (vaddr_t)v;
			l->l_pflag |= LP_SA_PAGEFAULT;
		}
	}

	/*
	 * mmu_pagein returns -1 if the page is already valid, in which
	 * case we have a hard fault; it returns 1 if it loads a segment
	 * that got bumped out via LRU replacement.
	 */
	vm = p->p_vmspace;
	rv = mmu_pagein(vm->vm_map.pmap, va, atype);
	if (rv < 0) {
		rv = EACCES;
		goto fault;
	}
	if (rv > 0)
		goto out;

	/* alas! must call the horrible vm code */
	pcb->pcb_onfault = NULL;
	rv = uvm_fault(&vm->vm_map, (vaddr_t)va, atype);
	pcb->pcb_onfault = (void *)onfault;

	/*
	 * If this was a stack access we keep track of the maximum
	 * accessed stack size.  Also, if vm_fault gets a protection
	 * failure it is due to accessing the stack region outside
	 * the current limit and we need to reflect that as an access
	 * error.
	 */
	if ((void *)va >= vm->vm_maxsaddr
#ifdef COMPAT_SUNOS
	    && !(p->p_emul == &emul_sunos && va < USRSTACK -
		 (vaddr_t)p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur +
		 SUNOS_MAXSADDR_SLOP)
#endif
	    && rv == 0)
		uvm_grow(p, va);

	if (rv == 0) {
		/*
		 * pmap_enter() does not enter all requests made from
		 * vm_fault into the MMU (as that causes unnecessary
		 * entries for `wired' pages).  Instead, we call
		 * mmu_pagein here to make sure the new PTE gets installed.
		 */
		(void) mmu_pagein(vm->vm_map.pmap, va, VM_PROT_NONE);
	} else {
		/*
		 * Pagein failed.  If doing copyin/out, return to onfault
		 * address.  Any other page fault in kernel, die; if user
		 * fault, deliver SIGSEGV.
		 */
fault:
		if (psr & PSR_PS) {
kfault:
			if (!onfault) {
				(void) splhigh();
				snprintb(bits, sizeof(bits), SER_BITS, ser);
				printf("cpu%d: data fault: pc=0x%x "
				       "addr=0x%x ser=%s\n",
				       cpu_number(), pc, v, bits);
				panic("kernel fault");
				/* NOTREACHED */
			}
			/* Resume at the registered copyin/copyout handler. */
			tf->tf_pc = onfault;
			tf->tf_npc = onfault + 4;
			tf->tf_out[0] = (rv == EACCES) ? EFAULT : rv;
			return;
		}
		KSI_INIT_TRAP(&ksi);
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			       p->p_pid, p->p_comm,
			       l->l_cred ?
			       kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
			ksi.ksi_code = SI_NOINFO;
		} else {
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = (rv == EACCES
				? SEGV_ACCERR : SEGV_MAPERR);
		}
		ksi.ksi_trap = type;
		ksi.ksi_addr = (void *)v;
		trapsignal(l, &ksi);
	}
out:
	/* Only user-mode faults pass through userret()/share_fpu(). */
	if ((psr & PSR_PS) == 0) {
		l->l_pflag &= ~LP_SA_PAGEFAULT;
		userret(l, pc, sticks);
		share_fpu(l, tf);
	}
#endif /* SUN4 || SUN4C */
}

#if defined(SUN4M)	/* 4m version of mem_access_fault() follows */
static int tfaultaddr = (int) 0xdeadbeef;

void
mem_access_fault4m(unsigned type, u_int sfsr, u_int sfva, struct trapframe *tf)
{
	int pc, psr;
	struct proc *p;
	struct lwp *l;
	struct pcb *pcb;
	struct vmspace *vm;
	vaddr_t va;
	int rv;
	vm_prot_t atype;
	int onfault;
	u_quad_t sticks;
	char bits[64];
	ksiginfo_t ksi;

	curcpu()->ci_data.cpu_ntrap++;

	l = curlwp;
	p = l->l_proc;
	LWP_CACHE_CREDS(l, p);
	sticks = p->p_sticks;
	pcb = lwp_getpcb(l);
	onfault = (vaddr_t)pcb->pcb_onfault;

#ifdef FPU_DEBUG
	if ((tf->tf_psr & PSR_EF) != 0) {
		if (cpuinfo.fplwp != l)
			panic("FPU enabled but wrong proc (2) [l=%p, fwlp=%p]",
				l, cpuinfo.fplwp);
		savefpstate(l->l_md.md_fpstate);
		l->l_md.md_fpu = NULL;
		cpuinfo.fplwp = NULL;
		tf->tf_psr &= ~PSR_EF;
		setpsr(getpsr() & ~PSR_EF);
	}
#endif

	pc = tf->tf_pc;			/* These are needed below */
	psr = tf->tf_psr;

#if /*DIAGNOSTICS*/1
	if (type == T_DATAERROR || type == T_TEXTERROR)
		printf("%s[%d]: trap 0x%x: pc=0x%x sfsr=0x%x sfva=0x%x\n",
			p->p_comm, p->p_pid, type, pc, sfsr, sfva);
#endif

	/*
	 * Our first priority is handling serious faults, such as
	 * parity errors or async faults that might have come through here.
	 * If afsr & AFSR_AFO != 0, then we're on a HyperSPARC and we
	 * got an async fault. We pass it on to memerr4m. Similarly, if
	 * the trap was T_STOREBUFFAULT, we pass it on to memerr4m.
1045 * If we have a data fault, but SFSR_FAV is not set in the sfsr, 1046 * then things are really bizarre, and we treat it as a hard 1047 * error and pass it on to memerr4m. See section 8.12.4 in the 1048 * SuperSPARC user's guide for more info, and for a possible 1049 * solution which we don't implement here. 1050 * Note: store buffer faults may also lead to a level 15 interrupt 1051 * being posted to the module (see sun4m system architecture, 1052 * section B.I.9). 1053 */ 1054 if (type == T_STOREBUFFAULT || 1055 (type == T_DATAFAULT && (sfsr & SFSR_FAV) == 0)) { 1056 (*cpuinfo.memerr)(type, sfsr, sfva, tf); 1057 /* 1058 * If we get here, exit the trap handler and wait for the 1059 * trap to re-occur. 1060 */ 1061 goto out_nounlock; 1062 } 1063 1064 /* 1065 * Figure out what to pass the VM code. We cannot ignore the sfva 1066 * register on text faults, since this might be a trap on an 1067 * alternate-ASI access to code space. However, if we're on a 1068 * supersparc, we can't help using PC, since we don't get a VA in 1069 * sfva. 1070 * Kernel faults are somewhat different: text faults are always 1071 * illegal, and data faults are extra complex. User faults must 1072 * set p->p_md.md_tf, in case we decide to deliver a signal. Check 1073 * for illegal virtual addresses early since those can induce more 1074 * faults. 1075 * All translation faults are illegal, and result in a SIGSEGV 1076 * being delivered to the running process (or a kernel panic, for 1077 * a kernel fault). We check the translation first to make sure 1078 * it is not spurious. 1079 * Also, note that in the case where we have an overwritten 1080 * text fault (OW==1, AT==2,3), we attempt to service the 1081 * second (overwriting) fault, then restart the instruction 1082 * (which is from the first fault) and allow the first trap 1083 * to reappear. XXX is this right? It will probably change... 1084 */ 1085 if ((sfsr & SFSR_FT) == SFSR_FT_NONE) 1086 goto out; /* No fault. Why were we called? 
*/ 1087 1088 /* 1089 * NOTE: the per-CPU fault status register readers (in locore) 1090 * may already have decided to pass `pc' in `sfva', so we avoid 1091 * testing CPU types here. 1092 * Q: test SFSR_FAV in the locore stubs too? 1093 */ 1094 if ((sfsr & SFSR_FAV) == 0) { 1095 /* note: T_TEXTERROR == T_TEXTFAULT | 0x20 */ 1096 if ((type & ~0x20) == T_TEXTFAULT) 1097 sfva = pc; 1098 else { 1099 rv = EACCES; 1100 goto fault; 1101 } 1102 } 1103 1104 if ((sfsr & SFSR_FT) == SFSR_FT_TRANSERR) { 1105 /* 1106 * Translation errors are always fatal, as they indicate 1107 * a corrupt translation (page) table hierarchy. 1108 */ 1109 rv = EACCES; 1110 1111 /* XXXSMP - why bother with this anyway? */ 1112 if (tfaultaddr == sfva) /* Prevent infinite loops w/a static */ 1113 goto fault; 1114 tfaultaddr = sfva; 1115 if ((lda((sfva & 0xFFFFF000) | ASI_SRMMUFP_LN, ASI_SRMMUFP) & 1116 SRMMU_TETYPE) != SRMMU_TEPTE) 1117 goto fault; /* Translation bad */ 1118 lda(SRMMU_SFSR, ASI_SRMMU); 1119#ifdef DEBUG 1120 printf("mem_access_fault4m: SFSR_FT_TRANSERR: " 1121 "pid %d, va 0x%x: retrying\n", p->p_pid, sfva); 1122#endif 1123 goto out; /* Translation OK, retry operation */ 1124 } 1125 1126 va = trunc_page(sfva); 1127 1128 if (((sfsr & SFSR_AT_TEXT) || type == T_TEXTFAULT) && 1129 !(sfsr & SFSR_AT_STORE) && (sfsr & SFSR_OW)) { 1130 if (psr & PSR_PS) { /* never allow in kernel */ 1131 rv = EFAULT; 1132 goto kfault; 1133 } 1134#if 0 1135 /* 1136 * Double text fault. The evil "case 5" from the HS manual... 1137 * Attempt to handle early fault. Ignores ASI 8,9 issue...may 1138 * do a useless VM read. 1139 * XXX: Is this really necessary? 
1140 * XXX: If it's necessary, add SA_PAGEFAULT handling 1141 */ 1142 if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) { 1143 /* On HS, we have va for both */ 1144 vm = p->p_vmspace; 1145 pcb->pcb_onfault = NULL; 1146 rv = uvm_fault(&vm->vm_map, trunc_page(pc), 1147 VM_PROT_READ); 1148 pcb->pcb_onfault = onfault; 1149 if (rv != 0) 1150#ifdef DEBUG 1151 printf("mem_access_fault: " 1152 "can't pagein 1st text fault.\n") 1153#endif 1154 ; 1155 } 1156#endif 1157 } 1158 1159 /* Now munch on protections... */ 1160 if (sfsr & SFSR_AT_STORE) { 1161 /* stores are never text faults. */ 1162 atype = VM_PROT_WRITE; 1163 } else { 1164 if ((sfsr & SFSR_AT_TEXT) || (type & ~0x20) == T_TEXTFAULT) { 1165 atype = VM_PROT_EXECUTE; 1166 } else { 1167 atype = VM_PROT_READ; 1168 } 1169 } 1170 1171 if (psr & PSR_PS) { 1172 extern char Lfsbail[]; 1173 if (sfsr & SFSR_AT_TEXT || type == T_TEXTFAULT) { 1174 (void) splhigh(); 1175 snprintb(bits, sizeof(bits), SFSR_BITS, sfsr); 1176 printf("cpu%d text fault: pc=0x%x sfsr=%s sfva=0x%x\n", 1177 cpu_number(), pc, bits, sfva); 1178 panic("kernel fault"); 1179 /* NOTREACHED */ 1180 } 1181 /* 1182 * If this was an access that we shouldn't try to page in, 1183 * resume at the fault handler without any action. 1184 */ 1185 if (onfault == (vaddr_t)Lfsbail) { 1186 rv = EFAULT; 1187 goto kfault; 1188 } 1189 1190 /* 1191 * During autoconfiguration, faults are never OK unless 1192 * pcb_onfault is set. Once running normally we must allow 1193 * exec() to cause copy-on-write faults to kernel addresses. 1194 */ 1195 if (cold) { 1196 rv = EFAULT; 1197 goto kfault; 1198 } 1199 if (va >= KERNBASE) { 1200 pcb->pcb_onfault = NULL; 1201 rv = uvm_fault(kernel_map, va, atype); 1202 pcb->pcb_onfault = (void *)onfault; 1203 if (rv == 0) { 1204 return; 1205 } 1206 goto kfault; 1207 } 1208 } else { 1209 l->l_md.md_tf = tf; 1210 /* 1211 * WRS: Can drop LP_SA_NOBLOCK test iff can only get 1212 * here from a usermode-initiated access. 
LP_SA_NOBLOCK 1213 * should never be set there - it's kernel-only. 1214 */ 1215 if ((l->l_flag & LW_SA) 1216 && (~l->l_pflag & LP_SA_NOBLOCK)) { 1217 l->l_savp->savp_faultaddr = (vaddr_t)sfva; 1218 l->l_pflag |= LP_SA_PAGEFAULT; 1219 } 1220 } 1221 1222 vm = p->p_vmspace; 1223 1224 /* alas! must call the horrible vm code */ 1225 pcb->pcb_onfault = NULL; 1226 rv = uvm_fault(&vm->vm_map, (vaddr_t)va, atype); 1227 pcb->pcb_onfault = (void *)onfault; 1228 1229 /* 1230 * If this was a stack access we keep track of the maximum 1231 * accessed stack size. Also, if vm_fault gets a protection 1232 * failure it is due to accessing the stack region outside 1233 * the current limit and we need to reflect that as an access 1234 * error. 1235 */ 1236 if (rv == 0 && (void *)va >= vm->vm_maxsaddr) 1237 uvm_grow(p, va); 1238 if (rv != 0) { 1239 /* 1240 * Pagein failed. If doing copyin/out, return to onfault 1241 * address. Any other page fault in kernel, die; if user 1242 * fault, deliver SIGSEGV. 1243 */ 1244fault: 1245 if (psr & PSR_PS) { 1246kfault: 1247 if (!onfault) { 1248 (void) splhigh(); 1249 snprintb(bits, sizeof(bits), SFSR_BITS, sfsr); 1250 printf("cpu%d: data fault: pc=0x%x " 1251 "addr=0x%x sfsr=%s\n", 1252 cpu_number(), pc, sfva, bits); 1253 panic("kernel fault"); 1254 /* NOTREACHED */ 1255 } 1256 tf->tf_pc = onfault; 1257 tf->tf_npc = onfault + 4; 1258 tf->tf_out[0] = (rv == EACCES) ? EFAULT : rv; 1259 return; 1260 } 1261 KSI_INIT_TRAP(&ksi); 1262 if (rv == ENOMEM) { 1263 printf("UVM: pid %d (%s), uid %d killed: out of swap\n", 1264 p->p_pid, p->p_comm, 1265 l->l_cred ? 1266 kauth_cred_geteuid(l->l_cred) : -1); 1267 ksi.ksi_signo = SIGKILL; 1268 ksi.ksi_code = SI_NOINFO; 1269 } else { 1270 ksi.ksi_signo = SIGSEGV; 1271 ksi.ksi_code = (rv == EACCES) 1272 ? 
			    SEGV_ACCERR : SEGV_MAPERR;
		}
		ksi.ksi_trap = type;
		ksi.ksi_addr = (void *)sfva;	/* faulting VA for siginfo */
		trapsignal(l, &ksi);
	}
out:
	/* Only user-mode faults pass through userret/FPU sharing. */
	if ((psr & PSR_PS) == 0) {
		l->l_pflag &= ~LP_SA_PAGEFAULT;
out_nounlock:
		userret(l, pc, sticks);
		share_fpu(l, tf);
	}
}
#endif /* SUN4M */

/*
 * Return to user mode after an SA upcall has been set up.
 *
 * XXX This is a terrible name.
 */
void
upcallret(struct lwp *l)
{

	KERNEL_UNLOCK_LAST(l);
	userret(l, l->l_md.md_tf->tf_pc, 0);
}

/*
 * Start a new LWP: install the machine context supplied by the
 * creator, free the heap copy of the ucontext, and return to
 * user mode at the trapframe PC.
 *
 * `arg' is a kmem-allocated ucontext_t; ownership transfers here
 * and it is freed before returning to user mode.
 */
void
startlwp(void *arg)
{
	ucontext_t *uc = arg;
	lwp_t *l = curlwp;
	/*
	 * NOTE(review): `error' is referenced only by KASSERT(), so it
	 * is unused in non-DIAGNOSTIC builds — may warrant __diagused.
	 */
	int error;

	/* cpu_setmcontext() on a fresh LWP is expected to succeed. */
	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	kmem_free(uc, sizeof(ucontext_t));
	userret(l, l->l_md.md_tf->tf_pc, 0);
}
