/*	$NetBSD: trap.c,v 1.93 2022/09/11 09:03:25 rin Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define	__UFETCHSTORE_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.93 2022/09/11 09:03:25 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_ppcarch.h"
#include "opt_ppcopts.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/systm.h>

#if defined(KGDB)
#include <sys/kgdb.h>
#endif

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <powerpc/db_machdep.h>
#include <powerpc/spr.h>
#include <powerpc/userret.h>

#include <powerpc/ibm4xx/cpu.h>
#include <powerpc/ibm4xx/pmap.h>
#include <powerpc/ibm4xx/spr.h>
#include <powerpc/ibm4xx/tlb.h>

#include <powerpc/fpu/fpu_extern.h>

/* These definitions should probably be somewhere else XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((void *)((int)(sp) + 8)) /* more args go here */

void trap(struct trapframe *);	/* Called from locore / trap_subr */
#if 0
/* Not currently used nor exposed externally in any header file */
int badaddr(void *, size_t);
int badaddr_read(void *, size_t, int *);
#endif
int ctx_setup(int, int);

#ifndef PPC_NO_UNALIGNED
static bool fix_unaligned(struct trapframe *, ksiginfo_t *);
#endif

#ifdef DEBUG
#define TDB_ALL	0x1
int trapdebug = /* TDB_ALL */ 0;
#define	DBPRINTF(x, y)	if (trapdebug & (x)) printf y
#else
#define DBPRINTF(x, y)
#endif

/*
 * Handle a processor trap/exception delivered via locore / trap_subr.
 *
 * The exception type is taken from tf->tf_exc; if the trap came from
 * user mode (PSL_PR set in the saved srr1) EXC_USER is or'ed in and the
 * user-mode variant of the case is dispatched.  Kernel-mode data faults
 * and machine checks may be recovered through the pcb's onfault handler
 * (struct faultbuf): the trapframe is rewritten so that the faulting
 * code resumes at fb_pc with the saved callee-context registers and the
 * error code in r3.  Unhandled traps fall through to brain_damage,
 * which drops into the debugger (if configured) and then panics.
 * User traps return through userret().
 */
void
trap(struct trapframe *tf)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct pcb *pcb;
	int type = tf->tf_exc;
	int ftype, rv;
	ksiginfo_t ksi;

	KASSERT(l->l_stat == LSONPROC);

	/* PSL_PR in the saved srr1 means the trap came from user mode. */
	if (tf->tf_srr1 & PSL_PR) {
		LWP_CACHE_CREDS(l, p);
		type |= EXC_USER;
	}

	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %lx from frame %p &frame %p\n",
	    type, tf->tf_srr0, tf, &tf));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		/* We don't use hardware breakpoints for userland. */
		goto brain_damage;

	case EXC_TRC|EXC_USER:
		/* Single-step trace from userland: deliver SIGTRAP. */
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_trap = EXC_TRC;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		trapsignal(l, &ksi);
		break;

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
	    {
		/* Kernel-mode data fault / data TLB miss. */
		struct vm_map *map;
		vaddr_t va;
		struct faultbuf *fb;

		pcb = lwp_getpcb(l);
		fb = pcb->pcb_onfault;

		/* Faults taken at interrupt level cannot be serviced. */
		if (curcpu()->ci_idepth >= 0) {
			rv = EFAULT;
			goto out;
		}

		va = tf->tf_dear;
		/* Faults tagged with the kernel PID go to kernel_map. */
		if (tf->tf_pid == KERNEL_PID) {
			map = kernel_map;
		} else {
			map = &p->p_vmspace->vm_map;
		}

		/* ESR store/zone bits indicate a write fault. */
		if (tf->tf_esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI) at %lx %s fault on %p esr %x\n",
		    tf->tf_srr0,
		    (ftype & VM_PROT_WRITE) ? "write" : "read",
		    (void *)va, tf->tf_esr));

		/* Clear onfault around uvm_fault() so a nested fault panics. */
		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, trunc_page(va), ftype);
		pcb->pcb_onfault = fb;
		if (rv == 0)
			return;
out:
		/*
		 * Unresolved: if an onfault handler is registered,
		 * rewind the trapframe to the saved faultbuf state,
		 * returning the error code in r3.
		 */
		if (fb != NULL) {
			tf->tf_pid = KERNEL_PID;
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_srr1 |= PSL_IR; /* Re-enable IMMU */
			tf->tf_cr = fb->fb_cr;
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = rv;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
	    }
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		/* User-mode data fault: let UVM try to resolve it. */
		if (tf->tf_esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %lx %s fault on %lx %x\n",
		    tf->tf_srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    tf->tf_dear, tf->tf_esr));
		KASSERT(l == curlwp && (l->l_stat == LSONPROC));
//		KASSERT(curpcb->pcb_onfault == NULL);
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_dear),
		    ftype);
		if (rv == 0) {
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_DSI;
		ksi.ksi_addr = (void *)tf->tf_dear;
vm_signal:
		/* Map the uvm_fault() error to a signal/code pair. */
		switch (rv) {
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		case ENOMEM:
			ksi.ksi_signo = SIGKILL;
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n", p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		trapsignal(l, &ksi);
		break;

	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		/* User-mode instruction fault / instruction TLB miss. */
		ftype = VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
		    tf->tf_srr0, tf));
//		KASSERT(curpcb->pcb_onfault == NULL);
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_srr0),
		    ftype);
		if (rv == 0) {
			break;
		}
isi:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_ISI;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		goto vm_signal;
		break;

	case EXC_AST|EXC_USER:
		/* Asynchronous software trap: run pending AST work. */
		cpu_ast(l, curcpu());
		break;

	case EXC_ALI|EXC_USER:
		/* Alignment fault from userland. */
		if (fix_unaligned(tf, &ksi))
			trapsignal(l, &ksi);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Program exception: breakpoint/trap instruction
		 * (ESR_PTR), privileged instruction (ESR_PPR), or an
		 * illegal opcode that may be an FP instruction to
		 * emulate.
		 */
		curcpu()->ci_data.cpu_ntrap++;

		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_PGM;
		ksi.ksi_addr = (void *)tf->tf_srr0;

		if (tf->tf_esr & ESR_PTR) {
			vaddr_t va;
sigtrap:
			va = (vaddr_t)tf->tf_srr0;
			/*
			 * Restore original instruction and clear BP.
			 */
			if (p->p_md.md_ss_addr[0] == va ||
			    p->p_md.md_ss_addr[1] == va) {
				rv = ppc_sstep(l, 0);
				if (rv != 0)
					goto vm_signal;
				ksi.ksi_code = TRAP_TRACE;
			} else
				ksi.ksi_code = TRAP_BRKPT;
			/*
			 * Breakpoints inside a restartable atomic
			 * sequence are skipped, not signalled.
			 */
			if (p->p_raslist != NULL &&
			    ras_lookup(p, (void *)va) != (void *)-1) {
				tf->tf_srr0 += (ksi.ksi_code == TRAP_TRACE) ?
				    0 : 4;
				break;
			}
			ksi.ksi_signo = SIGTRAP;
		} else if (tf->tf_esr & ESR_PPR) {
			uint32_t opcode;

			/* Try to emulate privileged MSR accesses. */
			rv = copyin((void *)tf->tf_srr0, &opcode,
			    sizeof(opcode));
			if (rv)
				goto isi;
			if (emulate_mxmsr(l, tf, opcode)) {
				tf->tf_srr0 += 4;
				break;
			}

			ksi.ksi_code = ILL_PRVOPC;
			ksi.ksi_signo = SIGILL;
		} else {
			pcb = lwp_getpcb(l);

			/* First FP use: start from a clean FP context. */
			if (__predict_false(!fpu_used_p(l))) {
				memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
				fpu_mark_used(l);
			}

			if (fpu_emulate(tf, &pcb->pcb_fpu, &ksi)) {
				if (ksi.ksi_signo == 0)	/* was emulated */
					break;
				else if (ksi.ksi_signo == SIGTRAP)
					goto sigtrap;	/* XXX H/W bug? */
			} else {
				ksi.ksi_code = ILL_ILLOPC;
				ksi.ksi_signo = SIGILL;
			}
		}

		trapsignal(l, &ksi);
		break;

	case EXC_MCHK:
	    {
		/*
		 * Machine check in kernel mode: recoverable only via
		 * an onfault handler, in which case r3 is set to 1
		 * (badaddr()-style "TRUE" result).
		 */
		struct faultbuf *fb;

		pcb = lwp_getpcb(l);
		if ((fb = pcb->pcb_onfault) != NULL) {
			tf->tf_pid = KERNEL_PID;
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_srr1 |= PSL_IR; /* Re-enable IMMU */
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = 1; /* Return TRUE */
			tf->tf_cr = fb->fb_cr;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
	    }
		goto brain_damage;

	default:
brain_damage:
		printf("trap type 0x%x at 0x%lx\n", type, tf->tf_srr0);
#if defined(DDB) || defined(KGDB)
		if (kdb_trap(type, tf))
			return;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Invoke powerpc userret code */
	userret(l, tf);
}

/*
 * Select the MMU context (PID) to use on return from an exception.
 *
 * When returning to user mode (PSL_PR set in srr1), use the current
 * process's pmap context, allocating one first if the pmap has none.
 * When returning to kernel mode, keep the given context, defaulting
 * to KERNEL_PID if it is zero.  Returns the chosen context number.
 */
int
ctx_setup(int ctx, int srr1)
{
	volatile struct pmap *pm;

	/* Update PID if we're returning to user mode. */
	if (srr1 & PSL_PR) {
		pm = curproc->p_vmspace->vm_map.pmap;
		if (!pm->pm_ctx) {
			ctx_alloc(__UNVOLATILE(pm));
		}
		ctx = pm->pm_ctx;
	}
	else if (!ctx) {
		ctx = KERNEL_PID;
	}
	return (ctx);
}

/*
 * Used by copyin()/copyout()
 */
extern vaddr_t vmaprange(struct proc *, vaddr_t, vsize_t, int);
extern void vunmaprange(vaddr_t, vsize_t);
static int bigcopyin(const void *, void *, size_t);
static int bigcopyout(const void *, void *, size_t);

/*
 * Copy len bytes from user space (uaddr) to kernel space (kaddr).
 *
 * Copies word-by-word (then byte-by-byte for the remainder) with the
 * IMMU disabled, switching the PID register between the user pmap's
 * context (for the loads) and the PID saved at entry (for the stores).
 * Faults are caught via setfault(); returns 0 on success or the fault
 * error code.  Buffers larger than 1024 bytes are handed to the
 * mapping-based bigcopyin() instead.
 */
int
copyin(const void *uaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx, count = 0;
	struct faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 1024)
		return (bigcopyin(uaddr, kaddr, len));

	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	__asm volatile(
	    "mfmsr %[msr];"			/* Save MSR */
	    "li %[tmp],0x20;"			/* Disable IMMU */
	    "andc %[tmp],%[msr],%[tmp];"
	    "mtmsr %[tmp];"
	    "isync;"
	    "mfpid %[pid];"			/* Save old PID */

	    "srwi. %[count],%[len],0x2;"	/* How many words? */
	    "beq- 2f;"				/* No words. Go do bytes */
	    "mtctr %[count];"

	"1:" "mtpid %[ctx];"			/* Switch to user context */
	    "isync;"
#ifdef PPC_IBM403
	    "lswi %[tmp],%[uaddr],4;"		/* Load user word */
#else
	    "lwz %[tmp],0(%[uaddr]);"
#endif
	    "addi %[uaddr],%[uaddr],0x4;"	/* next uaddr word */
	    "sync;"

	    "mtpid %[pid];"			/* Back to the saved PID */
	    "isync;"
#ifdef PPC_IBM403
	    "stswi %[tmp],%[kaddr],4;"		/* Store kernel word */
#else
	    "stw %[tmp],0(%[kaddr]);"
#endif
	    "dcbst 0,%[kaddr];"			/* flush cache */
	    "addi %[kaddr],%[kaddr],0x4;"	/* next kaddr word */
	    "sync;"
	    "bdnz 1b;"				/* repeat */

	"2:" "andi. %[count],%[len],0x3;"	/* How many remaining bytes? */
	    "beq 10f;"
	    "addi %[count],%[count],0x1;"
	    "mtctr %[count];"
	"3:" "bdz 10f;"				/* while count */

	    "mtpid %[ctx];"
	    "isync;"
	    "lbz %[tmp],0(%[uaddr]);"		/* Load user byte */
	    "addi %[uaddr],%[uaddr],0x1;"	/* next uaddr byte */
	    "sync;"

	    "mtpid %[pid];"
	    "isync;"
	    "stb %[tmp],0(%[kaddr]);"		/* Store kernel byte */
	    "dcbst 0,%[kaddr];"			/* flush cache */
	    "addi %[kaddr],%[kaddr],0x1;"
	    "sync;"
	    "b 3b;"

	"10:" "mtpid %[pid];"			/* Restore PID and MSR */
	    "mtmsr %[msr];"
	    "isync;"

	    : [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp)
	    : [uaddr] "b" (uaddr), [ctx] "b" (ctx), [kaddr] "b" (kaddr),
	      [len] "b" (len), [count] "b" (count)
	    : "cr0", "ctr");

	curpcb->pcb_onfault = NULL;
	return 0;
}

/*
 * Large-buffer copyin: wire the user range, map it into kernel VA with
 * vmaprange(), and memcpy() under a setfault() handler.
 */
static int
bigcopyin(const void *uaddr, void *kaddr, size_t len)
{
	const char *up;
	char *kp = kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, __UNCONST(uaddr), len, VM_PROT_READ);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)uaddr, len, VM_PROT_READ);

	if ((error = setfault(&env)) == 0) {
		memcpy(kp, up, len);
	}

	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, __UNCONST(uaddr), len);

	return error;
}

/*
 * Copy len bytes from kernel space (kaddr) to user space (uaddr).
 *
 * Mirror image of copyin(): loads are done under the PID saved at
 * entry, stores under the user pmap's context, with the IMMU disabled
 * throughout.  Returns 0 on success or the setfault() error code.
 * Buffers larger than 1024 bytes go through bigcopyout() instead.
 */
int
copyout(const void *kaddr, void *uaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx, count = 0;
	struct faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 1024)
		return (bigcopyout(kaddr, uaddr, len));

	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	__asm volatile(
	    "mfmsr %[msr];"			/* Save MSR */
	    "li %[tmp],0x20;"			/* Disable IMMU */
	    "andc %[tmp],%[msr],%[tmp];"
	    "mtmsr %[tmp];"
	    "isync;"
	    "mfpid %[pid];"			/* Save old PID */

	    "srwi. %[count],%[len],0x2;"	/* How many words? */
	    "beq- 2f;"				/* No words. Go do bytes */
	    "mtctr %[count];"

	"1:" "mtpid %[pid];"			/* Saved PID for kernel load */
	    "isync;"
#ifdef PPC_IBM403
	    "lswi %[tmp],%[kaddr],4;"		/* Load kernel word */
#else
	    "lwz %[tmp],0(%[kaddr]);"
#endif
	    "addi %[kaddr],%[kaddr],0x4;"	/* next kaddr word */
	    "sync;"

	    "mtpid %[ctx];"			/* User context for the store */
	    "isync;"
#ifdef PPC_IBM403
	    "stswi %[tmp],%[uaddr],4;"		/* Store user word */
#else
	    "stw %[tmp],0(%[uaddr]);"
#endif
	    "dcbst 0,%[uaddr];"			/* flush cache */
	    "addi %[uaddr],%[uaddr],0x4;"	/* next uaddr word */
	    "sync;"
	    "bdnz 1b;"				/* repeat */

	"2:" "andi. %[count],%[len],0x3;"	/* How many remaining bytes? */
	    "beq 10f;"
	    "addi %[count],%[count],0x1;"
	    "mtctr %[count];"
	"3:" "bdz 10f;"				/* while count */

	    "mtpid %[pid];"
	    "isync;"
	    "lbz %[tmp],0(%[kaddr]);"		/* Load kernel byte */
	    "addi %[kaddr],%[kaddr],0x1;"	/* next kaddr byte */
	    "sync;"

	    "mtpid %[ctx];"
	    "isync;"
	    "stb %[tmp],0(%[uaddr]);"		/* Store user byte */
	    "dcbst 0,%[uaddr];"			/* flush cache */
	    "addi %[uaddr],%[uaddr],0x1;"
	    "sync;"
	    "b 3b;"

	"10:" "mtpid %[pid];"			/* Restore PID and MSR */
	    "mtmsr %[msr];"
	    "isync;"

	    : [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp)
	    : [uaddr] "b" (uaddr), [ctx] "b" (ctx), [kaddr] "b" (kaddr),
	      [len] "b" (len), [count] "b" (count)
	    : "cr0", "ctr");

	curpcb->pcb_onfault = NULL;
	return 0;
}

/*
 * Large-buffer copyout: wire the user range, map it into kernel VA
 * read/write, and memcpy() under a setfault() handler.
 */
static int
bigcopyout(const void *kaddr, void *uaddr, size_t len)
{
	char *up;
	const char *kp = (const char *)kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, uaddr, len, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)uaddr, len,
	    VM_PROT_READ | VM_PROT_WRITE);

	if ((error = setfault(&env)) == 0) {
		memcpy(up, kp, len);
	}

	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, uaddr, len);

	return error;
}

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct faultbuf env, *oldfault;
	int rv;

	oldfault = curpcb->pcb_onfault;
	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return 0;
}

#if 0
/*
 * Probe an address for a machine check: returns non-zero if the
 * address is bad.  Currently compiled out (see the #if 0'd prototypes
 * above).
 */
int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm volatile ("sync; isync");

	if (setfault(&env)) {
		curpcb->pcb_onfault = NULL;
		__asm volatile ("sync");
		return 1;
	}

	__asm volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		/* NOTE(review): size is size_t; %d is mismatched (%zu). */
		panic("badaddr: invalid size (%d)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm volatile ("sync; isync");

	curpcb->pcb_onfault = NULL;
	__asm volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder.  */
	if (rptr)
		*rptr = x;

	return 0;
}
#endif

#ifndef PPC_NO_UNALIGNED
/*
 * Handle a user-mode alignment fault.  Despite the name, no fixup is
 * attempted here: the ksiginfo is filled in for a SIGBUS at the fault
 * address (tf_dear) and true is returned so the caller delivers it.
 */
static bool
fix_unaligned(struct trapframe *tf, ksiginfo_t *ksi)
{

	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGBUS;
	ksi->ksi_trap = EXC_ALI;
	ksi->ksi_addr = (void *)tf->tf_dear;
	return true;
}
#endif

/*
 * XXX Extremely lame implementations of _ufetch_* / _ustore_*.  IBM 4xx
 * experts should make versions that are good.
 */

#define UFETCH(sz)							\
int									\
_ufetch_ ## sz(const uint ## sz ## _t *uaddr, uint ## sz ## _t *valp)	\
{									\
	return copyin(uaddr, valp, sizeof(*valp));			\
}

UFETCH(8)
UFETCH(16)
UFETCH(32)

#define USTORE(sz)							\
int									\
_ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val)		\
{									\
	return copyout(&val, uaddr, sizeof(val));			\
}

USTORE(8)
USTORE(16)
USTORE(32)