/* vm_machdep.c revision 232577 */
1/*- 2 * Copyright (c) 1982, 1986 The Regents of the University of California. 3 * Copyright (c) 1989, 1990 William Jolitz 4 * Copyright (c) 1994 John Dyson 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department, and William Jolitz. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 36 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ 37 * from: src/sys/i386/i386/vm_machdep.c,v 1.132.2.2 2000/08/26 04:19:26 yokota 38 * JNPR: vm_machdep.c,v 1.8.2.2 2007/08/16 15:59:17 girish 39 */ 40 41#include <sys/cdefs.h> 42__FBSDID("$FreeBSD: head/sys/mips/mips/vm_machdep.c 232577 2012-03-06 03:25:50Z gonzo $"); 43 44#include "opt_compat.h" 45#include "opt_cputype.h" 46#include "opt_ddb.h" 47 48#include <sys/param.h> 49#include <sys/systm.h> 50#include <sys/malloc.h> 51#include <sys/proc.h> 52#include <sys/syscall.h> 53#include <sys/sysent.h> 54#include <sys/buf.h> 55#include <sys/vnode.h> 56#include <sys/vmmeter.h> 57#include <sys/kernel.h> 58#include <sys/sysctl.h> 59#include <sys/unistd.h> 60 61#include <machine/asm.h> 62#include <machine/cache.h> 63#include <machine/clock.h> 64#include <machine/cpu.h> 65#include <machine/md_var.h> 66#include <machine/pcb.h> 67 68#include <vm/vm.h> 69#include <vm/vm_extern.h> 70#include <vm/pmap.h> 71#include <vm/vm_kern.h> 72#include <vm/vm_map.h> 73#include <vm/vm_page.h> 74#include <vm/vm_pageout.h> 75#include <vm/vm_param.h> 76#include <vm/uma.h> 77#include <vm/uma_int.h> 78 79#include <sys/user.h> 80#include <sys/mbuf.h> 81#include <sys/sf_buf.h> 82 83#ifndef NSFBUFS 84#define NSFBUFS (512 + maxusers * 16) 85#endif 86 87#ifndef __mips_n64 88static void sf_buf_init(void *arg); 89SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL); 90 91/* 92 * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, with the 93 * sf_freelist head with the sf_lock mutex. 94 */ 95static struct { 96 SLIST_HEAD(, sf_buf) sf_head; 97 struct mtx sf_lock; 98} sf_freelist; 99 100static u_int sf_buf_alloc_want; 101#endif 102 103/* 104 * Finish a fork operation, with process p2 nearly set up. 105 * Copy and update the pcb, set up the stack so that the child 106 * ready to run and return to user mode. 
107 */ 108void 109cpu_fork(register struct thread *td1,register struct proc *p2, 110 struct thread *td2,int flags) 111{ 112 register struct proc *p1; 113 struct pcb *pcb2; 114 115 p1 = td1->td_proc; 116 if ((flags & RFPROC) == 0) 117 return; 118 /* It is assumed that the vm_thread_alloc called 119 * cpu_thread_alloc() before cpu_fork is called. 120 */ 121 122 /* Point the pcb to the top of the stack */ 123 pcb2 = td2->td_pcb; 124 125 /* Copy p1's pcb, note that in this case 126 * our pcb also includes the td_frame being copied 127 * too. The older mips2 code did an additional copy 128 * of the td_frame, for us that's not needed any 129 * longer (this copy does them both) 130 */ 131 bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); 132 133 /* Point mdproc and then copy over td1's contents 134 * md_proc is empty for MIPS 135 */ 136 td2->td_md.md_flags = td1->td_md.md_flags & MDTD_FPUSED; 137 138 /* 139 * Set up return-value registers as fork() libc stub expects. 140 */ 141 td2->td_frame->v0 = 0; 142 td2->td_frame->v1 = 1; 143 td2->td_frame->a3 = 0; 144 145 if (td1 == PCPU_GET(fpcurthread)) 146 MipsSaveCurFPState(td1); 147 148 pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline; 149 /* Make sp 64-bit aligned */ 150 pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td2->td_pcb & 151 ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ); 152 pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return; 153 pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td2; 154 pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td2->td_frame; 155 pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() & 156 (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK); 157 /* 158 * FREEBSD_DEVELOPERS_FIXME: 159 * Setup any other CPU-Specific registers (Not MIPS Standard) 160 * and/or bits in other standard MIPS registers (if CPU-Specific) 161 * that are needed. 
162 */ 163 164 td2->td_md.md_tls = td1->td_md.md_tls; 165 td2->td_md.md_saved_intr = MIPS_SR_INT_IE; 166 td2->td_md.md_spinlock_count = 1; 167#ifdef CPU_CNMIPS 168 if (td1->td_md.md_flags & MDTD_COP2USED) { 169 if (td1->td_md.md_cop2owner == COP2_OWNER_USERLAND) { 170 if (td1->td_md.md_ucop2) 171 octeon_cop2_save(td1->td_md.md_ucop2); 172 else 173 panic("cpu_fork: ucop2 is NULL but COP2 is enabled"); 174 } 175 else { 176 if (td1->td_md.md_cop2) 177 octeon_cop2_save(td1->td_md.md_cop2); 178 else 179 panic("cpu_fork: cop2 is NULL but COP2 is enabled"); 180 } 181 } 182 183 if (td1->td_md.md_cop2) { 184 td2->td_md.md_cop2 = octeon_cop2_alloc_ctx(); 185 memcpy(td2->td_md.md_cop2, td1->td_md.md_cop2, 186 sizeof(*td1->td_md.md_cop2)); 187 } 188 if (td1->td_md.md_ucop2) { 189 td2->td_md.md_ucop2 = octeon_cop2_alloc_ctx(); 190 memcpy(td2->td_md.md_ucop2, td1->td_md.md_ucop2, 191 sizeof(*td1->td_md.md_ucop2)); 192 } 193 td2->td_md.md_cop2owner = td1->td_md.md_cop2owner; 194 pcb2->pcb_context[PCB_REG_SR] |= MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX; 195 /* Clear COP2 bits for userland & kernel */ 196 td2->td_frame->sr &= ~MIPS_SR_COP_2_BIT; 197 pcb2->pcb_context[PCB_REG_SR] &= ~MIPS_SR_COP_2_BIT; 198#endif 199} 200 201/* 202 * Intercept the return address from a freshly forked process that has NOT 203 * been scheduled yet. 204 * 205 * This is needed to make kernel threads stay in kernel mode. 
206 */ 207void 208cpu_set_fork_handler(struct thread *td, void (*func) __P((void *)), void *arg) 209{ 210 /* 211 * Note that the trap frame follows the args, so the function 212 * is really called like this: func(arg, frame); 213 */ 214 td->td_pcb->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)func; 215 td->td_pcb->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)arg; 216} 217 218void 219cpu_exit(struct thread *td) 220{ 221} 222 223void 224cpu_thread_exit(struct thread *td) 225{ 226 227 if (PCPU_GET(fpcurthread) == td) 228 PCPU_GET(fpcurthread) = (struct thread *)0; 229#ifdef CPU_CNMIPS 230 if (td->td_md.md_cop2) 231 memset(td->td_md.md_cop2, 0, 232 sizeof(*td->td_md.md_cop2)); 233 if (td->td_md.md_ucop2) 234 memset(td->td_md.md_ucop2, 0, 235 sizeof(*td->td_md.md_ucop2)); 236#endif 237} 238 239void 240cpu_thread_free(struct thread *td) 241{ 242#ifdef CPU_CNMIPS 243 if (td->td_md.md_cop2) 244 octeon_cop2_free_ctx(td->td_md.md_cop2); 245 if (td->td_md.md_ucop2) 246 octeon_cop2_free_ctx(td->td_md.md_ucop2); 247 td->td_md.md_cop2 = NULL; 248 td->td_md.md_ucop2 = NULL; 249#endif 250} 251 252void 253cpu_thread_clean(struct thread *td) 254{ 255} 256 257void 258cpu_thread_swapin(struct thread *td) 259{ 260 pt_entry_t *pte; 261 int i; 262 263 /* 264 * The kstack may be at a different physical address now. 265 * Cache the PTEs for the Kernel stack in the machine dependent 266 * part of the thread struct so cpu_switch() can quickly map in 267 * the pcb struct and kernel stack. 
268 */ 269 for (i = 0; i < KSTACK_PAGES; i++) { 270 pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE); 271 td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK; 272 } 273} 274 275void 276cpu_thread_swapout(struct thread *td) 277{ 278} 279 280void 281cpu_thread_alloc(struct thread *td) 282{ 283 pt_entry_t *pte; 284 int i; 285 286 KASSERT((td->td_kstack & (1 << PAGE_SHIFT)) == 0, ("kernel stack must be aligned.")); 287 td->td_pcb = (struct pcb *)(td->td_kstack + 288 td->td_kstack_pages * PAGE_SIZE) - 1; 289 td->td_frame = &td->td_pcb->pcb_regs; 290 291 for (i = 0; i < KSTACK_PAGES; i++) { 292 pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE); 293 td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK; 294 } 295} 296 297void 298cpu_set_syscall_retval(struct thread *td, int error) 299{ 300 struct trapframe *locr0 = td->td_frame; 301 unsigned int code; 302 int quad_syscall; 303 304 code = locr0->v0; 305 quad_syscall = 0; 306#if defined(__mips_n32) || defined(__mips_n64) 307#ifdef COMPAT_FREEBSD32 308 if (code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32)) 309 quad_syscall = 1; 310#endif 311#else 312 if (code == SYS___syscall) 313 quad_syscall = 1; 314#endif 315 316 if (code == SYS_syscall) 317 code = locr0->a0; 318 else if (code == SYS___syscall) { 319 if (quad_syscall) 320 code = _QUAD_LOWWORD ? locr0->a1 : locr0->a0; 321 else 322 code = locr0->a0; 323 } 324 325 switch (error) { 326 case 0: 327 if (quad_syscall && code != SYS_lseek) { 328 /* 329 * System call invoked through the 330 * SYS___syscall interface but the 331 * return value is really just 32 332 * bits. 
333 */ 334 locr0->v0 = td->td_retval[0]; 335 if (_QUAD_LOWWORD) 336 locr0->v1 = td->td_retval[0]; 337 locr0->a3 = 0; 338 } else { 339 locr0->v0 = td->td_retval[0]; 340 locr0->v1 = td->td_retval[1]; 341 locr0->a3 = 0; 342 } 343 break; 344 345 case ERESTART: 346 locr0->pc = td->td_pcb->pcb_tpc; 347 break; 348 349 case EJUSTRETURN: 350 break; /* nothing to do */ 351 352 default: 353 if (quad_syscall && code != SYS_lseek) { 354 locr0->v0 = error; 355 if (_QUAD_LOWWORD) 356 locr0->v1 = error; 357 locr0->a3 = 1; 358 } else { 359 locr0->v0 = error; 360 locr0->a3 = 1; 361 } 362 } 363} 364 365/* 366 * Initialize machine state (pcb and trap frame) for a new thread about to 367 * upcall. Put enough state in the new thread's PCB to get it to go back 368 * userret(), where we can intercept it again to set the return (upcall) 369 * Address and stack, along with those from upcalls that are from other sources 370 * such as those generated in thread_userret() itself. 371 */ 372void 373cpu_set_upcall(struct thread *td, struct thread *td0) 374{ 375 struct pcb *pcb2; 376 377 /* Point the pcb to the top of the stack. */ 378 pcb2 = td->td_pcb; 379 380 /* 381 * Copy the upcall pcb. This loads kernel regs. 382 * Those not loaded individually below get their default 383 * values here. 384 * 385 * XXXKSE It might be a good idea to simply skip this as 386 * the values of the other registers may be unimportant. 387 * This would remove any requirement for knowing the KSE 388 * at this time (see the matching comment below for 389 * more analysis) (need a good safe default). 390 * In MIPS, the trapframe is the first element of the PCB 391 * and gets copied when we copy the PCB. No separate copy 392 * is needed. 393 */ 394 bcopy(td0->td_pcb, pcb2, sizeof(*pcb2)); 395 396 /* 397 * Set registers for trampoline to user mode. 
398 */ 399 400 pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline; 401 /* Make sp 64-bit aligned */ 402 pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td->td_pcb & 403 ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ); 404 pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return; 405 pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td; 406 pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td->td_frame; 407 /* Dont set IE bit in SR. sched lock release will take care of it */ 408 pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() & 409 (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK); 410 411#ifdef CPU_CNMIPS 412 pcb2->pcb_context[PCB_REG_SR] |= MIPS_SR_COP_0_BIT | 413 MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX; 414#endif 415 416 /* 417 * FREEBSD_DEVELOPERS_FIXME: 418 * Setup any other CPU-Specific registers (Not MIPS Standard) 419 * that are needed. 420 */ 421 422 /* SMP Setup to release sched_lock in fork_exit(). */ 423 td->td_md.md_spinlock_count = 1; 424 td->td_md.md_saved_intr = MIPS_SR_INT_IE; 425#if 0 426 /* Maybe we need to fix this? */ 427 td->td_md.md_saved_sr = ( (MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT) | 428 (MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX) | 429 (MIPS_SR_INT_IE | MIPS_HARD_INT_MASK)); 430#endif 431} 432 433/* 434 * Set that machine state for performing an upcall that has to 435 * be done in thread_userret() so that those upcalls generated 436 * in thread_userret() itself can be done as well. 437 */ 438void 439cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, 440 stack_t *stack) 441{ 442 struct trapframe *tf; 443 register_t sp; 444 445 /* 446 * At the point where a function is called, sp must be 8 447 * byte aligned[for compatibility with 64-bit CPUs] 448 * in ``See MIPS Run'' by D. Sweetman, p. 
269 449 * align stack */ 450 sp = ((register_t)(intptr_t)(stack->ss_sp + stack->ss_size) & ~0x7) - 451 CALLFRAME_SIZ; 452 453 /* 454 * Set the trap frame to point at the beginning of the uts 455 * function. 456 */ 457 tf = td->td_frame; 458 bzero(tf, sizeof(struct trapframe)); 459 tf->sp = sp; 460 tf->pc = (register_t)(intptr_t)entry; 461 /* 462 * MIPS ABI requires T9 to be the same as PC 463 * in subroutine entry point 464 */ 465 tf->t9 = (register_t)(intptr_t)entry; 466 tf->a0 = (register_t)(intptr_t)arg; 467 468 /* 469 * Keep interrupt mask 470 */ 471 td->td_frame->sr = MIPS_SR_KSU_USER | MIPS_SR_EXL | MIPS_SR_INT_IE | 472 (mips_rd_status() & MIPS_SR_INT_MASK); 473#if defined(__mips_n32) 474 td->td_frame->sr |= MIPS_SR_PX; 475#elif defined(__mips_n64) 476 td->td_frame->sr |= MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX; 477#endif 478#ifdef CPU_CNMIPS 479 tf->sr |= MIPS_SR_INT_IE | MIPS_SR_COP_0_BIT | MIPS_SR_PX | MIPS_SR_UX | 480 MIPS_SR_KX; 481#endif 482/* tf->sr |= (ALL_INT_MASK & idle_mask) | SR_INT_ENAB; */ 483 /**XXX the above may now be wrong -- mips2 implements this as panic */ 484 /* 485 * FREEBSD_DEVELOPERS_FIXME: 486 * Setup any other CPU-Specific registers (Not MIPS Standard) 487 * that are needed. 488 */ 489} 490/* 491 * Convert kernel VA to physical address 492 */ 493u_long 494kvtop(void *addr) 495{ 496 vm_offset_t va; 497 498 va = pmap_kextract((vm_offset_t)addr); 499 if (va == 0) 500 panic("kvtop: zero page frame"); 501 return((intptr_t)va); 502} 503 504/* 505 * Implement the pre-zeroed page mechanism. 506 * This routine is called from the idle loop. 507 */ 508 509#define ZIDLE_LO(v) ((v) * 2 / 3) 510#define ZIDLE_HI(v) ((v) * 4 / 5) 511 512/* 513 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. 
:-)) 514 */ 515#ifndef __mips_n64 516static void 517sf_buf_init(void *arg) 518{ 519 struct sf_buf *sf_bufs; 520 vm_offset_t sf_base; 521 int i; 522 523 nsfbufs = NSFBUFS; 524 TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs); 525 526 mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF); 527 SLIST_INIT(&sf_freelist.sf_head); 528 sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE); 529 sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, 530 M_NOWAIT | M_ZERO); 531 for (i = 0; i < nsfbufs; i++) { 532 sf_bufs[i].kva = sf_base + i * PAGE_SIZE; 533 SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list); 534 } 535 sf_buf_alloc_want = 0; 536} 537#endif 538 539/* 540 * Get an sf_buf from the freelist. Will block if none are available. 541 */ 542struct sf_buf * 543sf_buf_alloc(struct vm_page *m, int flags) 544{ 545#ifndef __mips_n64 546 struct sf_buf *sf; 547 int error; 548 549 mtx_lock(&sf_freelist.sf_lock); 550 while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) { 551 if (flags & SFB_NOWAIT) 552 break; 553 sf_buf_alloc_want++; 554 mbstat.sf_allocwait++; 555 error = msleep(&sf_freelist, &sf_freelist.sf_lock, 556 (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0); 557 sf_buf_alloc_want--; 558 559 /* 560 * If we got a signal, don't risk going back to sleep. 561 */ 562 if (error) 563 break; 564 } 565 if (sf != NULL) { 566 SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list); 567 sf->m = m; 568 nsfbufsused++; 569 nsfbufspeak = imax(nsfbufspeak, nsfbufsused); 570 pmap_qenter(sf->kva, &sf->m, 1); 571 } 572 mtx_unlock(&sf_freelist.sf_lock); 573 return (sf); 574#else 575 return ((struct sf_buf *)m); 576#endif 577} 578 579/* 580 * Release resources back to the system. 
581 */ 582void 583sf_buf_free(struct sf_buf *sf) 584{ 585#ifndef __mips_n64 586 pmap_qremove(sf->kva, 1); 587 mtx_lock(&sf_freelist.sf_lock); 588 SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list); 589 nsfbufsused--; 590 if (sf_buf_alloc_want > 0) 591 wakeup(&sf_freelist); 592 mtx_unlock(&sf_freelist.sf_lock); 593#endif 594} 595 596/* 597 * Software interrupt handler for queued VM system processing. 598 */ 599void 600swi_vm(void *dummy) 601{ 602 603 if (busdma_swi_pending) 604 busdma_swi(); 605} 606 607int 608cpu_set_user_tls(struct thread *td, void *tls_base) 609{ 610 611 td->td_md.md_tls = (char*)tls_base; 612 613 return (0); 614} 615 616#ifdef DDB 617#include <ddb/ddb.h> 618 619#define DB_PRINT_REG(ptr, regname) \ 620 db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->regname)) 621 622#define DB_PRINT_REG_ARRAY(ptr, arrname, regname) \ 623 db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->arrname[regname])) 624 625static void 626dump_trapframe(struct trapframe *trapframe) 627{ 628 629 db_printf("Trapframe at %p\n", trapframe); 630 631 DB_PRINT_REG(trapframe, zero); 632 DB_PRINT_REG(trapframe, ast); 633 DB_PRINT_REG(trapframe, v0); 634 DB_PRINT_REG(trapframe, v1); 635 DB_PRINT_REG(trapframe, a0); 636 DB_PRINT_REG(trapframe, a1); 637 DB_PRINT_REG(trapframe, a2); 638 DB_PRINT_REG(trapframe, a3); 639 DB_PRINT_REG(trapframe, t0); 640 DB_PRINT_REG(trapframe, t1); 641 DB_PRINT_REG(trapframe, t2); 642 DB_PRINT_REG(trapframe, t3); 643 DB_PRINT_REG(trapframe, t4); 644 DB_PRINT_REG(trapframe, t5); 645 DB_PRINT_REG(trapframe, t6); 646 DB_PRINT_REG(trapframe, t7); 647 DB_PRINT_REG(trapframe, s0); 648 DB_PRINT_REG(trapframe, s1); 649 DB_PRINT_REG(trapframe, s2); 650 DB_PRINT_REG(trapframe, s3); 651 DB_PRINT_REG(trapframe, s4); 652 DB_PRINT_REG(trapframe, s5); 653 DB_PRINT_REG(trapframe, s6); 654 DB_PRINT_REG(trapframe, s7); 655 DB_PRINT_REG(trapframe, t8); 656 DB_PRINT_REG(trapframe, t9); 657 DB_PRINT_REG(trapframe, k0); 658 DB_PRINT_REG(trapframe, 
k1); 659 DB_PRINT_REG(trapframe, gp); 660 DB_PRINT_REG(trapframe, sp); 661 DB_PRINT_REG(trapframe, s8); 662 DB_PRINT_REG(trapframe, ra); 663 DB_PRINT_REG(trapframe, sr); 664 DB_PRINT_REG(trapframe, mullo); 665 DB_PRINT_REG(trapframe, mulhi); 666 DB_PRINT_REG(trapframe, badvaddr); 667 DB_PRINT_REG(trapframe, cause); 668 DB_PRINT_REG(trapframe, pc); 669} 670 671DB_SHOW_COMMAND(pcb, ddb_dump_pcb) 672{ 673 struct thread *td; 674 struct pcb *pcb; 675 struct trapframe *trapframe; 676 677 /* Determine which thread to examine. */ 678 if (have_addr) 679 td = db_lookup_thread(addr, TRUE); 680 else 681 td = curthread; 682 683 pcb = td->td_pcb; 684 685 db_printf("Thread %d at %p\n", td->td_tid, td); 686 687 db_printf("PCB at %p\n", pcb); 688 689 trapframe = &pcb->pcb_regs; 690 dump_trapframe(trapframe); 691 692 db_printf("PCB Context:\n"); 693 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S0); 694 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S1); 695 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S2); 696 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S3); 697 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S4); 698 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S5); 699 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S6); 700 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S7); 701 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SP); 702 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S8); 703 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_RA); 704 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SR); 705 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_GP); 706 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_PC); 707 708 db_printf("PCB onfault = %p\n", pcb->pcb_onfault); 709 db_printf("md_saved_intr = 0x%0lx\n", (long)td->td_md.md_saved_intr); 710 db_printf("md_spinlock_count = %d\n", td->td_md.md_spinlock_count); 711 712 if (td->td_frame != trapframe) { 713 db_printf("td->td_frame %p is not the same as pcb_regs %p\n", 714 td->td_frame, trapframe); 715 } 716} 717 718/* 719 * Dump the trapframe beginning at 
address specified by first argument. 720 */ 721DB_SHOW_COMMAND(trapframe, ddb_dump_trapframe) 722{ 723 724 if (!have_addr) 725 return; 726 727 dump_trapframe((struct trapframe *)addr); 728} 729 730#endif /* DDB */ 731