vm_machdep.c revision 232770
1/*- 2 * Copyright (c) 1982, 1986 The Regents of the University of California. 3 * Copyright (c) 1989, 1990 William Jolitz 4 * Copyright (c) 1994 John Dyson 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department, and William Jolitz. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 *
 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * from: src/sys/i386/i386/vm_machdep.c,v 1.132.2.2 2000/08/26 04:19:26 yokota
 * JNPR: vm_machdep.c,v 1.8.2.2 2007/08/16 15:59:17 girish
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/vm_machdep.c 232770 2012-03-10 06:54:37Z jmallett $");

#include "opt_compat.h"
#include "opt_cputype.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/asm.h>
#include <machine/cache.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <sys/user.h>
#include <sys/mbuf.h>
#include <sys/sf_buf.h>

/*
 * Default size of the sendfile(2) buffer pool; may be overridden at boot
 * by the kern.ipc.nsfbufs tunable (fetched in sf_buf_init() below).
 */
#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

/*
 * On n64 the direct map covers all of physical memory, so no sf_buf
 * machinery is needed; it is only compiled for the 32-bit ABIs.
 */
#ifndef __mips_n64
static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

/*
 * Expanded sf_freelist head.  Really an SLIST_HEAD() in disguise, with
 * the sf_lock mutex protecting the sf_head free list.
 */
static struct {
	SLIST_HEAD(, sf_buf) sf_head;	/* free sf_bufs */
	struct mtx sf_lock;		/* protects sf_head */
} sf_freelist;

/* Number of threads sleeping in sf_buf_alloc() waiting for a free buffer. */
static u_int sf_buf_alloc_want;
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	register struct proc *p1;
	struct pcb *pcb2;

	p1 = td1->td_proc;
	/* Nothing to do unless a new process is actually being created. */
	if ((flags & RFPROC) == 0)
		return;
	/*
	 * It is assumed that vm_thread_alloc called
	 * cpu_thread_alloc() before cpu_fork is called.
	 */

	/* Point the pcb to the top of the stack. */
	pcb2 = td2->td_pcb;

	/*
	 * Copy p1's pcb.  Note that in this case our pcb also includes
	 * the td_frame being copied too.  The older mips2 code did an
	 * additional copy of the td_frame; for us that's no longer
	 * needed (this copy does them both).
	 */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/*
	 * Propagate only the FPU-used flag to the child;
	 * md_proc is empty for MIPS.
	 */
	td2->td_md.md_flags = td1->td_md.md_flags & MDTD_FPUSED;

	/*
	 * Set up return-value registers as the fork() libc stub expects:
	 * v0 = 0 and v1 = 1 mark the child side, a3 = 0 means no error.
	 */
	td2->td_frame->v0 = 0;
	td2->td_frame->v1 = 1;
	td2->td_frame->a3 = 0;

	/* Flush the parent's live FP state into its pcb before it is used. */
	if (td1 == PCPU_GET(fpcurthread))
		MipsSaveCurFPState(td1);

	/*
	 * Build the kernel context the child resumes with: cpu_switch()
	 * returns through fork_trampoline, which calls S0(S1) and then
	 * returns to user mode via the trapframe in S2.
	 */
	pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline;
	/* Make sp 64-bit aligned */
	pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td2->td_pcb &
	    ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ);
	pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return;
	pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td2;
	pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td2->td_frame;
	pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() &
	    (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK);
	/*
	 * FREEBSD_DEVELOPERS_FIXME:
	 * Setup any other CPU-Specific registers (Not MIPS Standard)
	 * and/or bits in other standard MIPS registers (if CPU-Specific)
	 * that are needed.
	 */

	/* Inherit the TLS base; child starts with interrupts enabled. */
	td2->td_md.md_tls = td1->td_md.md_tls;
	td2->td_md.md_saved_intr = MIPS_SR_INT_IE;
	td2->td_md.md_spinlock_count = 1;
#ifdef CPU_CNMIPS
	/*
	 * Write back the parent's live Octeon COP2 state (kernel or
	 * userland context, whichever owns the coprocessor) before it
	 * is duplicated for the child.
	 */
	if (td1->td_md.md_flags & MDTD_COP2USED) {
		if (td1->td_md.md_cop2owner == COP2_OWNER_USERLAND) {
			if (td1->td_md.md_ucop2)
				octeon_cop2_save(td1->td_md.md_ucop2);
			else
				panic("cpu_fork: ucop2 is NULL but COP2 is enabled");
		}
		else {
			if (td1->td_md.md_cop2)
				octeon_cop2_save(td1->td_md.md_cop2);
			else
				panic("cpu_fork: cop2 is NULL but COP2 is enabled");
		}
	}

	/*
	 * Give the child private copies of the COP2 contexts.
	 * NOTE(review): octeon_cop2_alloc_ctx() results are not checked
	 * for NULL here — confirm it cannot fail in this path.
	 */
	if (td1->td_md.md_cop2) {
		td2->td_md.md_cop2 = octeon_cop2_alloc_ctx();
		memcpy(td2->td_md.md_cop2, td1->td_md.md_cop2,
		    sizeof(*td1->td_md.md_cop2));
	}
	if (td1->td_md.md_ucop2) {
		td2->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
		memcpy(td2->td_md.md_ucop2, td1->td_md.md_ucop2,
		    sizeof(*td1->td_md.md_ucop2));
	}
	td2->td_md.md_cop2owner = td1->td_md.md_cop2owner;
	pcb2->pcb_context[PCB_REG_SR] |= MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX;
	/* Clear COP2 bits for userland & kernel */
	td2->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
	pcb2->pcb_context[PCB_REG_SR] &= ~MIPS_SR_COP_2_BIT;
#endif
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func) __P((void *)), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this: func(arg, frame);
	 * S0/S1 are consumed by fork_trampoline — see cpu_fork().
	 */
	td->td_pcb->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)func;
	td->td_pcb->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)arg;
}

/* MD process-exit hook; nothing to do on MIPS. */
void
cpu_exit(struct thread *td)
{
}

/*
 * MD thread-exit hook: release per-CPU FPU ownership and scrub the
 * Octeon COP2 contexts so no coprocessor state leaks past the thread.
 */
void
cpu_thread_exit(struct thread *td)
{

	if (PCPU_GET(fpcurthread) == td)
		/*
		 * NOTE(review): this relies on the MIPS PCPU_GET() macro
		 * expanding to an assignable lvalue; PCPU_SET() would be
		 * the portable spelling.
		 */
		PCPU_GET(fpcurthread) = (struct thread *)0;
#ifdef CPU_CNMIPS
	if (td->td_md.md_cop2)
		memset(td->td_md.md_cop2, 0,
		    sizeof(*td->td_md.md_cop2));
	if (td->td_md.md_ucop2)
		memset(td->td_md.md_ucop2, 0,
		    sizeof(*td->td_md.md_ucop2));
#endif
}

/* Free MD thread resources (the Octeon COP2 contexts, if any). */
void
cpu_thread_free(struct thread *td)
{
#ifdef CPU_CNMIPS
	if (td->td_md.md_cop2)
		octeon_cop2_free_ctx(td->td_md.md_cop2);
	if (td->td_md.md_ucop2)
		octeon_cop2_free_ctx(td->td_md.md_ucop2);
	td->td_md.md_cop2 = NULL;
	td->td_md.md_ucop2 = NULL;
#endif
}

/* Reset MD thread state for reuse; nothing to do on MIPS. */
void
cpu_thread_clean(struct thread *td)
{
}

void
cpu_thread_swapin(struct thread *td)
{
	pt_entry_t *pte;
	int i;

	/*
	 * The kstack may be at a different physical address now.
	 * Cache the PTEs for the kernel stack in the machine dependent
	 * part of the thread struct so cpu_switch() can quickly map in
	 * the pcb struct and kernel stack.
	 */
	for (i = 0; i < KSTACK_PAGES; i++) {
		pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE);
		/* Keep only the hardware TLB bits, not software-only bits. */
		td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK;
	}
}

/* MD swap-out hook; nothing to do on MIPS. */
void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * One-time MD initialization of a new thread: carve the pcb out of the
 * top of the kernel stack, point td_frame at the register save area
 * inside it, and cache the kstack PTEs for cpu_switch().
 */
void
cpu_thread_alloc(struct thread *td)
{
	pt_entry_t *pte;
	int i;

	KASSERT((td->td_kstack & (1 << PAGE_SHIFT)) == 0, ("kernel stack must be aligned."));
	/* The pcb occupies the very top of the kernel stack. */
	td->td_pcb = (struct pcb *)(td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE) - 1;
	/* The trapframe is the first member of the pcb (see cpu_fork()). */
	td->td_frame = &td->td_pcb->pcb_regs;

	for (i = 0; i < KSTACK_PAGES; i++) {
		pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE);
		td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK;
	}
}

/*
 * Store a system call's result (or error) into the trapframe registers
 * per the MIPS ABI: v0/v1 carry the value, a3 is the error flag.
 * 64-bit results requested via SYS___syscall from a 32-bit ABI ("quad"
 * syscalls) are special-cased, except for lseek which genuinely
 * returns 64 bits.
 */
void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *locr0 = td->td_frame;
	unsigned int code;
	int quad_syscall;

	code = locr0->v0;
	quad_syscall = 0;
#if defined(__mips_n32) || defined(__mips_n64)
#ifdef COMPAT_FREEBSD32
	/* Only 32-bit (ILP32) processes use the quad convention here. */
	if (code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32))
		quad_syscall = 1;
#endif
#else
	if (code == SYS___syscall)
		quad_syscall = 1;
#endif

	/* For indirect syscalls the real code is in the argument registers. */
	if (code == SYS_syscall)
		code = locr0->a0;
	else if (code == SYS___syscall) {
		if (quad_syscall)
			code = _QUAD_LOWWORD ? locr0->a1 : locr0->a0;
		else
			code = locr0->a0;
	}

	switch (error) {
	case 0:
		if (quad_syscall && code != SYS_lseek) {
			/*
			 * System call invoked through the
			 * SYS___syscall interface but the
			 * return value is really just 32
			 * bits.
			 */
			locr0->v0 = td->td_retval[0];
			if (_QUAD_LOWWORD)
				locr0->v1 = td->td_retval[0];
			locr0->a3 = 0;
		} else {
			locr0->v0 = td->td_retval[0];
			locr0->v1 = td->td_retval[1];
			locr0->a3 = 0;
		}
		break;

	case ERESTART:
		/* Rewind the PC so the syscall instruction re-executes. */
		locr0->pc = td->td_pcb->pcb_tpc;
		break;

	case EJUSTRETURN:
		break;	/* nothing to do */

	default:
		if (quad_syscall && code != SYS_lseek) {
			locr0->v0 = error;
			if (_QUAD_LOWWORD)
				locr0->v1 = error;
			locr0->a3 = 1;
		} else {
			locr0->v0 = error;
			locr0->a3 = 1;
		}
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 *
	 * XXXKSE It might be a good idea to simply skip this as
	 * the values of the other registers may be unimportant.
	 * This would remove any requirement for knowing the KSE
	 * at this time (see the matching comment below for
	 * more analysis) (need a good safe default).
	 * In MIPS, the trapframe is the first element of the PCB
	 * and gets copied when we copy the PCB.  No separate copy
	 * is needed.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));

	/*
	 * Set registers for trampoline to user mode
	 * (same scheme as cpu_fork(): fork_trampoline calls S0(S1)
	 * and returns to user mode via the trapframe in S2).
	 */
	pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline;
	/* Make sp 64-bit aligned */
	pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td->td_pcb &
	    ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ);
	pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return;
	pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td;
	pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td->td_frame;
	/* Don't set the IE bit in SR; sched lock release will take care of it. */
	pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() &
	    (MIPS_SR_PX | MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK);

	/*
	 * FREEBSD_DEVELOPERS_FIXME:
	 * Setup any other CPU-Specific registers (Not MIPS Standard)
	 * that are needed.
	 */

	/* SMP Setup to release sched_lock in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_intr = MIPS_SR_INT_IE;
#if 0
	/* Maybe we need to fix this? */
	td->td_md.md_saved_sr = ( (MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT) |
	    (MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX) |
	    (MIPS_SR_INT_IE | MIPS_HARD_INT_MASK));
#endif
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct trapframe *tf;
	register_t sp;

	/*
	 * At the point where a function is called, sp must be 8-byte
	 * aligned (for compatibility with 64-bit CPUs; see "See MIPS
	 * Run" by D. Sweetman, p. 269) — align the stack accordingly.
	 */
	sp = ((register_t)(intptr_t)(stack->ss_sp + stack->ss_size) & ~0x7) -
	    CALLFRAME_SIZ;

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.
	 */
	tf = td->td_frame;
	bzero(tf, sizeof(struct trapframe));
	tf->sp = sp;
	tf->pc = (register_t)(intptr_t)entry;
	/*
	 * MIPS ABI requires T9 to be the same as PC
	 * in subroutine entry point
	 */
	tf->t9 = (register_t)(intptr_t)entry;
	tf->a0 = (register_t)(intptr_t)arg;

	/*
	 * Return to user mode with exceptions pending and interrupts
	 * enabled, keeping the current interrupt mask.
	 */
	td->td_frame->sr = MIPS_SR_KSU_USER | MIPS_SR_EXL | MIPS_SR_INT_IE |
	    (mips_rd_status() & MIPS_SR_INT_MASK);
#if defined(__mips_n32)
	td->td_frame->sr |= MIPS_SR_PX;
#elif defined(__mips_n64)
	td->td_frame->sr |= MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX;
#endif
/*	tf->sr |= (ALL_INT_MASK & idle_mask) | SR_INT_ENAB; */
	/**XXX the above may now be wrong -- mips2 implements this as panic */
	/*
	 * FREEBSD_DEVELOPERS_FIXME:
	 * Setup any other CPU-Specific registers (Not MIPS Standard)
	 * that are needed.
	 */
}

/*
 * Convert a kernel virtual address to a physical address.
 * Panics if the address is not mapped.
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((intptr_t)va);
}

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 * NOTE(review): these thresholds appear unused within this file —
 * confirm whether any idle-zero consumer still references them.
 */

#define	ZIDLE_LO(v)	((v) * 2 / 3)
#define	ZIDLE_HI(v)	((v) * 4 / 5)

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer.
:-)) 505 */ 506#ifndef __mips_n64 507static void 508sf_buf_init(void *arg) 509{ 510 struct sf_buf *sf_bufs; 511 vm_offset_t sf_base; 512 int i; 513 514 nsfbufs = NSFBUFS; 515 TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs); 516 517 mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF); 518 SLIST_INIT(&sf_freelist.sf_head); 519 sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE); 520 sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, 521 M_NOWAIT | M_ZERO); 522 for (i = 0; i < nsfbufs; i++) { 523 sf_bufs[i].kva = sf_base + i * PAGE_SIZE; 524 SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list); 525 } 526 sf_buf_alloc_want = 0; 527} 528#endif 529 530/* 531 * Get an sf_buf from the freelist. Will block if none are available. 532 */ 533struct sf_buf * 534sf_buf_alloc(struct vm_page *m, int flags) 535{ 536#ifndef __mips_n64 537 struct sf_buf *sf; 538 int error; 539 540 mtx_lock(&sf_freelist.sf_lock); 541 while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) { 542 if (flags & SFB_NOWAIT) 543 break; 544 sf_buf_alloc_want++; 545 mbstat.sf_allocwait++; 546 error = msleep(&sf_freelist, &sf_freelist.sf_lock, 547 (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0); 548 sf_buf_alloc_want--; 549 550 /* 551 * If we got a signal, don't risk going back to sleep. 552 */ 553 if (error) 554 break; 555 } 556 if (sf != NULL) { 557 SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list); 558 sf->m = m; 559 nsfbufsused++; 560 nsfbufspeak = imax(nsfbufspeak, nsfbufsused); 561 pmap_qenter(sf->kva, &sf->m, 1); 562 } 563 mtx_unlock(&sf_freelist.sf_lock); 564 return (sf); 565#else 566 return ((struct sf_buf *)m); 567#endif 568} 569 570/* 571 * Release resources back to the system. 
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef __mips_n64
	/* Unmap the page and return the buffer to the free list. */
	pmap_qremove(sf->kva, 1);
	mtx_lock(&sf_freelist.sf_lock);
	SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
	nsfbufsused--;
	/* Wake any threads blocked in sf_buf_alloc(). */
	if (sf_buf_alloc_want > 0)
		wakeup(&sf_freelist);
	mtx_unlock(&sf_freelist.sf_lock);
#endif
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

/*
 * Record the thread's TLS (thread-local storage) base address in the
 * MD thread data.  Always succeeds on MIPS; returns 0.
 */
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	td->td_md.md_tls = (char*)tls_base;

	return (0);
}

#ifdef DDB
#include <ddb/ddb.h>

/* Print one named register of a struct as a pointer-width value. */
#define DB_PRINT_REG(ptr, regname) \
	db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->regname))

/* As above, for an element of a register array (e.g. pcb_context[]). */
#define DB_PRINT_REG_ARRAY(ptr, arrname, regname) \
	db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->arrname[regname]))

/* Dump every register saved in the given trapframe. */
static void
dump_trapframe(struct trapframe *trapframe)
{

	db_printf("Trapframe at %p\n", trapframe);

	DB_PRINT_REG(trapframe, zero);
	DB_PRINT_REG(trapframe, ast);
	DB_PRINT_REG(trapframe, v0);
	DB_PRINT_REG(trapframe, v1);
	DB_PRINT_REG(trapframe, a0);
	DB_PRINT_REG(trapframe, a1);
	DB_PRINT_REG(trapframe, a2);
	DB_PRINT_REG(trapframe, a3);
	DB_PRINT_REG(trapframe, t0);
	DB_PRINT_REG(trapframe, t1);
	DB_PRINT_REG(trapframe, t2);
	DB_PRINT_REG(trapframe, t3);
	DB_PRINT_REG(trapframe, t4);
	DB_PRINT_REG(trapframe, t5);
	DB_PRINT_REG(trapframe, t6);
	DB_PRINT_REG(trapframe, t7);
	DB_PRINT_REG(trapframe, s0);
	DB_PRINT_REG(trapframe, s1);
	DB_PRINT_REG(trapframe, s2);
	DB_PRINT_REG(trapframe, s3);
	DB_PRINT_REG(trapframe, s4);
	DB_PRINT_REG(trapframe, s5);
	DB_PRINT_REG(trapframe, s6);
	DB_PRINT_REG(trapframe, s7);
	DB_PRINT_REG(trapframe, t8);
	DB_PRINT_REG(trapframe, t9);
	DB_PRINT_REG(trapframe, k0);
	DB_PRINT_REG(trapframe, k1);
	DB_PRINT_REG(trapframe, gp);
	DB_PRINT_REG(trapframe, sp);
	DB_PRINT_REG(trapframe, s8);
	DB_PRINT_REG(trapframe, ra);
	DB_PRINT_REG(trapframe, sr);
	DB_PRINT_REG(trapframe, mullo);
	DB_PRINT_REG(trapframe, mulhi);
	DB_PRINT_REG(trapframe, badvaddr);
	DB_PRINT_REG(trapframe, cause);
	DB_PRINT_REG(trapframe, pc);
}

/*
 * DDB "show pcb [addr]" command: dump the pcb (trapframe, saved
 * context, and fault state) of the thread at addr, or of curthread
 * if no address is given.
 */
DB_SHOW_COMMAND(pcb, ddb_dump_pcb)
{
	struct thread *td;
	struct pcb *pcb;
	struct trapframe *trapframe;

	/* Determine which thread to examine. */
	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = curthread;

	pcb = td->td_pcb;

	db_printf("Thread %d at %p\n", td->td_tid, td);

	db_printf("PCB at %p\n", pcb);

	trapframe = &pcb->pcb_regs;
	dump_trapframe(trapframe);

	db_printf("PCB Context:\n");
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S0);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S1);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S2);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S3);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S4);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S5);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S6);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S7);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SP);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S8);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_RA);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SR);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_GP);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_PC);

	db_printf("PCB onfault = %p\n", pcb->pcb_onfault);
	db_printf("md_saved_intr = 0x%0lx\n", (long)td->td_md.md_saved_intr);
	db_printf("md_spinlock_count = %d\n", td->td_md.md_spinlock_count);

	/* Flag the unusual case where td_frame is not the pcb trapframe. */
	if (td->td_frame != trapframe) {
		db_printf("td->td_frame %p is not the same as pcb_regs %p\n",
		    td->td_frame, trapframe);
	}
}

/*
 * DDB "show trapframe <addr>" command: dump the trapframe beginning at
 * the address specified by the first argument.
 */
DB_SHOW_COMMAND(trapframe, ddb_dump_trapframe)
{

	if (!have_addr)
		return;

	dump_trapframe((struct trapframe *)addr);
}

#endif /* DDB */