vm_machdep.c revision 253351
1/*- 2 * Copyright (c) 1982, 1986 The Regents of the University of California. 3 * Copyright (c) 1989, 1990 William Jolitz 4 * Copyright (c) 1994 John Dyson 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department, and William Jolitz. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 36 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ 37 * from: src/sys/i386/i386/vm_machdep.c,v 1.132.2.2 2000/08/26 04:19:26 yokota 38 * JNPR: vm_machdep.c,v 1.8.2.2 2007/08/16 15:59:17 girish 39 */ 40 41#include <sys/cdefs.h> 42__FBSDID("$FreeBSD: head/sys/mips/mips/vm_machdep.c 253351 2013-07-15 06:16:57Z ae $"); 43 44#include "opt_compat.h" 45#include "opt_ddb.h" 46 47#include <sys/param.h> 48#include <sys/systm.h> 49#include <sys/malloc.h> 50#include <sys/proc.h> 51#include <sys/syscall.h> 52#include <sys/sysent.h> 53#include <sys/buf.h> 54#include <sys/vnode.h> 55#include <sys/vmmeter.h> 56#include <sys/kernel.h> 57#include <sys/sysctl.h> 58#include <sys/unistd.h> 59 60#include <machine/cache.h> 61#include <machine/clock.h> 62#include <machine/cpu.h> 63#include <machine/md_var.h> 64#include <machine/pcb.h> 65 66#include <vm/vm.h> 67#include <vm/vm_extern.h> 68#include <vm/pmap.h> 69#include <vm/vm_kern.h> 70#include <vm/vm_map.h> 71#include <vm/vm_page.h> 72#include <vm/vm_pageout.h> 73#include <vm/vm_param.h> 74#include <vm/uma.h> 75#include <vm/uma_int.h> 76 77#include <sys/user.h> 78#include <sys/mbuf.h> 79#include <sys/sf_buf.h> 80 81#ifndef NSFBUFS 82#define NSFBUFS (512 + maxusers * 16) 83#endif 84 85/* Duplicated from asm.h */ 86#if defined(__mips_o32) 87#define SZREG 4 88#else 89#define SZREG 8 90#endif 91#if defined(__mips_o32) || defined(__mips_o64) 92#define CALLFRAME_SIZ (SZREG * (4 + 2)) 93#elif defined(__mips_n32) || defined(__mips_n64) 94#define CALLFRAME_SIZ (SZREG * 4) 95#endif 96 97#ifndef __mips_n64 98static void sf_buf_init(void *arg); 99SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL); 100 101/* 102 * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, with the 103 * sf_freelist head with the sf_lock mutex. 
 */
static struct {
	SLIST_HEAD(, sf_buf) sf_head;	/* free sf_bufs */
	struct mtx sf_lock;		/* protects sf_head and sf_buf_alloc_want */
} sf_freelist;

/* Count of threads sleeping in sf_buf_alloc() waiting for a free buffer. */
static u_int sf_buf_alloc_want;
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1,register struct proc *p2,
    struct thread *td2,int flags)
{
	register struct proc *p1;
	struct pcb *pcb2;

	p1 = td1->td_proc;
	/* Nothing machine-dependent to do unless a new process is created. */
	if ((flags & RFPROC) == 0)
		return;
	/* It is assumed that the vm_thread_alloc called
	 * cpu_thread_alloc() before cpu_fork is called.
	 */

	/* Point the pcb to the top of the stack */
	pcb2 = td2->td_pcb;

	/* Copy p1's pcb, note that in this case
	 * our pcb also includes the td_frame being copied
	 * too. The older mips2 code did an additional copy
	 * of the td_frame, for us that's not needed any
	 * longer (this copy does them both)
	 */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Point mdproc and then copy over td1's contents
	 * md_proc is empty for MIPS
	 */
	td2->td_md.md_flags = td1->td_md.md_flags & MDTD_FPUSED;

	/*
	 * Set up return-value registers as fork() libc stub expects.
	 * The child sees fork() return 0 (v0=0, a3=0 means "no error").
	 */
	td2->td_frame->v0 = 0;
	td2->td_frame->v1 = 1;
	td2->td_frame->a3 = 0;

	/*
	 * NOTE(review): the FP state is flushed to td1's pcb *after* the
	 * bcopy above, so the child's copied pcb may hold stale FP
	 * registers — confirm whether the save should precede the copy.
	 */
	if (td1 == PCPU_GET(fpcurthread))
		MipsSaveCurFPState(td1);

	/*
	 * Build the kernel context that cpu_switch() will restore for the
	 * child: resume at fork_trampoline, which presumably consumes
	 * S0/S1/S2 as (fork_return, td2, frame) — verify against the
	 * trampoline in MD assembly.
	 */
	pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline;
	/* Make sp 64-bit aligned */
	pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td2->td_pcb &
	    ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ);
	pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return;
	pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td2;
	pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td2->td_frame;
	/* Inherit address-space extension bits and interrupt mask only. */
	pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() &
	    (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK);
	/*
	 * FREEBSD_DEVELOPERS_FIXME:
	 * Setup any other CPU-Specific registers (Not MIPS Standard)
	 * and/or bits in other standard MIPS registers (if CPU-Specific)
	 * that are needed.
	 */

	td2->td_md.md_tls = td1->td_md.md_tls;
	td2->td_md.md_saved_intr = MIPS_SR_INT_IE;
	/* Child starts holding one spinlock (released in fork_exit()). */
	td2->td_md.md_spinlock_count = 1;
#ifdef CPU_CNMIPS
	/* Flush the live Octeon COP2 (crypto) state before duplicating it. */
	if (td1->td_md.md_flags & MDTD_COP2USED) {
		if (td1->td_md.md_cop2owner == COP2_OWNER_USERLAND) {
			if (td1->td_md.md_ucop2)
				octeon_cop2_save(td1->td_md.md_ucop2);
			else
				panic("cpu_fork: ucop2 is NULL but COP2 is enabled");
		}
		else {
			if (td1->td_md.md_cop2)
				octeon_cop2_save(td1->td_md.md_cop2);
			else
				panic("cpu_fork: cop2 is NULL but COP2 is enabled");
		}
	}

	/* Give the child its own copies of the kernel and user COP2 contexts. */
	if (td1->td_md.md_cop2) {
		td2->td_md.md_cop2 = octeon_cop2_alloc_ctx();
		memcpy(td2->td_md.md_cop2, td1->td_md.md_cop2,
		    sizeof(*td1->td_md.md_cop2));
	}
	if (td1->td_md.md_ucop2) {
		td2->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
		memcpy(td2->td_md.md_ucop2, td1->td_md.md_ucop2,
		    sizeof(*td1->td_md.md_ucop2));
	}
	td2->td_md.md_cop2owner = td1->td_md.md_cop2owner;
	pcb2->pcb_context[PCB_REG_SR] |= MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX;
	/* Clear COP2 bits for userland & kernel */
	td2->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
	pcb2->pcb_context[PCB_REG_SR] &= ~MIPS_SR_COP_2_BIT;
#endif
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func) __P((void *)), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this: func(arg, frame);
	 */
	td->td_pcb->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)func;
	td->td_pcb->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)arg;
}

/* Machine-dependent process exit hook; nothing to do on MIPS. */
void
cpu_exit(struct thread *td)
{
}

/*
 * Machine-dependent thread exit: drop FPU ownership and scrub any
 * Octeon COP2 contexts so stale state cannot leak to a reused thread.
 */
void
cpu_thread_exit(struct thread *td)
{

	if (PCPU_GET(fpcurthread) == td)
		PCPU_GET(fpcurthread) = (struct thread *)0;
#ifdef CPU_CNMIPS
	if (td->td_md.md_cop2)
		memset(td->td_md.md_cop2, 0,
			sizeof(*td->td_md.md_cop2));
	if (td->td_md.md_ucop2)
		memset(td->td_md.md_ucop2, 0,
			sizeof(*td->td_md.md_ucop2));
#endif
}

/* Release per-thread MD resources (Octeon COP2 contexts, if any). */
void
cpu_thread_free(struct thread *td)
{
#ifdef CPU_CNMIPS
	if (td->td_md.md_cop2)
		octeon_cop2_free_ctx(td->td_md.md_cop2);
	if (td->td_md.md_ucop2)
		octeon_cop2_free_ctx(td->td_md.md_ucop2);
	td->td_md.md_cop2 = NULL;
	td->td_md.md_ucop2 = NULL;
#endif
}

/* Reset a thread for reuse; nothing to clean on MIPS. */
void
cpu_thread_clean(struct thread *td)
{
}

void
cpu_thread_swapin(struct thread *td)
{
	pt_entry_t *pte;
	int i;

	/*
	 * The kstack may be at a different physical address now.
	 * Cache the PTEs for the Kernel stack in the machine dependent
	 * part of the thread struct so cpu_switch() can quickly map in
	 * the pcb struct and kernel stack.
	 */
	for (i = 0; i < KSTACK_PAGES; i++) {
		pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE);
		/* Strip software-only PTE bits; only hardware bits are cached. */
		td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK;
	}
}

/* Nothing to save on swap-out; PTEs are recaptured in cpu_thread_swapin(). */
void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Lay out the pcb/trapframe at the top of a newly allocated kernel stack
 * and cache the stack's PTEs for cpu_switch() (same scheme as swapin).
 */
void
cpu_thread_alloc(struct thread *td)
{
	pt_entry_t *pte;
	int i;

	KASSERT((td->td_kstack & (1 << PAGE_SHIFT)) == 0, ("kernel stack must be aligned."));
	/* pcb sits at the very top of the stack; the trapframe is its first member. */
	td->td_pcb = (struct pcb *)(td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE) - 1;
	td->td_frame = &td->td_pcb->pcb_regs;

	for (i = 0; i < KSTACK_PAGES; i++) {
		pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE);
		td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK;
	}
}

/*
 * Store a syscall's result in the trapframe registers the userland
 * stub expects: v0/v1 carry the value, a3 is the error flag.
 * SYS___syscall with a 32-bit ABI returns 64-bit results split across
 * v0/v1 (quad_syscall), except for lseek which is natively 64-bit.
 */
void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *locr0 = td->td_frame;
	unsigned int code;
	int quad_syscall;

	code = locr0->v0;
	quad_syscall = 0;
#if defined(__mips_n32) || defined(__mips_n64)
#ifdef COMPAT_FREEBSD32
	if (code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32))
		quad_syscall = 1;
#endif
#else
	if (code == SYS___syscall)
		quad_syscall = 1;
#endif

	/* Indirect syscalls carry the real syscall number in the arg registers. */
	if (code == SYS_syscall)
		code = locr0->a0;
	else if (code == SYS___syscall) {
		if (quad_syscall)
			code = _QUAD_LOWWORD ? locr0->a1 : locr0->a0;
		else
			code = locr0->a0;
	}

	switch (error) {
	case 0:
		if (quad_syscall && code != SYS_lseek) {
			/*
			 * System call invoked through the
			 * SYS___syscall interface but the
			 * return value is really just 32
			 * bits.
			 */
			locr0->v0 = td->td_retval[0];
			if (_QUAD_LOWWORD)
				locr0->v1 = td->td_retval[0];
			locr0->a3 = 0;
		} else {
			locr0->v0 = td->td_retval[0];
			locr0->v1 = td->td_retval[1];
			locr0->a3 = 0;
		}
		break;

	case ERESTART:
		/* Rewind the PC so the syscall instruction re-executes. */
		locr0->pc = td->td_pcb->pcb_tpc;
		break;

	case EJUSTRETURN:
		break;	/* nothing to do */

	default:
		if (quad_syscall && code != SYS_lseek) {
			locr0->v0 = error;
			if (_QUAD_LOWWORD)
				locr0->v1 = error;
			locr0->a3 = 1;
		} else {
			locr0->v0 = error;
			locr0->a3 = 1;
		}
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall. Put enough state in the new thread's PCB to get it to go back
 * userret(), where we can intercept it again to set the return (upcall)
 * Address and stack, along with those from upcalls that are from other sources
 * such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb. This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 *
	 * XXXKSE It might be a good idea to simply skip this as
	 * the values of the other registers may be unimportant.
	 * This would remove any requirement for knowing the KSE
	 * at this time (see the matching comment below for
	 * more analysis) (need a good safe default).
	 * In MIPS, the trapframe is the first element of the PCB
	 * and gets copied when we copy the PCB. No separate copy
	 * is needed.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));

	/*
	 * Set registers for trampoline to user mode.
	 * Same layout as cpu_fork(): cpu_switch() restores this context
	 * and resumes at fork_trampoline with S0/S1/S2 = (fork_return,
	 * td, frame).
	 */

	pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline;
	/* Make sp 64-bit aligned */
	pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td->td_pcb &
	    ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ);
	pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return;
	pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td;
	pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td->td_frame;
	/* Dont set IE bit in SR. sched lock release will take care of it */
	pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() &
	    (MIPS_SR_PX | MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK);

	/*
	 * FREEBSD_DEVELOPERS_FIXME:
	 * Setup any other CPU-Specific registers (Not MIPS Standard)
	 * that are needed.
	 */

	/* SMP Setup to release sched_lock in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_intr = MIPS_SR_INT_IE;
#if 0
	/* Maybe we need to fix this? */
	td->td_md.md_saved_sr = ( (MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT) |
	                        (MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX) |
	                        (MIPS_SR_INT_IE | MIPS_HARD_INT_MASK));
#endif
}

/*
 * Set that machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct trapframe *tf;
	register_t sp;

	/*
	 * At the point where a function is called, sp must be 8
	 * byte aligned[for compatibility with 64-bit CPUs]
	 * in ``See MIPS Run'' by D. Sweetman, p. 269
	 * align stack
	 */
	sp = ((register_t)(intptr_t)(stack->ss_sp + stack->ss_size) & ~0x7) -
	    CALLFRAME_SIZ;

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.
	 */
	tf = td->td_frame;
	bzero(tf, sizeof(struct trapframe));
	tf->sp = sp;
	tf->pc = (register_t)(intptr_t)entry;
	/*
	 * MIPS ABI requires T9 to be the same as PC
	 * in subroutine entry point
	 */
	tf->t9 = (register_t)(intptr_t)entry;
	tf->a0 = (register_t)(intptr_t)arg;

	/*
	 * Keep interrupt mask
	 */
	td->td_frame->sr = MIPS_SR_KSU_USER | MIPS_SR_EXL | MIPS_SR_INT_IE |
	    (mips_rd_status() & MIPS_SR_INT_MASK);
#if defined(__mips_n32)
	td->td_frame->sr |= MIPS_SR_PX;
#elif defined(__mips_n64)
	td->td_frame->sr |= MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX;
#endif
/*	tf->sr |= (ALL_INT_MASK & idle_mask) | SR_INT_ENAB; */
	/**XXX the above may now be wrong -- mips2 implements this as panic */
	/*
	 * FREEBSD_DEVELOPERS_FIXME:
	 * Setup any other CPU-Specific registers (Not MIPS Standard)
	 * that are needed.
	 */
}

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define	ZIDLE_LO(v)	((v) * 2 / 3)
#define	ZIDLE_HI(v)	((v) * 4 / 5)

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
#ifndef __mips_n64
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
	SLIST_INIT(&sf_freelist.sf_head);
	/* Reserve a contiguous KVA range; one page of KVA per sf_buf. */
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	/*
	 * NOTE(review): the M_NOWAIT malloc() result is not checked; a
	 * failure here would fault in the loop below — confirm whether
	 * boot-time allocation failure is considered impossible.
	 */
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
	}
	sf_buf_alloc_want = 0;
}
#endif

/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifndef __mips_n64
	struct sf_buf *sf;
	int error;

	mtx_lock(&sf_freelist.sf_lock);
	/*
	 * Sleep until a buffer is freed, unless the caller asked not to
	 * block (SFB_NOWAIT).  SFB_CATCH makes the sleep signal-interruptible.
	 */
	while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
		if (flags & SFB_NOWAIT)
			break;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_freelist, &sf_freelist.sf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			break;
	}
	/* sf may still be NULL here (SFB_NOWAIT or interrupted sleep). */
	if (sf != NULL) {
		SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
		sf->m = m;
		nsfbufsused++;
		nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
		/* Map the page at the buffer's dedicated KVA. */
		pmap_qenter(sf->kva, &sf->m, 1);
	}
	mtx_unlock(&sf_freelist.sf_lock);
	return (sf);
#else
	/* n64 has a direct map; the page address itself serves as the sf_buf. */
	return ((struct sf_buf *)m);
#endif
}

/*
 * Release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef __mips_n64
	/* Unmap before putting the buffer back on the freelist. */
	pmap_qremove(sf->kva, 1);
	mtx_lock(&sf_freelist.sf_lock);
	SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
	nsfbufsused--;
	if (sf_buf_alloc_want > 0)
		wakeup(&sf_freelist);
	mtx_unlock(&sf_freelist.sf_lock);
#endif
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

/*
 * Record the userland TLS base for this thread.  On MIPS the value is
 * kept in MD thread state (presumably delivered to userland via the
 * rdhwr emulation / cpu_switch path — confirm in MD trap code).
 */
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	td->td_md.md_tls = (char*)tls_base;

	return (0);
}

#ifdef DDB
#include <ddb/ddb.h>

/* Print one trapframe member as "name value". */
#define	DB_PRINT_REG(ptr, regname) \
	db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->regname))

/* Print one pcb_context[] slot by its PCB_REG_* index. */
#define	DB_PRINT_REG_ARRAY(ptr, arrname, regname) \
	db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->arrname[regname]))

/*
 * Dump every register saved in a trapframe.  The t0-t7/a4-a7 split
 * follows the ABI: n32/n64 have eight argument registers.
 */
static void
dump_trapframe(struct trapframe *trapframe)
{

	db_printf("Trapframe at %p\n", trapframe);

	DB_PRINT_REG(trapframe, zero);
	DB_PRINT_REG(trapframe, ast);
	DB_PRINT_REG(trapframe, v0);
	DB_PRINT_REG(trapframe, v1);
	DB_PRINT_REG(trapframe, a0);
	DB_PRINT_REG(trapframe, a1);
	DB_PRINT_REG(trapframe, a2);
	DB_PRINT_REG(trapframe, a3);
#if defined(__mips_n32) || defined(__mips_n64)
	DB_PRINT_REG(trapframe, a4);
	DB_PRINT_REG(trapframe, a5);
	DB_PRINT_REG(trapframe, a6);
	DB_PRINT_REG(trapframe, a7);
	DB_PRINT_REG(trapframe, t0);
	DB_PRINT_REG(trapframe, t1);
	DB_PRINT_REG(trapframe, t2);
	DB_PRINT_REG(trapframe, t3);
#else
	DB_PRINT_REG(trapframe, t0);
	DB_PRINT_REG(trapframe, t1);
	DB_PRINT_REG(trapframe, t2);
	DB_PRINT_REG(trapframe, t3);
	DB_PRINT_REG(trapframe, t4);
	DB_PRINT_REG(trapframe, t5);
	DB_PRINT_REG(trapframe, t6);
	DB_PRINT_REG(trapframe, t7);
#endif
	DB_PRINT_REG(trapframe, s0);
	DB_PRINT_REG(trapframe, s1);
	DB_PRINT_REG(trapframe, s2);
	DB_PRINT_REG(trapframe, s3);
	DB_PRINT_REG(trapframe, s4);
	DB_PRINT_REG(trapframe, s5);
	DB_PRINT_REG(trapframe, s6);
	DB_PRINT_REG(trapframe, s7);
	DB_PRINT_REG(trapframe, t8);
	DB_PRINT_REG(trapframe, t9);
	DB_PRINT_REG(trapframe, k0);
	DB_PRINT_REG(trapframe, k1);
	DB_PRINT_REG(trapframe, gp);
	DB_PRINT_REG(trapframe, sp);
	DB_PRINT_REG(trapframe, s8);
	DB_PRINT_REG(trapframe, ra);
	DB_PRINT_REG(trapframe, sr);
	DB_PRINT_REG(trapframe, mullo);
	DB_PRINT_REG(trapframe, mulhi);
	DB_PRINT_REG(trapframe, badvaddr);
	DB_PRINT_REG(trapframe, cause);
	DB_PRINT_REG(trapframe, pc);
}

/*
 * "show pcb [addr]" DDB command: dump a thread's pcb, its saved
 * trapframe, and its cpu_switch() context.  Defaults to curthread.
 */
DB_SHOW_COMMAND(pcb, ddb_dump_pcb)
{
	struct thread *td;
	struct pcb *pcb;
	struct trapframe *trapframe;

	/* Determine which thread to examine. */
	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = curthread;

	pcb = td->td_pcb;

	db_printf("Thread %d at %p\n", td->td_tid, td);

	db_printf("PCB at %p\n", pcb);

	/* The trapframe is embedded as the pcb's first member. */
	trapframe = &pcb->pcb_regs;
	dump_trapframe(trapframe);

	db_printf("PCB Context:\n");
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S0);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S1);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S2);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S3);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S4);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S5);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S6);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S7);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SP);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S8);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_RA);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SR);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_GP);
	DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_PC);

	db_printf("PCB onfault = %p\n", pcb->pcb_onfault);
	db_printf("md_saved_intr = 0x%0lx\n", (long)td->td_md.md_saved_intr);
	db_printf("md_spinlock_count = %d\n", td->td_md.md_spinlock_count);

	/* Flag the unusual case where td_frame has been redirected. */
	if (td->td_frame != trapframe) {
		db_printf("td->td_frame %p is not the same as pcb_regs %p\n",
			  td->td_frame, trapframe);
	}
}

/*
 * Dump the trapframe beginning at address specified by first argument.
720 */ 721DB_SHOW_COMMAND(trapframe, ddb_dump_trapframe) 722{ 723 724 if (!have_addr) 725 return; 726 727 dump_trapframe((struct trapframe *)addr); 728} 729 730#endif /* DDB */ 731