1/*- 2 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 3 * Copyright (C) 1995, 1996 TooLs GmbH. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by TooLs GmbH. 17 * 4. The name of TooLs GmbH may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31/*- 32 * Copyright (C) 2001 Benno Rice 33 * All rights reserved. 34 * 35 * Redistribution and use in source and binary forms, with or without 36 * modification, are permitted provided that the following conditions 37 * are met: 38 * 1. 
Redistributions of source code must retain the above copyright 39 * notice, this list of conditions and the following disclaimer. 40 * 2. Redistributions in binary form must reproduce the above copyright 41 * notice, this list of conditions and the following disclaimer in the 42 * documentation and/or other materials provided with the distribution. 43 * 44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ 55 */ 56 57#include <sys/cdefs.h> 58__FBSDID("$FreeBSD: releng/10.3/sys/powerpc/aim/machdep.c 266019 2014-05-14 14:08:45Z ian $"); 59 60#include "opt_compat.h" 61#include "opt_ddb.h" 62#include "opt_kstack_pages.h" 63#include "opt_platform.h" 64 65#include <sys/param.h> 66#include <sys/proc.h> 67#include <sys/systm.h> 68#include <sys/bio.h> 69#include <sys/buf.h> 70#include <sys/bus.h> 71#include <sys/cons.h> 72#include <sys/cpu.h> 73#include <sys/eventhandler.h> 74#include <sys/exec.h> 75#include <sys/imgact.h> 76#include <sys/kdb.h> 77#include <sys/kernel.h> 78#include <sys/ktr.h> 79#include <sys/linker.h> 80#include <sys/lock.h> 81#include <sys/malloc.h> 82#include <sys/mbuf.h> 83#include <sys/msgbuf.h> 84#include <sys/mutex.h> 85#include <sys/ptrace.h> 86#include <sys/reboot.h> 87#include <sys/rwlock.h> 88#include <sys/signalvar.h> 89#include <sys/syscallsubr.h> 90#include <sys/sysctl.h> 91#include <sys/sysent.h> 92#include <sys/sysproto.h> 93#include <sys/ucontext.h> 94#include <sys/uio.h> 95#include <sys/vmmeter.h> 96#include <sys/vnode.h> 97 98#include <net/netisr.h> 99 100#include <vm/vm.h> 101#include <vm/vm_extern.h> 102#include <vm/vm_kern.h> 103#include <vm/vm_page.h> 104#include <vm/vm_map.h> 105#include <vm/vm_object.h> 106#include <vm/vm_pager.h> 107 108#include <machine/altivec.h> 109#ifndef __powerpc64__ 110#include <machine/bat.h> 111#endif 112#include <machine/cpu.h> 113#include <machine/elf.h> 114#include <machine/fpu.h> 115#include <machine/hid.h> 116#include <machine/kdb.h> 117#include <machine/md_var.h> 118#include <machine/metadata.h> 119#include <machine/mmuvar.h> 120#include <machine/pcb.h> 121#include <machine/reg.h> 122#include <machine/sigframe.h> 123#include <machine/spr.h> 124#include <machine/trap.h> 125#include <machine/vmparam.h> 126#include <machine/ofw_machdep.h> 127 128#include <ddb/ddb.h> 129 130#include <dev/ofw/openfirm.h> 131 132#ifdef DDB 133extern vm_offset_t 
ksym_start, ksym_end;
#endif

int cold = 1;			/* cold-boot flag; presumably cleared by MI startup — not visible here */
#ifdef __powerpc64__
extern int n_slbs;
int cacheline_size = 128;	/* default; re-measured with dcbz in powerpc_init() */
#else
int cacheline_size = 32;	/* default; re-measured with dcbz in powerpc_init() */
#endif
int hw_direct_map = 1;

extern void *ap_pcpu;

struct pcpu __pcpu[MAXCPU];

/* Trap frame for thread0 before a real one exists. */
static struct trapframe frame0;

char machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
	   CTLFLAG_RD, &cacheline_size, 0, "");

uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *);

long Maxmem = 0;
long realmem = 0;

#ifndef __powerpc64__
struct bat battable[16];
#endif

struct kva_md_info kmi;

/*
 * Late machine-dependent startup, run via SYSINIT at SI_SUB_CPU:
 * start the decrementer clock, announce the CPU, report physical
 * memory and KVA, initialize the kernel VA submaps, and set up the
 * buffer cache so disk labels can be read.
 */
static void
cpu_startup(void *dummy)
{

	/*
	 * Initialise the decrementer-based clock.
	 */
	decr_init();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	cpu_setup(PCPU_GET(cpuid));

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
	    ptoa(physmem) / 1048576);
	realmem = physmem;

	if (bootverbose)
		printf("available KVA = %zd (%zd MB)\n",
		    virtual_end - virtual_avail,
		    (virtual_end - virtual_avail) / 1048576);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		/* phys_avail[] holds (start, end) pairs, zero-terminated. */
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_offset_t size1 =
			    phys_avail[indx + 1] - phys_avail[indx];

#ifdef __powerpc64__
			printf("0x%016lx - 0x%016lx, %ld bytes (%ld pages)\n",
#else
			printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n",
#endif
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}

extern char kernel_text[], _end[];

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void *testppc64, *testppc64size;
extern void *restorebridge, *restorebridgesize;
extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
extern void *trapcode64;
#endif

/* Trap handler code/size symbol pairs supplied by trap_subr.S. */
extern void *rstcode, *rstsize;
extern void *trapcode, *trapsize;
extern void *slbtrap, *slbtrapsize;
extern void *alitrap, *alisize;
extern void *dsitrap, *dsisize;
extern void *decrint, *decrsize;
extern void *extint, *extsize;
extern void *dblow, *dbsize;
extern void *imisstrap, *imisssize;
extern void *dlmisstrap, *dlmisssize;
extern void *dsmisstrap, *dsmisssize;
char save_trap_init[0x2f00];		/* EXC_LAST */

/*
 * Early machine-dependent bootstrap, called from locore before mi_startup.
 *
 * Parses loader metadata, sets up pcpu/thread0, brings up the console and
 * Open Firmware client interface, measures the cache line size, installs
 * the low-memory trap vectors, attaches a platform module, and bootstraps
 * the pmap.
 *
 * startkernel/endkernel: kernel image bounds (endkernel may be extended
 * from loader metadata).  basekernel is unused here.  mdp: loader
 * metadata pointer, may be NULL.
 *
 * Returns an aligned address just below thread0's pcb, less space for a
 * callframe — presumably used by the caller as the initial stack pointer
 * (TODO confirm against locore).
 */
uintptr_t
powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
	vm_offset_t basekernel, void *mdp)
{
	struct pcpu *pc;
	void *generictrap;
	size_t trap_offset;
	void *kmdp;
	char *env;
	register_t msr, scratch;
#ifdef WII
	register_t vers;
#endif
	uint8_t *cache_check;
	int cacheline_warn;
	#ifndef __powerpc64__
	int ppc64;
	#endif

	kmdp = NULL;
	trap_offset = 0;
	cacheline_warn = 0;

	/* Save trap vectors. */
	ofw_save_trap_vec(save_trap_init);

#ifdef WII
	/*
	 * The Wii loader doesn't pass us any environment so, mdp
	 * points to garbage at this point. The Wii CPU is a 750CL.
	 */
	vers = mfpvr();
	if ((vers & 0xfffff0e0) == (MPC750 << 16 | MPC750CL))
		mdp = NULL;
#endif

	/*
	 * Parse metadata if present and fetch parameters. Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			endkernel = ulmax(endkernel, MD_FETCH(kmdp,
			    MODINFOMD_KERNEND, vm_offset_t));
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}
	}

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;

	/*
	 * Set up per-cpu data.
	 */
	pc = __pcpu;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	/* Stash curthread in the TOC/SDA register so PCPU_GET works. */
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread));
#endif
	pc->pc_cpuid = 0;

	/* SPRG0 holds the pcpu pointer for the trap handlers. */
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */

	mutex_init();

	/*
	 * Install the OF client interface
	 */

	OF_bootstrap();

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

	/*
	 * Complain if there is no metadata.
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("powerpc_init: no loader metadata.\n");
	}

	/*
	 * Init KDB
	 */

	kdb_init();

	/* Various very early CPU fix ups */
	switch (mfpvr() >> 16) {
		/*
		 * PowerPC 970 CPUs have a misfeature requested by Apple that
		 * makes them pretend they have a 32-byte cacheline. Turn this
		 * off before we measure the cacheline size.
		 */
		case IBM970:
		case IBM970FX:
		case IBM970MP:
		case IBM970GX:
			scratch = mfspr(SPR_HID5);
			scratch &= ~HID5_970_DCBZ_SIZE_HI;
			mtspr(SPR_HID5, scratch);
			break;
	#ifdef __powerpc64__
		case IBMPOWER7:
			/* XXX: get from ibm,slb-size in device tree */
			n_slbs = 32;
			break;
	#endif
	}

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	/* Fill 256 bytes with a nonzero pattern, then dcbz one line. */
	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}

	/* Make sure the kernel icache is valid before we go too much further */
	__syncicache((caddr_t)startkernel, endkernel - startkernel);

	#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64 bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	/* The probe's trap handler was copied to EXC_PGM above our heads. */
	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch,&rfi_patch1,4);
	#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch,&rfi_patch2,4);
	#endif

		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);

		/*
		 * Set the common trap entry point to the one that
		 * knows to restore 32-bit operation on execution.
		 */

		generictrap = &trapcode64;
	} else {
		generictrap = &trapcode;
	}

	#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
	generictrap = &trapcode;
	#endif

	/* Install the handlers at their architected vector addresses. */
	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstsize);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbsize);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbsize);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbsize);
#else
	bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_PGM, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_TRC, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_BPT, (size_t)&trapsize);
#endif
	bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&alisize);
	bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsisize);
	bcopy(generictrap, (void *)EXC_ISI, (size_t)&trapsize);
	#ifdef __powerpc64__
	bcopy(&slbtrap, (void *)EXC_DSE, (size_t)&slbtrapsize);
	bcopy(&slbtrap, (void *)EXC_ISE, (size_t)&slbtrapsize);
	#endif
	bcopy(generictrap, (void *)EXC_EXI, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPU, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_SC, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPA, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VEC, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_PERF, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VECAST_G4, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VECAST_G5, (size_t)&trapsize);
	#ifndef __powerpc64__
	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
	#endif
	/* Make the freshly-copied vectors visible to instruction fetch. */
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if cacheline size was not determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */

	platform_probe_and_attach();

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);

	pmap_bootstrap(startkernel, endkernel);
	mtmsr(PSL_KERNSET & ~PSL_EE);

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.
	 */
	/* Place the pcb at the 16-byte-aligned top of thread0's kstack. */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15UL);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	return (((uintptr_t)thread0.td_pcb -
	    (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}

/*
 * MD bzero: byte-copy up to u_long alignment, clear eight u_longs per
 * iteration, then finish with word- and byte-sized tails.
 */
void
bzero(void *buf, size_t len)
{
	caddr_t p;

	p = buf;

	/* Align to a u_long boundary one byte at a time. */
	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
		*p++ = 0;
		len--;
	}

	/* Unrolled: clear eight u_longs per pass. */
	while (len >= sizeof(u_long) * 8) {
		*(u_long*) p = 0;
		*((u_long*) p + 1) = 0;
		*((u_long*) p + 2) = 0;
		*((u_long*) p + 3) = 0;
		len -= sizeof(u_long) * 8;
		*((u_long*) p + 4) = 0;
		*((u_long*) p + 5) = 0;
		*((u_long*) p + 6) = 0;
		*((u_long*) p + 7) = 0;
		p += sizeof(u_long) * 8;
	}

	/* Remaining whole u_longs. */
	while (len >= sizeof(u_long)) {
		*(u_long*) p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}

	/* Trailing bytes. */
	while (len) {
		*p++ = 0;
		len--;
	}
}

/* No MD work to do at boot-time reboot notification; intentionally empty. */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* TBD */
}

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

/*
 * Set the resume PC of a ptraced thread by rewriting SRR0 in its
 * saved trap frame.  Always returns 0.
 */
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr0 = (register_t)addr;

	return (0);
}

/*
 * Arrange for the thread to trap after one instruction by setting
 * PSL_SE (single-step trace enable) in its saved SRR1.
 */
int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

/* Undo ptrace_single_step(): clear PSL_SE in the saved SRR1. */
int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

/* KDB variants of the above, operating on the debugger's trap frame. */
void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
/* Copy the SLB contents from the current CPU */
memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
#endif
}

/*
 * Enter a spin-lock section: on the first (outermost) acquisition,
 * disable interrupts and remember the previous MSR; nested calls just
 * bump the per-thread count.  Always enters a critical section.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		msr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_msr = msr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

/*
 * Leave a spin-lock section; restore the saved MSR (re-enabling
 * interrupts) only when the outermost section is exited.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	critical_exit();
	msr = td->td_md.md_saved_msr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(msr);
}

int db_trap_glue(struct trapframe *);		/* Called from trap_subr.S */

/*
 * Filter traps into the kernel debugger.  Only kernel-mode traps
 * (PSL_PR clear in SRR1) of debugger interest are forwarded to
 * kdb_trap(): trace, run-mode trace, breakpoint, DSI, or a program
 * exception whose SRR1 trap bit (0x20000) is set, which is reported
 * as T_BREAKPOINT.  Returns kdb_trap()'s result, or 0 if the trap
 * was not for the debugger.
 */
int
db_trap_glue(struct trapframe *frame)
{
	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
		|| (frame->exc == EXC_PGM
		    && (frame->srr1 & 0x20000))
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;
		/* A program exception with the trap bit set is a breakpoint. */
		if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}

#ifndef __powerpc64__

/*
 * Look up the VSID for a virtual address from the pmap's segment
 * register for that address's segment.
 */
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}

#endif

/* Early I/O mapping: identity — returns the physical address unchanged. */
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{

	return (pa);
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
/*
 * Flush and disable the L1 data cache and, if enabled, the L2 and L3
 * caches, following the MPC7450 manual's hardware-flush sequence.
 * Runs with external interrupts and data translation disabled; the
 * caller's MSR is restored on exit.
 */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	/* Disable L2 prefetching before flushing. */
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	/* Stop all data streams before touching the caches. */
	__asm__ __volatile__("dssall; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM, then do it
	 * from ROM.
	 */
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		/* Load to bring the line in, then flush it. */
		temp = *memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);	/* advance one 32-byte line */
	}

	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

	/* Walk LDSTCR way-lock patterns 0xfe, 0xfd, ... until all ways done. */
	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			temp = *memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

	/* Hardware-flush, disable, and invalidate the L2 if it is enabled. */
	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	/* Same sequence for the L3. */
	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	/* Finally disable the L1 data cache. */
	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

/*
 * Put the CPU to sleep: save FPU/AltiVec state, SPRGs, SRRs, and the
 * timebase, flush and disable the caches, then set HID0_SLEEP and
 * spin setting PSL_POW in the MSR.  The wake-up path apparently
 * longjmps back through the pcpu "restore" jmp_buf set below (set up
 * by reset/AP startup code not visible here — confirm), after which
 * the saved state is restored.
 */
void
cpu_sleep()
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		/* Direct path: save state and enter sleep. */
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

		/* Setting PSL_POW enters sleep; loop in case we wake early. */
		while (1)
			mtmsr(msr);
	}
	/* Resume path (via longjmp): restore saved state. */
	mttb(timebase);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}