1/* $NetBSD$ */ 2 3/* 4 * Copyright (c) 1988 University of Utah. 5 * Copyright (c) 1982, 1986, 1990, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the Systems Programming Group of the University of Utah Computer 10 * Science Department. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * from: Utah $Hdr: machdep.c 1.74 92/12/20$ 37 * 38 * @(#)machdep.c 8.10 (Berkeley) 4/20/94 39 */ 40 41#include <sys/cdefs.h> 42__KERNEL_RCSID(0, "$NetBSD$"); 43 44#include "opt_ddb.h" 45#include "opt_kgdb.h" 46#include "opt_compat_netbsd.h" 47#include "opt_fpu_emulate.h" 48#include "opt_m060sp.h" 49#include "opt_modular.h" 50#include "opt_panicbutton.h" 51#include "opt_extmem.h" 52#include "opt_m68k_arch.h" 53 54#include <sys/param.h> 55#include <sys/systm.h> 56#include <sys/callout.h> 57#include <sys/signalvar.h> 58#include <sys/kauth.h> 59#include <sys/kernel.h> 60#include <sys/proc.h> 61#include <sys/buf.h> 62#include <sys/reboot.h> 63#include <sys/conf.h> 64#include <sys/file.h> 65#include <sys/malloc.h> 66#include <sys/mbuf.h> 67#include <sys/msgbuf.h> 68#include <sys/ioctl.h> 69#include <sys/tty.h> 70#include <sys/mount.h> 71#include <sys/exec.h> 72#include <sys/exec_aout.h> /* for MID_* */ 73#include <sys/vnode.h> 74#include <sys/syscallargs.h> 75#include <sys/core.h> 76#include <sys/kcore.h> 77#include <sys/ksyms.h> 78#include <sys/module.h> 79#include <sys/cpu.h> 80#include <sys/sysctl.h> 81#include <sys/device.h> 82 83#include "ksyms.h" 84 85#if NKSYMS || defined(DDB) || defined(MODULAR) 86#include <sys/exec_elf.h> 87#endif 88 89#include <machine/db_machdep.h> 90#include <ddb/db_sym.h> 91#include <ddb/db_extern.h> 92 93#include <m68k/cacheops.h> 94#include <machine/reg.h> 95#include <machine/pcb.h> 96#include <machine/psl.h> 97#include <machine/pte.h> 98#include <machine/kcore.h> 99 100#include <dev/cons.h> 101#include <dev/mm.h> 102 103#define MAXMEM 64*1024 /* XXX - from cmap.h */ 104#include <uvm/uvm.h> 105 106#include <machine/bus.h> 107#include <machine/autoconf.h> 108#include <arch/x68k/dev/intiovar.h> 109 110void initcpu(void); 111void identifycpu(void); 112void doboot(void) __attribute__((__noreturn__)); 113 114/* the following is used externally (sysctl_hw) */ 115char machine[] = MACHINE; /* from <machine/param.h> */ 116 117/* Our 
exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

/* Submap for physio transfers; carved out of kernel_map in cpu_startup(). */
struct vm_map *phys_map = NULL;

/* Physical-memory bounds and kernel image/symbol-table end, set up by
 * pmap_bootstrap()/locore before we run. */
extern paddr_t avail_start, avail_end;
extern u_int lowram;
extern int end, *esym;

int maxmem;			/* max memory per process */
int physmem = MAXMEM;		/* max supported memory, changes to actual */

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int safepri = PSL_LOWIPL;

/* prototypes for local functions */
void identifycpu(void);
void initcpu(void);
int cpu_dumpsize(void);
int cpu_dump(int (*)(dev_t, daddr_t, void *, size_t), daddr_t *);
void cpu_init_kcore_hdr(void);
#ifdef EXTENDED_MEMORY
static int mem_exists(void *, u_long);
static void setmemrange(void);
#endif

/* functions called from locore.s */
void x68k_init(void);
void dumpsys(void);
void straytrap(int, u_short);
void nmihand(struct frame);
void intrhand(int);

/*
 * On the 68020/68030, the value of delay_divisor is roughly
 * 2048 / cpuspeed (where cpuspeed is in MHz).
 *
 * On the 68040, the value of delay_divisor is roughly
 * 759 / cpuspeed (where cpuspeed is in MHz).
 *
 * On the 68060, the value of delay_divisor is reported to be
 * 128 / cpuspeed (where cpuspeed is in MHz).
 */
int delay_divisor = 140;	/* assume some reasonable value to start */
static int cpuspeed;		/* MPU clock (in MHz) */

/*
 * Machine-dependent crash dump header info.
 */
cpu_kcore_hdr_t cpu_kcore_hdr;

/* Callout for the panic-button ("crashandburn") timeout; see nmihand(). */
static callout_t candbtimer_ch;

/*
 * x68k_init:
 *
 *	Early machine-dependent initialization, called from locore.s
 *	after pmap_bootstrap().  Hands available physical memory to UVM,
 *	probes extended memory (if configured), and maps and initializes
 *	the kernel message buffer.
 */
void
x68k_init(void)
{
	u_int i;
	paddr_t msgbuf_pa;

	/*
	 * Tell the VM system about available physical memory.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end),
	    VM_FREELIST_MAINMEM);

	/*
	 * avail_end was pre-decremented in pmap_bootstrap to compensate
	 * for msgbuf pages, but avail_end is also used to check DMA'able
	 * memory range for intio devices and it would be updated per
	 * probed extended memories, so explicitly save msgbuf address here.
	 */
	msgbuf_pa = avail_end;

#ifdef EXTENDED_MEMORY
	setmemrange();
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 * The msgbuf pages were reserved past avail_end by pmap_bootstrap;
	 * wire them into the kernel pmap here.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_kenter_pa((vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    msgbuf_pa + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
}

/*
 * Console initialization: called early on from main,
 * before vm init or startup.  Do enough configuration
 * to choose and initialize a console.
 */
void
consinit(void)
{

	/*
	 * bring graphics layer up.
	 */
	config_console();

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#ifdef KGDB
	zs_kgdb_init();	/* XXX */
#endif
#if NKSYMS || defined(DDB) || defined(MODULAR)
	/*
	 * Register kernel symbols with ksyms(4).  esym points just past
	 * the symbol table the boot loader placed after the kernel image.
	 */
	ksyms_addsyms_elf((int)esym - (int)&end - sizeof(Elf32_Ehdr),
	    (void *)&end, esym);
#endif
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
}

/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/* Pre-build the FPU idle frame used for context switching. */
	if (fputype != FPU_NONE)
		m68k_make_fpu_idle_frame();

	/*
	 * Initialize the kernel crash dump header.
261 */ 262 cpu_init_kcore_hdr(); 263 264 /* 265 * Good {morning,afternoon,evening,night}. 266 */ 267 printf("%s%s", copyright, version); 268 identifycpu(); 269 format_bytes(pbuf, sizeof(pbuf), ctob(physmem)); 270 printf("total memory = %s\n", pbuf); 271 272 minaddr = 0; 273 274 /* 275 * Allocate a submap for physio 276 */ 277 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 278 VM_PHYS_SIZE, 0, false, NULL); 279 280#ifdef DEBUG 281 pmapdebug = opmapdebug; 282#endif 283 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free)); 284 printf("avail memory = %s\n", pbuf); 285 286 /* 287 * Set up CPU-specific registers, cache, etc. 288 */ 289 initcpu(); 290 291 callout_init(&candbtimer_ch, 0); 292} 293 294/* 295 * Info for CTL_HW 296 */ 297char cpu_model[96]; /* max 85 chars */ 298static const char *fpu_descr[] = { 299#ifdef FPU_EMULATE 300 ", emulator FPU", /* 0 */ 301#else 302 ", no math support", /* 0 */ 303#endif 304 ", m68881 FPU", /* 1 */ 305 ", m68882 FPU", /* 2 */ 306 "/FPU", /* 3 */ 307 "/FPU", /* 4 */ 308 }; 309 310void 311identifycpu(void) 312{ 313 /* there's alot of XXX in here... 
*/ 314 const char *cpu_type, *mach, *mmu, *fpu; 315 char clock[16]; 316 317 /* 318 * check machine type constant 319 */ 320 switch (intio_get_sysport_mpustat()) { 321 case 0xdc: 322 /* 323 * CPU Type == 68030, Clock == 25MHz 324 */ 325 mach = "030"; 326 break; 327 case 0xfe: 328 /* 329 * CPU Type == 68000, Clock == 16MHz 330 */ 331 mach = "000XVI"; 332 break; 333 case 0xff: 334 /* 335 * CPU Type == 68000, Clock == 10MHz 336 */ 337 mach = "000/ACE/PRO/EXPERT/SUPER"; 338 break; 339 default: 340 /* 341 * unknown type 342 */ 343 mach = "000?(unknown model)"; 344 break; 345 } 346 347 cpuspeed = 2048 / delay_divisor; 348 sprintf(clock, "%dMHz", cpuspeed); 349 switch (cputype) { 350 case CPU_68060: 351 cpu_type = "m68060"; 352 mmu = "/MMU"; 353 cpuspeed = 128 / delay_divisor; 354 sprintf(clock, "%d/%dMHz", cpuspeed*2, cpuspeed); 355 break; 356 case CPU_68040: 357 cpu_type = "m68040"; 358 mmu = "/MMU"; 359 cpuspeed = 759 / delay_divisor; 360 sprintf(clock, "%d/%dMHz", cpuspeed*2, cpuspeed); 361 break; 362 case CPU_68030: 363 cpu_type = "m68030"; 364 mmu = "/MMU"; 365 break; 366 case CPU_68020: 367 cpu_type = "m68020"; 368 mmu = ", m68851 MMU"; 369 break; 370 default: 371 cpu_type = "unknown"; 372 mmu = ", unknown MMU"; 373 break; 374 } 375 if (fputype >= 0 && fputype < sizeof(fpu_descr)/sizeof(fpu_descr[0])) 376 fpu = fpu_descr[fputype]; 377 else 378 fpu = ", unknown FPU"; 379 sprintf(cpu_model, "X68%s (%s CPU%s%s, %s clock)", 380 mach, cpu_type, mmu, fpu, clock); 381 printf("%s\n", cpu_model); 382} 383 384/* 385 * machine dependent system variables. 
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* Create the machdep node itself... */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	/* ...and machdep.console_device (hw console dev_t). */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "console_device", NULL,
	    sysctl_consdev, 0, NULL, sizeof(dev_t),
	    CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
}

int waittime = -1;		/* -1 until the first sync-on-shutdown */
int power_switch_is_off = 0;	/* set by the power-switch shutdown hook */

/*
 * cpu_reboot:
 *
 *	Machine-dependent halt/reboot, called with the final boothowto
 *	flags.  Syncs disks (unless RB_NOSYNC), optionally dumps core
 *	(RB_DUMP), runs shutdown hooks, then halts or reboots as
 *	described by the comment block near the bottom.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	struct pcb *pcb = lwp_getpcb(curlwp);

	/* take a snap shot before clobbering any registers */
	if (pcb != NULL)
		savectx(pcb);

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		/*resettodr();*/
	}

	/* Disable interrputs. */
	splhigh();

	if (howto & RB_DUMP)
		dumpsys();

	/* Run any shutdown hooks. */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

#if defined(PANICWAIT) && !defined(DDB)
	if ((howto & RB_HALT) == 0 && panicstr) {
		printf("hit any key to reboot...\n");
		(void)cngetc();
		printf("\n");
	}
#endif

	/* Finally, halt/reboot the system. */
	/* a) RB_POWERDOWN
	 *	a1: the power switch is still on
	 *		Power cannot be removed; simply halt the system (b)
	 *		Power switch state is checked in shutdown hook
	 *	a2: the power switch is off
	 *		Remove the power; the simplest way is go back to ROM
	 *		eg. reboot
	 * b) RB_HALT
	 *	call cngetc
	 * c) otherwise
	 *	Reboot
	 */
	if (((howto & RB_POWERDOWN) == RB_POWERDOWN) && power_switch_is_off)
		doboot();
	else if (/*((howto & RB_POWERDOWN) == RB_POWERDOWN) ||*/
		 ((howto & RB_HALT) == RB_HALT)) {
		printf("System halted.  Hit any key to reboot.\n\n");
		(void)cngetc();
	}

	printf("rebooting...\n");
	DELAY(1000000);
	doboot();
	/* NOTREACHED */
}

/*
 * Initialize the kernel crash dump header.
 */
void
cpu_init_kcore_hdr(void)
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	int i;

	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = PAGE_SIZE;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (uint32_t)(pmap_kernel()->pm_stpa);

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = lowram;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (uint32_t)&end;

	/*
	 * X68k has multiple RAM segments on some models.
	 * Segment 0 is main memory; the rest come from UVM's physseg
	 * list (extended memory probed in setmemrange()).
	 * NOTE(review): assumes vm_nphysseg <= M68K_NPHYS_RAM_SEGS --
	 * confirm against the extended-memory configuration.
	 */
	m->ram_segs[0].start = lowram;
	m->ram_segs[0].size  = mem_size - lowram;
	for (i = 1; i < vm_nphysseg; i++) {
		m->ram_segs[i].start =
		    ctob(VM_PHYSMEM_PTR(i)->start);
		m->ram_segs[i].size  =
		    ctob(VM_PHYSMEM_PTR(i)->end - VM_PHYSMEM_PTR(i)->start);
	}
}

/*
 * Compute the size of the machine-dependent crash dump header.
 * Returns size in disk blocks.
541 */ 542 543#define CHDRSIZE (ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t))) 544#define MDHDRSIZE roundup(CHDRSIZE, dbtob(1)) 545 546int 547cpu_dumpsize(void) 548{ 549 550 return btodb(MDHDRSIZE); 551} 552 553/* 554 * Called by dumpsys() to dump the machine-dependent header. 555 */ 556int 557cpu_dump(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t *blknop) 558{ 559 int buf[MDHDRSIZE / sizeof(int)]; 560 cpu_kcore_hdr_t *chdr; 561 kcore_seg_t *kseg; 562 int error; 563 564 kseg = (kcore_seg_t *)buf; 565 chdr = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(kcore_seg_t)) / 566 sizeof(int)]; 567 568 /* Create the segment header. */ 569 CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 570 kseg->c_size = MDHDRSIZE - ALIGN(sizeof(kcore_seg_t)); 571 572 memcpy(chdr, &cpu_kcore_hdr, sizeof(cpu_kcore_hdr_t)); 573 error = (*dump)(dumpdev, *blknop, (void *)buf, sizeof(buf)); 574 *blknop += btodb(sizeof(buf)); 575 return (error); 576} 577 578/* 579 * These variables are needed by /sbin/savecore 580 */ 581uint32_t dumpmag = 0x8fca0101; /* magic number */ 582int dumpsize = 0; /* pages */ 583long dumplo = 0; /* blocks */ 584 585/* 586 * This is called by main to set dumplo and dumpsize. 587 * Dumps always skip the first PAGE_SIZE of disk space in 588 * case there might be a disk label stored there. If there 589 * is extra space, put dump at the end to reduce the chance 590 * that swapping trashes it. 591 */ 592void 593cpu_dumpconf(void) 594{ 595 cpu_kcore_hdr_t *h = &cpu_kcore_hdr; 596 struct m68k_kcore_hdr *m = &h->un._m68k; 597 int chdrsize; /* size of dump header */ 598 int nblks; /* size of dump area */ 599 int i; 600 601 if (dumpdev == NODEV) 602 return; 603 nblks = bdev_size(dumpdev); 604 chdrsize = cpu_dumpsize(); 605 606 dumpsize = 0; 607 for (i = 0; m->ram_segs[i].size && i < M68K_NPHYS_RAM_SEGS; i++) 608 dumpsize += btoc(m->ram_segs[i].size); 609 /* 610 * Check to see if we will fit. 
Note we always skip the 611 * first PAGE_SIZE in case there is a disk label there. 612 */ 613 if (nblks < (ctod(dumpsize) + chdrsize + ctod(1))) { 614 dumpsize = 0; 615 dumplo = -1; 616 return; 617 } 618 619 /* 620 * Put dump at the end of the partition. 621 */ 622 dumplo = (nblks - 1) - ctod(dumpsize) - chdrsize; 623} 624 625void 626dumpsys(void) 627{ 628 cpu_kcore_hdr_t *h = &cpu_kcore_hdr; 629 struct m68k_kcore_hdr *m = &h->un._m68k; 630 const struct bdevsw *bdev; 631 daddr_t blkno; /* current block to write */ 632 /* dump routine */ 633 int (*dump)(dev_t, daddr_t, void *, size_t); 634 int pg; /* page being dumped */ 635 paddr_t maddr; /* PA being dumped */ 636 int seg; /* RAM segment being dumped */ 637 int error; /* error code from (*dump)() */ 638 639 /* XXX initialized here because of gcc lossage */ 640 seg = 0; 641 maddr = m->ram_segs[seg].start; 642 pg = 0; 643 644 /* Make sure dump device is valid. */ 645 if (dumpdev == NODEV) 646 return; 647 bdev = bdevsw_lookup(dumpdev); 648 if (bdev == NULL) 649 return; 650 if (dumpsize == 0) { 651 cpu_dumpconf(); 652 if (dumpsize == 0) 653 return; 654 } 655 if (dumplo <= 0) { 656 printf("\ndump to dev %u,%u not possible\n", 657 major(dumpdev), minor(dumpdev)); 658 return; 659 } 660 dump = bdev->d_dump; 661 blkno = dumplo; 662 663 printf("\ndumping to dev %u,%u offset %ld\n", 664 major(dumpdev), minor(dumpdev), dumplo); 665 666 printf("dump "); 667 668 /* Write the dump header. 
 */
	error = cpu_dump(dump, &blkno);
	if (error)
		goto bad;	/* jumps into the loop's error switch below */

	for (pg = 0; pg < dumpsize; pg++) {
#define NPGMB	(1024*1024/PAGE_SIZE)
		/* print out how many MBs we have dumped */
		if (pg && (pg % NPGMB) == 0)
			printf_nolog("%d ", pg / NPGMB);
#undef NPGMB
		if (maddr == 0) {
			/* Skip first page */
			maddr += PAGE_SIZE;
			blkno += btodb(PAGE_SIZE);
			continue;
		}
		/* Advance to the next RAM segment when we run off this one. */
		while (maddr >=
		    (m->ram_segs[seg].start + m->ram_segs[seg].size)) {
			if (++seg >= M68K_NPHYS_RAM_SEGS ||
			    m->ram_segs[seg].size == 0) {
				error = EINVAL;		/* XXX ?? */
				goto bad;
			}
			maddr = m->ram_segs[seg].start;
		}
		/* Map the physical page at vmmap so (*dump)() can read it. */
		pmap_enter(pmap_kernel(), (vaddr_t)vmmap, maddr,
		    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
		pmap_update(pmap_kernel());

		error = (*dump)(dumpdev, blkno, vmmap, PAGE_SIZE);
 bad:
		switch (error) {
		case 0:
			maddr += PAGE_SIZE;
			blkno += btodb(PAGE_SIZE);
			break;

		case ENXIO:
			printf("device bad\n");
			return;

		case EFAULT:
			printf("device not ready\n");
			return;

		case EINVAL:
			printf("area improper\n");
			return;

		case EIO:
			printf("i/o error\n");
			return;

		case EINTR:
			printf("aborted from console\n");
			return;

		default:
			printf("error %d\n", error);
			return;
		}
	}
	printf("succeeded\n");
}

/*
 * initcpu: CPU-specific initialization called from cpu_startup().
 * On the 68060 this patches the vector table to point unimplemented
 * integer/FP instruction traps at the Motorola software support
 * package (M060SP) or at the generic illegal-instruction/FP-fault
 * handlers.
 */
void
initcpu(void)
{
	/* XXX should init '40 vecs here, too */
#if defined(M68060)
	extern void *vectab[256];
#if defined(M060SP)
	extern uint8_t I_CALL_TOP[];
	extern uint8_t FP_CALL_TOP[];
#else
	extern uint8_t illinst;
#endif
	extern uint8_t fpfault;
#endif

#ifdef MAPPEDCOPY

	/*
	 * Initialize lower bound for doing copyin/copyout using
	 * page mapping (if not already set).  We don't do this on
	 * VAC machines as it loses big time.
	 */
	if ((int)mappedcopysize == -1) {
		mappedcopysize = PAGE_SIZE;
	}
#endif

#if defined(M68060)
	if (cputype == CPU_68060) {
#if defined(M060SP)
		/* integer support */
		vectab[61] = &I_CALL_TOP[128 + 0x00];

		/* floating point support */
		vectab[11] = &FP_CALL_TOP[128 + 0x30];
		vectab[55] = &FP_CALL_TOP[128 + 0x38];
		vectab[60] = &FP_CALL_TOP[128 + 0x40];

		vectab[54] = &FP_CALL_TOP[128 + 0x00];
		vectab[52] = &FP_CALL_TOP[128 + 0x08];
		vectab[53] = &FP_CALL_TOP[128 + 0x10];
		vectab[51] = &FP_CALL_TOP[128 + 0x18];
		vectab[50] = &FP_CALL_TOP[128 + 0x20];
		vectab[49] = &FP_CALL_TOP[128 + 0x28];
#else
		vectab[61] = &illinst;
#endif
		vectab[48] = &fpfault;
	}
	DCIS();		/* flush the patched vector table from the cache */
#endif
}

/*
 * straytrap: report an unexpected trap; called from locore.s.
 */
void
straytrap(int pc, u_short evec)
{

	printf("unexpected trap (vector offset %x) from %x\n",
	    evec & 0xFFF, pc);
#if defined(DDB)
	Debugger();
#endif
}

/* Fault-recovery hook used by badaddr()/badbaddr()/mem_exists(); the
 * trap handler longjmp()s here when non-NULL instead of panicking. */
int *nofault;

/*
 * badaddr: probe a word (16-bit) read at addr.
 * Returns 1 if the access faulted, 0 if it succeeded.  The local `i'
 * exists only to force the volatile read.
 */
int
badaddr(volatile void* addr)
{
	int i;
	label_t faultbuf;

	nofault = (int *)&faultbuf;
	if (setjmp((label_t *)nofault)) {
		nofault = NULL;
		return 1;
	}
	i = *(volatile short *)addr;
	nofault = NULL;
	return 0;
}

/*
 * badbaddr: as badaddr(), but probes a byte (8-bit) read.
 */
int
badbaddr(volatile void *addr)
{
	int i;
	label_t faultbuf;

	nofault = (int *)&faultbuf;
	if (setjmp((label_t *)nofault)) {
		nofault = NULL;
		return 1;
	}
	i = *(volatile char *)addr;
	nofault = NULL;
	return 0;
}

/*
 * intrhand: catch-all for interrupts with no registered handler.
 */
void
intrhand(int sr)
{

	printf("intrhand: unexpected sr 0x%x\n", sr);
}

/* Map IPL_* software levels to 68k PSL interrupt-mask values. */
const uint16_t ipl2psl_table[NIPL] = {
	[IPL_NONE]       = PSL_S | PSL_IPL0,
	[IPL_SOFTCLOCK]  = PSL_S | PSL_IPL1,
	[IPL_SOFTBIO]    = PSL_S | PSL_IPL1,
	[IPL_SOFTNET]    = PSL_S | PSL_IPL1,
	[IPL_SOFTSERIAL] = PSL_S | PSL_IPL1,
	[IPL_VM]         = PSL_S | PSL_IPL5,
	[IPL_SCHED]      = PSL_S | PSL_IPL7,
	[IPL_HIGH]       = PSL_S | PSL_IPL7,
};

#if (defined(DDB) || defined(DEBUG)) && !defined(PANICBUTTON)
#define PANICBUTTON
#endif

#ifdef PANICBUTTON
int panicbutton = 1;	/* non-zero if panic buttons are enabled */
int crashandburn = 0;
int candbdelay = 50;	/* give em half a second */
void candbtimer(void *);

/* Timer: cancel a pending "press NMI twice to panic" request. */
void
candbtimer(void *arg)
{

	crashandburn = 0;
}
#endif

/*
 * Level 7 interrupts can be caused by the keyboard or parity errors.
 */
void
nmihand(struct frame frame)
{

	/* ack the NMI in the sysport keyboard-control register */
	intio_set_sysport_keyctrl(intio_get_sysport_keyctrl() | 0x04);

	if (1) {
#ifdef PANICBUTTON
		static int innmihand = 0;

		/*
		 * Attempt to reduce the window of vulnerability for recursive
		 * NMIs (e.g. someone holding down the keyboard reset button).
		 */
		if (innmihand == 0) {
			innmihand = 1;
			printf("Got a keyboard NMI\n");
			innmihand = 0;
		}
#ifdef DDB
		Debugger();
#else
		/* Second press within candbdelay ticks forces a panic. */
		if (panicbutton) {
			if (crashandburn) {
				crashandburn = 0;
				panic(panicstr ?
				    "forced crash, nosync" : "forced crash");
			}
			crashandburn++;
			callout_reset(&candbtimer_ch, candbdelay,
			    candbtimer, NULL);
		}
#endif /* DDB */
#endif /* PANICBUTTON */
		return;
	}
	/* panic?? */
	printf("unexpected level 7 interrupt ignored\n");
}

/*
 * cpu_exec_aout_makecmds():
 *	cpu-dependent a.out format hook for execve().
 *
 * Determine of the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 *
 * XXX what are the special cases for the hp300?
 * XXX why is this COMPAT_NOMID?  was something generating
 *	hp300 binaries with an a_mid of 0?  i thought that was only
 *	done on little-endian machines...  -- cgd
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{
#if defined(COMPAT_NOMID) || defined(COMPAT_44)
	u_long midmag, magic;
	u_short mid;
	int error;
	struct exec *execp = epp->ep_hdr;

	midmag = ntohl(execp->a_midmag);
	mid = (midmag >> 16) & 0xffff;
	magic = midmag & 0xffff;

	midmag = mid << 16 | magic;

	switch (midmag) {
#ifdef COMPAT_NOMID
	case (MID_ZERO << 16) | ZMAGIC:
		error = exec_aout_prep_oldzmagic(l->l_proc, epp);
		break;
#endif
#ifdef COMPAT_44
	case (MID_HP300 << 16) | ZMAGIC:
		error = exec_aout_prep_oldzmagic(l->l_proc, epp);
		break;
#endif
	default:
		error = ENOEXEC;
	}

	return error;
#else /* !(defined(COMPAT_NOMID) || defined(COMPAT_44)) */
	return ENOEXEC;
#endif
}

#ifdef MODULAR
/*
 * Push any modules loaded by the bootloader etc.
 */
void
module_init_md(void)
{
}
#endif

#ifdef EXTENDED_MEMORY
#ifdef EM_DEBUG
static int em_debug = 0;
/* NOTE(review): the trailing ';' in this macro would break if/else
 * callers; harmless for the current statement-context call sites. */
#define DPRINTF(str) do{ if (em_debug) printf str; } while (0);
#else
#define DPRINTF(str)
#endif

/* Candidate extended-memory regions: base physical address plus the
 * minimum and maximum sizes to probe for. */
static struct memlist {
	void *base;
	psize_t min;
	psize_t max;
} memlist[] = {
	/* TS-6BE16 16MB memory */
	{(void *)0x01000000, 0x01000000, 0x01000000},
	/* 060turbo SIMM slot (4--128MB) */
	{(void *)0x10000000, 0x00400000, 0x08000000},
};
/* Scratch VAs used to map the probe target and its 24-bit shadow. */
static vaddr_t mem_v, base_v;

/*
 * check memory existency
 *
 * Probe whether real memory exists at `mem' by writing test patterns
 * through a temporary mapping, while also watching the 24-bit-aliased
 * `shadow' address (below `basemax') to detect address wraparound on
 * non-32-bit-clean hardware.  Bus errors are caught via the nofault/
 * setjmp trampoline.  Returns 1 if memory responds, 0 otherwise.
 */
static int
mem_exists(void *mem, u_long basemax)
{
	/* most variables must be register! */
	volatile unsigned char *m, *b;
	unsigned char save_m, save_b=0;	/* XXX: shutup gcc */
	int baseismem;
	int exists = 0;
	void *base;
	void *begin_check, *end_check;
	label_t faultbuf;

	DPRINTF(("Enter mem_exists(%p, %lx)\n", mem, basemax));
	DPRINTF(("  pmap_enter(%" PRIxVADDR ", %p) for target... ",
	    mem_v, mem));
	pmap_enter(pmap_kernel(), mem_v, (paddr_t)mem,
	    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|PMAP_WIRED);
	pmap_update(pmap_kernel());
	DPRINTF((" done.\n"));

	/* only 24bits are significant on normal X680x0 systems */
	base = (void *)((u_long)mem & 0x00FFFFFF);
	DPRINTF(("  pmap_enter(%" PRIxVADDR ", %p) for shadow... ",
	    base_v, base));
	pmap_enter(pmap_kernel(), base_v, (paddr_t)base,
	    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|PMAP_WIRED);
	pmap_update(pmap_kernel());
	DPRINTF((" done.\n"));

	m = (void *)mem_v;
	b = (void *)base_v;

	/* This is somewhat paranoid -- avoid overwriting myself */
	__asm("lea %%pc@(begin_check_mem),%0" : "=a"(begin_check));
	__asm("lea %%pc@(end_check_mem),%0" : "=a"(end_check));
	if (base >= begin_check && base < end_check) {
		size_t off = (char *)end_check - (char *)begin_check;

		DPRINTF(("  Adjusting the testing area.\n"));
		m -= off;
		b -= off;
	}

	nofault = (int *)&faultbuf;
	if (setjmp((label_t *)nofault)) {
		/* bus error: no memory at `mem'; undo the mappings */
		nofault = (int *)0;
		pmap_remove(pmap_kernel(), mem_v, mem_v+PAGE_SIZE);
		pmap_remove(pmap_kernel(), base_v, base_v+PAGE_SIZE);
		pmap_update(pmap_kernel());
		DPRINTF(("Fault!!! Returning 0.\n"));
		return 0;
	}

	DPRINTF(("  Let's begin. mem=%p, base=%p, m=%p, b=%p\n",
	    mem, base, m, b));

	(void)*m;
	/*
	 * Can't check by writing if the corresponding
	 * base address isn't memory.
	 *
	 * I hope this would be no harm....
	 */
	baseismem = base < (void *)basemax;

__asm("begin_check_mem:");
	/* save original value (base must be saved first) */
	if (baseismem)
		save_b = *b;
	save_m = *m;

	/*
	 * stack and other data segment variables are unusable
	 * til end_check_mem, because they may be clobbered.
	 */

	/*
	 * check memory by writing/reading
	 */
	if (baseismem)
		*b = 0x55;
	*m = 0xAA;
	if ((baseismem && *b != 0x55) || *m != 0xAA)
		goto out;

	*m = 0x55;
	if (baseismem)
		*b = 0xAA;
	if (*m != 0x55 || (baseismem && *b != 0xAA))
		goto out;

	exists = 1;
out:
	*m = save_m;
	if (baseismem)
		*b = save_b;

__asm("end_check_mem:");

	nofault = (int *)0;
	pmap_remove(pmap_kernel(), mem_v, mem_v+PAGE_SIZE);
	pmap_remove(pmap_kernel(), base_v, base_v+PAGE_SIZE);
	pmap_update(pmap_kernel());

	DPRINTF(("  End.\n"));

	DPRINTF(("Returning from mem_exists. result = %d\n", exists));

	return exists;
}

/*
 * setmemrange: probe the candidate regions in memlist[] (with CPU
 * caches disabled, since we read/write physical aliases) and register
 * any extended memory found with UVM, growing mem_size/avail_end.
 */
static void
setmemrange(void)
{
	int i;
	psize_t s, minimum, maximum;
	struct memlist *mlist = memlist;
	u_long h;
	int basemax = ctob(physmem);

	/*
	 * VM system is not started yet.  Use the first and second avalable
	 * pages to map the (possible) target memory and its shadow.
	 */
	mem_v = virtual_avail;		/* target */
	base_v = mem_v + PAGE_SIZE;	/* shadow */

	{	/* Turn off the processor cache. */
		int cacr;
		PCIA();		/* cpusha dc */
		switch (cputype) {
		default:
		case CPU_68030:
			cacr = CACHE_OFF;
			break;
		case CPU_68040:
			cacr = CACHE40_OFF;
			break;
		case CPU_68060:
			cacr = CACHE60_OFF;
			break;
		}
		__asm volatile ("movc %0,%%cacr"::"d"(cacr));
	}

	/* discover extended memory */
	for (i = 0; i < sizeof(memlist) / sizeof(memlist[0]); i++) {
		minimum = mlist[i].min;
		maximum = mlist[i].max;
		/*
		 * Normally, x68k hardware is NOT 32bit-clean.
		 * But some type of extended memory is in 32bit address space.
		 * Check whether.
		 */
		if (!mem_exists(mlist[i].base, basemax))
			continue;
		h = 0;
		/* range check: grow in 1MB steps until a probe fails */
		for (s = minimum; s <= maximum; s += 0x00100000) {
			if (!mem_exists((char*)mlist[i].base + s - 4, basemax))
				break;
			h = (u_long)((char*)mlist[i].base + s);
		}
		if ((u_long)mlist[i].base < h) {
			/* register the discovered range with UVM */
			uvm_page_physload(atop(mlist[i].base), atop(h),
			    atop(mlist[i].base), atop(h),
			    VM_FREELIST_HIGHMEM);
			mem_size += h - (u_long) mlist[i].base;
			if (avail_end < h)
				avail_end = h;
		}
	}

	{	/* Re-enable the processor cache. */
		int cacr;
		ICIA();
		switch (cputype) {
		default:
		case CPU_68030:
			cacr = CACHE_ON;
			break;
		case CPU_68040:
			cacr = CACHE40_ON;
			break;
		case CPU_68060:
			cacr = CACHE60_ON;
			break;
		}
		__asm volatile ("movc %0,%%cacr"::"d"(cacr));
	}

	physmem = m68k_btop(mem_size);
}
#endif

volatile int ssir;	/* pending software interrupt flags */
int idepth;		/* hardware interrupt nesting level */

/*
 * cpu_intr_p: true iff we are running in hard interrupt context.
 */
bool
cpu_intr_p(void)
{

	return idepth != 0;
}

/*
 * mm_md_physacc: validate /dev/mem-style access to physical address pa;
 * returns 0 if pa lies in some managed RAM segment, EFAULT otherwise.
 */
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
	int i;

	for (i = 0; i < vm_nphysseg; i++) {
		if (ctob(vm_physmem[i].start) <= pa &&
		    pa < ctob(vm_physmem[i].end))
			return 0;
	}
	return EFAULT;
}