1/* $NetBSD: atari_init.c,v 1.113 2024/02/10 18:43:51 andvar Exp $ */ 2 3/* 4 * Copyright (c) 1995 Leo Weppelman 5 * Copyright (c) 1994 Michael L. Hitch 6 * Copyright (c) 1993 Markus Wild 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Markus Wild. 20 * 4. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35#include <sys/cdefs.h> 36__KERNEL_RCSID(0, "$NetBSD: atari_init.c,v 1.113 2024/02/10 18:43:51 andvar Exp $"); 37 38#include "opt_ddb.h" 39#include "opt_mbtype.h" 40#include "opt_m060sp.h" 41#include "opt_m68k_arch.h" 42#include "opt_st_pool_size.h" 43 44#include <sys/param.h> 45#include <sys/systm.h> 46#include <sys/ioctl.h> 47#include <sys/select.h> 48#include <sys/tty.h> 49#include <sys/buf.h> 50#include <sys/msgbuf.h> 51#include <sys/mbuf.h> 52#include <sys/extent.h> 53#include <sys/protosw.h> 54#include <sys/domain.h> 55#include <sys/dkbad.h> 56#include <sys/reboot.h> 57#include <sys/exec.h> 58#include <sys/exec_aout.h> 59#include <sys/core.h> 60#include <sys/kcore.h> 61#include <sys/bus.h> 62 63#include <uvm/uvm_extern.h> 64 65#include <machine/vmparam.h> 66#include <machine/pte.h> 67#include <machine/cpu.h> 68#include <machine/iomap.h> 69#include <machine/mfp.h> 70#include <machine/scu.h> 71#include <machine/acia.h> 72#include <machine/kcore.h> 73#include <machine/intr.h> 74 75#include <m68k/cpu.h> 76#include <m68k/cacheops.h> 77 78#include <atari/atari/stalloc.h> 79#include <atari/dev/clockvar.h> 80#include <atari/dev/ym2149reg.h> 81 82#include "pci.h" 83 84void start_c(int, u_int, u_int, u_int, char *); 85static void atari_hwinit(void); 86static void cpu_init_kcorehdr(paddr_t, paddr_t); 87static void initcpu(void); 88static void mmu030_setup(paddr_t, u_int, paddr_t, psize_t, paddr_t, paddr_t); 89static void map_io_areas(paddr_t, psize_t, u_int); 90static void set_machtype(void); 91 92#if defined(M68040) || defined(M68060) 93static void mmu040_setup(paddr_t, u_int, paddr_t, psize_t, paddr_t, paddr_t); 94#endif 95 96#if defined(_MILANHW_) 97static u_int milan_probe_bank_1(paddr_t paddr); 98static u_int milan_probe_bank(paddr_t paddr); 99 100#define NBANK 2 101#define NSLOT 4 102 103#define MB(n) ((n) * 1024 * 1024) 104#define MB_END(n) (MB(n) - 1) 105#define MAGIC_4M (4 - 1) 106#define MAGIC_4M_INV ((uint8_t)~MAGIC_4M) 107#define MAGIC_8M (8 - 1) 
108#define MAGIC_16M (16 - 1) 109#define MAGIC_32M (32 - 1) 110#define MAGIC_64M (64 - 1) 111#endif 112 113/* 114 * All info needed to generate a panic dump. All fields are setup by 115 * start_c(). 116 * XXX: Should sheck usage of phys_segs. There is some unwanted overlap 117 * here.... Also, the name is badly chosen. Phys_segs contains the 118 * segment descriptions _after_ reservations are made. 119 * XXX: 'lowram' is obsoleted by the new panicdump format 120 */ 121static cpu_kcore_hdr_t cpu_kcore_hdr; 122 123extern u_int lowram; 124int machineid, mmutype, cputype; 125 126extern char *esym; 127extern struct pcb *curpcb; 128 129/* 130 * This is the virtual address of physical page 0. Used by 'do_boot()'. 131 */ 132vaddr_t page_zero; 133 134/* 135 * Simple support for allocation in ST-ram. 136 * Currently 16 bit ST-ram is required to allocate DMA buffers for SCSI and 137 * FDC transfers, and video memory for the XFree68 based Xservers. 138 * The physical address is also returned because the video init needs it to 139 * setup the controller at the time the vm-system is not yet operational so 140 * 'kvtop()' cannot be used. 141 */ 142#define ST_POOL_SIZE_MIN 24 /* for DMA bounce buffers */ 143#ifndef ST_POOL_SIZE 144#define ST_POOL_SIZE 56 /* Xserver requires 320KB (40 pages) */ 145#endif 146 147psize_t st_pool_size = ST_POOL_SIZE * PAGE_SIZE; /* Patchable */ 148vaddr_t st_pool_virt; 149paddr_t st_pool_phys; 150 151/* 152 * Thresholds to restrict size of reserved ST memory to make sure 153 * the kernel at least boot even on lower memory machines. 154 * Nowadays we could assume most users have 4MB ST-RAM and 16MB TT-RAM. 
155 */ 156#define STRAM_MINTHRESH (2 * 1024 * 1024) 157#define TTRAM_MINTHRESH (4 * 1024 * 1024) 158 159/* I/O address space variables */ 160vaddr_t stio_addr; /* Where the st io-area is mapped */ 161vaddr_t pci_conf_addr; /* KVA base of PCI config space */ 162vaddr_t pci_io_addr; /* KVA base of PCI io-space */ 163vaddr_t pci_mem_addr; /* KVA base of PCI mem-space */ 164vaddr_t pci_mem_uncached; /* KVA base of an uncached PCI mem-page */ 165 166/* 167 * Are we relocating the kernel to TT-Ram if possible? It is faster, but 168 * it is also reported not to work on all TT's. So the default is NO. 169 */ 170#ifndef RELOC_KERNEL 171#define RELOC_KERNEL 0 172#endif 173int reloc_kernel = RELOC_KERNEL; /* Patchable */ 174 175#define RELOC_PA(base, pa) ((base) + (pa)) /* used to set up PTE etc. */ 176 177/* 178 * this is the C-level entry function, it's called from locore.s. 179 * Preconditions: 180 * Interrupts are disabled 181 * PA == VA, we don't have to relocate addresses before enabling 182 * the MMU 183 * Exec is no longer available (because we're loaded all over 184 * low memory, no ExecBase is available anymore) 185 * 186 * It's purpose is: 187 * Do the things that are done in locore.s in the hp300 version, 188 * this includes allocation of kernel maps and enabling the MMU. 189 * 190 * Some of the code in here is `stolen' from Amiga MACH, and was 191 * written by Bryan Ford and Niklas Hallqvist. 192 * 193 * Very crude 68040 support by Michael L. Hitch. 
 */
/* Non-zero: map kernel pages copyback-cacheable (PG_CCB) on 68040/68060 */
int kernel_copyback = 1;

void
start_c(int id, u_int ttphystart, u_int ttphysize, u_int stphysize,
    char *esym_addr)
	/* id:			 Machine id			*/
	/* ttphystart, ttphysize: Start address and size of TT-ram */
	/* stphysize:		 Size of ST-ram			*/
	/* esym_addr:		 Address of kernel '_esym' symbol */
{
	extern char	end[];
	extern void	etext(void);
	paddr_t		pstart;		/* Next available physical address */
	vaddr_t		vstart;		/* Next available virtual address */
	vsize_t		avail;
	paddr_t		ptpa;
	psize_t		ptsize;
	u_int		ptextra;
	vaddr_t		kva;
	u_int		i;
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	vaddr_t		end_loaded;
	paddr_t		kbase;
	u_int		kstsize;
	paddr_t		Sysptmap_pa;
#if defined(_MILANHW_)
	/*
	 * The Milan Lies about the presence of TT-RAM. If you insert
	 * 16MB it is split in 14MB ST starting at address 0 and 2MB TT RAM,
	 * starting at address 16MB as the BIOS remapping memory using MMU.
	 *
	 * Milan actually has four SIMM slots and each slot has two banks,
	 * so it could have up to 8 memory segment regions.
	 */
	const paddr_t simm_base[NBANK][NSLOT] = {
		/* slot 0-3, bank 0 */
		{ 0x00000000, 0x04000000, 0x08000000, 0x0c000000 },
		/* slot 0-3, bank 1 */
		{ 0x10000000, 0x14000000, 0x18000000, 0x1c000000 }
	};
	int slot, bank, seg;
	u_int mb;

	/* On Milan, all RAMs are fast 32 bit so no need to reloc kernel */
	reloc_kernel = 0;

	/* probe memory region in all SIMM slots and banks */
	seg = 0;
	ttphysize = 0;
	for (bank = 0; bank < 2; bank++) {
		for (slot = 0; slot < 4; slot++) {
			if (bank == 0 && slot == 0) {
				/*
				 * The first bank has at least 16MB because
				 * the Milan's ROM bootloader requires it
				 * to allocate ST RAM.
				 */
				mb = milan_probe_bank_1(simm_base[bank][slot]);
				boot_segs[0].start = 0;
				boot_segs[0].end = MB(mb);
				stphysize = MB(mb);
				seg++;
			} else {
				/*
				 * The rest banks could be empty or
				 * have 4, 8, 16, 32, or 64MB.
				 */
				mb = milan_probe_bank(simm_base[bank][slot]);
				if (mb > 0) {
					boot_segs[seg].start =
					    simm_base[bank][slot];
					boot_segs[seg].end =
					    simm_base[bank][slot] + MB(mb);
					ttphysize += MB(mb);
					seg++;
				}
			}
		}
	}
#else /* _MILANHW_ */
	boot_segs[0].start = 0;
	boot_segs[0].end = stphysize;
	boot_segs[1].start = ttphystart;
	boot_segs[1].end = ttphystart + ttphysize;
	boot_segs[2].start = boot_segs[2].end = 0; /* End of segments! */
#endif

	/*
	 * We do not know how much ST memory we really need until after
	 * configuration has finished, but typical users of ST memory
	 * are bounce buffers DMA against TT-RAM for SCSI and FDC,
	 * and video memory for the Xserver.
	 * If we have enough RAMs reserve ST memory including for the Xserver.
	 * Otherwise just allocate minimum one for SCSI and FDC.
	 *
	 * The round_page() call is meant to correct errors made by
	 * binpatching!
	 */
	if (st_pool_size > ST_POOL_SIZE_MIN * PAGE_SIZE &&
	    (stphysize <= STRAM_MINTHRESH || ttphysize <= TTRAM_MINTHRESH)) {
		st_pool_size = ST_POOL_SIZE_MIN * PAGE_SIZE;
	}
	st_pool_size = m68k_round_page(st_pool_size);
	/* The ST pool is carved from the top of ST-RAM. */
	st_pool_phys = stphysize - st_pool_size;
	stphysize = st_pool_phys;

	/* Provisional value; recomputed from usable_segs[] further down. */
	physmem = btoc(stphysize) + btoc(ttphysize);
	machineid = id;
	esym = esym_addr;

	/*
	 * the kernel ends at end() or esym.
	 */
	if (esym == NULL)
		end_loaded = (vaddr_t)&end;
	else
		end_loaded = (vaddr_t)esym;

	/*
	 * If we have enough fast-memory to put the kernel in and the
	 * RELOC_KERNEL option is set, do it!
	 */
	if ((reloc_kernel != 0) && (ttphysize >= end_loaded))
		kbase = ttphystart;
	else
		kbase = 0;

	/*
	 * Determine the type of machine we are running on. This needs
	 * to be done early (and before initcpu())!
	 */
	set_machtype();

	/*
	 * Initialize CPU specific stuff
	 */
	initcpu();

	/*
	 * We run the kernel from ST memory at the moment.
	 * The kernel segment table is put just behind the loaded image.
	 * pstart: start of usable ST memory
	 * avail : size of ST memory available.
	 */
	vstart = (vaddr_t)end_loaded;
	vstart = m68k_round_page(vstart);
	pstart = (paddr_t)vstart;	/* pre-reloc PA == kernel VA here */
	avail = stphysize - pstart;

	/*
	 * Save KVA of lwp0 uarea and allocate it.
	 */
	lwp0uarea = vstart;
	pstart += USPACE;
	vstart += USPACE;
	avail -= USPACE;

	/*
	 * Calculate the number of pages needed for Sysseg.
	 * For the 68030, we need 256 descriptors (segment-table-entries).
	 * This easily fits into one page.
	 * For the 68040, both the level-1 and level-2 descriptors are
	 * stored into Sysseg. We currently handle a maximum sum of MAXKL2SIZE
	 * level-1 & level-2 tables.
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	/*
	 * allocate the kernel segment table
	 */
	Sysseg_pa = pstart;		/* pre-reloc PA to init STEs */
	Sysseg = (st_entry_t *)vstart;
	pstart += kstsize * PAGE_SIZE;
	vstart += kstsize * PAGE_SIZE;
	avail -= kstsize * PAGE_SIZE;

	/*
	 * allocate kernel page table map
	 */
	Sysptmap_pa = pstart;		/* pre-reloc PA to init PTEs */
	Sysptmap = (pt_entry_t *)vstart;
	pstart += PAGE_SIZE;
	vstart += PAGE_SIZE;
	avail -= PAGE_SIZE;

	/*
	 * Determine the number of pte's we need for extra's like
	 * ST I/O map's.
	 */
	ptextra = btoc(STIO_SIZE);

	/*
	 * If present, add pci areas
	 */
	if (machineid & ATARI_HADES)
		ptextra += btoc(PCI_CONFIG_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE);
	if (machineid & ATARI_MILAN)
		ptextra += btoc(PCI_IO_SIZE + PCI_MEM_SIZE);
	ptextra += btoc(BOOTM_VA_POOL);
	/*
	 * now need to account for the kmem area, which is allocated
	 * before pmap_init() is called. It is roughly the size of physical
	 * memory.
	 */
	ptextra += physmem;

	/*
	 * The 'pt' (the initial kernel pagetable) has to map the kernel and
	 * the I/O areas. The various I/O areas are mapped (virtually) at
	 * the top of the address space mapped by 'pt' (ie. just below Sysmap).
	 */
	ptpa = pstart;			/* pre-reloc PA to init PTEs */
	ptsize = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
	pstart += ptsize;
	vstart += ptsize;
	avail -= ptsize;

	/*
	 * Sysmap is now placed at the end of Supervisor virtual address space.
	 */
	Sysmap = (pt_entry_t *)SYSMAP_VA;

	/*
	 * Initialize segment tables
	 */
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		mmu040_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa,
		    kbase);
	else
#endif /* defined(M68040) || defined(M68060) */
		mmu030_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa,
		    kbase);

	/*
	 * initialize kernel page table page(s).
	 * Assume load at VA 0.
	 * - Text pages are RO
	 * - Page zero is invalid
	 */
	pg_proto = RELOC_PA(kbase, 0) | PG_RO | PG_V;
	pg = (pt_entry_t *)ptpa;
	*pg++ = PG_NV;			/* page zero: invalid */

	pg_proto += PAGE_SIZE;
	for (kva = PAGE_SIZE; kva < (vaddr_t)etext; kva += PAGE_SIZE) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache invalidated for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 * for the 68060 mandatory)
	 */
	if (mmutype == MMU_68040) {

		if (kernel_copyback)
			pg_proto |= PG_CCB;

		/* data/bss up to the segment table: (optionally) copyback */
		for (; kva < (vaddr_t)Sysseg; kva += PAGE_SIZE) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		/* the segment table pages themselves: cache inhibited */
		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; kva < (vaddr_t)Sysptmap; kva += PAGE_SIZE) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}

		pg_proto = (pg_proto & ~PG_CI);
		if (kernel_copyback)
			pg_proto |= PG_CCB;
	}
#endif /* defined(M68040) || defined(M68060) */

	/*
	 * go till end of data allocated so far
	 * plus lwp0 u-area (to be allocated)
	 */
	for (; kva < vstart; kva += PAGE_SIZE) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * invalidate remainder of kernel PT
	 */
	epg = (pt_entry_t *)ptpa;
	epg = &epg[ptsize / sizeof(pt_entry_t)];
	while (pg < epg)
		*pg++ = PG_NV;

	/*
	 * Map various I/O areas
	 */
	map_io_areas(ptpa, ptsize, ptextra);

	/*
	 * Map the allocated space in ST-ram now. In the contig-case, there
	 * is no need to make a distinction between virtual and physical
	 * addresses. But I make it anyway to be prepared.
	 * Physical space is already reserved!
	 */
	st_pool_virt = vstart;
	pg = (pt_entry_t *)ptpa;
	pg = &pg[vstart / PAGE_SIZE];
	pg_proto = st_pool_phys | PG_RW | PG_CI | PG_V;
	vstart += st_pool_size;
	/*
	 * Note: pg_proto carries the PTE flag bits in its low bits, but
	 * since the bound is page aligned the comparison still stops at
	 * the correct page.
	 */
	while (pg_proto < (st_pool_phys + st_pool_size)) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Map physical page_zero and page-zero+1 (First ST-ram page). We need
	 * to reference it in the reboot code. Two pages are mapped, because
	 * we must make sure 'doboot()' is contained in it (see the tricky
	 * copying there....).
	 */
	page_zero = vstart;
	pg = (pt_entry_t *)ptpa;
	pg = &pg[vstart / PAGE_SIZE];
	*pg++ = PG_RW | PG_CI | PG_V;			/* PA 0 */
	vstart += PAGE_SIZE;
	*pg = PG_RW | PG_CI | PG_V | PAGE_SIZE;		/* PA PAGE_SIZE */
	vstart += PAGE_SIZE;

	/*
	 * All necessary STEs and PTEs have been initialized.
	 * Update Sysseg_pa and Sysptmap_pa to point relocated PA.
	 */
	if (kbase) {
		Sysseg_pa += kbase;
		Sysptmap_pa += kbase;
	}

	lowram = 0 >> PGSHIFT;	/* XXX */

	/*
	 * Fill in usable segments. The page indexes will be initialized
	 * later when all reservations are made.
	 */
	usable_segs[0].start = 0;
	usable_segs[0].end = stphysize;
	usable_segs[0].free_list = VM_FREELIST_STRAM;
#if defined(_MILANHW_)
	for (i = 1; i < seg; i++) {
		usable_segs[i].start = boot_segs[i].start;
		usable_segs[i].end = boot_segs[i].end;
		usable_segs[i].free_list = VM_FREELIST_TTRAM;
	}
	for (; i < NMEM_SEGS; i++) {
		usable_segs[i].start = usable_segs[i].end = 0;
	}
#else
	usable_segs[1].start = ttphystart;
	usable_segs[1].end = ttphystart + ttphysize;
	usable_segs[1].free_list = VM_FREELIST_TTRAM;
	usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */
#endif

	if (kbase) {
		/*
		 * First page of ST-ram is unusable, reserve the space
		 * for the kernel in the TT-ram segment.
		 * Note: Because physical page-zero is partially mapped to ROM
		 * by hardware, it is unusable.
		 */
		usable_segs[0].start = PAGE_SIZE;
		usable_segs[1].start += pstart;
	} else
		usable_segs[0].start += pstart;

	/*
	 * As all segment sizes are now valid, calculate page indexes and
	 * available physical memory.
	 */
	usable_segs[0].first_page = 0;
	for (i = 1; i < NMEM_SEGS && usable_segs[i].start; i++) {
		usable_segs[i].first_page = usable_segs[i-1].first_page;
		usable_segs[i].first_page +=
		    (usable_segs[i-1].end - usable_segs[i-1].start) / PAGE_SIZE;
	}
	for (i = 0, physmem = 0; usable_segs[i].start; i++)
		physmem += usable_segs[i].end - usable_segs[i].start;
	physmem >>= PGSHIFT;

	/*
	 * get the pmap module in sync with reality.
	 */
	pmap_bootstrap(vstart);

	/*
	 * Prepare to enable the MMU.
	 * Setup and load SRP (see pmap.h)
	 */

	cpu_init_kcorehdr(kbase, Sysseg_pa);

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram. DONT use bcopy(), this beast is much larger
	 * than 128k !
	 */
	if (kbase) {
		register paddr_t *lp, *le, *fp;

		lp = (paddr_t *)0;
		le = (paddr_t *)pstart;
		fp = (paddr_t *)kbase;
		while (lp < le)
			*fp++ = *lp++;
	}
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */
		if (cputype == CPU_68060) {
			/*
			 * XXX: Need the branch cache be cleared?
			 * Reads CACR (movec %cacr,%d0), ORs in 0x400000
			 * (68060 branch cache enable) and writes it back.
			 */
			__asm volatile (".word 0x4e7a,0x0002;"
				"orl #0x400000,%%d0;"
				".word 0x4e7b,0x0002" : : : "d0");
		}
		__asm volatile ("movel %0,%%a0;"
			".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
		__asm volatile (".word 0xf518" : : );
		__asm volatile ("movel #0xc000,%%d0;"
			".word 0x4e7b,0x0003" : : : "d0" );
	} else
#endif
	{
#if defined(M68030)
		protorp[1] = Sysseg_pa;		/* + segtable address */
		__asm volatile ("pmove %0@,%%srp" : : "a" (&protorp[0]));
		/*
		 * setup and load TC register.
		 * enable_cpr, enable_srp, pagesize=8k,
		 * A = 8 bits, B = 11 bits
		 */
		u_int tc = MMU51_TCR_BITS;
		__asm volatile ("pflusha" : : );
		__asm volatile ("pmove %0@,%%tc" : : "a" (&tc));
#endif /* M68030 */
	}

	/*
	 * Initialize the "u-area" pages etc.
	 */
	pmap_bootstrap_finalize();

	/*
	 * Get the hardware into a defined state
	 */
	atari_hwinit();

	/*
	 * Initialize stmem allocator
	 */
	init_stmem();

	/*
	 * Initialize the iomem arena for bus_space(9) to manage address
	 * spaces and allocate the physical RAM from the extent map.
	 */
	atari_bus_space_arena_init(0x0, 0xffffffff);
	for (i = 0; i < NMEM_SEGS && boot_segs[i].end != 0; i++) {
		if (atari_bus_space_alloc_physmem(boot_segs[i].start,
		    boot_segs[i].end)) {
			/* XXX: Ahum, should not happen ;-) */
			printf("Warning: Cannot allocate boot memory from"
			    " extent map!?\n");
		}
	}

	/*
	 * Initialize interrupt mapping.
	 */
	intr_init();
}

#if defined(_MILANHW_)
/*
 * Probe and return available memory size in MB at specified address.
 * The first slot SIMM have at least 16MB, so check if it has 32 or 64 MB.
 *
 * Note it seems Milan does not generate bus errors on accesses against
 * address regions where memory doesn't exist, but it returns memory images
 * of lower address of the bank.
 */
static u_int
milan_probe_bank_1(paddr_t start_paddr)
{
	volatile uint8_t *base;
	u_int mb;
	uint8_t save_16, save_32, save_64;

	/* Assume that this bank has at least 16MB */
	mb = 16;

	base = (uint8_t *)start_paddr;

	/* save and write a MAGIC at the end of 16MB region */
	save_16 = base[MB_END(16)];
	base[MB_END(16)] = MAGIC_16M;

	/* check bus error at the end of 32MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(32)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out16;
	}

	/* check if the 32MB region is not image of the prior 16MB region */
	save_32 = base[MB_END(32)];
	base[MB_END(32)] = MAGIC_32M;
	if (base[MB_END(32)] != MAGIC_32M || base[MB_END(16)] != MAGIC_16M) {
		/*
		 * no memory or image at the 32MB region.
		 * Note: if the write aliased into the 16MB region, the
		 * save_16 restore below undoes it, so save_32 need not
		 * (and must not) be restored on this path.
		 */
		goto out16;
	}
	/* we have at least 32MB */
	mb = 32;

	/* check bus error at the end of 64MB region */
	if (badbaddr(__UNVOLATILE(base + MB_END(64)), sizeof(uint8_t))) {
		/* bus error; assume no memory there */
		goto out32;
	}

	/* check if the 64MB region is not image of the prior 32MB region */
	save_64 = base[MB_END(64)];
	base[MB_END(64)] = MAGIC_64M;
	if (base[MB_END(64)] != MAGIC_64M || base[MB_END(32)] != MAGIC_32M) {
		/* no memory or image at the 64MB region */
		goto out32;
	}
	/* we have 64MB */
	mb = 64;
	base[MB_END(64)] = save_64;
	/* restore the probed bytes in reverse order of modification */
 out32:
	base[MB_END(32)] = save_32;
 out16:
	base[MB_END(16)] = save_16;

	return mb;
}

/*
 * Probe and return available memory size in MB at specified address.
 * The rest slot could be empty so check all possible size.
761 */ 762static u_int 763milan_probe_bank(paddr_t start_paddr) 764{ 765 volatile uint8_t *base; 766 u_int mb; 767 uint8_t save_4, save_8, save_16; 768 769 /* The rest banks might have no memory */ 770 mb = 0; 771 772 base = (uint8_t *)start_paddr; 773 774 /* check bus error at the end of 4MB region */ 775 if (badbaddr(__UNVOLATILE(base + MB_END(4)), sizeof(uint8_t))) { 776 /* bus error; assume no memory there */ 777 goto out; 778 } 779 780 /* check if the 4MB region has memory */ 781 save_4 = base[MB_END(4)]; 782 base[MB_END(4)] = MAGIC_4M_INV; 783 if (base[MB_END(4)] != MAGIC_4M_INV) { 784 /* no memory */ 785 goto out; 786 } 787 base[MB_END(4)] = MAGIC_4M; 788 if (base[MB_END(4)] != MAGIC_4M) { 789 /* no memory */ 790 goto out; 791 } 792 /* we have at least 4MB */ 793 mb = 4; 794 795 /* check bus error at the end of 8MB region */ 796 if (badbaddr(__UNVOLATILE(base + MB_END(8)), sizeof(uint8_t))) { 797 /* bus error; assume no memory there */ 798 goto out4; 799 } 800 801 /* check if the 8MB region is not image of the prior 4MB region */ 802 save_8 = base[MB_END(8)]; 803 base[MB_END(8)] = MAGIC_8M; 804 if (base[MB_END(8)] != MAGIC_8M || base[MB_END(4)] != MAGIC_4M) { 805 /* no memory or image at the 8MB region */ 806 goto out4; 807 } 808 /* we have at least 8MB */ 809 mb = 8; 810 811 /* check bus error at the end of 16MB region */ 812 if (badbaddr(__UNVOLATILE(base + MB_END(16)), sizeof(uint8_t))) { 813 /* bus error; assume no memory there */ 814 goto out8; 815 } 816 817 /* check if the 16MB region is not image of the prior 8MB region */ 818 save_16 = base[MB_END(16)]; 819 base[MB_END(16)] = MAGIC_16M; 820 if (base[MB_END(16)] != MAGIC_16M || base[MB_END(8)] != MAGIC_8M) { 821 /* no memory or image at the 32MB region */ 822 goto out8; 823 } 824 /* we have at least 16MB, so check more region as the first bank */ 825 mb = milan_probe_bank_1(start_paddr); 826 827 base[MB_END(16)] = save_16; 828 out8: 829 base[MB_END(8)] = save_8; 830 out4: 831 base[MB_END(4)] = save_4; 
832 out: 833 834 return mb; 835} 836#endif /* _MILANHW_ */ 837 838/* 839 * Try to figure out on what type of machine we are running 840 * Note: This module runs *before* the io-mapping is setup! 841 */ 842static void 843set_machtype(void) 844{ 845 846#ifdef _MILANHW_ 847 machineid |= ATARI_MILAN; 848 849#else 850 stio_addr = 0xff8000; /* XXX: For TT & Falcon only */ 851 if (badbaddr((void *)__UNVOLATILE(&MFP2->mf_gpip), sizeof(char))) { 852 /* 853 * Watch out! We can also have a Hades with < 16Mb 854 * RAM here... 855 */ 856 if (!badbaddr((void *)__UNVOLATILE(&MFP->mf_gpip), 857 sizeof(char))) { 858 machineid |= ATARI_FALCON; 859 return; 860 } 861 } 862 if (!badbaddr((void *)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char))) 863 machineid |= ATARI_HADES; 864 else 865 machineid |= ATARI_TT; 866#endif /* _MILANHW_ */ 867} 868 869static void 870atari_hwinit(void) 871{ 872 873#if defined(_ATARIHW_) 874 /* 875 * Initialize the sound chip 876 */ 877 ym2149_init(); 878 879 /* 880 * Make sure that the midi acia will not generate an interrupt 881 * unless something attaches to it. We cannot do this for the 882 * keyboard acia because this breaks the '-d' option of the 883 * booter... 884 */ 885 MDI->ac_cs = 0; 886#endif /* defined(_ATARIHW_) */ 887 888 /* 889 * Initialize both MFP chips (if both present!) to generate 890 * auto-vectored interrupts with EOI. The active-edge registers are 891 * set up. The interrupt enable registers are set to disable all 892 * interrupts. 893 */ 894 MFP->mf_iera = MFP->mf_ierb = 0; 895 MFP->mf_imra = MFP->mf_imrb = 0; 896 MFP->mf_aer = MFP->mf_ddr = 0; 897 MFP->mf_vr = 0x40; 898 899#if defined(_ATARIHW_) 900 if (machineid & (ATARI_TT|ATARI_HADES)) { 901 MFP2->mf_iera = MFP2->mf_ierb = 0; 902 MFP2->mf_imra = MFP2->mf_imrb = 0; 903 MFP2->mf_aer = 0x80; 904 MFP2->mf_vr = 0x50; 905 } 906 907 if (machineid & ATARI_TT) { 908 /* 909 * Initialize the SCU, to enable interrupts on the SCC (ipl5), 910 * MFP (ipl6) and softints (ipl1). 
911 */ 912 SCU->sys_mask = SCU_SYS_SOFT; 913 SCU->vme_mask = SCU_MFP | SCU_SCC; 914#ifdef DDB 915 /* 916 * This allows people with the correct hardware modification 917 * to drop into the debugger from an NMI. 918 */ 919 SCU->sys_mask |= SCU_IRQ7; 920#endif 921 } 922#endif /* defined(_ATARIHW_) */ 923 924 /* 925 * Initialize a timer for delay(9). 926 */ 927 init_delay(); 928 929#if NPCI > 0 930 if (machineid & (ATARI_HADES|ATARI_MILAN)) { 931 /* 932 * Configure PCI-bus 933 */ 934 init_pci_bus(); 935 } 936#endif 937 938} 939 940/* 941 * Do the dull work of mapping the various I/O areas. They MUST be Cache 942 * inhibited! 943 * All I/O areas are virtually mapped at the end of the pt-table. 944 */ 945static void 946map_io_areas(paddr_t ptpa, psize_t ptsize, u_int ptextra) 947 /* ptsize: Size of 'pt' in bytes */ 948 /* ptextra: #of additional I/O pte's */ 949{ 950 vaddr_t ioaddr; 951 pt_entry_t *pt, *pg, *epg; 952 pt_entry_t pg_proto; 953 u_long mask; 954 955 pt = (pt_entry_t *)ptpa; 956 ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * PAGE_SIZE; 957 958 /* 959 * Map ST-IO area 960 */ 961 stio_addr = ioaddr; 962 ioaddr += STIO_SIZE; 963 pg = &pt[stio_addr / PAGE_SIZE]; 964 epg = &pg[btoc(STIO_SIZE)]; 965#ifdef _MILANHW_ 966 /* 967 * Turn on byte swaps in the ST I/O area. On the Milan, the 968 * U0 signal of the MMU controls the BigEndian signal 969 * of the PLX9080. We use this setting so we can read/write the 970 * PLX registers (and PCI-config space) in big-endian mode. 971 */ 972 pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V | 0x100; 973#else 974 pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V; 975#endif 976 while (pg < epg) { 977 *pg++ = pg_proto; 978 pg_proto += PAGE_SIZE; 979 } 980 981 /* 982 * Map PCI areas 983 */ 984 if (machineid & ATARI_HADES) { 985 /* 986 * Only Hades maps the PCI-config space! 
987 */ 988 pci_conf_addr = ioaddr; 989 ioaddr += PCI_CONFIG_SIZE; 990 pg = &pt[pci_conf_addr / PAGE_SIZE]; 991 epg = &pg[btoc(PCI_CONFIG_SIZE)]; 992 mask = PCI_CONFM_PHYS; 993 pg_proto = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V; 994 for (; pg < epg; mask <<= 1) 995 *pg++ = pg_proto | mask; 996 } else 997 pci_conf_addr = 0; /* XXX: should crash */ 998 999 if (machineid & (ATARI_HADES|ATARI_MILAN)) { 1000 pci_io_addr = ioaddr; 1001 ioaddr += PCI_IO_SIZE; 1002 pg = &pt[pci_io_addr / PAGE_SIZE]; 1003 epg = &pg[btoc(PCI_IO_SIZE)]; 1004 pg_proto = PCI_IO_PHYS | PG_RW | PG_CI | PG_V; 1005 while (pg < epg) { 1006 *pg++ = pg_proto; 1007 pg_proto += PAGE_SIZE; 1008 } 1009 1010 pci_mem_addr = ioaddr; 1011 /* Provide an uncached PCI address for the MILAN */ 1012 pci_mem_uncached = ioaddr; 1013 ioaddr += PCI_MEM_SIZE; 1014 epg = &pg[btoc(PCI_MEM_SIZE)]; 1015 pg_proto = PCI_VGA_PHYS | PG_RW | PG_CI | PG_V; 1016 while (pg < epg) { 1017 *pg++ = pg_proto; 1018 pg_proto += PAGE_SIZE; 1019 } 1020 } 1021 1022 bootm_init(ioaddr, pg, BOOTM_VA_POOL); 1023 /* 1024 * ioaddr += BOOTM_VA_POOL; 1025 * pg = &pg[btoc(BOOTM_VA_POOL)]; 1026 */ 1027} 1028 1029/* 1030 * Used by dumpconf() to get the size of the machine-dependent panic-dump 1031 * header in disk blocks. 1032 */ 1033 1034#define CHDRSIZE (ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t))) 1035#define MDHDRSIZE roundup(CHDRSIZE, dbtob(1)) 1036 1037int 1038cpu_dumpsize(void) 1039{ 1040 1041 return btodb(MDHDRSIZE); 1042} 1043 1044/* 1045 * Called by dumpsys() to dump the machine-dependent header. 1046 * XXX: Assumes that it will all fit in one diskblock. 
 */
int
cpu_dump(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t *p_blkno)
{
	/* One disk block's worth of header, assembled in a stack buffer. */
	int buf[MDHDRSIZE/sizeof(int)];
	int error;
	kcore_seg_t *kseg_p;
	cpu_kcore_hdr_t *chdr_p;

	kseg_p = (kcore_seg_t *)buf;
	/* The MD header follows the ALIGN()ed segment header inside 'buf'. */
	chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];

	/*
	 * Generate a segment header
	 */
	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg_p->c_size = MDHDRSIZE - ALIGN(sizeof(*kseg_p));

	/*
	 * Add the md header
	 */
	*chdr_p = cpu_kcore_hdr;
	error = dump(dumpdev, *p_blkno, (void *)buf, sizeof(buf));
	/* Advance the caller's block cursor past what was just written. */
	*p_blkno += btodb(sizeof(buf));
	return (error);
}

#if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
#error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
#endif
/*
 * Initialize the cpu_kcore_header.
 *
 * kbase:     physical relocation base of the kernel image.
 * sysseg_pa: physical address of the kernel segment table.
 */
static void
cpu_init_kcorehdr(paddr_t kbase, paddr_t sysseg_pa)
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	extern char end[];	/* end of the kernel image (linker symbol) */
	int i;

	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = PAGE_SIZE;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration, so libkvm
	 * consumers can walk the tables without hard-coded constants.
	 */
	m->mmutype = mmutype;
	m->sg_v = SG_V;
	m->sg_frame = SG_FRAME;
	m->sg_ishift = SG_ISHIFT;
	m->sg_pmask = SG_PMASK;
	m->sg40_shift1 = SG4_SHIFT1;
	m->sg40_mask2 = SG4_MASK2;
	m->sg40_shift2 = SG4_SHIFT2;
	m->sg40_mask3 = SG4_MASK3;
	m->sg40_shift3 = SG4_SHIFT3;
	m->sg40_addr1 = SG4_ADDR1;
	m->sg40_addr2 = SG4_ADDR2;
	m->pg_v = PG_V;
	m->pg_frame = PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = sysseg_pa;	/* PA after relocation */

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = kbase;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (vaddr_t)end;

	/* Export the boot-time RAM segment list into the dump header. */
	for (i = 0; i < NMEM_SEGS; i++) {
		m->ram_segs[i].start = boot_segs[i].start;
		m->ram_segs[i].size = boot_segs[i].end -
		    boot_segs[i].start;
	}
}

/*
 * Build the 68020/68030 kernel segment table and the software Sysptmap.
 * NOTE(review): the tables are written through their physical addresses
 * (paddr_t casts) and entries are rebased with RELOC_PA() against 'kbase',
 * so this presumably runs before address translation is enabled -- confirm
 * against the caller (start_c).  'kstsize' is not used here; the 030 table
 * is sized by TIA_SIZE below.
 */
void
mmu030_setup(paddr_t sysseg_pa, u_int kstsize, paddr_t ptpa, psize_t ptsize,
    paddr_t sysptmap_pa, paddr_t kbase)
	/* sysseg_pa:	System segment table */
	/* kstsize:	size of 'sysseg' in pages */
	/* ptpa:	Kernel page table */
	/* ptsize:	size of 'pt' in bytes */
	/* sysptmap_pa:	System page table */
{
	st_entry_t sg_proto, *sg, *esg;
	pt_entry_t pg_proto, *pg, *epg;

	/*
	 * Map the page table pages in both the HW segment table
	 * and the software Sysptmap.
	 */
	sg = (st_entry_t *)sysseg_pa;
	pg = (pt_entry_t *)sysptmap_pa;
	epg = &pg[ptsize >> PGSHIFT];
	sg_proto = RELOC_PA(kbase, ptpa) | SG_RW | SG_V;
	pg_proto = RELOC_PA(kbase, ptpa) | PG_RW | PG_CI | PG_V;
	while (pg < epg) {
		*sg++ = sg_proto;
		*pg++ = pg_proto;
		sg_proto += PAGE_SIZE;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Invalidate the remainder of the tables.
	 */
	esg = (st_entry_t *)sysseg_pa;
	esg = &esg[TIA_SIZE];
	while (sg < esg)
		*sg++ = SG_NV;
	epg = (pt_entry_t *)sysptmap_pa;
	epg = &epg[TIB_SIZE];
	while (pg < epg)
		*pg++ = PG_NV;

	/*
	 * Initialize the PTE for the last one to point Sysptmap.
 */
	sg = (st_entry_t *)sysseg_pa;
	sg = &sg[SYSMAP_VA >> SEGSHIFT];
	pg = (pt_entry_t *)sysptmap_pa;
	pg = &pg[SYSMAP_VA >> SEGSHIFT];
	/* Map Sysptmap itself at SYSMAP_VA (self-reference). */
	*sg = RELOC_PA(kbase, sysptmap_pa) | SG_RW | SG_V;
	*pg = RELOC_PA(kbase, sysptmap_pa) | PG_RW | PG_CI | PG_V;
}

#if defined(M68040) || defined(M68060)
/*
 * Build the 68040/68060 three-level kernel segment table and Sysptmap.
 * NOTE(review): like mmu030_setup(), the tables are written through their
 * physical addresses and rebased with RELOC_PA() against 'kbase'.
 */
void
mmu040_setup(paddr_t sysseg_pa, u_int kstsize, paddr_t ptpa, psize_t ptsize,
    paddr_t sysptmap_pa, paddr_t kbase)
	/* sysseg_pa:	System segment table */
	/* kstsize:	size of 'sysseg' in pages */
	/* ptpa:	Kernel page table */
	/* ptsize:	size of 'pt' in bytes */
	/* sysptmap_pa:	System page table */
{
	int nl1desc, nl2desc, i;
	st_entry_t sg_proto, *sg, *esg;
	pt_entry_t pg_proto, *pg, *epg;

	/*
	 * First invalidate the entire "segment table" pages
	 * (levels 1 and 2 have the same "invalid" values).
	 */
	sg = (st_entry_t *)sysseg_pa;
	esg = &sg[kstsize * NPTEPG];
	while (sg < esg)
		*sg++ = SG_NV;

	/*
	 * Initialize level 2 descriptors (which immediately
	 * follow the level 1 table).
	 * We need:
	 *	NPTEPG / SG4_LEV3SIZE
	 * level 2 descriptors to map each of the nptpages
	 * pages of PTEs.  Note that we set the "used" bit
	 * now to save the HW the expense of doing it.
	 */
	nl2desc = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE);
	sg = (st_entry_t *)sysseg_pa;
	sg = &sg[SG4_LEV1SIZE];
	esg = &sg[nl2desc];
	sg_proto = RELOC_PA(kbase, ptpa) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t));
	}

	/*
	 * Initialize level 1 descriptors.  We need:
	 *	howmany(nl2desc, SG4_LEV2SIZE)
	 * level 1 descriptors to map the 'nl2desc' level 2's.
	 */
	nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
	sg = (st_entry_t *)sysseg_pa;
	esg = &sg[nl1desc];
	sg_proto = RELOC_PA(kbase, (paddr_t)&sg[SG4_LEV1SIZE])
	    | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
	}

	/*
	 * Sysmap is last entry in level 1.
	 * NOTE: this deliberately reuses the 'sg_proto' value left over
	 * by the loop above, i.e. the descriptor for the level 2 table
	 * following the last one initialized there.
	 */
	/* Sysmap is last entry in level 1 */
	sg = (st_entry_t *)sysseg_pa;
	sg = &sg[SG4_LEV1SIZE - 1];
	*sg = sg_proto;

	/*
	 * Kernel segment table at end of next level 2 table
	 */
	i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
	sg = (st_entry_t *)sysseg_pa;
	sg = &sg[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
	esg = &sg[NPTEPG / SG4_LEV3SIZE];
	sg_proto = RELOC_PA(kbase, sysptmap_pa) | SG_U | SG_RW | SG_V;
	while (sg < esg) {
		*sg++ = sg_proto;
		sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t));
	}

	/* Include additional level 2 table for Sysmap in protostfree */
	protostfree = (~0 << (1 + nl1desc + 1)) /* & ~(~0 << MAXKL2SIZE) */;

	/*
	 * Initialize Sysptmap
	 */
	pg = (pt_entry_t *)sysptmap_pa;
	epg = &pg[ptsize >> PGSHIFT];
	pg_proto = RELOC_PA(kbase, ptpa) | PG_RW | PG_CI | PG_V;
	while (pg < epg) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
	}

	/*
	 * Invalidate rest of Sysptmap page.
	 */
	epg = (pt_entry_t *)sysptmap_pa;
	epg = &epg[TIB_SIZE];
	while (pg < epg)
		*pg++ = PG_NV;

	/*
	 * Initialize the PTE for the last one to point Sysptmap.
 */
	pg = (pt_entry_t *)sysptmap_pa;
	pg = &pg[SYSMAP_VA >> SEGSHIFT];
	/* Map Sysptmap itself at SYSMAP_VA (self-reference). */
	*pg = RELOC_PA(kbase, sysptmap_pa) | PG_RW | PG_CI | PG_V;
}
#endif /* M68040 || M68060 */

#if defined(M68060)
int m68060_pcr_init = 0x21;	/* make this patchable */
#endif

/*
 * Install the CPU-type-specific bus error and address error exception
 * vectors (and, on the 68060, the optional software support package),
 * then flush the instruction cache.
 */
static void
initcpu(void)
{
	typedef void trapfun(void);

	switch (cputype) {

#if defined(M68060)
	case CPU_68060:
	    {
		extern trapfun *vectab[256];
		extern trapfun buserr60, addrerr4060, fpfault;
#if defined(M060SP)
		extern u_int8_t FP_CALL_TOP[], I_CALL_TOP[];
#else
		extern trapfun illinst;
#endif

		/*
		 * NOTE(review): 0x4e7b,0x0808 looks like a hand-assembled
		 * "movec %d0,%pcr" (68060 Processor Configuration Register)
		 * -- confirm against the MC68060 manual.
		 */
		__asm volatile ("movl %0,%%d0; .word 0x4e7b,0x0808" : :
		    "d"(m68060_pcr_init):"d0" );

		/* bus/addrerr vectors */
		vectab[2] = buserr60;
		vectab[3] = addrerr4060;

#if defined(M060SP)
		/* integer support */
		vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00];

		/* floating point support */
		/*
		 * XXX maybe we really should run-time check for the
		 * stack frame format here:
		 */
		vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30];

		vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38];
		vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40];

		vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
		vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
		vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
		vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
		vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
		vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
#else
		vectab[61] = illinst;
#endif
		vectab[48] = fpfault;
	    }
		break;
#endif /* defined(M68060) */
#if defined(M68040)
	case CPU_68040:
	    {
		extern trapfun *vectab[256];
		extern trapfun buserr40, addrerr4060;

		/* bus/addrerr vectors */
		vectab[2] = buserr40;
		vectab[3] = addrerr4060;
	    }
		break;
#endif /* defined(M68040) */
#if defined(M68030) || defined(M68020)
	case CPU_68030:
	case CPU_68020:
	    {
		extern trapfun *vectab[256];
		extern trapfun buserr2030, addrerr2030;

		/* bus/addrerr vectors */
		vectab[2] = buserr2030;
		vectab[3] = addrerr2030;
	    }
		break;
#endif /* defined(M68030) || defined(M68020) */
	}

	/* Flush/invalidate the instruction cache after patching vectab. */
	DCIS();
}

#ifdef DEBUG
void dump_segtable(u_int *);
void dump_pagetable(u_int *, u_int, u_int);
u_int vmtophys(u_int *, u_int);

/*
 * Print all valid entries of a (68030-style) segment table.
 */
void
dump_segtable(u_int *stp)
{
	u_int *s, *es;
	int shift, i;

	s = stp;
	/* NOTE(review): stray braces look like a leftover #ifdef remnant. */
	{
		es = s + (M68K_STSIZE >> 2);
		shift = SG_ISHIFT;
	}

	/*
	 * XXX need changes for 68040
	 */
	for (i = 0; s < es; s++, i++)
		if (*s & SG_V)
			printf("$%08x: $%08x\t", i << shift, *s & SG_FRAME);
	printf("\n");
}

/*
 * Print 'n' page table entries starting at index 'i' of 'ptp',
 * skipping invalid ones.
 */
void
dump_pagetable(u_int *ptp, u_int i, u_int n)
{
	u_int *p, *ep;

	p = ptp + i;
	ep = p + n;
	for (; p < ep; p++, i++)
		if (*p & PG_V)
			printf("$%08x -> $%08x\t", i, *p & PG_FRAME);
	printf("\n");
}

/*
 * Translate a virtual address to a physical one by a two-level
 * (68030-style) table walk starting at segment table 'ste'.
 * No validity checking is done on the entries.
 */
u_int
vmtophys(u_int *ste, u_int vm)
{

	ste = (u_int *)(*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
	ste += (vm & SG_PMASK) >> PGSHIFT;
	return (*ste & -PAGE_SIZE) | (vm & (PAGE_SIZE - 1));
}

#endif