1/* $NetBSD: machdep.c,v 1.22 2024/03/05 14:15:31 thorpej Exp $ */ 2 3/* 4 * Copyright 2001, 2002 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38/* 39 * Copyright (c) 1988 University of Utah. 
40 * Copyright (c) 1992, 1993 41 * The Regents of the University of California. All rights reserved. 42 * 43 * This code is derived from software contributed to Berkeley by 44 * the Systems Programming Group of the University of Utah Computer 45 * Science Department, The Mach Operating System project at 46 * Carnegie-Mellon University and Ralph Campbell. 47 * 48 * Redistribution and use in source and binary forms, with or without 49 * modification, are permitted provided that the following conditions 50 * are met: 51 * 1. Redistributions of source code must retain the above copyright 52 * notice, this list of conditions and the following disclaimer. 53 * 2. Redistributions in binary form must reproduce the above copyright 54 * notice, this list of conditions and the following disclaimer in the 55 * documentation and/or other materials provided with the distribution. 56 * 3. Neither the name of the University nor the names of its contributors 57 * may be used to endorse or promote products derived from this software 58 * without specific prior written permission. 59 * 60 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 61 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 62 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 63 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 64 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 65 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 66 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 67 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 68 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 69 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 70 * SUCH DAMAGE. 
71 * 72 * @(#)machdep.c 8.3 (Berkeley) 1/12/94 73 * from: Utah Hdr: machdep.c 1.63 91/04/24 74 */ 75 76#include <sys/cdefs.h> 77__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.22 2024/03/05 14:15:31 thorpej Exp $"); 78 79#define __INTR_PRIVATE 80 81#include "opt_multiprocessor.h" 82#include "opt_ddb.h" 83#include "opt_com.h" 84#include "opt_execfmt.h" 85#include "opt_memsize.h" 86#include "rmixl_pcix.h" 87#include "rmixl_pcie.h" 88 89#include <sys/param.h> 90#include <sys/systm.h> 91#include <sys/kernel.h> 92#include <sys/buf.h> 93#include <sys/cpu.h> 94#include <sys/reboot.h> 95#include <sys/mount.h> 96#include <sys/kcore.h> 97#include <sys/boot_flag.h> 98#include <sys/termios.h> 99#include <sys/ksyms.h> 100#include <sys/bus.h> 101#include <sys/device.h> 102#include <sys/extent.h> 103 104#include <uvm/uvm_extern.h> 105 106#include <dev/cons.h> 107 108#include "ksyms.h" 109 110#if NKSYMS || defined(DDB) || defined(LKM) 111#include <mips/db_machdep.h> 112#include <ddb/db_extern.h> 113#endif 114 115#include <mips/cpu.h> 116#include <mips/psl.h> 117#include <mips/cache.h> 118#include <mips/mips_opcode.h> 119 120#include "com.h" 121#if NCOM == 0 122#error no serial console 123#endif 124 125#include <dev/ic/comreg.h> 126#include <dev/ic/comvar.h> 127 128#include <mips/include/intr.h> 129 130#include <mips/rmi/rmixlreg.h> 131#include <mips/rmi/rmixlvar.h> 132#include <mips/rmi/rmixl_intr.h> 133#include <mips/rmi/rmixl_firmware.h> 134#include <mips/rmi/rmixl_comvar.h> 135#include <mips/rmi/rmixl_pcievar.h> 136#include <mips/rmi/rmixl_pcixvar.h> 137 138#ifdef MACHDEP_DEBUG 139int machdep_debug=MACHDEP_DEBUG; 140# define DPRINTF(x) do { if (machdep_debug) printf x ; } while(0) 141#else 142# define DPRINTF(x) 143#endif 144 145#ifndef CONSFREQ 146# define CONSFREQ 66000000 147#endif 148#ifndef CONSPEED 149# define CONSPEED 38400 150#endif 151#ifndef CONMODE 152# define CONMODE ((TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8) 153#endif 154#ifndef CONSADDR 155# define CONSADDR 
RMIXL_IO_DEV_UART_1
#endif

/* console parameters; the CONS* defaults above may be overridden in config */
int comcnfreq = CONSFREQ;
int comcnspeed = CONSPEED;
tcflag_t comcnmode = CONMODE;
bus_addr_t comcnaddr = (bus_addr_t)CONSADDR;

/* per-board configuration state shared with the rest of the rmixl port */
struct rmixl_config rmixl_configuration;


/*
 * array of tested firmware versions
 * if you find new ones and they work
 * please add them
 */
typedef struct rmiclfw_psb_id {
	uint64_t psb_version;
	rmixlfw_psb_type_t psb_type;
} rmiclfw_psb_id_t;
static rmiclfw_psb_id_t rmiclfw_psb_id[] = {
	{ 0x4958d4fb00000056ULL, PSB_TYPE_RMI },
	{ 0x4aacdb6a00000056ULL, PSB_TYPE_RMI },
	{ 0x4b67d03200000056ULL, PSB_TYPE_RMI },
	{ 0x4c17058b00000056ULL, PSB_TYPE_RMI },
	{ 0x49a5a8fa00000056ULL, PSB_TYPE_DELL },
	{ 0x4b8ead3100000056ULL, PSB_TYPE_DELL },
};
#define RMICLFW_PSB_VERSIONS_LEN \
	(sizeof(rmiclfw_psb_id)/sizeof(rmiclfw_psb_id[0]))

/*
 * storage for fixed extent used to allocate physical address regions
 * because extent(9) start and end values are u_long, they are only
 * 32 bits on a 32 bit kernel, which is insufficient since XLS physical
 * address is 40 bits wide. So the "physaddr" map stores regions
 * in units of megabytes.
 */
static u_long rmixl_physaddr_storage[
	EXTENT_FIXED_STORAGE_SIZE(32)/sizeof(u_long)
];

/* Maps for VM objects. */
struct vm_map *phys_map = NULL;

int netboot;		/* Are we netbooting? */


/* RAM segments from firmware, after reservations; see ram_seg_resv() */
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
u_quad_t mem_cluster_maxaddr;	/* first address past the highest cluster */
u_int mem_cluster_cnt;


void configure(void);
void mach_init(int, int32_t *, void *, int64_t);
static uint64_t rmixlfw_init(int64_t);
static uint64_t mem_clusters_init(rmixlfw_mmap_t *, rmixlfw_mmap_t *);
static void __attribute__((__noreturn__)) rmixl_reset(void);
static void rmixl_physaddr_init(void);
static u_int ram_seg_resv(phys_ram_seg_t *, u_int, u_quad_t, u_quad_t);
void rmixlfw_mmap_print(rmixlfw_mmap_t *);


#ifdef MULTIPROCESSOR
static bool rmixl_fixup_cop0_oscratch(int32_t, uint32_t [2], void *);
void rmixl_get_wakeup_info(struct rmixl_config *);
#ifdef MACHDEP_DEBUG
static void rmixl_wakeup_info_print(volatile rmixlfw_cpu_wakeup_info_t *);
#endif	/* MACHDEP_DEBUG */
#endif	/* MULTIPROCESSOR */
static void rmixl_fixup_curcpu(void);

/*
 * Do all the stuff that locore normally does before calling main().
 *
 * argc/argv come from the boot firmware; each argv[i] is a 32 bit
 * value (see the sign-extension cast below).  envp is unused here.
 * infop is the firmware info block pointer, passed to rmixlfw_init().
 */
void
mach_init(int argc, int32_t *argv, void *envp, int64_t infop)
{
	struct rmixl_config *rcp = &rmixl_configuration;
	void *kernend;
	uint64_t memsize;
	extern char edata[], end[];	/* linker-provided segment bounds */

	rmixl_pcr_init_core();

	/*
	 * Clear the BSS segment.
	 */
	kernend = (void *)mips_round_page(end);
	memset(edata, 0, (char *)kernend - edata);

	/*
	 * Set up the exception vectors and CPU-specific function
	 * vectors early on.  We need the wbflush() vector set up
	 * before comcnattach() is called (or at least before the
	 * first printf() after that is called).
	 * Also clears the I+D caches.
	 *
	 * specify chip-specific EIRR/EIMR based spl functions
	 */
#ifdef MULTIPROCESSOR
	mips_vector_init(&rmixl_splsw, true);
#else
	mips_vector_init(&rmixl_splsw, false);
#endif

	/* mips_vector_init initialized mips_options */
	cpu_setmodel("%s", mips_options.mips_cpu->cpu_name);

	/* get system info from firmware */
	memsize = rmixlfw_init(infop);

	uvm_md_init();

	physmem = btoc(memsize);

	rmixl_obio_eb_bus_mem_init(&rcp->rc_obio_eb_memt, rcp);

	/* attach the serial console as early as possible */
#if NCOM > 0
	rmixl_com_cnattach(comcnaddr, comcnspeed, comcnfreq,
		COM_TYPE_NORMAL, comcnmode);
#endif

	printf("\nNetBSD/rmixl\n");
	printf("memsize = %#"PRIx64"\n", memsize);
#ifdef MEMLIMIT
	printf("memlimit = %#"PRIx64"\n", (uint64_t)MEMLIMIT);
#endif

#if defined(MULTIPROCESSOR) && defined(MACHDEP_DEBUG)
	rmixl_wakeup_info_print(rcp->rc_cpu_wakeup_info);
	rmixl_wakeup_info_print(rcp->rc_cpu_wakeup_info + 1);
	printf("cpu_wakeup_info %p, cpu_wakeup_end %p\n",
		rcp->rc_cpu_wakeup_info,
		rcp->rc_cpu_wakeup_end);
	printf("userapp_cpu_map: %#"PRIx64"\n",
		rcp->rc_psb_info.userapp_cpu_map);
	printf("wakeup: %#"PRIx64"\n", rcp->rc_psb_info.wakeup);
{
	register_t sp;
	asm volatile ("move	%0, $sp\n" : "=r"(sp));
	printf("sp: %#"PRIx64"\n", sp);
}
#endif

	rmixl_physaddr_init();

	/*
	 * Obtain the cpu frequency
	 * Compute the number of ticks for hz.
	 * Compute the delay divisor.
	 * Double the Hz if this CPU runs at twice the
	 *  external/cp0-count frequency
	 */
	curcpu()->ci_cpu_freq = rcp->rc_psb_info.cpu_frequency;
	curcpu()->ci_cctr_freq = curcpu()->ci_cpu_freq;
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
	curcpu()->ci_divisor_delay =
		((curcpu()->ci_cpu_freq + 500000) / 1000000);
	if (mips_options.mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT)
		curcpu()->ci_cpu_freq *= 2;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 * - rmixl firmware gives us a 32 bit argv[i], so adapt
	 *   by forcing sign extension in cast to (char *)
	 */
	boothowto = RB_AUTOBOOT;
	for (int i = 1; i < argc; i++) {
		for (char *cp = (char *)(intptr_t)argv[i]; *cp; cp++) {
			int howto;
			/* Ignore superfluous '-', if there is one */
			if (*cp == '-')
				continue;

			howto = 0;
			BOOT_FLAG(*cp, howto);
			if (howto != 0)
				boothowto |= howto;
#ifdef DIAGNOSTIC
			else
				printf("bootflag '%c' not recognised\n", *cp);
#endif
		}
	}
#ifdef DIAGNOSTIC
	printf("boothowto %#x\n", boothowto);
#endif

	/*
	 * Reserve pages from the VM system.
	 */

	/* reserve 0..start..kernend pages */
	mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt,
		0, round_page(MIPS_KSEG0_TO_PHYS(kernend)));

	/* reserve reset exception vector page */
	/* should never be in our clusters anyway... 
*/ 354 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt, 355 0x1FC00000, 0x1FC00000+NBPG); 356 357#ifdef MULTIPROCEESOR 358 /* reserve the cpu_wakeup_info area */ 359 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt, 360 (u_quad_t)trunc_page(rcp->rc_cpu_wakeup_info), 361 (u_quad_t)round_page(rcp->rc_cpu_wakeup_end)); 362#endif 363 364#ifdef MEMLIMIT 365 /* reserve everything >= MEMLIMIT */ 366 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt, 367 (u_quad_t)MEMLIMIT, (u_quad_t)~0); 368#endif 369 370 /* get maximum RAM address from the VM clusters */ 371 mem_cluster_maxaddr = 0; 372 for (u_int i=0; i < mem_cluster_cnt; i++) { 373 u_quad_t tmp = round_page( 374 mem_clusters[i].start + mem_clusters[i].size); 375 if (tmp > mem_cluster_maxaddr) 376 mem_cluster_maxaddr = tmp; 377 } 378 DPRINTF(("mem_cluster_maxaddr %#"PRIx64"\n", mem_cluster_maxaddr)); 379 380 /* 381 * Load mem_clusters[] into the VM system. 382 */ 383 mips_page_physload(MIPS_KSEG0_START, (vaddr_t) kernend, 384 mem_clusters, mem_cluster_cnt, NULL, 0); 385 386 /* 387 * Initialize error message buffer (at end of core). 388 */ 389 mips_init_msgbuf(); 390 391 pmap_bootstrap(); 392 393 /* 394 * Allocate uarea page for lwp0 and set it. 395 */ 396 mips_init_lwp0_uarea(); 397 398#if defined(DDB) 399 if (boothowto & RB_KDB) 400 Debugger(); 401#endif 402 /* 403 * store (cpu#0) curcpu in COP0 OSSCRATCH0 404 * used in exception vector 405 */ 406 __asm __volatile("dmtc0 %0,$%1" 407 :: "r"(&cpu_info_store), "n"(MIPS_COP_0_OSSCRATCH)); 408#ifdef MULTIPROCESSOR 409 mips_fixup_exceptions(rmixl_fixup_cop0_oscratch, NULL); 410#endif 411 rmixl_fixup_curcpu(); 412} 413 414/* 415 * set up Processor Control Regs for this core 416 */ 417void 418rmixl_pcr_init_core(void) 419{ 420 uint32_t r; 421 422#ifdef MULTIPROCESSOR 423 rmixl_mtcr(RMIXL_PCR_MMU_SETUP, __BITS(2,0)); 424 /* enable MMU clock gating */ 425 /* 4 threads active -- why needed if Global? 
*/ 426 /* enable global TLB mode */ 427#else 428 rmixl_mtcr(RMIXL_PCR_THREADEN, 1); /* disable all threads except #0 */ 429 rmixl_mtcr(RMIXL_PCR_MMU_SETUP, 0); /* enable MMU clock gating */ 430 /* set single MMU Thread Mode */ 431 /* TLB is partitioned (1 partition) */ 432#endif 433 434 r = rmixl_mfcr(RMIXL_PCR_L1D_CONFIG0); 435 r &= ~__BIT(14); /* disable Unaligned Access */ 436 rmixl_mtcr(RMIXL_PCR_L1D_CONFIG0, r); 437 438#if defined(DDB) && defined(MIPS_DDB_WATCH) 439 /* 440 * clear IEU_DEFEATURE[DBE] 441 * this enables COP0 watchpoint to trigger T_WATCH exception 442 * instead of signaling JTAG. 443 */ 444 r = rmixl_mfcr(RMIXL_PCR_IEU_DEFEATURE); 445 r &= ~__BIT(7); 446 rmixl_mtcr(RMIXL_PCR_IEU_DEFEATURE, r); 447#endif 448} 449 450#ifdef MULTIPROCESSOR 451static bool 452rmixl_fixup_cop0_oscratch(int32_t load_addr, uint32_t new_insns[2], void *arg) 453{ 454 size_t offset = load_addr - (intptr_t)&cpu_info_store; 455 456 KASSERT(MIPS_KSEG0_P(load_addr)); 457 KASSERT(offset < sizeof(struct cpu_info)); 458 459 /* 460 * Fixup this direct load cpu_info_store to actually get the current 461 * CPU's cpu_info from COP0 OSSCRATCH0 and then fix the load to be 462 * relative from the start of struct cpu_info. 463 */ 464 465 /* [0] = [d]mfc0 rX, $22 (OSScratch) */ 466 new_insns[0] = (020 << 26) 467#ifdef _LP64 468 | (1 << 21) /* double move */ 469#endif 470 | (new_insns[0] & 0x001f0000) 471 | (MIPS_COP_0_OSSCRATCH << 11) | (0 << 0); 472 473 /* [1] = [ls][dw] rX, offset(rX) */ 474 new_insns[1] = (new_insns[1] & 0xffff0000) | offset; 475 476 return true; 477} 478#endif /* MULTIPROCESSOR */ 479 480/* 481 * The following changes all lX rN, L_CPU(MIPS_CURLWP) [curlwp->l_cpu] 482 * to [d]mfc0 rN, $22 [MIPS_COP_0_OSSCRATCH] 483 * 484 * the mfc0 is 3 cycles shorter than the load. 
 */
#define LOAD_CURCPU_0	((MIPS_CURLWP_REG << 21) | offsetof(lwp_t, l_cpu))
#define MFC0_CURCPU_0	((OP_COP0 << 26) | (MIPS_COP_0_OSSCRATCH << 11))
#ifdef _LP64
#define LOAD_CURCPU	((uint32_t)(OP_LD << 26) | LOAD_CURCPU_0)
#define MFC0_CURCPU	((uint32_t)(OP_DMF << 21) | MFC0_CURCPU_0)
#else
#define LOAD_CURCPU	((uint32_t)(OP_LW << 26) | LOAD_CURCPU_0)
#define MFC0_CURCPU	((uint32_t)(OP_MF << 21) | MFC0_CURCPU_0)
#endif
#define LOAD_CURCPU_MASK	0xffe0ffff

/*
 * Scan the kernel text and patch every matching "load curlwp->l_cpu"
 * instruction into the equivalent [d]mfc0 from OSSCRATCH, syncing
 * the icache for each patched word.
 */
static void
rmixl_fixup_curcpu(void)
{
	extern uint32_t _ftext[];	/* start of kernel text (from linker) */
	extern uint32_t _etext[];	/* end of kernel text (from linker) */

	for (uint32_t *insnp = _ftext; insnp < _etext; insnp++) {
		const uint32_t insn = *insnp;
		if (__predict_false((insn & LOAD_CURCPU_MASK) == LOAD_CURCPU)) {
			/*
			 * Since the register to be loaded is located in bits
			 * 16-20 for the mfc0 and the load instruction we can
			 * just change the instruction bits around it.
			 */
			*insnp = insn ^ LOAD_CURCPU ^ MFC0_CURCPU;
			mips_icache_sync_range((vaddr_t)insnp, 4);
		}
	}
}

/*
 * ram_seg_resv - cut reserved regions out of segs, fragmenting as needed
 *
 * we simply build a new table of segs, then copy it back over the given one
 * this is inefficient but simple and called only a few times
 *
 * note: 'last' here means 1st addr past the end of the segment (start+size)
 */
static u_int
ram_seg_resv(phys_ram_seg_t *segs, u_int nsegs,
	u_quad_t resv_first, u_quad_t resv_last)
{
	u_quad_t first, last;
	int new_nsegs=0;
	int resv_flag;
	phys_ram_seg_t new_segs[VM_PHYSSEG_MAX];

	for (u_int i=0; i < nsegs; i++) {
		resv_flag = 0;
		first = trunc_page(segs[i].start);
		last = round_page(segs[i].start + segs[i].size);

		KASSERT(new_nsegs < VM_PHYSSEG_MAX);
		if ((resv_first <= first) && (resv_last >= last)) {
			/* whole segment is reserved */
			continue;
		}
		if ((resv_first > first) && (resv_first < last)) {
			u_quad_t new_last;

			/*
			 * reserved start in segment
			 * salvage the leading fragment
			 */
			resv_flag = 1;
			new_last = last - (last - resv_first);
			KASSERT (new_last > first);
			new_segs[new_nsegs].start = first;
			new_segs[new_nsegs].size = new_last - first;
			new_nsegs++;
		}
		if ((resv_last > first) && (resv_last < last)) {
			u_quad_t new_first;

			/*
			 * reserved end in segment
			 * salvage the trailing fragment
			 */
			resv_flag = 1;
			new_first = first + (resv_last - first);
			KASSERT (last > (new_first + NBPG));
			new_segs[new_nsegs].start = new_first;
			new_segs[new_nsegs].size = last - new_first;
			new_nsegs++;
		}
		if (resv_flag == 0) {
			/*
			 * nothing reserved here, take it all
			 */
			new_segs[new_nsegs].start = first;
			new_segs[new_nsegs].size = last - first;
			new_nsegs++;
		}

	}

	memcpy(segs, new_segs, sizeof(new_segs));

	return new_nsegs;
}

/*
 * create an extent for physical address space
 * these are in units of MB for sake of compression (for sake of 32 bit kernels)
 * allocate the regions where we have known functions (DRAM, IO, etc)
 * what remains can be allocated as needed for other stuff
 * e.g. to configure BARs that are not already initialized and enabled.
 */
static void
rmixl_physaddr_init(void)
{
	struct extent *ext;
	unsigned long start = 0UL;
	/* 40-bit physical space, expressed in MB units (see comment above) */
	unsigned long end = (__BIT(40) / (1024 * 1024)) -1;
	u_long base;
	u_long size;
	uint32_t r;

	ext = extent_create("physaddr", start, end,
		(void *)rmixl_physaddr_storage, sizeof(rmixl_physaddr_storage),
		EX_NOWAIT | EX_NOCOALESCE);

	if (ext == NULL)
		panic("%s: extent_create failed", __func__);

	/*
	 * grab regions per DRAM BARs
	 */
	for (u_int i=0; i < RMIXL_SBC_DRAM_NBARS; i++) {
		r = RMIXL_IOREG_READ(RMIXL_SBC_DRAM_BAR(i));
		if ((r & RMIXL_DRAM_BAR_STATUS) == 0)
			continue;	/* not enabled */
		base = (u_long)(DRAM_BAR_TO_BASE((uint64_t)r) / (1024 * 1024));
		size = (u_long)(DRAM_BAR_TO_SIZE((uint64_t)r) / (1024 * 1024));

		DPRINTF(("%s: %d: %d: 0x%08x -- 0x%010lx:%lu MB\n",
			__func__, __LINE__, i, r, base * (1024 * 1024), size));
		if (extent_alloc_region(ext, base, size, EX_NOWAIT) != 0)
			panic("%s: extent_alloc_region(%p, %#lx, %#lx, %#x) "
				"failed", __func__, ext, base, size, EX_NOWAIT);
	}

	/*
	 * get chip-dependent physaddr regions
	 */
	switch(cpu_rmixl_chip_type(mips_options.mips_cpu)) {
	case CIDFL_RMI_TYPE_XLR:
#if NRMIXL_PCIX
		rmixl_physaddr_init_pcix(ext);
#endif
		break;
	case CIDFL_RMI_TYPE_XLS:
#if NRMIXL_PCIE
		rmixl_physaddr_init_pcie(ext);
#endif
		break;
	case CIDFL_RMI_TYPE_XLP:
		/* XXX TBD */
		panic("%s: RMI XLP not yet supported", __func__);
	}

	/*
	 * at this point all regions left in "physaddr" extent
	 * are unused holes in the physical address space
	 * available for use as needed.
	 */
	rmixl_configuration.rc_phys_ex = ext;
#ifdef MACHDEP_DEBUG
	extent_print(ext);
#endif
}

/*
 * rmixlfw_init - digest the firmware (PSB) info block
 *
 * View the firmware info pointer through KSEG0, copy the psb info into
 * rmixl_configuration, and match psb_version against the table of
 * known-good firmware versions.  Returns the usable memory size in
 * bytes; unrecognized firmware falls back to MEMSIZE if configured,
 * otherwise halts.
 */
static uint64_t
rmixlfw_init(int64_t infop)
{
	struct rmixl_config *rcp = &rmixl_configuration;

#ifdef MULTIPROCESSOR
	rmixl_get_wakeup_info(rcp);
#endif

	/* firmware hands us a 32 bit pointer; access it via KSEG0 */
	infop |= MIPS_KSEG0_START;
	rcp->rc_psb_info = *(rmixlfw_info_t *)(intptr_t)infop;

	rcp->rc_psb_type = PSB_TYPE_UNKNOWN;
	for (int i=0; i < RMICLFW_PSB_VERSIONS_LEN; i++) {
		if (rmiclfw_psb_id[i].psb_version ==
		    rcp->rc_psb_info.psb_version) {
			rcp->rc_psb_type = rmiclfw_psb_id[i].psb_type;
			goto found;
		}
	}

	/* unknown firmware: fall back to the default UART base for output */
	rcp->rc_io_pbase = RMIXL_IO_DEV_PBASE;
	rmixl_putchar_init(rcp->rc_io_pbase);

#ifdef DIAGNOSTIC
	rmixl_puts("\r\nWARNING: untested psb_version: ");
	rmixl_puthex64(rcp->rc_psb_info.psb_version);
	rmixl_puts("\r\n");
#endif

#ifdef MEMSIZE
	/* XXX trust and use MEMSIZE */
	mem_clusters[0].start = 0;
	mem_clusters[0].size = MEMSIZE;
	mem_cluster_cnt = 1;
	return MEMSIZE;
#else
	rmixl_puts("\r\nERROR: configure MEMSIZE\r\n");
	cpu_reboot(RB_HALT, NULL);
	/* NOTREACHED */
#endif

 found:
	rcp->rc_io_pbase = MIPS_KSEG1_TO_PHYS(rcp->rc_psb_info.io_base);
	rmixl_putchar_init(rcp->rc_io_pbase);
#ifdef MACHDEP_DEBUG
	rmixl_puts("\r\ninfop: ");
	rmixl_puthex64((uint64_t)(intptr_t)infop);
#endif
#ifdef DIAGNOSTIC
	rmixl_puts("\r\nrecognized psb_version=");
	rmixl_puthex64(rcp->rc_psb_info.psb_version);
	rmixl_puts(", psb_type=");
	rmixl_puts(rmixlfw_psb_type_name(rcp->rc_psb_type));
	rmixl_puts("\r\n");
#endif

	return mem_clusters_init(
		(rmixlfw_mmap_t *)(intptr_t)rcp->rc_psb_info.psb_physaddr_map,
		(rmixlfw_mmap_t *)(intptr_t)rcp->rc_psb_info.avail_mem_map);
}

/* dump a firmware memory map (effective only with MACHDEP_DEBUG) */
void
rmixlfw_mmap_print(rmixlfw_mmap_t *map)
{
#ifdef MACHDEP_DEBUG
	for (uint32_t i=0; i < map->nmmaps; i++) {
		rmixl_puthex32(i);
		
rmixl_puts(", ");
		rmixl_puthex64(map->entry[i].start);
		rmixl_puts(", ");
		rmixl_puthex64(map->entry[i].size);
		rmixl_puts(", ");
		rmixl_puthex32(map->entry[i].type);
		rmixl_puts("\r\n");
	}
#endif
}

/*
 * mem_clusters_init
 *
 * initialize mem_clusters[] table based on memory address mapping
 * provided by boot firmware.
 *
 * prefer avail_mem_map if we can, otherwise use psb_physaddr_map.
 * these will be limited by MEMSIZE if it is configured.
 * if neither are available, just use MEMSIZE.
 *
 * returns the total usable memory size in bytes.
 */
static uint64_t
mem_clusters_init(
	rmixlfw_mmap_t *psb_physaddr_map,
	rmixlfw_mmap_t *avail_mem_map)
{
	rmixlfw_mmap_t *map = NULL;
	const char *mapname;
	uint64_t sz;
	uint64_t sum;
	u_int cnt;
#ifdef MEMSIZE
	uint64_t memsize = MEMSIZE;
#endif

#ifdef MACHDEP_DEBUG
	rmixl_puts("psb_physaddr_map: ");
	rmixl_puthex64((uint64_t)(intptr_t)psb_physaddr_map);
	rmixl_puts("\r\n");
#endif
	if (psb_physaddr_map != NULL) {
		map = psb_physaddr_map;
		mapname = "psb_physaddr_map";
		rmixlfw_mmap_print(map);
	}
#ifdef DIAGNOSTIC
	else {
		rmixl_puts("WARNING: no psb_physaddr_map\r\n");
	}
#endif

#ifdef MACHDEP_DEBUG
	rmixl_puts("avail_mem_map: ");
	rmixl_puthex64((uint64_t)(intptr_t)avail_mem_map);
	rmixl_puts("\r\n");
#endif
	if (avail_mem_map != NULL) {
		/* avail_mem_map takes precedence when both maps exist */
		map = avail_mem_map;
		mapname = "avail_mem_map";
		rmixlfw_mmap_print(map);
	}
#ifdef DIAGNOSTIC
	else {
		rmixl_puts("WARNING: no avail_mem_map\r\n");
	}
#endif

	if (map == NULL) {
#ifndef MEMSIZE
		/* too early for a real panic(); report and spin */
		rmixl_puts("panic: no firmware memory map, "
			"must configure MEMSIZE\r\n");
		for(;;);	/* XXX */
#else
#ifdef DIAGNOSTIC
		rmixl_puts("WARNING: no avail_mem_map, "
			"using MEMSIZE\r\n");
#endif

		mem_clusters[0].start = 0;
		mem_clusters[0].size = MEMSIZE;
		mem_cluster_cnt = 1;
		return MEMSIZE;
#endif	/* MEMSIZE */
	}

#ifdef DIAGNOSTIC
	rmixl_puts("using ");
	rmixl_puts(mapname);
	rmixl_puts("\r\n");
#endif
#ifdef MACHDEP_DEBUG
	rmixl_puts("memory clusters:\r\n");
#endif
	sum = 0;
	cnt = 0;
	/* collect RAM-type entries into mem_clusters[], accumulating sum */
	for (uint32_t i=0; i < map->nmmaps; i++) {
		if (map->entry[i].type != RMIXLFW_MMAP_TYPE_RAM)
			continue;
		mem_clusters[cnt].start = map->entry[i].start;
		sz = map->entry[i].size;
		sum += sz;
		mem_clusters[cnt].size = sz;
#ifdef MACHDEP_DEBUG
		rmixl_puthex32(i);
		rmixl_puts(": ");
		rmixl_puthex64(mem_clusters[cnt].start);
		rmixl_puts(", ");
		rmixl_puthex64(sz);
		rmixl_puts(": ");
		rmixl_puthex64(sum);
		rmixl_puts("\r\n");
#endif
#ifdef MEMSIZE
		/*
		 * configurably limit memsize
		 */
		if (sum == memsize)
			break;
		if (sum > memsize) {
			uint64_t tmp;

			/* trim the final cluster so the total equals memsize */
			tmp = sum - memsize;
			sz -= tmp;
			sum -= tmp;
			mem_clusters[cnt].size = sz;
			cnt++;
			break;
		}
#endif
		cnt++;
	}
	mem_cluster_cnt = cnt;
	return sum;
}

#ifdef MULTIPROCESSOR
/*
 * RMI firmware passes wakeup info structure in CP0 OS Scratch reg #7
 * they do not explicitly give us the size of the wakeup area.
866 * we "know" that firmware loader sets wip->gp thusly: 867 * gp = stack_start[vcpu] = round_page(wakeup_end) + (vcpu * (PAGE_SIZE * 2)) 868 * so 869 * round_page(wakeup_end) == gp - (vcpu * (PAGE_SIZE * 2)) 870 * Only the "master" cpu runs this function, so 871 * vcpu = wip->master_cpu 872 */ 873void 874rmixl_get_wakeup_info(struct rmixl_config *rcp) 875{ 876 volatile rmixlfw_cpu_wakeup_info_t *wip; 877 int32_t scratch_7; 878 intptr_t end; 879 880 __asm__ volatile( 881 ".set push" "\n" 882 ".set noreorder" "\n" 883 ".set mips64" "\n" 884 "dmfc0 %0, $22, 7" "\n" 885 ".set pop" "\n" 886 : "=r"(scratch_7)); 887 888 wip = (volatile rmixlfw_cpu_wakeup_info_t *) 889 (intptr_t)scratch_7; 890 end = wip->entry.gp - (wip->master_cpu & (PAGE_SIZE * 2)); 891 892 if (wip->valid == 1) { 893 rcp->rc_cpu_wakeup_end = (const void *)end; 894 rcp->rc_cpu_wakeup_info = wip; 895 } 896}; 897 898#ifdef MACHDEP_DEBUG 899static void 900rmixl_wakeup_info_print(volatile rmixlfw_cpu_wakeup_info_t *wip) 901{ 902 int i; 903 904 printf("%s: wip %p, size %lu\n", __func__, wip, sizeof(*wip)); 905 906 printf("cpu_status %#x\n", wip->cpu_status); 907 printf("valid: %d\n", wip->valid); 908 printf("entry: addr %#x, args %#x, sp %#"PRIx64", gp %#"PRIx64"\n", 909 wip->entry.addr, 910 wip->entry.args, 911 wip->entry.sp, 912 wip->entry.gp); 913 printf("master_cpu %d\n", wip->master_cpu); 914 printf("master_cpu_mask %#x\n", wip->master_cpu_mask); 915 printf("buddy_cpu_mask %#x\n", wip->buddy_cpu_mask); 916 printf("psb_os_cpu_map %#x\n", wip->psb_os_cpu_map); 917 printf("argc %d\n", wip->argc); 918 printf("argv:"); 919 for (i=0; i < wip->argc; i++) 920 printf(" %#x", wip->argv[i]); 921 printf("\n"); 922 printf("valid_tlb_entries %d\n", wip->valid_tlb_entries); 923 printf("tlb_map:\n"); 924 for (i=0; i < wip->valid_tlb_entries; i++) { 925 volatile const struct lib_cpu_tlb_mapping *m = 926 &wip->tlb_map[i]; 927 printf(" %d", m->page_size); 928 printf(", %d", m->asid); 929 printf(", %d", m->coherency); 930 
printf(", %d", m->coherency); 931 printf(", %d", m->attr); 932 printf(", %#x", m->virt); 933 printf(", %#"PRIx64"\n", m->phys); 934 } 935 printf("elf segs:\n"); 936 for (i=0; i < MAX_ELF_SEGMENTS; i++) { 937 volatile const struct core_segment_info *e = 938 &wip->seg_info[i]; 939 printf(" %#"PRIx64"", e->vaddr); 940 printf(", %#"PRIx64"", e->memsz); 941 printf(", %#x\n", e->flags); 942 } 943 printf("envc %d\n", wip->envc); 944 for (i=0; i < wip->envc; i++) 945 printf(" %#x \"%s\"", wip->envs[i], 946 (char *)(intptr_t)(int32_t)(wip->envs[i])); 947 printf("\n"); 948 printf("app_mode %d\n", wip->app_mode); 949 printf("printk_lock %#x\n", wip->printk_lock); 950 printf("kseg_master %d\n", wip->kseg_master); 951 printf("kuseg_reentry_function %#x\n", wip->kuseg_reentry_function); 952 printf("kuseg_reentry_args %#x\n", wip->kuseg_reentry_args); 953 printf("app_shared_mem_addr %#"PRIx64"\n", wip->app_shared_mem_addr); 954 printf("app_shared_mem_size %#"PRIx64"\n", wip->app_shared_mem_size); 955 printf("app_shared_mem_orig %#"PRIx64"\n", wip->app_shared_mem_orig); 956 printf("loader_lock %#x\n", wip->loader_lock); 957 printf("global_wakeup_mask %#x\n", wip->global_wakeup_mask); 958 printf("unused_0 %#x\n", wip->unused_0); 959} 960#endif /* MACHDEP_DEBUG */ 961#endif /* MULTIPROCESSOR */ 962 963void 964consinit(void) 965{ 966 967 /* 968 * Everything related to console initialization is done 969 * in mach_init(). 970 */ 971} 972 973/* 974 * Allocate memory for variable-sized tables, 975 */ 976void 977cpu_startup(void) 978{ 979 /* 980 * Virtual memory is bootstrapped -- notify the bus spaces 981 * that memory allocation is now safe. 982 */ 983 rmixl_configuration.rc_mallocsafe = 1; 984 985 /* Do the usual stuff */ 986 cpu_startup_common(); 987} 988 989int waittime = -1; 990 991void 992cpu_reboot(int howto, char *bootstr) 993{ 994 995 /* Take a snapshot before clobbering any registers. 
*/
	savectx(lwp_getpcb(curlwp));

	if (cold) {
		/* cold boot: nothing to sync or dump, just halt below */
		howto |= RB_HALT;
		goto haltsys;
	}

	/* If "always halt" was specified as a boot flag, obey. */
	if (boothowto & RB_HALT)
		howto |= RB_HALT;

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && (waittime < 0)) {
		/* waittime guards against syncing more than once */
		waittime = 0;
		vfs_shutdown();
	}

	/* block interrupts for the remainder of shutdown */
	splhigh();

	if (howto & RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);	/* For proper keyboard command handling */
		cngetc();
		cnpollc(0);
	}

	printf("rebooting...\n\n");

	rmixl_reset();
}

/*
 * goodbye world
 *
 * trigger a soft reset via the GPIO reset register;
 * if that somehow fails to reset the chip, spin forever.
 */
void __attribute__((__noreturn__))
rmixl_reset(void)
{
	uint32_t r;

	r = RMIXL_IOREG_READ(RMIXL_IO_DEV_GPIO + RMIXL_GPIO_RESET);
	r |= RMIXL_GPIO_RESET_RESET;
	RMIXL_IOREG_WRITE(RMIXL_IO_DEV_GPIO + RMIXL_GPIO_RESET, r);

	printf("soft reset failed, spinning...\n");
	for (;;);
}