/* machdep.c revision 1.52 */
1/* $OpenBSD: machdep.c,v 1.52 2002/02/02 21:10:56 mickey Exp $ */ 2 3/* 4 * Copyright (c) 1999-2002 Michael Shalayeff 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Michael Shalayeff. 18 * 4. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 24 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 29 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 30 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34#include <sys/param.h> 35#include <sys/systm.h> 36#include <sys/signalvar.h> 37#include <sys/kernel.h> 38#include <sys/proc.h> 39#include <sys/buf.h> 40#include <sys/reboot.h> 41#include <sys/device.h> 42#include <sys/conf.h> 43#include <sys/file.h> 44#include <sys/timeout.h> 45#include <sys/malloc.h> 46#include <sys/mbuf.h> 47#include <sys/msgbuf.h> 48#include <sys/ioctl.h> 49#include <sys/tty.h> 50#include <sys/user.h> 51#include <sys/exec.h> 52#include <sys/sysctl.h> 53#include <sys/core.h> 54#include <sys/kcore.h> 55#include <sys/extent.h> 56#ifdef SYSVMSG 57#include <sys/msg.h> 58#endif 59#ifdef SYSVSEM 60#include <sys/sem.h> 61#endif 62#ifdef SYSVSHM 63#include <sys/shm.h> 64#endif 65 66#include <sys/mount.h> 67#include <sys/syscallargs.h> 68 69#include <uvm/uvm.h> 70#include <uvm/uvm_page.h> 71 72#include <dev/cons.h> 73 74#include <machine/pdc.h> 75#include <machine/iomod.h> 76#include <machine/psl.h> 77#include <machine/reg.h> 78#include <machine/cpufunc.h> 79#include <machine/autoconf.h> 80#include <machine/kcore.h> 81 82#ifdef COMPAT_HPUX 83#include <compat/hpux/hpux.h> 84#endif 85 86#ifdef DDB 87#include <machine/db_machdep.h> 88#include <ddb/db_access.h> 89#include <ddb/db_sym.h> 90#include <ddb/db_extern.h> 91#endif 92 93#include <hppa/dev/cpudevs.h> 94 95/* 96 * Patchable buffer cache parameters 97 */ 98#ifdef NBUF 99int nbuf = NBUF; 100#else 101int nbuf = 0; 102#endif 103#ifdef BUFPAGES 104int bufpages = BUFPAGES; 105#else 106int bufpages = 0; 107#endif 108 109/* 110 * Different kinds of flags used throughout the kernel. 
111 */ 112int cold = 1; /* unset when engine is up to go */ 113int msgbufmapped; /* set when safe to use msgbuf */ 114 115/* 116 * cache configuration, for most machines is the same 117 * numbers, so it makes sense to do defines w/ numbers depending 118 * on cofigured cpu types in the kernel 119 */ 120int icache_stride, icache_line_mask; 121int dcache_stride, dcache_line_mask; 122 123/* 124 * things to not kill 125 */ 126volatile u_int8_t *machine_ledaddr; 127int machine_ledword, machine_leds; 128 129/* 130 * CPU params (should be the same for all cpus in the system) 131 */ 132struct pdc_cache pdc_cache PDC_ALIGNMENT; 133struct pdc_btlb pdc_btlb PDC_ALIGNMENT; 134 135 /* w/ a little deviation should be the same for all installed cpus */ 136u_int cpu_ticksnum, cpu_ticksdenom, cpu_hzticks; 137 138 /* exported info */ 139char machine[] = MACHINE_ARCH; 140char cpu_model[128]; 141enum hppa_cpu_type cpu_type; 142const char *cpu_typename; 143#ifdef COMPAT_HPUX 144int cpu_model_hpux; /* contains HPUX_SYSCONF_CPU* kind of value */ 145#endif 146 147/* 148 * exported methods for cpus 149 */ 150int (*cpu_desidhash) __P((void)); 151int (*cpu_hpt_init) __P((vaddr_t hpt, vsize_t hptsize)); 152int (*cpu_ibtlb_ins) __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa, 153 vsize_t sz, u_int prot)); 154int (*cpu_dbtlb_ins) __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa, 155 vsize_t sz, u_int prot)); 156 157dev_t bootdev; 158int totalphysmem, resvmem, physmem, esym; 159 160/* 161 * Things for MI glue to stick on. 
162 */ 163struct user *proc0paddr; 164long mem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(32) / sizeof(long)]; 165struct extent *hppa_ex; 166 167struct vm_map *exec_map = NULL; 168struct vm_map *phys_map = NULL; 169/* Virtual page frame for /dev/mem (see mem.c) */ 170vaddr_t vmmap; 171 172void delay_init __P((void)); 173static __inline void fall __P((int, int, int, int, int)); 174void dumpsys __P((void)); 175void hpmc_dump __P((void)); 176 177/* 178 * wide used hardware params 179 */ 180struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT; 181struct pdc_coproc pdc_coproc PDC_ALIGNMENT; 182struct pdc_coherence pdc_coherence PDC_ALIGNMENT; 183struct pdc_spidb pdc_spidbits PDC_ALIGNMENT; 184 185#ifdef DEBUG 186int sigdebug = 0xff; 187pid_t sigpid = 0; 188#define SDB_FOLLOW 0x01 189#endif 190 191/* 192 * Whatever CPU types we support 193 */ 194extern const u_int itlb_x[], dtlb_x[], dtlbna_x[], tlbd_x[]; 195extern const u_int itlb_s[], dtlb_s[], dtlbna_s[], tlbd_s[]; 196extern const u_int itlb_t[], dtlb_t[], dtlbna_t[], tlbd_t[]; 197extern const u_int itlb_l[], dtlb_l[], dtlbna_l[], tlbd_l[]; 198int iibtlb_s __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa, 199 vsize_t sz, u_int prot)); 200int idbtlb_s __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa, 201 vsize_t sz, u_int prot)); 202int ibtlb_t __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa, 203 vsize_t sz, u_int prot)); 204int ibtlb_l __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa, 205 vsize_t sz, u_int prot)); 206int ibtlb_g __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa, 207 vsize_t sz, u_int prot)); 208int pbtlb_g __P((int i)); 209int hpti_l __P((vaddr_t, vsize_t)); 210int hpti_g __P((vaddr_t, vsize_t)); 211int desidhash_x __P((void)); 212int desidhash_s __P((void)); 213int desidhash_t __P((void)); 214int desidhash_l __P((void)); 215int desidhash_g __P((void)); 216const struct hppa_cpu_typed { 217 char name[8]; 218 enum hppa_cpu_type type; 219 int arch; 220 int features; 221 int (*desidhash) __P((void)); 222 const u_int 
*itlbh, *dtlbh, *dtlbnah, *tlbdh; 223 int (*dbtlbins) __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa, 224 vsize_t sz, u_int prot)); 225 int (*ibtlbins) __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa, 226 vsize_t sz, u_int prot)); 227 int (*btlbprg) __P((int i)); 228 int (*hptinit) __P((vaddr_t hpt, vsize_t hptsize)); 229} cpu_types[] = { 230#ifdef HP7000_CPU 231 { "PCX", hpcx, 0x10, 0, 232 desidhash_x, itlb_x, dtlb_x, dtlbna_x, tlbd_x, 233 ibtlb_g, NULL, pbtlb_g}, 234#endif 235#ifdef HP7100_CPU 236 { "PCXS", hpcxs, 0x11, HPPA_FTRS_BTLBS, 237 desidhash_s, itlb_s, dtlb_s, dtlbna_s, tlbd_s, 238 ibtlb_g, NULL, pbtlb_g}, 239#endif 240#ifdef HP7200_CPU 241 { "PCXT", hpcxt, 0x11, HPPA_FTRS_BTLBU, 242 desidhash_t, itlb_t, dtlb_t, dtlbna_t, tlbd_t, 243 ibtlb_g, NULL, pbtlb_g}, 244/* HOW? { "PCXT'", hpcxta,0x11, HPPA_FTRS_BTLBU, 245 desidhash_t, itlb_t, dtlb_t, dtlbna_t, tlbd_t, 246 ibtlb_g, NULL, pbtlb_g}, */ 247#endif 248#ifdef HP7100LC_CPU 249 { "PCXL", hpcxl, 0x11, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT, 250 desidhash_l, itlb_l, dtlb_l, dtlbna_l, tlbd_l, 251 ibtlb_g, NULL, pbtlb_g, hpti_g}, 252#endif 253#ifdef HP7300LC_CPU 254/* HOW? { "PCXL2", hpcxl2,0x11, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT, 255 desidhash_l, itlb_l, dtlb_l, dtlbna_l, tlbd_l, 256 ibtlb_g, NULL, pbtlb_g, hpti_g}, */ 257#endif 258#ifdef HP8000_CPU 259 { "PCXU", hpcxu, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT, 260 desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l, 261 ibtlb_g, NULL, pbtlb_g, hpti_g}, 262#endif 263#ifdef HP8200_CPU 264/* HOW? { "PCXU2", hpcxu2,0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT, 265 desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l, 266 ibtlb_g, NULL, pbtlb_g, hpti_g}, */ 267#endif 268#ifdef HP8500_CPU 269/* HOW? { "PCXW", hpcxw, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT, 270 desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l, 271 ibtlb_g, NULL, pbtlb_g, hpti_g}, */ 272#endif 273#ifdef HP8600_CPU 274/* HOW? 
{ "PCXW+", hpcxw, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT, 275 desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l, 276 ibtlb_g, NULL, pbtlb_g, hpti_g}, */ 277#endif 278 { "", 0 } 279}; 280 281void 282hppa_init(start) 283 paddr_t start; 284{ 285 extern int kernel_text; 286 vaddr_t v, vstart, vend; 287 register int error; 288 int hptsize; /* size of HPT table if supported */ 289 int cpu_features = 0; 290 291 boothowto |= RB_SINGLE; /* XXX always go into single-user while debug */ 292 293 pdc_init(); /* init PDC iface, so we can call em easy */ 294 295 cpu_hzticks = (PAGE0->mem_10msec * 100) / hz; 296 delay_init(); /* calculate cpu clock ratio */ 297 298 /* cache parameters */ 299 if ((error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT, 300 &pdc_cache)) < 0) { 301#ifdef DEBUG 302 printf("WARNING: PDC_CACHE error %d\n", error); 303#endif 304 } 305 306 dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1; 307 dcache_stride = pdc_cache.dc_stride; 308 icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1; 309 icache_stride = pdc_cache.ic_stride; 310 311 /* cache coherence params (pbably available for 8k only) */ 312 error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_SETCS, 313 &pdc_coherence, 1, 1, 1, 1); 314#ifdef DEBUG 315 printf ("PDC_CACHE_SETCS: %d, %d, %d, %d (%d)\n", 316 pdc_coherence.ia_cst, pdc_coherence.da_cst, 317 pdc_coherence.ita_cst, pdc_coherence.dta_cst, error); 318#endif 319 error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_GETSPIDB, 320 &pdc_spidbits, 0, 0, 0, 0); 321 printf("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error); 322 323 /* setup hpmc handler */ 324 { 325 extern u_int hpmc_v; /* from locore.s */ 326 register u_int *p = &hpmc_v; 327 328 if (pdc_call((iodcio_t)pdc, 0, PDC_INSTR, PDC_INSTR_DFLT, p)) 329 *p = 0; /* XXX nop is more appropriate? 
*/ 330 331 p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]); 332 } 333 334 /* BTLB params */ 335 if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, 336 PDC_BTLB_DEFAULT, &pdc_btlb)) < 0) { 337#ifdef DEBUG 338 printf("WARNING: PDC_BTLB error %d", error); 339#endif 340 } else { 341#ifdef BTLBDEBUG 342 printf("btlb info: minsz=%d, maxsz=%d\n", 343 pdc_btlb.min_size, pdc_btlb.max_size); 344 printf("btlb fixed: i=%d, d=%d, c=%d\n", 345 pdc_btlb.finfo.num_i, 346 pdc_btlb.finfo.num_d, 347 pdc_btlb.finfo.num_c); 348 printf("btlb varbl: i=%d, d=%d, c=%d\n", 349 pdc_btlb.vinfo.num_i, 350 pdc_btlb.vinfo.num_d, 351 pdc_btlb.vinfo.num_c); 352#endif /* BTLBDEBUG */ 353 /* purge TLBs and caches */ 354 if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, 355 PDC_BTLB_PURGE_ALL) < 0) 356 printf("WARNING: BTLB purge failed\n"); 357 358 cpu_features = pdc_btlb.finfo.num_c? 359 HPPA_FTRS_BTLBU : HPPA_FTRS_BTLBS; 360 } 361 362 ptlball(); 363 fcacheall(); 364 365 totalphysmem = PAGE0->imm_max_mem / NBPG; 366 resvmem = ((vaddr_t)&kernel_text) / NBPG; 367 368 /* calculate HPT size */ 369 /* for (hptsize = 256; hptsize < totalphysmem; hptsize *= 2); */ 370hptsize=256; /* XXX one page for now */ 371 hptsize *= 16; /* sizeof(hpt_entry) */ 372 373 if (pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) && 374 !pdc_hwtlb.min_size && !pdc_hwtlb.max_size) { 375 printf("WARNING: no HPT support, fine!\n"); 376 mtctl(hptsize - 1, CR_HPTMASK); 377 hptsize = 0; 378 } else { 379 cpu_features |= HPPA_FTRS_HVT; 380 381 if (hptsize > pdc_hwtlb.max_size) 382 hptsize = pdc_hwtlb.max_size; 383 else if (hptsize < pdc_hwtlb.min_size) 384 hptsize = pdc_hwtlb.min_size; 385 mtctl(hptsize - 1, CR_HPTMASK); 386 } 387 388 /* 389 * Deal w/ CPU now 390 */ 391 { 392 const struct hppa_cpu_typed *p; 393 394 for (p = cpu_types; 395 p->arch && p->features != cpu_features; p++); 396 397 if (!p->arch) 398 printf("WARNING: UNKNOWN CPU TYPE; GOOD LUCK (%x)\n", 399 cpu_features); 400 else { 401 /* 402 * Ptrs to 
various tlb handlers, to be filled 403 * based on cpu features. 404 * from locore.S 405 */ 406 extern u_int trap_ep_T_TLB_DIRTY[]; 407 extern u_int trap_ep_T_DTLBMISS[]; 408 extern u_int trap_ep_T_DTLBMISSNA[]; 409 extern u_int trap_ep_T_ITLBMISS[]; 410 extern u_int trap_ep_T_ITLBMISSNA[]; 411 412 cpu_type = p->type; 413 cpu_typename = p->name; 414 cpu_ibtlb_ins = p->ibtlbins; 415 cpu_dbtlb_ins = p->dbtlbins; 416 cpu_hpt_init = p->hptinit; 417 cpu_desidhash = p->desidhash; 418 419#define LDILDO(t,f) ((t)[0] = (f)[0], (t)[1] = (f)[1]) 420 LDILDO(trap_ep_T_TLB_DIRTY , p->tlbdh); 421 LDILDO(trap_ep_T_DTLBMISS , p->dtlbh); 422 LDILDO(trap_ep_T_DTLBMISSNA, p->dtlbnah); 423 LDILDO(trap_ep_T_ITLBMISS , p->itlbh); 424 LDILDO(trap_ep_T_ITLBMISSNA, p->itlbh); 425#undef LDILDO 426 } 427 } 428 429 /* we hope this won't fail */ 430 hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF, 431 (caddr_t)mem_ex_storage, sizeof(mem_ex_storage), 432 EX_NOCOALESCE|EX_NOWAIT); 433 if (extent_alloc_region(hppa_ex, 0, (vaddr_t)PAGE0->imm_max_mem, 434 EX_NOWAIT)) 435 panic("cannot reserve main memory"); 436 437 vstart = hppa_round_page(start); 438 vend = VM_MAX_KERNEL_ADDRESS; 439 440 /* 441 * Now allocate kernel dynamic variables 442 */ 443 444 /* buffer cache parameters */ 445#ifndef BUFCACHEPERCENT 446#define BUFCACHEPERCENT 10 447#endif /* BUFCACHEPERCENT */ 448 if (bufpages == 0) 449 bufpages = totalphysmem / 100 * 450 (totalphysmem <= 0x1000? 5 : BUFCACHEPERCENT); 451 452 if (nbuf == 0) 453 nbuf = bufpages < 16? 16 : bufpages; 454 455 /* Restrict to at most 70% filled kvm */ 456 if (nbuf * MAXBSIZE > 457 (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 10) 458 nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / 459 MAXBSIZE * 7 / 10; 460 461 /* More buffer pages than fits into the buffers is senseless. 
*/ 462 if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) 463 bufpages = nbuf * MAXBSIZE / PAGE_SIZE; 464 465 v = vstart; 466#define valloc(name, type, num) (name) = (type *)v; v = (vaddr_t)((name)+(num)) 467 468 valloc(buf, struct buf, nbuf); 469 470#ifdef SYSVSHM 471 valloc(shmsegs, struct shmid_ds, shminfo.shmmni); 472#endif 473#ifdef SYSVSEM 474 valloc(sema, struct semid_ds, seminfo.semmni); 475 valloc(sem, struct sem, seminfo.semmns); 476 /* This is pretty disgusting! */ 477 valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int)); 478#endif 479#ifdef SYSVMSG 480 valloc(msgpool, char, msginfo.msgmax); 481 valloc(msgmaps, struct msgmap, msginfo.msgseg); 482 valloc(msghdrs, struct msg, msginfo.msgtql); 483 valloc(msqids, struct msqid_ds, msginfo.msgmni); 484#endif 485#undef valloc 486 487 v = hppa_round_page(v); 488 bzero ((void *)vstart, (v - vstart)); 489 vstart = v; 490 491 /* sets physmem */ 492 pmap_bootstrap(&vstart, &vend); 493 494 /* alloc msgbuf */ 495 if (!(msgbufp = (void *)pmap_steal_memory(MSGBUFSIZE, NULL, NULL))) 496 panic("cannot allocate msgbuf"); 497 msgbufmapped = 1; 498 499 /* Turn on the HW TLB assist */ 500 if (hptsize) { 501 u_int hpt; 502 503 mfctl(CR_VTOP, hpt); 504 if ((error = (cpu_hpt_init)(hpt, hptsize)) < 0) { 505#ifdef DEBUG 506 printf("WARNING: HPT init error %d\n", error); 507#endif 508 } else { 509#ifdef PMAPDEBUG 510 printf("HPT: %d entries @ 0x%x\n", 511 hptsize / sizeof(struct hpt_entry), hpt); 512#endif 513 } 514 } 515 516 /* locate coprocessors and SFUs */ 517 if ((error = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT, 518 &pdc_coproc)) < 0) 519 printf("WARNING: PDC_COPROC error %d\n", error); 520 else { 521#ifdef DEBUG 522 printf("pdc_coproc: %x, %x\n", pdc_coproc.ccr_enable, 523 pdc_coproc.ccr_present); 524#endif 525 mtctl(pdc_coproc.ccr_enable & CCR_MASK, CR_CCR); 526 } 527 528 /* they say PDC_COPROC might turn fault light on */ 529 pdc_call((iodcio_t)pdc, PDC_CHASSIS, PDC_CHASSIS_DISP, 530 
PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0); 531 532#ifdef DDB 533 ddb_init(); 534#endif 535} 536 537void 538cpu_startup() 539{ 540 struct pdc_model pdc_model PDC_ALIGNMENT; 541 vaddr_t minaddr, maxaddr; 542 vsize_t size; 543 int base, residual; 544 int err, i; 545#ifdef DEBUG 546 extern int pmapdebug; 547 int opmapdebug = pmapdebug; 548 549 pmapdebug = 0; 550#endif 551 552 /* 553 * i won't understand a friend of mine, 554 * who sat in a room full of artificial ice, 555 * fogging the air w/ humid cries -- 556 * WELCOME TO SUMMER! 557 */ 558 printf(version); 559 560 /* identify system type */ 561 if ((err = pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_INFO, 562 &pdc_model)) < 0) { 563#ifdef DEBUG 564 printf("WARNING: PDC_MODEL error %d\n", err); 565#endif 566 } else { 567 const char *p, *q; 568 i = pdc_model.hvers >> 4; 569 p = hppa_mod_info(HPPA_TYPE_BOARD, i); 570 switch (pdc_model.arch_rev) { 571 default: 572 case 0: 573 q = "1.0"; 574#ifdef COMPAT_HPUX 575 cpu_model_hpux = HPUX_SYSCONF_CPUPA10; 576#endif 577 break; 578 case 4: 579 q = "1.1"; 580#ifdef COMPAT_HPUX 581 cpu_model_hpux = HPUX_SYSCONF_CPUPA11; 582#endif 583 break; 584 case 8: 585 q = "2.0"; 586#ifdef COMPAT_HPUX 587 cpu_model_hpux = HPUX_SYSCONF_CPUPA20; 588#endif 589 break; 590 } 591 592 if (p) 593 sprintf(cpu_model, "HP9000/%s PA-RISC %s", p, q); 594 else 595 sprintf(cpu_model, "HP9000/(UNKNOWN %x) PA-RISC %s", 596 i, q); 597 printf("%s\n", cpu_model); 598 } 599 600 printf("real mem = %d (%d reserved for PROM, %d used by OpenBSD)\n", 601 ctob(totalphysmem), ctob(resvmem), ctob(physmem)); 602 603 /* 604 * Now allocate buffers proper. They are different than the above 605 * in that they usually occupy more virtual memory than physical. 
606 */ 607 size = MAXBSIZE * nbuf; 608 if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), 609 NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, 610 UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0))) 611 panic("cpu_startup: cannot allocate VM for buffers"); 612 minaddr = (vaddr_t)buffers; 613 base = bufpages / nbuf; 614 residual = bufpages % nbuf; 615 for (i = 0; i < nbuf; i++) { 616 vsize_t curbufsize; 617 vaddr_t curbuf; 618 struct vm_page *pg; 619 620 /* 621 * First <residual> buffers get (base+1) physical pages 622 * allocated for them. The rest get (base) physical pages. 623 * 624 * The rest of each buffer occupies virtual space, 625 * but has no physical memory allocated for it. 626 */ 627 curbuf = (vaddr_t) buffers + (i * MAXBSIZE); 628 curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base); 629 630 while (curbufsize) { 631 if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL) 632 panic("cpu_startup: not enough memory for " 633 "buffer cache"); 634 pmap_enter(kernel_map->pmap, curbuf, 635 VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE, 636 VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED); 637 curbuf += PAGE_SIZE; 638 curbufsize -= PAGE_SIZE; 639 } 640 } 641 642 /* 643 * Allocate a submap for exec arguments. This map effectively 644 * limits the number of processes exec'ing at any time. 645 */ 646 exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 647 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL); 648 649 /* 650 * Allocate a submap for physio 651 */ 652 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 653 VM_PHYS_SIZE, 0, FALSE, NULL); 654 655#ifdef DEBUG 656 pmapdebug = opmapdebug; 657#endif 658 printf("avail mem = %ld\n", ptoa(uvmexp.free)); 659 printf("using %d buffers containing %d bytes of memory\n", 660 nbuf, bufpages * PAGE_SIZE); 661 662 /* 663 * Set up buffers, so they can be used to read disk labels. 664 */ 665 bufinit(); 666 vmmap = uvm_km_valloc_wait(kernel_map, NBPG); 667 668 /* 669 * Configure the system. 
670 */ 671 if (boothowto & RB_CONFIG) { 672#ifdef BOOT_CONFIG 673 user_config(); 674#else 675 printf("kernel does not support -c; continuing..\n"); 676#endif 677 } 678} 679 680/* 681 * compute cpu clock ratio such as: 682 * cpu_ticksnum / cpu_ticksdenom = t + delta 683 * delta -> 0 684 */ 685void 686delay_init(void) 687{ 688 register u_int num, denom, delta, mdelta; 689 690 mdelta = UINT_MAX; 691 for (denom = 1; denom < 1000; denom++) { 692 num = (PAGE0->mem_10msec * denom) / 10000; 693 delta = num * 10000 / denom - PAGE0->mem_10msec; 694 if (!delta) { 695 cpu_ticksdenom = denom; 696 cpu_ticksnum = num; 697 break; 698 } else if (delta < mdelta) { 699 cpu_ticksdenom = denom; 700 cpu_ticksnum = num; 701 } 702 } 703} 704 705void 706delay(us) 707 u_int us; 708{ 709 register u_int start, end, n; 710 711 mfctl(CR_ITMR, start); 712 while (us) { 713 n = min(1000, us); 714 end = start + n * cpu_ticksnum / cpu_ticksdenom; 715 716 /* N.B. Interval Timer may wrap around */ 717 if (end < start) 718 do 719 mfctl(CR_ITMR, start); 720 while (start > end); 721 722 do 723 mfctl(CR_ITMR, start); 724 while (start < end); 725 726 us -= n; 727 mfctl(CR_ITMR, start); 728 } 729} 730 731static __inline void 732fall(c_base, c_count, c_loop, c_stride, data) 733 int c_base, c_count, c_loop, c_stride, data; 734{ 735 register int loop; 736 737 for (; c_count--; c_base += c_stride) 738 for (loop = c_loop; loop--; ) 739 if (data) 740 fdce(0, c_base); 741 else 742 fice(0, c_base); 743} 744 745void 746fcacheall() 747{ 748 /* 749 * Flush the instruction, then data cache. 
750 */ 751 fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop, 752 pdc_cache.ic_stride, 0); 753 sync_caches(); 754 fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop, 755 pdc_cache.dc_stride, 1); 756 sync_caches(); 757} 758 759void 760ptlball() 761{ 762 register pa_space_t sp; 763 register int i, j, k; 764 765 /* instruction TLB */ 766 sp = pdc_cache.it_sp_base; 767 for (i = 0; i < pdc_cache.it_sp_count; i++) { 768 register vaddr_t off = pdc_cache.it_off_base; 769 for (j = 0; j < pdc_cache.it_off_count; j++) { 770 for (k = 0; k < pdc_cache.it_loop; k++) 771 pitlbe(sp, off); 772 off += pdc_cache.it_off_stride; 773 } 774 sp += pdc_cache.it_sp_stride; 775 } 776 777 /* data TLB */ 778 sp = pdc_cache.dt_sp_base; 779 for (i = 0; i < pdc_cache.dt_sp_count; i++) { 780 register vaddr_t off = pdc_cache.dt_off_base; 781 for (j = 0; j < pdc_cache.dt_off_count; j++) { 782 for (k = 0; k < pdc_cache.dt_loop; k++) 783 pdtlbe(sp, off); 784 off += pdc_cache.dt_off_stride; 785 } 786 sp += pdc_cache.dt_sp_stride; 787 } 788} 789 790int 791desidhash_g() 792{ 793 /* TODO call PDC to disable SID hashing in the cache index */ 794 795 return 0; 796} 797 798int 799hpti_g(hpt, hptsize) 800 vaddr_t hpt; 801 vsize_t hptsize; 802{ 803 return pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_CONFIG, 804 &pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE); 805} 806 807int 808pbtlb_g(i) 809 int i; 810{ 811 return -1; 812} 813 814int 815ibtlb_g(i, sp, va, pa, sz, prot) 816 int i; 817 pa_space_t sp; 818 vaddr_t va; 819 paddr_t pa; 820 vsize_t sz; 821 u_int prot; 822{ 823 int error; 824 825 if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_INSERT, 826 sp, va, pa, sz, prot, i)) < 0) { 827#ifdef BTLBDEBUG 828 printf("WARNING: BTLB insert failed (%d)\n", error); 829#endif 830 } 831 return error; 832} 833 834int 835btlb_insert(space, va, pa, lenp, prot) 836 pa_space_t space; 837 vaddr_t va; 838 paddr_t pa; 839 vsize_t *lenp; 840 u_int prot; 841{ 842 static u_int32_t mask; 843 register 
vsize_t len; 844 register int error, i; 845 846 /* align size */ 847 for (len = pdc_btlb.min_size << PGSHIFT; len < *lenp; len <<= 1); 848 len >>= PGSHIFT; 849 i = ffs(~mask) - 1; 850 if (len > pdc_btlb.max_size || i < 0) { 851#ifdef BTLBDEBUG 852 printf("btln_insert: too big (%u < %u < %u)\n", 853 pdc_btlb.min_size, len, pdc_btlb.max_size); 854#endif 855 return -(ENOMEM); 856 } 857 858 mask |= 1 << i; 859 pa >>= PGSHIFT; 860 va >>= PGSHIFT; 861 /* check address alignment */ 862 if (pa & (len - 1)) 863 printf("WARNING: BTLB address misaligned\n"); 864 865 /* ensure IO space is uncached */ 866 if ((pa & 0xF0000) == 0xF0000) 867 prot |= TLB_UNCACHEABLE; 868 869#ifdef BTLBDEBUG 870 printf("btlb_insert(%d): %x:%x=%x[%x,%x]\n", i, space, va, pa, len, prot); 871#endif 872 if ((error = (*cpu_dbtlb_ins)(i, space, va, pa, len, prot)) < 0) 873 return -(EINVAL); 874 *lenp = len << PGSHIFT; 875 876 return i; 877} 878 879int waittime = -1; 880 881void 882boot(howto) 883 int howto; 884{ 885 /* If system is cold, just halt. */ 886 if (cold) { 887 howto |= RB_HALT; 888 goto haltsys; 889 } 890 891 boothowto = howto | (boothowto & RB_HALT); 892 893 if (!(howto & RB_NOSYNC)) { 894 waittime = 0; 895 vfs_shutdown(); 896 /* 897 * If we've been adjusting the clock, the todr 898 * will be out of synch; adjust it now unless 899 * the system was sitting in ddb. 
900 */ 901 if ((howto & RB_TIMEBAD) == 0) 902 resettodr(); 903 else 904 printf("WARNING: not updating battery clock\n"); 905 } 906 907 /* XXX probably save howto into stable storage */ 908 909 splhigh(); 910 911 if (howto & RB_DUMP) 912 dumpsys(); 913 914haltsys: 915 doshutdownhooks(); 916 917 if (howto & RB_HALT) { 918 printf("System halted!\n"); 919 __asm __volatile("stwas %0, 0(%1)" 920 :: "r" (CMD_STOP), "r" (LBCAST_ADDR + iomod_command)); 921 } else { 922 printf("rebooting..."); 923 DELAY(1000000); 924 __asm __volatile("stwas %0, 0(%1)" 925 :: "r" (CMD_RESET), "r" (LBCAST_ADDR + iomod_command)); 926 } 927 928 for(;;); /* loop while bus reset is comming up */ 929 /* NOTREACHED */ 930} 931 932u_long dumpmag = 0x8fca0101; /* magic number */ 933int dumpsize = 0; /* pages */ 934long dumplo = 0; /* blocks */ 935 936/* 937 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers. 938 */ 939int 940cpu_dumpsize() 941{ 942 int size; 943 944 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)); 945 if (roundup(size, dbtob(1)) != dbtob(1)) 946 return -1; 947 948 return 1; 949} 950 951/* 952 * Called from HPMC handler in locore 953 */ 954void 955hpmc_dump() 956{ 957 958} 959 960int 961cpu_dump() 962{ 963 long buf[dbtob(1) / sizeof (long)]; 964 kcore_seg_t *segp; 965 cpu_kcore_hdr_t *cpuhdrp; 966 967 segp = (kcore_seg_t *)buf; 968 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)]; 969 970 /* 971 * Generate a segment header. 972 */ 973 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 974 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp)); 975 976 /* 977 * Add the machine-dependent header info 978 */ 979 /* nothing for now */ 980 981 return (bdevsw[major(dumpdev)].d_dump) 982 (dumpdev, dumplo, (caddr_t)buf, dbtob(1)); 983} 984 985/* 986 * Dump the kernel's image to the swap partition. 
987 */ 988#define BYTES_PER_DUMP NBPG 989 990void 991dumpsys() 992{ 993 int psize, bytes, i, n; 994 register caddr_t maddr; 995 register daddr_t blkno; 996 register int (*dump) __P((dev_t, daddr_t, caddr_t, size_t)); 997 register int error; 998 999 /* Save registers 1000 savectx(&dumppcb); */ 1001 1002 if (dumpsize == 0) 1003 dumpconf(); 1004 if (dumplo <= 0) { 1005 printf("\ndump to dev %x not possible\n", dumpdev); 1006 return; 1007 } 1008 printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo); 1009 1010 psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev); 1011 printf("dump "); 1012 if (psize == -1) { 1013 printf("area unavailable\n"); 1014 return; 1015 } 1016 1017 if (!(error = cpu_dump())) { 1018 1019 bytes = ctob(totalphysmem); 1020 maddr = NULL; 1021 blkno = dumplo + cpu_dumpsize(); 1022 dump = bdevsw[major(dumpdev)].d_dump; 1023 /* TODO block map the whole physical memory */ 1024 for (i = 0; i < bytes; i += n) { 1025 1026 /* Print out how many MBs we are to go. */ 1027 n = bytes - i; 1028 if (n && (n % (1024*1024)) == 0) 1029 printf("%d ", n / (1024 * 1024)); 1030 1031 /* Limit size for next transfer. 
*/ 1032 1033 if (n > BYTES_PER_DUMP) 1034 n = BYTES_PER_DUMP; 1035 1036 if ((error = (*dump)(dumpdev, blkno, maddr, n))) 1037 break; 1038 maddr += n; 1039 blkno += btodb(n); 1040 } 1041 } 1042 1043 switch (error) { 1044 case ENXIO: printf("device bad\n"); break; 1045 case EFAULT: printf("device not ready\n"); break; 1046 case EINVAL: printf("area improper\n"); break; 1047 case EIO: printf("i/o error\n"); break; 1048 case EINTR: printf("aborted from console\n"); break; 1049 case 0: printf("succeeded\n"); break; 1050 default: printf("error %d\n", error); break; 1051 } 1052} 1053 1054/* bcopy(), error on fault */ 1055int 1056kcopy(from, to, size) 1057 const void *from; 1058 void *to; 1059 size_t size; 1060{ 1061 register u_int oldh = curproc->p_addr->u_pcb.pcb_onfault; 1062 1063 curproc->p_addr->u_pcb.pcb_onfault = (u_int)©_on_fault; 1064 bcopy(from, to, size); 1065 curproc->p_addr->u_pcb.pcb_onfault = oldh; 1066 1067 return 0; 1068} 1069 1070int 1071copystr(src, dst, size, lenp) 1072 const void *src; 1073 void *dst; 1074 size_t size; 1075 size_t *lenp; 1076{ 1077 return spstrcpy(HPPA_SID_KERNEL, src, HPPA_SID_KERNEL, dst, size, lenp); 1078} 1079 1080int 1081copyinstr(src, dst, size, lenp) 1082 const void *src; 1083 void *dst; 1084 size_t size; 1085 size_t *lenp; 1086{ 1087 return spstrcpy(curproc->p_addr->u_pcb.pcb_space, src, 1088 HPPA_SID_KERNEL, dst, size, lenp); 1089} 1090 1091 1092int 1093copyoutstr(src, dst, size, lenp) 1094 const void *src; 1095 void *dst; 1096 size_t size; 1097 size_t *lenp; 1098{ 1099 return spstrcpy(HPPA_SID_KERNEL, src, 1100 curproc->p_addr->u_pcb.pcb_space, dst, size, lenp); 1101} 1102 1103 1104int 1105copyin(src, dst, size) 1106 const void *src; 1107 void *dst; 1108 size_t size; 1109{ 1110 return spcopy(curproc->p_addr->u_pcb.pcb_space, src, 1111 HPPA_SID_KERNEL, dst, size); 1112} 1113 1114int 1115copyout(src, dst, size) 1116 const void *src; 1117 void *dst; 1118 size_t size; 1119{ 1120 return spcopy(HPPA_SID_KERNEL, src, 1121 
curproc->p_addr->u_pcb.pcb_space, dst, size); 1122} 1123 1124/* 1125 * Set registers on exec. 1126 */ 1127void 1128setregs(p, pack, stack, retval) 1129 register struct proc *p; 1130 struct exec_package *pack; 1131 u_long stack; 1132 register_t *retval; 1133{ 1134 register struct trapframe *tf = p->p_md.md_regs; 1135 /* register struct pcb *pcb = &p->p_addr->u_pcb; */ 1136#ifdef DEBUG 1137 /*extern int pmapdebug;*/ 1138 /*pmapdebug = 13;*/ 1139 printf("setregs(%p, %p, %x, %p), ep=%x, cr30=%x\n", 1140 p, pack, stack, retval, pack->ep_entry, tf->tf_cr30); 1141#endif 1142 1143 tf->tf_iioq_tail = 4 + 1144 (tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER); 1145 tf->tf_rp = 0; 1146 tf->tf_arg0 = (u_long)PS_STRINGS; 1147 tf->tf_arg1 = tf->tf_arg2 = 0; /* XXX dynload stuff */ 1148 1149 /* setup terminal stack frame */ 1150 stack += HPPA_FRAME_SIZE; 1151 suword((caddr_t)(stack + HPPA_FRAME_PSP), 0); 1152 tf->tf_sp = stack; 1153 1154 retval[1] = 0; 1155} 1156 1157/* 1158 * Send an interrupt to process. 1159 */ 1160void 1161sendsig(catcher, sig, mask, code, type, val) 1162 sig_t catcher; 1163 int sig, mask; 1164 u_long code; 1165 int type; 1166 union sigval val; 1167{ 1168 struct proc *p = curproc; 1169 struct trapframe sf, *tf = p->p_md.md_regs; 1170 register_t sp = tf->tf_sp; 1171 1172#ifdef DEBUG 1173 if ((sigdebug | SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid)) 1174 printf("sendsig: %s[%d] sig %d catcher %p\n", 1175 p->p_comm, p->p_pid, sig, catcher); 1176#endif 1177 1178 sf = *tf; 1179 /* TODO send signal */ 1180 1181 if (copyout(&sf, (void *)sp, sizeof(sf))) 1182 sigexit(p, SIGILL); 1183} 1184 1185int 1186sys_sigreturn(p, v, retval) 1187 struct proc *p; 1188 void *v; 1189 register_t *retval; 1190{ 1191 /* TODO sigreturn */ 1192 return EINVAL; 1193} 1194 1195/* 1196 * machine dependent system variables. 
1197 */ 1198int 1199cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) 1200 int *name; 1201 u_int namelen; 1202 void *oldp; 1203 size_t *oldlenp; 1204 void *newp; 1205 size_t newlen; 1206 struct proc *p; 1207{ 1208 dev_t consdev; 1209 /* all sysctl names at this level are terminal */ 1210 if (namelen != 1) 1211 return (ENOTDIR); /* overloaded */ 1212 switch (name[0]) { 1213 case CPU_CONSDEV: 1214 if (cn_tab != NULL) 1215 consdev = cn_tab->cn_dev; 1216 else 1217 consdev = NODEV; 1218 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev, 1219 sizeof consdev)); 1220 default: 1221 return (EOPNOTSUPP); 1222 } 1223 /* NOTREACHED */ 1224} 1225 1226 1227/* 1228 * consinit: 1229 * initialize the system console. 1230 */ 1231void 1232consinit() 1233{ 1234 static int initted; 1235 1236 if (!initted) { 1237 initted++; 1238 cninit(); 1239 } 1240} 1241