machdep.c revision 276333
/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created : 17/09/94
 * Updated : 18/04/01 updated for new wscons
 */

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include "opt_timer.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/machdep.c 276333 2014-12-28 18:12:56Z ian $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/atags.h>
#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/devmap.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/physmem.h>
#include <machine/platform.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>
#include <machine/sysarch.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef DEBUG
#define	debugf(fmt, args...) printf(fmt, ##args)
#else
#define	debugf(fmt, args...)
#endif

struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

extern int *end;

#ifdef FDT
/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks etc.), rounded up to be divisible by 4.
 */
#define KERNEL_PT_MAX	78

static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

vm_paddr_t pmap_pa;

struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;

#endif

#if defined(LINUX_BOOT_ABI)
#define LBABI_MAX_BANKS	10

uint32_t board_id;
struct arm_lbabi_tag *atag_list;
char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
char atags[LBABI_MAX_COMMAND_LINE * 2];
uint32_t memstart[LBABI_MAX_BANKS];
uint32_t memsize[LBABI_MAX_BANKS];
uint32_t membanks;
#endif

static uint32_t board_revision;
/* hex representation of uint64_t */
static char board_serial[32];

SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
    &board_revision, 0, "Board revision");
SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
    board_serial, 0, "Board serial");

int vfp_exists;
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &vfp_exists, 0, "Floating point support enabled");

void
board_set_serial(uint64_t serial)
{

	snprintf(board_serial, sizeof(board_serial)-1,
	    "%016jx", serial);
}

void
board_set_revision(uint32_t revision)
{

	board_revision = revision;
}

void
sendsig(catcher, ksi, mask)
	sig_t catcher;
	ksiginfo_t *ksi;
	sigset_t *mask;
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	int onstack;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	onstack = sigonstack(tf->tf_usr_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (struct sigframe *)STACKALIGN(fp);
	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	frame.sf_uc.uc_stack = td->td_sigstk;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */

	tf->tf_r0 = sig;
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;

	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	    tf->tf_usr_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
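
/*
 * Descriptive note on the handoff above (not in the original source):
 * r0-r2 carry the signal number and the user-space addresses of the
 * siginfo and ucontext copies, r5 duplicates the ucontext pointer for the
 * libc trampoline, and the user lr is pointed at the signal trampoline
 * copied out just below PS_STRINGS, so that returning from the handler
 * runs the trampoline, which in turn enters sys_sigreturn() below.
 */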

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (int *) va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}
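
/*
 * Added note: each of the ARM_NVEC exception vectors in page0 is a single
 * instruction word (typically a load of the handler address) whose operand
 * lives in the matching slot of page0_data; that is why the copy above
 * moves one insn word plus one data word per vector and the icache sync
 * covers ARM_NVEC * 2 words.
 */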

static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#ifdef ARM_TP_ADDRESS
#ifndef ARM_CACHE_LOCK_ENABLE
	vm_page_t m;
#endif
#endif

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count),
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
	if (bootverbose) {
		arm_physmem_print_tables();
		arm_devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	vector_page_setprot(VM_PROT_READ);
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
	pmap_postinit();
#ifdef ARM_TP_ADDRESS
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	cpu_dcache_wb_range((uintptr_t)ptr, len);
#ifdef ARM_L2_PIPT
	cpu_l2cache_wb_range((uintptr_t)vtophys(ptr), len);
#else
	cpu_l2cache_wb_range((uintptr_t)ptr, len);
#endif
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
	spinlock_enter();
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_idleclock();
#endif
	if (!sched_runnable())
		cpu_sleep(0);
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_activeclock();
#endif
	spinlock_exit();
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
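
/*
 * Added note: when event timers are in use and the CPU is not busy,
 * cpu_idle() above brackets the sleep with cpu_idleclock() and
 * cpu_activeclock(), so an idle CPU does not keep taking periodic timer
 * interrupts only to find that nothing is runnable.
 */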

/*
 * Most ARM platforms don't need to do anything special to init their clocks
 * (they get initialized during normal device attachment), and by not defining
 * a cpu_initclocks() function they get this generic one.  Any platform that
 * needs to do something special can just provide their own implementation,
 * which will override this one due to the weak linkage.
 */
void
arm_generic_initclocks(void)
{

#ifndef NO_EVENTTIMERS
#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
#endif
}
__weak_reference(arm_generic_initclocks, cpu_initclocks);

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
	regs->r_sp = tf->tf_usr_sp;
	regs->r_lr = tf->tf_usr_lr;
	regs->r_pc = tf->tf_pc;
	regs->r_cpsr = tf->tf_spsr;
	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{

	bzero(regs, sizeof(*regs));
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
	tf->tf_usr_sp = regs->r_sp;
	tf->tf_usr_lr = regs->r_lr;
	tf->tf_pc = regs->r_pc;
	tf->tf_spsr &= ~PSR_FLAGS;
	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}


static int
ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t) v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;
	return proc_rwmem(td->td_proc, &uio);
}

static int
ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t) &v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	return proc_rwmem(td->td_proc, &uio);
}

int
ptrace_single_step(struct thread *td)
{
	struct proc *p;
	int error;

	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	p = td->td_proc;
	PROC_UNLOCK(p);
	error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
	    &td->td_md.md_ptrace_instr);
	if (error)
		goto out;
	error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
	    PTRACE_BREAKPOINT);
	if (error)
		td->td_md.md_ptrace_instr = 0;
	td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
out:
	PROC_LOCK(p);
	return (error);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	if (td->td_md.md_ptrace_instr) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}
	return (0);
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{

	td->td_frame->tf_pc = addr;
	return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}
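
/*
 * Added note: the spinlock_enter()/spinlock_exit() pair below disables IRQ
 * and FIQ only on the first (outermost) acquisition, stashing the previous
 * CPSR in the thread's MD area; nested acquisitions just bump the count,
 * and the saved state is restored when the count returns to zero.
 */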

void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(PSR_I | PSR_F);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}

/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
	tf->tf_svc_lr = 0x77777777;
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET)
		gr[_REG_R0] = 0;
	else
		gr[_REG_R0] = tf->tf_r0;
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_R12] = tf->tf_r12;
	gr[_REG_SP] = tf->tf_usr_sp;
	gr[_REG_LR] = tf->tf_usr_lr;
	gr[_REG_PC] = tf->tf_pc;
	gr[_REG_CPSR] = tf->tf_spsr;

	return (0);
}

/*
 * Set machine context.
 *
 * Note that the supplied CPSR is installed wholesale; callers that take
 * the context from an untrusted source (such as sys_sigreturn() below)
 * must validate it first.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (PSR_I | PSR_F)) != 0)
		return (EINVAL);
	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
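
/*
 * Added note: sys_sigreturn() re-checks the CPSR taken from user space;
 * only USR32 mode with IRQ and FIQ enabled is accepted, so a process
 * cannot use a forged ucontext to return to a privileged mode or with
 * interrupts masked.
 */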

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	pcb->pcb_regs.sf_r4 = tf->tf_r4;
	pcb->pcb_regs.sf_r5 = tf->tf_r5;
	pcb->pcb_regs.sf_r6 = tf->tf_r6;
	pcb->pcb_regs.sf_r7 = tf->tf_r7;
	pcb->pcb_regs.sf_r8 = tf->tf_r8;
	pcb->pcb_regs.sf_r9 = tf->tf_r9;
	pcb->pcb_regs.sf_r10 = tf->tf_r10;
	pcb->pcb_regs.sf_r11 = tf->tf_r11;
	pcb->pcb_regs.sf_r12 = tf->tf_r12;
	pcb->pcb_regs.sf_pc = tf->tf_pc;
	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
}

/*
 * Fake up a boot descriptor table
 */
vm_offset_t
fake_preload_metadata(struct arm_boot_params *abp __unused)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char*)&fake_preload[i++], "kernel");
	i += 1;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		db_fetch_ksymtab(zstart, zend);
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	return (lastaddr);
}
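
/*
 * Added note: the fake_preload array above mimics the loader(8) metadata
 * layout that preload_search_by_type() and MD_FETCH() expect: a sequence
 * of (tag, size, payload...) records terminated by a pair of zero words.
 * For example, the first record is MODINFO_NAME with the string "kernel"
 * as its payload.
 */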

void
pcpu0_init(void)
{
#if ARM_ARCH_6 || ARM_ARCH_7A || defined(CPU_MV_PJ4B)
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
#ifdef VFP
	PCPU_SET(cpu, 0);
#endif
}

#if defined(LINUX_BOOT_ABI)
vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)
{
	struct arm_lbabi_tag *walker;
	uint32_t revision;
	uint64_t serial;

	/*
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is atags or dtb pointer.  If all of these aren't satisfied,
	 * then punt.
	 */
	if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0))
		return 0;

	board_id = abp->abp_r1;
	walker = (struct arm_lbabi_tag *)
	    (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr);

	/* xxx - Need to also look for binary device tree */
	if (ATAG_TAG(walker) != ATAG_CORE)
		return 0;

	atag_list = walker;
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
		case ATAG_CORE:
			break;
		case ATAG_MEM:
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			break;
		case ATAG_INITRD2:
			break;
		case ATAG_SERIAL:
			serial = walker->u.tag_sn.low |
			    ((uint64_t)walker->u.tag_sn.high << 32);
			board_set_serial(serial);
			break;
		case ATAG_REVISION:
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			break;
		case ATAG_CMDLINE:
			/* XXX open question: Parse this for boothowto? */
			bcopy(walker->u.tag_cmd.command, linux_command_line,
			    ATAG_SIZE(walker));
			break;
		default:
			break;
		}
		walker = ATAG_NEXT(walker);
	}

	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));

	return fake_preload_metadata(abp);
}
#endif

#if defined(FREEBSD_BOOT_LOADER)
vm_offset_t
freebsd_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr = 0;
	void *mdp;
	void *kmdp;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	/*
	 * Mask metadata pointer: it is supposed to be on page boundary.  If
	 * the first argument (mdp) doesn't point to a valid address the
	 * bootloader must have passed us something other than the metadata
	 * ptr, so we give up.  Also give up if we cannot find the metadata
	 * section the loader creates that we get all this data out of.
	 */

	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
		return 0;
	preload_metadata = mdp;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		return 0;

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
#endif
	preload_addr_relocate = KERNVIRTADDR - abp->abp_physaddr;
	return lastaddr;
}
#endif

vm_offset_t
default_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr;

#if defined(LINUX_BOOT_ABI)
	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
#if defined(FREEBSD_BOOT_LOADER)
	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
	/* Fall back to hardcoded metadata. */
	lastaddr = fake_preload_metadata(abp);

	return lastaddr;
}

/*
 * Stub version of the boot parameter parsing routine.  We are
 * called early in initarm, before even VM has been initialized.
 * This routine needs to preserve any data that the boot loader
 * has passed in before the kernel starts to grow past the end
 * of the BSS, traditionally the place boot-loaders put this data.
 *
 * Since this is called so early, things that depend on the vm system
 * being set up (including access to some SoCs' serial ports) are not
 * yet available, so about all that can be done in this routine is to
 * copy the arguments.
 *
 * This is the default boot parameter parsing routine.  Individual
 * kernels/boards can override this weak function with one of their
 * own.  We just fake metadata...
 */
__weak_reference(default_parse_boot_param, parse_boot_param);

/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN | VFPSCR_FZ;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}

#ifdef FDT
static char *
kenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

static void
print_kenv(void)
{
	int len;
	char *cp;

	debugf("loader passed (static) kenv:\n");
	if (kern_envp == NULL) {
		debugf(" no env, null ptr\n");
		return;
	}
	debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);

	len = 0;
	for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
		debugf(" %x %s\n", (uint32_t)cp, cp);
}

void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	char *env;
	void *kmdp;
	u_int l1pagetable;
	int i, j, err_devmap, mem_regions_sz;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;

	memsize = 0;

	cpuinfo_init();
	set_cpufuncs();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/* Platform-specific initialisation */
	platform_probe_and_attach();

	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;
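
	/*
	 * Added worked example of the sizing above (illustrative only; the
	 * exact figure depends on sizeof(struct vm_page)): with 1 GB of RAM
	 * and 4 KB pages there are 262144 vm_page structures.  Assuming
	 * roughly 96 bytes each, vm_page_array needs about 24 MB, i.e. 24
	 * one-megabyte L1 sections, so l2size becomes 24 + 1 = 25, plus 3
	 * for the extra maps, rounded up to 28.
	 */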

	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np * PAGE_SIZE);					\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
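
	/*
	 * Added note: these are simple bump allocators over the physically
	 * contiguous free memory that follows the kernel image.
	 * alloc_pages() hands out 'np' zeroed pages starting at freemempos
	 * and advances it, while valloc_pages() additionally records the
	 * physical address of the allocation (the KVA adjusted by the
	 * kernel's virtual-to-physical delta) in the pv_addr structure.
	 */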

	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;

		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mapping and for the rest of initial kernel address
	 * space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	arm_devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = platform_lastaddr();

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup("");

	/*
	 * Only after the SoC registers block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	platform_gpio_init();

	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Exclude the kernel (and all the things we allocated which
	 * immediately follow the kernel) from the VM allocation pool but
	 * not from crash dumps.  virtual_avail is a global variable which
	 * tracks the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
#endif