/* machdep.c — FreeBSD head r291937 (sys/arm/arm/machdep.c) */
1184610Salfred/* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */ 2184610Salfred 3184610Salfred/*- 4184610Salfred * Copyright (c) 2004 Olivier Houchard 5184610Salfred * Copyright (c) 1994-1998 Mark Brinicombe. 6184610Salfred * Copyright (c) 1994 Brini. 7184610Salfred * All rights reserved. 8184610Salfred * 9184610Salfred * This code is derived from software written for Brini by Mark Brinicombe 10184610Salfred * 11184610Salfred * Redistribution and use in source and binary forms, with or without 12184610Salfred * modification, are permitted provided that the following conditions 13184610Salfred * are met: 14184610Salfred * 1. Redistributions of source code must retain the above copyright 15184610Salfred * notice, this list of conditions and the following disclaimer. 16184610Salfred * 2. Redistributions in binary form must reproduce the above copyright 17184610Salfred * notice, this list of conditions and the following disclaimer in the 18184610Salfred * documentation and/or other materials provided with the distribution. 19184610Salfred * 3. All advertising materials mentioning features or use of this software 20184610Salfred * must display the following acknowledgement: 21184610Salfred * This product includes software developed by Mark Brinicombe 22184610Salfred * for the NetBSD Project. 23184610Salfred * 4. The name of the company nor the name of the author may be used to 24184610Salfred * endorse or promote products derived from this software without specific 25184610Salfred * prior written permission. 26184610Salfred * 27184610Salfred * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 28184610Salfred * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 29184610Salfred * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
30184610Salfred * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 31184610Salfred * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 32184610Salfred * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 33203815Swkoszek * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34203815Swkoszek * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35184610Salfred * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36184610Salfred * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37184610Salfred * SUCH DAMAGE. 38184610Salfred * 39184610Salfred * Machine dependant functions for kernel setup 40189585Sthompsa * 41184610Salfred * Created : 17/09/94 42184610Salfred * Updated : 18/04/01 updated for new wscons 43184610Salfred */ 44184610Salfred 45184610Salfred#include "opt_compat.h" 46184610Salfred#include "opt_ddb.h" 47184610Salfred#include "opt_kstack_pages.h" 48184610Salfred#include "opt_platform.h" 49184610Salfred#include "opt_sched.h" 50184610Salfred#include "opt_timer.h" 51184610Salfred 52184610Salfred#include <sys/cdefs.h> 53184610Salfred__FBSDID("$FreeBSD: head/sys/arm/arm/machdep.c 291937 2015-12-07 12:20:26Z kib $"); 54184610Salfred 55184610Salfred#include <sys/param.h> 56184610Salfred#include <sys/proc.h> 57184610Salfred#include <sys/systm.h> 58184610Salfred#include <sys/bio.h> 59184610Salfred#include <sys/buf.h> 60184610Salfred#include <sys/bus.h> 61184610Salfred#include <sys/cons.h> 62184610Salfred#include <sys/cpu.h> 63184610Salfred#include <sys/efi.h> 64184610Salfred#include <sys/exec.h> 65184610Salfred#include <sys/imgact.h> 66184610Salfred#include <sys/kdb.h> 67184610Salfred#include <sys/kernel.h> 68184610Salfred#include <sys/ktr.h> 69184610Salfred#include <sys/linker.h> 70184610Salfred#include <sys/lock.h> 71184610Salfred#include <sys/malloc.h> 72184610Salfred#include <sys/msgbuf.h> 73184610Salfred#include <sys/mutex.h> 
74184610Salfred#include <sys/pcpu.h> 75184610Salfred#include <sys/ptrace.h> 76184610Salfred#include <sys/reboot.h> 77184610Salfred#include <sys/rwlock.h> 78184610Salfred#include <sys/sched.h> 79184610Salfred#include <sys/signalvar.h> 80184610Salfred#include <sys/syscallsubr.h> 81184610Salfred#include <sys/sysctl.h> 82184610Salfred#include <sys/sysent.h> 83184610Salfred#include <sys/sysproto.h> 84184610Salfred#include <sys/uio.h> 85184610Salfred#include <sys/vdso.h> 86184610Salfred 87184610Salfred#include <vm/vm.h> 88184610Salfred#include <vm/pmap.h> 89184610Salfred#include <vm/vm_map.h> 90184610Salfred#include <vm/vm_object.h> 91184610Salfred#include <vm/vm_page.h> 92184610Salfred#include <vm/vm_pager.h> 93184610Salfred 94184610Salfred#include <machine/acle-compat.h> 95184610Salfred#include <machine/armreg.h> 96184610Salfred#include <machine/atags.h> 97184610Salfred#include <machine/cpu.h> 98184610Salfred#include <machine/cpuinfo.h> 99184610Salfred#include <machine/db_machdep.h> 100184610Salfred#include <machine/devmap.h> 101184610Salfred#include <machine/frame.h> 102184610Salfred#include <machine/intr.h> 103184610Salfred#include <machine/machdep.h> 104184610Salfred#include <machine/md_var.h> 105184610Salfred#include <machine/metadata.h> 106184610Salfred#include <machine/pcb.h> 107184610Salfred#include <machine/physmem.h> 108184610Salfred#include <machine/platform.h> 109184610Salfred#include <machine/reg.h> 110184610Salfred#include <machine/trap.h> 111184610Salfred#include <machine/undefined.h> 112184610Salfred#include <machine/vfp.h> 113184610Salfred#include <machine/vmparam.h> 114184610Salfred#include <machine/sysarch.h> 115185087Salfred 116184610Salfred#ifdef FDT 117184610Salfred#include <dev/fdt/fdt_common.h> 118184610Salfred#include <dev/ofw/openfirm.h> 119184610Salfred#endif 120184610Salfred 121184610Salfred#ifdef DDB 122184610Salfred#include <ddb/ddb.h> 123184610Salfred 124184610Salfred#if __ARM_ARCH >= 6 125264637Shselasky#include <machine/cpu-v6.h> 
126264637Shselasky 127184610SalfredDB_SHOW_COMMAND(cp15, db_show_cp15) 128184610Salfred{ 129184610Salfred u_int reg; 130184610Salfred 131184610Salfred reg = cp15_midr_get(); 132184610Salfred db_printf("Cpu ID: 0x%08x\n", reg); 133184610Salfred reg = cp15_ctr_get(); 134184610Salfred db_printf("Current Cache Lvl ID: 0x%08x\n",reg); 135184610Salfred 136184610Salfred reg = cp15_sctlr_get(); 137184610Salfred db_printf("Ctrl: 0x%08x\n",reg); 138184610Salfred reg = cp15_actlr_get(); 139184610Salfred db_printf("Aux Ctrl: 0x%08x\n",reg); 140184610Salfred 141184610Salfred reg = cp15_id_pfr0_get(); 142184610Salfred db_printf("Processor Feat 0: 0x%08x\n", reg); 143184610Salfred reg = cp15_id_pfr1_get(); 144184610Salfred db_printf("Processor Feat 1: 0x%08x\n", reg); 145184610Salfred reg = cp15_id_dfr0_get(); 146184610Salfred db_printf("Debug Feat 0: 0x%08x\n", reg); 147184610Salfred reg = cp15_id_afr0_get(); 148184610Salfred db_printf("Auxiliary Feat 0: 0x%08x\n", reg); 149184610Salfred reg = cp15_id_mmfr0_get(); 150184610Salfred db_printf("Memory Model Feat 0: 0x%08x\n", reg); 151184610Salfred reg = cp15_id_mmfr1_get(); 152184610Salfred db_printf("Memory Model Feat 1: 0x%08x\n", reg); 153184610Salfred reg = cp15_id_mmfr2_get(); 154184610Salfred db_printf("Memory Model Feat 2: 0x%08x\n", reg); 155184610Salfred reg = cp15_id_mmfr3_get(); 156184610Salfred db_printf("Memory Model Feat 3: 0x%08x\n", reg); 157184610Salfred reg = cp15_ttbr_get(); 158184610Salfred db_printf("TTB0: 0x%08x\n", reg); 159184610Salfred} 160184610Salfred 161184610SalfredDB_SHOW_COMMAND(vtop, db_show_vtop) 162184610Salfred{ 163185290Salfred u_int reg; 164185290Salfred 165185290Salfred if (have_addr) { 166185290Salfred cp15_ats1cpr_set(addr); 167185290Salfred reg = cp15_par_get(); 168185290Salfred db_printf("Physical address reg: 0x%08x\n",reg); 169184610Salfred } else 170184610Salfred db_printf("show vtop <virt_addr>\n"); 171184610Salfred} 172184610Salfred#endif /* __ARM_ARCH >= 6 */ 173185290Salfred#endif 
/* DDB */ 174184610Salfred 175185290Salfred#ifdef DEBUG 176184610Salfred#define debugf(fmt, args...) printf(fmt, ##args) 177184610Salfred#else 178185290Salfred#define debugf(fmt, args...) 179184610Salfred#endif 180184610Salfred 181184610Salfredstruct pcpu __pcpu[MAXCPU]; 182184610Salfredstruct pcpu *pcpup = &__pcpu[0]; 183185290Salfred 184185290Salfredstatic struct trapframe proc0_tf; 185185290Salfreduint32_t cpu_reset_address = 0; 186185290Salfredint cold = 1; 187185290Salfredvm_offset_t vector_page; 188185290Salfred 189185290Salfredint (*_arm_memcpy)(void *, void *, int, int) = NULL; 190185290Salfredint (*_arm_bzero)(void *, int, int) = NULL; 191185290Salfredint _min_memcpy_size = 0; 192185290Salfredint _min_bzero_size = 0; 193185290Salfred 194185290Salfredextern int *end; 195185290Salfred 196185290Salfred#ifdef FDT 197185290Salfredvm_paddr_t pmap_pa; 198185290Salfred 199184610Salfred#ifdef ARM_NEW_PMAP 200184610Salfredvm_offset_t systempage; 201184610Salfredvm_offset_t irqstack; 202184610Salfredvm_offset_t undstack; 203185087Salfredvm_offset_t abtstack; 204184610Salfred#else 205184610Salfred/* 206184610Salfred * This is the number of L2 page tables required for covering max 207184610Salfred * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf, 208224085Shselasky * stacks etc.), uprounded to be divisible by 4. 
209224085Shselasky */ 210224085Shselasky#define KERNEL_PT_MAX 78 211224085Shselasky 212224085Shselaskystatic struct pv_addr kernel_pt_table[KERNEL_PT_MAX]; 213224085Shselasky 214184610Salfredstruct pv_addr systempage; 215185087Salfredstatic struct pv_addr msgbufpv; 216184610Salfredstruct pv_addr irqstack; 217184610Salfredstruct pv_addr undstack; 218184610Salfredstruct pv_addr abtstack; 219184610Salfredstatic struct pv_addr kernelstack; 220184610Salfred#endif 221184610Salfred#endif 222184610Salfred 223184610Salfred#if defined(LINUX_BOOT_ABI) 224185087Salfred#define LBABI_MAX_BANKS 10 225184610Salfred 226184610Salfreduint32_t board_id; 227184610Salfredstruct arm_lbabi_tag *atag_list; 228184610Salfredchar linux_command_line[LBABI_MAX_COMMAND_LINE + 1]; 229224085Shselaskychar atags[LBABI_MAX_COMMAND_LINE * 2]; 230224085Shselaskyuint32_t memstart[LBABI_MAX_BANKS]; 231224085Shselaskyuint32_t memsize[LBABI_MAX_BANKS]; 232224085Shselaskyuint32_t membanks; 233224085Shselasky#endif 234224085Shselasky 235184610Salfredstatic uint32_t board_revision; 236185087Salfred/* hex representation of uint64_t */ 237184610Salfredstatic char board_serial[32]; 238184610Salfred 239184610SalfredSYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes"); 240184610SalfredSYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD, 241184610Salfred &board_revision, 0, "Board revision"); 242184610SalfredSYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD, 243184610Salfred board_serial, 0, "Board serial"); 244184610Salfred 245184610Salfredint vfp_exists; 246185087SalfredSYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD, 247184610Salfred &vfp_exists, 0, "Floating point support enabled"); 248184610Salfred 249184610Salfredvoid 250224085Shselaskyboard_set_serial(uint64_t serial) 251224085Shselasky{ 252224085Shselasky 253224085Shselasky snprintf(board_serial, sizeof(board_serial)-1, 254224085Shselasky "%016jx", serial); 255224085Shselasky} 256184610Salfred 257185087Salfredvoid 
258184610Salfredboard_set_revision(uint32_t revision) 259184610Salfred{ 260184610Salfred 261184610Salfred board_revision = revision; 262185087Salfred} 263184610Salfred 264184610Salfredvoid 265184610Salfredsendsig(catcher, ksi, mask) 266184610Salfred sig_t catcher; 267224085Shselasky ksiginfo_t *ksi; 268224085Shselasky sigset_t *mask; 269224085Shselasky{ 270224085Shselasky struct thread *td; 271224085Shselasky struct proc *p; 272224085Shselasky struct trapframe *tf; 273184610Salfred struct sigframe *fp, frame; 274185087Salfred struct sigacts *psp; 275184610Salfred struct sysentvec *sysent; 276184610Salfred int onstack; 277184610Salfred int sig; 278184610Salfred int code; 279184610Salfred 280184610Salfred td = curthread; 281184610Salfred p = td->td_proc; 282184610Salfred PROC_LOCK_ASSERT(p, MA_OWNED); 283184610Salfred sig = ksi->ksi_signo; 284184610Salfred code = ksi->ksi_code; 285184610Salfred psp = p->p_sigacts; 286184610Salfred mtx_assert(&psp->ps_mtx, MA_OWNED); 287184610Salfred tf = td->td_frame; 288184610Salfred onstack = sigonstack(tf->tf_usr_sp); 289184610Salfred 290184610Salfred CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, 291184610Salfred catcher, sig); 292184610Salfred 293184610Salfred /* Allocate and validate space for the signal handler context. 
*/ 294184610Salfred if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && 295184610Salfred SIGISMEMBER(psp->ps_sigonstack, sig)) { 296184610Salfred fp = (struct sigframe *)(td->td_sigstk.ss_sp + 297184610Salfred td->td_sigstk.ss_size); 298184610Salfred#if defined(COMPAT_43) 299184610Salfred td->td_sigstk.ss_flags |= SS_ONSTACK; 300184610Salfred#endif 301184610Salfred } else 302184610Salfred fp = (struct sigframe *)td->td_frame->tf_usr_sp; 303184610Salfred 304184610Salfred /* make room on the stack */ 305184610Salfred fp--; 306184610Salfred 307184610Salfred /* make the stack aligned */ 308184610Salfred fp = (struct sigframe *)STACKALIGN(fp); 309184610Salfred /* Populate the siginfo frame. */ 310184610Salfred get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); 311184610Salfred frame.sf_si = ksi->ksi_info; 312184610Salfred frame.sf_uc.uc_sigmask = *mask; 313184610Salfred frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK ) 314184610Salfred ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; 315184610Salfred frame.sf_uc.uc_stack = td->td_sigstk; 316184610Salfred mtx_unlock(&psp->ps_mtx); 317184610Salfred PROC_UNLOCK(td->td_proc); 318184610Salfred 319184610Salfred /* Copy the sigframe out to the user's stack. */ 320184610Salfred if (copyout(&frame, fp, sizeof(*fp)) != 0) { 321184610Salfred /* Process has trashed its stack. Kill it. */ 322184610Salfred CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); 323184610Salfred PROC_LOCK(p); 324184610Salfred sigexit(td, SIGILL); 325184610Salfred } 326184610Salfred 327184610Salfred /* 328184610Salfred * Build context to run handler in. We invoke the handler 329184610Salfred * directly, only returning via the trampoline. Note the 330184610Salfred * trampoline version numbers are coordinated with machine- 331184610Salfred * dependent code in libc. 
332184610Salfred */ 333184610Salfred 334184610Salfred tf->tf_r0 = sig; 335184610Salfred tf->tf_r1 = (register_t)&fp->sf_si; 336184610Salfred tf->tf_r2 = (register_t)&fp->sf_uc; 337184610Salfred 338184610Salfred /* the trampoline uses r5 as the uc address */ 339184610Salfred tf->tf_r5 = (register_t)&fp->sf_uc; 340184610Salfred tf->tf_pc = (register_t)catcher; 341184610Salfred tf->tf_usr_sp = (register_t)fp; 342184610Salfred sysent = p->p_sysent; 343184610Salfred if (sysent->sv_sigcode_base != 0) 344184610Salfred tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base; 345184610Salfred else 346184610Salfred tf->tf_usr_lr = (register_t)(sysent->sv_psstrings - 347184610Salfred *(sysent->sv_szsigcode)); 348184610Salfred /* Set the mode to enter in the signal handler */ 349184610Salfred#if __ARM_ARCH >= 7 350184610Salfred if ((register_t)catcher & 1) 351184610Salfred tf->tf_spsr |= PSR_T; 352184610Salfred else 353184610Salfred tf->tf_spsr &= ~PSR_T; 354184610Salfred#endif 355184610Salfred 356184610Salfred CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr, 357184610Salfred tf->tf_usr_sp); 358184610Salfred 359184610Salfred PROC_LOCK(p); 360184610Salfred mtx_lock(&psp->ps_mtx); 361184610Salfred} 362184610Salfred 363184610Salfredstruct kva_md_info kmi; 364184610Salfred 365184610Salfred/* 366184610Salfred * arm32_vector_init: 367184610Salfred * 368184610Salfred * Initialize the vector page, and select whether or not to 369184610Salfred * relocate the vectors. 370184610Salfred * 371184610Salfred * NOTE: We expect the vector page to be mapped at its expected 372184610Salfred * destination. 
373184610Salfred */ 374184610Salfred 375184610Salfredextern unsigned int page0[], page0_data[]; 376184610Salfredvoid 377184610Salfredarm_vector_init(vm_offset_t va, int which) 378184610Salfred{ 379184610Salfred unsigned int *vectors = (int *) va; 380184610Salfred unsigned int *vectors_data = vectors + (page0_data - page0); 381184610Salfred int vec; 382184610Salfred 383184610Salfred /* 384184610Salfred * Loop through the vectors we're taking over, and copy the 385184610Salfred * vector's insn and data word. 386184610Salfred */ 387184610Salfred for (vec = 0; vec < ARM_NVEC; vec++) { 388184610Salfred if ((which & (1 << vec)) == 0) { 389184610Salfred /* Don't want to take over this vector. */ 390184610Salfred continue; 391184610Salfred } 392184610Salfred vectors[vec] = page0[vec]; 393184610Salfred vectors_data[vec] = page0_data[vec]; 394184610Salfred } 395184610Salfred 396184610Salfred /* Now sync the vectors. */ 397184610Salfred cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int)); 398184610Salfred 399184610Salfred vector_page = va; 400184610Salfred 401184610Salfred if (va == ARM_VECTORS_HIGH) { 402184610Salfred /* 403184610Salfred * Assume the MD caller knows what it's doing here, and 404184610Salfred * really does want the vector page relocated. 405184610Salfred * 406184610Salfred * Note: This has to be done here (and not just in 407184610Salfred * cpu_setup()) because the vector page needs to be 408184610Salfred * accessible *before* cpu_startup() is called. 409184610Salfred * Think ddb(9) ... 410184610Salfred * 411184610Salfred * NOTE: If the CPU control register is not readable, 412184610Salfred * this will totally fail! We'll just assume that 413184610Salfred * any system that has high vector support has a 414184610Salfred * readable CPU control register, for now. If we 415184610Salfred * ever encounter one that does not, we'll have to 416184610Salfred * rethink this. 
417184610Salfred */ 418184610Salfred cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC); 419184610Salfred } 420184610Salfred} 421184610Salfred 422184610Salfredstatic void 423184610Salfredcpu_startup(void *dummy) 424184610Salfred{ 425184610Salfred struct pcb *pcb = thread0.td_pcb; 426184610Salfred const unsigned int mbyte = 1024 * 1024; 427184610Salfred#ifdef ARM_TP_ADDRESS 428184610Salfred#ifndef ARM_CACHE_LOCK_ENABLE 429184610Salfred vm_page_t m; 430184610Salfred#endif 431184610Salfred#endif 432184610Salfred 433184610Salfred identify_arm_cpu(); 434184610Salfred 435184610Salfred vm_ksubmap_init(&kmi); 436184610Salfred 437184610Salfred /* 438184610Salfred * Display the RAM layout. 439184610Salfred */ 440184610Salfred printf("real memory = %ju (%ju MB)\n", 441184610Salfred (uintmax_t)arm32_ptob(realmem), 442184610Salfred (uintmax_t)arm32_ptob(realmem) / mbyte); 443184610Salfred printf("avail memory = %ju (%ju MB)\n", 444184610Salfred (uintmax_t)arm32_ptob(vm_cnt.v_free_count), 445184610Salfred (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte); 446184610Salfred if (bootverbose) { 447184610Salfred arm_physmem_print_tables(); 448184610Salfred arm_devmap_print_table(); 449184610Salfred } 450184610Salfred 451184610Salfred bufinit(); 452184610Salfred vm_pager_bufferinit(); 453184610Salfred pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack + 454184610Salfred USPACE_SVC_STACK_TOP; 455184610Salfred pmap_set_pcb_pagedir(pmap_kernel(), pcb); 456184610Salfred#ifndef ARM_NEW_PMAP 457184610Salfred vector_page_setprot(VM_PROT_READ); 458184610Salfred pmap_postinit(); 459184610Salfred#endif 460184610Salfred#ifdef ARM_TP_ADDRESS 461184610Salfred#ifdef ARM_CACHE_LOCK_ENABLE 462184610Salfred pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS); 463184610Salfred arm_lock_cache_line(ARM_TP_ADDRESS); 464184610Salfred#else 465184610Salfred m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO); 466184610Salfred pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m)); 467184610Salfred#endif 
468184610Salfred *(uint32_t *)ARM_RAS_START = 0; 469184610Salfred *(uint32_t *)ARM_RAS_END = 0xffffffff; 470184610Salfred#endif 471184610Salfred} 472184610Salfred 473184610SalfredSYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); 474184610Salfred 475184610Salfred/* 476184610Salfred * Flush the D-cache for non-DMA I/O so that the I-cache can 477184610Salfred * be made coherent later. 478184610Salfred */ 479184610Salfredvoid 480184610Salfredcpu_flush_dcache(void *ptr, size_t len) 481184610Salfred{ 482184610Salfred 483184610Salfred cpu_dcache_wb_range((uintptr_t)ptr, len); 484184610Salfred#ifdef ARM_L2_PIPT 485184610Salfred cpu_l2cache_wb_range((uintptr_t)vtophys(ptr), len); 486216431Skevlo#else 487216431Skevlo cpu_l2cache_wb_range((uintptr_t)ptr, len); 488216431Skevlo#endif 489216431Skevlo} 490216431Skevlo 491184610Salfred/* Get current clock frequency for the given cpu id. */ 492184610Salfredint 493184610Salfredcpu_est_clockrate(int cpu_id, uint64_t *rate) 494184610Salfred{ 495184610Salfred 496184610Salfred return (ENXIO); 497184610Salfred} 498184610Salfred 499184610Salfredvoid 500184610Salfredcpu_idle(int busy) 501184610Salfred{ 502184610Salfred 503184610Salfred CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu); 504184610Salfred spinlock_enter(); 505184610Salfred#ifndef NO_EVENTTIMERS 506184610Salfred if (!busy) 507184610Salfred cpu_idleclock(); 508184610Salfred#endif 509184610Salfred if (!sched_runnable()) 510184610Salfred cpu_sleep(0); 511184610Salfred#ifndef NO_EVENTTIMERS 512184610Salfred if (!busy) 513184610Salfred cpu_activeclock(); 514184610Salfred#endif 515184610Salfred spinlock_exit(); 516184610Salfred CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu); 517184610Salfred} 518184610Salfred 519184610Salfredint 520184610Salfredcpu_idle_wakeup(int cpu) 521184610Salfred{ 522184610Salfred 523184610Salfred return (0); 524184610Salfred} 525184610Salfred 526184610Salfred/* 527184610Salfred * Most ARM platforms don't need to do anything special to 
init their clocks 528184610Salfred * (they get intialized during normal device attachment), and by not defining a 529184610Salfred * cpu_initclocks() function they get this generic one. Any platform that needs 530184610Salfred * to do something special can just provide their own implementation, which will 531184610Salfred * override this one due to the weak linkage. 532184610Salfred */ 533184610Salfredvoid 534184610Salfredarm_generic_initclocks(void) 535184610Salfred{ 536184610Salfred 537184610Salfred#ifndef NO_EVENTTIMERS 538184610Salfred#ifdef SMP 539184610Salfred if (PCPU_GET(cpuid) == 0) 540184610Salfred cpu_initclocks_bsp(); 541184610Salfred else 542184610Salfred cpu_initclocks_ap(); 543184610Salfred#else 544184610Salfred cpu_initclocks_bsp(); 545184610Salfred#endif 546184610Salfred#endif 547184610Salfred} 548184610Salfred__weak_reference(arm_generic_initclocks, cpu_initclocks); 549184610Salfred 550184610Salfredint 551184610Salfredfill_regs(struct thread *td, struct reg *regs) 552184610Salfred{ 553184610Salfred struct trapframe *tf = td->td_frame; 554184610Salfred bcopy(&tf->tf_r0, regs->r, sizeof(regs->r)); 555184610Salfred regs->r_sp = tf->tf_usr_sp; 556184610Salfred regs->r_lr = tf->tf_usr_lr; 557184610Salfred regs->r_pc = tf->tf_pc; 558184610Salfred regs->r_cpsr = tf->tf_spsr; 559184610Salfred return (0); 560184610Salfred} 561184610Salfredint 562184610Salfredfill_fpregs(struct thread *td, struct fpreg *regs) 563184610Salfred{ 564184610Salfred bzero(regs, sizeof(*regs)); 565184610Salfred return (0); 566184610Salfred} 567184610Salfred 568184610Salfredint 569184610Salfredset_regs(struct thread *td, struct reg *regs) 570184610Salfred{ 571184610Salfred struct trapframe *tf = td->td_frame; 572184610Salfred 573184610Salfred bcopy(regs->r, &tf->tf_r0, sizeof(regs->r)); 574184610Salfred tf->tf_usr_sp = regs->r_sp; 575184610Salfred tf->tf_usr_lr = regs->r_lr; 576184610Salfred tf->tf_pc = regs->r_pc; 577184610Salfred tf->tf_spsr &= ~PSR_FLAGS; 578184610Salfred 
tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS; 579184610Salfred return (0); 580184610Salfred} 581184610Salfred 582184610Salfredint 583184610Salfredset_fpregs(struct thread *td, struct fpreg *regs) 584184610Salfred{ 585184610Salfred return (0); 586184610Salfred} 587184610Salfred 588184610Salfredint 589184610Salfredfill_dbregs(struct thread *td, struct dbreg *regs) 590184610Salfred{ 591184610Salfred return (0); 592184610Salfred} 593184610Salfredint 594184610Salfredset_dbregs(struct thread *td, struct dbreg *regs) 595184610Salfred{ 596184610Salfred return (0); 597184610Salfred} 598184610Salfred 599184610Salfred 600184610Salfredstatic int 601184610Salfredptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v) 602184610Salfred{ 603184610Salfred struct iovec iov; 604184610Salfred struct uio uio; 605184610Salfred 606184610Salfred PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED); 607184610Salfred iov.iov_base = (caddr_t) v; 608184610Salfred iov.iov_len = sizeof(u_int32_t); 609184610Salfred uio.uio_iov = &iov; 610184610Salfred uio.uio_iovcnt = 1; 611184610Salfred uio.uio_offset = (off_t)addr; 612184610Salfred uio.uio_resid = sizeof(u_int32_t); 613184610Salfred uio.uio_segflg = UIO_SYSSPACE; 614184610Salfred uio.uio_rw = UIO_READ; 615184610Salfred uio.uio_td = td; 616184610Salfred return proc_rwmem(td->td_proc, &uio); 617184610Salfred} 618184610Salfred 619184610Salfredstatic int 620184610Salfredptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v) 621184610Salfred{ 622184610Salfred struct iovec iov; 623184610Salfred struct uio uio; 624184610Salfred 625184610Salfred PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED); 626184610Salfred iov.iov_base = (caddr_t) &v; 627184610Salfred iov.iov_len = sizeof(u_int32_t); 628184610Salfred uio.uio_iov = &iov; 629184610Salfred uio.uio_iovcnt = 1; 630184610Salfred uio.uio_offset = (off_t)addr; 631184610Salfred uio.uio_resid = sizeof(u_int32_t); 632188678Sthompsa uio.uio_segflg = UIO_SYSSPACE; 633188678Sthompsa uio.uio_rw = UIO_WRITE; 
634188678Sthompsa uio.uio_td = td; 635188678Sthompsa return proc_rwmem(td->td_proc, &uio); 636188678Sthompsa} 637188678Sthompsa 638188678Sthompsastatic u_int 639188678Sthompsaptrace_get_usr_reg(void *cookie, int reg) 640188678Sthompsa{ 641188678Sthompsa int ret; 642184610Salfred struct thread *td = cookie; 643184610Salfred 644184610Salfred KASSERT(((reg >= 0) && (reg <= ARM_REG_NUM_PC)), 645184610Salfred ("reg is outside range")); 646184610Salfred 647184610Salfred switch(reg) { 648184610Salfred case ARM_REG_NUM_PC: 649184610Salfred ret = td->td_frame->tf_pc; 650184610Salfred break; 651184610Salfred case ARM_REG_NUM_LR: 652184610Salfred ret = td->td_frame->tf_usr_lr; 653184610Salfred break; 654184610Salfred case ARM_REG_NUM_SP: 655184610Salfred ret = td->td_frame->tf_usr_sp; 656184610Salfred break; 657184610Salfred default: 658184610Salfred ret = *((register_t*)&td->td_frame->tf_r0 + reg); 659184610Salfred break; 660184610Salfred } 661185087Salfred 662185087Salfred return (ret); 663184610Salfred} 664184610Salfred 665184610Salfredstatic u_int 666184610Salfredptrace_get_usr_int(void* cookie, vm_offset_t offset, u_int* val) 667184610Salfred{ 668184610Salfred struct thread *td = cookie; 669185087Salfred u_int error; 670185087Salfred 671184610Salfred error = ptrace_read_int(td, offset, val); 672184610Salfred 673184610Salfred return (error); 674184610Salfred} 675184610Salfred 676184610Salfred/** 677185087Salfred * This function parses current instruction opcode and decodes 678185087Salfred * any possible jump (change in PC) which might occur after 679184610Salfred * the instruction is executed. 680184610Salfred * 681184610Salfred * @param td Thread structure of analysed task 682184610Salfred * @param cur_instr Currently executed instruction 683184610Salfred * @param alt_next_address Pointer to the variable where 684184610Salfred * the destination address of the 685185087Salfred * jump instruction shall be stored. 
686185087Salfred * 687184610Salfred * @return <0> when jump is possible 688184610Salfred * <EINVAL> otherwise 689184610Salfred */ 690184610Salfredstatic int 691185087Salfredptrace_get_alternative_next(struct thread *td, uint32_t cur_instr, 692184610Salfred uint32_t *alt_next_address) 693184610Salfred{ 694184610Salfred int error; 695184610Salfred 696184610Salfred if (inst_branch(cur_instr) || inst_call(cur_instr) || 697184610Salfred inst_return(cur_instr)) { 698184610Salfred error = arm_predict_branch(td, cur_instr, td->td_frame->tf_pc, 699184610Salfred alt_next_address, ptrace_get_usr_reg, ptrace_get_usr_int); 700184610Salfred 701184610Salfred return (error); 702185087Salfred } 703184610Salfred 704184610Salfred return (EINVAL); 705184610Salfred} 706184610Salfred 707184610Salfredint 708184610Salfredptrace_single_step(struct thread *td) 709184610Salfred{ 710184610Salfred struct proc *p; 711184610Salfred int error, error_alt; 712184610Salfred uint32_t cur_instr, alt_next = 0; 713184610Salfred 714184610Salfred /* TODO: This needs to be updated for Thumb-2 */ 715185087Salfred if ((td->td_frame->tf_spsr & PSR_T) != 0) 716184610Salfred return (EINVAL); 717185087Salfred 718184610Salfred KASSERT(td->td_md.md_ptrace_instr == 0, 719185087Salfred ("Didn't clear single step")); 720184610Salfred KASSERT(td->td_md.md_ptrace_instr_alt == 0, 721185087Salfred ("Didn't clear alternative single step")); 722185087Salfred p = td->td_proc; 723185087Salfred PROC_UNLOCK(p); 724185087Salfred 725184610Salfred error = ptrace_read_int(td, td->td_frame->tf_pc, 726185087Salfred &cur_instr); 727185087Salfred if (error) 728185087Salfred goto out; 729185087Salfred 730185087Salfred error = ptrace_read_int(td, td->td_frame->tf_pc + INSN_SIZE, 731185087Salfred &td->td_md.md_ptrace_instr); 732185087Salfred if (error == 0) { 733185087Salfred error = ptrace_write_int(td, td->td_frame->tf_pc + INSN_SIZE, 734185087Salfred PTRACE_BREAKPOINT); 735185087Salfred if (error) { 736185087Salfred 
	/*
	 * NOTE(review): this chunk opens inside ptrace_single_step(); the
	 * head of that function lies before this view.  The code below
	 * records the original instruction and plants breakpoints at the
	 * predicted next PC(s); kept byte-for-byte.
	 */
			td->td_md.md_ptrace_instr = 0;
		} else {
			td->td_md.md_ptrace_addr = td->td_frame->tf_pc +
			    INSN_SIZE;
		}
	}

	/* Also trap the alternative (branch-taken) successor, if any. */
	error_alt = ptrace_get_alternative_next(td, cur_instr, &alt_next);
	if (error_alt == 0) {
		error_alt = ptrace_read_int(td, alt_next,
		    &td->td_md.md_ptrace_instr_alt);
		if (error_alt) {
			td->td_md.md_ptrace_instr_alt = 0;
		} else {
			error_alt = ptrace_write_int(td, alt_next,
			    PTRACE_BREAKPOINT);
			if (error_alt)
				td->td_md.md_ptrace_instr_alt = 0;
			else
				td->td_md.md_ptrace_addr_alt = alt_next;
		}
	}

out:
	PROC_LOCK(p);
	/* Success if at least one of the two breakpoints was planted. */
	return ((error != 0) && (error_alt != 0));
}

/*
 * Undo ptrace_single_step(): write the saved original instruction(s) back
 * over the breakpoint(s).  The process lock is dropped around the
 * user-space writes.  Returns EINVAL in Thumb mode (not yet supported).
 */
int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	/* TODO: This needs to be updated for Thumb-2 */
	if ((td->td_frame->tf_spsr & PSR_T) != 0)
		return (EINVAL);

	if (td->td_md.md_ptrace_instr != 0) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		/* Restore the instruction the primary breakpoint replaced. */
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}

	if (td->td_md.md_ptrace_instr_alt != 0) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		/* Restore the instruction at the alternative successor. */
		ptrace_write_int(td, td->td_md.md_ptrace_addr_alt,
		    td->td_md.md_ptrace_instr_alt);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr_alt = 0;
	}

	return (0);
}

/* Set the program counter in the debuggee's trapframe. */
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_pc = addr;
	return (0);
}

/* Nothing machine-dependent to do for per-CPU data on ARM. */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

/*
 * Enter a spinlock section: disable IRQ/FIQ on first entry and remember
 * the previous interrupt state; nested entries only bump the count.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(PSR_I | PSR_F);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

/*
 * Leave a spinlock section; restore the saved interrupt state only when
 * the outermost section is exited.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}

/*
 * Clear registers on exec: reset the trapframe so the new image starts
 * with a clean user register state at its entry point.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
	/* Poison the SVC-mode lr so a stray use is recognizable. */
	tf->tf_svc_lr = 0x77777777;
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
}

/*
 * Get machine context: copy the thread's trapframe into the mcontext
 * gregs.  With GET_MC_CLEAR_RET, r0 and the carry flag are cleared so a
 * restarted syscall appears to have returned 0 / no error.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET) {
		gr[_REG_R0] = 0;
		gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C;
	} else {
		gr[_REG_R0] = tf->tf_r0;
		gr[_REG_CPSR] = tf->tf_spsr;
	}
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_R12] = tf->tf_r12;
	gr[_REG_SP] = tf->tf_usr_sp;
	gr[_REG_LR] = tf->tf_usr_lr;
	gr[_REG_PC] = tf->tf_pc;

	return (0);
}

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (PSR_I | PSR_F)) != 0)
		return (EINVAL);
	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	/* Only callee-saved registers plus pc/lr/sp are needed for a trace. */
	pcb->pcb_regs.sf_r4 = tf->tf_r4;
	pcb->pcb_regs.sf_r5 = tf->tf_r5;
	pcb->pcb_regs.sf_r6 = tf->tf_r6;
	pcb->pcb_regs.sf_r7 = tf->tf_r7;
	pcb->pcb_regs.sf_r8 = tf->tf_r8;
	pcb->pcb_regs.sf_r9 = tf->tf_r9;
	pcb->pcb_regs.sf_r10 = tf->tf_r10;
	pcb->pcb_regs.sf_r11 = tf->tf_r11;
	pcb->pcb_regs.sf_r12 = tf->tf_r12;
	pcb->pcb_regs.sf_pc = tf->tf_pc;
	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
}

/*
 * Fake up a boot descriptor table
 */
vm_offset_t
fake_preload_metadata(struct arm_boot_params *abp __unused)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

	/*
	 * Build a minimal loader(8)-style metadata list describing the
	 * kernel, used when no boot loader supplied one.
	 */
	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char*)&fake_preload[i++], "kernel");
	i += 1;			/* "kernel" + NUL occupies 2 words total */
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i++], "elf kernel");
	i += 2;			/* "elf kernel" + NUL occupies 3 words total */
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	/*
	 * If the trampoline left symbol-table bounds right after the kernel
	 * magic word, publish them so DDB can find the symbols.
	 */
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		db_fetch_ksymtab(zstart, zend);
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;	/* list terminator */
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	return (lastaddr);
}

/*
 * Early per-CPU setup for the boot CPU: attach thread0 as curthread.
 */
void
pcpu0_init(void)
{
#if __ARM_ARCH >= 6
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
}

#if defined(LINUX_BOOT_ABI)
/*
 * Parse boot parameters passed per the Linux boot ABI (ATAG list in r2).
 * Returns the last used address on success, 0 if the registers do not
 * look like a Linux-style boot.
 */
vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)
{
	struct arm_lbabi_tag *walker;
	uint32_t revision;
	uint64_t serial;

	/*
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is atags or dtb pointer.  If all of these aren't satisfied,
	 * then punt.
	 */
	if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0))
		return 0;

	board_id = abp->abp_r1;
	/* Translate the physical ATAG pointer into our virtual mapping. */
	walker = (struct arm_lbabi_tag *)
	    (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr);

	/* xxx - Need to also look for binary device tree */
	if (ATAG_TAG(walker) != ATAG_CORE)
		return 0;

	atag_list = walker;
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
		case ATAG_CORE:
			break;
		case ATAG_MEM:
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			break;
		case ATAG_INITRD2:
			break;
		case ATAG_SERIAL:
			serial = walker->u.tag_sn.low |
			    ((uint64_t)walker->u.tag_sn.high << 32);
			board_set_serial(serial);
			break;
		case ATAG_REVISION:
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			break;
		case ATAG_CMDLINE:
			/* XXX open question: Parse this for boothowto? */
			bcopy(walker->u.tag_cmd.command, linux_command_line,
			    ATAG_SIZE(walker));
			break;
		default:
			break;
		}
		walker = ATAG_NEXT(walker);
	}

	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));

	return fake_preload_metadata(abp);
}
#endif

#if defined(FREEBSD_BOOT_LOADER)
/*
 * Parse boot parameters passed by the FreeBSD loader (metadata pointer
 * in r0).  Returns the kernel end address, or 0 if no usable metadata.
 */
vm_offset_t
freebsd_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr = 0;
	void *mdp;
	void *kmdp;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	/*
	 * Mask metadata pointer: it is supposed to be on page boundary. If
	 * the first argument (mdp) doesn't point to a valid address the
	 * bootloader must have passed us something else than the metadata
	 * ptr, so we give up.  Also give up if we cannot find metadata section
	 * the loader creates that we get all this data out of.
	 */

	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
		return 0;
	preload_metadata = mdp;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		return 0;

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
#endif
	return lastaddr;
}
#endif

/*
 * Try each supported boot-parameter flavor in turn, falling back to the
 * fabricated metadata when neither applies.
 */
vm_offset_t
default_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr;

#if defined(LINUX_BOOT_ABI)
	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
#if defined(FREEBSD_BOOT_LOADER)
	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
	/* Fall back to
 hardcoded metadata. */
	lastaddr = fake_preload_metadata(abp);

	return lastaddr;
}

/*
 * Stub version of the boot parameter parsing routine.  We are
 * called early in initarm, before even VM has been initialized.
 * This routine needs to preserve any data that the boot loader
 * has passed in before the kernel starts to grow past the end
 * of the BSS, traditionally the place boot-loaders put this data.
 *
 * Since this is called so early, things that depend on the vm system
 * being setup (including access to some SoC's serial ports), about
 * all that can be done in this routine is to copy the arguments.
 *
 * This is the default boot parameter parsing routine.  Individual
 * kernels/boards can override this weak function with one of their
 * own.  We just fake metadata...
 */
__weak_reference(default_parse_boot_param, parse_boot_param);

/*
 * Initialize proc0: link up thread0, place its PCB at the top of the
 * kernel stack and hook it into the per-CPU data.
 */
void
init_proc0(vm_offset_t kstack)
{
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	/* PCB lives at the very top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

/*
 * Decode an ARM (A32) instruction that writes the PC and compute the
 * address it will transfer control to.  fetch_reg/read_int are callbacks
 * supplied by the debugger to read registers and target memory.
 * Returns 0 (or read_int's error) with *new_pc set, EINVAL if the
 * instruction is not a recognized branch form.
 */
int
arm_predict_branch(void *cookie, u_int insn, register_t pc, register_t *new_pc,
    u_int (*fetch_reg)(void*, int), u_int (*read_int)(void*, vm_offset_t, u_int*))
{
	u_int addr, nregs, offset = 0;
	int error = 0;

	switch ((insn >> 24) & 0xf) {
	case 0x2:	/* add pc, reg1, #value */
	case 0x0:	/* add pc, reg1, reg2, lsl #offset */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		/* Reading the PC yields the instruction address + 8. */
		if (((insn >> 16) & 0xf) == 15)
			addr += 8;
		if (insn & 0x0200000) {
			/* Immediate operand: 8-bit value rotated right. */
			offset = (insn >> 7) & 0x1e;
			offset = (insn & 0xff) << (32 - offset) |
			    (insn & 0xff) >> offset;
		} else {

			/* Register operand, possibly shifted. */
			offset = fetch_reg(cookie, insn & 0x0f);
			if ((insn & 0x0000ff0) != 0x00000000) {
				if (insn & 0x10)
					nregs = fetch_reg(cookie,
					    (insn >> 8) & 0xf);
				else
					nregs = (insn >> 7) & 0x1f;
				switch ((insn >> 5) & 3) {
				case 0:
					/* lsl */
					offset = offset << nregs;
					break;
				case 1:
					/* lsr */
					offset = offset >> nregs;
					break;
				default:
					break; /* XXX */
				}

			}
			*new_pc = addr + offset;
			return (0);

		}

	case 0xa:	/* b ... */
	case 0xb:	/* bl ... */
		/* 24-bit signed word offset, sign-extended. */
		addr = ((insn << 2) & 0x03ffffff);
		if (addr & 0x02000000)
			addr |= 0xfc000000;
		*new_pc = (pc + 8 + addr);
		return (0);
	case 0x7:	/* ldr pc, [pc, reg, lsl #2] */
		addr = fetch_reg(cookie, insn & 0xf);
		addr = pc + 8 + (addr << 2);
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;
		return (error);
	case 0x1:	/* mov pc, reg */
		*new_pc = fetch_reg(cookie, insn & 0xf);
		return (0);
	case 0x4:
	case 0x5:	/* ldr pc, [reg] */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		/* ldr pc, [reg, #offset] */
		if (insn & (1 << 24))
			offset = insn & 0xfff;
		if (insn & 0x00800000)
			addr += offset;
		else
			addr -= offset;
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;

		return (error);
	case 0x8:	/* ldmxx reg, {..., pc} */
	case 0x9:
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		/* Popcount of the 16-bit register list (parallel sum). */
		nregs = (insn  & 0x5555) + ((insn  >> 1) & 0x5555);
		nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
		nregs = (nregs + (nregs >> 4)) & 0x0f0f;
		nregs = (nregs + (nregs >> 8)) & 0x001f;
		/* PC is highest-numbered, so it is loaded last. */
		switch ((insn >> 23) & 0x3) {
		case 0x0:	/* ldmda */
			addr = addr - 0;
			break;
		case 0x1:	/* ldmia */
			addr = addr + 0 + ((nregs - 1) << 2);
			break;
		case 0x2:	/* ldmdb */
			addr = addr - 4;
			break;
		case 0x3:	/* ldmib */
			addr = addr + 4 + ((nregs - 1) << 2);
			break;
		}
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;

		return (error);
	default:
		return (EINVAL);
	}
}

#ifdef ARM_NEW_PMAP
/*
 * Point the banked r13 of the IRQ/ABT/UND CPU modes at the top of the
 * per-CPU exception stacks (new-pmap flavor: stacks are plain VAs).
 */
void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#else
/*
 * Same as above for the old pmap, where the stacks are pv_addr structs.
 */
void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#endif

#ifdef EFI
#define efi_next_descriptor(ptr, size) \
	((struct efi_md *)(((uint8_t *) ptr) + size))

/*
 * Walk the UEFI memory map and convert usable entries into mem_region
 * records; returns the region count in *mrcnt and the byte total in
 * *memsize (both zeroed on a malformed map).
 */
static void
add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr,
    int *mrcnt, uint32_t *memsize)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz, memory_size;
	int ndesc, i, j;

	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode"
	};

	*mrcnt = 0;
	*memsize = 0;

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = roundup2(sizeof(struct efi_map_header), 0x10);
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	memory_size = 0;
	for (i = 0, j = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type <= EFI_MD_TYPE_PALCODE)
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012llx %12p %08llx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
1411 */ 1412 break; 1413 default: 1414 continue; 1415 } 1416 1417 j++; 1418 if (j >= FDT_MEM_REGIONS) 1419 break; 1420 1421 mr[j].mr_start = p->md_phys; 1422 mr[j].mr_size = p->md_pages * PAGE_SIZE; 1423 memory_size += mr[j].mr_size; 1424 } 1425 1426 *mrcnt = j; 1427 *memsize = memory_size; 1428} 1429#endif /* EFI */ 1430 1431#ifdef FDT 1432static char * 1433kenv_next(char *cp) 1434{ 1435 1436 if (cp != NULL) { 1437 while (*cp != 0) 1438 cp++; 1439 cp++; 1440 if (*cp == 0) 1441 cp = NULL; 1442 } 1443 return (cp); 1444} 1445 1446static void 1447print_kenv(void) 1448{ 1449 char *cp; 1450 1451 debugf("loader passed (static) kenv:\n"); 1452 if (kern_envp == NULL) { 1453 debugf(" no env, null ptr\n"); 1454 return; 1455 } 1456 debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp); 1457 1458 for (cp = kern_envp; cp != NULL; cp = kenv_next(cp)) 1459 debugf(" %x %s\n", (uint32_t)cp, cp); 1460} 1461 1462#ifndef ARM_NEW_PMAP 1463void * 1464initarm(struct arm_boot_params *abp) 1465{ 1466 struct mem_region mem_regions[FDT_MEM_REGIONS]; 1467 struct pv_addr kernel_l1pt; 1468 struct pv_addr dpcpu; 1469 vm_offset_t dtbp, freemempos, l2_start, lastaddr; 1470 uint32_t memsize, l2size; 1471 char *env; 1472 void *kmdp; 1473 u_int l1pagetable; 1474 int i, j, err_devmap, mem_regions_sz; 1475 1476 lastaddr = parse_boot_param(abp); 1477 arm_physmem_kernaddr = abp->abp_physaddr; 1478 1479 memsize = 0; 1480 1481 cpuinfo_init(); 1482 set_cpufuncs(); 1483 1484 /* 1485 * Find the dtb passed in by the boot loader. 1486 */ 1487 kmdp = preload_search_by_type("elf kernel"); 1488 if (kmdp != NULL) 1489 dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); 1490 else 1491 dtbp = (vm_offset_t)NULL; 1492 1493#if defined(FDT_DTB_STATIC) 1494 /* 1495 * In case the device tree blob was not retrieved (from metadata) try 1496 * to use the statically embedded one. 
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/* Platform-specific initialisation */
	platform_probe_and_attach();

	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np * PAGE_SIZE);					\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	/* The L1 table must be aligned to its own size. */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	/*
	 * Carve the L2 tables out of whole pages; tables after the first
	 * in a page just get addresses computed from that page.
	 */
	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;

		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, kstack_pages * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mapping and for the rest of initial kernel address
	 * space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	arm_devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = platform_lastaddr();

	/* Switch the MMU onto the freshly built tables. */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/*
	 * Only after the SOC registers block is mapped we can perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	platform_gpio_init();

	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps. virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
#else /* !ARM_NEW_PMAP */
/*
 * Machine-dependent startup for the new pmap: page-table construction is
 * delegated to pmap_bootstrap_prepare()/preboot allocators.  Returns the
 * initial kernel stack pointer for mi_startup().
 */
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	uint32_t memsize;
	char *env;
	void *kmdp;
	int err_devmap, mem_regions_sz;
#ifdef EFI
	struct efi_map_header *efihdr;
#endif

	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

	memsize = 0;
	set_cpufuncs();
	cpuinfo_init();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

#ifdef EFI
	/* Prefer the UEFI memory map over the device tree when present. */
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz,
		    &memsize);
	} else
#endif
	{
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    &memsize) != 0)
			panic("Cannot get physical memory regions");
	}
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/*
	 * Set TEX remapping registers.
	 * Setup kernel page tables and switch to kernel L1 page table.
	 */
	pmap_set_tex();
	pmap_bootstrap_prepare(lastaddr);

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/* Platform-specific initialisation */
	platform_probe_and_attach();
	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * Allocate a page for the system page mapped to 0xffff0000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	systempage = pmap_preboot_get_pages(1);

	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;

	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate stacks for all modes */
	irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU );
	kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);

	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);
	mutex_init();

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	arm_devmap_bootstrap(0, NULL);
	vm_max_kernel_address = platform_lastaddr();

	/*
	 * Only after the SOC registers block is mapped we can perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	/* Set stack for exception handlers */
	undefined_init();
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	enable_interrupts(PSR_A);
	pmap_bootstrap(0);

	/* Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps. virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	kdb_init();
	return ((void *)STACKALIGN(thread0.td_pcb));

}

#endif /* !ARM_NEW_PMAP */
#endif /* FDT */

/* Optional timer-driver hook used to fill the vdso timehands. */
uint32_t (*arm_cpu_fill_vdso_timehands)(struct vdso_timehands *,
    struct timecounter *);

/*
 * Fill the vdso timehands via the registered hook; returns 0 (no vdso
 * timekeeping support) when no hook is installed.
 */
uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	return (arm_cpu_fill_vdso_timehands != NULL ?
	    arm_cpu_fill_vdso_timehands(vdso_th, tc) : 0);
}