arm32_kvminit.c revision 1.51
1/* $NetBSD: arm32_kvminit.c,v 1.51 2019/02/06 13:22:54 skrll Exp $ */ 2 3/* 4 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved. 5 * Written by Hiroyuki Bessho for Genetec Corporation. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The name of Genetec Corporation may not be used to endorse or 16 * promote products derived from this software without specific prior 17 * written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * Copyright (c) 2001 Wasabi Systems, Inc. 32 * All rights reserved. 33 * 34 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 3. All advertising materials mentioning features or use of this software 45 * must display the following acknowledgement: 46 * This product includes software developed for the NetBSD Project by 47 * Wasabi Systems, Inc. 48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 49 * or promote products derived from this software without specific prior 50 * written permission. 51 * 52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 * POSSIBILITY OF SUCH DAMAGE. 63 * 64 * Copyright (c) 1997,1998 Mark Brinicombe. 65 * Copyright (c) 1997,1998 Causality Limited. 66 * All rights reserved. 67 * 68 * Redistribution and use in source and binary forms, with or without 69 * modification, are permitted provided that the following conditions 70 * are met: 71 * 1. Redistributions of source code must retain the above copyright 72 * notice, this list of conditions and the following disclaimer. 73 * 2. Redistributions in binary form must reproduce the above copyright 74 * notice, this list of conditions and the following disclaimer in the 75 * documentation and/or other materials provided with the distribution. 76 * 3. All advertising materials mentioning features or use of this software 77 * must display the following acknowledgement: 78 * This product includes software developed by Mark Brinicombe 79 * for the NetBSD Project. 80 * 4. The name of the company nor the name of the author may be used to 81 * endorse or promote products derived from this software without specific 82 * prior written permission. 83 * 84 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 85 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 86 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 87 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 88 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 89 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 90 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 91 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 92 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 93 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 94 * SUCH DAMAGE. 95 * 96 * Copyright (c) 2007 Microsoft 97 * All rights reserved. 98 * 99 * Redistribution and use in source and binary forms, with or without 100 * modification, are permitted provided that the following conditions 101 * are met: 102 * 1. Redistributions of source code must retain the above copyright 103 * notice, this list of conditions and the following disclaimer. 104 * 2. Redistributions in binary form must reproduce the above copyright 105 * notice, this list of conditions and the following disclaimer in the 106 * documentation and/or other materials provided with the distribution. 107 * 3. All advertising materials mentioning features or use of this software 108 * must display the following acknowledgement: 109 * This product includes software developed by Microsoft 110 * 111 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 112 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 113 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 114 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT, 115 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 116 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 117 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 118 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 119 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 120 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 121 * SUCH DAMAGE. 122 */ 123 124#include "opt_arm_debug.h" 125#include "opt_arm_start.h" 126#include "opt_fdt.h" 127#include "opt_multiprocessor.h" 128 129#include <sys/cdefs.h> 130__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.51 2019/02/06 13:22:54 skrll Exp $"); 131 132#include <sys/param.h> 133#include <sys/device.h> 134#include <sys/kernel.h> 135#include <sys/reboot.h> 136#include <sys/bus.h> 137 138#include <dev/cons.h> 139 140#include <uvm/uvm_extern.h> 141 142#include <arm/locore.h> 143#include <arm/db_machdep.h> 144#include <arm/undefined.h> 145#include <arm/bootconfig.h> 146#include <arm/arm32/machdep.h> 147 148#if defined(FDT) 149#include <arch/evbarm/fdt/platform.h> 150#include <arm/fdt/arm_fdtvar.h> 151#endif 152 153#ifdef MULTIPROCESSOR 154#ifndef __HAVE_CPU_UAREA_ALLOC_IDLELWP 155#error __HAVE_CPU_UAREA_ALLOC_IDLELWP required to not waste pages for idlestack 156#endif 157#endif 158 159#ifdef VERBOSE_INIT_ARM 160#define VPRINTF(...) printf(__VA_ARGS__) 161#else 162#define VPRINTF(...) __nothing 163#endif 164 165struct bootmem_info bootmem_info; 166 167extern void *msgbufaddr; 168paddr_t msgbufphys; 169paddr_t physical_start; 170paddr_t physical_end; 171 172extern char etext[]; 173extern char __data_start[], _edata[]; 174extern char __bss_start[], __bss_end__[]; 175extern char _end[]; 176 177/* Page tables for mapping kernel VM */ 178#define KERNEL_L2PT_VMDATA_NUM 8 /* start with 32MB of KVM */ 179 180u_long kern_vtopdiff __attribute__((__section__(".data"))); 181 182void 183arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart) 184{ 185 struct bootmem_info * const bmi = &bootmem_info; 186 pv_addr_t *pv = bmi->bmi_freeblocks; 187 188 /* 189 * FDT/generic start fills in kern_vtopdiff early 190 */ 191#if defined(__HAVE_GENERIC_START) 192 extern char KERNEL_BASE_virt[]; 193 extern char ARM_BOOTSTRAP_LxPT[]; 194 195 VPRINTF("%s: kern_vtopdiff=%#lx\n", __func__, kern_vtopdiff); 196 197 vaddr_t kstartva = trunc_page((vaddr_t)KERNEL_BASE_virt); 198 vaddr_t kendva = round_page((vaddr_t)ARM_BOOTSTRAP_LxPT + L1_TABLE_SIZE); 199 200 kernelstart = KERN_VTOPHYS(kstartva); 201 202 VPRINTF("%s: kstartva=%#lx, kernelstart=%#lx\n", __func__, kstartva, kernelstart); 203#else 204 vaddr_t kendva = round_page((vaddr_t)_end); 205 206#if defined(KERNEL_BASE_VOFFSET) 207 kern_vtopdiff = KERNEL_BASE_VOFFSET; 208#else 209 KASSERT(memstart == kernelstart); 210 kern_vtopdiff = KERNEL_BASE + memstart; 211#endif 212#endif 213 paddr_t kernelend = KERN_VTOPHYS(kendva); 214 215 VPRINTF("%s: memstart=%#lx, memsize=%#lx\n", __func__, 216 memstart, memsize); 217 VPRINTF("%s: kernelstart=%#lx, kernelend=%#lx\n", __func__, 218 kernelstart, kernelend); 219 220 physical_start = bmi->bmi_start = memstart; 221 physical_end = bmi->bmi_end = memstart + memsize; 222#ifndef ARM_HAS_LPAE 223 if (physical_end == 0) { 224 physical_end = -PAGE_SIZE; 225 memsize -= PAGE_SIZE; 226 bmi->bmi_end -= PAGE_SIZE; 227 VPRINTF("%s: memsize shrunk by a page to avoid ending at 4GB\n", 228 __func__); 229 } 230#endif 231 physmem = memsize / PAGE_SIZE; 232 233 /* 234 * Let's record where the kernel lives. 235 */ 236 237 bmi->bmi_kernelstart = kernelstart; 238 bmi->bmi_kernelend = kernelend; 239 240#if defined(FDT) 241 fdt_add_reserved_memory_range(bmi->bmi_kernelstart, 242 bmi->bmi_kernelend - bmi->bmi_kernelstart); 243#endif 244 245 VPRINTF("%s: kernel phys start %#lx end %#lx\n", __func__, kernelstart, 246 kernelend); 247 248#if 0 249 // XXX Makes RPI abort 250 KASSERT((kernelstart & (L2_S_SEGSIZE - 1)) == 0); 251#endif 252 /* 253 * Now the rest of the free memory must be after the kernel. 254 */ 255 pv->pv_pa = bmi->bmi_kernelend; 256 pv->pv_va = KERN_PHYSTOV(pv->pv_pa); 257 pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend; 258 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE; 259 VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n", 260 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa, 261 pv->pv_pa + pv->pv_size - 1, pv->pv_va); 262 pv++; 263 264 /* 265 * Add a free block for any memory before the kernel. 266 */ 267 if (bmi->bmi_start < bmi->bmi_kernelstart) { 268 pv->pv_pa = bmi->bmi_start; 269 pv->pv_va = KERN_PHYSTOV(pv->pv_pa); 270 pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa; 271 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE; 272 VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n", 273 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa, 274 pv->pv_pa + pv->pv_size - 1, pv->pv_va); 275 pv++; 276 } 277 278 bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks; 279 280 SLIST_INIT(&bmi->bmi_freechunks); 281 SLIST_INIT(&bmi->bmi_chunks); 282} 283 284static bool 285concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv) 286{ 287 if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa 288 && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va 289 && acc_pv->pv_prot == pv->pv_prot 290 && acc_pv->pv_cache == pv->pv_cache) { 291#if 0 292 VPRINTF("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n", 293 __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size, 294 acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size); 295#endif 296 acc_pv->pv_size += pv->pv_size; 297 return true; 298 } 299 300 return false; 301} 302 303static void 304add_pages(struct bootmem_info *bmi, pv_addr_t *pv) 305{ 306 pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks); 307 while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) { 308 pv_addr_t * const pv0 = (*pvp); 309 KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa); 310 if (concat_pvaddr(pv0, pv)) { 311 VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n", 312 __func__, "appending", pv, 313 pv->pv_pa, pv->pv_pa + pv->pv_size - 1, 314 pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1); 315 pv = SLIST_NEXT(pv0, pv_list); 316 if (pv != NULL && concat_pvaddr(pv0, pv)) { 317 VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n", 318 __func__, "merging", pv, 319 pv->pv_pa, pv->pv_pa + pv->pv_size - 1, 320 pv0->pv_pa, 321 pv0->pv_pa + pv0->pv_size - pv->pv_size - 1); 322 SLIST_REMOVE_AFTER(pv0, pv_list); 323 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list); 324 } 325 return; 326 } 327 KASSERT(pv->pv_va != (*pvp)->pv_va); 328 pvp = &SLIST_NEXT(*pvp, pv_list); 329 } 330 KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va); 331 pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks); 332 KASSERT(new_pv != NULL); 333 SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list); 334 *new_pv = *pv; 335 SLIST_NEXT(new_pv, pv_list) = *pvp; 336 (*pvp) = new_pv; 337 338 VPRINTF("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ", 339 __func__, new_pv, new_pv->pv_pa, new_pv->pv_va, 340 new_pv->pv_size / PAGE_SIZE); 341 if (SLIST_NEXT(new_pv, pv_list)) { 342 VPRINTF("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa); 343 } else { 344 VPRINTF("at tail\n"); 345 } 346} 347 348static void 349valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages, 350 int prot, int cache, bool zero_p) 351{ 352 size_t nbytes = npages * PAGE_SIZE; 353 pv_addr_t *free_pv = bmi->bmi_freeblocks; 354 size_t free_idx = 0; 355 static bool l1pt_found; 356 357 KASSERT(npages > 0); 358 359 /* 360 * If we haven't allocated the kernel L1 page table and we are aligned 361 * at a L1 table boundary, alloc the memory for it. 362 */ 363 if (!l1pt_found 364 && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0 365 && free_pv->pv_size >= L1_TABLE_SIZE) { 366 l1pt_found = true; 367 VPRINTF(" l1pt"); 368 369 valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE, 370 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 371 add_pages(bmi, &kernel_l1pt); 372 } 373 374 while (nbytes > free_pv->pv_size) { 375 free_pv++; 376 free_idx++; 377 if (free_idx == bmi->bmi_nfreeblocks) { 378 panic("%s: could not allocate %zu bytes", 379 __func__, nbytes); 380 } 381 } 382 383 /* 384 * As we allocate the memory, make sure that we don't walk over 385 * our current first level translation table. 386 */ 387 KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa); 388 389#if defined(FDT) 390 fdt_add_reserved_memory_range(free_pv->pv_pa, nbytes); 391#endif 392 pv->pv_pa = free_pv->pv_pa; 393 pv->pv_va = free_pv->pv_va; 394 pv->pv_size = nbytes; 395 pv->pv_prot = prot; 396 pv->pv_cache = cache; 397 398 /* 399 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE 400 * just use PTE_CACHE. 401 */ 402 if (cache == PTE_PAGETABLE 403 && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt 404 && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt 405 && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt) 406 pv->pv_cache = PTE_CACHE; 407 408 free_pv->pv_pa += nbytes; 409 free_pv->pv_va += nbytes; 410 free_pv->pv_size -= nbytes; 411 if (free_pv->pv_size == 0) { 412 --bmi->bmi_nfreeblocks; 413 for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) { 414 free_pv[0] = free_pv[1]; 415 } 416 } 417 418 bmi->bmi_freepages -= npages; 419 420 if (zero_p) 421 memset((void *)pv->pv_va, 0, nbytes); 422} 423 424void 425arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase, 426 const struct pmap_devmap *devmap, bool mapallmem_p) 427{ 428 struct bootmem_info * const bmi = &bootmem_info; 429#ifdef MULTIPROCESSOR 430 const size_t cpu_num = arm_cpu_max; 431#else 432 const size_t cpu_num = 1; 433#endif 434 435#ifdef ARM_HAS_VBAR 436 const bool map_vectors_p = false; 437#elif defined(CPU_ARMV7) || defined(CPU_ARM11) 438 const bool map_vectors_p = vectors == ARM_VECTORS_HIGH 439 || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0; 440#else 441 const bool map_vectors_p = true; 442#endif 443 444#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS 445 KASSERT(mapallmem_p); 446#ifdef ARM_MMU_EXTENDED 447 /* 448 * The direct map VA space ends at the start of the kernel VM space. 449 */ 450 pmap_directlimit = kernel_vm_base; 451#else 452 KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start); 453#endif /* ARM_MMU_EXTENDED */ 454#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */ 455 456 /* 457 * Calculate the number of L2 pages needed for mapping the 458 * kernel + data + stuff. Assume 2 L2 pages for kernel, 1 for vectors, 459 * and 1 for IO 460 */ 461 size_t kernel_size = bmi->bmi_kernelend; 462 kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE); 463 kernel_size += L1_TABLE_SIZE_REAL; 464 kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM; 465 if (map_vectors_p) { 466 kernel_size += PAGE_SIZE; /* L2PT for VECTORS */ 467 } 468 if (iovbase) { 469 kernel_size += PAGE_SIZE; /* L2PT for IO */ 470 } 471 kernel_size += 472 cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE 473 + UND_STACK_SIZE + UPAGES) * PAGE_SIZE; 474 kernel_size += round_page(MSGBUFSIZE); 475 kernel_size += 0x10000; /* slop */ 476 if (!mapallmem_p) { 477 kernel_size += PAGE_SIZE 478 * ((kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE); 479 } 480 kernel_size = round_page(kernel_size); 481 482 /* 483 * Now we know how many L2 pages it will take. 484 */ 485 const size_t KERNEL_L2PT_KERNEL_NUM = 486 round_page(kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE; 487 488 VPRINTF("%s: %zu L2 pages are needed to map %#zx kernel bytes\n", 489 __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size); 490 491 KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts)); 492 pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts; 493 pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM; 494 pv_addr_t msgbuf; 495 pv_addr_t text; 496 pv_addr_t data; 497 pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM+KERNEL_L2PT_VMDATA_NUM+11]; 498#if ARM_MMU_XSCALE == 1 499 pv_addr_t minidataclean; 500#endif 501 502 /* 503 * We need to allocate some fixed page tables to get the kernel going. 504 * 505 * We are going to allocate our bootstrap pages from the beginning of 506 * the free space that we just calculated. We allocate one page 507 * directory and a number of page tables and store the physical 508 * addresses in the bmi_l2pts array in bootmem_info. 509 * 510 * The kernel page directory must be on a 16K boundary. The page 511 * tables must be on 4K boundaries. What we do is allocate the 512 * page directory on the first 16K boundary that we encounter, and 513 * the page tables on 4K boundaries otherwise. Since we allocate 514 * at least 3 L2 page tables, we are guaranteed to encounter at 515 * least one 16K aligned region. 516 */ 517 518 VPRINTF("%s: allocating page tables for", __func__); 519 for (size_t i = 0; i < __arraycount(chunks); i++) { 520 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list); 521 } 522 523 kernel_l1pt.pv_pa = 0; 524 kernel_l1pt.pv_va = 0; 525 526 /* 527 * Allocate the L2 pages, but if we get to a page that is aligned for 528 * an L1 page table, we will allocate the pages for it first and then 529 * allocate the L2 page. 530 */ 531 532 if (map_vectors_p) { 533 /* 534 * First allocate L2 page for the vectors. 535 */ 536 VPRINTF(" vector"); 537 valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1, 538 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 539 add_pages(bmi, &bmi->bmi_vector_l2pt); 540 } 541 542 /* 543 * Now allocate L2 pages for the kernel 544 */ 545 VPRINTF(" kernel"); 546 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) { 547 valloc_pages(bmi, &kernel_l2pt[idx], 1, 548 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 549 add_pages(bmi, &kernel_l2pt[idx]); 550 } 551 552 /* 553 * Now allocate L2 pages for the initial kernel VA space. 554 */ 555 VPRINTF(" vm"); 556 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) { 557 valloc_pages(bmi, &vmdata_l2pt[idx], 1, 558 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 559 add_pages(bmi, &vmdata_l2pt[idx]); 560 } 561 562 /* 563 * If someone wanted a L2 page for I/O, allocate it now. 564 */ 565 if (iovbase) { 566 VPRINTF(" io"); 567 valloc_pages(bmi, &bmi->bmi_io_l2pt, 1, 568 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 569 add_pages(bmi, &bmi->bmi_io_l2pt); 570 } 571 572 VPRINTF("%s: allocating stacks\n", __func__); 573 574 /* Allocate stacks for all modes and CPUs */ 575 valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num, 576 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 577 add_pages(bmi, &abtstack); 578 valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num, 579 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 580 add_pages(bmi, &fiqstack); 581 valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num, 582 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 583 add_pages(bmi, &irqstack); 584 valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num, 585 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 586 add_pages(bmi, &undstack); 587 valloc_pages(bmi, &idlestack, UPAGES * cpu_num, /* SVC32 */ 588 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 589 add_pages(bmi, &idlestack); 590 valloc_pages(bmi, &kernelstack, UPAGES, /* SVC32 */ 591 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 592 add_pages(bmi, &kernelstack); 593 594 /* Allocate the message buffer from the end of memory. */ 595 const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE; 596 valloc_pages(bmi, &msgbuf, msgbuf_pgs, 597 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false); 598 add_pages(bmi, &msgbuf); 599 msgbufphys = msgbuf.pv_pa; 600 msgbufaddr = (void *)msgbuf.pv_va; 601 602 if (map_vectors_p) { 603 /* 604 * Allocate a page for the system vector page. 605 * This page will just contain the system vectors and can be 606 * shared by all processes. 607 */ 608 VPRINTF(" vector"); 609 610 valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, 611 PTE_CACHE, true); 612 } 613 systempage.pv_va = vectors; 614 615 /* 616 * If the caller needed a few extra pages for some reason, allocate 617 * them now. 618 */ 619#if ARM_MMU_XSCALE == 1 620#if (ARM_NMMUS > 1) 621 if (xscale_use_minidata) 622#endif 623 valloc_pages(bmi, &minidataclean, 1, 624 VM_PROT_READ|VM_PROT_WRITE, 0, true); 625#endif 626 627 /* 628 * Ok we have allocated physical pages for the primary kernel 629 * page tables and stacks. Let's just confirm that. 630 */ 631 if (kernel_l1pt.pv_va == 0 632 && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0)) 633 panic("%s: Failed to allocate or align the kernel " 634 "page directory", __func__); 635 636 VPRINTF("Creating L1 page table at 0x%08lx/0x%08lx\n", 637 kernel_l1pt.pv_va, kernel_l1pt.pv_pa); 638 639 /* 640 * Now we start construction of the L1 page table 641 * We start by mapping the L2 page tables into the L1. 642 * This means that we can replace L1 mappings later on if necessary 643 */ 644 vaddr_t l1pt_va = kernel_l1pt.pv_va; 645 paddr_t l1pt_pa = kernel_l1pt.pv_pa; 646 647 if (map_vectors_p) { 648 /* Map the L2 pages tables in the L1 page table */ 649 pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE, 650 &bmi->bmi_vector_l2pt); 651 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) " 652 "for VA %#lx\n (vectors)", 653 __func__, bmi->bmi_vector_l2pt.pv_va, 654 bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va); 655 } 656 657 /* 658 * This enforces a alignment requirement of L2_S_SEGSIZE for kernel 659 * start PA 660 */ 661 const vaddr_t kernel_base = 662 KERN_PHYSTOV(bmi->bmi_kernelstart & -L2_S_SEGSIZE); 663 664 VPRINTF("%s: kernel_base %lx KERNEL_L2PT_KERNEL_NUM %zu\n", __func__, 665 kernel_base, KERNEL_L2PT_KERNEL_NUM); 666 667 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) { 668 pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE, 669 &kernel_l2pt[idx]); 670 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n", 671 __func__, kernel_l2pt[idx].pv_va, 672 kernel_l2pt[idx].pv_pa, kernel_base + idx * L2_S_SEGSIZE); 673 } 674 675 VPRINTF("%s: kernel_vm_base %lx KERNEL_L2PT_VMDATA_NUM %d\n", __func__, 676 kernel_vm_base, KERNEL_L2PT_VMDATA_NUM); 677 678 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) { 679 pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE, 680 &vmdata_l2pt[idx]); 681 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n", 682 __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa, 683 kernel_vm_base + idx * L2_S_SEGSIZE); 684 } 685 if (iovbase) { 686 pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE, &bmi->bmi_io_l2pt); 687 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n", 688 __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa, 689 iovbase & -L2_S_SEGSIZE); 690 } 691 692 /* update the top of the kernel VM */ 693 pmap_curmaxkvaddr = 694 kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE); 695 696 // This could be done earlier and then the kernel data and pages 697 // allocated above would get merged (concatentated) 698 699 VPRINTF("Mapping kernel\n"); 700 701 extern char etext[]; 702 size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart; 703 size_t textsize = KERN_VTOPHYS((uintptr_t)etext) - bmi->bmi_kernelstart; 704 705 textsize = (textsize + PGOFSET) & ~PGOFSET; 706 707 /* start at offset of kernel in RAM */ 708 709 text.pv_pa = bmi->bmi_kernelstart; 710 text.pv_va = KERN_PHYSTOV(bmi->bmi_kernelstart); 711 text.pv_size = textsize; 712 text.pv_prot = VM_PROT_READ | VM_PROT_EXECUTE; 713 text.pv_cache = PTE_CACHE; 714 715 VPRINTF("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n", 716 __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va); 717 718 add_pages(bmi, &text); 719 720 data.pv_pa = text.pv_pa + textsize; 721 data.pv_va = text.pv_va + textsize; 722 data.pv_size = totalsize - textsize; 723 data.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 724 data.pv_cache = PTE_CACHE; 725 726 VPRINTF("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n", 727 __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va); 728 729 add_pages(bmi, &data); 730 731 VPRINTF("Listing Chunks\n"); 732 733 pv_addr_t *lpv; 734 SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) { 735 VPRINTF("%s: pv %p: chunk VA %#lx..%#lx " 736 "(PA %#lx, prot %d, cache %d)\n", 737 __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1, 738 lpv->pv_pa, lpv->pv_prot, lpv->pv_cache); 739 } 740 VPRINTF("\nMapping Chunks\n"); 741 742 pv_addr_t cur_pv; 743 pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks); 744 if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) { 745 cur_pv = *pv; 746 KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va); 747 pv = SLIST_NEXT(pv, pv_list); 748 } else { 749 cur_pv.pv_va = KERNEL_BASE; 750 cur_pv.pv_pa = KERN_VTOPHYS(cur_pv.pv_va); 751 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa; 752 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 753 cur_pv.pv_cache = PTE_CACHE; 754 } 755 while (pv != NULL) { 756 if (mapallmem_p) { 757 if (concat_pvaddr(&cur_pv, pv)) { 758 pv = SLIST_NEXT(pv, pv_list); 759 continue; 760 } 761 if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) { 762 /* 763 * See if we can extend the current pv to emcompass the 764 * hole, and if so do it and retry the concatenation. 765 */ 766 if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE) 767 && cur_pv.pv_cache == PTE_CACHE) { 768 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va; 769 continue; 770 } 771 772 /* 773 * We couldn't so emit the current chunk and then 774 */ 775 VPRINTF("%s: mapping chunk VA %#lx..%#lx " 776 "(PA %#lx, prot %d, cache %d)\n", 777 __func__, 778 cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 779 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 780 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 781 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 782 783 /* 784 * set the current chunk to the hole and try again. 785 */ 786 cur_pv.pv_pa += cur_pv.pv_size; 787 cur_pv.pv_va += cur_pv.pv_size; 788 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va; 789 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 790 cur_pv.pv_cache = PTE_CACHE; 791 continue; 792 } 793 } 794 795 /* 796 * The new pv didn't concatenate so emit the current one 797 * and use the new pv as the current pv. 798 */ 799 VPRINTF("%s: mapping chunk VA %#lx..%#lx " 800 "(PA %#lx, prot %d, cache %d)\n", 801 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 802 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 803 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 804 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 805 cur_pv = *pv; 806 pv = SLIST_NEXT(pv, pv_list); 807 } 808 809 /* 810 * If we are mapping all of memory, let's map the rest of memory. 811 */ 812 if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) { 813 if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE) 814 && cur_pv.pv_cache == PTE_CACHE) { 815 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa; 816 } else { 817 KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base, 818 "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size, 819 kernel_vm_base); 820 VPRINTF("%s: mapping chunk VA %#lx..%#lx " 821 "(PA %#lx, prot %d, cache %d)\n", 822 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 823 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 824 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 825 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 826 cur_pv.pv_pa += cur_pv.pv_size; 827 cur_pv.pv_va += cur_pv.pv_size; 828 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa; 829 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 830 cur_pv.pv_cache = PTE_CACHE; 831 } 832 } 833 834 /* 835 * The amount we can direct map is limited by the start of the 836 * virtual part of the kernel address space. Don't overrun 837 * into it. 838 */ 839 if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) { 840 cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va; 841 } 842 843 /* 844 * Now we map the final chunk. 845 */ 846 VPRINTF("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n", 847 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 848 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 849 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 850 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 851 852 /* 853 * Now we map the stuff that isn't directly after the kernel 854 */ 855 if (map_vectors_p) { 856 /* Map the vector page. */ 857 pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa, 858 VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE); 859 } 860 861 /* Map the Mini-Data cache clean area. */ 862#if ARM_MMU_XSCALE == 1 863#if (ARM_NMMUS > 1) 864 if (xscale_use_minidata) 865#endif 866 xscale_setup_minidata(l1pt_va, minidataclean.pv_va, 867 minidataclean.pv_pa); 868#endif 869 870 /* 871 * Map integrated peripherals at same address in first level page 872 * table so that we can continue to use console. 873 */ 874 if (devmap) 875 pmap_devmap_bootstrap(l1pt_va, devmap); 876 877 /* Tell the user about where all the bits and pieces live. */ 878 VPRINTF("%22s Physical Virtual Num\n", " "); 879 VPRINTF("%22s Starting Ending Starting Ending Pages\n", " "); 880 881#ifdef VERBOSE_INIT_ARM 882 static const char mem_fmt[] = 883 "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n"; 884 static const char mem_fmt_nov[] = 885 "%20s: 0x%08lx 0x%08lx %zu\n"; 886#endif 887 888#if 0 889 // XXX Doesn't make sense if kernel not at bottom of RAM 890 VPRINTF(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1, 891 KERN_PHYSTOV(bmi->bmi_start), KERN_PHYSTOV(bmi->bmi_end - 1), 892 (int)physmem); 893#endif 894 VPRINTF(mem_fmt, "text section", 895 text.pv_pa, text.pv_pa + text.pv_size - 1, 896 text.pv_va, text.pv_va + text.pv_size - 1, 897 (int)(text.pv_size / PAGE_SIZE)); 898 VPRINTF(mem_fmt, "data section", 899 KERN_VTOPHYS((vaddr_t)__data_start), KERN_VTOPHYS((vaddr_t)_edata), 900 (vaddr_t)__data_start, (vaddr_t)_edata, 901 (int)((round_page((vaddr_t)_edata) 902 - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE)); 903 VPRINTF(mem_fmt, "bss section", 904 KERN_VTOPHYS((vaddr_t)__bss_start), KERN_VTOPHYS((vaddr_t)__bss_end__), 905 (vaddr_t)__bss_start, (vaddr_t)__bss_end__, 906 (int)((round_page((vaddr_t)__bss_end__) 907 - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE)); 908 VPRINTF(mem_fmt, "L1 page directory", 909 kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1, 910 kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1, 911 L1_TABLE_SIZE / PAGE_SIZE); 912 VPRINTF(mem_fmt, "ABT stack (CPU 0)", 913 abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1, 914 abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1, 915 ABT_STACK_SIZE); 916 VPRINTF(mem_fmt, "FIQ stack (CPU 0)", 917 fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1, 918 fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1, 919 FIQ_STACK_SIZE); 920 VPRINTF(mem_fmt, "IRQ stack (CPU 0)", 921 irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1, 922 irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1, 923 IRQ_STACK_SIZE); 924 VPRINTF(mem_fmt, "UND stack (CPU 0)", 925 undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1, 926 undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1, 927 UND_STACK_SIZE); 928 VPRINTF(mem_fmt, "IDLE stack (CPU 0)", 929 idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1, 930 idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1, 931 UPAGES); 932 VPRINTF(mem_fmt, "SVC stack", 933 kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1, 934 kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1, 935 UPAGES); 936 VPRINTF(mem_fmt, "Message Buffer", 937 msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1, 938 msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1, 939 (int)msgbuf_pgs); 940 if (map_vectors_p) { 941 VPRINTF(mem_fmt, "Exception Vectors", 942 systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1, 943 systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1, 944 1); 945 } 946 for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) { 947 pv = &bmi->bmi_freeblocks[i]; 948 949 VPRINTF(mem_fmt_nov, "Free Memory", 950 pv->pv_pa, pv->pv_pa + pv->pv_size - 1, 951 pv->pv_size / PAGE_SIZE); 952 } 953 /* 954 * Now we have the real page tables in place so we can switch to them. 955 * Once this is done we will be running with the REAL kernel page 956 * tables. 957 */ 958 959 VPRINTF("TTBR0=%#x", armreg_ttbr_read()); 960#ifdef _ARM_ARCH_6 961 VPRINTF(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x", 962 armreg_ttbr1_read(), armreg_ttbcr_read(), 963 armreg_contextidr_read()); 964#endif 965 VPRINTF("\n"); 966 967 /* Switch tables */ 968 VPRINTF("switching to new L1 page table @%#lx...\n", l1pt_pa); 969 970 cpu_ttb = l1pt_pa; 971 972 cpu_domains(DOMAIN_DEFAULT); 973 974 cpu_idcache_wbinv_all(); 975 976#ifdef __HAVE_GENERIC_START 977 978 /* 979 * Turn on caches and set SCTLR/ACTLR 980 */ 981 cpu_setup(boot_args); 982#endif 983 984 VPRINTF(" ttb"); 985 986#ifdef ARM_MMU_EXTENDED 987 /* 988 * TTBCR should have been initialized by the MD start code. 989 */ 990 KASSERT((armreg_contextidr_read() & 0xff) == 0); 991 KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N)); 992 /* 993 * Disable lookups via TTBR0 until there is an activated pmap. 994 */ 995 armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0); 996 cpu_setttb(l1pt_pa, KERNEL_PID); 997 arm_isb(); 998#else 999 cpu_setttb(l1pt_pa, true); 1000#endif 1001 1002 cpu_tlb_flushID(); 1003 1004#ifdef ARM_MMU_EXTENDED 1005 VPRINTF("\nsctlr=%#x actlr=%#x\n", 1006 armreg_sctlr_read(), armreg_auxctl_read()); 1007#else 1008 VPRINTF(" (TTBR0=%#x)", armreg_ttbr_read()); 1009#endif 1010 1011#ifdef MULTIPROCESSOR 1012#ifndef __HAVE_GENERIC_START 1013 /* 1014 * Kick the secondaries to load the TTB. After which they'll go 1015 * back to sleep to wait for the final kick so they will hatch. 1016 */ 1017 VPRINTF(" hatchlings"); 1018 cpu_boot_secondary_processors(); 1019#endif 1020#endif 1021 1022 VPRINTF(" OK\n"); 1023} 1024