/*	$NetBSD: arm32_kvminit.c,v 1.61 2020/07/03 06:22:48 skrll Exp $	*/

/*
 * Copyright (c) 2002, 2003, 2005  Genetec Corporation.  All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_arm_debug.h"
#include "opt_arm_start.h"
#include "opt_fdt.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.61 2020/07/03 06:22:48 skrll Exp $");

#include <sys/param.h>

#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>

#include <arm/arm32/machdep.h>
#include <arm/bootconfig.h>
#include <arm/db_machdep.h>
#include <arm/locore.h>
#include <arm/undefined.h>

#if defined(FDT)
#include <arch/evbarm/fdt/platform.h>
#include <arm/fdt/arm_fdtvar.h>
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_CPU_UAREA_ALLOC_IDLELWP
#error __HAVE_CPU_UAREA_ALLOC_IDLELWP required to not waste pages for idlestack
#endif
#endif

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

struct bootmem_info bootmem_info;

extern void *msgbufaddr;
paddr_t msgbufphys;
paddr_t physical_start;
paddr_t physical_end;

extern char etext[];
extern char __data_start[], _edata[];
extern char __bss_start[], __bss_end__[];
extern char _end[];

/* Page tables for mapping kernel VM */
#define KERNEL_L2PT_VMDATA_NUM	8	/* start with 32MB of KVM */

u_long kern_vtopdiff __attribute__((__section__(".data")));
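
/*
 * A worked example, assuming the usual arm32 conversion macros
 * (KERN_VTOPHYS(va) being (va) - kern_vtopdiff and KERN_PHYSTOV(pa)
 * being (pa) + kern_vtopdiff) and the hypothetical values
 * KERNEL_BASE == 0x80000000 with RAM starting at memstart == 0:
 * arm32_bootmem_init() below ends up with kern_vtopdiff == 0x80000000,
 * so VA 0x80100000 converts to PA 0x00100000 and back.  The arithmetic
 * is modulo 2^32, which also covers VA == PA layouts.
 */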

void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
{
	struct bootmem_info * const bmi = &bootmem_info;
	pv_addr_t *pv = bmi->bmi_freeblocks;

	/*
	 * FDT/generic start fills in kern_vtopdiff early
	 */
#if defined(__HAVE_GENERIC_START)
	extern char KERNEL_BASE_virt[];
	extern char const __stop__init_memory[];

	VPRINTF("%s: kern_vtopdiff=%#lx\n", __func__, kern_vtopdiff);

	vaddr_t kstartva = trunc_page((vaddr_t)KERNEL_BASE_virt);
	vaddr_t kendva = round_page((vaddr_t)__stop__init_memory);

	kernelstart = KERN_VTOPHYS(kstartva);

	VPRINTF("%s: kstartva=%#lx, kernelstart=%#lx\n", __func__, kstartva, kernelstart);
#else
	vaddr_t kendva = round_page((vaddr_t)_end);

#if defined(KERNEL_BASE_VOFFSET)
	kern_vtopdiff = KERNEL_BASE_VOFFSET;
#else
	KASSERT(memstart == kernelstart);
	kern_vtopdiff = KERNEL_BASE + memstart;
#endif
#endif
	paddr_t kernelend = KERN_VTOPHYS(kendva);

	VPRINTF("%s: memstart=%#lx, memsize=%#lx\n", __func__,
	    memstart, memsize);
	VPRINTF("%s: kernelstart=%#lx, kernelend=%#lx\n", __func__,
	    kernelstart, kernelend);

	physical_start = bmi->bmi_start = memstart;
	physical_end = bmi->bmi_end = memstart + memsize;
#ifndef ARM_HAS_LPAE
	if (physical_end == 0) {
		physical_end = -PAGE_SIZE;
		memsize -= PAGE_SIZE;
		bmi->bmi_end -= PAGE_SIZE;
		VPRINTF("%s: memsize shrunk by a page to avoid ending at 4GB\n",
		    __func__);
	}
#endif
	physmem = memsize / PAGE_SIZE;

	/*
	 * Let's record where the kernel lives.
	 */

	bmi->bmi_kernelstart = kernelstart;
	bmi->bmi_kernelend = kernelend;

#if defined(FDT)
	fdt_add_reserved_memory_range(bmi->bmi_kernelstart,
	    bmi->bmi_kernelend - bmi->bmi_kernelstart);
#endif

	VPRINTF("%s: kernel phys start %#lx end %#lx\n", __func__, kernelstart,
	    kernelend);

#if 0
	// XXX Makes RPI abort
	KASSERT((kernelstart & (L2_S_SEGSIZE - 1)) == 0);
#endif
	/*
	 * Now the rest of the free memory must be after the kernel.
	 */
	pv->pv_pa = bmi->bmi_kernelend;
	pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
	pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
	bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
	VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
	    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
	    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
	pv++;

	/*
	 * Add a free block for any memory before the kernel.
	 */
	if (bmi->bmi_start < bmi->bmi_kernelstart) {
		pv->pv_pa = bmi->bmi_start;
		pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
		pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa;
		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
		VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
		    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
		    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
		pv++;
	}

	bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;

	SLIST_INIT(&bmi->bmi_freechunks);
	SLIST_INIT(&bmi->bmi_chunks);
}
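
/*
 * A sketch of the state arm32_bootmem_init() leaves behind, for a
 * hypothetical 512MB board with the kernel loaded 16MB into RAM:
 *
 *	bmi_freeblocks[0]: [kernelend .. bmi_end)      memory above the kernel
 *	bmi_freeblocks[1]: [bmi_start .. kernelstart)  the low 16MB
 *
 * When the kernel sits at the bottom of RAM only block 0 exists.  The
 * bootstrap allocator below (valloc_pages) carves pages from these blocks.
 */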

static bool
concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
{
	if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
	    && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
	    && acc_pv->pv_prot == pv->pv_prot
	    && acc_pv->pv_cache == pv->pv_cache) {
#if 0
		VPRINTF("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
		    __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size,
		    acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size);
#endif
		acc_pv->pv_size += pv->pv_size;
		return true;
	}

	return false;
}

static void
add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
{
	pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
	while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
		pv_addr_t * const pv0 = (*pvp);
		KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
		if (concat_pvaddr(pv0, pv)) {
			VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
			    __func__, "appending", pv,
			    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
			    pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
			pv = SLIST_NEXT(pv0, pv_list);
			if (pv != NULL && concat_pvaddr(pv0, pv)) {
				VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
				    __func__, "merging", pv,
				    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
				    pv0->pv_pa,
				    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
				SLIST_REMOVE_AFTER(pv0, pv_list);
				SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
			}
			return;
		}
		KASSERT(pv->pv_va != (*pvp)->pv_va);
		pvp = &SLIST_NEXT(*pvp, pv_list);
	}
	KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
	pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
	KASSERT(new_pv != NULL);
	SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
	*new_pv = *pv;
	SLIST_NEXT(new_pv, pv_list) = *pvp;
	(*pvp) = new_pv;

	VPRINTF("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
	    __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
	    new_pv->pv_size / PAGE_SIZE);
	if (SLIST_NEXT(new_pv, pv_list)) {
		VPRINTF("before pa %#lx\n",
		    SLIST_NEXT(new_pv, pv_list)->pv_pa);
	} else {
		VPRINTF("at tail\n");
	}
}

static void
valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
    int prot, int cache, bool zero_p)
{
	size_t nbytes = npages * PAGE_SIZE;
	pv_addr_t *free_pv = bmi->bmi_freeblocks;
	size_t free_idx = 0;
	static bool l1pt_found;

	KASSERT(npages > 0);

	/*
	 * If we haven't allocated the kernel L1 page table and we are aligned
	 * at a L1 table boundary, alloc the memory for it.
	 */
	if (!l1pt_found
	    && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
	    && free_pv->pv_size >= L1_TABLE_SIZE) {
		l1pt_found = true;
		VPRINTF(" l1pt");

		valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l1pt);
	}

	while (nbytes > free_pv->pv_size) {
		free_pv++;
		free_idx++;
		if (free_idx == bmi->bmi_nfreeblocks) {
			panic("%s: could not allocate %zu bytes",
			    __func__, nbytes);
		}
	}

	/*
	 * As we allocate the memory, make sure that we don't walk over
	 * our current first level translation table.
	 */
	KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);

#if defined(FDT)
	fdt_add_reserved_memory_range(free_pv->pv_pa, nbytes);
#endif
	pv->pv_pa = free_pv->pv_pa;
	pv->pv_va = free_pv->pv_va;
	pv->pv_size = nbytes;
	pv->pv_prot = prot;
	pv->pv_cache = cache;

	/*
	 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE
	 * just use PTE_CACHE.
	 */
	if (cache == PTE_PAGETABLE
	    && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
	    && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
	    && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
		pv->pv_cache = PTE_CACHE;

	free_pv->pv_pa += nbytes;
	free_pv->pv_va += nbytes;
	free_pv->pv_size -= nbytes;
	if (free_pv->pv_size == 0) {
		--bmi->bmi_nfreeblocks;
		for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
			free_pv[0] = free_pv[1];
		}
	}

	bmi->bmi_freepages -= npages;

	if (zero_p)
		memset((void *)pv->pv_va, 0, nbytes);
}
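
/*
 * The opportunistic L1 allocation above relies on the free blocks being
 * page aligned.  As a sketch (L1_TABLE_SIZE is the 16KB page directory
 * noted below): if the first free block starts at the hypothetical PA
 * 0x00401000, three single-page valloc_pages() calls advance it to
 * 0x00404000, which is 16KB aligned, so the fourth call first carves out
 * the 16KB kernel L1 table and then satisfies its own request.  This is
 * why allocating at least three L2 pages guarantees the L1 table a home.
 */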

void
arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
    const struct pmap_devmap *devmap, bool mapallmem_p)
{
	struct bootmem_info * const bmi = &bootmem_info;
#ifdef MULTIPROCESSOR
	const size_t cpu_num = arm_cpu_max;
#else
	const size_t cpu_num = 1;
#endif

#ifdef ARM_HAS_VBAR
	const bool map_vectors_p = false;
#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
	const bool map_vectors_p = vectors == ARM_VECTORS_HIGH
	    || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0;
#else
	const bool map_vectors_p = true;
#endif

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	KASSERT(mapallmem_p);
#ifdef ARM_MMU_EXTENDED
	/*
	 * The direct map VA space ends at the start of the kernel VM space.
	 */
	pmap_directlimit = kernel_vm_base;
#else
	KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start);
#endif /* ARM_MMU_EXTENDED */
#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */

	/*
	 * Calculate the number of L2 pages needed for mapping the
	 * kernel + data + stuff.  Assume 2 L2 pages for kernel, 1 for vectors,
	 * and 1 for IO
	 */
	size_t kernel_size = bmi->bmi_kernelend;
	kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	kernel_size += L1_TABLE_SIZE;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM;
	if (map_vectors_p) {
		kernel_size += PAGE_SIZE;	/* L2PT for VECTORS */
	}
	if (iovbase) {
		kernel_size += PAGE_SIZE;	/* L2PT for IO */
	}
	kernel_size +=
	    cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
	    + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
	kernel_size += round_page(MSGBUFSIZE);
	kernel_size += 0x10000;	/* slop */
	if (!mapallmem_p) {
		kernel_size += PAGE_SIZE
		    * ((kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE);
	}
	kernel_size = round_page(kernel_size);

	/*
	 * Now we know how many L2 pages it will take.
	 */
	const size_t KERNEL_L2PT_KERNEL_NUM =
	    round_page(kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;

	VPRINTF("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
	    __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);
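
	/*
	 * Back-of-the-envelope check of the sizing above, under the common
	 * arm32 small-page layout where one 4KB L2 page maps L2_S_SEGSIZE =
	 * 4MB (1024 PTEs x 4KB): a kernel whose image plus bootstrap
	 * allocations come to ~6MB yields KERNEL_L2PT_KERNEL_NUM == 2, and
	 * KERNEL_L2PT_VMDATA_NUM == 8 gives the initial 8 x 4MB == 32MB of
	 * kernel VM noted at its definition.  The sizes are illustrative;
	 * only the L2_S_SEGSIZE arithmetic is what the code depends on.
	 */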

	KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts));
	pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
	pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
	pv_addr_t msgbuf;
	pv_addr_t text;
	pv_addr_t data;
	pv_addr_t chunks[__arraycount(bmi->bmi_l2pts) + 11];
#if ARM_MMU_XSCALE == 1
	pv_addr_t minidataclean;
#endif

	/*
	 * We need to allocate some fixed page tables to get the kernel going.
	 *
	 * We are going to allocate our bootstrap pages from the beginning of
	 * the free space that we just calculated.  We allocate one page
	 * directory and a number of page tables and store the physical
	 * addresses in the bmi_l2pts array in bootmem_info.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

	VPRINTF("%s: allocating page tables for", __func__);
	for (size_t i = 0; i < __arraycount(chunks); i++) {
		SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
	}

	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;

	/*
	 * Allocate the L2 pages, but if we get to a page that is aligned for
	 * an L1 page table, we will allocate the pages for it first and then
	 * allocate the L2 page.
	 */

	if (map_vectors_p) {
		/*
		 * First allocate L2 page for the vectors.
		 */
		VPRINTF(" vector");
		valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_vector_l2pt);
	}

	/*
	 * Now allocate L2 pages for the kernel
	 */
	VPRINTF(" kernel");
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
		valloc_pages(bmi, &kernel_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l2pt[idx]);
	}

	/*
	 * Now allocate L2 pages for the initial kernel VA space.
	 */
	VPRINTF(" vm");
	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
		valloc_pages(bmi, &vmdata_l2pt[idx], 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &vmdata_l2pt[idx]);
	}

	/*
	 * If someone wanted a L2 page for I/O, allocate it now.
	 */
	if (iovbase) {
		VPRINTF(" io");
		valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_io_l2pt);
	}

	VPRINTF("%s: allocating stacks\n", __func__);

	/* Allocate stacks for all modes and CPUs */
	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &abtstack);
	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &fiqstack);
	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &irqstack);
	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &undstack);
	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,		/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &idlestack);
	valloc_pages(bmi, &kernelstack, UPAGES,			/* SVC32 */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &kernelstack);

	/* Allocate the message buffer from the end of memory. */
	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, false);
	add_pages(bmi, &msgbuf);
	msgbufphys = msgbuf.pv_pa;
	msgbufaddr = (void *)msgbuf.pv_va;

	if (map_vectors_p) {
		/*
		 * Allocate a page for the system vector page.
		 * This page will just contain the system vectors and can be
		 * shared by all processes.
		 */
		VPRINTF(" vector");

		valloc_pages(bmi, &systempage, 1,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
		    PTE_CACHE, true);
	}
	systempage.pv_va = vectors;
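
	/*
	 * Note the VA override above: the vector page is allocated wherever
	 * physical memory permits, but it must appear at the architectural
	 * vector address handed in by the caller (typically ARM_VECTORS_HIGH,
	 * 0xffff0000), so pv_va is forced to `vectors` before the page is
	 * entered into the L1 table further down.
	 */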

	/*
	 * If the caller needed a few extra pages for some reason, allocate
	 * them now.
	 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		valloc_pages(bmi, &minidataclean, 1,
		    VM_PROT_READ | VM_PROT_WRITE, 0, true);
#endif

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables and stacks.  Let's just confirm that.
	 */
	if (kernel_l1pt.pv_va == 0
	    && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
		panic("%s: Failed to allocate or align the kernel "
		    "page directory", __func__);

	VPRINTF("Creating L1 page table at 0x%08lx/0x%08lx\n",
	    kernel_l1pt.pv_va, kernel_l1pt.pv_pa);

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	vaddr_t l1pt_va = kernel_l1pt.pv_va;
	paddr_t l1pt_pa = kernel_l1pt.pv_pa;

	if (map_vectors_p) {
		/* Map the L2 page tables into the L1 page table */
		pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
		    &bmi->bmi_vector_l2pt);
		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) "
		    "for VA %#lx (vectors)\n",
		    __func__, bmi->bmi_vector_l2pt.pv_va,
		    bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va);
	}

	/*
	 * This enforces an alignment requirement of L2_S_SEGSIZE for kernel
	 * start PA
	 */
	const vaddr_t kernel_base =
	    KERN_PHYSTOV(bmi->bmi_kernelstart & -L2_S_SEGSIZE);

	VPRINTF("%s: kernel_base %lx KERNEL_L2PT_KERNEL_NUM %zu\n", __func__,
	    kernel_base, KERNEL_L2PT_KERNEL_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE,
		    &kernel_l2pt[idx]);
		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n",
		    __func__, kernel_l2pt[idx].pv_va,
		    kernel_l2pt[idx].pv_pa, kernel_base + idx * L2_S_SEGSIZE);
	}

	VPRINTF("%s: kernel_vm_base %lx KERNEL_L2PT_VMDATA_NUM %d\n", __func__,
	    kernel_vm_base, KERNEL_L2PT_VMDATA_NUM);

	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE,
		    &vmdata_l2pt[idx]);
		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n",
		    __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
		    kernel_vm_base + idx * L2_S_SEGSIZE);
	}
	if (iovbase) {
		pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE, &bmi->bmi_io_l2pt);
		VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n",
		    __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
		    iovbase & -L2_S_SEGSIZE);
	}

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);

	// This could be done earlier and then the kernel data and pages
	// allocated above would get merged (concatenated)

	VPRINTF("Mapping kernel\n");

	extern char etext[];
	size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
	size_t textsize = KERN_VTOPHYS((uintptr_t)etext) - bmi->bmi_kernelstart;

	textsize = (textsize + PGOFSET) & ~PGOFSET;

	/* start at offset of kernel in RAM */

	text.pv_pa = bmi->bmi_kernelstart;
	text.pv_va = KERN_PHYSTOV(bmi->bmi_kernelstart);
	text.pv_size = textsize;
	text.pv_prot = VM_PROT_READ | VM_PROT_EXECUTE;
	text.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
	    __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);

	add_pages(bmi, &text);

	data.pv_pa = text.pv_pa + textsize;
	data.pv_va = text.pv_va + textsize;
	data.pv_size = totalsize - textsize;
	data.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
	data.pv_cache = PTE_CACHE;

	VPRINTF("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
	    __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);

	add_pages(bmi, &data);

	VPRINTF("Listing Chunks\n");

	pv_addr_t *lpv;
	SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) {
		VPRINTF("%s: pv %p: chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1,
		    lpv->pv_pa, lpv->pv_prot, lpv->pv_cache);
	}
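
	/*
	 * The text/data split above is page-granular: with the usual 4KB
	 * pages PGOFSET == 0xfff, so (textsize + PGOFSET) & ~PGOFSET rounds
	 * textsize up to a page boundary (a hypothetical etext offset of
	 * 0x2f5a21 becomes 0x2f6000).  The text chunk is then mapped
	 * read+execute and the remainder of the image read+write, with the
	 * split falling on that page boundary.
	 */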
VPRINTF("\nMapping Chunks\n"); 743 744 pv_addr_t cur_pv; 745 pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks); 746 if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) { 747 cur_pv = *pv; 748 KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va); 749 pv = SLIST_NEXT(pv, pv_list); 750 } else { 751 cur_pv.pv_va = KERNEL_BASE; 752 cur_pv.pv_pa = KERN_VTOPHYS(cur_pv.pv_va); 753 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa; 754 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 755 cur_pv.pv_cache = PTE_CACHE; 756 } 757 while (pv != NULL) { 758 if (mapallmem_p) { 759 if (concat_pvaddr(&cur_pv, pv)) { 760 pv = SLIST_NEXT(pv, pv_list); 761 continue; 762 } 763 if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) { 764 /* 765 * See if we can extend the current pv to emcompass the 766 * hole, and if so do it and retry the concatenation. 767 */ 768 if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE) 769 && cur_pv.pv_cache == PTE_CACHE) { 770 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va; 771 continue; 772 } 773 774 /* 775 * We couldn't so emit the current chunk and then 776 */ 777 VPRINTF("%s: mapping chunk VA %#lx..%#lx " 778 "(PA %#lx, prot %d, cache %d)\n", 779 __func__, 780 cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 781 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 782 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 783 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 784 785 /* 786 * set the current chunk to the hole and try again. 787 */ 788 cur_pv.pv_pa += cur_pv.pv_size; 789 cur_pv.pv_va += cur_pv.pv_size; 790 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va; 791 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 792 cur_pv.pv_cache = PTE_CACHE; 793 continue; 794 } 795 } 796 797 /* 798 * The new pv didn't concatenate so emit the current one 799 * and use the new pv as the current pv. 800 */ 801 VPRINTF("%s: mapping chunk VA %#lx..%#lx " 802 "(PA %#lx, prot %d, cache %d)\n", 803 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 804 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 805 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 806 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 807 cur_pv = *pv; 808 pv = SLIST_NEXT(pv, pv_list); 809 } 810 811 /* 812 * If we are mapping all of memory, let's map the rest of memory. 813 */ 814 if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) { 815 if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE) 816 && cur_pv.pv_cache == PTE_CACHE) { 817 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa; 818 } else { 819 KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base, 820 "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size, 821 kernel_vm_base); 822 VPRINTF("%s: mapping chunk VA %#lx..%#lx " 823 "(PA %#lx, prot %d, cache %d)\n", 824 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 825 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 826 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 827 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 828 cur_pv.pv_pa += cur_pv.pv_size; 829 cur_pv.pv_va += cur_pv.pv_size; 830 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa; 831 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 832 cur_pv.pv_cache = PTE_CACHE; 833 } 834 } 835 836 /* 837 * The amount we can direct map is limited by the start of the 838 * virtual part of the kernel address space. Don't overrun 839 * into it. 840 */ 841 if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) { 842 cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va; 843 } 844 845 /* 846 * Now we map the final chunk. 

	/*
	 * Now we map the final chunk.
	 */
	VPRINTF("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
	    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
	    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
	pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
	    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

	/*
	 * Now we map the stuff that isn't directly after the kernel
	 */
	if (map_vectors_p) {
		/* Map the vector page. */
		pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, PTE_CACHE);
	}

	/* Map the Mini-Data cache clean area. */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
		    minidataclean.pv_pa);
#endif

	/*
	 * Map integrated peripherals at same address in first level page
	 * table so that we can continue to use console.
	 */
	if (devmap)
		pmap_devmap_bootstrap(l1pt_va, devmap);

	/* Tell the user about where all the bits and pieces live. */
	VPRINTF("%22s Physical Virtual Num\n", " ");
	VPRINTF("%22s Starting Ending Starting Ending Pages\n", " ");

#ifdef VERBOSE_INIT_ARM
	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx %zu\n";
#endif

#if 0
	// XXX Doesn't make sense if kernel not at bottom of RAM
	VPRINTF(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
	    KERN_PHYSTOV(bmi->bmi_start), KERN_PHYSTOV(bmi->bmi_end - 1),
	    (int)physmem);
#endif
	VPRINTF(mem_fmt, "text section",
	    text.pv_pa, text.pv_pa + text.pv_size - 1,
	    text.pv_va, text.pv_va + text.pv_size - 1,
	    (int)(text.pv_size / PAGE_SIZE));
	VPRINTF(mem_fmt, "data section",
	    KERN_VTOPHYS((vaddr_t)__data_start), KERN_VTOPHYS((vaddr_t)_edata),
	    (vaddr_t)__data_start, (vaddr_t)_edata,
	    (int)((round_page((vaddr_t)_edata)
		- trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "bss section",
	    KERN_VTOPHYS((vaddr_t)__bss_start), KERN_VTOPHYS((vaddr_t)__bss_end__),
	    (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	    (int)((round_page((vaddr_t)__bss_end__)
		- trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	VPRINTF(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
	VPRINTF(mem_fmt, "ABT stack (CPU 0)",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	VPRINTF(mem_fmt, "FIQ stack (CPU 0)",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	VPRINTF(mem_fmt, "IRQ stack (CPU 0)",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	VPRINTF(mem_fmt, "UND stack (CPU 0)",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	VPRINTF(mem_fmt, "IDLE stack (CPU 0)",
	    idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	VPRINTF(mem_fmt, "Message Buffer",
	    msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
	    msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
	    (int)msgbuf_pgs);
	if (map_vectors_p) {
		VPRINTF(mem_fmt, "Exception Vectors",
		    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
		    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
		    1);
	}
	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv = &bmi->bmi_freeblocks[i];

		VPRINTF(mem_fmt_nov, "Free Memory",
		    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    pv->pv_size / PAGE_SIZE);
	}
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	VPRINTF("TTBR0=%#x", armreg_ttbr_read());
#ifdef _ARM_ARCH_6
	VPRINTF(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x",
	    armreg_ttbr1_read(), armreg_ttbcr_read(),
	    armreg_contextidr_read());
#endif
	VPRINTF("\n");

	/* Switch tables */
	VPRINTF("switching to new L1 page table @%#lx...\n", l1pt_pa);

	cpu_ttb = l1pt_pa;

	cpu_domains(DOMAIN_DEFAULT);

	cpu_idcache_wbinv_all();

#ifdef __HAVE_GENERIC_START

	/*
	 * Turn on caches and set SCTLR/ACTLR
	 */
	cpu_setup(boot_args);
#endif

	VPRINTF(" ttb");
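
	/*
	 * For the ARM_MMU_EXTENDED path below: with TTBCR.N == 1 (which the
	 * KASSERT checks), the ARMv7 MMU translates VAs below 0x80000000
	 * through TTBR0 and VAs at or above it through TTBR1, matching the
	 * user/kernel split.  Setting TTBCR.PD0 then disables TTBR0 table
	 * walks entirely, so until the first real pmap is activated only
	 * the kernel half of the address space is translatable.
	 */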

#ifdef ARM_MMU_EXTENDED
	/*
	 * TTBCR should have been initialized by the MD start code.
	 */
	KASSERT((armreg_contextidr_read() & 0xff) == 0);
	KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
	/*
	 * Disable lookups via TTBR0 until there is an activated pmap.
	 */
	armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	cpu_setttb(l1pt_pa, KERNEL_PID);
	arm_isb();
#else
	cpu_setttb(l1pt_pa, true);
#endif

	cpu_tlb_flushID();

#ifdef ARM_MMU_EXTENDED
	VPRINTF("\nsctlr=%#x actlr=%#x\n",
	    armreg_sctlr_read(), armreg_auxctl_read());
#else
	VPRINTF(" (TTBR0=%#x)", armreg_ttbr_read());
#endif

#ifdef MULTIPROCESSOR
#ifndef __HAVE_GENERIC_START
	/*
	 * Kick the secondaries to load the TTB.  After which they'll go
	 * back to sleep to wait for the final kick so they will hatch.
	 */
	VPRINTF(" hatchlings");
	cpu_boot_secondary_processors();
#endif
#endif

	VPRINTF(" OK\n");
}