/*	$NetBSD: arm32_kvminit.c,v 1.35 2015/06/01 19:16:44 matt Exp $	*/

/*
 * Copyright (c) 2002, 2003, 2005  Genetec Corporation.  All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.35 2015/06/01 19:16:44 matt Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/bus.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>

#include <arm/locore.h>
#include <arm/db_machdep.h>
#include <arm/undefined.h>
#include <arm/bootconfig.h>
#include <arm/arm32/machdep.h>

struct bootmem_info bootmem_info;

extern void *msgbufaddr;
paddr_t msgbufphys;
paddr_t physical_start;
paddr_t physical_end;

extern char etext[];
extern char __data_start[], _edata[];
extern char __bss_start[], __bss_end__[];
extern char _end[];

/* Page tables for mapping kernel VM */
#define	KERNEL_L2PT_VMDATA_NUM	8	/* start with 32MB of KVM */

/*
 * Macros to translate between physical and virtual for a subset of the
 * kernel address space.  *Not* for general use.
 */
#if defined(KERNEL_BASE_VOFFSET)
#define KERN_VTOPHYS(bmi, va) \
	((paddr_t)((vaddr_t)(va) - KERNEL_BASE_VOFFSET))
#define KERN_PHYSTOV(bmi, pa) \
	((vaddr_t)((paddr_t)(pa) + KERNEL_BASE_VOFFSET))
#else
#define KERN_VTOPHYS(bmi, va) \
	((paddr_t)((vaddr_t)(va) - KERNEL_BASE + (bmi)->bmi_start))
#define KERN_PHYSTOV(bmi, pa) \
	((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + KERNEL_BASE))
#endif

void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
{
	struct bootmem_info * const bmi = &bootmem_info;
	pv_addr_t *pv = bmi->bmi_freeblocks;

#ifdef VERBOSE_INIT_ARM
	printf("%s: memstart=%#lx, memsize=%#lx, kernelstart=%#lx\n",
	    __func__, memstart, memsize, kernelstart);
#endif

	physical_start = bmi->bmi_start = memstart;
	physical_end = bmi->bmi_end = memstart + memsize;
#ifndef ARM_HAS_LPAE
	if (physical_end == 0) {
		physical_end = -PAGE_SIZE;
		memsize -= PAGE_SIZE;
		bmi->bmi_end -= PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
		printf("%s: memsize shrunk by a page to avoid ending at 4GB\n",
		    __func__);
#endif
	}
#endif
	physmem = memsize / PAGE_SIZE;

	/*
	 * Let's record where the kernel lives.
	 */
	bmi->bmi_kernelstart = kernelstart;
	bmi->bmi_kernelend = KERN_VTOPHYS(bmi, round_page((vaddr_t)_end));

#ifdef VERBOSE_INIT_ARM
	printf("%s: kernelend=%#lx\n", __func__, bmi->bmi_kernelend);
#endif

	/*
	 * Now the rest of the free memory must be after the kernel.
	 */
	pv->pv_pa = bmi->bmi_kernelend;
	pv->pv_va = KERN_PHYSTOV(bmi, pv->pv_pa);
	pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
	bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
	    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
	    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
#endif
	pv++;

	/*
	 * Add a free block for any memory before the kernel.
	 */
	if (bmi->bmi_start < bmi->bmi_kernelstart) {
		pv->pv_pa = bmi->bmi_start;
		pv->pv_va = KERN_PHYSTOV(bmi, pv->pv_pa);
		pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa;
		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
		    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
		    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
#endif
		pv++;
	}

	bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;

	SLIST_INIT(&bmi->bmi_freechunks);
	SLIST_INIT(&bmi->bmi_chunks);
}
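
/*
 * Helpers for the bootstrap chunk lists.  concat_pvaddr() merges pv into
 * acc_pv when the two are physically and virtually contiguous and share
 * protection and cache attributes.  add_pages() keeps bmi_chunks sorted
 * by VA, merging with a neighbour where possible and otherwise taking a
 * spare entry from bmi_freechunks for the insertion.
 */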
static bool
concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
{
	if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
	    && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
	    && acc_pv->pv_prot == pv->pv_prot
	    && acc_pv->pv_cache == pv->pv_cache) {
#ifdef VERBOSE_INIT_ARMX
		printf("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
		    __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size - 1);
#endif
		acc_pv->pv_size += pv->pv_size;
		return true;
	}

	return false;
}

static void
add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
{
	pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
	while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
		pv_addr_t * const pv0 = (*pvp);
		KASSERT(SLIST_NEXT(pv0, pv_list) == NULL
		    || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
		if (concat_pvaddr(pv0, pv)) {
#ifdef VERBOSE_INIT_ARM
			printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
			    __func__, "appending", pv,
			    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
			    pv0->pv_pa,
			    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
#endif
			pv = SLIST_NEXT(pv0, pv_list);
			if (pv != NULL && concat_pvaddr(pv0, pv)) {
#ifdef VERBOSE_INIT_ARM
				printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
				    __func__, "merging", pv,
				    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
				    pv0->pv_pa,
				    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
#endif
				SLIST_REMOVE_AFTER(pv0, pv_list);
				SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv,
				    pv_list);
			}
			return;
		}
		KASSERT(pv->pv_va != (*pvp)->pv_va);
		pvp = &SLIST_NEXT(*pvp, pv_list);
	}
	KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
	pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
	KASSERT(new_pv != NULL);
	SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
	*new_pv = *pv;
	SLIST_NEXT(new_pv, pv_list) = *pvp;
	(*pvp) = new_pv;
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
	    __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
	    new_pv->pv_size / PAGE_SIZE);
	if (SLIST_NEXT(new_pv, pv_list))
		printf("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
	else
		printf("at tail\n");
#endif
}
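
/*
 * valloc_pages() carves npages of wired memory out of the first free
 * block large enough to hold the request and describes the allocation
 * in *pv.  As a side effect, the first time it finds itself at an
 * L1_TABLE_SIZE-aligned spot with enough room, it takes the kernel L1
 * page directory from there, since that is the only way to satisfy the
 * L1's 16KB alignment requirement this early in bootstrap.
 */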
static void
valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
	int prot, int cache, bool zero_p)
{
	size_t nbytes = npages * PAGE_SIZE;
	pv_addr_t *free_pv = bmi->bmi_freeblocks;
	size_t free_idx = 0;
	static bool l1pt_found;

	KASSERT(npages > 0);

	/*
	 * If we haven't allocated the kernel L1 page table and we are aligned
	 * at an L1 table boundary, allocate the memory for it.
	 */
	if (!l1pt_found
	    && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
	    && free_pv->pv_size >= L1_TABLE_SIZE) {
		l1pt_found = true;
		valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l1pt);
	}

	while (nbytes > free_pv->pv_size) {
		free_pv++;
		free_idx++;
		if (free_idx == bmi->bmi_nfreeblocks) {
			panic("%s: could not allocate %zu bytes",
			    __func__, nbytes);
		}
	}

	/*
	 * As we allocate the memory, make sure that we don't walk over
	 * our current first level translation table.
	 */
	KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);

	pv->pv_pa = free_pv->pv_pa;
	pv->pv_va = free_pv->pv_va;
	pv->pv_size = nbytes;
	pv->pv_prot = prot;
	pv->pv_cache = cache;

	/*
	 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE
	 * just use PTE_CACHE.
	 */
	if (cache == PTE_PAGETABLE
	    && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
	    && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
	    && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
		pv->pv_cache = PTE_CACHE;

	free_pv->pv_pa += nbytes;
	free_pv->pv_va += nbytes;
	free_pv->pv_size -= nbytes;
	if (free_pv->pv_size == 0) {
		--bmi->bmi_nfreeblocks;
		for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
			free_pv[0] = free_pv[1];
		}
	}

	bmi->bmi_freepages -= npages;

	if (zero_p)
		memset((void *)pv->pv_va, 0, nbytes);
}

void
arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
	const struct pmap_devmap *devmap, bool mapallmem_p)
{
	struct bootmem_info * const bmi = &bootmem_info;
#ifdef MULTIPROCESSOR
	const size_t cpu_num = arm_cpu_max;
#else
	const size_t cpu_num = 1;
#endif
#ifdef ARM_HAS_VBAR
	const bool map_vectors_p = false;
#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
	const bool map_vectors_p = vectors == ARM_VECTORS_HIGH
	    || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0;
#else
	const bool map_vectors_p = true;
#endif

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	KASSERT(mapallmem_p);
#ifdef ARM_MMU_EXTENDED
	/*
	 * The direct map VA space ends at the start of the kernel VM space.
	 */
	pmap_directlimit = kernel_vm_base;
#else
	KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start);
#endif /* ARM_MMU_EXTENDED */
#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */

	/*
	 * Calculate the number of L2 pages needed for mapping the
	 * kernel + data + stuff.  Assume 2 L2 pages for kernel, 1 for
	 * vectors, and 1 for IO.
	 */
	size_t kernel_size = bmi->bmi_kernelend;
	kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	kernel_size += L1_TABLE_SIZE_REAL;
	kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM;
	if (map_vectors_p) {
		kernel_size += PAGE_SIZE;	/* L2PT for VECTORS */
	}
	if (iovbase) {
		kernel_size += PAGE_SIZE;	/* L2PT for IO */
	}
	kernel_size +=
	    cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
	    + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
	kernel_size += round_page(MSGBUFSIZE);
	kernel_size += 0x10000;	/* slop */
	if (!mapallmem_p) {
		kernel_size += PAGE_SIZE
		    * ((kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE);
	}
	kernel_size = round_page(kernel_size);
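
	/*
	 * Illustrative only (the exact figures depend on the kernel
	 * configuration): the dominant terms above are the kernel image
	 * itself, the 16KB L1 table, one page per L2 table, the per-CPU
	 * exception stacks, the message buffer, and the 64KB of slop.
	 */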

	/*
	 * Now we know how many L2 pages it will take.  If we've mapped
	 * all of memory, then it won't take any.
	 */
	const size_t KERNEL_L2PT_KERNEL_NUM = mapallmem_p
	    ? 0 : round_page(kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
	    __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);
#endif

	KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM
	    < __arraycount(bmi->bmi_l2pts));
	pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
	pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
	pv_addr_t msgbuf;
	pv_addr_t text;
	pv_addr_t data;
	pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM+KERNEL_L2PT_VMDATA_NUM+11];
#if ARM_MMU_XSCALE == 1
	pv_addr_t minidataclean;
#endif

	/*
	 * We need to allocate some fixed page tables to get the kernel going.
	 *
	 * We are going to allocate our bootstrap pages from the beginning of
	 * the free space that we just calculated.  We allocate one page
	 * directory and a number of page tables and store the physical
	 * addresses in the bmi_l2pts array in bootmem_info.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("%s: allocating page tables for", __func__);
#endif
	for (size_t i = 0; i < __arraycount(chunks); i++) {
		SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
	}

	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;

	/*
	 * Allocate the L2 pages, but if we get to a page that is aligned for
	 * an L1 page table, we will allocate the pages for it first and then
	 * allocate the L2 page.
	 */

	if (map_vectors_p) {
		/*
		 * First allocate L2 page for the vectors.
		 */
#ifdef VERBOSE_INIT_ARM
		printf(" vector");
#endif
		valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_vector_l2pt);
	}

	/*
	 * Now allocate L2 pages for the kernel.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" kernel");
#endif
	KASSERT(mapallmem_p || KERNEL_L2PT_KERNEL_NUM > 0);
	KASSERT(!mapallmem_p || KERNEL_L2PT_KERNEL_NUM == 0);
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
		valloc_pages(bmi, &kernel_l2pt[idx], 1,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &kernel_l2pt[idx]);
	}

	/*
	 * Now allocate L2 pages for the initial kernel VA space.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" vm");
#endif
	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
		valloc_pages(bmi, &vmdata_l2pt[idx], 1,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &vmdata_l2pt[idx]);
	}
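
	/*
	 * Note that the L1 page directory is never allocated explicitly
	 * here: valloc_pages() takes it from the first 16KB-aligned free
	 * region it encounters while handing out the L2 pages above.
	 */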

	/*
	 * If someone wanted an L2 page for I/O, allocate it now.
	 */
	if (iovbase) {
#ifdef VERBOSE_INIT_ARM
		printf(" io");
#endif
		valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
		add_pages(bmi, &bmi->bmi_io_l2pt);
	}

#ifdef VERBOSE_INIT_ARM
	printf("%s: allocating stacks\n", __func__);
#endif

	/* Allocate stacks for all modes and CPUs */
	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &abtstack);
	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &fiqstack);
	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &irqstack);
	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &undstack);
	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,		/* SVC32 */
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &idlestack);
	valloc_pages(bmi, &kernelstack, UPAGES,			/* SVC32 */
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
	add_pages(bmi, &kernelstack);

	/* Allocate the message buffer from the end of memory. */
	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false);
	add_pages(bmi, &msgbuf);
	msgbufphys = msgbuf.pv_pa;
	msgbufaddr = (void *)msgbuf.pv_va;

	if (map_vectors_p) {
		/*
		 * Allocate a page for the system vector page.
		 * This page will just contain the system vectors and can be
		 * shared by all processes.
		 */
		valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE,
		    PTE_CACHE, true);
	}
	systempage.pv_va = vectors;

	/*
	 * If the caller needed a few extra pages for some reason, allocate
	 * them now.
	 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		valloc_pages(bmi, &minidataclean, 1,
		    VM_PROT_READ|VM_PROT_WRITE, 0, true);
#endif

	/*
	 * Ok, we have allocated physical pages for the primary kernel
	 * page tables and stacks.  Let's just confirm that.
	 */
	if (kernel_l1pt.pv_va == 0
	    && (!kernel_l1pt.pv_pa
		|| (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
		panic("%s: Failed to allocate or align the kernel "
		    "page directory", __func__);

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif
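
	/*
	 * pmap_link_l2pt() installs an L2 page table into the L1 slot(s)
	 * covering the given segment-aligned VA, giving the 4KB page
	 * mappings made below a place to land.
	 */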

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	vaddr_t l1pt_va = kernel_l1pt.pv_va;
	paddr_t l1pt_pa = kernel_l1pt.pv_pa;

	if (map_vectors_p) {
		/* Map the L2 page tables into the L1 page table */
		pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
		    &bmi->bmi_vector_l2pt);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) "
		    "for VA %#lx (vectors)\n",
		    __func__, bmi->bmi_vector_l2pt.pv_va,
		    bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va);
#endif
	}

	const vaddr_t kernel_base =
	    KERN_PHYSTOV(bmi, bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE,
		    &kernel_l2pt[idx]);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n",
		    __func__, kernel_l2pt[idx].pv_va,
		    kernel_l2pt[idx].pv_pa, kernel_base + idx * L2_S_SEGSIZE);
#endif
	}

	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE,
		    &vmdata_l2pt[idx]);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n",
		    __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
		    kernel_vm_base + idx * L2_S_SEGSIZE);
#endif
	}
	if (iovbase) {
		pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE,
		    &bmi->bmi_io_l2pt);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n",
		    __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
		    iovbase & -L2_S_SEGSIZE);
#endif
	}

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	extern char etext[], _end[];
	size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
	size_t textsize =
	    KERN_VTOPHYS(bmi, (uintptr_t)etext) - bmi->bmi_kernelstart;

	textsize = (textsize + PGOFSET) & ~PGOFSET;

	/* start at offset of kernel in RAM */

	text.pv_pa = bmi->bmi_kernelstart;
	text.pv_va = KERN_PHYSTOV(bmi, bmi->bmi_kernelstart);
	text.pv_size = textsize;
	text.pv_prot = VM_PROT_READ|VM_PROT_WRITE;	/* XXX VM_PROT_EXECUTE */
	text.pv_cache = PTE_CACHE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
	    __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);
#endif

	add_pages(bmi, &text);

	data.pv_pa = text.pv_pa + textsize;
	data.pv_va = text.pv_va + textsize;
	data.pv_size = totalsize - textsize;
	data.pv_prot = VM_PROT_READ|VM_PROT_WRITE;
	data.pv_cache = PTE_CACHE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
	    __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);
#endif

	add_pages(bmi, &data);

#ifdef VERBOSE_INIT_ARM
	printf("Listing Chunks\n");

	pv_addr_t *lpv;
	SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) {
		printf("%s: pv %p: chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1,
		    lpv->pv_pa, lpv->pv_prot, lpv->pv_cache);
	}
	printf("\nMapping Chunks\n");
#endif
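
	/*
	 * Sweep bmi_chunks in VA order, accumulating runs of chunks that
	 * are contiguous and share attributes into cur_pv, and emit one
	 * pmap_map_chunk() call per maximal run.  When mapping all of
	 * memory, holes between chunks are bridged with normal cached
	 * read/write mappings.
	 */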
	pv_addr_t cur_pv;
	pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
	if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
		cur_pv = *pv;
		KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va);
		pv = SLIST_NEXT(pv, pv_list);
	} else {
		cur_pv.pv_va = KERNEL_BASE;
		cur_pv.pv_pa = KERN_VTOPHYS(bmi, cur_pv.pv_va);
		cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
		cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
		cur_pv.pv_cache = PTE_CACHE;
	}
	while (pv != NULL) {
		if (mapallmem_p) {
			if (concat_pvaddr(&cur_pv, pv)) {
				pv = SLIST_NEXT(pv, pv_list);
				continue;
			}
			if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
				/*
				 * See if we can extend the current pv to
				 * encompass the hole, and if so do it and
				 * retry the concatenation.
				 */
				if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE)
				    && cur_pv.pv_cache == PTE_CACHE) {
					cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
					continue;
				}

				/*
				 * We couldn't, so emit the current chunk
				 * and then ...
				 */
#ifdef VERBOSE_INIT_ARM
				printf("%s: mapping chunk VA %#lx..%#lx "
				    "(PA %#lx, prot %d, cache %d)\n",
				    __func__,
				    cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
				    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
				pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
				    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

				/*
				 * ... set the current chunk to the hole and
				 * try again.
				 */
				cur_pv.pv_pa += cur_pv.pv_size;
				cur_pv.pv_va += cur_pv.pv_size;
				cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
				cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
				cur_pv.pv_cache = PTE_CACHE;
				continue;
			}
		}

		/*
		 * The new pv didn't concatenate so emit the current one
		 * and use the new pv as the current pv.
		 */
#ifdef VERBOSE_INIT_ARM
		printf("%s: mapping chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
		    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
		pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
		    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	}

	/*
	 * If we are mapping all of memory, let's map the rest of memory.
	 */
	if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
		if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
		    && cur_pv.pv_cache == PTE_CACHE) {
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
		} else {
			KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base,
			    "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size,
			    kernel_vm_base);
#ifdef VERBOSE_INIT_ARM
			printf("%s: mapping chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
			    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
			pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
			    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
			cur_pv.pv_pa += cur_pv.pv_size;
			cur_pv.pv_va += cur_pv.pv_size;
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
			cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
			cur_pv.pv_cache = PTE_CACHE;
		}
	}

	// The amount we can directly map is limited by the start of the
	// virtual part of the kernel address space.  Don't overrun
	// into it.
	if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) {
		cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va;
	}
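
	/*
	 * At this point cur_pv describes whatever tail is still unmapped:
	 * the last chunk from the list, possibly extended to the end of
	 * RAM, and clamped so the direct mapping stops at kernel_vm_base.
	 */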

	/*
	 * Now we map the final chunk.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
	    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
	    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
	pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
	    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

	/*
	 * Now we map the stuff that isn't directly after the kernel.
	 */
	if (map_vectors_p) {
		/* Map the vector page. */
		pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

	/* Map the Mini-Data cache clean area. */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
		    minidataclean.pv_pa);
#endif

	/*
	 * Map integrated peripherals at same address in first level page
	 * table so that we can continue to use console.
	 */
	if (devmap)
		pmap_devmap_bootstrap(l1pt_va, devmap);
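
	/*
	 * All bootstrap mappings now exist in the new tables.  Optionally
	 * report the layout, then point the MMU at the new L1 table.
	 */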

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about where all the bits and pieces live. */
	printf("%22s       Physical              Virtual        Num\n", " ");
	printf("%22s Starting    Ending    Starting    Ending   Pages\n", " ");

	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx                       %zu\n";

	printf(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
	    KERN_PHYSTOV(bmi, bmi->bmi_start),
	    KERN_PHYSTOV(bmi, bmi->bmi_end - 1),
	    physmem);
	printf(mem_fmt, "text section",
	    text.pv_pa, text.pv_pa + text.pv_size - 1,
	    text.pv_va, text.pv_va + text.pv_size - 1,
	    (int)(text.pv_size / PAGE_SIZE));
	printf(mem_fmt, "data section",
	    KERN_VTOPHYS(bmi, __data_start), KERN_VTOPHYS(bmi, _edata),
	    (vaddr_t)__data_start, (vaddr_t)_edata,
	    (int)((round_page((vaddr_t)_edata)
		- trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	printf(mem_fmt, "bss section",
	    KERN_VTOPHYS(bmi, __bss_start), KERN_VTOPHYS(bmi, __bss_end__),
	    (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	    (int)((round_page((vaddr_t)__bss_end__)
		- trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	printf(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
	printf(mem_fmt, "ABT stack (CPU 0)",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	printf(mem_fmt, "FIQ stack (CPU 0)",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	printf(mem_fmt, "IRQ stack (CPU 0)",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	printf(mem_fmt, "UND stack (CPU 0)",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	printf(mem_fmt, "IDLE stack (CPU 0)",
	    idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt, "Message Buffer",
	    msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
	    msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
	    (int)msgbuf_pgs);
	if (map_vectors_p) {
		printf(mem_fmt, "Exception Vectors",
		    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
		    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
		    1);
	}
	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv = &bmi->bmi_freeblocks[i];

		printf(mem_fmt_nov, "Free Memory",
		    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    pv->pv_size / PAGE_SIZE);
	}
#endif
	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

#if defined(VERBOSE_INIT_ARM)
	printf("TTBR0=%#x", armreg_ttbr_read());
#ifdef _ARM_ARCH_6
	printf(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x",
	    armreg_ttbr1_read(), armreg_ttbcr_read(),
	    armreg_contextidr_read());
#endif
	printf("\n");
#endif

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table @%#lx...", l1pt_pa);
#endif

#ifdef ARM_MMU_EXTENDED
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2))
	    | (DOMAIN_CLIENT << (PMAP_DOMAIN_USER*2)));
#else
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
#endif
	cpu_idcache_wbinv_all();
#ifdef VERBOSE_INIT_ARM
	printf(" ttb");
#endif
#ifdef ARM_MMU_EXTENDED
	/*
	 * TTBCR should have been initialized by the MD start code.
	 */
	KASSERT((armreg_contextidr_read() & 0xff) == 0);
	KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
	/*
	 * Disable lookups via TTBR0 until there is an activated pmap.
	 */
	armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	cpu_setttb(l1pt_pa, KERNEL_PID);
	arm_isb();
#else
	cpu_setttb(l1pt_pa, true);
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
#endif
	cpu_tlb_flushID();

#ifdef VERBOSE_INIT_ARM
#ifdef ARM_MMU_EXTENDED
	printf(" (TTBCR=%#x TTBR0=%#x TTBR1=%#x)",
	    armreg_ttbcr_read(), armreg_ttbr_read(), armreg_ttbr1_read());
#else
	printf(" (TTBR0=%#x)", armreg_ttbr_read());
#endif
#endif

#ifdef MULTIPROCESSOR
	/*
	 * Kick the secondaries to load the TTB.  They'll then go back to
	 * sleep to wait for the final kick, at which point they will hatch.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" hatchlings");
#endif
	cpu_boot_secondary_processors();
#endif

#ifdef VERBOSE_INIT_ARM
	printf(" OK\n");
#endif
}