/*	$NetBSD: arm32_kvminit.c,v 1.4 2012/09/22 00:33:37 matt Exp $	*/

/*
 * Copyright (c) 2002, 2003, 2005  Genetec Corporation.  All rights reserved.
 * Written by Hiroyuki Bessho for Genetec Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Genetec Corporation may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL GENETEC CORPORATION
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997,1998 Causality Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2007 Microsoft
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Microsoft
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.4 2012/09/22 00:33:37 matt Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/bus.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>

#include <arm/db_machdep.h>
#include <arm/undefined.h>
#include <arm/bootconfig.h>
#include <arm/arm32/machdep.h>

#include "ksyms.h"

struct bootmem_info bootmem_info;

paddr_t msgbufphys;
paddr_t physical_start;
paddr_t physical_end;

extern char etext[];
extern char __data_start[], _edata[];
extern char __bss_start[], __bss_end__[];
extern char _end[];

/* Page tables for mapping kernel VM */
#define	KERNEL_L2PT_VMDATA_NUM	8	/* start with 32MB of KVM */

/*
 * Macros to translate between physical and virtual for a subset of the
 * kernel address space.  *Not* for general use.
 */
#define	KERN_VTOPHYS(bmi, va) \
	((paddr_t)((vaddr_t)(va) - KERNEL_BASE + (bmi)->bmi_start))
#define	KERN_PHYSTOV(bmi, pa) \
	((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + KERNEL_BASE))

void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
{
	struct bootmem_info * const bmi = &bootmem_info;
	pv_addr_t *pv = bmi->bmi_freeblocks;

#ifdef VERBOSE_INIT_ARM
	printf("%s: memstart=%#lx, memsize=%#lx, kernelstart=%#lx\n",
	    __func__, memstart, memsize, kernelstart);
#endif

	physical_start = bmi->bmi_start = memstart;
	physical_end = bmi->bmi_end = memstart + memsize;
	physmem = memsize / PAGE_SIZE;

	/*
	 * Let's record where the kernel lives.
	 */
	bmi->bmi_kernelstart = kernelstart;
	bmi->bmi_kernelend = KERN_VTOPHYS(bmi, round_page((vaddr_t)_end));

#ifdef VERBOSE_INIT_ARM
	printf("%s: kernelend=%#lx\n", __func__, bmi->bmi_kernelend);
#endif

	/*
	 * Now the rest of the free memory must be after the kernel.
	 */
	pv->pv_pa = bmi->bmi_kernelend;
	pv->pv_va = KERN_PHYSTOV(bmi, pv->pv_pa);
	pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
	bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
	    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
	    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
#endif
	pv++;

	/*
	 * Add a free block for any memory before the kernel.
	 */
	if (bmi->bmi_start < bmi->bmi_kernelstart) {
		pv->pv_pa = bmi->bmi_start;
		pv->pv_va = KERNEL_BASE;
		pv->pv_size = bmi->bmi_kernelstart - bmi->bmi_start;
		bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
		    __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
		    pv->pv_pa + pv->pv_size - 1, pv->pv_va);
#endif
		pv++;
	}

	bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;

	SLIST_INIT(&bmi->bmi_freechunks);
	SLIST_INIT(&bmi->bmi_chunks);
}
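
/*
 * Worked example of the bookkeeping above (illustrative numbers only,
 * and KERNEL_BASE of 0x80000000 is an assumption of the example, not
 * of this file): with memstart = 0x10000000, memsize = 0x08000000
 * (128MB) and the kernel loaded at PA 0x10008000, KERN_PHYSTOV maps
 * PA 0x10008000 to VA 0x80008000.  arm32_bootmem_init() then records
 * two free blocks: one from the page-rounded _end up to PA 0x18000000,
 * and one covering [0x10000000..0x10008000) in front of the kernel.
 * All bootstrap allocations below are carved from these blocks.
 */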

static bool
concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
{
	if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
	    && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
	    && acc_pv->pv_prot == pv->pv_prot
	    && acc_pv->pv_cache == pv->pv_cache) {
#ifdef VERBOSE_INIT_ARMX
		printf("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
		    __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size - 1);
#endif
		acc_pv->pv_size += pv->pv_size;
		return true;
	}

	return false;
}

static void
add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
{
	pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
	while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
		pv_addr_t * const pv0 = (*pvp);
		KASSERT(SLIST_NEXT(pv0, pv_list) == NULL
		    || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
		if (concat_pvaddr(pv0, pv)) {
#ifdef VERBOSE_INIT_ARM
			printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
			    __func__, "appending", pv,
			    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
			    pv0->pv_pa,
			    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
#endif
			pv = SLIST_NEXT(pv0, pv_list);
			if (pv != NULL && concat_pvaddr(pv0, pv)) {
#ifdef VERBOSE_INIT_ARM
				printf("%s: %s pv %p (%#lx..%#lx) to "
				    "%#lx..%#lx\n",
				    __func__, "merging", pv,
				    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
				    pv0->pv_pa,
				    pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
#endif
				SLIST_REMOVE_AFTER(pv0, pv_list);
				SLIST_INSERT_HEAD(&bmi->bmi_freechunks,
				    pv, pv_list);
			}
			return;
		}
		KASSERT(pv->pv_va != (*pvp)->pv_va);
		pvp = &SLIST_NEXT(*pvp, pv_list);
	}
	KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
	pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
	KASSERT(new_pv != NULL);
	SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
	*new_pv = *pv;
	SLIST_NEXT(new_pv, pv_list) = *pvp;
	(*pvp) = new_pv;
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
	    __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
	    new_pv->pv_size / PAGE_SIZE);
	if (SLIST_NEXT(new_pv, pv_list))
		printf("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
	else
		printf("at tail\n");
#endif
}
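
/*
 * valloc_pages() below carves bootstrap allocations from the front of
 * the free blocks recorded by arm32_bootmem_init().  One subtlety worth
 * calling out: the L1 translation table must sit on an L1_TABLE_SIZE
 * (16KB) boundary, so the first time the cursor of a free block is
 * 16KB-aligned with at least 16KB remaining, the L1 table is allocated
 * on the spot (guarded by the static l1pt_found flag) before the
 * requested allocation is satisfied.
 */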

static void
valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
	int prot, int cache)
{
	size_t nbytes = npages * PAGE_SIZE;
	pv_addr_t *free_pv = bmi->bmi_freeblocks;
	size_t free_idx = 0;
	static bool l1pt_found;

	/*
	 * If we haven't allocated the kernel L1 page table and we are
	 * aligned at an L1 table boundary, allocate the memory for it.
	 */
	if (!l1pt_found
	    && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
	    && free_pv->pv_size >= L1_TABLE_SIZE) {
		l1pt_found = true;
		valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
		add_pages(bmi, &kernel_l1pt);
	}

	while (nbytes > free_pv->pv_size) {
		free_pv++;
		free_idx++;
		if (free_idx == bmi->bmi_nfreeblocks) {
			panic("%s: could not allocate %zu bytes",
			    __func__, nbytes);
		}
	}

	pv->pv_pa = free_pv->pv_pa;
	pv->pv_va = free_pv->pv_va;
	pv->pv_size = nbytes;
	pv->pv_prot = prot;
	pv->pv_cache = cache;

	/*
	 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE,
	 * just use PTE_CACHE.
	 */
	if (cache == PTE_PAGETABLE
	    && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
	    && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
	    && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
		pv->pv_cache = PTE_CACHE;

	free_pv->pv_pa += nbytes;
	free_pv->pv_va += nbytes;
	free_pv->pv_size -= nbytes;
	if (free_pv->pv_size == 0) {
		--bmi->bmi_nfreeblocks;
		for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
			free_pv[0] = free_pv[1];
		}
	}

	bmi->bmi_freepages -= npages;

	memset((void *)pv->pv_va, 0, nbytes);
}
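
/*
 * Sizing note for the function below (assuming 4KB pages): each
 * bmi_l2pts entry is one page worth of L2 page table and maps one
 * L2_S_SEGSIZE (4MB) window of VA space.  That is why a
 * KERNEL_L2PT_VMDATA_NUM of 8 corresponds to the "32MB of KVM" in the
 * #define above, and why KERNEL_L2PT_KERNEL_NUM is derived by dividing
 * kernel_size by L2_S_SEGSIZE, rounding up.
 */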

void
arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
	const struct pmap_devmap *devmap, bool mapallmem_p)
{
	struct bootmem_info * const bmi = &bootmem_info;
#ifdef MULTIPROCESSOR
	const size_t cpu_num = arm_cpu_max + 1;
#else
	const size_t cpu_num = 1;
#endif

	/*
	 * Calculate the number of L2 pages needed for mapping the
	 * kernel + data + stuff.
	 */
	size_t kernel_size = bmi->bmi_kernelend;
	kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	kernel_size += L1_TABLE_SIZE;
	kernel_size += round_page(MSGBUFSIZE);
	kernel_size +=
	    cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
	    + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
	kernel_size += 0x10000;	/* slop */
	kernel_size += (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;
	kernel_size = round_page(kernel_size);

	/*
	 * Now we know how many L2 pages it will take.
	 */
	const size_t KERNEL_L2PT_KERNEL_NUM =
	    (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
	    __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);
#endif

	KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM
	    < __arraycount(bmi->bmi_l2pts));
	pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
	pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
	pv_addr_t msgbuf;
	pv_addr_t text;
	pv_addr_t data;
	pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM + 11];
#if ARM_MMU_XSCALE == 1
	pv_addr_t minidataclean;
#endif

	/*
	 * We need to allocate some fixed page tables to get the kernel going.
	 *
	 * We are going to allocate our bootstrap pages from the beginning of
	 * the free space that we just calculated.  We allocate one page
	 * directory and a number of page tables and store the physical
	 * addresses in the kernel_l2pt_table array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("%s: allocating page tables for", __func__);
#endif
	for (size_t i = 0; i < __arraycount(chunks); i++) {
		SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
	}

	/*
	 * As we allocate the memory, make sure that we don't walk over
	 * our temporary first level translation table.
	 */

	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;

	/*
	 * First allocate the L2 page for the vectors.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" vector");
#endif
	valloc_pages(bmi, &bmi->bmi_vector_l2pt, L2_TABLE_SIZE / PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	add_pages(bmi, &bmi->bmi_vector_l2pt);

	/*
	 * Allocate the kernel L2 pages; if the allocator reaches a page
	 * that is aligned for an L1 page table, valloc_pages() allocates
	 * the L1 page table first and then continues with the L2 page.
	 */
#ifdef VERBOSE_INIT_ARM
	printf(" kernel");
#endif
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
		valloc_pages(bmi, &kernel_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
		add_pages(bmi, &kernel_l2pt[idx]);
	}
#ifdef VERBOSE_INIT_ARM
	printf(" vm");
#endif
	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
		valloc_pages(bmi, &vmdata_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
		add_pages(bmi, &vmdata_l2pt[idx]);
	}

	/*
	 * If someone wanted an L2 page for I/O, allocate it now.
	 */
	if (iovbase != 0) {
#ifdef VERBOSE_INIT_ARM
		printf(" io");
#endif
		valloc_pages(bmi, &bmi->bmi_io_l2pt, L2_TABLE_SIZE / PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
		add_pages(bmi, &bmi->bmi_io_l2pt);
	}

#ifdef VERBOSE_INIT_ARM
	printf("%s: allocating stacks\n", __func__);
#endif

	/* Allocate stacks for all modes */
	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &abtstack);
	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &fiqstack);
	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &irqstack);
	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &undstack);
	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,		/* SVC32 */
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &idlestack);
	valloc_pages(bmi, &kernelstack, UPAGES,			/* SVC32 */
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &kernelstack);

	/* Allocate the message buffer from the end of memory. */
	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	add_pages(bmi, &msgbuf);
	msgbufphys = msgbuf.pv_pa;
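
	/*
	 * Note that each mode stack above is allocated as a single
	 * contiguous region sized for cpu_num CPUs, i.e. cpu_num stacks
	 * laid out back to back; the idle stacks likewise get one slice
	 * per CPU.  Only the kernel (SVC) stack, which belongs to the
	 * bootstrap lwp, is allocated for a single CPU.
	 */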

	/*
	 * Allocate a page for the system vector page.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	systempage.pv_va = vectors;

	/*
	 * If the caller needed a few extra pages for some reason, allocate
	 * them now.
	 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		valloc_pages(bmi, &minidataclean, 1,
		    VM_PROT_READ|VM_PROT_WRITE, 0);
#endif

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables and stacks.  Let's just confirm that.
	 */
	if (kernel_l1pt.pv_va == 0
	    && (!kernel_l1pt.pv_pa
		|| (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
		panic("%s: Failed to allocate or align the kernel "
		    "page directory", __func__);

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	vaddr_t l1pt_va = kernel_l1pt.pv_va;
	paddr_t l1pt_pa = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
	    &bmi->bmi_vector_l2pt);
#ifdef VERBOSE_INIT_ARM
	printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx\n",
	    __func__, bmi->bmi_vector_l2pt.pv_va, bmi->bmi_vector_l2pt.pv_pa,
	    systempage.pv_va);
#endif

	const vaddr_t kernel_base =
	    KERN_PHYSTOV(bmi, bmi->bmi_kernelstart & -L2_S_SEGSIZE);
	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE,
		    &kernel_l2pt[idx]);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx\n",
		    __func__, kernel_l2pt[idx].pv_va, kernel_l2pt[idx].pv_pa,
		    kernel_base + idx * L2_S_SEGSIZE);
#endif
	}

	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
		pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE,
		    &vmdata_l2pt[idx]);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx\n",
		    __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
		    kernel_vm_base + idx * L2_S_SEGSIZE);
#endif
	}
	if (iovbase) {
		pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE,
		    &bmi->bmi_io_l2pt);
#ifdef VERBOSE_INIT_ARM
		printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx\n",
		    __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
		    iovbase & -L2_S_SEGSIZE);
#endif
	}

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);
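
	/*
	 * At this point every bootstrap L2 table is linked into the L1,
	 * so the pmap_map_chunk()/pmap_map_entry() calls below can install
	 * PTEs for any VA inside the windows set up above.
	 * pmap_curmaxkvaddr records the end of the pre-plumbed kernel VM
	 * window; growing kernel VM past that mark only requires adding
	 * more L2 tables later.
	 */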

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	extern char etext[], _end[];
	size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
	size_t textsize =
	    KERN_VTOPHYS(bmi, (uintptr_t)etext) - bmi->bmi_kernelstart;

	textsize = (textsize + PGOFSET) & ~PGOFSET;

	/* start at offset of kernel in RAM */

	text.pv_pa = bmi->bmi_kernelstart;
	text.pv_va = KERN_PHYSTOV(bmi, bmi->bmi_kernelstart);
	text.pv_size = textsize;
	text.pv_prot = VM_PROT_READ|VM_PROT_WRITE;	/* XXX VM_PROT_EXECUTE */
	text.pv_cache = PTE_CACHE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
	    __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);
#endif

	add_pages(bmi, &text);

	data.pv_pa = text.pv_pa + textsize;
	data.pv_va = text.pv_va + textsize;
	data.pv_size = totalsize - textsize;
	data.pv_prot = VM_PROT_READ|VM_PROT_WRITE;
	data.pv_cache = PTE_CACHE;

#ifdef VERBOSE_INIT_ARM
	printf("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
	    __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);
#endif

	add_pages(bmi, &data);

#ifdef VERBOSE_INIT_ARM
	printf("Listing Chunks\n");
	{
		pv_addr_t *pv;
		SLIST_FOREACH(pv, &bmi->bmi_chunks, pv_list) {
			printf("%s: pv %p: chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, pv, pv->pv_va,
			    pv->pv_va + pv->pv_size - 1,
			    pv->pv_pa, pv->pv_prot, pv->pv_cache);
		}
	}
	printf("\nMapping Chunks\n");
#endif

	pv_addr_t cur_pv;
	pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
	if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	} else {
		cur_pv.pv_va = kernel_base;
		cur_pv.pv_pa = bmi->bmi_start;
		cur_pv.pv_size = pv->pv_pa - bmi->bmi_start;
		cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
		cur_pv.pv_cache = PTE_CACHE;
	}
	while (pv != NULL) {
		if (mapallmem_p) {
			if (concat_pvaddr(&cur_pv, pv)) {
				pv = SLIST_NEXT(pv, pv_list);
				continue;
			}
			if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
				/*
				 * See if we can extend the current pv to
				 * encompass the hole, and if so do it and
				 * retry the concatenation.
				 */
				if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE)
				    && cur_pv.pv_cache == PTE_CACHE) {
					cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
					continue;
				}

				/*
				 * We couldn't, so emit the current chunk ...
				 */
#ifdef VERBOSE_INIT_ARM
				printf("%s: mapping chunk VA %#lx..%#lx "
				    "(PA %#lx, prot %d, cache %d)\n",
				    __func__,
				    cur_pv.pv_va,
				    cur_pv.pv_va + cur_pv.pv_size - 1,
				    cur_pv.pv_pa, cur_pv.pv_prot,
				    cur_pv.pv_cache);
#endif
				pmap_map_chunk(l1pt_va, cur_pv.pv_va,
				    cur_pv.pv_pa, cur_pv.pv_size,
				    cur_pv.pv_prot, cur_pv.pv_cache);

				/*
				 * ... and set the current chunk to the hole
				 * and try again.
				 */
				cur_pv.pv_pa += cur_pv.pv_size;
				cur_pv.pv_va += cur_pv.pv_size;
				cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
				cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
				cur_pv.pv_cache = PTE_CACHE;
				continue;
			}
		}

		/*
		 * The new pv didn't concatenate, so emit the current one
		 * and use the new pv as the current pv.
		 */
#ifdef VERBOSE_INIT_ARM
		printf("%s: mapping chunk VA %#lx..%#lx "
		    "(PA %#lx, prot %d, cache %d)\n",
		    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
		    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
		pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
		    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
		cur_pv = *pv;
		pv = SLIST_NEXT(pv, pv_list);
	}
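
	/*
	 * Loop invariant, for reference: cur_pv always holds a coalesced,
	 * not-yet-mapped run, and everything before it has already been
	 * handed to pmap_map_chunk().  All that can remain is cur_pv
	 * itself plus, when mapallmem_p, any RAM between the last recorded
	 * chunk and bmi_end; both are handled below.
	 */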

	/*
	 * If we are mapping all of memory, let's map the rest of memory.
	 */
	if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
		if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
		    && cur_pv.pv_cache == PTE_CACHE) {
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
		} else {
#ifdef VERBOSE_INIT_ARM
			printf("%s: mapping chunk VA %#lx..%#lx "
			    "(PA %#lx, prot %d, cache %d)\n",
			    __func__, cur_pv.pv_va,
			    cur_pv.pv_va + cur_pv.pv_size - 1,
			    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
			pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
			    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
			cur_pv.pv_pa += cur_pv.pv_size;
			cur_pv.pv_va += cur_pv.pv_size;
			cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
			cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
			cur_pv.pv_cache = PTE_CACHE;
		}
	}

	/*
	 * Now we map the final chunk.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("%s: mapping last chunk VA %#lx..%#lx "
	    "(PA %#lx, prot %d, cache %d)\n",
	    __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
	    cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
#endif
	pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
	    cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);

	/*
	 * Now we map the stuff that isn't directly after the kernel.
	 */

	/* Map the vector page. */
	pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the Mini-Data cache clean area. */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
	if (xscale_use_minidata)
#endif
		xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
		    minidataclean.pv_pa);
#endif

	/*
	 * Map integrated peripherals at the same address in the first level
	 * page table so that we can continue to use the console.
	 */
	if (devmap)
		pmap_devmap_bootstrap(l1pt_va, devmap);

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about where all the bits and pieces live. */
	printf("%22s       Physical               Virtual         Num\n", " ");
	printf("%22s Starting    Ending     Starting    Ending    Pages\n", " ");

	static const char mem_fmt[] =
	    "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
	static const char mem_fmt_nov[] =
	    "%20s: 0x%08lx 0x%08lx                       %zu\n";

	printf(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
	    KERN_PHYSTOV(bmi, bmi->bmi_start),
	    KERN_PHYSTOV(bmi, bmi->bmi_end - 1),
	    physmem);
	printf(mem_fmt, "text section",
	    text.pv_pa, text.pv_pa + text.pv_size - 1,
	    text.pv_va, text.pv_va + text.pv_size - 1,
	    (int)(text.pv_size / PAGE_SIZE));
	printf(mem_fmt, "data section",
	    KERN_VTOPHYS(bmi, __data_start), KERN_VTOPHYS(bmi, _edata),
	    (vaddr_t)__data_start, (vaddr_t)_edata,
	    (int)((round_page((vaddr_t)_edata)
		- trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
	printf(mem_fmt, "bss section",
	    KERN_VTOPHYS(bmi, __bss_start), KERN_VTOPHYS(bmi, __bss_end__),
	    (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
	    (int)((round_page((vaddr_t)__bss_end__)
		- trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
	printf(mem_fmt, "L1 page directory",
	    kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
	    kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
	    L1_TABLE_SIZE / PAGE_SIZE);
	printf(mem_fmt, "Exception Vectors",
	    systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
	    systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
	    1);
	printf(mem_fmt, "FIQ stack (CPU 0)",
	    fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
	    FIQ_STACK_SIZE);
	printf(mem_fmt, "IRQ stack (CPU 0)",
	    irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
	    IRQ_STACK_SIZE);
	printf(mem_fmt, "ABT stack (CPU 0)",
	    abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
	    ABT_STACK_SIZE);
	printf(mem_fmt, "UND stack (CPU 0)",
	    undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
	    UND_STACK_SIZE);
	printf(mem_fmt, "IDLE stack (CPU 0)",
	    idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt, "SVC stack",
	    kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
	    kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
	    UPAGES);
	printf(mem_fmt_nov, "Message Buffer",
	    msgbufphys, msgbufphys + msgbuf_pgs * PAGE_SIZE - 1, msgbuf_pgs);
	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv = &bmi->bmi_freeblocks[i];

		printf(mem_fmt_nov, "Free Memory",
		    pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
		    pv->pv_size / PAGE_SIZE);
	}
#endif
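
	/*
	 * The ordering of the switch below matters: the cache
	 * write-back/invalidate must happen while the old translation
	 * table is still live, so that everything the new table will
	 * need (including the L1/L2 tables themselves) has reached RAM,
	 * and the TLB flush must follow cpu_setttb() so no stale
	 * translations survive the switch.  The kernel text/data chunks
	 * were added via add_pages() and mapped above precisely so the
	 * code performing this switch stays mapped across it.
	 */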

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

#if defined(VERBOSE_INIT_ARM) && 0
	printf("TTBR0=%#x", armreg_ttbr_read());
#ifdef _ARM_ARCH_6
	printf(" TTBR1=%#x TTBCR=%#x",
	    armreg_ttbr1_read(), armreg_ttbcr_read());
#endif
	printf("\n");
#endif

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table @%#lx...", l1pt_pa);
#endif

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_idcache_wbinv_all();
	cpu_setttb(l1pt_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

#ifdef VERBOSE_INIT_ARM
	printf("TTBR0=%#x OK", armreg_ttbr_read());
#endif
}
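
/*
 * Typical use, as a sketch only (the memory constants and the devmap
 * table are platform assumptions, not provided by this file): a port's
 * initarm() probes RAM and then calls
 *
 *	arm32_bootmem_init(ram_start, ram_size, kernel_load_pa);
 *	arm32_kernel_vm_init(KERNEL_VM_BASE, ARM_VECTORS_HIGH, 0,
 *	    platform_devmap, true);
 *
 * after which the MMU is running on the real kernel L1 table and the
 * port can finish its machine-dependent bootstrap (UVM setup, message
 * buffer initialization, and so on).
 */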