/*
 * Copyright 2013, winocm. <winocm@icloud.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 *   Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 *
 *   Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 *   If you are going to use this software in any form that does not involve
 *   releasing the source to this project or improving it, let me know beforehand.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * l2_map_linear_range/l2_cache_to_range are not my code.
 */

/*
 * ARM VM initialization
 */

#include <mach_debug.h>
#include <debug.h>
#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <console/serial_protos.h>
#include <mach-o/loader.h>
#include <libkern/kernel_mach_header.h>
#include <arm/pmap.h>
#include <arm/misc_protos.h>
#include <arm/low_globals.h>
#include <arm/arch.h>

#define align_down(p, s)        ((uintptr_t)(p) & ~((uintptr_t)(s) - 1))
#define align_up(p, s)          align_down((uintptr_t)(p) + (s) - 1, s)
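
/*
 * Worked example (illustrative; both macros assume 's' is a power of
 * two): align_down(0x80001234, 0x1000) == 0x80001000, and
 * align_up(0x80001234, 0x1000) == 0x80002000. Values that are already
 * aligned pass through both macros unchanged.
 */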

/*
 * cpu_ttb contains the current TTB (translation-table
 * base) of the processor. This is a physical address.
 */
uint32_t cpu_ttb;

/*
 * The section offset is set by the bootloader.
 */
uint32_t sectionOffset = 0x1000;

/*
 * sane_size, max_mem and mem_size are controlled by arm_vm_init.
 *
 * At the moment, sane_size is forced to the size of memory
 * the booter passes to the kernel.
 */
uint64_t sane_size = 0;
uint32_t mem_size = 0;

/*
 * The physical and virtual base of the kernel. These are used when
 * translating between physical and virtual kernel addresses.
 */
unsigned long gPhysBase = 0x0, gVirtBase = 0x0;
unsigned long gTopOfKernel;

addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS;

#define VM_KERNEL_BASE_ADDRESS          0x80000000

/*
 * Break down the KASLR. (Pronounced like "castle", but with a twangy accent.)
 */

/*
 * These variables are initialized during vm init.
 */
ppnum_t vm_kernel_base_page;
vm_offset_t vm_kernel_base;
vm_offset_t vm_kernel_top;
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;

/*
 * Both of these are initialized to the same value.
 */
uint64_t max_mem;
uint64_t mem_actual;

/*
 * The sectXxx stuff contains the current kernel Mach-O section information.
 */
vm_offset_t sectTEXTB;
unsigned long sectSizeTEXT;
vm_offset_t sectDATAB;
unsigned long sectSizeDATA;
vm_offset_t sectLINKB;
unsigned long sectSizeLINK;
vm_offset_t sectKLDB;
unsigned long sectSizeKLD;
vm_offset_t sectPRELINKB;
unsigned long sectSizePRELINK;
vm_offset_t sectHIBB;
unsigned long sectSizeHIB;

vm_offset_t segHIBB;
unsigned long segSizeHIB;
vm_offset_t segPRELINKB;
unsigned long segSizePRELINK;

vm_offset_t sectCONSTB;
unsigned long sectSizeConst;
boolean_t doconstro_override = FALSE;
static kernel_section_t *sectDCONST, *segDATA;

vm_offset_t end, etext, sdata, edata, sconstdata, econstdata;

extern void *ExceptionVectorsBase;
extern void *HighExceptionVectorsBase;

#define LOWGLO_BASE     0xFFFF0040
#define VECTORS_BASE    0xFFFF0000
#define MANAGED_BASE    0xC0000000  /* Can also be 0xA0000000, but iPhone OS 5 uses this address. */
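
/*
 * Rough virtual layout implied by the constants above (a sketch, not a
 * guarantee; the kernel base comes from VM_KERNEL_BASE_ADDRESS and the
 * exact extents depend on the board and boot arguments):
 *
 *   0x80000000  VM_KERNEL_BASE_ADDRESS  kernel image, identity-mapped RAM
 *   0xC0000000  MANAGED_BASE            dynamically managed kernel mappings
 *   0xFFFF0000  VECTORS_BASE            high exception vectors (one page)
 *   0xFFFF0040  LOWGLO_BASE             low globals / "Scolecit" signature
 */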

/*
 * These represent the first physical page we can use in the system and
 * the end of the physical memory region, respectively.
 */
uint32_t first_avail;
uint32_t avail_end;

/*
 * The identity base represents a mapping that maps the entire kernel region
 * to the first section of memory.
 */
uint32_t identityBaseVA, identityCachePA;

/*
 * The managed page sections are dynamically mapped to free pages.
 */
uint32_t managedBaseVA, managedCachePA;

/*
 * The exception vectors are always mapped high on ARM.
 */
uint32_t exceptionVectVA, exceptionCachePA;

/**
 * l2_map_linear_range
 *
 * Maps a linear range of physical pages starting at 'phys_start' and ending
 * at 'phys_end' (exclusive) into the L2 tables starting at physical address
 * 'pa_cache_start'. Pages are mapped cacheable (writethrough).
 */
void l2_map_linear_range(uint32_t pa_cache_start, uint32_t phys_start,
                         uint32_t phys_end)
{
    uint32_t pte_iter;
    uint32_t page_iter;
    uint32_t phys_iter;

    pte_iter = phys_to_virt(pa_cache_start);
    page_iter = (phys_end - phys_start) >> PAGE_SHIFT;
    phys_iter = phys_start;

    for (unsigned int i = 0; i < page_iter; i++) {
        unsigned int *ptv = (unsigned int *) pte_iter;

        if (phys_iter & ~L2_ADDR_MASK) {
            panic("l2_map_linear_range: Misaligned physical page!\n");
        }

        *ptv = phys_iter;

        *ptv |= L2_SMALL_PAGE;
        *ptv |= L2_ACCESS_PRW;

        /* Writethrough, no write allocate. */
        *ptv |= (MMU_TEXCB_CA_WT_NWA << 2);

        pte_iter += sizeof(unsigned int);
        phys_iter += PAGE_SIZE;
    }
}
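
/*
 * Usage sketch (illustrative only; the addresses below are made up for
 * the example, real callers derive them from the boot layout, as
 * arm_vm_init does): map 16 physical pages starting at 0x48000000
 * through L2 tables located at physical address 0x4B000000.
 */
#if 0
l2_map_linear_range(0x4B000000,                     /* PA of the L2 tables */
                    0x48000000,                     /* first physical page */
                    0x48000000 + 16 * PAGE_SIZE);   /* end, exclusive */
#endif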

/**
 * l2_map_linear_range_no_cache
 *
 * Same as l2_map_linear_range, but the pages are mapped without any
 * cacheability attributes.
 */
void l2_map_linear_range_no_cache(uint32_t pa_cache_start, uint32_t phys_start,
                                  uint32_t phys_end)
{
    uint32_t pte_iter;
    uint32_t page_iter;
    uint32_t phys_iter;

    pte_iter = phys_to_virt(pa_cache_start);
    page_iter = (phys_end - phys_start) >> PAGE_SHIFT;
    phys_iter = phys_start;

    for (unsigned int i = 0; i < page_iter; i++) {
        unsigned int *ptv = (unsigned int *) pte_iter;

        if (phys_iter & ~L2_ADDR_MASK) {
            panic("l2_map_linear_range_no_cache: Misaligned physical page!\n");
        }

        *ptv = phys_iter;

        *ptv |= L2_SMALL_PAGE;
        *ptv |= L2_ACCESS_PRW;

        pte_iter += sizeof(unsigned int);
        phys_iter += PAGE_SIZE;
    }
}

/**
 * l2_cache_to_range
 *
 * Binds a set of L2 tables starting at physical address 'pa_cache_start' to
 * the translation table at virtual address 'tteb', covering the virtual
 * range that starts at 'va' and spans 'size' bytes.
 */
void l2_cache_to_range(uint32_t pa_cache_start, uint32_t va, uint32_t tteb,
                       uint32_t size, int zero)
{
    uint32_t pte_iter = pa_cache_start;
    uint32_t tte_pbase;
    uint32_t tte_psize;

    tte_pbase = addr_to_tte(tteb, va);  /* Base of the L1 region for 'va' */
    tte_psize = ((size >> 20) << 2);    /* Size of the L1 region, in bytes */

    /*
     * We must make sure that the managed mapping is
     * cleared, as this region may have been used by the
     * bootloader, leaving some stuff in it. We do not
     * do this for the identity mapping as every single page
     * of it is mapped anyway.
     *
     * The amount zeroed is (page count * 4) bytes: one
     * 4-byte PTE per page in the region.
     */
    if (zero) {
        bzero((void *) phys_to_virt(pte_iter), ((size >> PAGE_SHIFT) << 2));
    }

    /*
     * Point an L1 entry at an L2 table for every section in the given region.
     */
    for (unsigned int tte = tte_pbase; tte < (tte_pbase + tte_psize); tte += 4) {
        unsigned int *ttv = (unsigned int *) tte;

        if (pte_iter & ~L1_PTE_ADDR_MASK) {
            panic("l2_cache_to_range: Misaligned L2 table %x!\n", pte_iter);
        }

        *ttv = pte_iter;
        *ttv |= L1_TYPE_PTE;

        pte_iter += L2_SIZE;
    }
}
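
/*
 * Worked example (a sketch; it assumes 4 KB pages, 1 MB L1 sections and
 * that L2_SIZE is the 1 KB per-section L2 table size this layout uses):
 * for a 256 MB region, tte_psize = (0x10000000 >> 20) << 2 = 1024 bytes,
 * i.e. 256 L1 entries. Each entry consumes one L2 table of
 * 256 PTEs * 4 bytes = 1 KB, so the whole walk uses 256 KB of the
 * region at 'pa_cache_start'.
 */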

/**
 * verify_lowGlo
 *
 * Verify that the ARM exception vectors are mapped in the right place. Our string
 * is "Scolecit" for right now. If they aren't, panic and halt the system.
 */
static void verify_lowGlo(void)
{
    char *lowGloString = (char *) LOWGLO_BASE;

    if (strncmp(lowGloString, "Scolecit", 8) != 0) {
        panic
            ("Invalid signature for lowGlo in vectors, got %s, was expecting %s\n",
             lowGloString, "Scolecit");
    }

    kprintf("lowGlo verification string: %s\n", lowGloString);
}

/**
 * arm_vm_init
 *
 * Initialize basic MMU mappings (L1/L2) for identity, managed and exception
 * vector page tables. Additionally, kick off necessary subsystems such as
 * kprintf so that we can get out of semihosting debug output (if enabled)
 * and onto an actual serial port or something. Whatever.
 */
void arm_vm_init(uint32_t mem_limit, boot_args *args)
{
    uint32_t gMemSize;

    /*
     * ARM vm init starting up.
     */
    kdb_printf("\tboot_args:               0x%08x\n"
               "\tboot_args->virtBase:     0x%08x\n"
               "\tboot_args->physBase:     0x%08x\n"
               "\tboot_args->topOfKernel:  0x%08x\n"
               "\tboot_args->memSize:      0x%08x\n", (unsigned int)args, args->virtBase,
               args->physBase, args->topOfKernelData, args->memSize);

    /*
     * Set up some globals.
     */
    gPhysBase = args->physBase;
    gVirtBase = args->virtBase;
    gMemSize = args->memSize;
    gTopOfKernel = args->topOfKernelData;
    max_mem = mem_size = sane_size = gMemSize;

    /*
     * Set up L2 tables for the identity mapping. The bootloader set up
     * section maps in an initial L1; we now build our own tables just
     * past the top of the kernel.
     */
    cpu_ttb = gTopOfKernel + L1_SIZE;
    bzero((void*)phys_to_virt(cpu_ttb), L1_SIZE);

    identityBaseVA = gVirtBase;
    identityCachePA = cpu_ttb + L1_SIZE;    /* After the first initial TTB. */
    kdb_printf("arm_vm_init: L2 address for identity mappings...\n"
               "\tmapping VA: 0x%08x\n" "\tmapping PA: 0x%08x\n",
               identityBaseVA, identityCachePA);

    managedBaseVA = MANAGED_BASE;
    managedCachePA = identityCachePA + l2_size(gMemSize);
    kdb_printf("arm_vm_init: L2 address for kernel managed mappings...\n"
               "\tmapping VA: 0x%08x\n" "\tmapping PA: 0x%08x\n", managedBaseVA,
               managedCachePA);

    /*
     * Bit generous..
     */
    first_avail = managedCachePA + l2_size(0x40000000);
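
    /*
     * Sketch of the physical page-table region laid out above (derived
     * from the arithmetic, so sizes are approximate where l2_size() is
     * involved):
     *
     *   gTopOfKernel + L1_SIZE     cpu_ttb, the new L1 translation table
     *   cpu_ttb + L1_SIZE          L2 tables for the identity mapping
     *   + l2_size(gMemSize)        L2 tables for the managed mapping
     *   + l2_size(0x40000000)      first_avail, first allocatable page
     */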

    /*
     * Configure L1 entries for the identity and managed mappings..
     */
    kdb_printf("arm_vm_init: configuring tables for identity mapping...\n");
    l2_cache_to_range(identityCachePA, identityBaseVA, phys_to_virt(cpu_ttb),
                      gMemSize, TRUE);

    l2_cache_to_range(managedCachePA, managedBaseVA, phys_to_virt(cpu_ttb),
                      gMemSize, TRUE);

    /*
     * Create the first identity mappings.
     */
    kdb_printf
        ("arm_vm_init: creating main identity mapping (section offset is 0x%x).\n",
         sectionOffset);
    l2_map_linear_range(identityCachePA, gPhysBase - sectionOffset,
                        gPhysBase + gMemSize);

    /*
     * Set high exception vectors.. Steal one page from first_avail.
     */
    kdb_printf("arm_vm_init: exception vectors are at 0x%08x.\n",
               (unsigned int) &ExceptionVectorsBase);

    /*
     * Map them...
     */
    uint32_t *vecpt_start = (uint32_t *) (first_avail), *vectp, *va_vecpt;
    vectp = (uint32_t *) addr_to_tte(phys_to_virt(cpu_ttb), VECTORS_BASE);
    *vectp = (((uint32_t) vecpt_start) | L1_TYPE_PTE);
    va_vecpt = (uint32_t *) (phys_to_virt((uint32_t) vecpt_start) +
                             pte_offset(VECTORS_BASE));

    /* NS-VBAR support */
#ifndef _ARM_ARCH_7
    *va_vecpt =
        virt_to_phys(&ExceptionVectorsBase) | L2_ACCESS_PRW | L2_SMALL_PAGE;
#else
    *va_vecpt =
        virt_to_phys(&HighExceptionVectorsBase) | L2_ACCESS_PRW | L2_SMALL_PAGE;
#endif
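
    /*
     * Background note (standard ARM behaviour, not specific to this
     * code): when SCTLR.V is set, the core fetches exception vectors
     * from the "high" address 0xFFFF0000 instead of 0x00000000, which
     * is why a single page is wired at VECTORS_BASE here rather than
     * remapping page zero.
     */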

    /*
     * Burn it away...
     */
#if defined(BOARD_CONFIG_S5L8930X) || defined(BOARD_CONFIG_S5L8920X) || defined(BOARD_CONFIG_S5L8922X)
    first_avail += 6144 * L1_SIZE;  /* temporary..... */
#else
    first_avail += 1 * L1_SIZE; /* temporary..... */
#endif
    avail_end = gPhysBase + gMemSize;

    /*
     * Switch to the new translation tables and flush the TLBs..
     */
    kdb_printf("arm_vm_init: switching translation-tables now...\n");
    set_mmu_ttb(cpu_ttb);
    set_mmu_ttb_alt(cpu_ttb);
    set_mmu_ttbcr(2);
    flush_mmu_tlb();
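
    /*
     * A note on the TTBCR value above (assuming standard ARMv7 VMSA
     * semantics): with TTBCR.N = 2 the split point is 2^(32-2) =
     * 0x40000000, so TTBR0 translates VAs below 0x40000000 and TTBR1
     * handles everything above it. Since both TTBRs point at the same
     * L1 table here, the split has no visible effect yet.
     */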

    /*
     * Set up state for pmap.
     */
    kernel_pmap->pm_l1_phys = cpu_ttb;
    kernel_pmap->pm_l1_virt = phys_to_virt(cpu_ttb);

    /*
     * Set up segment information.
     */
    kdb_printf("arm_vm_init: setting up segment information...\n");
    sectTEXTB =
        (vm_offset_t) (uint32_t *) getsegdatafromheader(&_mh_execute_header,
                                                        "__TEXT",
                                                        &sectSizeTEXT);
    sectDATAB =
        (vm_offset_t) (uint32_t *) getsegdatafromheader(&_mh_execute_header,
                                                        "__DATA",
                                                        &sectSizeDATA);
    sectLINKB =
        (vm_offset_t) (uint32_t *) getsegdatafromheader(&_mh_execute_header,
                                                        "__LINKEDIT",
                                                        &sectSizeLINK);
    sectKLDB =
        (vm_offset_t) (uint32_t *) getsegdatafromheader(&_mh_execute_header,
                                                        "__KLD", &sectSizeKLD);
    sectHIBB =
        (vm_offset_t) (uint32_t *) getsegdatafromheader(&_mh_execute_header,
                                                        "__HIB", &sectSizeHIB);
    sectPRELINKB =
        (vm_offset_t) (uint32_t *) getsegdatafromheader(&_mh_execute_header,
                                                        "__PRELINK_TEXT",
                                                        &sectSizePRELINK);
    etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
    edata = (vm_offset_t) sectDATAB + sectSizeDATA;
    sdata = (vm_offset_t) sectDATAB;
    end = round_page(getlastaddr());    /* Force end to next page */

    vm_kernel_slide = (gVirtBase - VM_KERNEL_BASE_ADDRESS);

    vm_kernel_etext = etext;
    vm_kernel_stext = sectTEXTB;
    vm_kernel_top = phys_to_virt(gTopOfKernel);
    vm_kernel_base = gVirtBase;

    segDATA = getsegbynamefromheader(&_mh_execute_header,
                    "__DATA");
    sectDCONST = getsectbynamefromheader(&_mh_execute_header,
                    "__DATA", "__const");

    segHIBB  = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
                    "__HIB", &segSizeHIB);
    segPRELINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
                    "__PRELINK_TEXT", &segSizePRELINK);

    sectCONSTB = (vm_offset_t) sectDCONST->addr;
    sectSizeConst = sectDCONST->size;
    sconstdata = sectCONSTB;
    econstdata = sectCONSTB + sectSizeConst;

    if (sectSizeConst & PAGE_MASK) {
        kernel_section_t *ns = nextsect(segDATA, sectDCONST);
        if (ns && !(ns->addr & PAGE_MASK))
            doconstro_override = TRUE;
    } else
        doconstro_override = TRUE;
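
    /*
     * Rationale (a sketch of the intent, matching xnu's doconstro
     * logic): __DATA,__const can only be remapped read-only if the
     * protection boundary falls on a page edge. Either the section
     * size is already a page multiple, or the next section must begin
     * page-aligned; otherwise a read-only page would cover writable
     * data that shares it.
     */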

    /*
     * Bootstrap pmap.
     */
    uint32_t bootstrap_addr = managedBaseVA + 0x1000;
    pmap_bootstrap(gMemSize, (vm_offset_t *) &bootstrap_addr, 0);

    /*
     * Subsystems.
     */
    printf_init();
    panic_init();

    PE_init_kprintf(TRUE);
    kprintf("kprintf initialized!\n");

    /*
     * Verify vectors are in the right place.
     */
    verify_lowGlo();

    return;
}