/*
 * Copyright 2018, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(DATA61_GPL)
 */

/*
 *
 * Copyright 2016, 2017 Hesham Almatary, Data61/CSIRO <hesham.almatary@data61.csiro.au>
 * Copyright 2015, 2016 Hesham Almatary <heshamelmatary@gmail.com>
 */

#include <assert.h>
#include <kernel/boot.h>
#include <machine/io.h>
#include <model/statedata.h>
#include <object/interrupt.h>
#include <arch/machine.h>
#include <arch/kernel/boot.h>
#include <arch/kernel/vspace.h>
#include <arch/benchmark.h>
#include <linker.h>
#include <plat/machine/hardware.h>
#include <plat/machine/fdt.h>
#include <machine.h>
#include <stdarg.h>

/* pointer to the end of boot code/data in kernel image */
/* need a fake array to get the pointer from the linker script */
extern char ki_boot_end[1];
/* pointer to end of kernel image */
extern char ki_end[1];

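/*
 * Convert the reusable boot code/data region and the remaining free memory into
 * untyped capabilities in the root CNode, and record the resulting slot region
 * in the bootinfo frame.
 */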
BOOT_CODE static bool_t
create_untypeds(cap_t root_cnode_cap, region_t boot_mem_reuse_reg)
{
    seL4_SlotPos   slot_pos_before;
    seL4_SlotPos   slot_pos_after;

    slot_pos_before = ndks_boot.slot_pos_cur;
    bool_t res = create_kernel_untypeds(root_cnode_cap, boot_mem_reuse_reg, slot_pos_before);

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->untyped = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };
    return res;
}

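/*
 * Create a frame capability for the initial thread and map it at the given
 * virtual address in the provided address space. Depending on use_large, either
 * a RISCV_Mega_Page or a RISCV_4K_Page frame is used; the mapping is always
 * read/write.
 */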
BOOT_CODE cap_t
create_mapped_it_frame_cap(cap_t pd_cap, pptr_t pptr, vptr_t vptr, asid_t asid, bool_t
                           use_large, bool_t executable)
{
    cap_t cap;
    vm_page_size_t frame_size;

    if (use_large) {
        frame_size = RISCV_Mega_Page;
    } else {
        frame_size = RISCV_4K_Page;
    }

    cap = cap_frame_cap_new(
              asid,                            /* capFMappedASID       */
              pptr,                            /* capFBasePtr          */
              frame_size,                      /* capFSize             */
              wordFromVMRights(VMReadWrite),   /* capFVMRights         */
              0,                               /* capFIsDevice         */
              vptr                             /* capFMappedAddress    */
          );

    map_it_frame_cap(pd_cap, cap);
    return cap;
}

/**
 * Split mem_reg around reserved_reg. If memory exists in the lower
 * segment, insert it into the list of free memory regions. If memory
 * exists in the upper segment, return it as the residual region.
 */
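/*
 * Illustrative example (addresses are arbitrary): with mem_reg = [0x100..0x500)
 * and reserved_reg = [0x200..0x300), the lower segment [0x100..0x200) is
 * inserted into the free memory list and the upper segment [0x300..0x500) is
 * returned as the residual region.
 */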
BOOT_CODE static region_t
insert_region_excluded(region_t mem_reg, region_t reserved_reg)
{
    region_t residual_reg = mem_reg;
    bool_t result UNUSED;

    if (reserved_reg.start < mem_reg.start) {
        /* Reserved region is below the provided mem_reg. */
        mem_reg.end = 0;
        mem_reg.start = 0;
        /* Fit the residual around the reserved region */
        if (reserved_reg.end > residual_reg.start) {
            residual_reg.start = reserved_reg.end;
        }
    } else if (mem_reg.end > reserved_reg.start) {
        /* Split mem_reg around reserved_reg */
        mem_reg.end = reserved_reg.start;
        residual_reg.start = reserved_reg.end;
    } else {
        /* reserved_reg is completely above mem_reg */
        residual_reg.start = 0;
        residual_reg.end = 0;
    }
    /* Add the lower region if it exists */
    if (mem_reg.start < mem_reg.end) {
        result = insert_region(mem_reg);
        assert(result);
    }
    /* Validate the upper region */
    if (residual_reg.start > residual_reg.end) {
        residual_reg.start = residual_reg.end;
    }

    return residual_reg;
}

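/*
 * Populate ndks_boot.freemem with all available physical memory, excluding the
 * reserved regions: everything up to the end of the DTB, the kernel image, and
 * the user image.
 */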
BOOT_CODE static void
init_freemem(region_t ui_reg, region_t dtb_reg)
{
    unsigned int i;
    bool_t result UNUSED;
    region_t cur_reg;
    region_t res_reg[] = {
        {
            // We ignore all physical memory before the end of the DTB because the current
            // riscv-pk (proxy kernel) used for loading is broken and provides an incorrect
            // memory map, claiming that the memory backing its m-mode services is free
            // physical memory. As there is no interface for determining how much memory it
            // has reserved, we simply hope that it placed the DTB after itself and exclude
            // all memory up until then.
            .start = 0,
            .end = dtb_reg.end
        },
        {
            // This looks a bit awkward as our symbols are addresses in the kernel image window,
            // but we want to do all allocations in terms of the main kernel window, so we
            // translate them here.
            .start = (pptr_t)paddr_to_pptr(kpptr_to_paddr((void*)kernelBase)),
            .end   = (pptr_t)paddr_to_pptr(kpptr_to_paddr((void*)ki_end))
        },
        {
            .start = ui_reg.start,
            .end = ui_reg.end
        }
    };

    for (i = 0; i < MAX_NUM_FREEMEM_REG; i++) {
        ndks_boot.freemem[i] = REG_EMPTY;
    }

    /* Force ordering and exclusivity of reserved regions. */
    assert(res_reg[0].start < res_reg[0].end);
    assert(res_reg[1].start < res_reg[1].end);
    assert(res_reg[2].start < res_reg[2].end);

    assert(res_reg[0].end <= res_reg[1].start);
    assert(res_reg[1].end <= res_reg[2].start);

    for (i = 0; i < get_num_avail_p_regs(); i++) {
        cur_reg = paddr_to_pptr_reg(get_avail_p_reg(i));
        /* Adjust the region if it exceeds the kernel window.
         * Note that we compare physical addresses in case of overflow.
         */
        if (pptr_to_paddr((void*)cur_reg.end) > PADDR_TOP) {
            cur_reg.end = PPTR_TOP;
        }
        if (pptr_to_paddr((void*)cur_reg.start) > PADDR_TOP) {
            cur_reg.start = PPTR_TOP;
        }

        cur_reg = insert_region_excluded(cur_reg, res_reg[0]);
        cur_reg = insert_region_excluded(cur_reg, res_reg[1]);
        cur_reg = insert_region_excluded(cur_reg, res_reg[2]);

        if (cur_reg.start != cur_reg.end) {
            result = insert_region(cur_reg);
            assert(result);
        }
    }
}

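/*
 * Mark all IRQs as inactive, set the kernel timer IRQ to IRQTimer, and provide
 * the IRQ control capability in the root CNode.
 */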
BOOT_CODE static void
init_irqs(cap_t root_cnode_cap)
{
    irq_t i;

    for (i = 0; i <= maxIRQ; i++) {
        setIRQState(IRQInactive, i);
    }
    setIRQState(IRQTimer, KERNEL_TIMER_IRQ);

    /* provide the IRQ control cap */
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIRQControl), cap_irq_control_cap_new());
}

extern char trap_entry[];

/* This and only this function initialises the CPU. It does NOT initialise any kernel state. */
BOOT_CODE static void
init_cpu(void)
{
    /* Write trap entry address to stvec */
    write_stvec((word_t)trap_entry);

    activate_kernel_vspace();
}

/* This and only this function initialises the platform. It does NOT initialise any kernel state. */

BOOT_CODE static void
init_plat(region_t dtb)
{
    parseFDT((void*)dtb.start);
    initIRQController();
    initTimer();
}

/* Main kernel initialisation function. */

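/*
 * ui_p_reg_start/ui_p_reg_end:   physical memory region occupied by the user image
 * dtb_p_reg_start/dtb_p_reg_end: physical memory region occupied by the DTB
 * pv_offset:                     offset between the user image's physical and
 *                                virtual addresses (paddr - vaddr)
 * v_entry:                       virtual address of the user image's entry point
 *
 * Returns false if any allocation or capability creation fails.
 */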
static BOOT_CODE bool_t
try_init_kernel(
    paddr_t ui_p_reg_start,
    paddr_t ui_p_reg_end,
    paddr_t dtb_p_reg_start,
    paddr_t dtb_p_reg_end,
    sword_t pv_offset,
    vptr_t  v_entry
)
{
    cap_t root_cnode_cap;
    cap_t it_pd_cap;
    cap_t it_ap_cap;
    cap_t ipcbuf_cap;
    p_region_t boot_mem_reuse_p_reg = ((p_region_t) {
        kpptr_to_paddr((void*)KERNEL_BASE), kpptr_to_paddr(ki_boot_end)
    });
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);
    region_t ui_reg = paddr_to_pptr_reg((p_region_t) {
        ui_p_reg_start, ui_p_reg_end
    });
    region_t dtb_reg = paddr_to_pptr_reg((p_region_t) {
        dtb_p_reg_start, dtb_p_reg_end
    });
    pptr_t bi_frame_pptr;
    vptr_t bi_frame_vptr;
    vptr_t ipcbuf_vptr;
    create_frames_of_region_ret_t create_frames_ret;

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_p_reg_start - pv_offset;
    ui_v_reg.end   = ui_p_reg_end   - pv_offset;

    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = bi_frame_vptr + BIT(PAGE_BITS);

    map_kernel_window();

    /* initialise the CPU */
    init_cpu();

    /* initialise the platform */
    init_plat(dtb_reg);

    /* make the free memory available to alloc_region() */
    init_freemem(ui_reg, dtb_reg);

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();
    if (cap_get_capType(root_cnode_cap) == cap_null_cap) {
        return false;
    }

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(0, CONFIG_MAX_NUM_NODES, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_pd_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_pd_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_pd_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_pd_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_pd_cap,
            ui_reg,
            true,
            pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_pd_cap);

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    tcb_t *initial = create_initial_thread(
                         root_cnode_cap,
                         it_pd_cap,
                         v_entry,
                         bi_frame_vptr,
                         ipcbuf_vptr,
                         ipcbuf_cap
                     );

    if (initial == NULL) {
        return false;
    }

    init_core_state(initial);

    /* convert the remaining free memory into UT objects and provide the caps */
    if (!create_untypeds(
                root_cnode_cap,
                boot_mem_reuse_reg)) {
        return false;
    }

    /* no shared-frame caps (RISCV has no multikernel support) */
    ndks_boot.bi_frame->sharedFrames = S_REG_EMPTY;

    /* finalise the bootinfo frame */
    bi_finalise();

    ksNumCPUs = 1;

    printf("Booting all finished, dropped to user space\n");
    return true;
}

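/*
 * Architecture entry point for kernel initialisation. The DTB resides at
 * dtb_output_p and its size is obtained via fdt_size(); hartid is currently
 * unused. If initialisation fails, the kernel halts via fail().
 */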
BOOT_CODE VISIBLE void
init_kernel(
    paddr_t ui_p_reg_start,
    paddr_t ui_p_reg_end,
    sword_t pv_offset,
    vptr_t  v_entry,
    word_t hartid,
    paddr_t dtb_output_p
)
{
    pptr_t dtb_output = (pptr_t)paddr_to_pptr(dtb_output_p);

    bool_t result = try_init_kernel(ui_p_reg_start,
                                    ui_p_reg_end,
                                    dtb_output_p,
                                    dtb_output_p + fdt_size((void*)dtb_output),
                                    pv_offset,
                                    v_entry
                                   );

    if (!result) {
        fail("Kernel init failed for some reason :(");
    }

    schedule();
    activateThread();
}