/*
 * Copyright 2020, DornerWorks
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
#include <autoconf.h>
#include <elfloader/gen_config.h>

#include <types.h>
#include <binaries/elf/elf.h>
#include <elfloader.h>
#include <abort.h>
#include <cpio/cpio.h>

#define PT_LEVEL_1 1
#define PT_LEVEL_2 2

#define PT_LEVEL_1_BITS 30
#define PT_LEVEL_2_BITS 21

#define PTE_TYPE_TABLE 0x00
#define PTE_TYPE_SRWX 0xCE
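/*
 * The low byte of a RISC-V PTE holds the flag bits defined by the
 * privileged spec: V=0x01, R=0x02, W=0x04, X=0x08, U=0x10, G=0x20,
 * A=0x40, D=0x80. As a worked example, PTE_TYPE_SRWX decomposes as
 *   0xCE == D | A | X | W | R
 * i.e. a supervisor read/write/execute leaf pre-marked accessed and
 * dirty; the V bit is OR'd in separately by PTE_CREATE_LEAF() below.
 */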

#define RISCV_PGSHIFT 12
#define RISCV_PGSIZE BIT(RISCV_PGSHIFT)

/* page table entry (PTE) field */
#define PTE_V     0x001 /* Valid */

#define PTE_PPN0_SHIFT 10

#if __riscv_xlen == 32
#define PT_INDEX_BITS  10
#else
#define PT_INDEX_BITS  9
#endif

#define PTES_PER_PT BIT(PT_INDEX_BITS)

#define PTE_CREATE_PPN(PT_BASE)  (unsigned long)(((PT_BASE) >> RISCV_PGSHIFT) << PTE_PPN0_SHIFT)
#define PTE_CREATE_NEXT(PT_BASE) (unsigned long)(PTE_CREATE_PPN(PT_BASE) | PTE_TYPE_TABLE | PTE_V)
#define PTE_CREATE_LEAF(PT_BASE) (unsigned long)(PTE_CREATE_PPN(PT_BASE) | PTE_TYPE_SRWX | PTE_V)
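/*
 * A worked example of the encoding above, using a purely illustrative
 * physical address of 0x80200000:
 *   PTE_CREATE_PPN(0x80200000)  == (0x80200000 >> 12) << 10 == 0x20080000
 *   PTE_CREATE_LEAF(0x80200000) == 0x20080000 | 0xCE | 0x01 == 0x200800CF
 */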

#define GET_PT_INDEX(addr, n) (((addr) >> (((PT_INDEX_BITS) * ((CONFIG_PT_LEVELS) - (n))) + RISCV_PGSHIFT)) % PTES_PER_PT)
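/*
 * GET_PT_INDEX() extracts the page-table index for level n by shifting
 * away the page offset plus the index bits of all lower levels. Worked
 * through for Sv39 (CONFIG_PT_LEVELS == 3, PT_INDEX_BITS == 9):
 *   GET_PT_INDEX(addr, PT_LEVEL_1) == (addr >> 30) % 512   (VA bits 38:30)
 *   GET_PT_INDEX(addr, PT_LEVEL_2) == (addr >> 21) % 512   (VA bits 29:21)
 */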

#define VIRT_PHYS_ALIGNED(virt, phys, level_bits) (IS_ALIGNED((virt), (level_bits)) && IS_ALIGNED((phys), (level_bits)))
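/*
 * A leaf PTE at a given level maps a naturally aligned block, so a
 * virt-to-phys mapping at that level is only expressible when both
 * addresses are aligned to the block size (2^level_bits bytes).
 */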

struct image_info kernel_info;
struct image_info user_info;

unsigned long l1pt[PTES_PER_PT] __attribute__((aligned(4096)));
#if __riscv_xlen == 64
unsigned long l2pt[PTES_PER_PT] __attribute__((aligned(4096)));
unsigned long l2pt_elf[PTES_PER_PT] __attribute__((aligned(4096)));
#endif
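/*
 * Page-table topology: a single root (level 1) table plus, on RV64, two
 * level 2 tables: l2pt maps the kernel window, while l2pt_elf keeps a
 * 1:1 mapping of the elfloader's own image so execution can continue
 * once translation is switched on. On RV32 the leaf mappings are
 * installed directly in the root table.
 */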

char elfloader_stack_alloc[BIT(CONFIG_KERNEL_STACK_BITS)];

/* first HART will initialise these */
void *dtb = NULL;
uint32_t dtb_size = 0;

void map_kernel_window(struct image_info *kernel_info)
{
    uint32_t index;
    unsigned long *lpt;

    /* Map the elfloader into the new address space */
    index = GET_PT_INDEX((uintptr_t)_text, PT_LEVEL_1);

#if __riscv_xlen == 32
    lpt = l1pt;
#else
    lpt = l2pt_elf;
    l1pt[index] = PTE_CREATE_NEXT((uintptr_t)l2pt_elf);
    index = GET_PT_INDEX((uintptr_t)_text, PT_LEVEL_2);
#endif
    if (IS_ALIGNED((uintptr_t)_text, PT_LEVEL_2_BITS)) {
        for (int page = 0; index < PTES_PER_PT; index++, page++) {
            lpt[index] = PTE_CREATE_LEAF((uintptr_t)_text +
                                         (page << PT_LEVEL_2_BITS));
        }
    } else {
        printf("ELF-loader not properly aligned\n");
        abort();
    }

    /* Map the kernel into the new address space */
    index = GET_PT_INDEX(kernel_info->virt_region_start, PT_LEVEL_1);

#if __riscv_xlen == 64
    lpt = l2pt;
    l1pt[index] = PTE_CREATE_NEXT((uintptr_t)l2pt);
    index = GET_PT_INDEX(kernel_info->virt_region_start, PT_LEVEL_2);
#endif
    if (VIRT_PHYS_ALIGNED(kernel_info->virt_region_start,
                          kernel_info->phys_region_start, PT_LEVEL_2_BITS)) {
        for (int page = 0; index < PTES_PER_PT; index++, page++) {
            lpt[index] = PTE_CREATE_LEAF(kernel_info->phys_region_start +
                                         (page << PT_LEVEL_2_BITS));
        }
    } else {
        printf("Kernel not properly aligned\n");
        abort();
    }
}

#if CONFIG_PT_LEVELS == 2
uint64_t vm_mode = 0x1llu << 31;
#elif CONFIG_PT_LEVELS == 3
uint64_t vm_mode = 0x8llu << 60;
#elif CONFIG_PT_LEVELS == 4
uint64_t vm_mode = 0x9llu << 60;
#else
#error "Wrong PT level"
#endif
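/*
 * These are the satp MODE values from the privileged spec: Sv32 is mode
 * 1 in bit 31 of a 32-bit satp; Sv39 and Sv48 are modes 8 and 9 in bits
 * 63:60 of a 64-bit satp. The remainder of satp holds the PPN of the
 * root page table, e.g. (illustrative, matching enable_virtual_memory()
 * below):
 *   satp = vm_mode | ((uintptr_t)l1pt >> RISCV_PGSHIFT);
 */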

#if CONFIG_MAX_NUM_NODES > 1
int secondary_go = 0;
int next_logical_core_id = 1;
int mutex = 0;
int core_ready[CONFIG_MAX_NUM_NODES] = { 0 };
static void set_and_wait_for_ready(int hart_id, int core_id)
{
    while (__atomic_exchange_n(&mutex, 1, __ATOMIC_ACQUIRE) != 0);
    printf("Hart ID %d core ID %d\n", hart_id, core_id);
    core_ready[core_id] = 1;
    __atomic_store_n(&mutex, 0, __ATOMIC_RELEASE);

    for (int i = 0; i < CONFIG_MAX_NUM_NODES; i++) {
        while (__atomic_load_n(&core_ready[i], __ATOMIC_RELAXED) == 0) ;
    }
}
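/*
 * Note on the rendezvous above: the mutex only serialises the printf
 * and the core_ready update; the final loop is the actual barrier,
 * spinning until every core has published its readiness flag.
 */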
#endif

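/* Flush the TLB and any address-translation caches (SFENCE.VMA). */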
static inline void sfence_vma(void)
{
    asm volatile("sfence.vma" ::: "memory");
}

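/* Synchronise the instruction stream with preceding stores (FENCE.I). */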
static inline void ifence(void)
{
    asm volatile("fence.i" ::: "memory");
}

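/*
 * Point satp at the root page table and turn translation on. The
 * SFENCE.VMA beforehand discards stale translations, and the FENCE.I
 * afterwards ensures instruction fetch sees the new mapping; execution
 * continues because map_kernel_window() installed a 1:1 mapping for the
 * elfloader's own image.
 */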
static inline void enable_virtual_memory(void)
{
    sfence_vma();
    asm volatile(
        "csrw satp, %0\n"
        :
        : "r"(vm_mode | ((uintptr_t)l1pt >> RISCV_PGSHIFT))
        :
    );
    ifence();
}

int num_apps = 0;
void main(int hartid, void *bootloader_dtb)
{
    printf("ELF-loader started on (HART %d) (NODES %d)\n", hartid, CONFIG_MAX_NUM_NODES);

    printf("  paddr=[%p..%p]\n", _text, _end - 1);
    /* Unpack ELF images into memory. */
    load_images(&kernel_info, &user_info, 1, &num_apps, bootloader_dtb, &dtb, &dtb_size);
    if (num_apps != 1) {
        printf("Expected exactly one user image, but %d were loaded!\n", num_apps);
        abort();
    }

    map_kernel_window(&kernel_info);

    printf("Jumping to kernel-image entry point...\n\n");

#if CONFIG_MAX_NUM_NODES > 1
    /* Unleash secondary cores */
    __atomic_store_n(&secondary_go, 1, __ATOMIC_RELEASE);
    /* Mark the current core as ready and wait for the other cores */
    set_and_wait_for_ready(hartid, 0);
#endif

    enable_virtual_memory();

    ((init_riscv_kernel_t)kernel_info.virt_entry)(user_info.phys_region_start,
                                                  user_info.phys_region_end, user_info.phys_virt_offset,
                                                  user_info.virt_entry,
                                                  (paddr_t) dtb, dtb_size
#if CONFIG_MAX_NUM_NODES > 1
                                                  ,
                                                  hartid,
                                                  0
#endif
                                                 );

    /* We should never get here. */
    printf("Kernel returned to the ELF-loader.\n");
    abort();
}

#if CONFIG_MAX_NUM_NODES > 1

void secondary_entry(int hart_id, int core_id)
{
    while (__atomic_load_n(&secondary_go, __ATOMIC_ACQUIRE) == 0) ;

    set_and_wait_for_ready(hart_id, core_id);

    enable_virtual_memory();

    ((init_riscv_kernel_t)kernel_info.virt_entry)(user_info.phys_region_start,
                                                  user_info.phys_region_end, user_info.phys_virt_offset,
                                                  user_info.virt_entry,
                                                  (paddr_t) dtb, dtb_size,
                                                  hart_id,
                                                  core_id
                                                 );
}

#endif