// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "trampoline.h"

#include <inttypes.h>
#include <stddef.h>
#include <zircon/compiler.h>

// Populate the trampoline area and enter the kernel in 64-bit mode.
// Paging is already enabled.  The page tables, the kernel and ZBI images,
// and the trampoline area are all placed safely outside the kernel's
// range: PHYS_LOAD_ADDRESS + kernel image size + kernel bss size.
// Hand control to the 64-bit trampoline, which copies the kernel image to
// PHYS_LOAD_ADDRESS and jumps to its entry point with the ZBI pointer in
// %rbx.  Never returns.
//
// kernel:     ZBI kernel image (container header + payload), staged outside
//             the kernel's final load range.
// zbi:        data ZBI handed to the kernel at entry.
// trampoline: scratch area (also outside the load range) receiving the
//             trampoline code, GDT, and far-jump descriptor.
//
// Panics if the kernel's entry point does not fit in 32 bits or lies
// outside the range the image will occupy after the copy.
noreturn void boot_zbi(zircon_kernel_t* kernel, zbi_header_t* zbi,
                              struct trampoline* trampoline) {
    // The trampoline area holds the 64-bit trampoline code we'll run, the
    // GDT with the 64-bit code segment we'll run it in, and the long jump
    // descriptor we'll use to get there.
    *trampoline = (struct trampoline){
        .code = TRAMPOLINE_CODE,
        .gdt = GDT_ENTRIES,
        .ljmp = {
            // Far-jump target: the trampoline code just copied above.
            .eip = trampoline->code,
            // Selector for GDT entry 1 (TI=0, RPL=0): the 64-bit code
            // segment in the new GDT.
            .cs = 1 << 3,
        },
    };

    // The kernel image includes its own container header.
    size_t kernel_size = sizeof(kernel->hdr_file) + kernel->hdr_file.length;

    // The entry point is an absolute physical address.  The ZBI field is
    // 64 bits wide; narrowing to 32 bits must be lossless since we are
    // still executing in 32-bit mode below 4GiB.
    uint32_t kernel_entry = kernel->data_kernel.entry;
    if (unlikely(kernel_entry != kernel->data_kernel.entry)) {
        panic("ZBI kernel entry point %#llx truncated to %#"PRIx32,
              kernel->data_kernel.entry, kernel_entry);
    }
    // The entry point must land inside the image we are about to copy to
    // [PHYS_LOAD_ADDRESS, PHYS_LOAD_ADDRESS + kernel_size).
    if (unlikely(kernel_entry < (uintptr_t)PHYS_LOAD_ADDRESS ||
                 kernel_entry >= (uintptr_t)PHYS_LOAD_ADDRESS + kernel_size)) {
        panic("ZBI kernel entry point %#"PRIx32" outside kernel [%p, %p)",
              kernel_entry, PHYS_LOAD_ADDRESS,
              PHYS_LOAD_ADDRESS + kernel_size);
    }

    // The descriptor needed to load the new GDT can be placed on the stack.
    // __PACKED keeps limit and base adjacent with no padding, matching the
    // 6-byte memory operand format LGDT expects.
    const struct { uint16_t limit; void* base; } __PACKED lgdt = {
        .base = trampoline->gdt,
        .limit = sizeof(trampoline->gdt) - 1,
    };

    // Tell the compiler all of the trampoline area is read.
    // Otherwise it might conclude that only gdt and ljmp are used.
    // (The "m"(*trampoline) input forces the stores above to be emitted
    // before the asm below runs.)
    __asm__ volatile("" :: "m"(*trampoline));

    __asm__ volatile(
        // Load the GDT stored safely in the trampoline area.  We can
        // access the descriptor via the stack segment and stack pointer
        // using the Multiboot-provided flat segments.  Hereafter we can
        // use only the registers and the already-running code and data
        // segments, since there are no 32-bit segments in the new GDT.
        "lgdt %[lgdt]\n\t"
        // Jump into the 64-bit trampoline code.  The jump descriptor
        // resides in the trampoline area, so the compiler will access it
        // through a non-stack register here.
        "ljmp *%[ljmp]\n\t"
        :: [lgdt]"m"(lgdt), [ljmp]"m"(trampoline->ljmp),
         // The 64-bit trampoline code copies the kernel into place and
         // then jumps to its entry point, as instructed here:
         "D"(PHYS_LOAD_ADDRESS),       // %rdi: destination pointer
         "S"(kernel),                  // %rsi: source pointer
         "c"(kernel_size / 8),         // %rcx: count of 8-byte words
         "a"(kernel_entry),            // %rax: kernel entry point
         "b"(zbi)                      // %rbx: ZBI data pointer for kernel
        );
    // The ljmp above never falls through; let the compiler know so it
    // doesn't warn about (or emit) a return path from a noreturn function.
    __builtin_unreachable();
}
77