// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <arch/mmu.h>
#include <arch/x86.h>
#include <arch/x86/apic.h>
#include <arch/x86/bootstrap16.h>
#include <arch/x86/descriptor.h>
#include <arch/x86/mmu.h>
#include <arch/x86/mp.h>
#include <assert.h>
#include <err.h>
#include <fbl/algorithm.h>
#include <fbl/auto_call.h>
#include <fbl/mutex.h>
#include <string.h>
#include <trace.h>
#include <vm/pmm.h>
#include <vm/vm.h>
#include <zircon/thread_annotations.h>
#include <zircon/types.h>

static paddr_t bootstrap_phys_addr = UINT64_MAX;
static fbl::Mutex bootstrap_lock;

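// x86_bootstrap16_init() records the physical address of a two-page region
// (one code page, one data page) set aside below 1 MiB for the real-mode
// bootstrap trampoline; CPUs woken via INIT/SIPI begin executing in real mode
// and can only start from the first megabyte.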
void x86_bootstrap16_init(paddr_t bootstrap_base) {
    // bootstrap_phys_addr still holds its UINT64_MAX sentinel (which is not
    // page-aligned) until this one-time initialization runs.
    DEBUG_ASSERT(!IS_PAGE_ALIGNED(bootstrap_phys_addr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(bootstrap_base));
    DEBUG_ASSERT(bootstrap_base <= (1024 * 1024) - 2 * PAGE_SIZE);
    bootstrap_phys_addr = bootstrap_base;
}

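// Prepares the bootstrap region for waking a CPU: builds a temporary low
// aspace with the identity mappings the trampoline needs, maps the region into
// the kernel aspace, copies the 16-bit bootstrap code into the first page, and
// fills in the x86_bootstrap16_data block in the second page.  |entry64| must
// be the kernel-virtual address of an entry point inside the bootstrap code
// blob.  On success, |bootstrap_aperture| points at the data page,
// |temp_aspace| holds the temporary aspace, and |instr_ptr| is the physical
// address the woken CPU should begin executing at.  The bootstrap region stays
// locked until x86_bootstrap16_release() is called.
//
// Rough usage sketch (illustrative only, not copied from an actual caller):
//
//   void* aperture;
//   fbl::RefPtr<VmAspace> aspace;
//   paddr_t ip;
//   if (x86_bootstrap16_acquire(entry, &aspace, &aperture, &ip) == ZX_OK) {
//       // fill in per-CPU fields in the aperture, then start the CPU at |ip|
//       x86_bootstrap16_release(aperture);
//   }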
zx_status_t x86_bootstrap16_acquire(uintptr_t entry64, fbl::RefPtr<VmAspace>* temp_aspace,
                                    void** bootstrap_aperture, paddr_t* instr_ptr)
    TA_NO_THREAD_SAFETY_ANALYSIS {
    // Make sure x86_bootstrap16_init has been called, and bail early if not.
    if (!IS_PAGE_ALIGNED(bootstrap_phys_addr)) {
        return ZX_ERR_BAD_STATE;
    }

    // Make sure the entrypoint lies within the bootstrap code that will be
    // loaded.
    if (entry64 < (uintptr_t)x86_bootstrap16_start ||
        entry64 >= (uintptr_t)x86_bootstrap16_end) {
        return ZX_ERR_INVALID_ARGS;
    }

    VmAspace* kernel_aspace = VmAspace::kernel_aspace();
    fbl::RefPtr<VmAspace> bootstrap_aspace = VmAspace::Create(VmAspace::TYPE_LOW_KERNEL,
                                                              "bootstrap16");
    if (!bootstrap_aspace) {
        return ZX_ERR_NO_MEMORY;
    }
    void* bootstrap_virt_addr = nullptr;

    // Ensure only one caller is using the bootstrap region
    bootstrap_lock.Acquire();

    // Register a cleanup AutoCall to destroy the temporary aspace, free any
    // kernel-side mapping, and drop the lock if we bail out early.
    auto ac = fbl::MakeAutoCall([&]() TA_NO_THREAD_SAFETY_ANALYSIS {
        bootstrap_aspace->Destroy();
        if (bootstrap_virt_addr) {
            kernel_aspace->FreeRegion(reinterpret_cast<vaddr_t>(bootstrap_virt_addr));
        }
        bootstrap_lock.Release();
    });

    // Actual GDT address.
    extern uint8_t _temp_gdt;
    extern uint8_t _temp_gdt_end;

    // Compute what needs to go into the mappings
    paddr_t gdt_phys_page =
        vaddr_to_paddr((void*)ROUNDDOWN((uintptr_t)&_temp_gdt, PAGE_SIZE));
    uintptr_t gdt_region_len =
        ROUNDUP((uintptr_t)&_temp_gdt_end, PAGE_SIZE) - ROUNDDOWN((uintptr_t)&_temp_gdt, PAGE_SIZE);

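    // The trampoline executes from its physical address while it enables
    // paging with the temporary aspace's page tables, so the bootstrap pages
    // and the GDT must be mapped with virtual == physical addresses there.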
    // Temporary aspace needs 5 regions mapped:
    struct map_range page_mappings[] = {
        // 1) The bootstrap code page (identity mapped)
        // 2) The bootstrap data page (identity mapped)
        {.start_vaddr = bootstrap_phys_addr, .start_paddr = bootstrap_phys_addr, .size = 2 * PAGE_SIZE},
        // 3) The page containing the GDT (identity mapped)
        {.start_vaddr = (vaddr_t)gdt_phys_page, .start_paddr = gdt_phys_page, .size = gdt_region_len},
        // These next two come implicitly from the shared kernel aspace:
        // 4) The kernel's version of the bootstrap code page (matched mapping)
        // 5) The page containing the aps_still_booting counter (matched mapping)
    };
    for (unsigned int i = 0; i < fbl::count_of(page_mappings); ++i) {
        void* vaddr = (void*)page_mappings[i].start_vaddr;
        zx_status_t status = bootstrap_aspace->AllocPhysical(
            "bootstrap_mapping",
            page_mappings[i].size,
            &vaddr,
            PAGE_SIZE_SHIFT,
            page_mappings[i].start_paddr,
            VmAspace::VMM_FLAG_VALLOC_SPECIFIC,
            ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE | ARCH_MMU_FLAG_PERM_EXECUTE);
        if (status != ZX_OK) {
            TRACEF("Failed to map page into wakeup bootstrap aspace\n");
            return status;
        }
    }

    // Map the AP bootstrap code page and the low-mem data page used to
    // configure the APs into the kernel aspace.
    zx_status_t status = kernel_aspace->AllocPhysical(
        "bootstrap16_aperture",
        PAGE_SIZE * 2,                                       // size
        &bootstrap_virt_addr,                                // requested virtual address
        PAGE_SIZE_SHIFT,                                     // alignment log2
        bootstrap_phys_addr,                                 // physical address
        0,                                                   // vmm flags
        ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE); // arch mmu flags
    if (status != ZX_OK) {
        TRACEF("could not allocate AP bootstrap page: %d\n", status);
        return status;
    }
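    // The aperture spans two pages: page 0 receives the bootstrap code copied
    // below, and page 1 holds the x86_bootstrap16_data block the woken CPU
    // reads.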
    DEBUG_ASSERT(bootstrap_virt_addr != nullptr);
    uintptr_t bootstrap_code_len = (uintptr_t)x86_bootstrap16_end -
                                   (uintptr_t)x86_bootstrap16_start;
    DEBUG_ASSERT(bootstrap_code_len <= PAGE_SIZE);
    // Copy the bootstrap code in
    memcpy(bootstrap_virt_addr, (const void*)x86_bootstrap16_start, bootstrap_code_len);

    // Configuration data shared with the APs to get them to 64-bit mode
    struct x86_bootstrap16_data* bootstrap_data =
        (struct x86_bootstrap16_data*)((uintptr_t)bootstrap_virt_addr + 0x1000);

    uintptr_t long_mode_entry = bootstrap_phys_addr +
                                (entry64 - (uintptr_t)x86_bootstrap16_start);
    ASSERT(long_mode_entry <= UINT32_MAX);

    uint64_t phys_bootstrap_pml4 = bootstrap_aspace->arch_aspace().pt_phys();
    uint64_t phys_kernel_pml4 = VmAspace::kernel_aspace()->arch_aspace().pt_phys();
    if (phys_bootstrap_pml4 > UINT32_MAX) {
        // TODO(ZX-978): Once the pmm supports it, we should request that this
        // VmAspace is backed by a low mem PML4, so we can avoid this issue.
        TRACEF("bootstrap PML4 was not allocated out of low mem\n");
        return ZX_ERR_NO_MEMORY;
    }
    ASSERT(phys_kernel_pml4 <= UINT32_MAX);

    bootstrap_data->phys_bootstrap_pml4 = static_cast<uint32_t>(phys_bootstrap_pml4);
    bootstrap_data->phys_kernel_pml4 = static_cast<uint32_t>(phys_kernel_pml4);
    bootstrap_data->phys_gdtr_limit =
        static_cast<uint16_t>(&_temp_gdt_end - &_temp_gdt - 1);
    bootstrap_data->phys_gdtr_base =
        reinterpret_cast<uintptr_t>(&_temp_gdt) -
        reinterpret_cast<uintptr_t>(__code_start) +
        get_kernel_base_phys();
    bootstrap_data->phys_long_mode_entry = static_cast<uint32_t>(long_mode_entry);
    bootstrap_data->long_mode_cs = CODE_64_SELECTOR;

    *bootstrap_aperture = (void*)((uintptr_t)bootstrap_virt_addr + 0x1000);
    *temp_aspace = bootstrap_aspace;
    *instr_ptr = bootstrap_phys_addr;

    // Cancel the cleanup AutoCall, since we're returning the new aspace and region.
    // NOTE: Since we cancel the autocall, we are not releasing
    // |bootstrap_lock|.  This is released in x86_bootstrap16_release() when the
    // caller is done with the bootstrap region.
    ac.cancel();

    return ZX_OK;
}

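// Undoes x86_bootstrap16_acquire(): frees the kernel-side aperture mapping
// (|bootstrap_aperture| points at its second page, hence the one-page rewind
// below) and drops the lock taken in acquire.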
void x86_bootstrap16_release(void* bootstrap_aperture) TA_NO_THREAD_SAFETY_ANALYSIS {
    DEBUG_ASSERT(bootstrap_aperture);
    DEBUG_ASSERT(bootstrap_lock.IsHeld());
    VmAspace* kernel_aspace = VmAspace::kernel_aspace();
    uintptr_t addr = reinterpret_cast<uintptr_t>(bootstrap_aperture) - 0x1000;
    kernel_aspace->FreeRegion(addr);

    bootstrap_lock.Release();
}