// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2009 Corey Tabaka
// Copyright (c) 2015 Intel Corporation
// Copyright (c) 2016 Travis Geiselbrecht
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <arch.h>
#include <arch/mmu.h>
#include <arch/mp.h>
#include <arch/ops.h>
#include <arch/x86.h>
#include <arch/x86/apic.h>
#include <arch/x86/descriptor.h>
#include <arch/x86/feature.h>
#include <arch/x86/mmu.h>
#include <arch/x86/mmu_mem_types.h>
#include <arch/x86/mp.h>
#include <arch/x86/perf_mon.h>
#include <arch/x86/proc_trace.h>
#include <arch/x86/tsc.h>
#include <assert.h>
#include <debug.h>
#include <err.h>
#include <inttypes.h>
#include <lib/console.h>
#include <lk/init.h>
#include <lk/main.h>
#include <platform.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>
#include <vm/vm.h>
#include <zircon/compiler.h>
#include <zircon/types.h>

#define LOCAL_TRACE 0

/* save a pointer to the multiboot information coming in from whoever called us */
void* _multiboot_info;

/* save a pointer to the bootdata, if present */
void* _zbi_base;

void arch_early_init(void) {
    x86_mmu_early_init();
}

void arch_init(void) {
    const struct x86_model_info* model = x86_get_model();
    printf("Processor Model Info: type %#x family %#x model %#x stepping %#x\n",
           model->processor_type, model->family, model->model, model->stepping);
    printf("\tdisplay_family %#x display_model %#x\n",
           model->display_family, model->display_model);

    x86_feature_debug();

    x86_mmu_init();

    gdt_setup();
    idt_setup_readonly();

    x86_perfmon_init();
    x86_processor_trace_init();
}

void arch_enter_uspace(uintptr_t entry_point, uintptr_t sp,
                       uintptr_t arg1, uintptr_t arg2) {
    LTRACEF("entry %#" PRIxPTR " user stack %#" PRIxPTR "\n", entry_point, sp);
    LTRACEF("kernel stack %#" PRIxPTR "\n", x86_get_percpu()->default_tss.rsp0);

    arch_disable_ints();

    /* default user space flags:
     * IOPL 0
     * Interrupts enabled
     */
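    /* With IF set, interrupts are re-enabled as soon as these flags are loaded
     * on the transition to user mode; IOPL 0 keeps port I/O and cli/sti
     * privileged. */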
    ulong flags = (0 << X86_FLAGS_IOPL_SHIFT) | X86_FLAGS_IF;

    /* check that we're probably still pointed at the kernel gs */
    DEBUG_ASSERT(is_kernel_address(read_msr(X86_MSR_IA32_GS_BASE)));

    /* check that the kernel stack is set properly */
    DEBUG_ASSERT(is_kernel_address(x86_get_percpu()->default_tss.rsp0));

    /* set up user's fs: gs: base */
    write_msr(X86_MSR_IA32_FS_BASE, 0);

    /* set the KERNEL_GS_BASE msr here, because we're going to swapgs below */
    write_msr(X86_MSR_IA32_KERNEL_GS_BASE, 0);
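    /* x86_uspace_entry is expected to swapgs on the way out, so the kernel gs
     * base (the percpu pointer) ends up in KERNEL_GS_BASE and the zero written
     * above becomes the user's gs base. */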

    x86_uspace_entry(arg1, arg2, sp, entry_point, flags);
    __UNREACHABLE;
}

void arch_suspend(void) {
    DEBUG_ASSERT(arch_ints_disabled());
    apic_io_save();
    x86_tsc_store_adjustment();
}

void arch_resume(void) {
    DEBUG_ASSERT(arch_ints_disabled());

    x86_init_percpu(0);
    x86_mmu_percpu_init();
    x86_pat_sync(cpu_num_to_mask(0));

    apic_local_init();

    // Ensure the CPU that resumed was assigned the correct percpu object.
    DEBUG_ASSERT(apic_local_id() == x86_get_percpu()->apic_id);

    apic_io_restore();
}

[[ noreturn, gnu::noinline ]] static void finish_secondary_entry(
    volatile int* aps_still_booting, thread_t* thread, uint cpu_num) {

    // Signal that this CPU is initialized.  It is important that after this
    // operation, we do not touch any resources associated with bootstrap
    // besides our thread_t and stack, since this is the checkpoint the
    // bootstrap process uses to identify completion.
    int old_val = atomic_and(aps_still_booting, ~(1U << cpu_num));
    if (old_val == 0) {
        // If the value is already zero, then booting this CPU timed out.
        goto fail;
    }
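    // The atomic_and doubles as the handshake with the bootstrap CPU: it
    // publishes that this CPU is up, and the returned old value tells us
    // whether bootstrap was still waiting for us.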

    // Defer configuring memory settings until after the atomic_and above.
    // This ensures that we were in no-fill cache mode for the duration of early
    // AP init.
    DEBUG_ASSERT(x86_get_cr0() & X86_CR0_CD);
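    // (CR0.CD being set is what "no-fill cache mode" above refers to: the
    // caches do not allocate new lines while it is set.)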
    x86_mmu_percpu_init();

    // Load the appropriate PAT/MTRRs.  This must happen after init_percpu, so
    // that this CPU is considered online.
    x86_pat_sync(1U << cpu_num);

    /* run early secondary cpu init routines up to the threading level */
    lk_init_level(LK_INIT_FLAG_SECONDARY_CPUS, LK_INIT_LEVEL_EARLIEST, LK_INIT_LEVEL_THREADING - 1);

    thread_secondary_cpu_init_early(thread);
    // The thread stacks and struct are from a single allocation; free it
    // when we exit into the scheduler.
    thread->flags |= THREAD_FLAG_FREE_STRUCT;

    lk_secondary_cpu_entry();

// lk_secondary_cpu_entry only returns on an error; halt the core in this
// case.
fail:
    arch_disable_ints();
    while (1) {
        x86_hlt();
    }
}

// This is called from assembly, before any other C code.
// The %gs.base is not set up yet, so we have to trust that
// this function is simple enough that the compiler won't
// want to generate stack-protector prologue/epilogue code,
// which would use %gs.
__NO_SAFESTACK __NO_RETURN void x86_secondary_entry(volatile int* aps_still_booting,
                                                    thread_t* thread) {
    // Would prefer this to be in init_percpu, but there is a dependency on a
    // page mapping existing, and the BP calls that before the VM subsystem is
    // initialized.
    apic_local_init();

    uint32_t local_apic_id = apic_local_id();
    int cpu_num = x86_apic_id_to_cpu_num(local_apic_id);
    if (cpu_num < 0) {
        // If we could not find our CPU number, do not proceed further
        arch_disable_ints();
        while (1) {
            x86_hlt();
        }
    }

    DEBUG_ASSERT(cpu_num > 0);

    // Set %gs.base to our percpu struct.  This has to be done before
    // calling x86_init_percpu, which initializes most of that struct, so
    // that x86_init_percpu can use safe-stack and/or stack-protector code.
    struct x86_percpu* const percpu = &ap_percpus[cpu_num - 1];
    write_msr(X86_MSR_IA32_GS_BASE, (uintptr_t)percpu);

    // Copy the stack-guard value from the boot CPU's percpu.
    percpu->stack_guard = bp_percpu.stack_guard;

#if __has_feature(safe_stack)
    // Set up the initial unsafe stack pointer.
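    // The gs-relative slot at ZX_TLS_UNSAFE_SP_OFFSET holds the SafeStack
    // unsafe stack pointer; start it at the top of this thread's unsafe
    // stack, rounded down to 16-byte alignment.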
    x86_write_gs_offset64(
        ZX_TLS_UNSAFE_SP_OFFSET,
        ROUNDDOWN(thread->stack.unsafe_base + thread->stack.size, 16));
#endif

    x86_init_percpu((uint)cpu_num);

    // Now do the rest of the work, in a function that is free to
    // use %gs in its code.
    finish_secondary_entry(aps_still_booting, thread, cpu_num);
}

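// Kernel console command, e.g. "cpu features", "cpu unplug <cpu_id>" or
// "cpu hotplug <cpu_id>", mirroring the usage text printed below.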
static int cmd_cpu(int argc, const cmd_args* argv, uint32_t flags) {
    if (argc < 2) {
        printf("not enough arguments\n");
    usage:
        printf("usage:\n");
        printf("%s features\n", argv[0].str);
        printf("%s unplug <cpu_id>\n", argv[0].str);
        printf("%s hotplug <cpu_id>\n", argv[0].str);
        return ZX_ERR_INTERNAL;
    }

    if (!strcmp(argv[1].str, "features")) {
        x86_feature_debug();
    } else if (!strcmp(argv[1].str, "unplug")) {
        if (argc < 3) {
            printf("specify a cpu_id\n");
            goto usage;
        }
        zx_status_t status = mp_unplug_cpu((uint)argv[2].u);
        printf("CPU %lu unplugged: %d\n", argv[2].u, status);
    } else if (!strcmp(argv[1].str, "hotplug")) {
        if (argc < 3) {
            printf("specify a cpu_id\n");
            goto usage;
        }
        zx_status_t status = mp_hotplug_cpu((uint)argv[2].u);
        printf("CPU %lu hotplugged: %d\n", argv[2].u, status);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return ZX_OK;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("cpu", "cpu test commands", &cmd_cpu)
#endif
STATIC_COMMAND_END(cpu);