/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(DATA61_GPL)
 */

#include <config.h>
#include <arch/machine.h>
#include <arch/kernel/boot_sys.h>
#include <arch/kernel/smp_sys.h>
#include <smp/lock.h>

#ifdef ENABLE_SMP_SUPPORT

/* Index of the next AP to boot; the BSP has index zero */
BOOT_DATA VISIBLE
volatile word_t smp_aps_index = 1;

#ifdef CONFIG_USE_LOGICAL_IDS
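/* Record this core's logical APIC ID and, for each core that has already
 * booted, cross-link the two cores in other_indexes_in_cluster if they share
 * an APIC cluster (used for logical-destination-mode IPI delivery). */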
BOOT_CODE static void
update_logical_id_mappings(void)
{
    cpu_mapping.index_to_logical_id[getCurrentCPUIndex()] = apic_get_logical_id();

    for (int i = 0; i < smp_aps_index; i++) {
        if (apic_get_cluster(cpu_mapping.index_to_logical_id[getCurrentCPUIndex()]) ==
                apic_get_cluster(cpu_mapping.index_to_logical_id[i])) {

            cpu_mapping.other_indexes_in_cluster[getCurrentCPUIndex()] |= BIT(i);
            cpu_mapping.other_indexes_in_cluster[i] |= BIT(getCurrentCPUIndex());
        }
    }
}
#endif /* CONFIG_USE_LOGICAL_IDS */

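/* Wake a single AP via the standard INIT-SIPI sequence. boot_fun_paddr is the
 * physical address of the AP's real-mode entry point; the SIPI vector can
 * only encode a 4KiB-aligned address below 1MiB. */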
BOOT_CODE static void
start_cpu(cpu_id_t cpu_id, paddr_t boot_fun_paddr)
{
    /* memory fence to ensure all boot state is visible before starting the other CPU */
    x86_mfence();

    /* start the other CPU with the INIT-SIPI sequence */
    apic_send_init_ipi(cpu_id);
    apic_send_startup_ipi(cpu_id, boot_fun_paddr);
}

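/* Called on the BSP: boot every AP listed in boot_state.cpus */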
BOOT_CODE void
start_boot_aps(void)
{
    /* update cpu mapping for the BSP; cpus[0] is always assumed to be the BSP */
    cpu_mapping.index_to_cpu_id[getCurrentCPUIndex()] = boot_state.cpus[0];
#ifdef CONFIG_USE_LOGICAL_IDS
    cpu_mapping.index_to_logical_id[getCurrentCPUIndex()] = apic_get_logical_id();
#endif /* CONFIG_USE_LOGICAL_IDS */

    /* start the APs one at a time, as they share the kernel boot stack */
    while (smp_aps_index < boot_state.num_cpus) {
        word_t current_ap_index = smp_aps_index;

        printf("Starting node #%lu with APIC ID %lu\n",
               current_ap_index, boot_state.cpus[current_ap_index]);

        /* update the cpu mapping for this AP: APIC IDs are not necessarily
         * contiguous (e.g. 0,2,1,3 for 4 cores with hyperthreading), so we
         * store a mapping to translate a CPU index to the real APIC ID */
        cpu_mapping.index_to_cpu_id[current_ap_index] = boot_state.cpus[current_ap_index];
        start_cpu(boot_state.cpus[current_ap_index], BOOT_NODE_PADDR);

        /* wait for the current AP to boot up */
        while (smp_aps_index == current_ap_index);
    }
}

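/* Copy the AP boot trampoline (boot_cpu_start..boot_cpu_end) into the
 * low-memory hole at BOOT_NODE_PADDR. mem_lower is the amount of lower
 * memory, in KiB, as reported by multiboot. Returns false if the code does
 * not fit or the region is not known to be free. */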
BOOT_CODE bool_t
copy_boot_code_aps(uint32_t mem_lower)
{
    assert(boot_cpu_end - boot_cpu_start < 0x400);

    /* Ensure that our boot code fits in the memory hole we want to use, and check this region
     * is free according to multiboot. As boot_cpu_end and boot_cpu_start are link time
     * symbols (and not compile time) this cannot be a compile time check */
    word_t boot_size = (word_t)(boot_cpu_end - boot_cpu_start);
    word_t boot_node_top = BOOT_NODE_PADDR + boot_size;
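    /* multiboot reports mem_lower in KiB; convert it to bytes */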
    word_t mem_lower_bytes = mem_lower << 10;
    if (boot_node_top > BOOT_NODE_MAX_PADDR) {
        printf("AP boot code does not fit in chosen memory hole. Can be at most %lu, is %lu\n",
               (word_t)(BOOT_NODE_MAX_PADDR - BOOT_NODE_PADDR), boot_size);
        return false;
    }
    if (mem_lower_bytes < boot_node_top) {
        printf("Need lower physical memory up to %lu to be free. Multiboot reports only up to %lu\n",
               boot_node_top, mem_lower_bytes);
        return false;
    }

    /* copy CPU bootup code to lower memory */
    memcpy((void*)BOOT_NODE_PADDR, boot_cpu_start, boot_size);
    return true;
}

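/* Per-AP initialisation, run on the AP itself: install the kernel address
 * space, then initialise this CPU with legacy interrupts disabled. */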
static BOOT_CODE bool_t
try_boot_node(void)
{
    setCurrentVSpaceRoot(kpptr_to_paddr(X86_KERNEL_VSPACE_ROOT), 0);
    /* Sync up the compiler's view of the world here to force the PD to actually
     * be set *right now*, rather than delayed */
    asm volatile("" ::: "memory");

    /* initialise the CPU; make sure legacy interrupts are disabled */
    if (!init_cpu(1)) {
        return false;
    }

#ifdef CONFIG_USE_LOGICAL_IDS
    update_logical_id_mappings();
#endif /* CONFIG_USE_LOGICAL_IDS */
    return true;
}

/* This is the entry function for APs. However, it is not BOOT_CODE, as there
 * is a race between an AP exiting this function and the root task running on
 * node #0, which may reallocate this memory. */
VISIBLE void
boot_node(void)
{
    bool_t result;

    mode_init_tls(smp_aps_index);
    result = try_boot_node();

    if (!result) {
        fail("boot_node failed for some reason :(\n");
    }

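    /* signal the BSP, which busy-waits on smp_aps_index in start_boot_aps,
     * that this AP has booted and the shared boot stack is free again */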
    smp_aps_index++;

    /* grab the BKL before leaving the kernel */
    NODE_LOCK_SYS;

    init_core_state(SchedulerAction_ChooseNewThread);
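    /* no interrupt is being handled or pending on this freshly booted core */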
    ARCH_NODE_STATE(x86KScurInterrupt) = int_invalid;
    ARCH_NODE_STATE(x86KSPendingInterrupt) = int_invalid;

    schedule();
    activateThread();
}

#endif /* ENABLE_SMP_SUPPORT */