1/*
2 * Copyright 2019-2022, Haiku, Inc. All rights reserved.
3 * Released under the terms of the MIT License.
4*/
5
6
7#include "arch_smp.h"
8
9#include <algorithm>
10#include <string.h>
11
12#include <KernelExport.h>
13
14#include <kernel.h>
15#include <safemode.h>
16#include <boot/platform.h>
17#include <boot/stage2.h>
18#include <boot/menu.h>
19#include <platform/sbi/sbi_syscalls.h>
20
21#include "mmu.h"
22
23
24//#define TRACE_SMP
25#ifdef TRACE_SMP
26#	define TRACE(x) dprintf x
27#else
28#	define TRACE(x) ;
29#endif
30
31
// Signature of the kernel's entry point invoked on each application processor.
typedef status_t (*KernelEntry) (kernel_args *bootKernelArgs, int currentCPU);


// Boot parameters handed to a secondary hart via sbi_hart_start().
// NOTE: the byte offsets in the trailing comments are load offsets used by
// the naked-asm trampoline arch_cpu_entry(); keep field order/size in sync.
struct CpuEntryInfo {
	uint64 satp;		//  0
	uint64 stackBase;	//  8
	uint64 stackSize;	// 16
	KernelEntry kernelEntry;// 24
};
41
42
// CPU descriptors discovered during FDT parsing; sCpuCount entries are valid.
static platform_cpu_info sCpus[SMP_MAX_CPUS];
uint32 sCpuCount = 0;
45
46
47static void
48arch_cpu_dump_hart_status(uint64 status)
49{
50	switch (status) {
51		case SBI_HART_STATE_STARTED:
52			dprintf("started");
53			break;
54		case SBI_HART_STATE_STOPPED:
55			dprintf("stopped");
56			break;
57		case SBI_HART_STATE_START_PENDING:
58			dprintf("startPending");
59			break;
60		case SBI_HART_STATE_STOP_PENDING:
61			dprintf("stopPending");
62			break;
63		case SBI_HART_STATE_SUSPENDED:
64			dprintf("suspended");
65			break;
66		case SBI_HART_STATE_SUSPEND_PENDING:
67			dprintf("suspendPending");
68			break;
69		case SBI_HART_STATE_RESUME_PENDING:
70			dprintf("resumePending");
71			break;
72		default:
73			dprintf("?(%" B_PRIu64 ")", status);
74	}
75}
76
77
78static void
79arch_cpu_dump_hart()
80{
81	dprintf("  hart status:\n");
82	for (uint32 i = 0; i < sCpuCount; i++) {
83		dprintf("    hart %" B_PRIu32 ": ", i);
84		sbiret res = sbi_hart_get_status(sCpus[i].id);
85		if (res.error < 0)
86			dprintf("error: %" B_PRIu64 , res.error);
87		else {
88			arch_cpu_dump_hart_status(res.value);
89		}
90		dprintf("\n");
91	}
92}
93
94
// First code executed by a secondary hart started via sbi_hart_start().
// Runs with MMU off; a0 = hart id, a1 = physical address of CpuEntryInfo.
// Must be naked: there is no valid stack until we load one from the info
// block, so the compiler must not emit a prologue. The load offsets below
// must match the field layout of CpuEntryInfo.
static void __attribute__((naked))
arch_cpu_entry(int hartId, CpuEntryInfo* info)
{
	// enable MMU
	asm("ld t0, 0(a1)");   // CpuEntryInfo::satp
	asm("csrw satp, t0");
	asm("sfence.vma");

	// setup stack
	asm("ld sp, 8(a1)");   // CpuEntryInfo::stackBase
	asm("ld t0, 16(a1)");  // CpuEntryInfo::stackSize
	asm("add sp, sp, t0");
	asm("li fp, 0");

	// a0/a1 still hold hartId/info; continue in C code with a valid stack.
	asm("tail arch_cpu_entry2");
}
111
112
113extern "C" void
114arch_cpu_entry2(int hartId, CpuEntryInfo* info)
115{
116	dprintf("%s(%d)\n", __func__, hartId);
117
118	uint32 cpu = 0;
119	while (cpu < sCpuCount && !(sCpus[cpu].id == (uint32)hartId))
120		cpu++;
121
122	if (!(cpu < sCpuCount))
123		panic("CPU for hart id %d not found\n", hartId);
124
125	info->kernelEntry(&gKernelArgs, cpu);
126	for (;;) {}
127}
128
129
130void
131arch_smp_register_cpu(platform_cpu_info** cpu)
132{
133	dprintf("arch_smp_register_cpu()\n");
134	uint32 newCount = sCpuCount + 1;
135	if (newCount > SMP_MAX_CPUS) {
136		*cpu = NULL;
137		return;
138	}
139	*cpu = &sCpus[sCpuCount];
140	sCpuCount = newCount;
141}
142
143
144platform_cpu_info*
145arch_smp_find_cpu(uint32 phandle)
146{
147	for (uint32 i = 0; i < sCpuCount; i++) {
148		if (sCpus[i].phandle == phandle)
149			return &sCpus[i];
150	}
151	return NULL;
152}
153
154
// Returns the hart id of the CPU executing the bootloader.
// NOTE(review): this is the raw hart id, not the logical CPU index used by
// arch_cpu_entry2() — the two differ once the boot hart is swapped to slot 0.
int
arch_smp_get_current_cpu(void)
{
	return Mhartid();
}
160
161
162void
163arch_smp_init_other_cpus(void)
164{
165	gKernelArgs.num_cpus = sCpuCount;
166
167	// make boot CPU first as expected by kernel
168	for (uint32 i = 1; i < sCpuCount; i++) {
169		if (sCpus[i].id == gBootHart)
170			std::swap(sCpus[i], sCpus[0]);
171	}
172
173	for (uint32 i = 0; i < sCpuCount; i++) {
174		gKernelArgs.arch_args.hartIds[i] = sCpus[i].id;
175		gKernelArgs.arch_args.plicContexts[i] = sCpus[i].plicContext;
176	}
177
178	if (get_safemode_boolean(B_SAFEMODE_DISABLE_SMP, false)) {
179		// SMP has been disabled!
180		TRACE(("smp disabled per safemode setting\n"));
181		gKernelArgs.num_cpus = 1;
182	}
183
184	if (gKernelArgs.num_cpus < 2)
185		return;
186
187	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
188		// create a final stack the trampoline code will put the ap processor on
189		void * stack = NULL;
190		const size_t size = KERNEL_STACK_SIZE
191			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
192		if (platform_allocate_region(&stack, size, 0, false) != B_OK) {
193			panic("Unable to allocate AP stack");
194		}
195		memset(stack, 0, size);
196		gKernelArgs.cpu_kstack[i].start = fix_address((uint64_t)stack);
197		gKernelArgs.cpu_kstack[i].size = size;
198	}
199}
200
201
202void
203arch_smp_boot_other_cpus(uint64 satp, uint64 kernel_entry, addr_t virtKernelArgs)
204{
205	dprintf("arch_smp_boot_other_cpus(%p, %p)\n", (void*)satp, (void*)kernel_entry);
206
207	arch_cpu_dump_hart();
208	for (uint32 i = 0; i < sCpuCount; i++) {
209		if (sCpus[i].id != gBootHart) {
210			sbiret res;
211			dprintf("  starting CPU %" B_PRIu32 "\n", sCpus[i].id);
212
213			dprintf("  stack: %#" B_PRIx64 " - %#" B_PRIx64 "\n",
214				gKernelArgs.cpu_kstack[i].start, gKernelArgs.cpu_kstack[i].start
215				+ gKernelArgs.cpu_kstack[i].size - 1);
216
217			CpuEntryInfo* info = new(std::nothrow) CpuEntryInfo{
218				.satp = satp,
219				.stackBase = gKernelArgs.cpu_kstack[i].start,
220				.stackSize = gKernelArgs.cpu_kstack[i].size,
221				.kernelEntry = (KernelEntry)kernel_entry
222			};
223			res = sbi_hart_start(sCpus[i].id, (addr_t)&arch_cpu_entry, (addr_t)info);
224
225			for (;;) {
226				res = sbi_hart_get_status(sCpus[i].id);
227				if (res.error < 0 || res.value == SBI_HART_STATE_STARTED)
228					break;
229			}
230		}
231	}
232	arch_cpu_dump_hart();
233}
234
235
236void
237arch_smp_add_safemode_menus(Menu *menu)
238{
239	MenuItem *item;
240
241	if (gKernelArgs.num_cpus < 2)
242		return;
243
244	item = new(nothrow) MenuItem("Disable SMP");
245	menu->AddItem(item);
246	item->SetData(B_SAFEMODE_DISABLE_SMP);
247	item->SetType(MENU_ITEM_MARKABLE);
248	item->SetHelpText("Disables all but one CPU core.");
249}
250
251
// Platform hook: nothing to do on RISC-V — CPUs are registered as the FDT is
// parsed (arch_smp_register_cpu()) rather than during an explicit init pass.
void
arch_smp_init(void)
{
}
256