/*
 * Copyright 2008, Dustin Howett, dustin.howett@gmail.com. All rights reserved.
 * Copyright 2004-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "smp.h"

#include <string.h>

#include <KernelExport.h>

#include <kernel.h>
#include <safemode.h>
#include <boot/stage2.h>
#include <boot/menu.h>
#include <arch/x86/apic.h>
#include <arch/x86/arch_acpi.h>
#include <arch/x86/arch_cpu.h>
#include <arch/x86/arch_smp.h>
#include <arch/x86/arch_system_info.h>
#include <arch/x86/descriptors.h>

#include "mmu.h"
#include "acpi.h"


#define NO_SMP 0

#define TRACE_SMP
#ifdef TRACE_SMP
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

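// Regions to scan for the MP floating pointer structure, per the Intel
// MultiProcessor Specification: the last kilobyte of base memory
// (0x9fc00-0xa0000) and the BIOS ROM area (0xf0000-0xfffff).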
static struct scan_spots_struct smp_scan_spots[] = {
	{ 0x9fc00, 0xa0000, 0xa0000 - 0x9fc00 },
	{ 0xf0000, 0x100000, 0x100000 - 0xf0000 },
	{ 0, 0, 0 }
};

extern "C" void execute_n_instructions(int count);

extern "C" void smp_trampoline(void);
extern "C" void smp_trampoline_end(void);


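// The local APIC registers are memory mapped; gKernelArgs.arch_args.apic
// holds the mapped base address, and each 32-bit register lives at a fixed
// offset from it. All accesses have to be volatile 32-bit reads and writes.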
static uint32
apic_read(uint32 offset)
{
	return *(volatile uint32 *)((addr_t)(void *)gKernelArgs.arch_args.apic + offset);
}


static void
apic_write(uint32 offset, uint32 data)
{
	*(volatile uint32 *)((addr_t)(void *)gKernelArgs.arch_args.apic + offset) = data;
}


static mp_floating_struct *
smp_mp_probe(uint32 base, uint32 limit)
{
	TRACE(("smp_mp_probe: entry base 0x%lx, limit 0x%lx\n", base, limit));
	for (uint32 *pointer = (uint32 *)base; (uint32)pointer < limit; pointer++) {
		if (*pointer == MP_FLOATING_SIGNATURE) {
			TRACE(("smp_mp_probe: found floating pointer structure at %p\n",
				pointer));
			return (mp_floating_struct *)pointer;
		}
	}

	return NULL;
}


static status_t
smp_do_mp_config(mp_floating_struct *floatingStruct)
{
	if (floatingStruct->config_length != 1) {
		TRACE(("smp: unsupported structure length of %" B_PRIu8 " units\n",
			floatingStruct->config_length));
		return B_UNSUPPORTED;
	}

	TRACE(("smp: intel mp version %s, %s",
		(floatingStruct->spec_revision == 1) ? "1.1" : "1.4",
		(floatingStruct->mp_feature_2 & 0x80)
			? "imcr and pic compatibility mode.\n"
			: "virtual wire compatibility mode.\n"));

	if (floatingStruct->config_table == NULL) {
#if 1
		// TODO: need to implement
		TRACE(("smp: standard configuration %d unimplemented\n",
			floatingStruct->mp_feature_1));
		gKernelArgs.num_cpus = 1;
		return B_OK;
#else
		// this system conforms to one of the default configurations
		TRACE(("smp: standard configuration %d\n", floatingStruct->mp_feature_1));
		gKernelArgs.num_cpus = 2;
		gKernelArgs.arch_args.cpu_apic_id[0] = 0;
		gKernelArgs.arch_args.cpu_apic_id[1] = 1;
		gKernelArgs.arch_args.apic_phys = 0xfee00000;
		gKernelArgs.arch_args.ioapic_phys = 0xfec00000;
		dprintf("smp: WARNING: standard configuration code is untested\n");
		return B_OK;
#endif
	}

	// We are not running in a standard configuration, so we have to look
	// through all of the MP configuration table entries to figure out how
	// many processors we have, where our APICs are, etc.

	mp_config_table *config = floatingStruct->config_table;
	gKernelArgs.num_cpus = 0;

	if (config->signature != MP_CONFIG_TABLE_SIGNATURE) {
		TRACE(("smp: invalid config table signature, aborting\n"));
		return B_ERROR;
	}

	if (config->base_table_length < sizeof(mp_config_table)) {
		TRACE(("smp: config table length %" B_PRIu16
			" too short for structure, aborting\n",
			config->base_table_length));
		return B_ERROR;
	}

	// print our newly found configuration
	TRACE(("smp: oem id: %.8s product id: %.12s\n", config->oem,
		config->product));
	TRACE(("smp: base table has %d entries, extended section %d bytes\n",
		config->num_base_entries, config->ext_length));

	gKernelArgs.arch_args.apic_phys = (uint32)config->apic;
	if ((gKernelArgs.arch_args.apic_phys % 4096) != 0) {
		// MP specs mandate a 4K alignment for the local APIC(s)
		TRACE(("smp: local apic %p has bad alignment, aborting\n",
			(void *)gKernelArgs.arch_args.apic_phys));
		return B_ERROR;
	}

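	// Each base table entry starts with a type byte, and the entries have
	// type-specific sizes, so the pointer is advanced by the size of the
	// entry that was just parsed.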
	char *pointer = (char *)((uint32)config + sizeof(struct mp_config_table));
	for (int32 i = 0; i < config->num_base_entries; i++) {
		switch (*pointer) {
			case MP_BASE_PROCESSOR:
			{
				struct mp_base_processor *processor
					= (struct mp_base_processor *)pointer;
				pointer += sizeof(struct mp_base_processor);

				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
						MAX_BOOT_CPUS));
					continue;
				}

				// skip if the processor is not enabled.
				if (!(processor->cpu_flags & 0x1)) {
					TRACE(("smp: skip apic id %d: disabled\n",
						processor->apic_id));
					continue;
				}

				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
					= processor->apic_id;
				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
					= processor->apic_version;

#ifdef TRACE_SMP
				const char *cpuFamily[] = { "", "", "", "", "Intel 486",
					"Intel Pentium", "Intel Pentium Pro", "Intel Pentium II" };
#endif
				TRACE(("smp: cpu#%ld: %s, apic id %d, version %d%s\n",
					gKernelArgs.num_cpus,
					cpuFamily[(processor->signature & 0xf00) >> 8],
					processor->apic_id, processor->apic_version,
					(processor->cpu_flags & 0x2) ? ", BSP" : ""));

				gKernelArgs.num_cpus++;
				break;
			}
			case MP_BASE_BUS:
			{
				struct mp_base_bus *bus = (struct mp_base_bus *)pointer;
				pointer += sizeof(struct mp_base_bus);

				TRACE(("smp: bus %d: %c%c%c%c%c%c\n", bus->bus_id,
					bus->name[0], bus->name[1], bus->name[2], bus->name[3],
					bus->name[4], bus->name[5]));

				break;
			}
			case MP_BASE_IO_APIC:
			{
				struct mp_base_ioapic *io = (struct mp_base_ioapic *)pointer;
				pointer += sizeof(struct mp_base_ioapic);

				if (gKernelArgs.arch_args.ioapic_phys == 0) {
					gKernelArgs.arch_args.ioapic_phys = (uint32)io->addr;
					if (gKernelArgs.arch_args.ioapic_phys % 1024) {
						// MP specs mandate a 1K alignment for the IO-APICs
						TRACE(("smp: io apic %p has bad alignment, aborting\n",
							(void *)gKernelArgs.arch_args.ioapic_phys));
						return B_ERROR;
					}
				}

				TRACE(("smp: found io apic with apic id %d, version %d\n",
					io->ioapic_id, io->ioapic_version));

				break;
			}
			case MP_BASE_IO_INTR:
			case MP_BASE_LOCAL_INTR:
			{
				struct mp_base_interrupt *interrupt
					= (struct mp_base_interrupt *)pointer;
				pointer += sizeof(struct mp_base_interrupt);

				dprintf("smp: %s int: type %d, source bus %d, irq %3d, dest "
					"apic %d, int %3d, polarity %d, trigger mode %d\n",
					interrupt->type == MP_BASE_IO_INTR ? "I/O" : "local",
					interrupt->interrupt_type, interrupt->source_bus_id,
					interrupt->source_bus_irq, interrupt->dest_apic_id,
					interrupt->dest_apic_int, interrupt->polarity,
					interrupt->trigger_mode);
				break;
			}
		}
	}

	if (gKernelArgs.num_cpus == 0) {
		TRACE(("smp: didn't find any processors, aborting\n"));
		return B_ERROR;
	}

	dprintf("smp: apic @ %p, i/o apic @ %p, total %ld processors detected\n",
		(void *)gKernelArgs.arch_args.apic_phys,
		(void *)gKernelArgs.arch_args.ioapic_phys,
		gKernelArgs.num_cpus);

	return B_OK;
}


static status_t
smp_do_acpi_config(void)
{
	TRACE(("smp: using ACPI to detect MP configuration\n"));

	// reset CPU count
	gKernelArgs.num_cpus = 0;

	acpi_madt *madt = (acpi_madt *)acpi_find_table(ACPI_MADT_SIGNATURE);

	if (madt == NULL) {
		TRACE(("smp: Failed to find MADT!\n"));
		return B_ERROR;
	}

	gKernelArgs.arch_args.apic_phys = madt->local_apic_address;
	TRACE(("smp: local apic address is 0x%lx\n", madt->local_apic_address));

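	// The MADT header is followed by a list of variable-length interrupt
	// controller structures; each begins with a type and a length field,
	// which is used below to advance to the next entry.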
	acpi_apic *apic = (acpi_apic *)((uint8 *)madt + sizeof(acpi_madt));
	acpi_apic *end = (acpi_apic *)((uint8 *)madt + madt->header.length);
	while (apic < end) {
		switch (apic->type) {
			case ACPI_MADT_LOCAL_APIC:
			{
				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
						MAX_BOOT_CPUS));
					break;
				}

				acpi_local_apic *localApic = (acpi_local_apic *)apic;
				TRACE(("smp: found local APIC with id %u\n",
					localApic->apic_id));
				if ((localApic->flags & ACPI_LOCAL_APIC_ENABLED) == 0) {
					TRACE(("smp: APIC is disabled and will not be used\n"));
					break;
				}

				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
					= localApic->apic_id;
				// TODO: how to find out? putting 0x10 in to indicate a local apic
				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
					= 0x10;
				gKernelArgs.num_cpus++;
				break;
			}

			case ACPI_MADT_IO_APIC: {
				acpi_io_apic *ioApic = (acpi_io_apic *)apic;
				TRACE(("smp: found io APIC with id %u and address 0x%lx\n",
					ioApic->io_apic_id, ioApic->io_apic_address));
				if (gKernelArgs.arch_args.ioapic_phys == 0)
					gKernelArgs.arch_args.ioapic_phys = ioApic->io_apic_address;
				break;
			}
			default:
				break;
		}

		apic = (acpi_apic *)((uint8 *)apic + apic->length);
	}

	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
}


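// Measures how fast the APIC timer ticks relative to system_time(): the
// timer is masked, set to divide-by-one, and started with a maximal count;
// after a fixed busy loop, the ticks that were consumed are divided by the
// elapsed wall time to obtain the ticks-per-second conversion factor.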
static void
calculate_apic_timer_conversion_factor(void)
{
	int64 t1, t2;
	uint32 config;
	uint32 count;

	// setup the timer
	config = apic_read(APIC_LVT_TIMER);
	config = (config & APIC_LVT_TIMER_MASK) + APIC_LVT_MASKED;
		// timer masked, vector 0
	apic_write(APIC_LVT_TIMER, config);

	config = (apic_read(APIC_TIMER_DIVIDE_CONFIG) & ~0x0000000f);
	apic_write(APIC_TIMER_DIVIDE_CONFIG, config | APIC_TIMER_DIVIDE_CONFIG_1);
		// divide clock by one

	t1 = system_time();
	apic_write(APIC_INITIAL_TIMER_COUNT, 0xffffffff); // start the counter

	execute_n_instructions(128 * 20000);

	count = apic_read(APIC_CURRENT_TIMER_COUNT);
	t2 = system_time();

	count = 0xffffffff - count;

	gKernelArgs.arch_args.apic_time_cv_factor
		= (uint32)((1000000.0 / (t2 - t1)) * count);

	TRACE(("APIC ticks/sec = %ld\n",
		gKernelArgs.arch_args.apic_time_cv_factor));
}


//	#pragma mark -


int
smp_get_current_cpu(void)
{
	if (gKernelArgs.arch_args.apic == NULL)
		return 0;

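	// the local APIC ID is stored in bits 24-31 of the APIC ID register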
	uint8 apicID = apic_read(APIC_ID) >> 24;
	for (uint32 i = 0; i < gKernelArgs.num_cpus; i++) {
		if (gKernelArgs.arch_args.cpu_apic_id[i] == apicID)
			return i;
	}

	return 0;
}


void
smp_init_other_cpus(void)
{
	if (get_safemode_boolean(B_SAFEMODE_DISABLE_SMP, false)) {
		// SMP has been disabled!
		TRACE(("smp disabled per safemode setting\n"));
		gKernelArgs.num_cpus = 1;
	}

	if (get_safemode_boolean(B_SAFEMODE_DISABLE_APIC, false)) {
		TRACE(("local apic disabled per safemode setting, disabling smp\n"));
		gKernelArgs.arch_args.apic_phys = 0;
		gKernelArgs.num_cpus = 1;
	}

	if (gKernelArgs.arch_args.apic_phys == 0)
		return;

	TRACE(("smp: found %ld cpu%s\n", gKernelArgs.num_cpus,
		gKernelArgs.num_cpus != 1 ? "s" : ""));
	TRACE(("smp: apic_phys = %p\n", (void *)gKernelArgs.arch_args.apic_phys));
	TRACE(("smp: ioapic_phys = %p\n",
		(void *)gKernelArgs.arch_args.ioapic_phys));

	// map in the apic
	gKernelArgs.arch_args.apic = (void *)mmu_map_physical_memory(
		gKernelArgs.arch_args.apic_phys, B_PAGE_SIZE, kDefaultPageFlags);

	TRACE(("smp: apic (mapped) = %p\n", (void *)gKernelArgs.arch_args.apic));

	// calculate how fast the apic timer is
	calculate_apic_timer_conversion_factor();

	if (gKernelArgs.num_cpus < 2)
		return;

	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		// create the final kernel stack the trampoline code will switch
		// the AP processor onto
		gKernelArgs.cpu_kstack[i].start = (addr_t)mmu_allocate(NULL,
			KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
		gKernelArgs.cpu_kstack[i].size = KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	}
}


void
smp_boot_other_cpus(void (*entryFunc)(void))
{
	if (gKernelArgs.num_cpus < 2)
		return;

	TRACE(("trampolining other cpus\n"));

	// The first 8 MB are identity mapped; 0x9e000-0x9ffff is reserved for
	// this purpose, or 0x8b000-0x8cfff when PXE services are used.

	// Allocate a stack and a code area for the SMP trampoline. These have
	// to be below 1 MB physical: 0xa0000-0xfffff is reserved by the BIOS,
	// and when PXE services are used, 0x8d000-0x9ffff is reserved as well.
#ifdef _PXE_ENV
	uint32 trampolineCode = 0x8b000;
	uint32 trampolineStack = 0x8c000;
#else
	uint32 trampolineCode = 0x9f000;
	uint32 trampolineStack = 0x9e000;
#endif

	// copy the trampoline code over
	memcpy((char *)trampolineCode, (const void*)&smp_trampoline,
		(uint32)&smp_trampoline_end - (uint32)&smp_trampoline);

	// boot the cpus
	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		uint32 *finalStack;
		uint32 *tempStack;
		uint32 config;
		uint32 numStartups;
		uint32 j;

		// set this stack up
		finalStack = (uint32 *)gKernelArgs.cpu_kstack[i].start;
		memset((uint8*)finalStack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 0,
			KERNEL_STACK_SIZE);
		tempStack = (finalStack
			+ (KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
				/ sizeof(uint32)) - 1;
		*tempStack = (uint32)entryFunc;

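		// The trampoline stack page doubles as a hand-off area: the top of
		// the page holds the final stack pointer and the page directory for
		// the AP to load, while the bottom of the page holds a GDT
		// descriptor followed by a temporary GDT, all at fixed offsets the
		// trampoline code is expected to read them from.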
		// set the trampoline stack up
		tempStack = (uint32 *)(trampolineStack + B_PAGE_SIZE - 4);
		// final location of the stack
		*tempStack = ((uint32)finalStack) + KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE - sizeof(uint32);
		tempStack--;
		// page dir
		*tempStack = x86_read_cr3() & 0xfffff000;

		// put a gdt descriptor at the bottom of the stack
		*((uint16 *)trampolineStack) = 0x18 - 1; // LIMIT
		*((uint32 *)(trampolineStack + 2)) = trampolineStack + 8;

		// construct a temporary gdt at the bottom
		segment_descriptor* tempGDT
			= (segment_descriptor*)&((uint32 *)trampolineStack)[2];
		clear_segment_descriptor(&tempGDT[0]);
		set_segment_descriptor(&tempGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_KERNEL);
		set_segment_descriptor(&tempGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_KERNEL);

		/* clear apic errors */
		if (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) {
			apic_write(APIC_ERROR_STATUS, 0);
			apic_read(APIC_ERROR_STATUS);
		}

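		// What follows is the universal startup algorithm from the Intel
		// MP specification: assert and deassert an INIT IPI to reset the
		// AP, wait 10 ms, and then (for integrated APICs) send up to two
		// STARTUP IPIs whose vector field encodes the physical page the
		// trampoline code was copied to.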
		/* send (aka assert) INIT IPI */
		TRACE(("smp: assert INIT\n"));
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config); /* set target pe */
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_INTR_COMMAND_1_ASSERT
			| APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

		// wait for pending to end
		TRACE(("smp: wait for delivery\n"));
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		/* deassert INIT */
		TRACE(("smp: deassert INIT\n"));
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config);
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

		// wait for pending to end
		TRACE(("smp: wait for delivery\n"));
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		/* wait 10ms */
		spin(10000);

		/* Is this an integrated local apic or an external 82489DX? Only
		   integrated APICs (version 0x1x) take STARTUP IPIs; the older
		   82489DX is started by the INIT sequence alone. */
		numStartups = (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0)
			? 2 : 0;
		TRACE(("smp: num startups = %ld\n", numStartups));
		for (j = 0; j < numStartups; j++) {
			/* it's a local apic, so send STARTUP IPIs */
			TRACE(("smp: send STARTUP\n"));
			apic_write(APIC_ERROR_STATUS, 0);

			/* set target pe */
			config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
				| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
			apic_write(APIC_INTR_COMMAND_2, config);

			/* send the IPI */
			config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff0f800)
				| APIC_DELIVERY_MODE_STARTUP | (trampolineCode >> 12);
			apic_write(APIC_INTR_COMMAND_1, config);

			/* wait */
			spin(200);

			TRACE(("smp: wait for delivery\n"));
			while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
				asm volatile ("pause;");
		}

		// Wait for the trampoline code to clear the final stack location.
		// This serves as a notification for us that it has loaded the address
		// and it is safe for us to overwrite it to trampoline the next CPU.
		tempStack++;
		while (*tempStack != 0)
			spin(1000);
	}

	TRACE(("done trampolining\n"));
}


void
smp_add_safemode_menus(Menu *menu)
{
	MenuItem *item;

	if (gKernelArgs.arch_args.ioapic_phys != 0) {
		menu->AddItem(item = new(nothrow) MenuItem("Disable IO-APIC"));
		item->SetType(MENU_ITEM_MARKABLE);
		item->SetData(B_SAFEMODE_DISABLE_IOAPIC);
		item->SetHelpText("Disables using the IO APIC for interrupt routing, "
			"forcing the use of the legacy PIC instead.");
	}

	if (gKernelArgs.arch_args.apic_phys != 0) {
		menu->AddItem(item = new(nothrow) MenuItem("Disable local APIC"));
		item->SetType(MENU_ITEM_MARKABLE);
		item->SetData(B_SAFEMODE_DISABLE_APIC);
		item->SetHelpText("Disables using the local APIC, also disables SMP.");
	}

	if (gKernelArgs.num_cpus < 2)
		return;

	item = new(nothrow) MenuItem("Disable SMP");
	menu->AddItem(item);
	item->SetData(B_SAFEMODE_DISABLE_SMP);
	item->SetType(MENU_ITEM_MARKABLE);
	item->SetHelpText("Disables all but one CPU core.");
}


void
smp_init(void)
{
#if NO_SMP
	gKernelArgs.num_cpus = 1;
	return;
#endif

	cpuid_info info;
	if (get_current_cpuid(&info, 1) != B_OK)
		return;

	if ((info.eax_1.features & IA32_FEATURE_APIC) == 0) {
		// Local APICs aren't present; as they form the basis for all
		// inter-CPU communication and therefore SMP, we don't need to go
		// any further.
		dprintf("no local APIC present, not attempting SMP init\n");
		return;
	}

	// First try to find ACPI tables to get the MP configuration, as ACPI
	// handles physical as well as logical MP configurations, i.e. multiple
	// CPUs, multiple cores, or hyper-threading.
	if (smp_do_acpi_config() == B_OK)
		return;

	// then try to find MPS tables and do the configuration based on them
	for (int32 i = 0; smp_scan_spots[i].length > 0; i++) {
		mp_floating_struct *floatingStruct = smp_mp_probe(
			smp_scan_spots[i].start, smp_scan_spots[i].stop);
		if (floatingStruct != NULL && smp_do_mp_config(floatingStruct) == B_OK)
			return;
	}

	// Everything failed, or we are not running an SMP system; reset anything
	// that might have been set through an incomplete configuration attempt.
	gKernelArgs.arch_args.apic_phys = 0;
	gKernelArgs.arch_args.ioapic_phys = 0;
	gKernelArgs.num_cpus = 1;
}