/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/csan.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/machdep.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#define	MP_BOOTSTACK_SIZE	(kstack_pages * PAGE_SIZE)

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

#ifdef FDT
static u_int fdt_cpuid;
#endif

void mpentry_psci(unsigned long cpuid);
void mpentry_spintable(void);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Used to initialize the PCPU ahead of calling init_secondary(). */
void *bootpcpu;
uint64_t ap_cpuid;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
static void *dpcpu[MAXCPU - 1];

static bool
is_boot_cpu(uint64_t target_cpu)
{

	return (PCPU_GET_MPIDR(cpuid_to_pcpu[0]) == (target_cpu & CPU_AFF_MASK));
}
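
/*
 * AP startup handshake: start_cpu() allocates a boot stack for each AP and
 * publishes it through bootstack/bootpcpu; the AP increments aps_started
 * once it is running on that stack, then waits in wfe until release_aps()
 * sets aps_ready and issues an sev to let it enter the scheduler.
 */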

static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/*
	 * Wake up the other CPUs. The dsb ensures the store to aps_ready
	 * is visible to the APs before the sev wakes them from wfe.
	 */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (atomic_load_acq_int(&smp_started) != 0) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
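
/* Release the APs at SI_SUB_SMP, near the end of the SYSINIT sequence. */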
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	pmap_t pmap0;
	uint64_t mpidr;

	ptrauth_mp_start(cpu);

	/*
	 * Verify that the value passed in the 'cpu' argument (aka context_id)
	 * is valid. Some older U-Boot based PSCI implementations are buggy;
	 * they can pass a random value in it.
	 */
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	if (cpu >= MAXCPU || cpuid_to_pcpu[cpu] == NULL ||
	    PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) != mpidr) {
		for (cpu = 0; cpu < mp_maxid; cpu++)
			if (cpuid_to_pcpu[cpu] != NULL &&
			    PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) == mpidr)
				break;
		if (cpu >= MAXCPU)
			panic("MPIDR for this CPU is not in pcpu table");
	}

	/*
	 * Identify the current CPU. This is necessary to set up
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 *
	 * We need this before signalling that the CPU is ready, to
	 * let the boot CPU use the results.
	 */
	pcpup = cpuid_to_pcpu[cpu];
	pcpup->pc_midr = get_midr();
	identify_cpu(cpu);

	/* Ensure the stores in identify_cpu have completed */
	atomic_thread_fence_acq_rel();

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	schedinit_ap();

	/* Initialize curpmap to match TTBR0's current setting. */
	pmap0 = vmspace_pmap(&vmspace0);
	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
	pcpup->pc_curpmap = pmap0;

	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init_secondary();
#endif

	dbg_init();
	pan_enable();

	mtx_lock_spin(&ap_boot_mtx);
	atomic_add_rel_32(&smp_cpus, 1);
	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}
	mtx_unlock_spin(&ap_boot_mtx);

	kcsan_cpu_init(cpu);

	/* Enter the scheduler */
	sched_ap_entry();

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

static void
smp_after_idle_runnable(void *arg __unused)
{
	int cpu;

	if (mp_ncpus == 1)
		return;

	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));

	/*
	 * Wait for all APs to handle an interrupt.  After that, we know that
	 * the APs have entered the scheduler at least once, so the boot stacks
	 * are safe to free.
	 */
	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
	    smp_no_rendezvous_barrier, NULL);

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		if (bootstacks[cpu] != NULL)
			kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE);
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

#ifdef DDB
	dbg_register_sync(NULL);
#endif

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

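/*
 * Describe the CPU topology to the scheduler: a single root group spanning
 * all CPUs, with one child group per VM domain whose CPUs are assumed to
 * share an L3 cache (CG_SHARE_L3).
 */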
struct cpu_group *
cpu_topo(void)
{
	struct cpu_group *dom, *root;
	int i;

	root = smp_topo_alloc(1);
	dom = smp_topo_alloc(vm_ndomains);

	root->cg_parent = NULL;
	root->cg_child = dom;
	CPU_COPY(&all_cpus, &root->cg_mask);
	root->cg_count = mp_ncpus;
	root->cg_children = vm_ndomains;
	root->cg_level = CG_SHARE_NONE;
	root->cg_flags = 0;

	/*
	 * Redundant layers will be collapsed by the caller so we don't need a
	 * special case for a single domain.
	 */
	for (i = 0; i < vm_ndomains; i++, dom++) {
		dom->cg_parent = root;
		dom->cg_child = NULL;
		CPU_COPY(&cpuset_domain[i], &dom->cg_mask);
		dom->cg_count = CPU_COUNT(&dom->cg_mask);
		dom->cg_children = 0;
		dom->cg_level = CG_SHARE_L3;
		dom->cg_flags = 0;
	}

	return (root);
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

static int
enable_cpu_psci(uint64_t target_cpu, vm_paddr_t entry, u_int cpuid)
{
	int err;

	err = psci_cpu_on(target_cpu, entry, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx), error %d\n",
		    cpuid, target_cpu, err));
		return (EINVAL);
	}

	return (0);
}

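/*
 * Start an AP via the spin-table protocol: store the physical address of the
 * kernel entry point at the firmware-provided release address, clean it out
 * of the data cache, and issue an sev to wake the AP polling that address.
 * The AP entry code is expected to clear ap_cpuid once it has consumed
 * bootpcpu and the boot stack, which is what the wfe loop below waits for.
 */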
static int
enable_cpu_spin(uint64_t cpu, vm_paddr_t entry, vm_paddr_t release_paddr)
{
	vm_paddr_t *release_addr;

	ap_cpuid = cpu & CPU_AFF_MASK;

	release_addr = pmap_mapdev_attr(release_paddr, sizeof(*release_addr),
	    VM_MEMATTR_DEFAULT);
	if (release_addr == NULL)
		return (ENOMEM);

	*release_addr = entry;
	cpu_dcache_wbinv_range(release_addr, sizeof(*release_addr));
	pmap_unmapdev(release_addr, sizeof(*release_addr));

	__asm __volatile(
	    "sev	\n"
	    ::: "memory");

	/* Wait for the target CPU to start */
	while (atomic_load_64(&ap_cpuid) != 0)
		__asm __volatile("wfe");

	return (0);
}

/*
 * Start a given CPU. If the CPU is already running, i.e. it is the boot CPU,
 * do nothing. Returns true if the CPU is present and running.
 */
static bool
start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr)
{
	struct pcpu *pcpup;
	vm_size_t size;
	vm_paddr_t pa;
	int err, naps;

	/* Check that we are able to start this CPU */
	if (cpuid > mp_maxid)
		return (false);

	/* Skip the boot CPU */
	if (is_boot_cpu(target_cpu))
		return (true);

	KASSERT(cpuid < MAXCPU, ("Too many CPUs"));

	size = round_page(sizeof(*pcpup) + DPCPU_SIZE);
	pcpup = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
	    M_WAITOK | M_ZERO);
	pmap_disable_promotion((vm_offset_t)pcpup, size);
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
	pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;
	bootpcpu = pcpup;

	dpcpu[cpuid - 1] = (void *)(pcpup + 1);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = kmem_malloc_domainset(DOMAINSET_PREF(domain),
	    MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);

	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);

	/*
	 * Due to the lack of EL3, a limited set of the hardware we support
	 * can only use spin-tables and remain useful. Thus, we'll usually
	 * fall into the PSCI branch here.
	 */
	MPASS(release_addr == 0 || !psci_present);
	if (release_addr != 0) {
		pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry_spintable);
		err = enable_cpu_spin(target_cpu, pa, release_addr);
	} else {
		pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry_psci);
		err = enable_cpu_psci(target_cpu, pa, cpuid);
	}

	if (err != 0) {
		pcpu_destroy(pcpup);
		dpcpu[cpuid - 1] = NULL;
		kmem_free(bootstacks[cpuid], MP_BOOTSTACK_SIZE);
		kmem_free(pcpup, size);
		bootstacks[cpuid] = NULL;
		mp_ncpus--;
		return (false);
	}

	/* Wait for the AP to switch to its boot stack. */
	while (atomic_load_int(&aps_started) < naps + 1)
		cpu_spinwait();
	CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
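/*
 * MADT walk handler: each GIC CPU interface (GICC) subtable describes one
 * CPU, with ArmMpidr identifying it and Uid tying it to the ACPI processor
 * object used for NUMA locality lookups.
 */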
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;
	int domain;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		if (is_boot_cpu(intr->ArmMpidr))
			id = 0;
		else
			id = *cpuid;

		domain = 0;
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(intr->Uid);
#endif
		if (start_cpu(id, intr->ArmMpidr, domain, 0)) {
			MPASS(cpuid_to_pcpu[id] != NULL);
			cpuid_to_pcpu[id]->pc_acpi_id = intr->Uid;
			/*
			 * Don't increment for the boot CPU; its CPU ID is
			 * reserved.
			 */
			if (!is_boot_cpu(intr->ArmMpidr))
				(*cpuid)++;
		}

		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}
	/* The boot CPU is always 0 */
	cpuid = 1;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
#endif

#ifdef FDT
/*
 * Failure is indicated by failing to populate *release_addr.
 */
static void
populate_release_addr(phandle_t node, vm_paddr_t *release_addr)
{
	pcell_t buf[2];

	if (OF_getencprop(node, "cpu-release-addr", buf, sizeof(buf)) !=
	    sizeof(buf))
		return;

	*release_addr = (((uintptr_t)buf[0] << 32) | buf[1]);
}

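/*
 * A hypothetical DTS fragment using the spin-table enable method handled
 * below; "cpu-release-addr" is a two-cell (64-bit) physical address:
 *
 *	cpu@1 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0 0x1>;
 *		enable-method = "spin-table";
 *		cpu-release-addr = <0x0 0x800000e8>;
 *	};
 */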
static bool
start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	vm_paddr_t release_addr;
	char *enable_method;
	int domain;
	int cpuid;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (is_boot_cpu(target_cpu))
		cpuid = 0;
	else
		cpuid = fdt_cpuid;

	/*
	 * If PSCI is present, we'll always use that -- the cpu_on method is
	 * mandated in both v0.1 and v0.2.  We'll check the enable-method if
	 * we don't have PSCI and use the spin-table if it's provided.
	 */
	release_addr = 0;
	if (!psci_present && cpuid != 0) {
		if (OF_getprop_alloc(node, "enable-method",
		    (void **)&enable_method) <= 0)
			return (false);

		if (strcmp(enable_method, "spin-table") != 0) {
			OF_prop_free(enable_method);
			return (false);
		}

		OF_prop_free(enable_method);
		populate_release_addr(node, &release_addr);
		if (release_addr == 0) {
			printf("Failed to fetch release address for CPU %u\n",
			    cpuid);
			return (false);
		}
	}

	if (!start_cpu(cpuid, target_cpu, 0, release_addr))
		return (false);

	/*
	 * Don't increment for the boot CPU; its CPU ID is reserved.
	 */
	if (!is_boot_cpu(target_cpu))
		fdt_cpuid++;

	/* Try to read the NUMA node of this CPU */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	cpuid_to_pcpu[cpuid]->pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(cpuid, &cpuset_domain[domain]);
	return (true);
}

static void
cpu_init_fdt(void)
{
	phandle_t node;
	int i;

	node = OF_peer(0);
	for (i = 0; fdt_quirks[i].compat != NULL; i++) {
		if (ofw_bus_node_is_compatible(node,
		    fdt_quirks[i].compat) != 0) {
			mp_quirks = fdt_quirks[i].quirks;
		}
	}
	fdt_cpuid = 1;
	ofw_cpu_early_foreach(start_cpu_fdt, true);
}
#endif

/* Initialize and fire up the non-boot processors */
void
cpu_mp_start(void)
{
	uint64_t mpidr;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* CPU 0 is always the boot CPU. */
	CPU_SET(0, &all_cpus);
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	cpuid_to_pcpu[0]->pc_mpidr = mpidr;

	cpu_desc_init();

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cpu_init_fdt();
		break;
#endif
	default:
		break;
	}
}

/* Introduce the rest of the cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	u_int *cores = arg;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

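/*
 * Set mp_ncpus and mp_maxid early in boot from the platform CPU tables;
 * the hw.ncpu tunable may only lower, never raise, the detected count.
 */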
void
cpu_mp_setmaxid(void)
{
	int cores;

	mp_ncpus = 1;
	mp_maxid = 0;

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(NULL, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
	default:
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		break;
	}

	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}