mp_x86.c revision 144637
/*-
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/mp_machdep.c 144637 2005-04-04 21:53:56Z jhb $");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT)
#error SMP not supported with CPU_DISABLE_CMPXCHG
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/smptests.h>	/** COUNT_XINVLTLB_HITS */
#include <machine/specialreg.h>
#include <machine/privatespace.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
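
/*
 * Warm-boot notes: writing BIOS_WARM (0x0a) into the CMOS shutdown status
 * byte (BIOS_RESET, offset 0x0f) tells the BIOS to resume through the
 * warm-reset vector at 40:67 (physical 0x467, mapped here as
 * WARMBOOT_OFF/WARMBOOT_SEG), which start_all_aps() points at the AP
 * trampoline installed at boot_address.
 */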

/*
 * This code MUST be enabled here and in mpboot.s.
 * It traces the very early stages of AP boot by placing values in CMOS RAM.
 * It is NORMALLY never needed, hence the primitive method for enabling it:
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS) && !defined(PC98)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D)				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S)				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)
#define CHECK_WRITE(A, D)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/* lock region used by kernel profiling */
int	mcount_lock;

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

int	mp_naps;		/* # of application processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP TLB shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap-based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	set_logical_apic_ids(void);
static int	start_all_aps(void);
static void	install_ap_tramp(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int	hlt_logical_cpus;
static struct	sysctl_ctx_list logical_cpu_clist;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	/* Logical CPUs per package: CPUID leaf 1, EBX bits 23:16. */
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
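
	/*
	 * Group present CPUs by physical package, assuming APIC IDs are
	 * allocated so that the logical CPUs of a package are contiguous:
	 * with two logical CPUs per package, present APIC IDs 0-3 form the
	 * groups {0,1} and {2,3}.
	 */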
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}

/*
 * Calculate usable address in base memory for AP trampoline code.
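 * The trampoline must sit on a page boundary below 1 MB: the STARTUP IPI
 * encodes its entry point as a real-mode 4 KB page number (see the vector
 * calculation in start_ap()).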
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	boot_address = trunc_page(basemem);	/* round down to 4k boundary */
	if ((basemem - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */

	return boot_address;
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	mp_maxid = MAXCPU - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	POSTCODE(MP_START_POST);

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLPG, IDTVEC(invlpg),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLRNG, IDTVEC(invlrng),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for lazy pmap release */
	setidt(IPI_LAZYPMAP, IDTVEC(lazypmap),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	set_logical_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	POSTCODE(MP_ANNOUNCE_POST);

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf("  cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	vm_offset_t addr;
	int	gsel_tss;
	int	x, myid;
	u_int	cr0;

	/* bootAP is set in start_ap() to our ID. */
	myid = bootAP;
	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &SMP_prvspace[myid].pcpu.pc_common_tss;
	SMP_prvspace[myid].pcpu.pc_prvspace =
		&SMP_prvspace[myid].pcpu;

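	/*
	 * Build this AP's private copy of the GDT.  GPRIV_SEL is based at
	 * the AP's private per-CPU space so that PCPU accesses resolve to
	 * this CPU's data.
	 */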
	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);
	CHECK_WRITE(0x38, 5);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;
	CHECK_WRITE(0x39, 6);

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
		invlpg(addr);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* set up SSE registers */
	enable_sse();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		printf("PTD[MPPTDI] = %#jx\n", (uintmax_t)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local APIC for IRQs */
	lapic_setup();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * Set the APIC logical IDs.
 *
 * We want to cluster logical CPUs within the same APIC ID cluster.
 * Since logical CPUs are aligned, simply filling in the clusters in
 * APIC ID order works fine.  Note that this does not try to balance
 * the number of CPUs in each cluster. (XXX?)
 */
static void
set_logical_apic_ids(void)
{
	u_int apic_id, cluster, cluster_id;

	/* Force us to allocate cluster 0 at the start. */
	cluster = -1;
	cluster_id = APIC_MAX_INTRACLUSTER_ID;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
			cluster = ioapic_next_logical_cluster();
			cluster_id = 0;
		} else
			cluster_id++;
		if (bootverbose)
			printf("APIC ID: physical %u, logical %u:%u\n",
			    apic_id, cluster, cluster_id);
		lapic_set_logical_id(apic_id, cluster, cluster_id);
	}
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
#ifndef PC98
	u_char mpbiosreason;
#endif
	u_long mpbioswarmvec;
	struct pcpu *pc;
	char *stack;
	uintptr_t kptbase;
	int i, pg, apic_id, cpu;

	POSTCODE(START_ALL_APS_POST);

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	install_ap_tramp();

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);
#endif

	/* set up temporary P==V mapping for AP boot */
	/* XXX this is a hack, we should boot the AP on its own stack/PTD */
	kptbase = (uintptr_t)(void *)KPTphys;
	for (i = 0; i < NKPT; i++)
		PTD[i] = (pd_entry_t)(PG_V | PG_RW |
		    ((kptbase + i * PAGE_SIZE) & PG_FRAME));
	invltlb();

	/* start each AP */
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {

		/* Ignore non-existent CPUs and the BSP. */
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", apic_id)) {
			cpu_info[apic_id].cpu_disabled = 1;
			mp_ncpus--;
			continue;
		}

		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* first page of AP's private space */
		pg = cpu * i386_btop(sizeof(struct privatespace));

		/* allocate a new private data page */
		pc = (struct pcpu *)kmem_alloc(kernel_map, PAGE_SIZE);

		/* wire it into the private page table page */
		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(pc));

		/* allocate and set up an idle stack data page */
		stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE); /* XXXKSE */
		for (i = 0; i < KSTACK_PAGES; i++)
			SMPpt[pg + 1 + i] = (pt_entry_t)
			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

		/* prime data page for it to use */
		pcpu_init(pc, cpu, sizeof(struct pcpu));
		pc->pc_apic_id = apic_id;

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
#ifndef PC98
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
#endif

		bootSTK = &SMP_prvspace[cpu].idlekstack[KSTACK_PAGES *
		    PAGE_SIZE];
		bootAP = cpu;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(apic_id)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");		/* show checkpoints */

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);
#endif

	/*
	 * Set up the idle context for the BSP.  Similar to above except
	 * that some was done by locore, some by pmap.c and some is implicit
	 * because the BSP is cpu#0, the page is initially zero, and we can
	 * refer to variables by name on the BSP.
	 */

	/* Allocate and setup BSP idle stack */
	stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
	for (i = 0; i < KSTACK_PAGES; i++)
		SMPpt[1 + i] = (pt_entry_t)
		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

	for (i = 0; i < NKPT; i++)
		PTD[i] = 0;
	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(void)
{
	int     x;
	int     size = *(int *) ((u_long) & bootMP_size);
	vm_offset_t va = boot_address + KERNBASE;
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) va;
	u_int   boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	KASSERT(size <= PAGE_SIZE,
	    ("'size' does not fit into PAGE_SIZE, as expected."));
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) va;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_address + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	POSTCODE(START_AP_POST);

	/* calculate the STARTUP IPI vector: the trampoline's 4 KB page number */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU, OR this INIT IPI might be latched (P5
	 * bug), with the CPU waiting for a STARTUP IPI, OR this INIT IPI
	 * might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10 ms */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP IPI terminates
	 * immediately and the previously latched INIT IPI continues, OR
	 * the previous INIT IPI has already run and this STARTUP IPI will
	 * run, OR the previous INIT IPI was ignored and this STARTUP IPI
	 * will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200 us */

	/*
	 * Finally we do a second STARTUP IPI: it should run IF the previous
	 * STARTUP IPI was cancelled by a latched INIT IPI, OR it will be
	 * ignored, as only ONE STARTUP IPI is recognized after a hardware
	 * RESET or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200 us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

#ifdef COUNT_XINVLTLB_HITS
u_int xhits_gbl[MAXCPU];
u_int xhits_pg[MAXCPU];
u_int xhits_rng[MAXCPU];
SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
    sizeof(xhits_gbl), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
    sizeof(xhits_pg), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
    sizeof(xhits_rng), "IU", "");

u_int ipi_global;
u_int ipi_page;
u_int ipi_range;
u_int ipi_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
    0, "");

u_int ipi_masked_global;
u_int ipi_masked_page;
u_int ipi_masked_range;
u_int ipi_masked_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
    &ipi_masked_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
    &ipi_masked_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
    &ipi_masked_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
    &ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
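	/*
	 * Each target CPU's shootdown handler bumps smp_tlb_wait once it has
	 * invalidated; spin until every other CPU has checked in.
	 */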
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

/*
 * This is about as magic as it gets.  fortune(1) has got similar code
 * for reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 * 	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 *
 * Another variant (also from fortune):
 * #define BITCOUNT(x) (((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
 * #define  BX_(x)     ((x) - (((x)>>1)&0x77777777)            \
 *                          - (((x)>>2)&0x33333333)            \
 *                          - (((x)>>3)&0x11111111))
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

void
smp_invltlb(void)
{
	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_global++;
#endif
	}
}

void
smp_invlpg(vm_offset_t addr)
{
	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_page++;
#endif
	}
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{
	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_range++;
		ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
smp_masked_invltlb(u_int mask)
{
	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_global++;
#endif
	}
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{
	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_page++;
#endif
	}
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{
	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_range++;
		ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
ipi_bitmap_handler(struct clockframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	/*
	 * Nothing to do for AST: taking the interrupt is enough to force
	 * an AST check on the way back out.
	 */
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}
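
	/*
	 * Bitmapped IPIs are coalesced: each request is OR'ed into the
	 * target CPU's cpu_ipi_pending word, and the IPI_BITMAP_VECTOR
	 * interrupt itself is sent only by whoever finds the word empty.
	 */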

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}
}

/*
 * send an IPI containing 'ipi' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

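	/* Refuse to halt every CPU: always leave CPU 0 (the BSP) running. */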
	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
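	/*
	 * Halt with interrupts enabled; each interrupt wakes the CPU just
	 * long enough to recheck whether it is still in hlt_cpus_mask.
	 */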
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}