mp_machdep.c revision 137116
/*-
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/mp_machdep.c 137116 2004-11-01 22:11:27Z jhb $");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#if defined(I386_CPU) && !defined(COMPILING_LINT)
#error SMP not supported with I386_CPU
#endif
#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT)
#error SMP not supported with CPU_DISABLE_CMPXCHG
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/smptests.h>	/** COUNT_XINVLTLB_HITS */
#include <machine/specialreg.h>
#include <machine/privatespace.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
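
/*
 * Warm-boot background (standard PC architecture, noted here for clarity):
 * writing BIOS_WARM (0x0a) into the CMOS shutdown status byte (BIOS_RESET,
 * 0x0f) tells the BIOS to skip POST on the next CPU reset and jump through
 * the warm-start vector at physical 0x467 (offset) / 0x469 (segment).
 * start_all_aps() points that vector at the trampoline installed at
 * boot_address before each AP is started.
 */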

/*
 * This code MUST be enabled here and in mpboot.s.
 * It traces the very early stages of AP boot by placing values in CMOS RAM.
 * It is NORMALLY never needed, hence the primitive method for enabling it.
 *
#define CHECK_POINTS
 */
100
101#if defined(CHECK_POINTS) && !defined(PC98)
102#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
103#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
104
105#define CHECK_INIT(D);				\
106	CHECK_WRITE(0x34, (D));			\
107	CHECK_WRITE(0x35, (D));			\
108	CHECK_WRITE(0x36, (D));			\
109	CHECK_WRITE(0x37, (D));			\
110	CHECK_WRITE(0x38, (D));			\
111	CHECK_WRITE(0x39, (D));
112
113#define CHECK_PRINT(S);				\
114	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
115	   (S),					\
116	   CHECK_READ(0x34),			\
117	   CHECK_READ(0x35),			\
118	   CHECK_READ(0x36),			\
119	   CHECK_READ(0x37),			\
120	   CHECK_READ(0x38),			\
121	   CHECK_READ(0x39));
122
123#else				/* CHECK_POINTS */
124
125#define CHECK_INIT(D)
126#define CHECK_PRINT(S)
127#define CHECK_WRITE(A, D)
128
129#endif				/* CHECK_POINTS */
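
/*
 * Usage sketch for the checkpoints above: start_all_aps() seeds CMOS bytes
 * 0x34-0x39 via CHECK_INIT(99) before kicking each AP, and the early boot
 * code (mpboot.s, when similarly enabled there) plus init_secondary()
 * overwrite them stage by stage; if an AP wedges, CHECK_PRINT("trace")
 * dumps the bytes to show how far it got.
 */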

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/* lock region used by kernel profiling */
int	mcount_lock;

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

int	mp_naps;		/* # of Application processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

static u_int boot_address;

static void	set_logical_apic_ids(void);
static int	start_all_aps(void);
static void	install_ap_tramp(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int	hlt_logical_cpus;
static struct	sysctl_ctx_list logical_cpu_clist;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}
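
/*
 * Worked example of the grouping above, for a hypothetical box: with HTT
 * reporting 2 logical CPUs per package and APIC IDs 0-3 present, the loop
 * builds two groups, cg_mask 0x3 (cpus 0-1, package 0) and cg_mask 0xc
 * (cpus 2-3, package 1), so mp_top.ct_count ends up as 2.
 */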

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
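/*
 * The trampoline must be page aligned and reachable from real mode:
 * start_ap() encodes its location in the STARTUP IPI as a 4KB page number
 * (boot_address >> 12), so an address that is not page aligned or that
 * lies above 1MB cannot be expressed.
 */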
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	boot_address = trunc_page(basemem);	/* round down to 4k boundary */
	if ((basemem - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */

	return boot_address;
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	mp_maxid = MAXCPU - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	POSTCODE(MP_START_POST);

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++)
		cpu_apic_ids[i] = -1;

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLPG, IDTVEC(invlpg),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLRNG, IDTVEC(invlrng),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for forwarding hardclock() */
	setidt(IPI_HARDCLOCK, IDTVEC(hardclock),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for forwarding statclock() */
	setidt(IPI_STATCLOCK, IDTVEC(statclock),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for lazy pmap release */
	setidt(IPI_LAZYPMAP, IDTVEC(lazypmap),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for forcing an additional software trap */
	setidt(IPI_AST, IDTVEC(cpuast),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	set_logical_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	POSTCODE(MP_ANNOUNCE_POST);

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf("  cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid;
	u_int	cr0;

	/* bootAP is set in start_ap() to our ID. */
	myid = bootAP;
	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &SMP_prvspace[myid].pcpu.pc_common_tss;
	SMP_prvspace[myid].pcpu.pc_prvspace =
		&SMP_prvspace[myid].pcpu;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);
	CHECK_WRITE(0x38, 5);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;
	CHECK_WRITE(0x39, 6);

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* set up SSE registers */
	enable_sse();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		printf("PTD[MPPTDI] = %#jx\n", (uintmax_t)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local APIC for irqs */
	lapic_setup();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * Set the APIC logical IDs.
 *
 * We want to cluster logical CPUs within the same APIC ID cluster.
 * Since logical CPUs are aligned, simply filling in the clusters in
 * APIC ID order works fine.  Note that this does not try to balance
 * the number of CPUs in each cluster. (XXX?)
 */
static void
set_logical_apic_ids(void)
{
	u_int apic_id, cluster, cluster_id;

	/* Force us to allocate cluster 0 at the start. */
	cluster = -1;
	cluster_id = APIC_MAX_INTRACLUSTER_ID;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
			cluster = ioapic_next_logical_cluster();
			cluster_id = 0;
		} else
			cluster_id++;
		if (bootverbose)
			printf("APIC ID: physical %u, logical %u:%u\n",
			    apic_id, cluster, cluster_id);
		lapic_set_logical_id(apic_id, cluster, cluster_id);
	}
}
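
/*
 * Illustration, assuming APIC_MAX_INTRACLUSTER_ID is 3 (four logical IDs
 * per cluster): six present CPUs with physical APIC IDs 0-5 come out as
 * logical IDs c0:0, c0:1, c0:2, c0:3, c1:0 and c1:1, where c0 and c1 are
 * successive values handed out by ioapic_next_logical_cluster().
 */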

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
#ifndef PC98
	u_char mpbiosreason;
#endif
	u_long mpbioswarmvec;
	struct pcpu *pc;
	char *stack;
	uintptr_t kptbase;
	int i, pg, apic_id, cpu;

	POSTCODE(START_ALL_APS_POST);

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	install_ap_tramp();

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);
#endif

	/* set up temporary P==V mapping for AP boot */
	/* XXX this is a hack, we should boot the AP on its own stack/PTD */
	kptbase = (uintptr_t)(void *)KPTphys;
	for (i = 0; i < NKPT; i++)
		PTD[i] = (pd_entry_t)(PG_V | PG_RW |
		    ((kptbase + i * PAGE_SIZE) & PG_FRAME));
	invltlb();

	/* start each AP */
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {

		/* Ignore non-existent CPUs and the BSP. */
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", apic_id)) {
			cpu_info[apic_id].cpu_disabled = 1;
			mp_ncpus--;
			continue;
		}

		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* first page of AP's private space */
		pg = cpu * i386_btop(sizeof(struct privatespace));

		/* allocate a new private data page */
		pc = (struct pcpu *)kmem_alloc(kernel_map, PAGE_SIZE);

		/* wire it into the private page table page */
		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(pc));

		/* allocate and set up an idle stack data page */
		stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE); /* XXXKSE */
		for (i = 0; i < KSTACK_PAGES; i++)
			SMPpt[pg + 1 + i] = (pt_entry_t)
			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

		/* prime data page for it to use */
		pcpu_init(pc, cpu, sizeof(struct pcpu));
		pc->pc_apic_id = apic_id;

		/* set up a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
#ifndef PC98
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
#endif

		bootSTK = &SMP_prvspace[cpu].idlekstack[KSTACK_PAGES *
		    PAGE_SIZE];
		bootAP = cpu;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(apic_id)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");		/* show checkpoints */

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);
#endif

	/*
	 * Set up the idle context for the BSP.  Similar to above except
	 * that some was done by locore, some by pmap.c and some is implicit
	 * because the BSP is cpu#0 and the page is initially zero and also
	 * because we can refer to variables by name on the BSP.
	 */

	/* Allocate and set up BSP idle stack */
	stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
	for (i = 0; i < KSTACK_PAGES; i++)
		SMPpt[1 + i] = (pt_entry_t)
		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

	for (i = 0; i < NKPT; i++)
		PTD[i] = 0;
	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(void)
{
	int     x;
	int     size = *(int *) ((u_long) & bootMP_size);
	vm_offset_t va = boot_address + KERNBASE;
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) va;
	u_int   boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	KASSERT (size <= PAGE_SIZE,
	    ("'size' does not fit into PAGE_SIZE, as expected."));
	pmap_kenter(va, boot_address);
	pmap_invalidate_page (kernel_pmap, va);
	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */
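
	/*
	 * Why the rebasing below is needed: the copy above went through the
	 * kernel VA alias (boot_address + KERNBASE), but the AP executes the
	 * trampoline in real mode at physical boot_address.  Every absolute
	 * reference must therefore be patched from its link address in
	 * mpboot.s (boot_base) to its run-time home at boot_address.
	 */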

	/* boot code is located in KERNEL space */
	dst = (u_char *) va;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_address + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	POSTCODE(START_AP_POST);

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might be
	 * latched (P5 bug), the CPU waiting for a STARTUP IPI.  OR this
	 * INIT IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10ms */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug); this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this STARTUP
	 * IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run
	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI.
	 * OR this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

#ifdef COUNT_XINVLTLB_HITS
u_int xhits_gbl[MAXCPU];
u_int xhits_pg[MAXCPU];
u_int xhits_rng[MAXCPU];
SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
    sizeof(xhits_gbl), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
    sizeof(xhits_pg), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
    sizeof(xhits_rng), "IU", "");

u_int ipi_global;
u_int ipi_page;
u_int ipi_range;
u_int ipi_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
    0, "");

u_int ipi_masked_global;
u_int ipi_masked_page;
u_int ipi_masked_range;
u_int ipi_masked_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
    &ipi_masked_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
    &ipi_masked_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
    &ipi_masked_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
    &ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */

/*
 * Flush the TLB on all other CPUs.
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}
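
/*
 * Handshake sketch for the two shootdown routines: the initiator publishes
 * smp_tlb_addr1/addr2, zeroes smp_tlb_wait with a release store and sends
 * the IPI; each target runs the corresponding IDTVEC(invltlb)/(invlpg)/
 * (invlrng) handler installed in cpu_mp_start(), which performs the
 * invalidation and then atomically increments smp_tlb_wait.  The initiator
 * spins until all ncpu targets have checked in, and smp_ipi_mtx keeps
 * concurrent initiators from clobbering the shared variables.
 */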

/*
 * This is about as magic as it gets.  fortune(1) has got similar code
 * for reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 * 	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 *
 * Another variant (also from fortune):
 * #define BITCOUNT(x) (((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
 * #define  BX_(x)     ((x) - (((x)>>1)&0x77777777)            \
 *                          - (((x)>>2)&0x33333333)            \
 *                          - (((x)>>3)&0x11111111))
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}
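
/*
 * A quick worked example: popcnt(0xc1) folds 0xc1 -> 0x81 (2-bit sums) ->
 * 0x21 (4-bit sums) -> 0x03 (8-bit sums); the two remaining folds leave
 * 0x03 unchanged, i.e. three bits set, as expected for 11000001b.
 */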

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

void
smp_invltlb(void)
{
	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_global++;
#endif
	}
}

void
smp_invlpg(vm_offset_t addr)
{
	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_page++;
#endif
	}
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{
	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_range++;
		ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
smp_masked_invltlb(u_int mask)
{
	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_global++;
#endif
	}
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{
	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_page++;
#endif
	}
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{
	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_range++;
		ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

/*
 * For statclock, we send an IPI to all CPUs to have them call this
 * function.
 */
void
forwarded_statclock(struct clockframe frame)
{
	struct thread *td;

	CTR0(KTR_SMP, "forwarded_statclock");
	td = curthread;
	td->td_intr_nesting_level++;
	if (profprocs != 0)
		profclock(&frame);
	if (pscnt == psdiv)
		statclock(&frame);
	td->td_intr_nesting_level--;
}

void
forward_statclock(void)
{
	int map;

	CTR0(KTR_SMP, "forward_statclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_STATCLOCK);
}

/*
 * For each hardclock(), we send an IPI to all other CPUs to have them
 * execute this function.  It would be nice to reduce contention on
 * sched_lock if we could simply peek at the CPU to determine the user/kernel
 * state and call hardclock_process() on the CPU receiving the clock interrupt
 * and then just use a simple IPI to handle any ASTs if needed.
 */
void
forwarded_hardclock(struct clockframe frame)
{
	struct thread *td;

	CTR0(KTR_SMP, "forwarded_hardclock");
	td = curthread;
	td->td_intr_nesting_level++;
	hardclock_process(&frame);
	td->td_intr_nesting_level--;
}

void
forward_hardclock(void)
{
	u_int map;

	CTR0(KTR_SMP, "forward_hardclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_HARDCLOCK);
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));
		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
		cpus &= ~(1 << cpu);
	}
}

/*
 * send an IPI INTerrupt containing 'vector' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
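
	/*
	 * Note on "sti; hlt" below: STI enables interrupts only after the
	 * following instruction retires, so no interrupt can be taken
	 * between the STI and the HLT; a wakeup IPI therefore cannot be
	 * lost in that window and is guaranteed to break out of the halt.
	 */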
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}
1323