mp_machdep.c revision 215753
1/*-
2 * Copyright (c) 1996, by Steve Passe
3 * Copyright (c) 2003, by Peter Wemm
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. The name of the developer may NOT be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/amd64/amd64/mp_machdep.c 215753 2010-11-23 16:12:35Z jkim $");
29
30#include "opt_cpu.h"
31#include "opt_kstack_pages.h"
32#include "opt_mp_watchdog.h"
33#include "opt_sched.h"
34#include "opt_smp.h"
35
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/bus.h>
39#ifdef GPROF
40#include <sys/gmon.h>
41#endif
42#include <sys/kernel.h>
43#include <sys/ktr.h>
44#include <sys/lock.h>
45#include <sys/malloc.h>
46#include <sys/memrange.h>
47#include <sys/mutex.h>
48#include <sys/pcpu.h>
49#include <sys/proc.h>
50#include <sys/sched.h>
51#include <sys/smp.h>
52#include <sys/sysctl.h>
53
54#include <vm/vm.h>
55#include <vm/vm_param.h>
56#include <vm/pmap.h>
57#include <vm/vm_kern.h>
58#include <vm/vm_extern.h>
59
60#include <x86/apicreg.h>
61#include <machine/clock.h>
62#include <machine/cputypes.h>
63#include <machine/cpufunc.h>
64#include <x86/mca.h>
65#include <machine/md_var.h>
66#include <machine/mp_watchdog.h>
67#include <machine/pcb.h>
68#include <machine/psl.h>
69#include <machine/smp.h>
70#include <machine/specialreg.h>
71#include <machine/tss.h>
72
73#define WARMBOOT_TARGET		0
74#define WARMBOOT_OFF		(KERNBASE + 0x0467)
75#define WARMBOOT_SEG		(KERNBASE + 0x0469)
76
77#define CMOS_REG		(0x70)
78#define CMOS_DATA		(0x71)
79#define BIOS_RESET		(0x0f)
80#define BIOS_WARM		(0x0a)
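/*
 * Physical addresses 0x467/0x469 in the BIOS data area hold the offset and
 * segment of the real-mode warm-reset vector (accessed here through the
 * static KERNBASE mapping), and CMOS register 0x0f is the shutdown status
 * byte; writing BIOS_WARM (0x0a) asks the BIOS to jump through that vector
 * after a reset instead of performing a cold boot.  start_all_aps() points
 * the vector at the trampoline so that APs brought up via the older BIOS
 * warm-reset path (rather than a STARTUP IPI) also land in the right place.
 */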
81
82/* lock region used by kernel profiling */
83int	mcount_lock;
84
85int	mp_naps;		/* # of Application Processors */
86int	boot_cpu_id = -1;	/* designated BSP */
87
88extern  struct pcpu __pcpu[];
89
90/* AP uses this during bootstrap.  Do not staticize.  */
91char *bootSTK;
92static int bootAP;
93
94/* Free these after use */
95void *bootstacks[MAXCPU];
96
97/* Temporary variables for init_secondary()  */
98char *doublefault_stack;
99char *nmi_stack;
100void *dpcpu;
101
102struct pcb stoppcbs[MAXCPU];
103struct pcb **susppcbs = NULL;
104
105/* Variables needed for SMP TLB shootdown. */
106vm_offset_t smp_tlb_addr1;
107vm_offset_t smp_tlb_addr2;
108volatile int smp_tlb_wait;
109
110#ifdef COUNT_IPIS
111/* Interrupt counts. */
112static u_long *ipi_preempt_counts[MAXCPU];
113static u_long *ipi_ast_counts[MAXCPU];
114u_long *ipi_invltlb_counts[MAXCPU];
115u_long *ipi_invlrng_counts[MAXCPU];
116u_long *ipi_invlpg_counts[MAXCPU];
117u_long *ipi_invlcache_counts[MAXCPU];
118u_long *ipi_rendezvous_counts[MAXCPU];
119u_long *ipi_lazypmap_counts[MAXCPU];
120static u_long *ipi_hardclock_counts[MAXCPU];
121#endif
122
123extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
124
125/*
126 * Local data and functions.
127 */
128
129static volatile cpumask_t ipi_nmi_pending;
130
131/* used to hold the APs until we are ready to release them */
132static struct mtx ap_boot_mtx;
133
134/* Set to 1 once we're ready to let the APs out of the pen. */
135static volatile int aps_ready = 0;
136
137/*
138 * Store data from cpu_add() until later in the boot when we actually set up
139 * the APs.
140 */
141struct cpu_info {
142	int	cpu_present:1;
143	int	cpu_bsp:1;
144	int	cpu_disabled:1;
145	int	cpu_hyperthread:1;
146} static cpu_info[MAX_APIC_ID + 1];
147int cpu_apic_ids[MAXCPU];
148int apic_cpuids[MAX_APIC_ID + 1];
149
150/* Holds pending bitmap based IPIs per CPU */
151static volatile u_int cpu_ipi_pending[MAXCPU];
152
153static u_int boot_address;
154static int cpu_logical;			/* logical cpus per core */
155static int cpu_cores;			/* cores per package */
156
157static void	assign_cpu_ids(void);
158static void	set_interrupt_apic_ids(void);
159static int	start_all_aps(void);
160static int	start_ap(int apic_id);
161static void	release_aps(void *dummy);
162
163static int	hlt_logical_cpus;
164static u_int	hyperthreading_cpus;	/* logical cpus sharing L1 cache */
165static cpumask_t	hyperthreading_cpus_mask;
166static int	hyperthreading_allowed = 1;
167static struct	sysctl_ctx_list logical_cpu_clist;
168static u_int	bootMP_size;
169
170static void
171mem_range_AP_init(void)
172{
173	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
174		mem_range_softc.mr_op->initAP(&mem_range_softc);
175}
176
177static void
178topo_probe_amd(void)
179{
180
181	/* AMD processors do not support HTT. */
182	cpu_cores = (amd_feature2 & AMDID2_CMP) != 0 ?
183	    (cpu_procinfo2 & AMDID_CMP_CORES) + 1 : 1;
184	cpu_logical = 1;
185}
186
187/*
188 * Round up to the next power of two, if necessary, and then
189 * take log2.
190 * Returns -1 if argument is zero.
191 */
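/* For example: mask_width(1) == 0, mask_width(4) == 2 and mask_width(6) == 3,
 * i.e. ceil(log2(x)). */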
192static __inline int
193mask_width(u_int x)
194{
195
196	return (fls(x << (1 - powerof2(x))) - 1);
197}
198
199static void
200topo_probe_0x4(void)
201{
202	u_int p[4];
203	int pkg_id_bits;
204	int core_id_bits;
205	int max_cores;
206	int max_logical;
207	int id;
208
209	/* Both zero and one here mean one logical processor per package. */
210	max_logical = (cpu_feature & CPUID_HTT) != 0 ?
211	    (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
212	if (max_logical <= 1)
213		return;
214
215	/*
216	 * Because of the uniformity assumption we examine only
217	 * those logical processors that belong to the same
218	 * package as the BSP.  Further, we count the number of
219	 * logical processors that belong to the same core
220	 * as the BSP, thus deducing the number of threads per core.
221	 */
222	cpuid_count(0x04, 0, p);
223	max_cores = ((p[0] >> 26) & 0x3f) + 1;
224	core_id_bits = mask_width(max_logical/max_cores);
225	if (core_id_bits < 0)
226		return;
227	pkg_id_bits = core_id_bits + mask_width(max_cores);
228
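	/*
	 * For example (hypothetical values): max_cores = 4 and max_logical = 8
	 * give core_id_bits = 1 and pkg_id_bits = 3, i.e. APIC ID bit 0
	 * selects the thread, bits 2:1 the core, and the remaining bits the
	 * package.
	 */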
229	for (id = 0; id <= MAX_APIC_ID; id++) {
230		/* Check logical CPU availability. */
231		if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
232			continue;
233		/* Check if logical CPU has the same package ID. */
234		if ((id >> pkg_id_bits) != (boot_cpu_id >> pkg_id_bits))
235			continue;
236		cpu_cores++;
237		/* Check if logical CPU has the same package and core IDs. */
238		if ((id >> core_id_bits) == (boot_cpu_id >> core_id_bits))
239			cpu_logical++;
240	}
241
242	KASSERT(cpu_cores >= 1 && cpu_logical >= 1,
243	    ("topo_probe_0x4 couldn't find BSP"));
244
245	cpu_cores /= cpu_logical;
246	hyperthreading_cpus = cpu_logical;
247}
248
249static void
250topo_probe_0xb(void)
251{
252	u_int p[4];
253	int bits;
254	int cnt;
255	int i;
256	int logical;
257	int type;
258	int x;
259
260	/* We only support three levels for now. */
261	for (i = 0; i < 3; i++) {
262		cpuid_count(0x0b, i, p);
263
264		/* Fall back if CPU leaf 11 doesn't really exist. */
265		if (i == 0 && p[1] == 0) {
266			topo_probe_0x4();
267			return;
268		}
269
270		bits = p[0] & 0x1f;
271		logical = p[1] &= 0xffff;
272		type = (p[2] >> 8) & 0xff;
273		if (type == 0 || logical == 0)
274			break;
275		/*
276		 * Because of the uniformity assumption we examine only
277		 * those logical processors that belong to the same
278		 * package as the BSP.
279		 */
280		for (cnt = 0, x = 0; x <= MAX_APIC_ID; x++) {
281			if (!cpu_info[x].cpu_present ||
282			    cpu_info[x].cpu_disabled)
283				continue;
284			if (x >> bits == boot_cpu_id >> bits)
285				cnt++;
286		}
287		if (type == CPUID_TYPE_SMT)
288			cpu_logical = cnt;
289		else if (type == CPUID_TYPE_CORE)
290			cpu_cores = cnt;
291	}
292	if (cpu_logical == 0)
293		cpu_logical = 1;
294	cpu_cores /= cpu_logical;
295}
296
297/*
298 * Both the topology discovery code and the code that consumes topology
299 * information assume top-down uniformity of the topology.
300 * That is, all physical packages must be identical and each
301 * core in a package must have the same number of threads.
302 * Topology information is queried only on the BSP, on which this
303 * code runs and for which it can query CPUID information.
304 * The topology is then extrapolated to all packages using the
305 * uniformity assumption.
306 */
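/*
 * For example, on a (hypothetical) system with 2 packages, 4 cores per
 * package and 2 threads per core, topo_probe() should leave cpu_cores = 4
 * and cpu_logical = 2; cpu_topo() below then builds a two-level group tree
 * from these values.
 */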
307static void
308topo_probe(void)
309{
310	static int cpu_topo_probed = 0;
311
312	if (cpu_topo_probed)
313		return;
314
315	logical_cpus_mask = 0;
316	if (mp_ncpus <= 1)
317		cpu_cores = cpu_logical = 1;
318	else if (cpu_vendor_id == CPU_VENDOR_AMD)
319		topo_probe_amd();
320	else if (cpu_vendor_id == CPU_VENDOR_INTEL) {
321		/*
322		 * See Intel(R) 64 Architecture Processor
323		 * Topology Enumeration article for details.
324		 *
325		 * Note that the 0x1 <= cpu_high < 4 case should be
326		 * compatible with the topo_probe_0x4() logic when
327		 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1),
328		 * or it should trigger the fallback otherwise.
329		 */
330		if (cpu_high >= 0xb)
331			topo_probe_0xb();
332		else if (cpu_high >= 0x1)
333			topo_probe_0x4();
334	}
335
336	/*
337	 * Fallback: assume each logical CPU is in a separate
338	 * physical package.  That is, no multi-core, no SMT.
339	 */
340	if (cpu_cores == 0 || cpu_logical == 0)
341		cpu_cores = cpu_logical = 1;
342	cpu_topo_probed = 1;
343}
344
345struct cpu_group *
346cpu_topo(void)
347{
348	int cg_flags;
349
350	/*
351	 * Determine whether any threading flags are
352	 * necessary.
353	 */
354	topo_probe();
355	if (cpu_logical > 1 && hyperthreading_cpus)
356		cg_flags = CG_FLAG_HTT;
357	else if (cpu_logical > 1)
358		cg_flags = CG_FLAG_SMT;
359	else
360		cg_flags = 0;
361	if (mp_ncpus % (cpu_cores * cpu_logical) != 0) {
362		printf("WARNING: Non-uniform processors.\n");
363		printf("WARNING: Using suboptimal topology.\n");
364		return (smp_topo_none());
365	}
366	/*
367	 * No multi-core or hyper-threaded.
368	 */
369	if (cpu_logical * cpu_cores == 1)
370		return (smp_topo_none());
371	/*
372	 * Only HTT no multi-core.
373	 * Only HTT, no multi-core.
374	if (cpu_logical > 1 && cpu_cores == 1)
375		return (smp_topo_1level(CG_SHARE_L1, cpu_logical, cg_flags));
376	/*
377	 * Only multi-core no HTT.
378	 * Only multi-core, no HTT.
379	if (cpu_cores > 1 && cpu_logical == 1)
380		return (smp_topo_1level(CG_SHARE_L2, cpu_cores, cg_flags));
381	/*
382	 * Both HTT and multi-core.
383	 */
384	return (smp_topo_2level(CG_SHARE_L2, cpu_cores,
385	    CG_SHARE_L1, cpu_logical, cg_flags));
386}
387
388/*
389 * Calculate usable address in base memory for AP trampoline code.
390 */
391u_int
392mp_bootaddress(u_int basemem)
393{
394
395	bootMP_size = mptramp_end - mptramp_start;
396	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
397	if (((basemem * 1024) - boot_address) < bootMP_size)
398		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
399	/* 3 levels of page table pages */
400	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);
401
402	return mptramp_pagetables;
403}
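
/*
 * For example, with 639KB of base memory (and assuming the trampoline fits
 * in the 3KB above the last full page): basemem * 1024 = 0x9fc00, so
 * boot_address = 0x9f000 and the three page table pages occupy
 * 0x9c000-0x9efff, which is the value returned to the caller.
 */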
404
405void
406cpu_add(u_int apic_id, char boot_cpu)
407{
408
409	if (apic_id > MAX_APIC_ID) {
410		panic("SMP: APIC ID %d too high", apic_id);
411		return;
412	}
413	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
414	    apic_id));
415	cpu_info[apic_id].cpu_present = 1;
416	if (boot_cpu) {
417		KASSERT(boot_cpu_id == -1,
418		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
419		    boot_cpu_id));
420		boot_cpu_id = apic_id;
421		cpu_info[apic_id].cpu_bsp = 1;
422	}
423	if (mp_ncpus < MAXCPU) {
424		mp_ncpus++;
425		mp_maxid = mp_ncpus - 1;
426	}
427	if (bootverbose)
428		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
429		    "AP");
430}
431
432void
433cpu_mp_setmaxid(void)
434{
435
436	/*
437	 * mp_maxid should already be set by calls to cpu_add().
438	 * Just sanity check its value here.
439	 */
440	if (mp_ncpus == 0)
441		KASSERT(mp_maxid == 0,
442		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
443	else if (mp_ncpus == 1)
444		mp_maxid = 0;
445	else
446		KASSERT(mp_maxid >= mp_ncpus - 1,
447		    ("%s: counters out of sync: max %d, count %d", __func__,
448			mp_maxid, mp_ncpus));
449}
450
451int
452cpu_mp_probe(void)
453{
454
455	/*
456	 * Always record the BSP in the CPU map so that the mbuf init code
457	 * works correctly.
458	 */
459	all_cpus = 1;
460	if (mp_ncpus == 0) {
461		/*
462		 * No CPUs were found, so this must be a UP system.  Set up
463		 * the variables to represent a system with a single CPU
464		 * with an id of 0.
465		 */
466		mp_ncpus = 1;
467		return (0);
468	}
469
470	/* At least one CPU was found. */
471	if (mp_ncpus == 1) {
472		/*
473		 * One CPU was found, so this must be a UP system with
474		 * an I/O APIC.
475		 */
476		mp_maxid = 0;
477		return (0);
478	}
479
480	/* At least two CPUs were found. */
481	return (1);
482}
483
484/*
485 * Initialize the IPI handlers and start up the APs.
486 */
487void
488cpu_mp_start(void)
489{
490	int i;
491
492	/* Initialize the logical ID to APIC ID table. */
493	for (i = 0; i < MAXCPU; i++) {
494		cpu_apic_ids[i] = -1;
495		cpu_ipi_pending[i] = 0;
496	}
497
498	/* Install an inter-CPU IPI for TLB invalidation */
499	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
500	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
501	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);
502
503	/* Install an inter-CPU IPI for cache invalidation. */
504	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);
505
506	/* Install an inter-CPU IPI for all-CPU rendezvous */
507	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);
508
509	/* Install generic inter-CPU IPI handler */
510	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
511	       SDT_SYSIGT, SEL_KPL, 0);
512
513	/* Install an inter-CPU IPI for CPU stop/restart */
514	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);
515
516	/* Install an inter-CPU IPI for CPU suspend/resume */
517	setidt(IPI_SUSPEND, IDTVEC(cpususpend), SDT_SYSIGT, SEL_KPL, 0);
518
519	/* Set boot_cpu_id if needed. */
520	if (boot_cpu_id == -1) {
521		boot_cpu_id = PCPU_GET(apic_id);
522		cpu_info[boot_cpu_id].cpu_bsp = 1;
523	} else
524		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
525		    ("BSP's APIC ID doesn't match boot_cpu_id"));
526
527	/* Probe logical/physical core configuration. */
528	topo_probe();
529
530	assign_cpu_ids();
531
532	/* Start each Application Processor */
533	start_all_aps();
534
535	set_interrupt_apic_ids();
536}
537
538
539/*
540 * Print various information about the SMP system hardware and setup.
541 */
542void
543cpu_mp_announce(void)
544{
545	const char *hyperthread;
546	int i;
547
548	printf("FreeBSD/SMP: %d package(s) x %d core(s)",
549	    mp_ncpus / (cpu_cores * cpu_logical), cpu_cores);
550	if (hyperthreading_cpus > 1)
551	    printf(" x %d HTT threads", cpu_logical);
552	else if (cpu_logical > 1)
553	    printf(" x %d SMT threads", cpu_logical);
554	printf("\n");
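	/* e.g. "FreeBSD/SMP: 2 package(s) x 4 core(s) x 2 SMT threads" */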
555
556	/* List active CPUs first. */
557	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
558	for (i = 1; i < mp_ncpus; i++) {
559		if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
560			hyperthread = "/HT";
561		else
562			hyperthread = "";
563		printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
564		    cpu_apic_ids[i]);
565	}
566
567	/* List disabled CPUs last. */
568	for (i = 0; i <= MAX_APIC_ID; i++) {
569		if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
570			continue;
571		if (cpu_info[i].cpu_hyperthread)
572			hyperthread = "/HT";
573		else
574			hyperthread = "";
575		printf("  cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
576		    i);
577	}
578}
579
580/*
581 * AP CPUs call this to initialize themselves.
582 */
583void
584init_secondary(void)
585{
586	struct pcpu *pc;
587	struct nmi_pcpu *np;
588	u_int64_t msr, cr0;
589	int cpu, gsel_tss, x;
590	struct region_descriptor ap_gdt;
591
592	/* Set by the startup code for us to use */
593	cpu = bootAP;
594
595	/* Init tss */
596	common_tss[cpu] = common_tss[0];
597	common_tss[cpu].tss_rsp0 = 0;   /* not used until after switch */
598	common_tss[cpu].tss_iobase = sizeof(struct amd64tss) +
599	    IOPAGES * PAGE_SIZE;
600	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];
601
602	/* The NMI stack runs on IST2. */
603	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
604	common_tss[cpu].tss_ist2 = (long) np;
605
606	/* Prepare private GDT */
607	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
608	for (x = 0; x < NGDT; x++) {
609		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
610		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
611			ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
612	}
613	ssdtosyssd(&gdt_segs[GPROC0_SEL],
614	    (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
615	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
616	ap_gdt.rd_base =  (long) &gdt[NGDT * cpu];
617	lgdt(&ap_gdt);			/* does magic intra-segment return */
618
619	/* Get per-cpu data */
620	pc = &__pcpu[cpu];
621
622	/* prime data page for it to use */
623	pcpu_init(pc, cpu, sizeof(struct pcpu));
624	dpcpu_init(dpcpu, cpu);
625	pc->pc_apic_id = cpu_apic_ids[cpu];
626	pc->pc_prvspace = pc;
627	pc->pc_curthread = 0;
628	pc->pc_tssp = &common_tss[cpu];
629	pc->pc_commontssp = &common_tss[cpu];
630	pc->pc_rsp0 = 0;
631	pc->pc_tss = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
632	    GPROC0_SEL];
633	pc->pc_fs32p = &gdt[NGDT * cpu + GUFS32_SEL];
634	pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];
635	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
636	    GUSERLDT_SEL];
637
638	/* Save the per-cpu pointer for use by the NMI handler. */
639	np->np_pcpu = (register_t) pc;
640
641	wrmsr(MSR_FSBASE, 0);		/* User value */
642	wrmsr(MSR_GSBASE, (u_int64_t)pc);
643	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */
644
645	lidt(&r_idt);
646
647	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
648	ltr(gsel_tss);
649
650	/*
651	 * Set to a known state:
652	 * Set by mpboot.s: CR0_PG, CR0_PE
653	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
654	 */
655	cr0 = rcr0();
656	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
657	load_cr0(cr0);
658
659	/* Set up the fast syscall stuff */
660	msr = rdmsr(MSR_EFER) | EFER_SCE;
661	wrmsr(MSR_EFER, msr);
662	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
663	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
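	/*
	 * STAR[47:32] holds the kernel CS selector loaded by SYSCALL and
	 * STAR[63:48] the 32-bit user CS selector used by SYSRET; the
	 * 64-bit user selectors are derived from the latter by the CPU.
	 */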
664	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
665	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
666	wrmsr(MSR_STAR, msr);
667	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
668
669	/* Disable local APIC just to be sure. */
670	lapic_disable();
671
672	/* signal our startup to the BSP. */
673	mp_naps++;
674
675	/* Spin until the BSP releases the APs. */
676	while (!aps_ready)
677		ia32_pause();
678
679	/* Initialize the PAT MSR. */
680	pmap_init_pat();
681
682	/* set up CPU registers and state */
683	cpu_setregs();
684
685	/* set up SSE/NX registers */
686	initializecpu();
687
688	/* set up FPU state on the AP */
689	fpuinit();
690
691	/* A quick check from sanity claus */
692	if (PCPU_GET(apic_id) != lapic_id()) {
693		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
694		printf("SMP: actual apic_id = %d\n", lapic_id());
695		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
696		panic("cpuid mismatch! boom!!");
697	}
698
699	/* Initialize curthread. */
700	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
701	PCPU_SET(curthread, PCPU_GET(idlethread));
702
703	mca_init();
704
705	mtx_lock_spin(&ap_boot_mtx);
706
707	/* Init the local APIC for IRQs. */
708	lapic_setup(1);
709
710	/* Set memory range attributes for this CPU to match the BSP */
711	mem_range_AP_init();
712
713	smp_cpus++;
714
715	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
716	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
717
718	/* Determine if we are a logical CPU. */
719	/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
720	if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
721		logical_cpus_mask |= PCPU_GET(cpumask);
722
723	/* Determine if we are a hyperthread. */
724	if (hyperthreading_cpus > 1 &&
725	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
726		hyperthreading_cpus_mask |= PCPU_GET(cpumask);
727
728	/* Build our map of 'other' CPUs. */
729	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
730
731	if (bootverbose)
732		lapic_dump("AP");
733
734	if (smp_cpus == mp_ncpus) {
735		/* enable IPIs, TLB shootdown, freezes, etc. */
736		atomic_store_rel_int(&smp_started, 1);
737		smp_active = 1;	 /* historic */
738	}
739
740	/*
741	 * Enable the global pages TLB extension.
742	 * This also implicitly flushes the TLB.
743	 */
744
745	load_cr4(rcr4() | CR4_PGE);
746	load_ds(_udatasel);
747	load_es(_udatasel);
748	load_fs(_ufssel);
749	mtx_unlock_spin(&ap_boot_mtx);
750
751	/* Wait until all the APs are up. */
752	while (smp_started == 0)
753		ia32_pause();
754
755	/* Start per-CPU event timers. */
756	cpu_initclocks_ap();
757
758	sched_throw(NULL);
759
760	panic("scheduler returned us to %s", __func__);
761	/* NOTREACHED */
762}
763
764/*******************************************************************
765 * local functions and data
766 */
767
768/*
769 * We tell the I/O APIC code about all the CPUs that we want to receive
770 * interrupts.  If we don't want certain CPUs to receive IRQs we
771 * can simply not tell the I/O APIC code about them in this function.
772 * We also do not tell it about the BSP since it tells itself about
773 * the BSP internally to work with UP kernels and on UP machines.
774 */
775static void
776set_interrupt_apic_ids(void)
777{
778	u_int i, apic_id;
779
780	for (i = 0; i < MAXCPU; i++) {
781		apic_id = cpu_apic_ids[i];
782		if (apic_id == -1)
783			continue;
784		if (cpu_info[apic_id].cpu_bsp)
785			continue;
786		if (cpu_info[apic_id].cpu_disabled)
787			continue;
788
789		/* Don't let hyperthreads service interrupts. */
790		if (hyperthreading_cpus > 1 &&
791		    apic_id % hyperthreading_cpus != 0)
792			continue;
793
794		intr_add_cpu(i);
795	}
796}
797
798/*
799 * Assign logical CPU IDs to local APICs.
800 */
801static void
802assign_cpu_ids(void)
803{
804	u_int i;
805
806	TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
807	    &hyperthreading_allowed);
808
809	/* Check for explicitly disabled CPUs. */
810	for (i = 0; i <= MAX_APIC_ID; i++) {
811		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
812			continue;
813
814		if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
815			cpu_info[i].cpu_hyperthread = 1;
816#if defined(SCHED_ULE)
817			/*
818			 * Don't use HT CPU if it has been disabled by a
819			 * tunable.
820			 */
821			if (hyperthreading_allowed == 0) {
822				cpu_info[i].cpu_disabled = 1;
823				continue;
824			}
825#endif
826		}
827
828		/* Don't use this CPU if it has been disabled by a tunable. */
829		if (resource_disabled("lapic", i)) {
830			cpu_info[i].cpu_disabled = 1;
831			continue;
832		}
833	}
834
835	/*
836	 * Assign CPU IDs to local APIC IDs and disable any CPUs
837	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
838	 *
839	 * To minimize confusion for userland, we attempt to number
840	 * CPUs such that all threads and cores in a package are
841	 * grouped together.  For now we assume that the BSP is always
842	 * the first thread in a package and just start adding APs
843	 * starting with the BSP's APIC ID.
844	 */
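	/*
	 * The loop below scans APIC IDs starting just after the BSP and
	 * wrapping around; e.g. with boot_cpu_id == 4 it visits
	 * 5, 6, ..., MAX_APIC_ID, 0, 1, 2, 3.
	 */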
845	mp_ncpus = 1;
846	cpu_apic_ids[0] = boot_cpu_id;
847	apic_cpuids[boot_cpu_id] = 0;
848	for (i = boot_cpu_id + 1; i != boot_cpu_id;
849	     i == MAX_APIC_ID ? i = 0 : i++) {
850		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
851		    cpu_info[i].cpu_disabled)
852			continue;
853
854		if (mp_ncpus < MAXCPU) {
855			cpu_apic_ids[mp_ncpus] = i;
856			apic_cpuids[i] = mp_ncpus;
857			mp_ncpus++;
858		} else
859			cpu_info[i].cpu_disabled = 1;
860	}
861	KASSERT(mp_maxid >= mp_ncpus - 1,
862	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
863	    mp_ncpus));
864}
865
866/*
867 * start each AP in our list
868 */
869static int
870start_all_aps(void)
871{
872	vm_offset_t va = boot_address + KERNBASE;
873	u_int64_t *pt4, *pt3, *pt2;
874	u_int32_t mpbioswarmvec;
875	int apic_id, cpu, i;
876	u_char mpbiosreason;
877
878	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
879
880	/* install the AP 1st level boot code */
881	pmap_kenter(va, boot_address);
882	pmap_invalidate_page(kernel_pmap, va);
883	bcopy(mptramp_start, (void *)va, bootMP_size);
884
885	/* Locate the page tables; they'll be below the trampoline. */
886	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
887	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
888	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);
889
890	/* Create the initial 1GB replicated page tables */
891	for (i = 0; i < 512; i++) {
892		/* Each slot of the level 4 pages points to the same level 3 page */
893		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
894		pt4[i] |= PG_V | PG_RW | PG_U;
895
896		/* Each slot of the level 3 pages points to the same level 2 page */
897		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
898		pt3[i] |= PG_V | PG_RW | PG_U;
899
900		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
901		pt2[i] = i * (2 * 1024 * 1024);
902		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
903	}
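
	/*
	 * The net effect is that every 1GB-aligned slice of the AP's initial
	 * address space maps physical 0-1GB, so the trampoline keeps running
	 * after paging is enabled whether it is executing from low memory or
	 * from the KERNBASE alias of the kernel.
	 */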
904
905	/* save the current value of the warm-start vector */
906	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
907	outb(CMOS_REG, BIOS_RESET);
908	mpbiosreason = inb(CMOS_DATA);
909
910	/* setup a vector to our boot code */
911	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
912	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
913	outb(CMOS_REG, BIOS_RESET);
914	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
915
916	/* start each AP */
917	for (cpu = 1; cpu < mp_ncpus; cpu++) {
918		apic_id = cpu_apic_ids[cpu];
919
920		/* allocate and set up an idle stack data page */
921		bootstacks[cpu] = (void *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
922		doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
923		nmi_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
924		dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
925
926		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
927		bootAP = cpu;
928
929		/* attempt to start the Application Processor */
930		if (!start_ap(apic_id)) {
931			/* restore the warmstart vector */
932			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
933			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
934		}
935
936		all_cpus |= (1 << cpu);		/* record AP in CPU map */
937	}
938
939	/* build our map of 'other' CPUs */
940	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
941
942	/* restore the warmstart vector */
943	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
944
945	outb(CMOS_REG, BIOS_RESET);
946	outb(CMOS_DATA, mpbiosreason);
947
948	/* number of APs actually started */
949	return mp_naps;
950}
951
952
953/*
954 * This function starts the AP (application processor) identified
955 * by its APIC ID, 'apic_id'.  It does quite a "song and dance"
956 * to accomplish this.  This is necessary because of the nuances
957 * of the different hardware we might encounter.  It isn't pretty,
958 * but it seems to work.
959 */
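/*
 * Note that the STARTUP IPI vector is the physical page number of the
 * trampoline (boot_address >> 12), so the AP begins executing in real
 * mode at boot_address; the trampoline therefore has to sit on a page
 * boundary below 1MB.
 */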
960static int
961start_ap(int apic_id)
962{
963	int vector, ms;
964	int cpus;
965
966	/* calculate the vector */
967	vector = (boot_address >> 12) & 0xff;
968
969	/* used as a watchpoint to signal AP startup */
970	cpus = mp_naps;
971
972	/*
973	 * First we do an INIT/RESET IPI: this INIT IPI might be run, resetting
974	 * and running the target CPU; OR this INIT IPI might be latched (P5
975	 * bug), with the CPU left waiting for a STARTUP IPI; OR this INIT IPI
976	 * might be ignored.
977	 */
978
979	/* do an INIT IPI: assert RESET */
980	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
981	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
982
983	/* wait for pending status end */
984	lapic_ipi_wait(-1);
985
986	/* do an INIT IPI: deassert RESET */
987	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
988	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);
989
990	/* wait for pending status end */
991	DELAY(10000);		/* wait ~10ms */
992	lapic_ipi_wait(-1);
993
994	/*
995	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
996	 * latched (P5 bug), in which case this 1st STARTUP would terminate
997	 * immediately and the previously started INIT IPI would continue; OR
998	 * the previous INIT IPI has already run, and this STARTUP IPI will
999	 * run; OR the previous INIT IPI was ignored, and this STARTUP IPI
1000	 * will run.
1001	 */
1002
1003	/* do a STARTUP IPI */
1004	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1005	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1006	    vector, apic_id);
1007	lapic_ipi_wait(-1);
1008	DELAY(200);		/* wait ~200us */
1009
1010	/*
1011	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
1012	 * the previous STARTUP IPI was cancelled by a latched INIT IPI; OR
1013	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
1014	 * recognized after a hardware RESET or INIT IPI.
1015	 */
1016
1017	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1018	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1019	    vector, apic_id);
1020	lapic_ipi_wait(-1);
1021	DELAY(200);		/* wait ~200us */
1022
1023	/* Wait up to 5 seconds for it to start. */
1024	for (ms = 0; ms < 5000; ms++) {
1025		if (mp_naps > cpus)
1026			return 1;	/* return SUCCESS */
1027		DELAY(1000);
1028	}
1029	return 0;		/* return FAILURE */
1030}
1031
1032#ifdef COUNT_XINVLTLB_HITS
1033u_int xhits_gbl[MAXCPU];
1034u_int xhits_pg[MAXCPU];
1035u_int xhits_rng[MAXCPU];
1036SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
1037SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
1038    sizeof(xhits_gbl), "IU", "");
1039SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
1040    sizeof(xhits_pg), "IU", "");
1041SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
1042    sizeof(xhits_rng), "IU", "");
1043
1044u_int ipi_global;
1045u_int ipi_page;
1046u_int ipi_range;
1047u_int ipi_range_size;
1048SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
1049SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
1050SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
1051SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
1052    0, "");
1053
1054u_int ipi_masked_global;
1055u_int ipi_masked_page;
1056u_int ipi_masked_range;
1057u_int ipi_masked_range_size;
1058SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
1059    &ipi_masked_global, 0, "");
1060SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
1061    &ipi_masked_page, 0, "");
1062SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
1063    &ipi_masked_range, 0, "");
1064SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
1065    &ipi_masked_range_size, 0, "");
1066#endif /* COUNT_XINVLTLB_HITS */
1067
1068/*
1069 * Flush the TLB on all other CPUs.
1070 */
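/*
 * The handshake works roughly as follows: the initiating CPU publishes
 * addr1/addr2, zeroes smp_tlb_wait and sends the IPI; each remote
 * invalidation handler performs its flush and increments smp_tlb_wait,
 * and the initiator spins until all of the other CPUs have checked in
 * before dropping smp_ipi_mtx.
 */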
1071static void
1072smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
1073{
1074	u_int ncpu;
1075
1076	ncpu = mp_ncpus - 1;	/* does not shoot down self */
1077	if (ncpu < 1)
1078		return;		/* no other cpus */
1079	if (!(read_rflags() & PSL_I))
1080		panic("%s: interrupts disabled", __func__);
1081	mtx_lock_spin(&smp_ipi_mtx);
1082	smp_tlb_addr1 = addr1;
1083	smp_tlb_addr2 = addr2;
1084	atomic_store_rel_int(&smp_tlb_wait, 0);
1085	ipi_all_but_self(vector);
1086	while (smp_tlb_wait < ncpu)
1087		ia32_pause();
1088	mtx_unlock_spin(&smp_ipi_mtx);
1089}
1090
1091static void
1092smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
1093{
1094	int ncpu, othercpus;
1095
1096	othercpus = mp_ncpus - 1;
1097	if (mask == (cpumask_t)-1) {
1098		ncpu = othercpus;
1099		if (ncpu < 1)
1100			return;
1101	} else {
1102		mask &= ~PCPU_GET(cpumask);
1103		if (mask == 0)
1104			return;
1105		ncpu = bitcount32(mask);
1106		if (ncpu > othercpus) {
1107			/* XXX this should be a panic offence */
1108			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
1109			    ncpu, othercpus);
1110			ncpu = othercpus;
1111		}
1112		/* XXX should be a panic, implied by mask == 0 above */
1113		if (ncpu < 1)
1114			return;
1115	}
1116	if (!(read_rflags() & PSL_I))
1117		panic("%s: interrupts disabled", __func__);
1118	mtx_lock_spin(&smp_ipi_mtx);
1119	smp_tlb_addr1 = addr1;
1120	smp_tlb_addr2 = addr2;
1121	atomic_store_rel_int(&smp_tlb_wait, 0);
1122	if (mask == (cpumask_t)-1)
1123		ipi_all_but_self(vector);
1124	else
1125		ipi_selected(mask, vector);
1126	while (smp_tlb_wait < ncpu)
1127		ia32_pause();
1128	mtx_unlock_spin(&smp_ipi_mtx);
1129}
1130
1131/*
1132 * Send an IPI to the specified CPU, handling the bitmap logic.
1133 */
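/*
 * Bitmapped IPIs (IPI_PREEMPT, IPI_AST, IPI_HARDCLOCK) share the single
 * IPI_BITMAP_VECTOR: the request is recorded in the target CPU's
 * cpu_ipi_pending word and the vector is only sent when that word was
 * previously empty; ipi_bitmap_handler() later drains the word and
 * dispatches each pending request.
 */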
1134static void
1135ipi_send_cpu(int cpu, u_int ipi)
1136{
1137	u_int bitmap, old_pending, new_pending;
1138
1139	KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
1140
1141	if (IPI_IS_BITMAPED(ipi)) {
1142		bitmap = 1 << ipi;
1143		ipi = IPI_BITMAP_VECTOR;
1144		do {
1145			old_pending = cpu_ipi_pending[cpu];
1146			new_pending = old_pending | bitmap;
1147		} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
1148		    old_pending, new_pending));
1149		if (old_pending)
1150			return;
1151	}
1152	lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
1153}
1154
1155void
1156smp_cache_flush(void)
1157{
1158
1159	if (smp_started)
1160		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
1161}
1162
1163void
1164smp_invltlb(void)
1165{
1166
1167	if (smp_started) {
1168		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
1169#ifdef COUNT_XINVLTLB_HITS
1170		ipi_global++;
1171#endif
1172	}
1173}
1174
1175void
1176smp_invlpg(vm_offset_t addr)
1177{
1178
1179	if (smp_started) {
1180		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
1181#ifdef COUNT_XINVLTLB_HITS
1182		ipi_page++;
1183#endif
1184	}
1185}
1186
1187void
1188smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
1189{
1190
1191	if (smp_started) {
1192		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
1193#ifdef COUNT_XINVLTLB_HITS
1194		ipi_range++;
1195		ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
1196#endif
1197	}
1198}
1199
1200void
1201smp_masked_invltlb(cpumask_t mask)
1202{
1203
1204	if (smp_started) {
1205		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
1206#ifdef COUNT_XINVLTLB_HITS
1207		ipi_masked_global++;
1208#endif
1209	}
1210}
1211
1212void
1213smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
1214{
1215
1216	if (smp_started) {
1217		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
1218#ifdef COUNT_XINVLTLB_HITS
1219		ipi_masked_page++;
1220#endif
1221	}
1222}
1223
1224void
1225smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
1226{
1227
1228	if (smp_started) {
1229		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
1230#ifdef COUNT_XINVLTLB_HITS
1231		ipi_masked_range++;
1232		ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
1233#endif
1234	}
1235}
1236
1237void
1238ipi_bitmap_handler(struct trapframe frame)
1239{
1240	struct trapframe *oldframe;
1241	struct thread *td;
1242	int cpu = PCPU_GET(cpuid);
1243	u_int ipi_bitmap;
1244
1245	critical_enter();
1246	td = curthread;
1247	td->td_intr_nesting_level++;
1248	oldframe = td->td_intr_frame;
1249	td->td_intr_frame = &frame;
1250	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);
1251	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
1252#ifdef COUNT_IPIS
1253		(*ipi_preempt_counts[cpu])++;
1254#endif
1255		sched_preempt(td);
1256	}
1257	if (ipi_bitmap & (1 << IPI_AST)) {
1258#ifdef COUNT_IPIS
1259		(*ipi_ast_counts[cpu])++;
1260#endif
1261		/* Nothing to do for AST */
1262	}
1263	if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
1264#ifdef COUNT_IPIS
1265		(*ipi_hardclock_counts[cpu])++;
1266#endif
1267		hardclockintr();
1268	}
1269	td->td_intr_frame = oldframe;
1270	td->td_intr_nesting_level--;
1271	critical_exit();
1272}
1273
1274/*
1275 * Send an IPI to a set of CPUs.
1276 */
1277void
1278ipi_selected(cpumask_t cpus, u_int ipi)
1279{
1280	int cpu;
1281
1282	/*
1283	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1284	 * of help in order to understand what the source is.
1285	 * Set the mask of receiving CPUs for this purpose.
1286	 */
1287	if (ipi == IPI_STOP_HARD)
1288		atomic_set_int(&ipi_nmi_pending, cpus);
1289
1290	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
1291	while ((cpu = ffs(cpus)) != 0) {
1292		cpu--;
1293		cpus &= ~(1 << cpu);
1294		ipi_send_cpu(cpu, ipi);
1295	}
1296}
1297
1298/*
1299 * Send an IPI to a specific CPU.
1300 */
1301void
1302ipi_cpu(int cpu, u_int ipi)
1303{
1304
1305	/*
1306	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1307	 * of help in order to understand what the source is.
1308	 * Set the mask of receiving CPUs for this purpose.
1309	 */
1310	if (ipi == IPI_STOP_HARD)
1311		atomic_set_int(&ipi_nmi_pending, 1 << cpu);
1312
1313	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
1314	ipi_send_cpu(cpu, ipi);
1315}
1316
1317/*
1318 * Send an IPI to all CPUs EXCEPT myself.
1319 */
1320void
1321ipi_all_but_self(u_int ipi)
1322{
1323
1324	if (IPI_IS_BITMAPED(ipi)) {
1325		ipi_selected(PCPU_GET(other_cpus), ipi);
1326		return;
1327	}
1328
1329	/*
1330	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1331	 * of help in order to understand what the source is.
1332	 * Set the mask of receiving CPUs for this purpose.
1333	 */
1334	if (ipi == IPI_STOP_HARD)
1335		atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
1336
1337	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
1338	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
1339}
1340
1341int
1342ipi_nmi_handler()
1343{
1344	cpumask_t cpumask;
1345
1346	/*
1347	 * As long as there is not a simple way to know an NMI's
1348	 * source: if the bitmask for the current CPU is present in
1349	 * the global pending bitword, an IPI_STOP_HARD has been issued
1350	 * and should be handled.
1351	 */
1352	cpumask = PCPU_GET(cpumask);
1353	if ((ipi_nmi_pending & cpumask) == 0)
1354		return (1);
1355
1356	atomic_clear_int(&ipi_nmi_pending, cpumask);
1357	cpustop_handler();
1358	return (0);
1359}
1360
1361/*
1362 * Handle an IPI_STOP by saving our current context and spinning until we
1363 * are resumed.
1364 */
1365void
1366cpustop_handler(void)
1367{
1368	cpumask_t cpumask;
1369	u_int cpu;
1370
1371	cpu = PCPU_GET(cpuid);
1372	cpumask = PCPU_GET(cpumask);
1373
1374	savectx(&stoppcbs[cpu]);
1375
1376	/* Indicate that we are stopped */
1377	atomic_set_int(&stopped_cpus, cpumask);
1378
1379	/* Wait for restart */
1380	while (!(started_cpus & cpumask))
1381	    ia32_pause();
1382
1383	atomic_clear_int(&started_cpus, cpumask);
1384	atomic_clear_int(&stopped_cpus, cpumask);
1385
1386	if (cpu == 0 && cpustop_restartfunc != NULL) {
1387		cpustop_restartfunc();
1388		cpustop_restartfunc = NULL;
1389	}
1390}
1391
1392/*
1393 * Handle an IPI_SUSPEND by saving our current context and spinning until we
1394 * are resumed.
1395 */
1396void
1397cpususpend_handler(void)
1398{
1399	cpumask_t cpumask;
1400	register_t cr3, rf;
1401	u_int cpu;
1402
1403	cpu = PCPU_GET(cpuid);
1404	cpumask = PCPU_GET(cpumask);
1405
1406	rf = intr_disable();
1407	cr3 = rcr3();
1408
1409	if (savectx(susppcbs[cpu])) {
1410		wbinvd();
1411		atomic_set_int(&stopped_cpus, cpumask);
1412	} else {
1413		pmap_init_pat();
1414		PCPU_SET(switchtime, 0);
1415		PCPU_SET(switchticks, ticks);
1416	}
1417
1418	/* Wait for resume */
1419	while (!(started_cpus & cpumask))
1420		ia32_pause();
1421
1422	atomic_clear_int(&started_cpus, cpumask);
1423	atomic_clear_int(&stopped_cpus, cpumask);
1424
1425	/* Restore CR3 and enable interrupts */
1426	load_cr3(cr3);
1427	mca_resume();
1428	lapic_setup(0);
1429	intr_restore(rf);
1430}
1431
1432/*
1433 * This is called once the rest of the system is up and running and we're
1434 * ready to let the APs out of the pen.
1435 */
1436static void
1437release_aps(void *dummy __unused)
1438{
1439
1440	if (mp_ncpus == 1)
1441		return;
1442	atomic_store_rel_int(&aps_ready, 1);
1443	while (smp_started == 0)
1444		ia32_pause();
1445}
1446SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
1447
1448static int
1449sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
1450{
1451	cpumask_t mask;
1452	int error;
1453
1454	mask = hlt_cpus_mask;
1455	error = sysctl_handle_int(oidp, &mask, 0, req);
1456	if (error || !req->newptr)
1457		return (error);
1458
1459	if (logical_cpus_mask != 0 &&
1460	    (mask & logical_cpus_mask) == logical_cpus_mask)
1461		hlt_logical_cpus = 1;
1462	else
1463		hlt_logical_cpus = 0;
1464
1465	if (! hyperthreading_allowed)
1466		mask |= hyperthreading_cpus_mask;
1467
1468	if ((mask & all_cpus) == all_cpus)
1469		mask &= ~(1<<0);
1470	hlt_cpus_mask = mask;
1471	return (error);
1472}
1473SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
1474    0, 0, sysctl_hlt_cpus, "IU",
1475    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");
1476
1477static int
1478sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
1479{
1480	int disable, error;
1481
1482	disable = hlt_logical_cpus;
1483	error = sysctl_handle_int(oidp, &disable, 0, req);
1484	if (error || !req->newptr)
1485		return (error);
1486
1487	if (disable)
1488		hlt_cpus_mask |= logical_cpus_mask;
1489	else
1490		hlt_cpus_mask &= ~logical_cpus_mask;
1491
1492	if (! hyperthreading_allowed)
1493		hlt_cpus_mask |= hyperthreading_cpus_mask;
1494
1495	if ((hlt_cpus_mask & all_cpus) == all_cpus)
1496		hlt_cpus_mask &= ~(1<<0);
1497
1498	hlt_logical_cpus = disable;
1499	return (error);
1500}
1501
1502static int
1503sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
1504{
1505	int allowed, error;
1506
1507	allowed = hyperthreading_allowed;
1508	error = sysctl_handle_int(oidp, &allowed, 0, req);
1509	if (error || !req->newptr)
1510		return (error);
1511
1512#ifdef SCHED_ULE
1513	/*
1514	 * SCHED_ULE doesn't allow enabling/disabling HT cores at
1515	 * run-time.
1516	 */
1517	if (allowed != hyperthreading_allowed)
1518		return (ENOTSUP);
1519	return (error);
1520#endif
1521
1522	if (allowed)
1523		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
1524	else
1525		hlt_cpus_mask |= hyperthreading_cpus_mask;
1526
1527	if (logical_cpus_mask != 0 &&
1528	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
1529		hlt_logical_cpus = 1;
1530	else
1531		hlt_logical_cpus = 0;
1532
1533	if ((hlt_cpus_mask & all_cpus) == all_cpus)
1534		hlt_cpus_mask &= ~(1<<0);
1535
1536	hyperthreading_allowed = allowed;
1537	return (error);
1538}
1539
1540static void
1541cpu_hlt_setup(void *dummy __unused)
1542{
1543
1544	if (logical_cpus_mask != 0) {
1545		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
1546		    &hlt_logical_cpus);
1547		sysctl_ctx_init(&logical_cpu_clist);
1548		SYSCTL_ADD_PROC(&logical_cpu_clist,
1549		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
1550		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
1551		    sysctl_hlt_logical_cpus, "IU", "");
1552		SYSCTL_ADD_UINT(&logical_cpu_clist,
1553		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
1554		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
1555		    &logical_cpus_mask, 0, "");
1556
1557		if (hlt_logical_cpus)
1558			hlt_cpus_mask |= logical_cpus_mask;
1559
1560		/*
1561		 * If necessary for security purposes, force
1562		 * hyperthreading off, regardless of the value
1563		 * of hlt_logical_cpus.
1564		 */
1565		if (hyperthreading_cpus_mask) {
1566			SYSCTL_ADD_PROC(&logical_cpu_clist,
1567			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
1568			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
1569			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
1570			if (! hyperthreading_allowed)
1571				hlt_cpus_mask |= hyperthreading_cpus_mask;
1572		}
1573	}
1574}
1575SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
1576
1577int
1578mp_grab_cpu_hlt(void)
1579{
1580	cpumask_t mask;
1581#ifdef MP_WATCHDOG
1582	u_int cpuid;
1583#endif
1584	int retval;
1585
1586	mask = PCPU_GET(cpumask);
1587#ifdef MP_WATCHDOG
1588	cpuid = PCPU_GET(cpuid);
1589	ap_watchdog(cpuid);
1590#endif
1591
1592	retval = 0;
1593	while (mask & hlt_cpus_mask) {
1594		retval = 1;
1595		__asm __volatile("sti; hlt" : : : "memory");
1596	}
1597	return (retval);
1598}
1599
1600#ifdef COUNT_IPIS
1601/*
1602 * Set up interrupt counters for IPI handlers.
1603 */
1604static void
1605mp_ipi_intrcnt(void *dummy)
1606{
1607	char buf[64];
1608	int i;
1609
1610	CPU_FOREACH(i) {
1611		snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
1612		intrcnt_add(buf, &ipi_invltlb_counts[i]);
1613		snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
1614		intrcnt_add(buf, &ipi_invlrng_counts[i]);
1615		snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
1616		intrcnt_add(buf, &ipi_invlpg_counts[i]);
1617		snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
1618		intrcnt_add(buf, &ipi_preempt_counts[i]);
1619		snprintf(buf, sizeof(buf), "cpu%d:ast", i);
1620		intrcnt_add(buf, &ipi_ast_counts[i]);
1621		snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
1622		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
1623		snprintf(buf, sizeof(buf), "cpu%d:lazypmap", i);
1624		intrcnt_add(buf, &ipi_lazypmap_counts[i]);
1625		snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
1626		intrcnt_add(buf, &ipi_hardclock_counts[i]);
1627	}
1628}
1629SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
1630#endif
1631
1632