/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/mp_machdep.c 144637 2005-04-04 21:53:56Z jhb $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/* lock region used by kernel profiling */
int	mcount_lock;
int	mp_naps;		/* # of application processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP TLB shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	set_logical_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int	hlt_logical_cpus;
static struct	sysctl_ctx_list logical_cpu_clist;
static u_int	bootMP_size;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}
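/*
 * Illustrative example (not code): on a hypothetical system with two
 * HTT packages of two logical CPUs each, the present APIC IDs would be
 * 0-3 and the loop above would build two groups, cg_mask 0x3 (cpus 0
 * and 1) and 0xc (cpus 2 and 3), since apic_id % logical_cpus == 0
 * marks each package boundary.
 */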


/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
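/*
 * Resulting layout at the top of base memory (a sketch, derived from the
 * arithmetic above): the three page table pages occupy the range
 * [boot_address - 3 * PAGE_SIZE, boot_address), and the trampoline code
 * occupies bootMP_size bytes starting at boot_address.  The return value
 * becomes the new top of usable base memory, so the caller can keep the
 * whole region out of the physical memory map.
 */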

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (apic_id > mp_maxid)
		mp_maxid = apic_id;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");

}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
			mp_maxid, mp_ncpus));

}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	       SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPU info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	set_logical_apic_ids();
}


/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf("  cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	u_int64_t msr, cr0;
	int cpu, gsel_tss;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;   /* not used until after switch */

	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	   (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
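	/*
	 * MSR_STAR layout (architectural, for reference): bits 47:32 hold
	 * the kernel CS/SS selector base loaded by SYSCALL, and bits 63:48
	 * hold the user selector base used by SYSRET (which adds 16 to
	 * reach the 64-bit user %cs), hence the two shifted GSELs below.
	 */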
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	/* Disable local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local APIC for IRQs */
	lapic_setup();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * Set the APIC logical IDs.
 *
 * We want to cluster logical CPUs within the same APIC ID cluster.
 * Since logical CPUs are aligned, simply filling in the clusters in
 * APIC ID order works fine.  Note that this does not try to balance
 * the number of CPUs in each cluster. (XXX?)
 */
static void
set_logical_apic_ids(void)
{
	u_int apic_id, cluster, cluster_id;

	/* Force us to allocate cluster 0 at the start. */
	cluster = -1;
	cluster_id = APIC_MAX_INTRACLUSTER_ID;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
			cluster = ioapic_next_logical_cluster();
			cluster_id = 0;
		} else
			cluster_id++;
		if (bootverbose)
			printf("APIC ID: physical %u, logical %u:%u\n",
			    apic_id, cluster, cluster_id);
		lapic_set_logical_id(apic_id, cluster, cluster_id);
	}
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_int64_t *pt4, *pt3, *pt2;
	vm_offset_t va = boot_address + KERNBASE;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables; they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
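	/*
	 * To summarize the loop above: all 512 level 4 slots alias one
	 * level 3 page, all 512 level 3 slots alias one level 2 page, and
	 * that level 2 page maps 0-1GB with 2MB pages.  The low 1GB of
	 * physical memory is therefore replicated at every 1GB boundary
	 * of the virtual address space, so the trampoline can enable long
	 * mode while still running at its low physical address, and
	 * kernel virtual addresses resolve to the same low memory too.
	 */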

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* set up a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
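	/*
	 * Background: with BIOS_WARM (0x0a) in CMOS shutdown status
	 * register 0x0f, the BIOS treats the next CPU startup as a warm
	 * boot and jumps through the far pointer at 40:67 (offset word at
	 * physical 0x467, segment word at 0x469), which now points at the
	 * trampoline.  The original vector and reset reason are restored
	 * below once all APs have been started.
	 */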

	/* start each AP */
	cpu = 0;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {

		/* Ignore non-existent CPUs and the BSP. */
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", apic_id)) {
			cpu_info[apic_id].cpu_disabled = 1;
			mp_ncpus--;
			continue;
		}

		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}


/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
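	/*
	 * The 8-bit vector field of a STARTUP IPI is a real-mode page
	 * number: the AP begins executing at CS:IP = (vector << 8):0000,
	 * i.e. physical address vector << 12.  This is why the trampoline
	 * must be page aligned and below 1MB.
	 */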

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * first we do an INIT/RESET IPI: this INIT IPI might be run,
	 * resetting and running the target CPU, OR this INIT IPI might be
	 * latched (P5 bug), with the CPU waiting for a STARTUP IPI, OR
	 * this INIT IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10ms */
	lapic_ipi_wait(-1);

	/*
	 * next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue, OR
	 * the previous INIT IPI has already run and this STARTUP IPI will
	 * run, OR the previous INIT IPI was ignored and this STARTUP IPI
	 * will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run
	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI,
	 * OR this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 50; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(100000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
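	/*
	 * Each target CPU's invalidation handler increments smp_tlb_wait
	 * when it has processed the request, so the spin below finishes
	 * once all ncpu other CPUs have checked in.
	 */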
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

/*
 * This is about as magic as it gets.  fortune(1) has got similar code
 * for reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 * 	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 *
 * Another variant (also from fortune):
 * #define BITCOUNT(x) (((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
 * #define  BX_(x)     ((x) - (((x)>>1)&0x77777777)            \
 *                          - (((x)>>2)&0x33333333)            \
 *                          - (((x)>>3)&0x11111111))
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

void
smp_invltlb(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
}


void
ipi_bitmap_handler(struct clockframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	/* Nothing to do for AST */
}

/*
 * send an IPI to a set of CPUs.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));
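			/*
			 * If other bits were already pending, an
			 * IPI_BITMAP_VECTOR interrupt is still on its way
			 * to this CPU; its handler reads and clears the
			 * whole word, so it will pick up the bit we just
			 * set and we need not send another vector.
			 */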
			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}

}

/*
 * send an IPI containing 'ipi' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;
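	/* Never allow every CPU to be halted; keep the BSP (CPU 0) running. */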
	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
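	/*
	 * "sti; hlt" enables interrupts and halts as one unit (the sti
	 * interrupt shadow covers the hlt), so a wakeup cannot slip into
	 * the gap; after each interrupt we re-check the mask and halt
	 * again if this CPU is still supposed to be idle.
	 */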
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}