/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/mp_machdep.c 140555 2005-01-21 06:01:20Z peter $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

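/*
 * Starting an AP relies on a BIOS warm boot: the word at 0x467 in the
 * BIOS data area holds the far pointer the BIOS jumps through on a warm
 * start, and CMOS register 0x0f is the shutdown status byte, where the
 * value 0x0a requests a warm start through that vector.
 */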
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of application processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	set_logical_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int	hlt_logical_cpus;
static struct	sysctl_ctx_list logical_cpu_clist;
static u_int	bootMP_size;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}
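/*
 * For example, on a 2-package system with 2 logical CPUs per package and
 * APIC IDs 0-3 present, the loop above produces two groups with cg_mask
 * 0x3 and 0xc: a new group starts at each APIC ID that is a multiple of
 * logical_cpus.
 */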


/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
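
/*
 * A sketch of the resulting layout at the top of base memory:
 *
 *	boot_address			AP trampoline (bootMP_size bytes)
 *	boot_address - 1*PAGE_SIZE	level 2 page table (PD)
 *	boot_address - 2*PAGE_SIZE	level 3 page table (PDP)
 *	boot_address - 3*PAGE_SIZE	level 4 page table (PML4)
 */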

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (apic_id > mp_maxid)
		mp_maxid = apic_id;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
			mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the AP's.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	       SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	set_logical_apic_ids();
}


/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf("  cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	u_int64_t msr, cr0;
	int cpu, gsel_tss;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;   /* not used until after switch */

	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	   (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
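	/*
	 * A note on the MSRs above: STAR packs selector bases, with bits
	 * 47:32 giving the kernel CS/SS pair loaded by SYSCALL and bits
	 * 63:48 the base SYSRET uses to rebuild the user selectors, while
	 * SF_MASK lists the RFLAGS bits SYSCALL clears on entry (notably
	 * PSL_I, so the kernel is entered with interrupts disabled).
	 */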

	/* Disable local apic just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (!aps_ready)
		ia32_pause();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */
	lapic_setup();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * Set the APIC logical IDs.
 *
 * We want to cluster logical CPU's within the same APIC ID cluster.
 * Since logical CPU's are aligned, simply filling in the clusters in
 * APIC ID order works fine.  Note that this does not try to balance
 * the number of CPU's in each cluster. (XXX?)
 */
static void
set_logical_apic_ids(void)
{
	u_int apic_id, cluster, cluster_id;

	/* Force us to allocate cluster 0 at the start. */
	cluster = -1;
	cluster_id = APIC_MAX_INTRACLUSTER_ID;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
			cluster = ioapic_next_logical_cluster();
			cluster_id = 0;
		} else
			cluster_id++;
		if (bootverbose)
			printf("APIC ID: physical %u, logical %u:%u\n",
			    apic_id, cluster, cluster_id);
		lapic_set_logical_id(apic_id, cluster, cluster_id);
	}
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_int64_t *pt4, *pt3, *pt2;
	vm_offset_t va = boot_address + KERNBASE;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
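	/*
	 * With every PML4 and PDP slot pointing at the same lower-level
	 * page, any virtual address resolves to (VA % 1GB) physical.  In
	 * particular the trampoline's sub-1MB addresses and the kernel's
	 * KERNBASE-relative addresses both work until the AP switches to
	 * the real kernel page tables.
	 */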

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* set up a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	cpu = 0;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {

		/* Ignore non-existent CPUs and the BSP. */
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", apic_id)) {
			cpu_info[apic_id].cpu_disabled = 1;
			mp_ncpus--;
			continue;
		}

		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}


/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
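	/*
	 * The STARTUP IPI vector is the physical page number of the
	 * trampoline: the AP starts in real mode at CS:IP =
	 * (vector << 8):0000, i.e. at physical address vector << 12,
	 * which is boot_address.
	 */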

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched
	 * (P5 bug), with the CPU waiting for a STARTUP IPI.  OR this INIT
	 * IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10 ms */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug); this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this
	 * STARTUP IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200 us */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run
	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI.
	 * OR this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200 us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 50; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(100000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPU's
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}
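/*
 * The receiving CPUs do the invalidation in their interrupt handlers
 * (see apic_vector.S), each acknowledging by incrementing smp_tlb_wait;
 * the initiator spins until all targets have checked in.
 */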

/*
 * This is about as magic as it gets.  fortune(1) has got similar code
 * for reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 * 	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 *
 * Another variant (also from fortune):
 * #define BITCOUNT(x) (((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
 * #define  BX_(x)     ((x) - (((x)>>1)&0x77777777)            \
 *                          - (((x)>>2)&0x33333333)            \
 *                          - (((x)>>3)&0x11111111))
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}
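/*
 * (A possible answer to the libkern question above: gcc 3.4 and later
 * provide a __builtin_popcount() that expands to the best sequence the
 * compiler knows for the target, which would subsume the hand-rolled
 * version.)
 */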

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

void
smp_invltlb(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
}
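/*
 * The pmap layer is the usual consumer of these: after editing a PTE it
 * invalidates locally and calls the matching smp_*invl*() routine for
 * the other CPUs.  Callers must hold smp_ipi_mtx, which the shootdown
 * functions assert above.
 */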

/*
 * For statclock, we send an IPI to all other CPUs to have them handle a
 * statclock tick.
 */

void
forward_statclock(void)
{
	int map;

	CTR0(KTR_SMP, "forward_statclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_STATCLOCK);
}

/*
 * For each hardclock(), we send an IPI to all other CPU's to have them
 * execute hardclock_process().  It would be nice to reduce contention on
 * sched_lock if we could simply peek at the CPU to determine the user/kernel
 * state and call hardclock_process() on the CPU receiving the clock interrupt
 * and then just use a simple IPI to handle any ast's if needed.
 */

void
forward_hardclock(void)
{
	u_int map;

	CTR0(KTR_SMP, "forward_hardclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_HARDCLOCK);
}

void
ipi_bitmap_handler(struct clockframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;
	struct thread *td;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	critical_enter();

	/* Nothing to do for AST */

	if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
		td = curthread;
		td->td_intr_nesting_level++;
		hardclock_process(&frame);
		td->td_intr_nesting_level--;
	}

	if (ipi_bitmap & (1 << IPI_STATCLOCK)) {
		CTR0(KTR_SMP, "forwarded_statclock");

		td = curthread;
		td->td_intr_nesting_level++;
		if (profprocs != 0)
			profclock(&frame);
		if (pscnt == psdiv)
			statclock(&frame);
		td->td_intr_nesting_level--;
	}

	critical_exit();
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

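			/*
			 * If bits were already pending, an earlier sender
			 * has raised (or is about to raise) the bitmap
			 * vector on this CPU, and its handler will consume
			 * our bit as well; coalesce rather than sending a
			 * redundant IPI.
			 */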
			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}
}

/*
 * send an IPI to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}