mp_machdep.c revision 145727
/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/mp_machdep.c 145727 2005-04-30 20:01:00Z dwhite $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of application processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP TLB shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap-based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	set_logical_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int	hlt_logical_cpus;
static struct	sysctl_ctx_list logical_cpu_clist;
static u_int	bootMP_size;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}
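
/*
 * For example, with two physical packages each reporting two logical
 * CPUs (APIC IDs 0-3 all present, logical_cpus == 2), the loop above
 * builds two groups: cg_mask 0x3 for CPUs 0 and 1, cg_mask 0xc for
 * CPUs 2 and 3, and mp_top.ct_count == 2.
 */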


#ifdef KDB_STOP_NMI
volatile cpumask_t ipi_nmi_pending;
#endif

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
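
/*
 * The resulting low-memory layout, from low to high addresses:
 *
 *	mptramp_pagetables + 0 * PAGE_SIZE	level 4 page table (PML4)
 *	mptramp_pagetables + 1 * PAGE_SIZE	level 3 page table
 *	mptramp_pagetables + 2 * PAGE_SIZE	level 2 page table
 *	boot_address (+ 3 * PAGE_SIZE)		trampoline, bootMP_size bytes
 *
 * start_all_aps() fills in the page tables and copies the trampoline
 * into place.
 */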

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (apic_id > mp_maxid)
		mp_maxid = apic_id;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
			mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	       SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPU info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	set_logical_apic_ids();
}


/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf("  cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	u_int64_t msr, cr0;
	int cpu, gsel_tss;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init TSS */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;   /* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss);

	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	   (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
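	/*
	 * MSR_STAR selects the segments used for fast syscalls: bits
	 * 47:32 supply the kernel %cs (and %ss) loaded by SYSCALL, and
	 * bits 63:48 the selector base from which SYSRET derives the
	 * user %cs and %ss on the way back out.
	 */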
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	/* Disable the local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	/* Init the local APIC for IRQs */
	lapic_setup();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * Set the APIC logical IDs.
 *
 * We want to cluster logical CPUs within the same APIC ID cluster.
 * Since logical CPUs are aligned, simply filling in the clusters in
 * APIC ID order works fine.  Note that this does not try to balance
 * the number of CPUs in each cluster. (XXX?)
 */
static void
set_logical_apic_ids(void)
{
	u_int apic_id, cluster, cluster_id;

	/* Force us to allocate cluster 0 at the start. */
	cluster = -1;
	cluster_id = APIC_MAX_INTRACLUSTER_ID;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
			cluster = ioapic_next_logical_cluster();
			cluster_id = 0;
		} else
			cluster_id++;
		if (bootverbose)
			printf("APIC ID: physical %u, logical %u:%u\n",
			    apic_id, cluster, cluster_id);
		lapic_set_logical_id(apic_id, cluster, cluster_id);
	}
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_int64_t *pt4, *pt3, *pt2;
	vm_offset_t va = boot_address + KERNBASE;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
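
	/*
	 * Every slot of the level 4 and level 3 tables now points at the
	 * same next-level page, so the low 1GB of physical memory is
	 * mapped V==P at every 1GB-aligned virtual address.  This lets
	 * the trampoline enable paging at its low physical address and
	 * still reach the kernel's KERNBASE mapping.
	 */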

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	cpu = 0;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {

		/* Ignore non-existent CPUs and the BSP. */
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", apic_id)) {
			cpu_info[apic_id].cpu_disabled = 1;
			mp_ncpus--;
			continue;
		}

		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}


/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector (the physical page number of the trampoline) */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT/RESET IPI: this INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might
	 * be latched (P5 bug), with the CPU waiting for a STARTUP IPI.
	 * OR this INIT IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10 ms */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP terminates
	 * immediately and the previously started INIT continues.  OR the
	 * previous INIT IPI has already run, and this STARTUP IPI will
	 * run.  OR the previous INIT IPI was ignored, and this STARTUP
	 * IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200 us */

	/*
	 * Finally we do a second STARTUP IPI: this second STARTUP IPI
	 * should run IF the previous STARTUP IPI was cancelled by a
	 * latched INIT IPI.  Otherwise this STARTUP IPI will be ignored,
	 * as only ONE STARTUP IPI is recognized after a hardware RESET
	 * or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200 us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 50; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(100000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}
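
/*
 * The IPI handlers installed in cpu_mp_start() (IDTVEC(invltlb) and
 * friends) are expected to bump smp_tlb_wait once each target CPU has
 * completed its invalidation; the spin loop above (and the one in
 * smp_targeted_tlb_shootdown() below) waits for all of them to check in.
 */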

/*
 * This is about as magic as it gets.  fortune(1) has got similar code
 * for reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 * 	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 *
 * Another variant (also from fortune):
 * #define BITCOUNT(x) (((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
 * #define  BX_(x)     ((x) - (((x)>>1)&0x77777777)            \
 *                          - (((x)>>2)&0x33333333)            \
 *                          - (((x)>>3)&0x11111111))
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}
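
/*
 * For example, popcnt(0xb5) returns 5: 0xb5 is 10110101 in binary, and
 * each step above folds adjacent bit-counts into wider fields until the
 * total ends up in the low word.
 */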

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

void
smp_invltlb(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
}


void
ipi_bitmap_handler(struct clockframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	/* Nothing to do for AST */
}

/*
 * send an IPI to a set of CPUs.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;
	u_int bitmap = 0;
	u_int old_pending;
	u_int new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
	}

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));

		if (bitmap) {
			do {
				old_pending = cpu_ipi_pending[cpu];
				new_pending = old_pending | bitmap;
			} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
			    old_pending, new_pending));

			if (old_pending)
				continue;
		}

		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
	}
}
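
/*
 * Note that for bitmap IPIs the vector is only actually sent when a
 * CPU's pending word transitions from empty; later requests just OR in
 * more bits, and ipi_bitmap_handler() above drains the whole word in
 * one pass via atomic_readandclear_int().
 */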

/*
 * send an IPI INTerrupt containing 'vector' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

#ifdef KDB_STOP_NMI
/*
 * send NMI IPI to selected CPUs
 */

#define	BEFORE_SPIN	1000000

void
ipi_nmi_selected(u_int32_t cpus)
{
	int cpu;
	register_t icrlo;

	icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
		| APIC_TRIGMOD_EDGE;

	CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);

	atomic_set_int(&ipi_nmi_pending, cpus);

	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);

		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI NMI to non-existent CPU %d", cpu));

		/* Wait for an earlier IPI to finish. */
		if (!lapic_ipi_wait(BEFORE_SPIN))
			panic("ipi_nmi_selected: previous IPI has not cleared");

		lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
	}
}

int
ipi_nmi_handler(void)
{
	int cpu = PCPU_GET(cpuid);

	if (!(atomic_load_acq_int(&ipi_nmi_pending) & (1 << cpu)))
		return 1;

	atomic_clear_int(&ipi_nmi_pending, 1 << cpu);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, 1 << cpu);

	/* Wait for restart */
	while (!(atomic_load_acq_int(&started_cpus) & (1 << cpu)))
		ia32_pause();

	atomic_clear_int(&started_cpus, 1 << cpu);
	atomic_clear_int(&stopped_cpus, 1 << cpu);

	if (cpu == 0 && cpustop_restartfunc != NULL)
		cpustop_restartfunc();

	return 0;
}

#endif /* KDB_STOP_NMI */

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}