/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/mp_machdep.c 122940 2003-11-21 03:02:00Z peter $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT. (XXX)
 */
struct cpu_group mp_groups[MAXCPU];
struct cpu_top mp_top;
struct cpu_top *smp_topology;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;
struct mtx smp_tlb_mtx;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;
static u_int logical_cpus_mask;

/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

static u_int boot_address;

static void	set_logical_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int	hlt_cpus_mask;
static int	hlt_logical_cpus;
static struct	sysctl_ctx_list logical_cpu_clist;
static u_int	bootMP_size;

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
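
/*
 * A worked example of the math above (illustrative values only): with
 * basemem = 639 (KB) and a trampoline smaller than 0xc00 bytes,
 *
 *	basemem * 1024		= 0x9fc00
 *	boot_address		= trunc_page(0x9fc00)     = 0x9f000
 *	mptramp_pagetables	= 0x9f000 - 3 * PAGE_SIZE = 0x9c000
 *
 * so the three page table pages occupy 0x9c000-0x9efff, immediately
 * below the trampoline code page at 0x9f000.
 */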

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (apic_id > mp_maxid)
		mp_maxid = apic_id;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
	return (1);
}

/*
 * Initialize the IPI handlers and start up the AP's.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++)
		cpu_apic_ids[i] = -1;

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for forwarding hardclock() */
	setidt(IPI_HARDCLOCK, IDTVEC(hardclock), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for forwarding statclock() */
	setidt(IPI_STATCLOCK, IDTVEC(statclock), SDT_SYSIGT, SEL_KPL, 0);

#ifdef LAZY_SWITCH
	/* Install an inter-CPU IPI for lazy pmap release */
	setidt(IPI_LAZYPMAP, IDTVEC(lazypmap), SDT_SYSIGT, SEL_KPL, 0);
#endif

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for forcing an additional software trap */
	setidt(IPI_AST, IDTVEC(cpuast), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	mtx_init(&smp_tlb_mtx, "tlb", NULL, MTX_SPIN);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
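	/*
	 * For illustration: CPUID leaf 1 reports the number of logical
	 * CPUs per physical package in bits 23:16 of %ebx (the
	 * CPUID_HTT_CORES field masked above), e.g. 2 on a 2-way HTT
	 * package.  Sibling APIC IDs then differ only in their low
	 * bit(s), which is what the "apic_id % logical_cpus" test in
	 * init_secondary() relies on.
	 */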

	set_logical_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (cpu_info[x].cpu_present && !cpu_info[x].cpu_bsp) {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	u_int64_t msr, cr0;
	int cpu, gsel_tss;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;   /* not used until after switch */

	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	   (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
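
	/*
	 * For reference: MSR_STAR bits 47:32 (GSEL(GCODE_SEL, SEL_KPL)
	 * above) select the kernel %cs/%ss pair loaded on SYSCALL, and
	 * bits 63:48 (GSEL(GUCODE32_SEL, SEL_UPL)) seed the user
	 * selectors restored on SYSRET.  MSR_SF_MASK lists the rflags
	 * bits cleared on kernel entry; including PSL_I means we arrive
	 * in the syscall handler with interrupts disabled.
	 */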

	/* Disable local apic just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (!aps_ready)
		ia32_pause();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	fpuinit();

	/* set up SSE registers */
	enable_sse();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */
	lapic_setup();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * Set the APIC logical IDs.
 *
 * We want to cluster logical CPU's within the same APIC ID cluster.
 * Since logical CPU's are aligned, simply filling in the clusters in
 * APIC ID order works fine.  Note that this does not try to balance
 * the number of CPU's in each cluster. (XXX?)
 */
static void
set_logical_apic_ids(void)
{
	u_int apic_id, cluster, cluster_id;

	/* Force us to allocate cluster 0 at the start. */
	cluster = -1;
	cluster_id = APIC_MAX_INTRACLUSTER_ID;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
			cluster = ioapic_next_logical_cluster();
			cluster_id = 0;
		} else
			cluster_id++;
		if (bootverbose)
			printf("APIC ID: physical %u, logical %u:%u\n",
			    apic_id, cluster, cluster_id);
		lapic_set_logical_id(apic_id, cluster, cluster_id);
	}
}
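
/*
 * Example of the assignment above (assuming APIC_MAX_INTRACLUSTER_ID
 * is 3): four present CPUs with physical APIC IDs 0, 1, 6 and 7 get
 * logical IDs 0:0, 0:1, 0:2 and 0:3; a fifth CPU would open a new
 * cluster and become 1:0.
 */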

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_int64_t *pt4, *pt3, *pt2;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(boot_address + KERNBASE, boot_address);
	bcopy(mptramp_start, (void *)((uintptr_t)boot_address + KERNBASE), bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
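
	/*
	 * Net effect, for illustration: every level 4 and level 3 slot
	 * aliases the same lower-level page, so any virtual address
	 * maps to its low 30 bits as a physical address.  E.g. VA
	 * 0x00200000 walks pt4[0] -> pt3[0] -> pt2[1] and lands on PA
	 * 0x00200000, giving the trampoline the V==P view it needs
	 * while paging is switched on.
	 */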

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* set up a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	cpu = 0;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;
		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
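
	/*
	 * For example (illustrative address): a trampoline at 0x9f000
	 * yields vector 0x9f, and the STARTUP IPI starts the AP in real
	 * mode at physical address vector << 12 = 0x9f000.  This is why
	 * the boot code must be page-aligned and below 1MB.
	 */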

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU; OR this INIT IPI might be latched (P5
	 * bug), with the CPU waiting for the STARTUP IPI; OR this INIT IPI
	 * might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10ms */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP IPI terminates
	 * immediately and the previously started INIT IPI continues; OR the
	 * previous INIT IPI has already run, and this STARTUP IPI will run;
	 * OR the previous INIT IPI was ignored, and this STARTUP IPI will
	 * run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/*
	 * Finally we do a second STARTUP IPI: this second STARTUP IPI should
	 * run IF the previous STARTUP IPI was cancelled by a latched INIT
	 * IPI; OR this STARTUP IPI will be ignored, as only ONE STARTUP IPI
	 * is recognized after hardware RESET or an INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 50; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(100000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPU's
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_tlb_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}
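
/*
 * A sketch of the expected calling pattern (the real callers are the
 * pmap_invalidate_*() routines in pmap.c):
 *
 *	mtx_lock_spin(&smp_tlb_mtx);
 *	smp_invltlb();			(ends up in smp_tlb_shootdown())
 *	mtx_unlock_spin(&smp_tlb_mtx);
 *
 * Holding the spin mutex serializes shootdowns so that smp_tlb_addr1,
 * smp_tlb_addr2 and smp_tlb_wait stay stable while the other CPUs
 * handle the IPI.
 */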

/*
 * This is about as magic as it gets.  fortune(1) has got similar code
 * for reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 * 	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 *
 * Another variant (also from fortune):
 * #define BITCOUNT(x) (((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
 * #define  BX_(x)     ((x) - (((x)>>1)&0x77777777)            \
 *                          - (((x)>>2)&0x33333333)            \
 *                          - (((x)>>3)&0x11111111))
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}
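
/*
 * A quick check by hand: for m = 0xb (binary 1011) the first fold
 * yields 0x6 (2-bit partial sums 01 and 10), the second fold yields
 * 0x3, and the remaining folds leave it unchanged, so popcnt(0xb)
 * returns 3 as expected.
 */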

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_tlb_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

void
smp_invltlb(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
}

/*
 * For statclock, we send an IPI to all CPU's to have them call this
 * function.
 */
void
forwarded_statclock(struct clockframe frame)
{
	struct thread *td;

	CTR0(KTR_SMP, "forwarded_statclock");
	td = curthread;
	td->td_intr_nesting_level++;
	if (profprocs != 0)
		profclock(&frame);
	if (pscnt == psdiv)
		statclock(&frame);
	td->td_intr_nesting_level--;
}

void
forward_statclock(void)
{
	int map;

	CTR0(KTR_SMP, "forward_statclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_STATCLOCK);
}

/*
 * For each hardclock(), we send an IPI to all other CPU's to have them
 * execute this function.  It would be nice to reduce contention on
 * sched_lock if we could simply peek at the CPU to determine the user/kernel
 * state and call hardclock_process() on the CPU receiving the clock interrupt
 * and then just use a simple IPI to handle any ast's if needed.
 */
void
forwarded_hardclock(struct clockframe frame)
{
	struct thread *td;

	CTR0(KTR_SMP, "forwarded_hardclock");
	td = curthread;
	td->td_intr_nesting_level++;
	hardclock_process(&frame);
	td->td_intr_nesting_level--;
}

void
forward_hardclock(void)
{
	u_int map;

	CTR0(KTR_SMP, "forward_hardclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_HARDCLOCK);
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));
		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
		cpus &= ~(1 << cpu);
	}
}
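
/*
 * Usage example (hypothetical mask): ipi_selected(0x6, IPI_AST) posts
 * an AST IPI to the CPUs with logical IDs 1 and 2, translating each
 * logical ID to its physical APIC ID through cpu_apic_ids[].
 */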

/*
 * send an IPI INTerrupt containing 'vector' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU", "");
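
/*
 * Usage sketch from userland, with the OID registered above:
 *
 *	sysctl machdep.hlt_cpus=2	(halt CPU 1)
 *	sysctl machdep.hlt_cpus=0	(let all CPUs run again)
 *
 * Note that the handler never lets the whole system halt: if the new
 * mask would cover all_cpus, bit 0 (the BSP) is cleared from it.
 */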

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
	int retval;

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}