/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/mp_machdep.c 122947 2003-11-21 22:23:26Z jhb $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
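
/*
 * The BIOS warm-boot protocol: writing BIOS_WARM into the CMOS shutdown
 * status register (offset BIOS_RESET) makes the BIOS skip POST on the
 * next reset and jump through the real-mode far pointer stored at
 * 0040:0067 (WARMBOOT_OFF/WARMBOOT_SEG here, viewed through the
 * kernel's KERNBASE mapping of low memory).
 */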

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors (APs) */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT. (XXX)
 */
struct cpu_group mp_groups[MAXCPU];
struct cpu_top mp_top;
struct cpu_top *smp_topology;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;
struct mtx smp_tlb_mtx;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;
static u_int logical_cpus_mask;

/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually setup
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

static u_int boot_address;

static void	set_logical_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int	hlt_cpus_mask;
static int	hlt_logical_cpus;
static struct	sysctl_ctx_list logical_cpu_clist;
static u_int	bootMP_size;

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
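/*
 * For example (illustrative numbers only): with basemem = 639 (KB),
 * basemem * 1024 = 0x9fc00 rounds down to boot_address = 0x9f000.  The
 * trampoline is copied there, and the three bootstrap page table pages
 * occupy the pages immediately below it, starting at 0x9c000.
 */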
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if ((basemem * 1024 - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (apic_id > mp_maxid)
		mp_maxid = apic_id;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");

}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should be already set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
			mp_maxid, mp_ncpus));

}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Setup
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the AP's.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++)
		cpu_apic_ids[i] = -1;

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for forwarding hardclock() */
	setidt(IPI_HARDCLOCK, IDTVEC(hardclock), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for forwarding statclock() */
	setidt(IPI_STATCLOCK, IDTVEC(statclock), SDT_SYSIGT, SEL_KPL, 0);

#ifdef LAZY_SWITCH
	/* Install an inter-CPU IPI for lazy pmap release */
	setidt(IPI_LAZYPMAP, IDTVEC(lazypmap), SDT_SYSIGT, SEL_KPL, 0);
#endif

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for forcing an additional software trap */
	setidt(IPI_AST, IDTVEC(cpuast), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	mtx_init(&smp_tlb_mtx, "tlb", NULL, MTX_SPIN);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Setup the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
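	/*
	 * CPUID_HTT_CORES selects bits 23:16 of the CPUID leaf 1 EBX value,
	 * the number of logical processors per physical package advertised
	 * when CPUID_HTT is set, hence the shift by 16 below.
	 */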
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	set_logical_apic_ids();
}


/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (cpu_info[x].cpu_present && !cpu_info[x].cpu_bsp) {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	u_int64_t msr, cr0;
	int cpu, gsel_tss;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;   /* not used until after switch */

	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	   (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
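	/*
	 * MSR_STAR layout: bits 47:32 select the kernel CS/SS pair loaded
	 * by SYSCALL, and bits 63:48 the selector base used by SYSRET
	 * (32-bit CS at the base, 64-bit CS at base + 16, SS at base + 8).
	 * MSR_SF_MASK lists the rflags bits cleared on kernel entry; PSL_I
	 * is included so the kernel starts with interrupts disabled.
	 */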
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	/* Disable local apic just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (!aps_ready)
		ia32_pause();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	fpuinit();

	/* set up SSE registers */
	enable_sse();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */
	lapic_setup();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * Set the APIC logical IDs.
 *
 * We want to cluster logical CPU's within the same APIC ID cluster.
 * Since logical CPU's are aligned, simply filling in the clusters in
 * APIC ID order works fine.  Note that this does not try to balance
 * the number of CPU's in each cluster. (XXX?)
 */
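/*
 * Illustrative example (assuming APIC_MAX_INTRACLUSTER_ID == 3): eight
 * present CPUs with APIC IDs 0-7 would receive logical IDs 0:0 through
 * 0:3 and 1:0 through 1:3, filling two clusters of four.
 */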
static void
set_logical_apic_ids(void)
{
	u_int apic_id, cluster, cluster_id;

	/* Force us to allocate cluster 0 at the start. */
	cluster = -1;
	cluster_id = APIC_MAX_INTRACLUSTER_ID;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
			cluster = ioapic_next_logical_cluster();
			cluster_id = 0;
		} else
			cluster_id++;
		if (bootverbose)
			printf("APIC ID: physical %u, logical %u:%u\n",
			    apic_id, cluster, cluster_id);
		lapic_set_logical_id(apic_id, cluster, cluster_id);
	}
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_int64_t *pt4, *pt3, *pt2;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(boot_address + KERNBASE, boot_address);
	bcopy(mptramp_start, (void *)((uintptr_t)boot_address + KERNBASE), bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
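	/*
	 * Replicating the same level 3 and level 2 pages into every slot
	 * means the low 1GB of physical memory reappears at every 1GB
	 * boundary of the virtual address space, so the trampoline can run
	 * at its low identity-mapped address and the same tables still
	 * cover kernel addresses once long mode is entered.
	 */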
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	cpu = 0;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;
		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}


/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
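	/*
	 * The vector field of a STARTUP IPI names the physical page at
	 * which the AP begins real-mode execution (start address =
	 * vector << 12), which is why the trampoline must be page-aligned
	 * below 1MB.
	 */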

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched
	 * (P5 bug), with the CPU waiting for a STARTUP IPI.  OR this INIT
	 * IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10ms */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP IPI would
	 * terminate immediately and the previously started INIT IPI would
	 * continue.  OR the previous INIT IPI has already run, and this
	 * STARTUP IPI will run.  OR the previous INIT IPI was ignored, and
	 * this STARTUP IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */
	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run
	 * IF the previous STARTUP IPI was cancelled by a latched INIT IPI.
	 * OR this STARTUP IPI will be ignored, as only one STARTUP IPI is
	 * recognized after a hardware RESET or INIT IPI.
	 */

	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200us */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 50; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(100000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPU's
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_tlb_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
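	/*
	 * Each target CPU's invalidation handler is expected to bump
	 * smp_tlb_wait once it has processed the request, so this spin
	 * finishes when all ncpu other CPUs have checked in.
	 */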
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

/*
 * This is about as magic as it gets.  fortune(1) has got similar code
 * for reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 * 	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 *
 * Another variant (also from fortune):
 * #define BITCOUNT(x) (((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
 * #define  BX_(x)     ((x) - (((x)>>1)&0x77777777)            \
 *                          - (((x)>>2)&0x33333333)            \
 *                          - (((x)>>3)&0x11111111))
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}
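
/*
 * Worked example: popcnt(0xb) starts from m = 1011b.  The first line
 * counts the bits within each 2-bit field, giving 01 10b; the second
 * sums adjacent fields into 4-bit counts, giving 0011b; the remaining
 * lines just widen the field, leaving the answer 3.
 */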

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_tlb_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

void
smp_invltlb(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
}


/*
 * For statclock, we send an IPI to all CPU's to have them call this
 * function.
 */
void
forwarded_statclock(struct clockframe frame)
{
	struct thread *td;

	CTR0(KTR_SMP, "forwarded_statclock");
	td = curthread;
	td->td_intr_nesting_level++;
	if (profprocs != 0)
		profclock(&frame);
	if (pscnt == psdiv)
		statclock(&frame);
	td->td_intr_nesting_level--;
}

void
forward_statclock(void)
{
	int map;

	CTR0(KTR_SMP, "forward_statclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_STATCLOCK);
}

/*
 * For each hardclock(), we send an IPI to all other CPU's to have them
 * execute this function.  It would be nice to reduce contention on
 * sched_lock if we could simply peek at the CPU to determine the user/kernel
 * state and call hardclock_process() on the CPU receiving the clock interrupt
 * and then just use a simple IPI to handle any ast's if needed.
 */
void
forwarded_hardclock(struct clockframe frame)
{
	struct thread *td;

	CTR0(KTR_SMP, "forwarded_hardclock");
	td = curthread;
	td->td_intr_nesting_level++;
	hardclock_process(&frame);
	td->td_intr_nesting_level--;
}

void
forward_hardclock(void)
{
	u_int map;

	CTR0(KTR_SMP, "forward_hardclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_HARDCLOCK);
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;			/* ffs() is 1-based */
		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));
		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
		cpus &= ~(1 << cpu);
	}
}

/*
 * send an IPI containing 'ipi' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

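	/*
	 * Never allow every CPU to be halted at once: if the requested mask
	 * covers the whole map, keep the BSP (cpu0) out of it so the system
	 * stays alive.
	 */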
	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU", "");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
	int retval;

	retval = mask & hlt_cpus_mask;
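	/*
	 * "sti; hlt" takes advantage of the one-instruction interrupt
	 * shadow after sti: the hlt is guaranteed to execute before any
	 * interrupt is taken, so a wakeup IPI cannot be lost in between.
	 * Each interrupt wakes the CPU to re-test the mask.
	 */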
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}
