/*-
 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Local APIC support on Pentium and later processors.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/x86/local_apic.c 208507 2010-05-24 15:45:05Z jhb $");

#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/apicreg.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/apicvar.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/specialreg.h>

#ifdef DDB
#include <sys/interrupt.h>
#include <ddb/ddb.h>
#endif

#ifdef __amd64__
#define	SDT_APIC	SDT_SYSIGT
#define	SDT_APICT	SDT_SYSIGT
#define	GSEL_APIC	0
#else
#define	SDT_APIC	SDT_SYS386IGT
#define	SDT_APICT	SDT_SYS386TGT
#define	GSEL_APIC	GSEL(GCODE_SEL, SEL_KPL)
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t	cyclic_clock_func[MAXCPU];
#endif

/* Sanity checks on IDT vectors. */
CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
CTASSERT(APIC_LOCAL_INTS == 240);
CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);

/* Magic IRQ values for the timer and syscalls. */
#define	IRQ_TIMER	(NUM_IO_INTS + 1)
#define	IRQ_SYSCALL	(NUM_IO_INTS + 2)

/*
 * Support for local APICs.  Local APICs manage interrupts on each
 * individual processor, as opposed to I/O APICs, which receive interrupts
 * from I/O devices and then forward them on to the local APICs.
 *
 * Local APICs can also send interrupts to each other, thus providing the
 * mechanism for IPIs.
 */

struct lvt {
	u_int lvt_edgetrigger:1;
	u_int lvt_activehi:1;
	u_int lvt_masked:1;
	u_int lvt_active:1;
	u_int lvt_mode:16;
	u_int lvt_vector:8;
};

struct lapic {
	struct lvt la_lvts[LVT_MAX + 1];
	u_int la_id:8;
	u_int la_cluster:4;
	u_int la_cluster_id:2;
	u_int la_present:1;
	u_long *la_timer_count;
	/* Include IDT_SYSCALL to make indexing easier. */
	int la_ioint_irqs[APIC_NUM_IOINTS + 1];
} static lapics[MAX_APIC_ID + 1];

/* Global defaults for local APIC LVT entries. */
static struct lvt lvts[LVT_MAX + 1] = {
	{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },	/* LINT0: masked ExtINT */
	{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },	/* LINT1: NMI */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },	/* Timer */
	{ 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },	/* Error */
	{ 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },	/* PMC */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },	/* Thermal */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },	/* CMCI */
};

static inthand_t *ioint_handlers[] = {
	NULL,			/* 0 - 31 */
	IDTVEC(apic_isr1),	/* 32 - 63 */
	IDTVEC(apic_isr2),	/* 64 - 95 */
	IDTVEC(apic_isr3),	/* 96 - 127 */
	IDTVEC(apic_isr4),	/* 128 - 159 */
	IDTVEC(apic_isr5),	/* 160 - 191 */
	IDTVEC(apic_isr6),	/* 192 - 223 */
	IDTVEC(apic_isr7),	/* 224 - 255 */
};


static u_int32_t lapic_timer_divisors[] = {
	APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
	APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
};

extern inthand_t IDTVEC(rsvd);

volatile lapic_t *lapic;
vm_paddr_t lapic_paddr;
static u_long lapic_timer_divisor, lapic_timer_period, lapic_timer_hz;
static enum lapic_clock clockcoverage;

static void	lapic_enable(void);
static void	lapic_resume(struct pic *pic);
static void	lapic_timer_enable_intr(void);
static void	lapic_timer_oneshot(u_int count);
static void	lapic_timer_periodic(u_int count);
static void	lapic_timer_set_divisor(u_int divisor);
static uint32_t	lvt_mode(struct lapic *la, u_int pin, uint32_t value);

struct pic lapic_pic = { .pic_resume = lapic_resume };

static uint32_t
lvt_mode(struct lapic *la, u_int pin, uint32_t value)
{
	struct lvt *lvt;

	KASSERT(pin <= LVT_MAX, ("%s: pin %u out of range", __func__, pin));
	if (la->la_lvts[pin].lvt_active)
		lvt = &la->la_lvts[pin];
	else
		lvt = &lvts[pin];

	value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
	    APIC_LVT_VECTOR);
	if (lvt->lvt_edgetrigger == 0)
		value |= APIC_LVT_TM;
	if (lvt->lvt_activehi == 0)
		value |= APIC_LVT_IIPP_INTALO;
	if (lvt->lvt_masked)
		value |= APIC_LVT_M;
	value |= lvt->lvt_mode;
	switch (lvt->lvt_mode) {
	case APIC_LVT_DM_NMI:
	case APIC_LVT_DM_SMI:
	case APIC_LVT_DM_INIT:
	case APIC_LVT_DM_EXTINT:
		if (!lvt->lvt_edgetrigger) {
			printf("lapic%u: Forcing LINT%u to edge trigger\n",
			    la->la_id, pin);
			value |= APIC_LVT_TM;
		}
		/* Use a vector of 0. */
		break;
	case APIC_LVT_DM_FIXED:
		value |= lvt->lvt_vector;
		break;
	default:
		panic("bad APIC LVT delivery mode: %#x\n", value);
	}
	return (value);
}

/*
 * Map the local APIC and set up necessary interrupt vectors.
 */
void
lapic_init(vm_paddr_t addr)
{

	/* Map the local APIC and set up the spurious interrupt handler. */
	KASSERT(trunc_page(addr) == addr,
	    ("local APIC not aligned on a page boundary"));
	lapic = pmap_mapdev(addr, sizeof(lapic_t));
	lapic_paddr = addr;
	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
	    GSEL_APIC);

	/* Perform basic initialization of the BSP's local APIC. */
	lapic_enable();

	/* Set BSP's per-CPU local APIC ID. */
	PCPU_SET(apic_id, lapic_id());

	/* Local APIC timer interrupt. */
	setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* Local APIC error interrupt. */
	setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* XXX: Thermal interrupt */

	/* Local APIC CMCI. */
	setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);
}

/*
 * Create a local APIC instance.
 */
void
lapic_create(u_int apic_id, int boot_cpu)
{
	int i;

	if (apic_id > MAX_APIC_ID) {
		printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
		if (boot_cpu)
			panic("Can't ignore BSP");
		return;
	}
	KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
	    apic_id));

	/*
	 * Assume no local LVT overrides and a cluster of 0 and
	 * intra-cluster ID of 0.
	 */
	lapics[apic_id].la_present = 1;
	lapics[apic_id].la_id = apic_id;
	for (i = 0; i <= LVT_MAX; i++) {
		lapics[apic_id].la_lvts[i] = lvts[i];
		lapics[apic_id].la_lvts[i].lvt_active = 0;
	}
	for (i = 0; i <= APIC_NUM_IOINTS; i++)
	    lapics[apic_id].la_ioint_irqs[i] = -1;
	lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
	lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
	    IRQ_TIMER;

#ifdef SMP
	cpu_add(apic_id, boot_cpu);
#endif
}

/*
 * Dump the contents of the local APIC registers.
 */
void
lapic_dump(const char* str)
{

	printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
	printf("     ID: 0x%08x   VER: 0x%08x LDR: 0x%08x DFR: 0x%08x\n",
	    lapic->id, lapic->version, lapic->ldr, lapic->dfr);
	printf("  lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
	    lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
	printf("  timer: 0x%08x therm: 0x%08x err: 0x%08x pmc: 0x%08x\n",
	    lapic->lvt_timer, lapic->lvt_thermal, lapic->lvt_error,
	    lapic->lvt_pcint);
	printf("   cmci: 0x%08x\n", lapic->lvt_cmci);
}

void
lapic_setup(int boot)
{
	struct lapic *la;
	u_int32_t maxlvt;
	register_t eflags;
	char buf[MAXCOMLEN + 1];

	la = &lapics[lapic_id()];
	KASSERT(la->la_present, ("missing APIC structure"));
	eflags = intr_disable();
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;

	/* Initialize the TPR to allow all interrupts. */
	lapic_set_tpr(0);

	/* Set up the spurious vector and enable the local APIC. */
	lapic_enable();

	/* Program LINT[01] LVT entries. */
	lapic->lvt_lint0 = lvt_mode(la, LVT_LINT0, lapic->lvt_lint0);
	lapic->lvt_lint1 = lvt_mode(la, LVT_LINT1, lapic->lvt_lint1);

	/* Program the PMC LVT entry if present. */
	if (maxlvt >= LVT_PMC)
		lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);

	/* Program the timer LVT and set up its handler. */
	lapic->lvt_timer = lvt_mode(la, LVT_TIMER, lapic->lvt_timer);
	if (boot) {
		snprintf(buf, sizeof(buf), "cpu%d: timer", PCPU_GET(cpuid));
		intrcnt_add(buf, &la->la_timer_count);
	}

	/* We don't set up the timer on the BSP during boot until later. */
	if (!(boot && PCPU_GET(cpuid) == 0) && lapic_timer_hz != 0) {
		KASSERT(lapic_timer_period != 0, ("lapic%u: zero divisor",
		    lapic_id()));
		lapic_timer_set_divisor(lapic_timer_divisor);
		lapic_timer_periodic(lapic_timer_period);
		lapic_timer_enable_intr();
	}

	/* Program error LVT and clear any existing errors. */
	lapic->lvt_error = lvt_mode(la, LVT_ERROR, lapic->lvt_error);
	lapic->esr = 0;

	/* XXX: Thermal LVT */

	/* Program the CMCI LVT entry if present. */
	if (maxlvt >= LVT_CMCI)
		lapic->lvt_cmci = lvt_mode(la, LVT_CMCI, lapic->lvt_cmci);

	intr_restore(eflags);
}

void
lapic_reenable_pmc(void)
{
#ifdef HWPMC_HOOKS
	uint32_t value;

	value = lapic->lvt_pcint;
	value &= ~APIC_LVT_M;
	lapic->lvt_pcint = value;
#endif
}

#ifdef HWPMC_HOOKS
static void
lapic_update_pmc(void *dummy)
{
	struct lapic *la;

	la = &lapics[lapic_id()];
	lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);
}
#endif

int
lapic_enable_pmc(void)
{
#ifdef HWPMC_HOOKS
	u_int32_t maxlvt;

	/* Fail if the local APIC is not present. */
	if (lapic == NULL)
		return (0);

	/* Fail if the PMC LVT is not present. */
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	if (maxlvt < LVT_PMC)
		return (0);

	lvts[LVT_PMC].lvt_masked = 0;

#ifdef SMP
	/*
	 * If hwpmc was loaded at boot time then the APs may not be
	 * started yet.  In that case, don't forward the request to
	 * them as they will program the LVT when they start.
	 */
	if (smp_started)
		smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
	else
#endif
		lapic_update_pmc(NULL);
	return (1);
#else
	return (0);
#endif
}

void
lapic_disable_pmc(void)
{
#ifdef HWPMC_HOOKS
	u_int32_t maxlvt;

	/* Fail if the local APIC is not present. */
	if (lapic == NULL)
		return;

	/* Fail if the PMC LVT is not present. */
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	if (maxlvt < LVT_PMC)
		return;

	lvts[LVT_PMC].lvt_masked = 1;

#ifdef SMP
	/* The APs should always be started when hwpmc is unloaded. */
	KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
#endif
	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
#endif
}

/*
 * Called by cpu_initclocks() on the BSP to set up the local APIC timer so
 * that it can drive hardclock, statclock, and profclock.
 */
enum lapic_clock
lapic_setup_clock(enum lapic_clock srcsdes)
{
	u_long value;
	int i;

	/* lapic_setup_clock() should not be called with LAPIC_CLOCK_NONE. */
	MPASS(srcsdes != LAPIC_CLOCK_NONE);

	/* Can't drive the timer without a local APIC. */
	if (lapic == NULL ||
	    (resource_int_value("apic", 0, "clock", &i) == 0 && i == 0)) {
		clockcoverage = LAPIC_CLOCK_NONE;
		return (clockcoverage);
	}

	/* Start off with a divisor of 2 (power on reset default). */
	lapic_timer_divisor = 2;

	/* Try to calibrate the local APIC timer. */
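	/*
	 * Run a 2 second one-shot at each divisor, doubling the divisor
	 * whenever the counter expires before it can be read back.
	 */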
	do {
		lapic_timer_set_divisor(lapic_timer_divisor);
		lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
		DELAY(2000000);
		value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
		if (value != APIC_TIMER_MAX_COUNT)
			break;
		lapic_timer_divisor <<= 1;
	} while (lapic_timer_divisor <= 128);
	if (lapic_timer_divisor > 128)
		panic("lapic: Divisor too big");
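	/*
	 * The calibration interval was 2 seconds, so halve the count to
	 * get ticks per second.
	 */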
	value /= 2;
	if (bootverbose)
		printf("lapic: Divisor %lu, Frequency %lu Hz\n",
		    lapic_timer_divisor, value);

	/*
	 * We want to run stathz in the neighborhood of 128 Hz.  We would
	 * like profhz to run as often as possible, so we let it run on
	 * each clock tick.  We try to honor the requested 'hz' value as
	 * much as possible.
	 *
	 * If 'hz' is above 1500, then we just let the lapic timer
	 * (and profhz) run at hz.  If 'hz' is below 1500 but above
	 * 750, then we let the lapic timer run at 2 * 'hz'.  If 'hz'
	 * is below 750 then we let the lapic timer run at 4 * 'hz'.
	 *
	 * Please note that stathz and profhz are set only if all the
	 * clocks are handled through the local APIC.
	 */
	if (srcsdes == LAPIC_CLOCK_ALL) {
		if (hz >= 1500)
			lapic_timer_hz = hz;
		else if (hz >= 750)
			lapic_timer_hz = hz * 2;
		else
			lapic_timer_hz = hz * 4;
	} else
		lapic_timer_hz = hz;
	lapic_timer_period = value / lapic_timer_hz;
	timer1hz = lapic_timer_hz;
	if (srcsdes == LAPIC_CLOCK_ALL) {
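		/*
		 * stathz is left at the timer rate if that is already at
		 * or below 128 Hz; otherwise it is the timer rate divided
		 * by an integer, keeping it as close to 128 Hz as possible.
		 */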
		if (lapic_timer_hz < 128)
			stathz = lapic_timer_hz;
		else
			stathz = lapic_timer_hz / (lapic_timer_hz / 128);
		profhz = lapic_timer_hz;
		timer2hz = 0;
	}

	/*
	 * Start up the timer on the BSP.  The APs will kick off their
	 * timer during lapic_setup().
	 */
	lapic_timer_periodic(lapic_timer_period);
	lapic_timer_enable_intr();
	clockcoverage = srcsdes;
	return (srcsdes);
}

void
lapic_disable(void)
{
	uint32_t value;

	/* Software disable the local APIC. */
	value = lapic->svr;
	value &= ~APIC_SVR_SWEN;
	lapic->svr = value;
}

static void
lapic_enable(void)
{
	u_int32_t value;

	/* Program the spurious vector to enable the local APIC. */
	value = lapic->svr;
	value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
	value |= (APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT);
	lapic->svr = value;
}

/* Reset the local APIC on the BSP during resume. */
static void
lapic_resume(struct pic *pic)
{

	lapic_setup(0);
}

int
lapic_id(void)
{

	KASSERT(lapic != NULL, ("local APIC is not mapped"));
	return (lapic->id >> APIC_ID_SHIFT);
}

int
lapic_intr_pending(u_int vector)
{
	volatile u_int32_t *irr;

	/*
	 * The IRR registers are an array of 128-bit registers, each of
	 * which only describes 32 interrupts in its low 32 bits.  Thus,
	 * we divide the vector by 32 to get the 128-bit index.  We then
	 * multiply that index by 4 to get the equivalent index from
	 * treating the IRR as an array of 32-bit registers.  Finally, we
	 * take the vector modulo 32 to determine the individual bit to
	 * test.
	 */
	irr = &lapic->irr0;
	return (irr[(vector / 32) * 4] & 1 << (vector % 32));
}

void
lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
{
	struct lapic *la;

	KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
	    __func__, apic_id));
	KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
	    __func__, cluster));
	KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
	    ("%s: intra cluster id %u too big", __func__, cluster_id));
	la = &lapics[apic_id];
	la->la_cluster = cluster;
	la->la_cluster_id = cluster_id;
}

int
lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
{

	if (pin > LVT_MAX)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvts[pin].lvt_masked = masked;
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lapics[apic_id].la_lvts[pin].lvt_masked = masked;
		lapics[apic_id].la_lvts[pin].lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	if (bootverbose)
		printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
	return (0);
}

int
lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
{
	struct lvt *lvt;

	if (pin > LVT_MAX)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvt = &lvts[pin];
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lvt = &lapics[apic_id].la_lvts[pin];
		lvt->lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	lvt->lvt_mode = mode;
	switch (mode) {
	case APIC_LVT_DM_NMI:
	case APIC_LVT_DM_SMI:
	case APIC_LVT_DM_INIT:
	case APIC_LVT_DM_EXTINT:
		lvt->lvt_edgetrigger = 1;
		lvt->lvt_activehi = 1;
		if (mode == APIC_LVT_DM_EXTINT)
			lvt->lvt_masked = 1;
		else
			lvt->lvt_masked = 0;
		break;
	default:
		panic("Unsupported delivery mode: 0x%x\n", mode);
	}
	if (bootverbose) {
		printf(" Routing ");
		switch (mode) {
		case APIC_LVT_DM_NMI:
			printf("NMI");
			break;
		case APIC_LVT_DM_SMI:
			printf("SMI");
			break;
		case APIC_LVT_DM_INIT:
			printf("INIT");
			break;
		case APIC_LVT_DM_EXTINT:
			printf("ExtINT");
			break;
		}
		printf(" -> LINT%u\n", pin);
	}
	return (0);
}

int
lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
{

	if (pin > LVT_MAX || pol == INTR_POLARITY_CONFORM)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lapics[apic_id].la_lvts[pin].lvt_active = 1;
		lapics[apic_id].la_lvts[pin].lvt_activehi =
		    (pol == INTR_POLARITY_HIGH);
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	if (bootverbose)
		printf(" LINT%u polarity: %s\n", pin,
		    pol == INTR_POLARITY_HIGH ? "high" : "low");
	return (0);
}

int
lapic_set_lvt_triggermode(u_int apic_id, u_int pin, enum intr_trigger trigger)
{

	if (pin > LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
		    (trigger == INTR_TRIGGER_EDGE);
		lapics[apic_id].la_lvts[pin].lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	if (bootverbose)
		printf(" LINT%u trigger: %s\n", pin,
		    trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
	return (0);
}

/*
 * Adjust the TPR of the current CPU so that it blocks all interrupts below
 * the passed in vector.
 */
void
lapic_set_tpr(u_int vector)
{
#ifdef CHEAP_TPR
	lapic->tpr = vector;
#else
	u_int32_t tpr;

	tpr = lapic->tpr & ~APIC_TPR_PRIO;
	tpr |= vector;
	lapic->tpr = tpr;
#endif
}

void
lapic_eoi(void)
{

	lapic->eoi = 0;
}

void
lapic_handle_intr(int vector, struct trapframe *frame)
{
	struct intsrc *isrc;

	if (vector == -1)
		panic("Couldn't get vector from ISR!");
	isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
	    vector));
	intr_execute_handlers(isrc, frame);
}

void
lapic_handle_timer(struct trapframe *frame)
{
	struct lapic *la;

	/* Send EOI first thing. */
	lapic_eoi();

#if defined(SMP) && !defined(SCHED_ULE)
	/*
	 * Don't do any accounting for the disabled HTT cores, since it
	 * will provide misleading numbers for userland.
	 *
	 * No locking is necessary here, since even if we lose the race
	 * when hlt_cpus_mask changes it is not a big deal, really.
	 *
	 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
	 * and unlike other schedulers it actually schedules threads to
	 * those CPUs.
	 */
	if ((hlt_cpus_mask & (1 << PCPU_GET(cpuid))) != 0)
		return;
#endif

	/* Look up our local APIC structure for the tick counters. */
	la = &lapics[PCPU_GET(apic_id)];
	(*la->la_timer_count)++;
	critical_enter();

#ifdef KDTRACE_HOOKS
	/*
	 * If the DTrace hooks are configured and a callback function
	 * has been registered, then call it to process the high speed
	 * timers.
	 */
	int cpu = PCPU_GET(cpuid);
	if (cyclic_clock_func[cpu] != NULL)
		(*cyclic_clock_func[cpu])(frame);
#endif

	timer1clock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	critical_exit();
}

static void
lapic_timer_set_divisor(u_int divisor)
{

	KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
	KASSERT(ffs(divisor) <= sizeof(lapic_timer_divisors) /
	    sizeof(u_int32_t), ("lapic: invalid divisor %u", divisor));
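	/* ffs() is 1-based, so a divisor of 2^n selects table entry n. */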
	lapic->dcr_timer = lapic_timer_divisors[ffs(divisor) - 1];
}

static void
lapic_timer_oneshot(u_int count)
{
	u_int32_t value;

	value = lapic->lvt_timer;
	value &= ~APIC_LVTT_TM;
	value |= APIC_LVTT_TM_ONE_SHOT;
	lapic->lvt_timer = value;
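	/* Writing the initial count register starts the countdown. */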
	lapic->icr_timer = count;
}

static void
lapic_timer_periodic(u_int count)
{
	u_int32_t value;

	value = lapic->lvt_timer;
	value &= ~APIC_LVTT_TM;
	value |= APIC_LVTT_TM_PERIODIC;
	lapic->lvt_timer = value;
	lapic->icr_timer = count;
}

static void
lapic_timer_enable_intr(void)
{
	u_int32_t value;

	value = lapic->lvt_timer;
	value &= ~APIC_LVT_M;
	lapic->lvt_timer = value;
}

void
lapic_handle_cmc(void)
{

	lapic_eoi();
	cmc_intr();
}

/*
 * Called from mca_init() to activate the CMC interrupt if this CPU is
 * responsible for monitoring any MC banks for CMC events.  Since mca_init()
 * is called prior to lapic_setup() during boot, this just needs to unmask
 * this CPU's LVT_CMCI entry.
 */
void
lapic_enable_cmc(void)
{
	u_int apic_id;

	apic_id = PCPU_GET(apic_id);
	KASSERT(lapics[apic_id].la_present,
	    ("%s: missing APIC %u", __func__, apic_id));
	lapics[apic_id].la_lvts[LVT_CMCI].lvt_masked = 0;
	lapics[apic_id].la_lvts[LVT_CMCI].lvt_active = 1;
	if (bootverbose)
		printf("lapic%u: CMCI unmasked\n", apic_id);
}

void
lapic_handle_error(void)
{
	u_int32_t esr;

	/*
	 * Read the contents of the error status register.  Write to
	 * the register first before reading from it to force the APIC
	 * to update its value to indicate any errors that have
	 * occurred since the previous write to the register.
	 */
	lapic->esr = 0;
	esr = lapic->esr;

	printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
	lapic_eoi();
}

u_int
apic_cpuid(u_int apic_id)
{
#ifdef SMP
	return apic_cpuids[apic_id];
#else
	return 0;
#endif
}

/* Request a free IDT vector to be used by the specified IRQ. */
u_int
apic_alloc_vector(u_int apic_id, u_int irq)
{
	u_int vector;

	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));

	/*
	 * Search for a free vector.  Currently we just use a very simple
	 * algorithm to find the first free vector.
	 */
	mtx_lock_spin(&icu_lock);
	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
		if (lapics[apic_id].la_ioint_irqs[vector] != -1)
			continue;
		lapics[apic_id].la_ioint_irqs[vector] = irq;
		mtx_unlock_spin(&icu_lock);
		return (vector + APIC_IO_INTS);
	}
	mtx_unlock_spin(&icu_lock);
	return (0);
}

/*
 * Request 'count' free contiguous IDT vectors to be used by 'count'
 * IRQs.  'count' must be a power of two and the vectors will be
 * aligned on a boundary of 'align'.  If the request cannot be
 * satisfied, 0 is returned.
 */
u_int
apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
{
	u_int first, run, vector;

	KASSERT(powerof2(count), ("bad count"));
	KASSERT(powerof2(align), ("bad align"));
	KASSERT(align >= count, ("align < count"));
#ifdef INVARIANTS
	for (run = 0; run < count; run++)
		KASSERT(irqs[run] < NUM_IO_INTS, ("Invalid IRQ %u at index %u",
		    irqs[run], run));
#endif

	/*
	 * Search for 'count' free vectors.  As with apic_alloc_vector(),
	 * this just uses a simple first fit algorithm.
	 */
	run = 0;
	first = 0;
	mtx_lock_spin(&icu_lock);
	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {

		/* Vector is in use, end run. */
		if (lapics[apic_id].la_ioint_irqs[vector] != -1) {
			run = 0;
			first = 0;
			continue;
		}

		/* Start a new run if run == 0 and vector is aligned. */
		if (run == 0) {
			if ((vector & (align - 1)) != 0)
				continue;
			first = vector;
		}
		run++;

		/* Keep looping if the run isn't long enough yet. */
		if (run < count)
			continue;

		/* Found a run, assign IRQs and return the first vector. */
		for (vector = 0; vector < count; vector++)
			lapics[apic_id].la_ioint_irqs[first + vector] =
			    irqs[vector];
		mtx_unlock_spin(&icu_lock);
		return (first + APIC_IO_INTS);
	}
	mtx_unlock_spin(&icu_lock);
	printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
	return (0);
}

/*
 * Enable a vector for a particular apic_id.  Since all local APICs share
 * the same IDT entries and ioint_handlers, this enables the vector on all
 * local APICs.  Local APICs which do not have the vector configured would
 * report spurious interrupts should it fire.
 */
void
apic_enable_vector(u_int apic_id, u_int vector)
{

	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
	KASSERT(ioint_handlers[vector / 32] != NULL,
	    ("No ISR handler for vector %u", vector));
	setidt(vector, ioint_handlers[vector / 32], SDT_APIC, SEL_KPL,
	    GSEL_APIC);
}

void
apic_disable_vector(u_int apic_id, u_int vector)
{

	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
	KASSERT(ioint_handlers[vector / 32] != NULL,
	    ("No ISR handler for vector %u", vector));
#ifdef notyet
	/*
	 * We cannot currently clear the IDT entry because other CPUs
	 * may have a valid vector at this offset.
	 */
	setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
#endif
}

/* Release an APIC vector when it's no longer in use. */
void
apic_free_vector(u_int apic_id, u_int vector, u_int irq)
{
	struct thread *td;

	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
	    ("Vector %u does not map to an IRQ line", vector));
	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
	KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
	    irq, ("IRQ mismatch"));

	/*
	 * Bind us to the CPU that owned the vector before freeing it so
	 * we don't lose an interrupt delivery race.
	 */
	td = curthread;
	if (!rebooting) {
		thread_lock(td);
		if (sched_is_bound(td))
			panic("apic_free_vector: Thread already bound.\n");
		sched_bind(td, apic_cpuid(apic_id));
		thread_unlock(td);
	}
	mtx_lock_spin(&icu_lock);
	lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = -1;
	mtx_unlock_spin(&icu_lock);
	if (!rebooting) {
		thread_lock(td);
		sched_unbind(td);
		thread_unlock(td);
	}
}

/* Map an IDT vector (APIC) to an IRQ (interrupt source). */
u_int
apic_idt_to_irq(u_int apic_id, u_int vector)
{
	int irq;

	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
	    ("Vector %u does not map to an IRQ line", vector));
	irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
	if (irq < 0)
		irq = 0;
	return (irq);
}

#ifdef DDB
/*
 * Dump data about APIC IDT vector mappings.
 */
DB_SHOW_COMMAND(apic, db_show_apic)
{
	struct intsrc *isrc;
	int i, verbose;
	u_int apic_id;
	u_int irq;

	if (strcmp(modif, "vv") == 0)
		verbose = 2;
	else if (strcmp(modif, "v") == 0)
		verbose = 1;
	else
		verbose = 0;
	for (apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
		if (lapics[apic_id].la_present == 0)
			continue;
		db_printf("Interrupts bound to lapic %u\n", apic_id);
		for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
			irq = lapics[apic_id].la_ioint_irqs[i];
			if (irq == -1 || irq == IRQ_SYSCALL)
				continue;
			db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
			if (irq == IRQ_TIMER)
				db_printf("lapic timer\n");
			else if (irq < NUM_IO_INTS) {
				isrc = intr_lookup_source(irq);
				if (isrc == NULL || verbose == 0)
					db_printf("IRQ %u\n", irq);
				else
					db_dump_intr_event(isrc->is_event,
					    verbose == 2);
			} else
				db_printf("IRQ %u ???\n", irq);
		}
	}
}

static void
dump_mask(const char *prefix, uint32_t v, int base)
{
	int i, first;

	first = 1;
	for (i = 0; i < 32; i++)
		if (v & (1 << i)) {
			if (first) {
				db_printf("%s:", prefix);
				first = 0;
			}
			db_printf(" %02x", base + i);
		}
	if (!first)
		db_printf("\n");
}

/* Show info from the lapic regs for this CPU. */
DB_SHOW_COMMAND(lapic, db_show_lapic)
{
	uint32_t v;

	db_printf("lapic ID = %d\n", lapic_id());
	v = lapic->version;
	db_printf("version  = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
	    v & 0xf);
	db_printf("max LVT  = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
	v = lapic->svr;
	db_printf("SVR      = %02x (%s)\n", v & APIC_SVR_VECTOR,
	    v & APIC_SVR_ENABLE ? "enabled" : "disabled");
	db_printf("TPR      = %02x\n", lapic->tpr);

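/* Print the vectors whose bits are set in one 32-bit ISR/TMR/IRR word. */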
#define dump_field(prefix, index)					\
	dump_mask(__XSTRING(prefix ## index), lapic->prefix ## index,	\
	    index * 32)

	db_printf("In-service Interrupts:\n");
	dump_field(isr, 0);
	dump_field(isr, 1);
	dump_field(isr, 2);
	dump_field(isr, 3);
	dump_field(isr, 4);
	dump_field(isr, 5);
	dump_field(isr, 6);
	dump_field(isr, 7);

	db_printf("TMR Interrupts:\n");
	dump_field(tmr, 0);
	dump_field(tmr, 1);
	dump_field(tmr, 2);
	dump_field(tmr, 3);
	dump_field(tmr, 4);
	dump_field(tmr, 5);
	dump_field(tmr, 6);
	dump_field(tmr, 7);

	db_printf("IRR Interrupts:\n");
	dump_field(irr, 0);
	dump_field(irr, 1);
	dump_field(irr, 2);
	dump_field(irr, 3);
	dump_field(irr, 4);
	dump_field(irr, 5);
	dump_field(irr, 6);
	dump_field(irr, 7);

#undef dump_field
}
#endif

/*
 * APIC probing support code.  This includes code to manage enumerators.
 */

static SLIST_HEAD(, apic_enumerator) enumerators =
	SLIST_HEAD_INITIALIZER(enumerators);
static struct apic_enumerator *best_enum;

void
apic_register_enumerator(struct apic_enumerator *enumerator)
{
#ifdef INVARIANTS
	struct apic_enumerator *apic_enum;

	SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
		if (apic_enum == enumerator)
			panic("%s: Duplicate register of %s", __func__,
			    enumerator->apic_name);
	}
#endif
	SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
}

/*
 * We have to look for CPUs very, very early because certain subsystems
 * want to know how many CPUs we have extremely early on in the boot
 * process.
 */
static void
apic_init(void *dummy __unused)
{
	struct apic_enumerator *enumerator;
#ifndef __amd64__
	uint64_t apic_base;
#endif
	int retval, best;

	/* We only support built-in local APICs. */
	if (!(cpu_feature & CPUID_APIC))
		return;

	/* Don't probe if APIC mode is disabled. */
	if (resource_disabled("apic", 0))
		return;

	/* First, probe all the enumerators to find the best match. */
	best_enum = NULL;
	best = 0;
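	/*
	 * A probe return value greater than zero means the enumerator
	 * cannot be used; among usable enumerators the one returning the
	 * largest (least negative) value is preferred.
	 */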
	SLIST_FOREACH(enumerator, &enumerators, apic_next) {
		retval = enumerator->apic_probe();
		if (retval > 0)
			continue;
		if (best_enum == NULL || best < retval) {
			best_enum = enumerator;
			best = retval;
		}
	}
	if (best_enum == NULL) {
		if (bootverbose)
			printf("APIC: Could not find any APICs.\n");
		return;
	}

	if (bootverbose)
		printf("APIC: Using the %s enumerator.\n",
		    best_enum->apic_name);

#ifndef __amd64__
	/*
	 * To work around an erratum, we disable the local APIC on some
	 * CPUs during early startup.  We need to turn the local APIC back
	 * on for such CPUs now.
	 */
	if (cpu == CPU_686 && cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (cpu_id & 0xff0) == 0x610) {
		apic_base = rdmsr(MSR_APICBASE);
		apic_base |= APICBASE_ENABLED;
		wrmsr(MSR_APICBASE, apic_base);
	}
#endif

	/* Second, probe the CPUs in the system. */
	retval = best_enum->apic_probe_cpus();
	if (retval != 0)
		printf("%s: Failed to probe CPUs: returned %d\n",
		    best_enum->apic_name, retval);

#ifdef __amd64__
}
SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);

/*
 * Set up the local APIC.  We have to do this prior to starting up the APs
 * in the SMP case.
 */
static void
apic_setup_local(void *dummy __unused)
{
	int retval;

	if (best_enum == NULL)
		return;
#endif
	/* Third, initialize the local APIC. */
	retval = best_enum->apic_setup_local();
	if (retval != 0)
		printf("%s: Failed to setup the local APIC: returned %d\n",
		    best_enum->apic_name, retval);
}
#ifdef __amd64__
SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local,
    NULL);
#else
SYSINIT(apic_init, SI_SUB_CPU, SI_ORDER_SECOND, apic_init, NULL);
#endif

/*
 * Set up the I/O APICs.
 */
static void
apic_setup_io(void *dummy __unused)
{
	int retval;

	if (best_enum == NULL)
		return;
	retval = best_enum->apic_setup_io();
	if (retval != 0)
		printf("%s: Failed to setup I/O APICs: returned %d\n",
		    best_enum->apic_name, retval);

#ifdef XEN
	return;
#endif
	/*
	 * Finish setting up the local APIC on the BSP once we know how to
	 * properly program the LINT pins.
	 */
	lapic_setup(1);
	intr_register_pic(&lapic_pic);
	if (bootverbose)
		lapic_dump("BSP");

	/* Enable the MSI "pic". */
	msi_init();
}
SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_SECOND, apic_setup_io, NULL);

#ifdef SMP
/*
 * Inter Processor Interrupt functions.  The lapic_ipi_*() functions are
 * private to the MD code.  The public interface for the rest of the
 * kernel is defined in mp_machdep.c.
 */
int
lapic_ipi_wait(int delay)
{
	int x, incr;

	/*
	 * Wait delay loops for the IPI to be sent.  This is highly bogus
	 * since it is sensitive to CPU clock speed.  If delay is -1, we
	 * wait forever.
	 */
	if (delay == -1) {
		incr = 0;
		delay = 1;
	} else
		incr = 1;
	for (x = 0; x < delay; x += incr) {
		if ((lapic->icr_lo & APIC_DELSTAT_MASK) == APIC_DELSTAT_IDLE)
			return (1);
		ia32_pause();
	}
	return (0);
}

void
lapic_ipi_raw(register_t icrlo, u_int dest)
{
	register_t value, eflags;

	/* XXX: Need more sanity checking of icrlo? */
	KASSERT(lapic != NULL, ("%s called too early", __func__));
	KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
	    ("%s: invalid dest field", __func__));
	KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
	    ("%s: reserved bits set in ICR LO register", __func__));

	/* Set destination in ICR HI register if it is being used. */
	eflags = intr_disable();
	if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
		value = lapic->icr_hi;
		value &= ~APIC_ID_MASK;
		value |= dest << APIC_ID_SHIFT;
		lapic->icr_hi = value;
	}

	/* Program the contents of the IPI and dispatch it. */
	value = lapic->icr_lo;
	value &= APIC_ICRLO_RESV_MASK;
	value |= icrlo;
	lapic->icr_lo = value;
	intr_restore(eflags);
}

#define	BEFORE_SPIN	1000000
#ifdef DETECT_DEADLOCK
#define	AFTER_SPIN	1000
#endif

void
lapic_ipi_vectored(u_int vector, int dest)
{
	register_t icrlo, destfield;

	KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
	    ("%s: invalid vector %d", __func__, vector));

	icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE;

	/*
	 * IPI_STOP_HARD is just a "fake" vector used to send an NMI.
	 * Use the special NMI rules in that case; otherwise specify
	 * the vector.
	 */
	if (vector == IPI_STOP_HARD)
		icrlo |= APIC_DELMODE_NMI | APIC_LEVEL_ASSERT;
	else
		icrlo |= vector | APIC_DELMODE_FIXED | APIC_LEVEL_DEASSERT;
	destfield = 0;
	switch (dest) {
	case APIC_IPI_DEST_SELF:
		icrlo |= APIC_DEST_SELF;
		break;
	case APIC_IPI_DEST_ALL:
		icrlo |= APIC_DEST_ALLISELF;
		break;
	case APIC_IPI_DEST_OTHERS:
		icrlo |= APIC_DEST_ALLESELF;
		break;
	default:
		KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
		    ("%s: invalid destination 0x%x", __func__, dest));
		destfield = dest;
	}

	/* Wait for an earlier IPI to finish. */
	if (!lapic_ipi_wait(BEFORE_SPIN)) {
		if (panicstr != NULL)
			return;
		else
			panic("APIC: Previous IPI is stuck");
	}

	lapic_ipi_raw(icrlo, destfield);

#ifdef DETECT_DEADLOCK
	/* Wait for IPI to be delivered. */
	if (!lapic_ipi_wait(AFTER_SPIN)) {
#ifdef needsattention
		/*
		 * XXX FIXME:
		 *
		 * The above function waits for the message to actually be
		 * delivered.  It breaks out after an arbitrary timeout
		 * since the message should eventually be delivered (at
		 * least in theory) and that if it wasn't we would catch
		 * the failure with the check above when the next IPI is
		 * sent.
		 *
		 * We could skip this wait entirely, EXCEPT it probably
		 * protects us from other routines that assume that the
		 * message was delivered and acted upon when this function
		 * returns.
		 */
		printf("APIC: IPI might be stuck\n");
#else /* !needsattention */
		/* Wait without a timeout until the message is sent. */
		while (lapic->icr_lo & APIC_DELSTAT_PEND)
			ia32_pause();
#endif /* needsattention */
	}
#endif /* DETECT_DEADLOCK */
}
#endif /* SMP */
