/* local_apic.c, revision 277493 */
1/*-
2 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3 * Copyright (c) 1996, by Steve Passe
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. The name of the developer may NOT be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30/*
31 * Local APIC support on Pentium and later processors.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: stable/10/sys/x86/x86/local_apic.c 277493 2015-01-21 17:59:32Z jhb $");
36
37#include "opt_atpic.h"
38#include "opt_hwpmc_hooks.h"
39#include "opt_kdtrace.h"
40
41#include "opt_ddb.h"
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/bus.h>
46#include <sys/kernel.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/pcpu.h>
50#include <sys/proc.h>
51#include <sys/sched.h>
52#include <sys/smp.h>
53#include <sys/timeet.h>
54
55#include <vm/vm.h>
56#include <vm/pmap.h>
57
58#include <x86/apicreg.h>
59#include <machine/cputypes.h>
60#include <machine/frame.h>
61#include <machine/intr_machdep.h>
62#include <machine/apicvar.h>
63#include <x86/mca.h>
64#include <machine/md_var.h>
65#include <machine/smp.h>
66#include <machine/specialreg.h>
67
68#ifdef DDB
69#include <sys/interrupt.h>
70#include <ddb/ddb.h>
71#endif
72
73#ifdef __amd64__
74#define	SDT_APIC	SDT_SYSIGT
75#define	SDT_APICT	SDT_SYSIGT
76#define	GSEL_APIC	0
77#else
78#define	SDT_APIC	SDT_SYS386IGT
79#define	SDT_APICT	SDT_SYS386TGT
80#define	GSEL_APIC	GSEL(GCODE_SEL, SEL_KPL)
81#endif
82
83/* Sanity checks on IDT vectors. */
84CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
85CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
86CTASSERT(APIC_LOCAL_INTS == 240);
87CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
88
89/* Magic IRQ values for the timer and syscalls. */
90#define	IRQ_TIMER	(NUM_IO_INTS + 1)
91#define	IRQ_SYSCALL	(NUM_IO_INTS + 2)
92#define	IRQ_DTRACE_RET	(NUM_IO_INTS + 3)
93#define	IRQ_EVTCHN	(NUM_IO_INTS + 4)
94
95/*
96 * Support for local APICs.  Local APICs manage interrupts on each
97 * individual processor as opposed to I/O APICs which receive interrupts
98 * from I/O devices and then forward them on to the local APICs.
99 *
100 * Local APICs can also send interrupts to each other thus providing the
101 * mechanism for IPIs.
102 */
103
/*
 * Software image of one local vector table (LVT) entry: trigger mode,
 * polarity, mask state, delivery mode, and vector.  lvt_mode() folds
 * these fields into the hardware register format.
 */
struct lvt {
	u_int lvt_edgetrigger:1;	/* 1 = edge trigger, 0 = level */
	u_int lvt_activehi:1;		/* 1 = active high, 0 = active low */
	u_int lvt_masked:1;		/* 1 = interrupt masked */
	u_int lvt_active:1;		/* 1 = per-CPU override of the default */
	u_int lvt_mode:16;		/* APIC_LVT_DM_* delivery mode */
	u_int lvt_vector:8;		/* IDT vector, used for fixed mode */
};
112
/*
 * Per-CPU software state for one local APIC, indexed by APIC ID in the
 * lapics[] table below.
 */
struct lapic {
	struct lvt la_lvts[APIC_LVT_MAX + 1];	/* per-CPU LVT overrides */
	u_int la_id:8;			/* local APIC ID */
	u_int la_cluster:4;		/* logical destination cluster */
	u_int la_cluster_id:2;		/* ID within the cluster */
	u_int la_present:1;		/* set once lapic_create() has run */
	u_long *la_timer_count;		/* intrcnt slot for timer ticks */
	u_long la_timer_period;		/* current timer count-down value */
	u_int la_timer_mode;		/* 0 = off, 1 = periodic, 2 = one-shot */
	uint32_t lvt_timer_cache;	/* cached timer LVT register value */
	/* Include IDT_SYSCALL to make indexing easier. */
	int la_ioint_irqs[APIC_NUM_IOINTS + 1];
} static lapics[MAX_APIC_ID + 1];
126
/*
 * Global defaults for local APIC LVT entries, used for any pin without
 * an active per-CPU override.  Field order matches struct lvt:
 * { edgetrigger, activehi, masked, active, mode, vector }.
 */
static struct lvt lvts[APIC_LVT_MAX + 1] = {
	{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },	/* LINT0: masked ExtINT */
	{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },	/* LINT1: NMI */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },	/* Timer */
	{ 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },	/* Error */
	{ 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },	/* PMC */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },	/* Thermal */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },	/* CMCI */
};
137
/*
 * I/O interrupt service routines, one handler per bank of 32 IDT
 * vectors; apic_enable_vector() installs handler[vector / 32].  The
 * first bank (vectors 0-31) is reserved and has no APIC handler.
 */
static inthand_t *ioint_handlers[] = {
	NULL,			/* 0 - 31 */
	IDTVEC(apic_isr1),	/* 32 - 63 */
	IDTVEC(apic_isr2),	/* 64 - 95 */
	IDTVEC(apic_isr3),	/* 96 - 127 */
	IDTVEC(apic_isr4),	/* 128 - 159 */
	IDTVEC(apic_isr5),	/* 160 - 191 */
	IDTVEC(apic_isr6),	/* 192 - 223 */
	IDTVEC(apic_isr7),	/* 224 - 255 */
};
148
149
/*
 * Hardware encodings (APIC_TDCR_*) for each power-of-two timer divisor;
 * indexed by ffs(divisor) - 1 in lapic_timer_set_divisor().
 */
static u_int32_t lapic_timer_divisors[] = {
	APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
	APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
};
154
155extern inthand_t IDTVEC(rsvd);
156
157volatile lapic_t *lapic;
158vm_paddr_t lapic_paddr;
159static u_long lapic_timer_divisor;
160static struct eventtimer lapic_et;
161
162static void	lapic_enable(void);
163static void	lapic_resume(struct pic *pic, bool suspend_cancelled);
164static void	lapic_timer_oneshot(struct lapic *,
165		    u_int count, int enable_int);
166static void	lapic_timer_periodic(struct lapic *,
167		    u_int count, int enable_int);
168static void	lapic_timer_stop(struct lapic *);
169static void	lapic_timer_set_divisor(u_int divisor);
170static uint32_t	lvt_mode(struct lapic *la, u_int pin, uint32_t value);
171static int	lapic_et_start(struct eventtimer *et,
172    sbintime_t first, sbintime_t period);
173static int	lapic_et_stop(struct eventtimer *et);
174
175struct pic lapic_pic = { .pic_resume = lapic_resume };
176
177static uint32_t
178lvt_mode(struct lapic *la, u_int pin, uint32_t value)
179{
180	struct lvt *lvt;
181
182	KASSERT(pin <= APIC_LVT_MAX, ("%s: pin %u out of range", __func__, pin));
183	if (la->la_lvts[pin].lvt_active)
184		lvt = &la->la_lvts[pin];
185	else
186		lvt = &lvts[pin];
187
188	value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
189	    APIC_LVT_VECTOR);
190	if (lvt->lvt_edgetrigger == 0)
191		value |= APIC_LVT_TM;
192	if (lvt->lvt_activehi == 0)
193		value |= APIC_LVT_IIPP_INTALO;
194	if (lvt->lvt_masked)
195		value |= APIC_LVT_M;
196	value |= lvt->lvt_mode;
197	switch (lvt->lvt_mode) {
198	case APIC_LVT_DM_NMI:
199	case APIC_LVT_DM_SMI:
200	case APIC_LVT_DM_INIT:
201	case APIC_LVT_DM_EXTINT:
202		if (!lvt->lvt_edgetrigger) {
203			printf("lapic%u: Forcing LINT%u to edge trigger\n",
204			    la->la_id, pin);
205			value |= APIC_LVT_TM;
206		}
207		/* Use a vector of 0. */
208		break;
209	case APIC_LVT_DM_FIXED:
210		value |= lvt->lvt_vector;
211		break;
212	default:
213		panic("bad APIC LVT delivery mode: %#x\n", value);
214	}
215	return (value);
216}
217
/*
 * Map the local APIC and setup necessary interrupt vectors.  Runs once
 * on the BSP: it maps the register page, enables the APIC, installs
 * the spurious/timer/error/CMCI IDT entries, and registers the LAPIC
 * event timer unless disabled via the "apic.clock" hint.
 */
void
lapic_init(vm_paddr_t addr)
{
	u_int regs[4];
	int i, arat;

	/* Map the local APIC and setup the spurious interrupt handler. */
	KASSERT(trunc_page(addr) == addr,
	    ("local APIC not aligned on a page boundary"));
	lapic_paddr = addr;
	lapic = pmap_mapdev(addr, sizeof(lapic_t));
	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
	    GSEL_APIC);

	/* Perform basic initialization of the BSP's local APIC. */
	lapic_enable();

	/* Set BSP's per-CPU local APIC ID. */
	PCPU_SET(apic_id, lapic_id());

	/* Local APIC timer interrupt. */
	setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* Local APIC error interrupt. */
	setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);

	/* XXX: Thermal interrupt */

	/* Local APIC CMCI. */
	setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);

	/* Register the event timer unless the "apic.clock" hint is 0. */
	if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
		arat = 0;
		/* Intel CPUID 0x06 EAX[2] set if APIC timer runs in C3. */
		if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high >= 6) {
			do_cpuid(0x06, regs);
			if ((regs[0] & CPUTPM1_ARAT) != 0)
				arat = 1;
		}
		bzero(&lapic_et, sizeof(lapic_et));
		lapic_et.et_name = "LAPIC";
		lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
		    ET_FLAGS_PERCPU;
		lapic_et.et_quality = 600;
		/* Without ARAT the timer stops in C3; lower the quality. */
		if (!arat) {
			lapic_et.et_flags |= ET_FLAGS_C3STOP;
			lapic_et.et_quality -= 200;
		}
		lapic_et.et_frequency = 0;
		/* We don't know frequency yet, so trying to guess. */
		lapic_et.et_min_period = 0x00001000LL;
		lapic_et.et_max_period = SBT_1S;
		lapic_et.et_start = lapic_et_start;
		lapic_et.et_stop = lapic_et_stop;
		lapic_et.et_priv = NULL;
		et_register(&lapic_et);
	}
}
279
/*
 * Create a local APIC instance: populate the lapics[] slot for
 * 'apic_id' with default LVT state and free interrupt vectors, then
 * (under SMP) register the CPU with the scheduler topology code.
 */
void
lapic_create(u_int apic_id, int boot_cpu)
{
	int i;

	/* IDs beyond the lapics[] table cannot be managed, but the BSP must fit. */
	if (apic_id > MAX_APIC_ID) {
		printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
		if (boot_cpu)
			panic("Can't ignore BSP");
		return;
	}
	KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
	    apic_id));

	/*
	 * Assume no local LVT overrides and a cluster of 0 and
	 * intra-cluster ID of 0.
	 */
	lapics[apic_id].la_present = 1;
	lapics[apic_id].la_id = apic_id;
	/* Copy the global LVT defaults but leave the overrides inactive. */
	for (i = 0; i <= APIC_LVT_MAX; i++) {
		lapics[apic_id].la_lvts[i] = lvts[i];
		lapics[apic_id].la_lvts[i].lvt_active = 0;
	}
	/* Mark all I/O vectors free, then reserve the fixed-purpose ones. */
	for (i = 0; i <= APIC_NUM_IOINTS; i++)
	    lapics[apic_id].la_ioint_irqs[i] = -1;
	lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
	lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
	    IRQ_TIMER;
#ifdef KDTRACE_HOOKS
	lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] =
	    IRQ_DTRACE_RET;
#endif
#ifdef XENHVM
	lapics[apic_id].la_ioint_irqs[IDT_EVTCHN - APIC_IO_INTS] = IRQ_EVTCHN;
#endif


#ifdef SMP
	cpu_add(apic_id, boot_cpu);
#endif
}
325
/*
 * Dump contents of local APIC registers for the current CPU, labelled
 * with 'str'.  The PMC and CMCI entries are only printed when the
 * version register's max-LVT field says they exist.
 */
void
lapic_dump(const char* str)
{
	uint32_t maxlvt;

	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
	printf("     ID: 0x%08x   VER: 0x%08x LDR: 0x%08x DFR: 0x%08x\n",
	    lapic->id, lapic->version, lapic->ldr, lapic->dfr);
	printf("  lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
	    lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
	printf("  timer: 0x%08x therm: 0x%08x err: 0x%08x",
	    lapic->lvt_timer, lapic->lvt_thermal, lapic->lvt_error);
	if (maxlvt >= APIC_LVT_PMC)
		printf(" pmc: 0x%08x", lapic->lvt_pcint);
	printf("\n");
	if (maxlvt >= APIC_LVT_CMCI)
		printf("   cmci: 0x%08x\n", lapic->lvt_cmci);
}
348
/*
 * Program this CPU's local APIC from the cached software state: TPR,
 * spurious vector / enable bits, all LVT entries, and the timer.
 * 'boot' is non-zero on first setup (registers the timer interrupt
 * counter); lapic_resume() calls this with boot == 0.
 */
void
lapic_setup(int boot)
{
	struct lapic *la;
	u_int32_t maxlvt;
	register_t saveintr;
	char buf[MAXCOMLEN + 1];

	la = &lapics[lapic_id()];
	KASSERT(la->la_present, ("missing APIC structure"));
	/* Reprogram the registers with interrupts disabled. */
	saveintr = intr_disable();
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;

	/* Initialize the TPR to allow all interrupts. */
	lapic_set_tpr(0);

	/* Setup spurious vector and enable the local APIC. */
	lapic_enable();

	/* Program LINT[01] LVT entries. */
	lapic->lvt_lint0 = lvt_mode(la, APIC_LVT_LINT0, lapic->lvt_lint0);
	lapic->lvt_lint1 = lvt_mode(la, APIC_LVT_LINT1, lapic->lvt_lint1);

	/* Program the PMC LVT entry if present. */
	if (maxlvt >= APIC_LVT_PMC)
		lapic->lvt_pcint = lvt_mode(la, APIC_LVT_PMC, lapic->lvt_pcint);

	/* Program timer LVT and setup handler. */
	la->lvt_timer_cache = lapic->lvt_timer =
	    lvt_mode(la, APIC_LVT_TIMER, lapic->lvt_timer);
	if (boot) {
		snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
		intrcnt_add(buf, &la->la_timer_count);
	}

	/* Setup the timer if configured. */
	if (la->la_timer_mode != 0) {
		KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
		    lapic_id()));
		lapic_timer_set_divisor(lapic_timer_divisor);
		/* la_timer_mode 1 is periodic; any other non-zero is one-shot. */
		if (la->la_timer_mode == 1)
			lapic_timer_periodic(la, la->la_timer_period, 1);
		else
			lapic_timer_oneshot(la, la->la_timer_period, 1);
	}

	/* Program error LVT and clear any existing errors. */
	lapic->lvt_error = lvt_mode(la, APIC_LVT_ERROR, lapic->lvt_error);
	lapic->esr = 0;

	/* XXX: Thermal LVT */

	/* Program the CMCI LVT entry if present. */
	if (maxlvt >= APIC_LVT_CMCI)
		lapic->lvt_cmci = lvt_mode(la, APIC_LVT_CMCI, lapic->lvt_cmci);

	intr_restore(saveintr);
}
407
/*
 * Clear the mask bit in the current CPU's PMC LVT entry so that
 * further performance counter interrupts can be delivered.  No-op
 * unless the kernel is built with HWPMC_HOOKS.
 */
void
lapic_reenable_pmc(void)
{
#ifdef HWPMC_HOOKS
	uint32_t pcint;

	pcint = lapic->lvt_pcint;
	pcint &= ~APIC_LVT_M;
	lapic->lvt_pcint = pcint;
#endif
}
419
#ifdef HWPMC_HOOKS
/*
 * Rendezvous callback: reprogram this CPU's PMC LVT entry from the
 * current defaults/overrides (see lapic_enable_pmc/lapic_disable_pmc).
 */
static void
lapic_update_pmc(void *dummy)
{
	struct lapic *la;

	la = &lapics[lapic_id()];
	lapic->lvt_pcint = lvt_mode(la, APIC_LVT_PMC, lapic->lvt_pcint);
}
#endif
430
/*
 * Unmask the PMC LVT entry on every CPU so hwpmc can receive counter
 * overflow interrupts.  Returns 1 on success, 0 if the local APIC or
 * its PMC LVT entry is absent (or HWPMC_HOOKS is not compiled in).
 */
int
lapic_enable_pmc(void)
{
#ifdef HWPMC_HOOKS
	u_int32_t maxlvt;

	/* Fail if the local APIC is not present. */
	if (lapic == NULL)
		return (0);

	/* Fail if the PMC LVT is not present. */
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	if (maxlvt < APIC_LVT_PMC)
		return (0);

	/* Unmask in the global defaults, then push to the hardware. */
	lvts[APIC_LVT_PMC].lvt_masked = 0;

#ifdef SMP
	/*
	 * If hwpmc was loaded at boot time then the APs may not be
	 * started yet.  In that case, don't forward the request to
	 * them as they will program the lvt when they start.
	 */
	if (smp_started)
		smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
	else
#endif
		lapic_update_pmc(NULL);
	return (1);
#else
	return (0);
#endif
}
464
/*
 * Re-mask the PMC LVT entry on every CPU when hwpmc is unloaded.
 * Silently returns if the local APIC or its PMC LVT entry is absent.
 */
void
lapic_disable_pmc(void)
{
#ifdef HWPMC_HOOKS
	u_int32_t maxlvt;

	/* Fail if the local APIC is not present. */
	if (lapic == NULL)
		return;

	/* Fail if the PMC LVT is not present. */
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	if (maxlvt < APIC_LVT_PMC)
		return;

	/* Mask in the global defaults, then push to all CPUs. */
	lvts[APIC_LVT_PMC].lvt_masked = 1;

#ifdef SMP
	/* The APs should always be started when hwpmc is unloaded. */
	KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
#endif
	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
#endif
}
489
/*
 * Event timer start callback.  On first use (et_frequency == 0) the
 * timer frequency is calibrated against DELAY(1 second), doubling the
 * divisor until the count-down no longer saturates.  The per-CPU timer
 * is then armed in periodic (period != 0) or one-shot mode.
 */
static int
lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	struct lapic *la;
	u_long value;

	la = &lapics[PCPU_GET(apic_id)];
	if (et->et_frequency == 0) {
		/* Start off with a divisor of 2 (power on reset default). */
		lapic_timer_divisor = 2;
		/* Try to calibrate the local APIC timer. */
		do {
			lapic_timer_set_divisor(lapic_timer_divisor);
			lapic_timer_oneshot(la, APIC_TIMER_MAX_COUNT, 0);
			DELAY(1000000);
			/* Ticks consumed in one second == timer frequency. */
			value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
			if (value != APIC_TIMER_MAX_COUNT)
				break;
			lapic_timer_divisor <<= 1;
		} while (lapic_timer_divisor <= 128);
		if (lapic_timer_divisor > 128)
			panic("lapic: Divisor too big");
		if (bootverbose)
			printf("lapic: Divisor %lu, Frequency %lu Hz\n",
			    lapic_timer_divisor, value);
		et->et_frequency = value;
		/* Period bounds follow from the 32-bit count register range. */
		et->et_min_period = (0x00000002LLU << 32) / et->et_frequency;
		et->et_max_period = (0xfffffffeLLU << 32) / et->et_frequency;
	}
	if (la->la_timer_mode == 0)
		lapic_timer_set_divisor(lapic_timer_divisor);
	if (period != 0) {
		la->la_timer_mode = 1;	/* periodic */
		la->la_timer_period = ((uint32_t)et->et_frequency * period) >> 32;
		lapic_timer_periodic(la, la->la_timer_period, 1);
	} else {
		la->la_timer_mode = 2;	/* one-shot */
		la->la_timer_period = ((uint32_t)et->et_frequency * first) >> 32;
		lapic_timer_oneshot(la, la->la_timer_period, 1);
	}
	return (0);
}
532
533static int
534lapic_et_stop(struct eventtimer *et)
535{
536	struct lapic *la = &lapics[PCPU_GET(apic_id)];
537
538	la->la_timer_mode = 0;
539	lapic_timer_stop(la);
540	return (0);
541}
542
543void
544lapic_disable(void)
545{
546	uint32_t value;
547
548	/* Software disable the local APIC. */
549	value = lapic->svr;
550	value &= ~APIC_SVR_SWEN;
551	lapic->svr = value;
552}
553
/*
 * Enable the local APIC: clear the vector and focus-checking fields of
 * the SVR, then set the enable bits and the spurious interrupt vector.
 */
static void
lapic_enable(void)
{
	u_int32_t value;

	/* Program the spurious vector to enable the local APIC. */
	value = lapic->svr;
	value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
	value |= (APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT);
	lapic->svr = value;
}
565
/*
 * Reset the local APIC on the BSP during resume by re-running
 * lapic_setup() without re-registering the interrupt counter.
 */
static void
lapic_resume(struct pic *pic, bool suspend_cancelled)
{

	lapic_setup(0);
}
573
/* Return the current CPU's local APIC ID, read from the ID register. */
int
lapic_id(void)
{

	KASSERT(lapic != NULL, ("local APIC is not mapped"));
	return (lapic->id >> APIC_ID_SHIFT);
}
581
/*
 * Return non-zero if 'vector' is set in this CPU's interrupt request
 * register (i.e. the interrupt has been delivered but not serviced).
 */
int
lapic_intr_pending(u_int vector)
{
	volatile u_int32_t *irr;

	/*
	 * The IRR registers are an array of 128-bit registers each of
	 * which only describes 32 interrupts in the low 32 bits..  Thus,
	 * we divide the vector by 32 to get the 128-bit index.  We then
	 * multiply that index by 4 to get the equivalent index from
	 * treating the IRR as an array of 32-bit registers.  Finally, we
	 * modulus the vector by 32 to determine the individual bit to
	 * test.
	 */
	irr = &lapic->irr0;
	return (irr[(vector / 32) * 4] & 1 << (vector % 32));
}
599
600void
601lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
602{
603	struct lapic *la;
604
605	KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
606	    __func__, apic_id));
607	KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
608	    __func__, cluster));
609	KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
610	    ("%s: intra cluster id %u too big", __func__, cluster_id));
611	la = &lapics[apic_id];
612	la->la_cluster = cluster;
613	la->la_cluster_id = cluster_id;
614}
615
616int
617lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
618{
619
620	if (pin > APIC_LVT_MAX)
621		return (EINVAL);
622	if (apic_id == APIC_ID_ALL) {
623		lvts[pin].lvt_masked = masked;
624		if (bootverbose)
625			printf("lapic:");
626	} else {
627		KASSERT(lapics[apic_id].la_present,
628		    ("%s: missing APIC %u", __func__, apic_id));
629		lapics[apic_id].la_lvts[pin].lvt_masked = masked;
630		lapics[apic_id].la_lvts[pin].lvt_active = 1;
631		if (bootverbose)
632			printf("lapic%u:", apic_id);
633	}
634	if (bootverbose)
635		printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
636	return (0);
637}
638
/*
 * Set the delivery mode of a LINT pin, either in the global defaults
 * (APIC_ID_ALL) or as a per-CPU override.  Returns EINVAL for an
 * out-of-range pin; panics on an unsupported delivery mode.
 */
int
lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
{
	struct lvt *lvt;

	if (pin > APIC_LVT_MAX)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvt = &lvts[pin];
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lvt = &lapics[apic_id].la_lvts[pin];
		lvt->lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	lvt->lvt_mode = mode;
	switch (mode) {
	case APIC_LVT_DM_NMI:
	case APIC_LVT_DM_SMI:
	case APIC_LVT_DM_INIT:
	case APIC_LVT_DM_EXTINT:
		/* These modes are programmed edge triggered and active high. */
		lvt->lvt_edgetrigger = 1;
		lvt->lvt_activehi = 1;
		/* ExtINT starts masked, matching the LINT0 default in lvts[]. */
		if (mode == APIC_LVT_DM_EXTINT)
			lvt->lvt_masked = 1;
		else
			lvt->lvt_masked = 0;
		break;
	default:
		panic("Unsupported delivery mode: 0x%x\n", mode);
	}
	if (bootverbose) {
		printf(" Routing ");
		switch (mode) {
		case APIC_LVT_DM_NMI:
			printf("NMI");
			break;
		case APIC_LVT_DM_SMI:
			printf("SMI");
			break;
		case APIC_LVT_DM_INIT:
			printf("INIT");
			break;
		case APIC_LVT_DM_EXTINT:
			printf("ExtINT");
			break;
		}
		printf(" -> LINT%u\n", pin);
	}
	return (0);
}
694
695int
696lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
697{
698
699	if (pin > APIC_LVT_MAX || pol == INTR_POLARITY_CONFORM)
700		return (EINVAL);
701	if (apic_id == APIC_ID_ALL) {
702		lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
703		if (bootverbose)
704			printf("lapic:");
705	} else {
706		KASSERT(lapics[apic_id].la_present,
707		    ("%s: missing APIC %u", __func__, apic_id));
708		lapics[apic_id].la_lvts[pin].lvt_active = 1;
709		lapics[apic_id].la_lvts[pin].lvt_activehi =
710		    (pol == INTR_POLARITY_HIGH);
711		if (bootverbose)
712			printf("lapic%u:", apic_id);
713	}
714	if (bootverbose)
715		printf(" LINT%u polarity: %s\n", pin,
716		    pol == INTR_POLARITY_HIGH ? "high" : "low");
717	return (0);
718}
719
720int
721lapic_set_lvt_triggermode(u_int apic_id, u_int pin, enum intr_trigger trigger)
722{
723
724	if (pin > APIC_LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
725		return (EINVAL);
726	if (apic_id == APIC_ID_ALL) {
727		lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
728		if (bootverbose)
729			printf("lapic:");
730	} else {
731		KASSERT(lapics[apic_id].la_present,
732		    ("%s: missing APIC %u", __func__, apic_id));
733		lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
734		    (trigger == INTR_TRIGGER_EDGE);
735		lapics[apic_id].la_lvts[pin].lvt_active = 1;
736		if (bootverbose)
737			printf("lapic%u:", apic_id);
738	}
739	if (bootverbose)
740		printf(" LINT%u trigger: %s\n", pin,
741		    trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
742	return (0);
743}
744
/*
 * Adjust the TPR of the current CPU so that it blocks all interrupts below
 * the passed in vector.
 */
void
lapic_set_tpr(u_int vector)
{
#ifdef CHEAP_TPR
	/* CHEAP_TPR: store the vector directly, no read-modify-write. */
	lapic->tpr = vector;
#else
	u_int32_t tpr;

	/* Preserve the register bits outside the priority field. */
	tpr = lapic->tpr & ~APIC_TPR_PRIO;
	tpr |= vector;
	lapic->tpr = tpr;
#endif
}
762
/* Signal end-of-interrupt by writing the local APIC's EOI register. */
void
lapic_eoi(void)
{

	lapic->eoi = 0;
}
769
770void
771lapic_handle_intr(int vector, struct trapframe *frame)
772{
773	struct intsrc *isrc;
774
775	isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
776	    vector));
777	intr_execute_handlers(isrc, frame);
778}
779
/*
 * Local APIC timer interrupt handler: acknowledge the interrupt,
 * account a tick, and run the registered event timer callback (if any)
 * with the trap frame made visible via curthread.
 */
void
lapic_handle_timer(struct trapframe *frame)
{
	struct lapic *la;
	struct trapframe *oldframe;
	struct thread *td;

	/* Send EOI first thing. */
	lapic_eoi();

#if defined(SMP) && !defined(SCHED_ULE)
	/*
	 * Don't do any accounting for the disabled HTT cores, since it
	 * will provide misleading numbers for the userland.
	 *
	 * No locking is necessary here, since even if we lose the race
	 * when hlt_cpus_mask changes it is not a big deal, really.
	 *
	 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
	 * and unlike other schedulers it actually schedules threads to
	 * those CPUs.
	 */
	if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
		return;
#endif

	/* Look up our local APIC structure for the tick counters. */
	la = &lapics[PCPU_GET(apic_id)];
	(*la->la_timer_count)++;
	critical_enter();
	if (lapic_et.et_active) {
		td = curthread;
		/* Expose the interrupted frame to the callback via curthread. */
		td->td_intr_nesting_level++;
		oldframe = td->td_intr_frame;
		td->td_intr_frame = frame;
		lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
		td->td_intr_frame = oldframe;
		td->td_intr_nesting_level--;
	}
	critical_exit();
}
821
/*
 * Program the timer divide configuration register.  'divisor' must be
 * a power of two with an encoding in lapic_timer_divisors[] (up to 128).
 */
static void
lapic_timer_set_divisor(u_int divisor)
{

	KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
	KASSERT(ffs(divisor) <= sizeof(lapic_timer_divisors) /
	    sizeof(u_int32_t), ("lapic: invalid divisor %u", divisor));
	lapic->dcr_timer = lapic_timer_divisors[ffs(divisor) - 1];
}
831
/*
 * Arm the local APIC timer in one-shot mode with the given initial
 * count, optionally unmasking the timer interrupt.  The LVT is written
 * before the count register, which starts the count-down.
 */
static void
lapic_timer_oneshot(struct lapic *la, u_int count, int enable_int)
{
	u_int32_t value;

	value = la->lvt_timer_cache;
	value &= ~APIC_LVTT_TM;
	value |= APIC_LVTT_TM_ONE_SHOT;
	if (enable_int)
		value &= ~APIC_LVT_M;
	lapic->lvt_timer = value;
	lapic->icr_timer = count;
}
845
/*
 * Arm the local APIC timer in periodic mode with the given count,
 * optionally unmasking the timer interrupt.  The LVT is written before
 * the count register, which starts the count-down.
 */
static void
lapic_timer_periodic(struct lapic *la, u_int count, int enable_int)
{
	u_int32_t value;

	value = la->lvt_timer_cache;
	value &= ~APIC_LVTT_TM;
	value |= APIC_LVTT_TM_PERIODIC;
	if (enable_int)
		value &= ~APIC_LVT_M;
	lapic->lvt_timer = value;
	lapic->icr_timer = count;
}
859
/*
 * Stop the local APIC timer by switching it to one-shot mode with the
 * interrupt masked.
 */
static void
lapic_timer_stop(struct lapic *la)
{
	u_int32_t value;

	value = la->lvt_timer_cache;
	value &= ~APIC_LVTT_TM;
	value |= APIC_LVT_M;
	lapic->lvt_timer = value;
}
870
/*
 * Corrected machine check interrupt handler: acknowledge the interrupt
 * and hand off to the machine check code (cmc_intr()).
 */
void
lapic_handle_cmc(void)
{

	lapic_eoi();
	cmc_intr();
}
878
/*
 * Called from the mca_init() to activate the CMC interrupt if this CPU is
 * responsible for monitoring any MC banks for CMC events.  Since mca_init()
 * is called prior to lapic_setup() during boot, this just needs to unmask
 * this CPU's LVT_CMCI entry.
 */
void
lapic_enable_cmc(void)
{
	u_int apic_id;

#ifdef DEV_ATPIC
	/* With the legacy ATPIC option the local APIC may be absent. */
	if (lapic == NULL)
		return;
#endif
	apic_id = PCPU_GET(apic_id);
	KASSERT(lapics[apic_id].la_present,
	    ("%s: missing APIC %u", __func__, apic_id));
	/* Unmask and activate the per-CPU override; lapic_setup() applies it. */
	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_masked = 0;
	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_active = 1;
	if (bootverbose)
		printf("lapic%u: CMCI unmasked\n", apic_id);
}
902
/*
 * Local APIC error interrupt handler: latch and report the error
 * status register contents, then acknowledge the interrupt.
 */
void
lapic_handle_error(void)
{
	u_int32_t esr;

	/*
	 * Read the contents of the error status register.  Write to
	 * the register first before reading from it to force the APIC
	 * to update its value to indicate any errors that have
	 * occurred since the previous write to the register.
	 */
	lapic->esr = 0;
	esr = lapic->esr;

	printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
	lapic_eoi();
}
920
921u_int
922apic_cpuid(u_int apic_id)
923{
924#ifdef SMP
925	return apic_cpuids[apic_id];
926#else
927	return 0;
928#endif
929}
930
931/* Request a free IDT vector to be used by the specified IRQ. */
932u_int
933apic_alloc_vector(u_int apic_id, u_int irq)
934{
935	u_int vector;
936
937	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
938
939	/*
940	 * Search for a free vector.  Currently we just use a very simple
941	 * algorithm to find the first free vector.
942	 */
943	mtx_lock_spin(&icu_lock);
944	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
945		if (lapics[apic_id].la_ioint_irqs[vector] != -1)
946			continue;
947		lapics[apic_id].la_ioint_irqs[vector] = irq;
948		mtx_unlock_spin(&icu_lock);
949		return (vector + APIC_IO_INTS);
950	}
951	mtx_unlock_spin(&icu_lock);
952	return (0);
953}
954
/*
 * Request 'count' free contiguous IDT vectors to be used by 'count'
 * IRQs.  'count' must be a power of two and the vectors will be
 * aligned on a boundary of 'align'.  If the request cannot be
 * satisfied, 0 is returned.
 */
u_int
apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
{
	u_int first, run, vector;

	KASSERT(powerof2(count), ("bad count"));
	KASSERT(powerof2(align), ("bad align"));
	KASSERT(align >= count, ("align < count"));
#ifdef INVARIANTS
	for (run = 0; run < count; run++)
		KASSERT(irqs[run] < NUM_IO_INTS, ("Invalid IRQ %u at index %u",
		    irqs[run], run));
#endif

	/*
	 * Search for 'count' free vectors.  As with apic_alloc_vector(),
	 * this just uses a simple first fit algorithm.
	 */
	run = 0;
	first = 0;
	mtx_lock_spin(&icu_lock);
	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {

		/* Vector is in use, end run. */
		if (lapics[apic_id].la_ioint_irqs[vector] != -1) {
			run = 0;
			first = 0;
			continue;
		}

		/* Start a new run if run == 0 and vector is aligned. */
		if (run == 0) {
			if ((vector & (align - 1)) != 0)
				continue;
			first = vector;
		}
		run++;

		/* Keep looping if the run isn't long enough yet. */
		if (run < count)
			continue;

		/* Found a run, assign IRQs and return the first vector. */
		for (vector = 0; vector < count; vector++)
			lapics[apic_id].la_ioint_irqs[first + vector] =
			    irqs[vector];
		mtx_unlock_spin(&icu_lock);
		return (first + APIC_IO_INTS);
	}
	mtx_unlock_spin(&icu_lock);
	printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
	return (0);
}
1014
/*
 * Enable a vector for a particular apic_id.  Since all lapics share idt
 * entries and ioint_handlers this enables the vector on all lapics.  lapics
 * which do not have the vector configured would report spurious interrupts
 * should it fire.
 */
void
apic_enable_vector(u_int apic_id, u_int vector)
{

	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
	KASSERT(ioint_handlers[vector / 32] != NULL,
	    ("No ISR handler for vector %u", vector));
#ifdef KDTRACE_HOOKS
	KASSERT(vector != IDT_DTRACE_RET,
	    ("Attempt to overwrite DTrace entry"));
#endif
	/* Install the per-bank ISR stub for this vector's bank of 32. */
	setidt(vector, ioint_handlers[vector / 32], SDT_APIC, SEL_KPL,
	    GSEL_APIC);
}
1035
/*
 * Disable a vector.  Currently a no-op aside from sanity checks: the
 * IDT entry cannot be cleared because other CPUs may still use the
 * shared entry (see the "notyet" block below).
 */
void
apic_disable_vector(u_int apic_id, u_int vector)
{

	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
#ifdef KDTRACE_HOOKS
	KASSERT(vector != IDT_DTRACE_RET,
	    ("Attempt to overwrite DTrace entry"));
#endif
	KASSERT(ioint_handlers[vector / 32] != NULL,
	    ("No ISR handler for vector %u", vector));
#ifdef notyet
	/*
	 * We can not currently clear the idt entry because other cpus
	 * may have a valid vector at this offset.
	 */
	setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
#endif
}
1055
/*
 * Release an APIC vector when it's no longer in use.  The calling
 * thread is temporarily bound to the CPU owning the vector so the
 * release cannot race with a delivery of that interrupt.
 */
void
apic_free_vector(u_int apic_id, u_int vector, u_int irq)
{
	struct thread *td;

	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
	    ("Vector %u does not map to an IRQ line", vector));
	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
	KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
	    irq, ("IRQ mismatch"));
#ifdef KDTRACE_HOOKS
	KASSERT(vector != IDT_DTRACE_RET,
	    ("Attempt to overwrite DTrace entry"));
#endif

	/*
	 * Bind us to the cpu that owned the vector before freeing it so
	 * we don't lose an interrupt delivery race.
	 */
	td = curthread;
	if (!rebooting) {
		thread_lock(td);
		if (sched_is_bound(td))
			panic("apic_free_vector: Thread already bound.\n");
		sched_bind(td, apic_cpuid(apic_id));
		thread_unlock(td);
	}
	/* Release the vector slot under icu_lock. */
	mtx_lock_spin(&icu_lock);
	lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = -1;
	mtx_unlock_spin(&icu_lock);
	if (!rebooting) {
		thread_lock(td);
		sched_unbind(td);
		thread_unlock(td);
	}
}
1094
1095/* Map an IDT vector (APIC) to an IRQ (interrupt source). */
1096u_int
1097apic_idt_to_irq(u_int apic_id, u_int vector)
1098{
1099	int irq;
1100
1101	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1102	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1103	    ("Vector %u does not map to an IRQ line", vector));
1104#ifdef KDTRACE_HOOKS
1105	KASSERT(vector != IDT_DTRACE_RET,
1106	    ("Attempt to overwrite DTrace entry"));
1107#endif
1108	irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
1109	if (irq < 0)
1110		irq = 0;
1111	return (irq);
1112}
1113
1114#ifdef DDB
1115/*
1116 * Dump data about APIC IDT vector mappings.
1117 */
1118DB_SHOW_COMMAND(apic, db_show_apic)
1119{
1120	struct intsrc *isrc;
1121	int i, verbose;
1122	u_int apic_id;
1123	u_int irq;
1124
1125	if (strcmp(modif, "vv") == 0)
1126		verbose = 2;
1127	else if (strcmp(modif, "v") == 0)
1128		verbose = 1;
1129	else
1130		verbose = 0;
1131	for (apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
1132		if (lapics[apic_id].la_present == 0)
1133			continue;
1134		db_printf("Interrupts bound to lapic %u\n", apic_id);
1135		for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
1136			irq = lapics[apic_id].la_ioint_irqs[i];
1137			if (irq == -1 || irq == IRQ_SYSCALL)
1138				continue;
1139#ifdef KDTRACE_HOOKS
1140			if (irq == IRQ_DTRACE_RET)
1141				continue;
1142#endif
1143#ifdef XENHVM
1144			if (irq == IRQ_EVTCHN)
1145				continue;
1146#endif
1147			db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
1148			if (irq == IRQ_TIMER)
1149				db_printf("lapic timer\n");
1150			else if (irq < NUM_IO_INTS) {
1151				isrc = intr_lookup_source(irq);
1152				if (isrc == NULL || verbose == 0)
1153					db_printf("IRQ %u\n", irq);
1154				else
1155					db_dump_intr_event(isrc->is_event,
1156					    verbose == 2);
1157			} else
1158				db_printf("IRQ %u ???\n", irq);
1159		}
1160	}
1161}
1162
/*
 * Helper for "show lapic": print the (hex) bit numbers of all bits set
 * in the 32-bit mask 'v', offset by 'base'.  Prints nothing at all
 * (not even the prefix) when the mask is empty.
 */
static void
dump_mask(const char *prefix, uint32_t v, int base)
{
	int i, first;

	first = 1;
	for (i = 0; i < 32; i++)
		/*
		 * Use an unsigned constant: "1 << 31" would left-shift
		 * into the sign bit of a signed int, which is undefined
		 * behavior in C.
		 */
		if (v & (1u << i)) {
			if (first) {
				db_printf("%s:", prefix);
				first = 0;
			}
			db_printf(" %02x", base + i);
		}
	if (!first)
		db_printf("\n");
}
1180
/* Show info from the lapic regs for this CPU ("show lapic" DDB command). */
DB_SHOW_COMMAND(lapic, db_show_lapic)
{
	uint32_t v;

	db_printf("lapic ID = %d\n", lapic_id());
	v = lapic->version;
	db_printf("version  = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
	    v & 0xf);
	db_printf("max LVT  = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
	v = lapic->svr;
	db_printf("SVR      = %02x (%s)\n", v & APIC_SVR_VECTOR,
	    v & APIC_SVR_ENABLE ? "enabled" : "disabled");
	db_printf("TPR      = %02x\n", lapic->tpr);

	/*
	 * The ISR/TMR/IRR register banks are each split into eight
	 * 32-bit words; dump one word, labeling each set bit with its
	 * absolute vector number (index * 32 + bit).
	 */
#define dump_field(prefix, index)					\
	dump_mask(__XSTRING(prefix ## index), lapic->prefix ## index,	\
	    index * 32)

	db_printf("In-service Interrupts:\n");
	dump_field(isr, 0);
	dump_field(isr, 1);
	dump_field(isr, 2);
	dump_field(isr, 3);
	dump_field(isr, 4);
	dump_field(isr, 5);
	dump_field(isr, 6);
	dump_field(isr, 7);

	db_printf("TMR Interrupts:\n");
	dump_field(tmr, 0);
	dump_field(tmr, 1);
	dump_field(tmr, 2);
	dump_field(tmr, 3);
	dump_field(tmr, 4);
	dump_field(tmr, 5);
	dump_field(tmr, 6);
	dump_field(tmr, 7);

	db_printf("IRR Interrupts:\n");
	dump_field(irr, 0);
	dump_field(irr, 1);
	dump_field(irr, 2);
	dump_field(irr, 3);
	dump_field(irr, 4);
	dump_field(irr, 5);
	dump_field(irr, 6);
	dump_field(irr, 7);

#undef dump_field
}
1232#endif
1233
1234/*
1235 * APIC probing support code.  This includes code to manage enumerators.
1236 */
1237
/* All registered APIC enumerators, and the one selected by apic_init(). */
static SLIST_HEAD(, apic_enumerator) enumerators =
	SLIST_HEAD_INITIALIZER(enumerators);
static struct apic_enumerator *best_enum;
1241
1242void
1243apic_register_enumerator(struct apic_enumerator *enumerator)
1244{
1245#ifdef INVARIANTS
1246	struct apic_enumerator *apic_enum;
1247
1248	SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
1249		if (apic_enum == enumerator)
1250			panic("%s: Duplicate register of %s", __func__,
1251			    enumerator->apic_name);
1252	}
1253#endif
1254	SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
1255}
1256
1257/*
1258 * We have to look for CPU's very, very early because certain subsystems
1259 * want to know how many CPU's we have extremely early on in the boot
1260 * process.
1261 */
1262static void
1263apic_init(void *dummy __unused)
1264{
1265	struct apic_enumerator *enumerator;
1266	int retval, best;
1267
1268	/* We only support built in local APICs. */
1269	if (!(cpu_feature & CPUID_APIC))
1270		return;
1271
1272	/* Don't probe if APIC mode is disabled. */
1273	if (resource_disabled("apic", 0))
1274		return;
1275
1276	/* Probe all the enumerators to find the best match. */
1277	best_enum = NULL;
1278	best = 0;
1279	SLIST_FOREACH(enumerator, &enumerators, apic_next) {
1280		retval = enumerator->apic_probe();
1281		if (retval > 0)
1282			continue;
1283		if (best_enum == NULL || best < retval) {
1284			best_enum = enumerator;
1285			best = retval;
1286		}
1287	}
1288	if (best_enum == NULL) {
1289		if (bootverbose)
1290			printf("APIC: Could not find any APICs.\n");
1291#ifndef DEV_ATPIC
1292		panic("running without device atpic requires a local APIC");
1293#endif
1294		return;
1295	}
1296
1297	if (bootverbose)
1298		printf("APIC: Using the %s enumerator.\n",
1299		    best_enum->apic_name);
1300
1301#ifdef I686_CPU
1302	/*
1303	 * To work around an errata, we disable the local APIC on some
1304	 * CPUs during early startup.  We need to turn the local APIC back
1305	 * on on such CPUs now.
1306	 */
1307	ppro_reenable_apic();
1308#endif
1309
1310	/* Probe the CPU's in the system. */
1311	retval = best_enum->apic_probe_cpus();
1312	if (retval != 0)
1313		printf("%s: Failed to probe CPUs: returned %d\n",
1314		    best_enum->apic_name, retval);
1315
1316}
1317SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);
1318
1319/*
1320 * Setup the local APIC.  We have to do this prior to starting up the APs
1321 * in the SMP case.
1322 */
1323static void
1324apic_setup_local(void *dummy __unused)
1325{
1326	int retval;
1327
1328	if (best_enum == NULL)
1329		return;
1330
1331	/* Initialize the local APIC. */
1332	retval = best_enum->apic_setup_local();
1333	if (retval != 0)
1334		printf("%s: Failed to setup the local APIC: returned %d\n",
1335		    best_enum->apic_name, retval);
1336}
1337SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);
1338
1339/*
1340 * Setup the I/O APICs.
1341 */
1342static void
1343apic_setup_io(void *dummy __unused)
1344{
1345	int retval;
1346
1347	if (best_enum == NULL)
1348		return;
1349
1350	/*
1351	 * Local APIC must be registered before other PICs and pseudo PICs
1352	 * for proper suspend/resume order.
1353	 */
1354#ifndef XEN
1355	intr_register_pic(&lapic_pic);
1356#endif
1357
1358	retval = best_enum->apic_setup_io();
1359	if (retval != 0)
1360		printf("%s: Failed to setup I/O APICs: returned %d\n",
1361		    best_enum->apic_name, retval);
1362#ifdef XEN
1363	return;
1364#endif
1365	/*
1366	 * Finish setting up the local APIC on the BSP once we know how to
1367	 * properly program the LINT pins.
1368	 */
1369	lapic_setup(1);
1370	if (bootverbose)
1371		lapic_dump("BSP");
1372
1373	/* Enable the MSI "pic". */
1374	msi_init();
1375}
1376SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_SECOND, apic_setup_io, NULL);
1377
1378#ifdef SMP
1379/*
1380 * Inter Processor Interrupt functions.  The lapic_ipi_*() functions are
1381 * private to the MD code.  The public interface for the rest of the
1382 * kernel is defined in mp_machdep.c.
1383 */
1384int
1385lapic_ipi_wait(int delay)
1386{
1387	int x, incr;
1388
1389	/*
1390	 * Wait delay loops for IPI to be sent.  This is highly bogus
1391	 * since this is sensitive to CPU clock speed.  If delay is
1392	 * -1, we wait forever.
1393	 */
1394	if (delay == -1) {
1395		incr = 0;
1396		delay = 1;
1397	} else
1398		incr = 1;
1399	for (x = 0; x < delay; x += incr) {
1400		if ((lapic->icr_lo & APIC_DELSTAT_MASK) == APIC_DELSTAT_IDLE)
1401			return (1);
1402		ia32_pause();
1403	}
1404	return (0);
1405}
1406
/*
 * Dispatch a raw IPI by programming the ICR.  'icrlo' supplies the low
 * ICR word (delivery mode, vector, destination shorthand, etc.); 'dest'
 * is the target APIC ID and is only used for APIC_DEST_DESTFLD
 * commands.  Interrupts are disabled across the register writes so the
 * ICR HI/LO pair is programmed without interference on this CPU.
 */
void
lapic_ipi_raw(register_t icrlo, u_int dest)
{
	register_t value, saveintr;

	/* XXX: Need more sanity checking of icrlo? */
	KASSERT(lapic != NULL, ("%s called too early", __func__));
	KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
	    ("%s: invalid dest field", __func__));
	KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
	    ("%s: reserved bits set in ICR LO register", __func__));

	/* Set destination in ICR HI register if it is being used. */
	saveintr = intr_disable();
	if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
		value = lapic->icr_hi;
		value &= ~APIC_ID_MASK;
		value |= dest << APIC_ID_SHIFT;
		lapic->icr_hi = value;
	}

	/*
	 * Program the contents of the IPI and dispatch it.  Only the
	 * reserved bits of the current ICR LO value are preserved; the
	 * write to icr_lo triggers the send.
	 */
	value = lapic->icr_lo;
	value &= APIC_ICRLO_RESV_MASK;
	value |= icrlo;
	lapic->icr_lo = value;
	intr_restore(saveintr);
}
1435
/* Poll budgets for lapic_ipi_wait() before and after sending an IPI. */
#define	BEFORE_SPIN	1000000
#ifdef DETECT_DEADLOCK
#define	AFTER_SPIN	1000
#endif

/*
 * Send the interrupt 'vector' as an IPI to 'dest', which is either a
 * destination shorthand (APIC_IPI_DEST_SELF/ALL/OTHERS) or an explicit
 * APIC ID.  Panics if a previously sent IPI never completed (unless
 * already panicking).
 */
void
lapic_ipi_vectored(u_int vector, int dest)
{
	register_t icrlo, destfield;

	KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
	    ("%s: invalid vector %d", __func__, vector));

	icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE;

	/*
	 * IPI_STOP_HARD is just a "fake" vector used to send a NMI.
	 * Use special rules regarding NMI if passed, otherwise specify
	 * the vector.
	 */
	if (vector == IPI_STOP_HARD)
		icrlo |= APIC_DELMODE_NMI | APIC_LEVEL_ASSERT;
	else
		icrlo |= vector | APIC_DELMODE_FIXED | APIC_LEVEL_DEASSERT;
	/* Translate a shorthand, or fall through to an explicit APIC ID. */
	destfield = 0;
	switch (dest) {
	case APIC_IPI_DEST_SELF:
		icrlo |= APIC_DEST_SELF;
		break;
	case APIC_IPI_DEST_ALL:
		icrlo |= APIC_DEST_ALLISELF;
		break;
	case APIC_IPI_DEST_OTHERS:
		icrlo |= APIC_DEST_ALLESELF;
		break;
	default:
		KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
		    ("%s: invalid destination 0x%x", __func__, dest));
		destfield = dest;
	}

	/* Wait for an earlier IPI to finish. */
	if (!lapic_ipi_wait(BEFORE_SPIN)) {
		if (panicstr != NULL)
			return;
		else
			panic("APIC: Previous IPI is stuck");
	}

	lapic_ipi_raw(icrlo, destfield);

#ifdef DETECT_DEADLOCK
	/* Wait for IPI to be delivered. */
	if (!lapic_ipi_wait(AFTER_SPIN)) {
#ifdef needsattention
		/*
		 * XXX FIXME:
		 *
		 * The above function waits for the message to actually be
		 * delivered.  It breaks out after an arbitrary timeout
		 * since the message should eventually be delivered (at
		 * least in theory) and that if it wasn't we would catch
		 * the failure with the check above when the next IPI is
		 * sent.
		 *
		 * We could skip this wait entirely, EXCEPT it probably
		 * protects us from other routines that assume that the
		 * message was delivered and acted upon when this function
		 * returns.
		 */
		printf("APIC: IPI might be stuck\n");
#else /* !needsattention */
		/* Wait until message is sent without a timeout. */
		while (lapic->icr_lo & APIC_DELSTAT_PEND)
			ia32_pause();
#endif /* needsattention */
	}
#endif /* DETECT_DEADLOCK */
}
1515#endif /* SMP */
1516