local_apic.c revision 299067
1/*-
2 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3 * Copyright (c) 1996, by Steve Passe
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. The name of the developer may NOT be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30/*
31 * Local APIC support on Pentium and later processors.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: releng/10.2/sys/x86/x86/local_apic.c 299067 2016-05-04 15:26:23Z delphij $");
36
37#include "opt_atpic.h"
38#include "opt_hwpmc_hooks.h"
39#include "opt_kdtrace.h"
40
41#include "opt_ddb.h"
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/bus.h>
46#include <sys/kernel.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/pcpu.h>
50#include <sys/proc.h>
51#include <sys/sched.h>
52#include <sys/smp.h>
53#include <sys/timeet.h>
54
55#include <vm/vm.h>
56#include <vm/pmap.h>
57
58#include <x86/apicreg.h>
59#include <machine/clock.h>
60#include <machine/cputypes.h>
61#include <machine/frame.h>
62#include <machine/intr_machdep.h>
63#include <machine/apicvar.h>
64#include <x86/mca.h>
65#include <machine/md_var.h>
66#include <machine/smp.h>
67#include <machine/specialreg.h>
68
69#ifdef DDB
70#include <sys/interrupt.h>
71#include <ddb/ddb.h>
72#endif
73
74#ifdef __amd64__
75#define	SDT_APIC	SDT_SYSIGT
76#define	SDT_APICT	SDT_SYSIGT
77#define	GSEL_APIC	0
78#else
79#define	SDT_APIC	SDT_SYS386IGT
80#define	SDT_APICT	SDT_SYS386TGT
81#define	GSEL_APIC	GSEL(GCODE_SEL, SEL_KPL)
82#endif
83
84/* Sanity checks on IDT vectors. */
85CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
86CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
87CTASSERT(APIC_LOCAL_INTS == 240);
88CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
89
90/* Magic IRQ values for the timer and syscalls. */
91#define	IRQ_TIMER	(NUM_IO_INTS + 1)
92#define	IRQ_SYSCALL	(NUM_IO_INTS + 2)
93#define	IRQ_DTRACE_RET	(NUM_IO_INTS + 3)
94#define	IRQ_EVTCHN	(NUM_IO_INTS + 4)
95
96/*
97 * Support for local APICs.  Local APICs manage interrupts on each
98 * individual processor as opposed to I/O APICs which receive interrupts
99 * from I/O devices and then forward them on to the local APICs.
100 *
101 * Local APICs can also send interrupts to each other thus providing the
102 * mechanism for IPIs.
103 */
104
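/*
 * Software configuration for one local vector table (LVT) entry.
 * lvt_active marks a per-CPU override of the global defaults kept in
 * lvts[] below.
 */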
105struct lvt {
106	u_int lvt_edgetrigger:1;
107	u_int lvt_activehi:1;
108	u_int lvt_masked:1;
109	u_int lvt_active:1;
110	u_int lvt_mode:16;
111	u_int lvt_vector:8;
112};
113
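/*
 * Per-CPU local APIC state, indexed by APIC ID in the lapics[] array
 * below.  la_ioint_irqs maps each I/O interrupt IDT vector back to its
 * IRQ, or -1 if the vector is free.
 */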
114struct lapic {
115	struct lvt la_lvts[APIC_LVT_MAX + 1];
116	u_int la_id:8;
117	u_int la_cluster:4;
118	u_int la_cluster_id:2;
119	u_int la_present:1;
120	u_long *la_timer_count;
121	u_long la_timer_period;
122	u_int la_timer_mode;
123	uint32_t lvt_timer_cache;
124	/* Include IDT_SYSCALL to make indexing easier. */
125	int la_ioint_irqs[APIC_NUM_IOINTS + 1];
126} static lapics[MAX_APIC_ID + 1];
127
128/* Global defaults for local APIC LVT entries. */
129static struct lvt lvts[APIC_LVT_MAX + 1] = {
130	{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },	/* LINT0: masked ExtINT */
131	{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },	/* LINT1: NMI */
132	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },	/* Timer */
133	{ 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },	/* Error */
134	{ 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },	/* PMC */
135	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },	/* Thermal */
136	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },	/* CMCI */
137};
138
139static inthand_t *ioint_handlers[] = {
140	NULL,			/* 0 - 31 */
141	IDTVEC(apic_isr1),	/* 32 - 63 */
142	IDTVEC(apic_isr2),	/* 64 - 95 */
143	IDTVEC(apic_isr3),	/* 96 - 127 */
144	IDTVEC(apic_isr4),	/* 128 - 159 */
145	IDTVEC(apic_isr5),	/* 160 - 191 */
146	IDTVEC(apic_isr6),	/* 192 - 223 */
147	IDTVEC(apic_isr7),	/* 224 - 255 */
148};
149
150
151static u_int32_t lapic_timer_divisors[] = {
152	APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
153	APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
154};
155
156extern inthand_t IDTVEC(rsvd);
157
158volatile lapic_t *lapic;
159vm_paddr_t lapic_paddr;
160static u_long lapic_timer_divisor;
161static struct eventtimer lapic_et;
162#ifdef SMP
163static uint64_t lapic_ipi_wait_mult;
164#endif
165
166static void	lapic_enable(void);
167static void	lapic_resume(struct pic *pic, bool suspend_cancelled);
168static void	lapic_timer_oneshot(struct lapic *,
169		    u_int count, int enable_int);
170static void	lapic_timer_periodic(struct lapic *,
171		    u_int count, int enable_int);
172static void	lapic_timer_stop(struct lapic *);
173static void	lapic_timer_set_divisor(u_int divisor);
174static uint32_t	lvt_mode(struct lapic *la, u_int pin, uint32_t value);
175static int	lapic_et_start(struct eventtimer *et,
176    sbintime_t first, sbintime_t period);
177static int	lapic_et_stop(struct eventtimer *et);
178
179struct pic lapic_pic = { .pic_resume = lapic_resume };
180
181static uint32_t
182lvt_mode(struct lapic *la, u_int pin, uint32_t value)
183{
184	struct lvt *lvt;
185
186	KASSERT(pin <= APIC_LVT_MAX, ("%s: pin %u out of range", __func__, pin));
187	if (la->la_lvts[pin].lvt_active)
188		lvt = &la->la_lvts[pin];
189	else
190		lvt = &lvts[pin];
191
192	value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
193	    APIC_LVT_VECTOR);
194	if (lvt->lvt_edgetrigger == 0)
195		value |= APIC_LVT_TM;
196	if (lvt->lvt_activehi == 0)
197		value |= APIC_LVT_IIPP_INTALO;
198	if (lvt->lvt_masked)
199		value |= APIC_LVT_M;
200	value |= lvt->lvt_mode;
201	switch (lvt->lvt_mode) {
202	case APIC_LVT_DM_NMI:
203	case APIC_LVT_DM_SMI:
204	case APIC_LVT_DM_INIT:
205	case APIC_LVT_DM_EXTINT:
206		if (!lvt->lvt_edgetrigger) {
207			printf("lapic%u: Forcing LINT%u to edge trigger\n",
208			    la->la_id, pin);
209			value |= APIC_LVT_TM;
210		}
211		/* Use a vector of 0. */
212		break;
213	case APIC_LVT_DM_FIXED:
214		value |= lvt->lvt_vector;
215		break;
216	default:
217		panic("bad APIC LVT delivery mode: %#x\n", value);
218	}
219	return (value);
220}
221
222/*
223 * Map the local APIC and setup necessary interrupt vectors.
224 */
225void
226lapic_init(vm_paddr_t addr)
227{
228#ifdef SMP
229	uint64_t r, r1, r2, rx;
230#endif
231	u_int regs[4];
232	int i, arat;
233
234	/* Map the local APIC and setup the spurious interrupt handler. */
235	KASSERT(trunc_page(addr) == addr,
236	    ("local APIC not aligned on a page boundary"));
237	lapic_paddr = addr;
238	lapic = pmap_mapdev(addr, sizeof(lapic_t));
239	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
240	    GSEL_APIC);
241
242	/* Perform basic initialization of the BSP's local APIC. */
243	lapic_enable();
244
245	/* Set BSP's per-CPU local APIC ID. */
246	PCPU_SET(apic_id, lapic_id());
247
248	/* Local APIC timer interrupt. */
249	setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);
250
251	/* Local APIC error interrupt. */
252	setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);
253
254	/* XXX: Thermal interrupt */
255
256	/* Local APIC CMCI. */
257	setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);
258
259	if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
260		arat = 0;
261		/* Intel CPUID 0x06 EAX[2] set if APIC timer runs in C3. */
262		if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high >= 6) {
263			do_cpuid(0x06, regs);
264			if ((regs[0] & CPUTPM1_ARAT) != 0)
265				arat = 1;
266		}
267		bzero(&lapic_et, sizeof(lapic_et));
268		lapic_et.et_name = "LAPIC";
269		lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
270		    ET_FLAGS_PERCPU;
271		lapic_et.et_quality = 600;
272		if (!arat) {
273			lapic_et.et_flags |= ET_FLAGS_C3STOP;
274			lapic_et.et_quality -= 200;
275		}
276		lapic_et.et_frequency = 0;
277		/* We do not know the frequency yet, so start with a guess. */
278		lapic_et.et_min_period = 0x00001000LL;
279		lapic_et.et_max_period = SBT_1S;
280		lapic_et.et_start = lapic_et_start;
281		lapic_et.et_stop = lapic_et_stop;
282		lapic_et.et_priv = NULL;
283		et_register(&lapic_et);
284	}
285
286#ifdef SMP
287#define	LOOPS	1000000
288	/*
289	 * Calibrate the busy loop waiting for IPI ack in xAPIC mode.
290	 * lapic_ipi_wait_mult contains the number of iterations which
291	 * approximately delay execution for 1 microsecond (the
292	 * argument to native_lapic_ipi_wait() is in microseconds).
293	 *
294	 * We assume that TSC is present and already measured.
295	 * Possible TSC frequency jumps are irrelevant to the
296	 * calibration loop below, the CPU clock management code is
297	 * not yet started, and we do not enter sleep states.
298	 */
299	KASSERT((cpu_feature & CPUID_TSC) != 0 && tsc_freq != 0,
300	    ("TSC not initialized"));
301	r = rdtsc();
302	for (rx = 0; rx < LOOPS; rx++) {
303		(void)lapic->icr_lo;
304		ia32_pause();
305	}
306	r = rdtsc() - r;
307	r1 = tsc_freq * LOOPS;
308	r2 = r * 1000000;
309	lapic_ipi_wait_mult = r1 >= r2 ? r1 / r2 : 1;
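	/*
	 * Illustrative example with made-up numbers: a 2 GHz TSC and a
	 * measured cost of 2e8 ticks for the million ICR reads above
	 * (about 100 ns per iteration) give r1 = 2e9 * 1e6 and
	 * r2 = 2e8 * 1e6, so lapic_ipi_wait_mult becomes 10, i.e. roughly
	 * ten loop iterations per microsecond of requested delay.
	 */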
310	if (bootverbose) {
311		printf("LAPIC: ipi_wait() us multiplier %ju (r %ju tsc %ju)\n",
312		    (uintmax_t)lapic_ipi_wait_mult, (uintmax_t)r,
313		    (uintmax_t)tsc_freq);
314	}
315#undef LOOPS
316#endif /* SMP */
317}
318
319/*
320 * Create a local APIC instance.
321 */
322void
323lapic_create(u_int apic_id, int boot_cpu)
324{
325	int i;
326
327	if (apic_id > MAX_APIC_ID) {
328		printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
329		if (boot_cpu)
330			panic("Can't ignore BSP");
331		return;
332	}
333	KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
334	    apic_id));
335
336	/*
337	 * Assume no local LVT overrides and a cluster of 0 and
338	 * intra-cluster ID of 0.
339	 */
340	lapics[apic_id].la_present = 1;
341	lapics[apic_id].la_id = apic_id;
342	for (i = 0; i <= APIC_LVT_MAX; i++) {
343		lapics[apic_id].la_lvts[i] = lvts[i];
344		lapics[apic_id].la_lvts[i].lvt_active = 0;
345	}
346	for (i = 0; i <= APIC_NUM_IOINTS; i++)
347	    lapics[apic_id].la_ioint_irqs[i] = -1;
348	lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
349	lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
350	    IRQ_TIMER;
351#ifdef KDTRACE_HOOKS
352	lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] =
353	    IRQ_DTRACE_RET;
354#endif
355#ifdef XENHVM
356	lapics[apic_id].la_ioint_irqs[IDT_EVTCHN - APIC_IO_INTS] = IRQ_EVTCHN;
357#endif
358
359
360#ifdef SMP
361	cpu_add(apic_id, boot_cpu);
362#endif
363}
364
365/*
366 * Dump contents of local APIC registers
367 */
368void
369lapic_dump(const char* str)
370{
371	uint32_t maxlvt;
372
373	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
374	printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
375	printf("     ID: 0x%08x   VER: 0x%08x LDR: 0x%08x DFR: 0x%08x\n",
376	    lapic->id, lapic->version, lapic->ldr, lapic->dfr);
377	printf("  lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
378	    lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
379	printf("  timer: 0x%08x therm: 0x%08x err: 0x%08x",
380	    lapic->lvt_timer, lapic->lvt_thermal, lapic->lvt_error);
381	if (maxlvt >= APIC_LVT_PMC)
382		printf(" pmc: 0x%08x", lapic->lvt_pcint);
383	printf("\n");
384	if (maxlvt >= APIC_LVT_CMCI)
385		printf("   cmci: 0x%08x\n", lapic->lvt_cmci);
386}
387
388void
389lapic_setup(int boot)
390{
391	struct lapic *la;
392	u_int32_t maxlvt;
393	register_t saveintr;
394	char buf[MAXCOMLEN + 1];
395
396	la = &lapics[lapic_id()];
397	KASSERT(la->la_present, ("missing APIC structure"));
398	saveintr = intr_disable();
399	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
400
401	/* Initialize the TPR to allow all interrupts. */
402	lapic_set_tpr(0);
403
404	/* Setup spurious vector and enable the local APIC. */
405	lapic_enable();
406
407	/* Program LINT[01] LVT entries. */
408	lapic->lvt_lint0 = lvt_mode(la, APIC_LVT_LINT0, lapic->lvt_lint0);
409	lapic->lvt_lint1 = lvt_mode(la, APIC_LVT_LINT1, lapic->lvt_lint1);
410
411	/* Program the PMC LVT entry if present. */
412	if (maxlvt >= APIC_LVT_PMC)
413		lapic->lvt_pcint = lvt_mode(la, APIC_LVT_PMC, lapic->lvt_pcint);
414
415	/* Program timer LVT and setup handler. */
416	la->lvt_timer_cache = lapic->lvt_timer =
417	    lvt_mode(la, APIC_LVT_TIMER, lapic->lvt_timer);
418	if (boot) {
419		snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
420		intrcnt_add(buf, &la->la_timer_count);
421	}
422
423	/* Setup the timer if configured. */
424	if (la->la_timer_mode != 0) {
425		KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
426		    lapic_id()));
427		lapic_timer_set_divisor(lapic_timer_divisor);
428		if (la->la_timer_mode == 1)
429			lapic_timer_periodic(la, la->la_timer_period, 1);
430		else
431			lapic_timer_oneshot(la, la->la_timer_period, 1);
432	}
433
434	/* Program error LVT and clear any existing errors. */
435	lapic->lvt_error = lvt_mode(la, APIC_LVT_ERROR, lapic->lvt_error);
436	lapic->esr = 0;
437
438	/* XXX: Thermal LVT */
439
440	/* Program the CMCI LVT entry if present. */
441	if (maxlvt >= APIC_LVT_CMCI)
442		lapic->lvt_cmci = lvt_mode(la, APIC_LVT_CMCI, lapic->lvt_cmci);
443
444	intr_restore(saveintr);
445}
446
447void
448lapic_reenable_pmc(void)
449{
450#ifdef HWPMC_HOOKS
451	uint32_t value;
452
453	value =  lapic->lvt_pcint;
454	value &= ~APIC_LVT_M;
455	lapic->lvt_pcint = value;
456#endif
457}
458
459#ifdef HWPMC_HOOKS
460static void
461lapic_update_pmc(void *dummy)
462{
463	struct lapic *la;
464
465	la = &lapics[lapic_id()];
466	lapic->lvt_pcint = lvt_mode(la, APIC_LVT_PMC, lapic->lvt_pcint);
467}
468#endif
469
470int
471lapic_enable_pmc(void)
472{
473#ifdef HWPMC_HOOKS
474	u_int32_t maxlvt;
475
476	/* Fail if the local APIC is not present. */
477	if (lapic == NULL)
478		return (0);
479
480	/* Fail if the PMC LVT is not present. */
481	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
482	if (maxlvt < APIC_LVT_PMC)
483		return (0);
484
485	lvts[APIC_LVT_PMC].lvt_masked = 0;
486
487#ifdef SMP
488	/*
489	 * If hwpmc was loaded at boot time then the APs may not be
490	 * started yet.  In that case, don't forward the request to
491	 * them as they will program the lvt when they start.
492	 */
493	if (smp_started)
494		smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
495	else
496#endif
497		lapic_update_pmc(NULL);
498	return (1);
499#else
500	return (0);
501#endif
502}
503
504void
505lapic_disable_pmc(void)
506{
507#ifdef HWPMC_HOOKS
508	u_int32_t maxlvt;
509
510	/* Fail if the local APIC is not present. */
511	if (lapic == NULL)
512		return;
513
514	/* Fail if the PMC LVT is not present. */
515	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
516	if (maxlvt < APIC_LVT_PMC)
517		return;
518
519	lvts[APIC_LVT_PMC].lvt_masked = 1;
520
521#ifdef SMP
522	/* The APs should always be started when hwpmc is unloaded. */
523	KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
524#endif
525	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
526#endif
527}
528
529static int
530lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
531{
532	struct lapic *la;
533	u_long value;
534
535	la = &lapics[PCPU_GET(apic_id)];
536	if (et->et_frequency == 0) {
537		/* Start off with a divisor of 2 (power on reset default). */
538		lapic_timer_divisor = 2;
539		/* Try to calibrate the local APIC timer. */
540		do {
541			lapic_timer_set_divisor(lapic_timer_divisor);
542			lapic_timer_oneshot(la, APIC_TIMER_MAX_COUNT, 0);
543			DELAY(1000000);
544			value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
545			if (value != APIC_TIMER_MAX_COUNT)
546				break;
547			lapic_timer_divisor <<= 1;
548		} while (lapic_timer_divisor <= 128);
549		if (lapic_timer_divisor > 128)
550			panic("lapic: Divisor too big");
551		if (bootverbose)
552			printf("lapic: Divisor %lu, Frequency %lu Hz\n",
553			    lapic_timer_divisor, value);
554		et->et_frequency = value;
555		et->et_min_period = (0x00000002LLU << 32) / et->et_frequency;
556		et->et_max_period = (0xfffffffeLLU << 32) / et->et_frequency;
557	}
558	if (la->la_timer_mode == 0)
559		lapic_timer_set_divisor(lapic_timer_divisor);
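	/*
	 * 'first' and 'period' are sbintime_t values (seconds in 32.32
	 * fixed point), so multiplying by the timer frequency and shifting
	 * right by 32 converts them into timer ticks.  As a rough
	 * illustrative example, a 100 MHz timer and a 1 ms period yield a
	 * count of about 100000.
	 */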
560	if (period != 0) {
561		la->la_timer_mode = 1;
562		la->la_timer_period = ((uint32_t)et->et_frequency * period) >> 32;
563		lapic_timer_periodic(la, la->la_timer_period, 1);
564	} else {
565		la->la_timer_mode = 2;
566		la->la_timer_period = ((uint32_t)et->et_frequency * first) >> 32;
567		lapic_timer_oneshot(la, la->la_timer_period, 1);
568	}
569	return (0);
570}
571
572static int
573lapic_et_stop(struct eventtimer *et)
574{
575	struct lapic *la = &lapics[PCPU_GET(apic_id)];
576
577	la->la_timer_mode = 0;
578	lapic_timer_stop(la);
579	return (0);
580}
581
582void
583lapic_disable(void)
584{
585	uint32_t value;
586
587	/* Software disable the local APIC. */
588	value = lapic->svr;
589	value &= ~APIC_SVR_SWEN;
590	lapic->svr = value;
591}
592
593static void
594lapic_enable(void)
595{
596	u_int32_t value;
597
598	/* Program the spurious vector to enable the local APIC. */
599	value = lapic->svr;
600	value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
601	value |= (APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT);
602	lapic->svr = value;
603}
604
605/* Reset the local APIC on the BSP during resume. */
606static void
607lapic_resume(struct pic *pic, bool suspend_cancelled)
608{
609
610	lapic_setup(0);
611}
612
613int
614lapic_id(void)
615{
616
617	KASSERT(lapic != NULL, ("local APIC is not mapped"));
618	return (lapic->id >> APIC_ID_SHIFT);
619}
620
621int
622lapic_intr_pending(u_int vector)
623{
624	volatile u_int32_t *irr;
625
626	/*
627	 * The IRR registers are an array of 128-bit registers each of
628	 * which only describes 32 interrupts in its low 32 bits.  Thus,
629	 * we divide the vector by 32 to get the 128-bit index.  We then
630	 * multiply that index by 4 to get the equivalent index from
631	 * treating the IRR as an array of 32-bit registers.  Finally, we
632	 * take the vector modulo 32 to determine the individual bit to
633	 * test.
634	 */
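	/*
	 * Illustrative example: for vector 0x45 (69), 69 / 32 == 2 selects
	 * IRR2, which lives at 32-bit index 2 * 4 == 8 from irr0, and the
	 * pending state is bit 69 % 32 == 5 of that word.
	 */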
635	irr = &lapic->irr0;
636	return (irr[(vector / 32) * 4] & 1 << (vector % 32));
637}
638
639void
640lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
641{
642	struct lapic *la;
643
644	KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
645	    __func__, apic_id));
646	KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
647	    __func__, cluster));
648	KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
649	    ("%s: intra cluster id %u too big", __func__, cluster_id));
650	la = &lapics[apic_id];
651	la->la_cluster = cluster;
652	la->la_cluster_id = cluster_id;
653}
654
655int
656lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
657{
658
659	if (pin > APIC_LVT_MAX)
660		return (EINVAL);
661	if (apic_id == APIC_ID_ALL) {
662		lvts[pin].lvt_masked = masked;
663		if (bootverbose)
664			printf("lapic:");
665	} else {
666		KASSERT(lapics[apic_id].la_present,
667		    ("%s: missing APIC %u", __func__, apic_id));
668		lapics[apic_id].la_lvts[pin].lvt_masked = masked;
669		lapics[apic_id].la_lvts[pin].lvt_active = 1;
670		if (bootverbose)
671			printf("lapic%u:", apic_id);
672	}
673	if (bootverbose)
674		printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
675	return (0);
676}
677
678int
679lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
680{
681	struct lvt *lvt;
682
683	if (pin > APIC_LVT_MAX)
684		return (EINVAL);
685	if (apic_id == APIC_ID_ALL) {
686		lvt = &lvts[pin];
687		if (bootverbose)
688			printf("lapic:");
689	} else {
690		KASSERT(lapics[apic_id].la_present,
691		    ("%s: missing APIC %u", __func__, apic_id));
692		lvt = &lapics[apic_id].la_lvts[pin];
693		lvt->lvt_active = 1;
694		if (bootverbose)
695			printf("lapic%u:", apic_id);
696	}
697	lvt->lvt_mode = mode;
698	switch (mode) {
699	case APIC_LVT_DM_NMI:
700	case APIC_LVT_DM_SMI:
701	case APIC_LVT_DM_INIT:
702	case APIC_LVT_DM_EXTINT:
703		lvt->lvt_edgetrigger = 1;
704		lvt->lvt_activehi = 1;
705		if (mode == APIC_LVT_DM_EXTINT)
706			lvt->lvt_masked = 1;
707		else
708			lvt->lvt_masked = 0;
709		break;
710	default:
711		panic("Unsupported delivery mode: 0x%x\n", mode);
712	}
713	if (bootverbose) {
714		printf(" Routing ");
715		switch (mode) {
716		case APIC_LVT_DM_NMI:
717			printf("NMI");
718			break;
719		case APIC_LVT_DM_SMI:
720			printf("SMI");
721			break;
722		case APIC_LVT_DM_INIT:
723			printf("INIT");
724			break;
725		case APIC_LVT_DM_EXTINT:
726			printf("ExtINT");
727			break;
728		}
729		printf(" -> LINT%u\n", pin);
730	}
731	return (0);
732}
733
734int
735lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
736{
737
738	if (pin > APIC_LVT_MAX || pol == INTR_POLARITY_CONFORM)
739		return (EINVAL);
740	if (apic_id == APIC_ID_ALL) {
741		lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
742		if (bootverbose)
743			printf("lapic:");
744	} else {
745		KASSERT(lapics[apic_id].la_present,
746		    ("%s: missing APIC %u", __func__, apic_id));
747		lapics[apic_id].la_lvts[pin].lvt_active = 1;
748		lapics[apic_id].la_lvts[pin].lvt_activehi =
749		    (pol == INTR_POLARITY_HIGH);
750		if (bootverbose)
751			printf("lapic%u:", apic_id);
752	}
753	if (bootverbose)
754		printf(" LINT%u polarity: %s\n", pin,
755		    pol == INTR_POLARITY_HIGH ? "high" : "low");
756	return (0);
757}
758
759int
760lapic_set_lvt_triggermode(u_int apic_id, u_int pin, enum intr_trigger trigger)
761{
762
763	if (pin > APIC_LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
764		return (EINVAL);
765	if (apic_id == APIC_ID_ALL) {
766		lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
767		if (bootverbose)
768			printf("lapic:");
769	} else {
770		KASSERT(lapics[apic_id].la_present,
771		    ("%s: missing APIC %u", __func__, apic_id));
772		lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
773		    (trigger == INTR_TRIGGER_EDGE);
774		lapics[apic_id].la_lvts[pin].lvt_active = 1;
775		if (bootverbose)
776			printf("lapic%u:", apic_id);
777	}
778	if (bootverbose)
779		printf(" LINT%u trigger: %s\n", pin,
780		    trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
781	return (0);
782}
783
784/*
785 * Adjust the TPR of the current CPU so that it blocks all interrupts below
786 * the passed in vector.
787 */
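/*
 * For example, lapic_setup() calls lapic_set_tpr(0) so that the task
 * priority register does not block any interrupt class.
 */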
788void
789lapic_set_tpr(u_int vector)
790{
791#ifdef CHEAP_TPR
792	lapic->tpr = vector;
793#else
794	u_int32_t tpr;
795
796	tpr = lapic->tpr & ~APIC_TPR_PRIO;
797	tpr |= vector;
798	lapic->tpr = tpr;
799#endif
800}
801
802void
803lapic_eoi(void)
804{
805
806	lapic->eoi = 0;
807}
808
809void
810lapic_handle_intr(int vector, struct trapframe *frame)
811{
812	struct intsrc *isrc;
813
814	isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
815	    vector));
816	intr_execute_handlers(isrc, frame);
817}
818
819void
820lapic_handle_timer(struct trapframe *frame)
821{
822	struct lapic *la;
823	struct trapframe *oldframe;
824	struct thread *td;
825
826	/* Send EOI first thing. */
827	lapic_eoi();
828
829#if defined(SMP) && !defined(SCHED_ULE)
830	/*
831	 * Don't do any accounting for the disabled HTT cores, since it
832	 * will provide misleading numbers to userland.
833	 *
834	 * No locking is necessary here, since even if we lose the race
835	 * when hlt_cpus_mask changes it is not a big deal, really.
836	 *
837	 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
838	 * and unlike other schedulers it actually schedules threads to
839	 * those CPUs.
840	 */
841	if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
842		return;
843#endif
844
845	/* Look up our local APIC structure for the tick counters. */
846	la = &lapics[PCPU_GET(apic_id)];
847	(*la->la_timer_count)++;
848	critical_enter();
849	if (lapic_et.et_active) {
850		td = curthread;
851		td->td_intr_nesting_level++;
852		oldframe = td->td_intr_frame;
853		td->td_intr_frame = frame;
854		lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
855		td->td_intr_frame = oldframe;
856		td->td_intr_nesting_level--;
857	}
858	critical_exit();
859}
860
861static void
862lapic_timer_set_divisor(u_int divisor)
863{
864
865	KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
866	KASSERT(ffs(divisor) <= sizeof(lapic_timer_divisors) /
867	    sizeof(u_int32_t), ("lapic: invalid divisor %u", divisor));
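	/*
	 * ffs() returns the 1-based position of the lowest set bit, so a
	 * divisor of 8 (ffs == 4) selects lapic_timer_divisors[3], which
	 * is APIC_TDCR_8 (illustrative example).
	 */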
868	lapic->dcr_timer = lapic_timer_divisors[ffs(divisor) - 1];
869}
870
871static void
872lapic_timer_oneshot(struct lapic *la, u_int count, int enable_int)
873{
874	u_int32_t value;
875
876	value = la->lvt_timer_cache;
877	value &= ~APIC_LVTT_TM;
878	value |= APIC_LVTT_TM_ONE_SHOT;
879	if (enable_int)
880		value &= ~APIC_LVT_M;
881	lapic->lvt_timer = value;
882	lapic->icr_timer = count;
883}
884
885static void
886lapic_timer_periodic(struct lapic *la, u_int count, int enable_int)
887{
888	u_int32_t value;
889
890	value = la->lvt_timer_cache;
891	value &= ~APIC_LVTT_TM;
892	value |= APIC_LVTT_TM_PERIODIC;
893	if (enable_int)
894		value &= ~APIC_LVT_M;
895	lapic->lvt_timer = value;
896	lapic->icr_timer = count;
897}
898
899static void
900lapic_timer_stop(struct lapic *la)
901{
902	u_int32_t value;
903
904	value = la->lvt_timer_cache;
905	value &= ~APIC_LVTT_TM;
906	value |= APIC_LVT_M;
907	lapic->lvt_timer = value;
908}
909
910void
911lapic_handle_cmc(void)
912{
913
914	lapic_eoi();
915	cmc_intr();
916}
917
918/*
919 * Called from mca_init() to activate the CMC interrupt if this CPU is
920 * responsible for monitoring any MC banks for CMC events.  Since mca_init()
921 * is called prior to lapic_setup() during boot, this just needs to unmask
922 * this CPU's LVT_CMCI entry.
923 */
924void
925lapic_enable_cmc(void)
926{
927	u_int apic_id;
928
929#ifdef DEV_ATPIC
930	if (lapic == NULL)
931		return;
932#endif
933	apic_id = PCPU_GET(apic_id);
934	KASSERT(lapics[apic_id].la_present,
935	    ("%s: missing APIC %u", __func__, apic_id));
936	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_masked = 0;
937	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_active = 1;
938	if (bootverbose)
939		printf("lapic%u: CMCI unmasked\n", apic_id);
940}
941
942void
943lapic_handle_error(void)
944{
945	u_int32_t esr;
946
947	/*
948	 * Read the contents of the error status register.  Write to
949	 * the register first before reading from it to force the APIC
950	 * to update its value to indicate any errors that have
951	 * occurred since the previous write to the register.
952	 */
953	lapic->esr = 0;
954	esr = lapic->esr;
955
956	printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
957	lapic_eoi();
958}
959
960u_int
961apic_cpuid(u_int apic_id)
962{
963#ifdef SMP
964	return apic_cpuids[apic_id];
965#else
966	return 0;
967#endif
968}
969
970/* Request a free IDT vector to be used by the specified IRQ. */
971u_int
972apic_alloc_vector(u_int apic_id, u_int irq)
973{
974	u_int vector;
975
976	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
977
978	/*
979	 * Search for a free vector.  Currently we just use a very simple
980	 * algorithm to find the first free vector.
981	 */
982	mtx_lock_spin(&icu_lock);
983	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
984		if (lapics[apic_id].la_ioint_irqs[vector] != -1)
985			continue;
986		lapics[apic_id].la_ioint_irqs[vector] = irq;
987		mtx_unlock_spin(&icu_lock);
988		return (vector + APIC_IO_INTS);
989	}
990	mtx_unlock_spin(&icu_lock);
991	return (0);
992}
993
994/*
995 * Request 'count' free contiguous IDT vectors to be used by 'count'
996 * IRQs.  'count' must be a power of two and the vectors will be
997 * aligned on a boundary of 'align'.  If the request cannot be
998 * satisfied, 0 is returned.
999 */
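/*
 * Illustrative example: a request for 4 vectors with an alignment of 4
 * only considers runs whose first index within the I/O interrupt range is
 * a multiple of 4, so a free run beginning at index 0x21 is not used as
 * the start of an allocation; the search continues to the next aligned
 * free index, such as 0x24.
 */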
1000u_int
1001apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
1002{
1003	u_int first, run, vector;
1004
1005	KASSERT(powerof2(count), ("bad count"));
1006	KASSERT(powerof2(align), ("bad align"));
1007	KASSERT(align >= count, ("align < count"));
1008#ifdef INVARIANTS
1009	for (run = 0; run < count; run++)
1010		KASSERT(irqs[run] < NUM_IO_INTS, ("Invalid IRQ %u at index %u",
1011		    irqs[run], run));
1012#endif
1013
1014	/*
1015	 * Search for 'count' free vectors.  As with apic_alloc_vector(),
1016	 * this just uses a simple first fit algorithm.
1017	 */
1018	run = 0;
1019	first = 0;
1020	mtx_lock_spin(&icu_lock);
1021	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1022
1023		/* Vector is in use, end run. */
1024		if (lapics[apic_id].la_ioint_irqs[vector] != -1) {
1025			run = 0;
1026			first = 0;
1027			continue;
1028		}
1029
1030		/* Start a new run if run == 0 and vector is aligned. */
1031		if (run == 0) {
1032			if ((vector & (align - 1)) != 0)
1033				continue;
1034			first = vector;
1035		}
1036		run++;
1037
1038		/* Keep looping if the run isn't long enough yet. */
1039		if (run < count)
1040			continue;
1041
1042		/* Found a run, assign IRQs and return the first vector. */
1043		for (vector = 0; vector < count; vector++)
1044			lapics[apic_id].la_ioint_irqs[first + vector] =
1045			    irqs[vector];
1046		mtx_unlock_spin(&icu_lock);
1047		return (first + APIC_IO_INTS);
1048	}
1049	mtx_unlock_spin(&icu_lock);
1050	printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
1051	return (0);
1052}
1053
1054/*
1055 * Enable a vector for a particular apic_id.  Since all local APICs share
1056 * IDT entries and ioint_handlers, this enables the vector on all of them.
1057 * Local APICs that do not have the vector configured would report spurious
1058 * interrupts should it fire.
1059 */
1060void
1061apic_enable_vector(u_int apic_id, u_int vector)
1062{
1063
1064	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1065	KASSERT(ioint_handlers[vector / 32] != NULL,
1066	    ("No ISR handler for vector %u", vector));
1067#ifdef KDTRACE_HOOKS
1068	KASSERT(vector != IDT_DTRACE_RET,
1069	    ("Attempt to overwrite DTrace entry"));
1070#endif
1071	setidt(vector, ioint_handlers[vector / 32], SDT_APIC, SEL_KPL,
1072	    GSEL_APIC);
1073}
1074
1075void
1076apic_disable_vector(u_int apic_id, u_int vector)
1077{
1078
1079	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1080#ifdef KDTRACE_HOOKS
1081	KASSERT(vector != IDT_DTRACE_RET,
1082	    ("Attempt to overwrite DTrace entry"));
1083#endif
1084	KASSERT(ioint_handlers[vector / 32] != NULL,
1085	    ("No ISR handler for vector %u", vector));
1086#ifdef notyet
1087	/*
1088	 * We cannot currently clear the IDT entry because other CPUs
1089	 * may have a valid vector at this offset.
1090	 */
1091	setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
1092#endif
1093}
1094
1095/* Release an APIC vector when it's no longer in use. */
1096void
1097apic_free_vector(u_int apic_id, u_int vector, u_int irq)
1098{
1099	struct thread *td;
1100
1101	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1102	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1103	    ("Vector %u does not map to an IRQ line", vector));
1104	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
1105	KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
1106	    irq, ("IRQ mismatch"));
1107#ifdef KDTRACE_HOOKS
1108	KASSERT(vector != IDT_DTRACE_RET,
1109	    ("Attempt to overwrite DTrace entry"));
1110#endif
1111
1112	/*
1113	 * Bind us to the CPU that owned the vector before freeing it so
1114	 * we don't lose an interrupt delivery race.
1115	 */
1116	td = curthread;
1117	if (!rebooting) {
1118		thread_lock(td);
1119		if (sched_is_bound(td))
1120			panic("apic_free_vector: Thread already bound.\n");
1121		sched_bind(td, apic_cpuid(apic_id));
1122		thread_unlock(td);
1123	}
1124	mtx_lock_spin(&icu_lock);
1125	lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = -1;
1126	mtx_unlock_spin(&icu_lock);
1127	if (!rebooting) {
1128		thread_lock(td);
1129		sched_unbind(td);
1130		thread_unlock(td);
1131	}
1132}
1133
1134/* Map an IDT vector (APIC) to an IRQ (interrupt source). */
1135u_int
1136apic_idt_to_irq(u_int apic_id, u_int vector)
1137{
1138	int irq;
1139
1140	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1141	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1142	    ("Vector %u does not map to an IRQ line", vector));
1143#ifdef KDTRACE_HOOKS
1144	KASSERT(vector != IDT_DTRACE_RET,
1145	    ("Attempt to overwrite DTrace entry"));
1146#endif
1147	irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
1148	if (irq < 0)
1149		irq = 0;
1150	return (irq);
1151}
1152
1153#ifdef DDB
1154/*
1155 * Dump data about APIC IDT vector mappings.
1156 */
1157DB_SHOW_COMMAND(apic, db_show_apic)
1158{
1159	struct intsrc *isrc;
1160	int i, verbose;
1161	u_int apic_id;
1162	u_int irq;
1163
1164	if (strcmp(modif, "vv") == 0)
1165		verbose = 2;
1166	else if (strcmp(modif, "v") == 0)
1167		verbose = 1;
1168	else
1169		verbose = 0;
1170	for (apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
1171		if (lapics[apic_id].la_present == 0)
1172			continue;
1173		db_printf("Interrupts bound to lapic %u\n", apic_id);
1174		for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
1175			irq = lapics[apic_id].la_ioint_irqs[i];
1176			if (irq == -1 || irq == IRQ_SYSCALL)
1177				continue;
1178#ifdef KDTRACE_HOOKS
1179			if (irq == IRQ_DTRACE_RET)
1180				continue;
1181#endif
1182#ifdef XENHVM
1183			if (irq == IRQ_EVTCHN)
1184				continue;
1185#endif
1186			db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
1187			if (irq == IRQ_TIMER)
1188				db_printf("lapic timer\n");
1189			else if (irq < NUM_IO_INTS) {
1190				isrc = intr_lookup_source(irq);
1191				if (isrc == NULL || verbose == 0)
1192					db_printf("IRQ %u\n", irq);
1193				else
1194					db_dump_intr_event(isrc->is_event,
1195					    verbose == 2);
1196			} else
1197				db_printf("IRQ %u ???\n", irq);
1198		}
1199	}
1200}
1201
1202static void
1203dump_mask(const char *prefix, uint32_t v, int base)
1204{
1205	int i, first;
1206
1207	first = 1;
1208	for (i = 0; i < 32; i++)
1209		if (v & (1 << i)) {
1210			if (first) {
1211				db_printf("%s:", prefix);
1212				first = 0;
1213			}
1214			db_printf(" %02x", base + i);
1215		}
1216	if (!first)
1217		db_printf("\n");
1218}
1219
1220/* Show info from the lapic regs for this CPU. */
1221DB_SHOW_COMMAND(lapic, db_show_lapic)
1222{
1223	uint32_t v;
1224
1225	db_printf("lapic ID = %d\n", lapic_id());
1226	v = lapic->version;
1227	db_printf("version  = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
1228	    v & 0xf);
1229	db_printf("max LVT  = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
1230	v = lapic->svr;
1231	db_printf("SVR      = %02x (%s)\n", v & APIC_SVR_VECTOR,
1232	    v & APIC_SVR_ENABLE ? "enabled" : "disabled");
1233	db_printf("TPR      = %02x\n", lapic->tpr);
1234
1235#define dump_field(prefix, index)					\
1236	dump_mask(__XSTRING(prefix ## index), lapic->prefix ## index,	\
1237	    index * 32)
1238
1239	db_printf("In-service Interrupts:\n");
1240	dump_field(isr, 0);
1241	dump_field(isr, 1);
1242	dump_field(isr, 2);
1243	dump_field(isr, 3);
1244	dump_field(isr, 4);
1245	dump_field(isr, 5);
1246	dump_field(isr, 6);
1247	dump_field(isr, 7);
1248
1249	db_printf("TMR Interrupts:\n");
1250	dump_field(tmr, 0);
1251	dump_field(tmr, 1);
1252	dump_field(tmr, 2);
1253	dump_field(tmr, 3);
1254	dump_field(tmr, 4);
1255	dump_field(tmr, 5);
1256	dump_field(tmr, 6);
1257	dump_field(tmr, 7);
1258
1259	db_printf("IRR Interrupts:\n");
1260	dump_field(irr, 0);
1261	dump_field(irr, 1);
1262	dump_field(irr, 2);
1263	dump_field(irr, 3);
1264	dump_field(irr, 4);
1265	dump_field(irr, 5);
1266	dump_field(irr, 6);
1267	dump_field(irr, 7);
1268
1269#undef dump_field
1270}
1271#endif
1272
1273/*
1274 * APIC probing support code.  This includes code to manage enumerators.
1275 */
1276
1277static SLIST_HEAD(, apic_enumerator) enumerators =
1278	SLIST_HEAD_INITIALIZER(enumerators);
1279static struct apic_enumerator *best_enum;
1280
1281void
1282apic_register_enumerator(struct apic_enumerator *enumerator)
1283{
1284#ifdef INVARIANTS
1285	struct apic_enumerator *apic_enum;
1286
1287	SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
1288		if (apic_enum == enumerator)
1289			panic("%s: Duplicate register of %s", __func__,
1290			    enumerator->apic_name);
1291	}
1292#endif
1293	SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
1294}
1295
1296/*
1297 * We have to look for CPUs very early because certain subsystems
1298 * want to know how many CPUs we have extremely early on in the boot
1299 * process.
1300 */
1301static void
1302apic_init(void *dummy __unused)
1303{
1304	struct apic_enumerator *enumerator;
1305	int retval, best;
1306
1307	/* We only support built-in local APICs. */
1308	if (!(cpu_feature & CPUID_APIC))
1309		return;
1310
1311	/* Don't probe if APIC mode is disabled. */
1312	if (resource_disabled("apic", 0))
1313		return;
1314
1315	/* Probe all the enumerators to find the best match. */
1316	best_enum = NULL;
1317	best = 0;
1318	SLIST_FOREACH(enumerator, &enumerators, apic_next) {
1319		retval = enumerator->apic_probe();
1320		if (retval > 0)
1321			continue;
1322		if (best_enum == NULL || best < retval) {
1323			best_enum = enumerator;
1324			best = retval;
1325		}
1326	}
1327	if (best_enum == NULL) {
1328		if (bootverbose)
1329			printf("APIC: Could not find any APICs.\n");
1330#ifndef DEV_ATPIC
1331		panic("running without device atpic requires a local APIC");
1332#endif
1333		return;
1334	}
1335
1336	if (bootverbose)
1337		printf("APIC: Using the %s enumerator.\n",
1338		    best_enum->apic_name);
1339
1340#ifdef I686_CPU
1341	/*
1342	 * To work around an erratum, we disable the local APIC on some
1343	 * CPUs during early startup.  We need to re-enable the local APIC
1344	 * on such CPUs now.
1345	 */
1346	ppro_reenable_apic();
1347#endif
1348
1349	/* Probe the CPUs in the system. */
1350	retval = best_enum->apic_probe_cpus();
1351	if (retval != 0)
1352		printf("%s: Failed to probe CPUs: returned %d\n",
1353		    best_enum->apic_name, retval);
1354
1355}
1356SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);
1357
1358/*
1359 * Setup the local APIC.  We have to do this prior to starting up the APs
1360 * in the SMP case.
1361 */
1362static void
1363apic_setup_local(void *dummy __unused)
1364{
1365	int retval;
1366
1367	if (best_enum == NULL)
1368		return;
1369
1370	/* Initialize the local APIC. */
1371	retval = best_enum->apic_setup_local();
1372	if (retval != 0)
1373		printf("%s: Failed to setup the local APIC: returned %d\n",
1374		    best_enum->apic_name, retval);
1375}
1376SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);
1377
1378/*
1379 * Setup the I/O APICs.
1380 */
1381static void
1382apic_setup_io(void *dummy __unused)
1383{
1384	int retval;
1385
1386	if (best_enum == NULL)
1387		return;
1388
1389	/*
1390	 * Local APIC must be registered before other PICs and pseudo PICs
1391	 * for proper suspend/resume order.
1392	 */
1393#ifndef XEN
1394	intr_register_pic(&lapic_pic);
1395#endif
1396
1397	retval = best_enum->apic_setup_io();
1398	if (retval != 0)
1399		printf("%s: Failed to setup I/O APICs: returned %d\n",
1400		    best_enum->apic_name, retval);
1401#ifdef XEN
1402	return;
1403#endif
1404	/*
1405	 * Finish setting up the local APIC on the BSP once we know how to
1406	 * properly program the LINT pins.
1407	 */
1408	lapic_setup(1);
1409	if (bootverbose)
1410		lapic_dump("BSP");
1411
1412	/* Enable the MSI "pic". */
1413	msi_init();
1414}
1415SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_SECOND, apic_setup_io, NULL);
1416
1417#ifdef SMP
1418/*
1419 * Inter Processor Interrupt functions.  The lapic_ipi_*() functions are
1420 * private to the MD code.  The public interface for the rest of the
1421 * kernel is defined in mp_machdep.c.
1422 */
1423
1424/*
1425 * Wait 'delay' microseconds for the IPI to be sent.  If delay is -1, we
1426 * wait forever.
1427 */
1428int
1429lapic_ipi_wait(int delay)
1430{
1431	uint64_t rx;
1432
1433	for (rx = 0; delay == -1 || rx < lapic_ipi_wait_mult * delay; rx++) {
1434		if ((lapic->icr_lo & APIC_DELSTAT_MASK) == APIC_DELSTAT_IDLE)
1435			return (1);
1436		ia32_pause();
1437	}
1438	return (0);
1439}
1440
1441void
1442lapic_ipi_raw(register_t icrlo, u_int dest)
1443{
1444	register_t value, saveintr;
1445
1446	/* XXX: Need more sanity checking of icrlo? */
1447	KASSERT(lapic != NULL, ("%s called too early", __func__));
1448	KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1449	    ("%s: invalid dest field", __func__));
1450	KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
1451	    ("%s: reserved bits set in ICR LO register", __func__));
1452
1453	/* Set destination in ICR HI register if it is being used. */
1454	saveintr = intr_disable();
1455	if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
1456		value = lapic->icr_hi;
1457		value &= ~APIC_ID_MASK;
1458		value |= dest << APIC_ID_SHIFT;
1459		lapic->icr_hi = value;
1460	}
1461
1462	/* Program the contents of the IPI and dispatch it. */
1463	value = lapic->icr_lo;
1464	value &= APIC_ICRLO_RESV_MASK;
1465	value |= icrlo;
1466	lapic->icr_lo = value;
1467	intr_restore(saveintr);
1468}
1469
1470#define	BEFORE_SPIN	50000
1471#ifdef DETECT_DEADLOCK
1472#define	AFTER_SPIN	50
1473#endif
1474
1475void
1476lapic_ipi_vectored(u_int vector, int dest)
1477{
1478	register_t icrlo, destfield;
1479
1480	KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
1481	    ("%s: invalid vector %d", __func__, vector));
1482
1483	icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;
1484
1485	/*
1486	 * IPI_STOP_HARD is just a "fake" vector used to send an NMI.
1487	 * Use the special NMI delivery mode if it is passed, otherwise
1488	 * specify the vector.
1489	 */
1490	if (vector == IPI_STOP_HARD)
1491		icrlo |= APIC_DELMODE_NMI;
1492	else
1493		icrlo |= vector | APIC_DELMODE_FIXED;
1494	destfield = 0;
1495	switch (dest) {
1496	case APIC_IPI_DEST_SELF:
1497		icrlo |= APIC_DEST_SELF;
1498		break;
1499	case APIC_IPI_DEST_ALL:
1500		icrlo |= APIC_DEST_ALLISELF;
1501		break;
1502	case APIC_IPI_DEST_OTHERS:
1503		icrlo |= APIC_DEST_ALLESELF;
1504		break;
1505	default:
1506		KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1507		    ("%s: invalid destination 0x%x", __func__, dest));
1508		destfield = dest;
1509	}
1510
1511	/* Wait for an earlier IPI to finish. */
1512	if (!lapic_ipi_wait(BEFORE_SPIN)) {
1513		if (panicstr != NULL)
1514			return;
1515		else
1516			panic("APIC: Previous IPI is stuck");
1517	}
1518
1519	lapic_ipi_raw(icrlo, destfield);
1520
1521#ifdef DETECT_DEADLOCK
1522	/* Wait for IPI to be delivered. */
1523	if (!lapic_ipi_wait(AFTER_SPIN)) {
1524#ifdef needsattention
1525		/*
1526		 * XXX FIXME:
1527		 *
1528		 * The above function waits for the message to actually be
1529		 * delivered.  It breaks out after an arbitrary timeout
1530		 * since the message should eventually be delivered (at
1531		 * least in theory) and, if it was not, we would catch
1532		 * the failure with the check above when the next IPI is
1533		 * sent.
1534		 *
1535		 * We could skip this wait entirely, EXCEPT it probably
1536		 * protects us from other routines that assume that the
1537		 * message was delivered and acted upon when this function
1538		 * returns.
1539		 */
1540		printf("APIC: IPI might be stuck\n");
1541#else /* !needsattention */
1542		/* Wait without a timeout until the message is sent. */
1543		while (lapic->icr_lo & APIC_DELSTAT_PEND)
1544			ia32_pause();
1545#endif /* needsattention */
1546	}
1547#endif /* DETECT_DEADLOCK */
1548}
1549#endif /* SMP */
1550