/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/io/vlapic.c 262350 2014-02-23 00:46:05Z jhb $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/io/vlapic.c 262350 2014-02-23 00:46:05Z jhb $");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/smp.h>

#include <machine/clock.h>
#include <x86/specialreg.h>
#include <x86/apicreg.h>

#include <machine/vmm.h>

#include "vmm_stat.h"
#include "vmm_lapic.h"
#include "vmm_ktr.h"
#include "vlapic.h"
#include "vioapic.h"

#define	VLAPIC_CTR0(vlapic, format)					\
	VCPU_CTR0((vlapic)->vm, (vlapic)->vcpuid, format)

#define	VLAPIC_CTR1(vlapic, format, p1)					\
	VCPU_CTR1((vlapic)->vm, (vlapic)->vcpuid, format, p1)

#define	VLAPIC_CTR2(vlapic, format, p1, p2)				\
	VCPU_CTR2((vlapic)->vm, (vlapic)->vcpuid, format, p1, p2)

#define	VLAPIC_CTR_IRR(vlapic, msg)					\
do {									\
	uint32_t *irrptr = &(vlapic)->apic.irr0;			\
	irrptr[0] = irrptr[0];	/* silence compiler */			\
	VLAPIC_CTR1((vlapic), msg " irr0 0x%08x", irrptr[0 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " irr1 0x%08x", irrptr[1 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " irr2 0x%08x", irrptr[2 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " irr3 0x%08x", irrptr[3 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " irr4 0x%08x", irrptr[4 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " irr5 0x%08x", irrptr[5 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " irr6 0x%08x", irrptr[6 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " irr7 0x%08x", irrptr[7 << 2]);	\
} while (0)

#define	VLAPIC_CTR_ISR(vlapic, msg)					\
do {									\
	uint32_t *isrptr = &(vlapic)->apic.isr0;			\
	isrptr[0] = isrptr[0];	/* silence compiler */			\
	VLAPIC_CTR1((vlapic), msg " isr0 0x%08x", isrptr[0 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " isr1 0x%08x", isrptr[1 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " isr2 0x%08x", isrptr[2 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " isr3 0x%08x", isrptr[3 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " isr4 0x%08x", isrptr[4 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " isr5 0x%08x", isrptr[5 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " isr6 0x%08x", isrptr[6 << 2]);	\
	VLAPIC_CTR1((vlapic), msg " isr7 0x%08x", isrptr[7 << 2]);	\
} while (0)

static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

#define	PRIO(x)			((x) >> 4)

#define VLAPIC_VERSION		(16)
#define VLAPIC_MAXLVT_ENTRIES	(APIC_LVT_CMCI)

#define	x2apic(vlapic)	(((vlapic)->msr_apicbase & APICBASE_X2APIC) ? 1 : 0)

enum boot_state {
	BS_INIT,
	BS_SIPI,
	BS_RUNNING
};

struct vlapic {
	struct vm		*vm;
	int			vcpuid;

	struct LAPIC		apic;

	uint32_t		esr_pending;
	int			esr_firing;

	struct callout	callout;	/* vlapic timer */
	struct bintime	timer_fire_bt;	/* callout expiry time */
	struct bintime	timer_freq_bt;	/* timer frequency */
	struct bintime	timer_period_bt; /* timer period */
	struct mtx	timer_mtx;

	/*
	 * The 'isrvec_stk' is a stack of vectors injected by the local apic.
	 * A vector is popped from the stack when the processor does an EOI.
	 * The vector on the top of the stack is used to compute the
	 * Processor Priority in conjunction with the TPR.
	 */
	uint8_t			 isrvec_stk[ISRVEC_STK_SIZE];
	int			 isrvec_stk_top;

	uint64_t		msr_apicbase;
	enum boot_state		boot_state;
};

/*
 * The 'vlapic->timer_mtx' is used to provide mutual exclusion between the
 * vlapic_callout_handler() and vcpu accesses to the following registers:
 * - initial count register aka icr_timer
 * - current count register aka ccr_timer
 * - divide config register aka dcr_timer
 * - timer LVT register
 *
 * Note that the vlapic_callout_handler() does not write to any of these
 * registers so they can be safely read from the vcpu context without locking.
 */
#define	VLAPIC_TIMER_LOCK(vlapic)	mtx_lock_spin(&((vlapic)->timer_mtx))
#define	VLAPIC_TIMER_UNLOCK(vlapic)	mtx_unlock_spin(&((vlapic)->timer_mtx))
#define	VLAPIC_TIMER_LOCKED(vlapic)	mtx_owned(&((vlapic)->timer_mtx))

#define VLAPIC_BUS_FREQ	tsc_freq

static __inline uint32_t
vlapic_get_id(struct vlapic *vlapic)
{

	if (x2apic(vlapic))
		return (vlapic->vcpuid);
	else
		return (vlapic->vcpuid << 24);
}

static __inline uint32_t
vlapic_get_ldr(struct vlapic *vlapic)
{
	struct LAPIC *lapic;
	int apicid;
	uint32_t ldr;

	lapic = &vlapic->apic;
	if (x2apic(vlapic)) {
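		/*
		 * In x2apic mode the LDR is derived from the APIC ID:
		 * bits 31:16 hold the cluster ID (apicid >> 4) and bits
		 * 15:0 hold a bitmap with bit (apicid & 0xf) set.
		 */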
		apicid = vlapic_get_id(vlapic);
		ldr = 1 << (apicid & 0xf);
		ldr |= (apicid & 0xffff0) << 12;
		return (ldr);
	} else
		return (lapic->ldr);
}

static __inline uint32_t
vlapic_get_dfr(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	lapic = &vlapic->apic;
	if (x2apic(vlapic))
		return (0);
	else
		return (lapic->dfr);
}

static void
vlapic_set_dfr(struct vlapic *vlapic, uint32_t data)
{
	uint32_t dfr;
	struct LAPIC *lapic;

	if (x2apic(vlapic)) {
		VM_CTR1(vlapic->vm, "write to DFR in x2apic mode: %#x", data);
		return;
	}

	lapic = &vlapic->apic;
	dfr = (lapic->dfr & APIC_DFR_RESERVED) | (data & APIC_DFR_MODEL_MASK);
	if ((dfr & APIC_DFR_MODEL_MASK) == APIC_DFR_MODEL_FLAT)
		VLAPIC_CTR0(vlapic, "vlapic DFR in Flat Model");
	else if ((dfr & APIC_DFR_MODEL_MASK) == APIC_DFR_MODEL_CLUSTER)
		VLAPIC_CTR0(vlapic, "vlapic DFR in Cluster Model");
	else
		VLAPIC_CTR1(vlapic, "vlapic DFR in Unknown Model %#x", dfr);

	lapic->dfr = dfr;
}

static void
vlapic_set_ldr(struct vlapic *vlapic, uint32_t data)
{
	struct LAPIC *lapic;

	/* LDR is read-only in x2apic mode */
	if (x2apic(vlapic)) {
		VLAPIC_CTR1(vlapic, "write to LDR in x2apic mode: %#x", data);
		return;
	}

	lapic = &vlapic->apic;
	lapic->ldr = data & ~APIC_LDR_RESERVED;
	VLAPIC_CTR1(vlapic, "vlapic LDR set to %#x", lapic->ldr);
}

static int
vlapic_timer_divisor(uint32_t dcr)
{
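	/*
	 * Only bits 0, 1 and 3 of the divide configuration register are
	 * defined, hence the 0xB mask.
	 */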
	switch (dcr & 0xB) {
	case APIC_TDCR_1:
		return (1);
	case APIC_TDCR_2:
		return (2);
	case APIC_TDCR_4:
		return (4);
	case APIC_TDCR_8:
		return (8);
	case APIC_TDCR_16:
		return (16);
	case APIC_TDCR_32:
		return (32);
	case APIC_TDCR_64:
		return (64);
	case APIC_TDCR_128:
		return (128);
	default:
		panic("vlapic_timer_divisor: invalid dcr 0x%08x", dcr);
	}
}

static void
vlapic_mask_lvts(uint32_t *lvts, int num_lvt)
{
	int i;
	for (i = 0; i < num_lvt; i++) {
		*lvts |= APIC_LVT_M;
		lvts += 4;
	}
}

#if 0
static inline void
vlapic_dump_lvt(uint32_t offset, uint32_t *lvt)
{
	printf("Offset %x: lvt %08x (V:%02x DS:%x M:%x)\n", offset,
	    *lvt, *lvt & APIC_LVTT_VECTOR, *lvt & APIC_LVTT_DS,
	    *lvt & APIC_LVTT_M);
}
#endif

static uint32_t
vlapic_get_ccr(struct vlapic *vlapic)
{
	struct bintime bt_now, bt_rem;
	struct LAPIC *lapic;
	uint32_t ccr;

	ccr = 0;
	lapic = &vlapic->apic;

	VLAPIC_TIMER_LOCK(vlapic);
	if (callout_active(&vlapic->callout)) {
		/*
		 * If the timer is scheduled to expire in the future then
		 * compute the value of 'ccr' based on the remaining time.
		 */
		binuptime(&bt_now);
		if (bintime_cmp(&vlapic->timer_fire_bt, &bt_now, >)) {
			bt_rem = vlapic->timer_fire_bt;
			bintime_sub(&bt_rem, &bt_now);
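			/*
			 * Convert the remaining time to a tick count using
			 * the virtual timer frequency.
			 */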
			ccr += bt_rem.sec * BT2FREQ(&vlapic->timer_freq_bt);
			ccr += bt_rem.frac / vlapic->timer_freq_bt.frac;
		}
	}
	KASSERT(ccr <= lapic->icr_timer, ("vlapic_get_ccr: invalid ccr %#x, "
	    "icr_timer is %#x", ccr, lapic->icr_timer));
	VLAPIC_CTR2(vlapic, "vlapic ccr_timer = %#x, icr_timer = %#x",
	    ccr, lapic->icr_timer);
	VLAPIC_TIMER_UNLOCK(vlapic);
	return (ccr);
}

static void
vlapic_set_dcr(struct vlapic *vlapic, uint32_t dcr)
{
	struct LAPIC *lapic;
	int divisor;

	lapic = &vlapic->apic;
	VLAPIC_TIMER_LOCK(vlapic);

	lapic->dcr_timer = dcr;
	divisor = vlapic_timer_divisor(dcr);
	VLAPIC_CTR2(vlapic, "vlapic dcr_timer=%#x, divisor=%d", dcr, divisor);

	/*
	 * Update the timer frequency and the timer period.
	 *
	 * XXX changes to the frequency divider will not take effect until
	 * the timer is reloaded.
	 */
	FREQ2BT(VLAPIC_BUS_FREQ / divisor, &vlapic->timer_freq_bt);
	vlapic->timer_period_bt = vlapic->timer_freq_bt;
	bintime_mul(&vlapic->timer_period_bt, lapic->icr_timer);

	VLAPIC_TIMER_UNLOCK(vlapic);
}

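/*
 * Latch the accumulated error bits into the ESR and clear the pending set.
 * This is triggered by a guest write to the error status register.
 */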
static void
vlapic_update_errors(struct vlapic *vlapic)
{
	struct LAPIC    *lapic = &vlapic->apic;
	lapic->esr = vlapic->esr_pending;
	vlapic->esr_pending = 0;
}

static void
vlapic_reset(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	lapic = &vlapic->apic;
	bzero(lapic, sizeof(struct LAPIC));

	lapic->version = VLAPIC_VERSION;
	lapic->version |= (VLAPIC_MAXLVT_ENTRIES << MAXLVTSHIFT);
	lapic->dfr = 0xffffffff;
	lapic->svr = APIC_SVR_VECTOR;
	vlapic_mask_lvts(&lapic->lvt_timer, 6);
	vlapic_mask_lvts(&lapic->lvt_cmci, 1);
	vlapic_set_dcr(vlapic, 0);

	if (vlapic->vcpuid == 0)
		vlapic->boot_state = BS_RUNNING;	/* BSP */
	else
		vlapic->boot_state = BS_INIT;		/* AP */
}

void
vlapic_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
{
	struct LAPIC	*lapic = &vlapic->apic;
	uint32_t	*irrptr, *tmrptr, mask;
	int		idx;

	if (vector < 0 || vector >= 256)
		panic("vlapic_set_intr_ready: invalid vector %d\n", vector);

	if (!(lapic->svr & APIC_SVR_ENABLE)) {
		VLAPIC_CTR1(vlapic, "vlapic is software disabled, ignoring "
		    "interrupt %d", vector);
		return;
	}

	if (vector < 16) {
		vlapic_set_error(vlapic, APIC_ESR_RECEIVE_ILLEGAL_VECTOR);
		return;
	}

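	/*
	 * Registers in 'struct LAPIC' are 16 bytes apart, so each 32-bit
	 * IRR/TMR word sits at a stride of 4 uint32_t's from the previous
	 * one.
	 */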
	idx = (vector / 32) * 4;
	mask = 1 << (vector % 32);

	irrptr = &lapic->irr0;
	atomic_set_int(&irrptr[idx], mask);

	/*
	 * Upon acceptance of an interrupt into the IRR the corresponding
	 * TMR bit is cleared for edge-triggered interrupts and set for
	 * level-triggered interrupts.
	 */
	tmrptr = &lapic->tmr0;
	if (level)
		atomic_set_int(&tmrptr[idx], mask);
	else
		atomic_clear_int(&tmrptr[idx], mask);

	VLAPIC_CTR_IRR(vlapic, "vlapic_set_intr_ready");
}

static __inline uint32_t *
vlapic_get_lvtptr(struct vlapic *vlapic, uint32_t offset)
{
	struct LAPIC	*lapic = &vlapic->apic;
	int 		 i;

	switch (offset) {
	case APIC_OFFSET_CMCI_LVT:
		return (&lapic->lvt_cmci);
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		i = (offset - APIC_OFFSET_TIMER_LVT) >> 2;
		return ((&lapic->lvt_timer) + i);
	default:
		panic("vlapic_get_lvt: invalid LVT\n");
	}
}

static __inline uint32_t
vlapic_get_lvt(struct vlapic *vlapic, uint32_t offset)
{

	return (*vlapic_get_lvtptr(vlapic, offset));
}

static void
vlapic_set_lvt(struct vlapic *vlapic, uint32_t offset, uint32_t val)
{
	uint32_t *lvtptr, mask;
	struct LAPIC *lapic;

	lapic = &vlapic->apic;
	lvtptr = vlapic_get_lvtptr(vlapic, offset);

	if (offset == APIC_OFFSET_TIMER_LVT)
		VLAPIC_TIMER_LOCK(vlapic);

	if (!(lapic->svr & APIC_SVR_ENABLE))
		val |= APIC_LVT_M;
	mask = APIC_LVT_M | APIC_LVT_DS | APIC_LVT_VECTOR;
	switch (offset) {
	case APIC_OFFSET_TIMER_LVT:
		mask |= APIC_LVTT_TM;
		break;
	case APIC_OFFSET_ERROR_LVT:
		break;
	case APIC_OFFSET_LINT0_LVT:
	case APIC_OFFSET_LINT1_LVT:
		mask |= APIC_LVT_TM | APIC_LVT_RIRR | APIC_LVT_IIPP;
		/* FALLTHROUGH */
	default:
		mask |= APIC_LVT_DM;
		break;
	}
	*lvtptr = val & mask;

	if (offset == APIC_OFFSET_TIMER_LVT)
		VLAPIC_TIMER_UNLOCK(vlapic);
}

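/*
 * Deliver the interrupt described by an LVT entry. Returns 1 if an
 * interrupt or NMI was actually delivered to the vcpu and 0 otherwise.
 */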
static int
vlapic_fire_lvt(struct vlapic *vlapic, uint32_t lvt)
{
	uint32_t vec, mode;

	if (lvt & APIC_LVT_M)
		return (0);

	vec = lvt & APIC_LVT_VECTOR;
	mode = lvt & APIC_LVT_DM;

	switch (mode) {
	case APIC_LVT_DM_FIXED:
		if (vec < 16) {
			vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR);
			return (0);
		}
		vlapic_set_intr_ready(vlapic, vec, false);
		vcpu_notify_event(vlapic->vm, vlapic->vcpuid);
		break;
	case APIC_LVT_DM_NMI:
		vm_inject_nmi(vlapic->vm, vlapic->vcpuid);
		break;
	default:
		// Other modes ignored
		return (0);
	}
	return (1);
}

#if 1
static void
dump_isrvec_stk(struct vlapic *vlapic)
{
	int i;
	uint32_t *isrptr;

	isrptr = &vlapic->apic.isr0;
	for (i = 0; i < 8; i++)
		printf("ISR%d 0x%08x\n", i, isrptr[i * 4]);

	for (i = 0; i <= vlapic->isrvec_stk_top; i++)
		printf("isrvec_stk[%d] = %d\n", i, vlapic->isrvec_stk[i]);
}
#endif

/*
 * Algorithm adopted from section "Interrupt, Task and Processor Priority"
 * in Intel Architecture Manual Vol 3a.
 */
static void
vlapic_update_ppr(struct vlapic *vlapic)
{
	int isrvec, tpr, ppr;

	/*
	 * Note that the value on the stack at index 0 is always 0.
	 *
	 * This is a placeholder for the value of ISRV when none of the
	 * bits is set in the ISRx registers.
	 */
	isrvec = vlapic->isrvec_stk[vlapic->isrvec_stk_top];
	tpr = vlapic->apic.tpr;

#if 1
	{
		int i, lastprio, curprio, vector, idx;
		uint32_t *isrptr;

		if (vlapic->isrvec_stk_top == 0 && isrvec != 0)
			panic("isrvec_stk is corrupted: %d", isrvec);

		/*
		 * Make sure that the priority of the nested interrupts is
		 * always increasing.
		 */
		lastprio = -1;
		for (i = 1; i <= vlapic->isrvec_stk_top; i++) {
			curprio = PRIO(vlapic->isrvec_stk[i]);
			if (curprio <= lastprio) {
				dump_isrvec_stk(vlapic);
				panic("isrvec_stk does not satisfy invariant");
			}
			lastprio = curprio;
		}

		/*
		 * Make sure that each bit set in the ISRx registers has a
		 * corresponding entry on the isrvec stack.
		 */
		i = 1;
		isrptr = &vlapic->apic.isr0;
		for (vector = 0; vector < 256; vector++) {
			idx = (vector / 32) * 4;
			if (isrptr[idx] & (1 << (vector % 32))) {
				if (i > vlapic->isrvec_stk_top ||
				    vlapic->isrvec_stk[i] != vector) {
					dump_isrvec_stk(vlapic);
					panic("ISR and isrvec_stk out of sync");
				}
				i++;
			}
		}
	}
#endif

	if (PRIO(tpr) >= PRIO(isrvec))
		ppr = tpr;
	else
		ppr = isrvec & 0xf0;

	vlapic->apic.ppr = ppr;
	VLAPIC_CTR1(vlapic, "vlapic_update_ppr 0x%02x", ppr);
}

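/*
 * EOI the highest priority vector in service: clear its bit in the ISR,
 * pop the isrvec stack, recompute the PPR and, if the vector was
 * level-triggered, forward the EOI to the virtual ioapic.
 */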
static void
vlapic_process_eoi(struct vlapic *vlapic)
{
	struct LAPIC	*lapic = &vlapic->apic;
	uint32_t	*isrptr, *tmrptr;
	int		i, idx, bitpos, vector;

	isrptr = &lapic->isr0;
	tmrptr = &lapic->tmr0;

	/*
	 * The x86 architecture reserves the first 32 vectors for use
	 * by the processor.
	 */
	for (i = 7; i > 0; i--) {
		idx = i * 4;
		bitpos = fls(isrptr[idx]);
		if (bitpos-- != 0) {
			if (vlapic->isrvec_stk_top <= 0) {
				panic("invalid vlapic isrvec_stk_top %d",
				      vlapic->isrvec_stk_top);
			}
			isrptr[idx] &= ~(1 << bitpos);
			VLAPIC_CTR_ISR(vlapic, "vlapic_process_eoi");
			vlapic->isrvec_stk_top--;
			vlapic_update_ppr(vlapic);
			if ((tmrptr[idx] & (1 << bitpos)) != 0) {
				vector = i * 32 + bitpos;
				vioapic_process_eoi(vlapic->vm, vlapic->vcpuid,
				    vector);
			}
			return;
		}
	}
}

static __inline int
vlapic_get_lvt_field(uint32_t lvt, uint32_t mask)
{

	return (lvt & mask);
}

static __inline int
vlapic_periodic_timer(struct vlapic *vlapic)
{
	uint32_t lvt;

	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);

	return (vlapic_get_lvt_field(lvt, APIC_LVTT_TM_PERIODIC));
}

static VMM_STAT(VLAPIC_INTR_ERROR, "error interrupts generated by vlapic");

void
vlapic_set_error(struct vlapic *vlapic, uint32_t mask)
{
	uint32_t lvt;

	vlapic->esr_pending |= mask;
	if (vlapic->esr_firing)
		return;
	vlapic->esr_firing = 1;

	// The error LVT always uses the fixed delivery mode.
	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_ERROR_LVT);
	if (vlapic_fire_lvt(vlapic, lvt | APIC_LVT_DM_FIXED)) {
		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_ERROR, 1);
	}
	vlapic->esr_firing = 0;
}

static VMM_STAT(VLAPIC_INTR_TIMER, "timer interrupts generated by vlapic");

static void
vlapic_fire_timer(struct vlapic *vlapic)
{
	uint32_t lvt;

	KASSERT(VLAPIC_TIMER_LOCKED(vlapic), ("vlapic_fire_timer not locked"));

	// The timer LVT always uses the fixed delivery mode.
	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);
	if (vlapic_fire_lvt(vlapic, lvt | APIC_LVT_DM_FIXED)) {
		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_TIMER, 1);
	}
}

static VMM_STAT(VLAPIC_INTR_CMC,
    "corrected machine check interrupts generated by vlapic");

void
vlapic_fire_cmci(struct vlapic *vlapic)
{
	uint32_t lvt;

	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_CMCI_LVT);
	if (vlapic_fire_lvt(vlapic, lvt)) {
		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_CMC, 1);
	}
}

static VMM_STAT_ARRAY(LVTS_TRIGGERRED, VLAPIC_MAXLVT_ENTRIES,
    "lvts triggered");

int
vlapic_trigger_lvt(struct vlapic *vlapic, int vector)
{
	uint32_t lvt;

	switch (vector) {
	case APIC_LVT_LINT0:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_LINT0_LVT);
		break;
	case APIC_LVT_LINT1:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_LINT1_LVT);
		break;
	case APIC_LVT_TIMER:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);
		lvt |= APIC_LVT_DM_FIXED;
		break;
	case APIC_LVT_ERROR:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_ERROR_LVT);
		lvt |= APIC_LVT_DM_FIXED;
		break;
	case APIC_LVT_PMC:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_PERF_LVT);
		break;
	case APIC_LVT_THERMAL:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_THERM_LVT);
		break;
	case APIC_LVT_CMCI:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_CMCI_LVT);
		break;
	default:
		return (EINVAL);
	}
	if (vlapic_fire_lvt(vlapic, lvt)) {
		vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
		    LVTS_TRIGGERRED, vector, 1);
	}
	return (0);
}

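/*
 * Timer callout: fire the timer LVT and, in periodic mode, re-arm the
 * callout while compensating for any lag in servicing the callout.
 */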
static void
vlapic_callout_handler(void *arg)
{
	struct vlapic *vlapic;
	struct bintime bt, btnow;
	sbintime_t rem_sbt;

	vlapic = arg;

	VLAPIC_TIMER_LOCK(vlapic);
	if (callout_pending(&vlapic->callout))	/* callout was reset */
		goto done;

	if (!callout_active(&vlapic->callout))	/* callout was stopped */
		goto done;

	callout_deactivate(&vlapic->callout);

	KASSERT(vlapic->apic.icr_timer != 0, ("vlapic timer is disabled"));

	vlapic_fire_timer(vlapic);

	if (vlapic_periodic_timer(vlapic)) {
		binuptime(&btnow);
		KASSERT(bintime_cmp(&btnow, &vlapic->timer_fire_bt, >=),
		    ("vlapic callout at %#lx.%#lx, expected at %#lx.%#lx",
		    btnow.sec, btnow.frac, vlapic->timer_fire_bt.sec,
		    vlapic->timer_fire_bt.frac));

		/*
		 * Compute the delta between when the timer was supposed to
		 * fire and the present time.
		 */
		bt = btnow;
		bintime_sub(&bt, &vlapic->timer_fire_bt);

		rem_sbt = bttosbt(vlapic->timer_period_bt);
		if (bintime_cmp(&bt, &vlapic->timer_period_bt, <)) {
			/*
			 * Adjust the time until the next countdown downward
			 * to account for the lost time.
			 */
			rem_sbt -= bttosbt(bt);
		} else {
			/*
			 * If the delta is greater than the timer period then
			 * just reset our time base instead of trying to catch
			 * up.
			 */
			vlapic->timer_fire_bt = btnow;
			VLAPIC_CTR2(vlapic, "vlapic timer lagging by %lu "
			    "usecs, period is %lu usecs - resetting time base",
			    bttosbt(bt) / SBT_1US,
			    bttosbt(vlapic->timer_period_bt) / SBT_1US);
		}

		bintime_add(&vlapic->timer_fire_bt, &vlapic->timer_period_bt);
		callout_reset_sbt(&vlapic->callout, rem_sbt, 0,
		    vlapic_callout_handler, vlapic, 0);
	}
done:
	VLAPIC_TIMER_UNLOCK(vlapic);
}

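/*
 * A write to the initial count register reloads the timer: compute the
 * new period from the timer frequency and (re)arm the callout, or stop
 * the callout if the initial count is zero.
 */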
static void
vlapic_set_icr_timer(struct vlapic *vlapic, uint32_t icr_timer)
{
	struct LAPIC *lapic;
	sbintime_t sbt;

	VLAPIC_TIMER_LOCK(vlapic);

	lapic = &vlapic->apic;
	lapic->icr_timer = icr_timer;

	vlapic->timer_period_bt = vlapic->timer_freq_bt;
	bintime_mul(&vlapic->timer_period_bt, icr_timer);

	if (icr_timer != 0) {
		binuptime(&vlapic->timer_fire_bt);
		bintime_add(&vlapic->timer_fire_bt, &vlapic->timer_period_bt);

		sbt = bttosbt(vlapic->timer_period_bt);
		callout_reset_sbt(&vlapic->callout, sbt, 0,
		    vlapic_callout_handler, vlapic, 0);
	} else
		callout_stop(&vlapic->callout);

	VLAPIC_TIMER_UNLOCK(vlapic);
}

/*
 * This function populates 'dmask' with the set of vcpus that match the
 * addressing specified by the (dest, phys, lowprio) tuple.
 *
 * 'x2apic_dest' specifies whether 'dest' is interpreted as x2APIC (32-bit)
 * or xAPIC (8-bit) destination field.
 */
static void
vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
    bool lowprio, bool x2apic_dest)
{
	struct vlapic *vlapic;
	uint32_t dfr, ldr, ldest, cluster;
	uint32_t mda_flat_ldest, mda_cluster_ldest, mda_ldest, mda_cluster_id;
	cpuset_t amask;
	int vcpuid;

	if ((x2apic_dest && dest == 0xffffffff) ||
	    (!x2apic_dest && dest == 0xff)) {
		/*
		 * Broadcast in both logical and physical modes.
		 */
		*dmask = vm_active_cpus(vm);
		return;
	}

	if (phys) {
		/*
		 * Physical mode: destination is APIC ID.
		 */
		CPU_ZERO(dmask);
		vcpuid = vm_apicid2vcpuid(vm, dest);
		if (vcpuid < VM_MAXCPU)
			CPU_SET(vcpuid, dmask);
	} else {
		/*
		 * In the "Flat Model" the MDA is interpreted as an 8-bit wide
		 * bitmask. This model is only available in the xAPIC mode.
		 */
		mda_flat_ldest = dest & 0xff;

		/*
		 * In the "Cluster Model" the MDA is used to identify a
		 * specific cluster and a set of APICs in that cluster.
		 */
		if (x2apic_dest) {
			mda_cluster_id = dest >> 16;
			mda_cluster_ldest = dest & 0xffff;
		} else {
			mda_cluster_id = (dest >> 4) & 0xf;
			mda_cluster_ldest = dest & 0xf;
		}

		/*
		 * Logical mode: match each APIC that has a bit set
		 * in its LDR that matches a bit in the ldest.
		 */
		CPU_ZERO(dmask);
		amask = vm_active_cpus(vm);
		while ((vcpuid = CPU_FFS(&amask)) != 0) {
			vcpuid--;
			CPU_CLR(vcpuid, &amask);

			vlapic = vm_lapic(vm, vcpuid);
			dfr = vlapic_get_dfr(vlapic);
			ldr = vlapic_get_ldr(vlapic);

			if ((dfr & APIC_DFR_MODEL_MASK) ==
			    APIC_DFR_MODEL_FLAT) {
				ldest = ldr >> 24;
				mda_ldest = mda_flat_ldest;
			} else if ((dfr & APIC_DFR_MODEL_MASK) ==
			    APIC_DFR_MODEL_CLUSTER) {
				if (x2apic(vlapic)) {
					cluster = ldr >> 16;
					ldest = ldr & 0xffff;
				} else {
					cluster = ldr >> 28;
					ldest = (ldr >> 24) & 0xf;
				}
				if (cluster != mda_cluster_id)
					continue;
				mda_ldest = mda_cluster_ldest;
			} else {
				/*
				 * Guest has configured a bad logical
				 * model for this vcpu - skip it.
				 */
				VLAPIC_CTR1(vlapic, "vlapic has bad logical "
				    "model %x - cannot deliver interrupt", dfr);
				continue;
			}

			if ((mda_ldest & ldest) != 0) {
				CPU_SET(vcpuid, dmask);
				if (lowprio)
					break;
			}
		}
	}
}

static VMM_STAT_ARRAY(IPIS_SENT, VM_MAXCPU, "ipis sent to vcpu");

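/*
 * Handle a guest write to the ICR. Fixed and NMI IPIs are delivered
 * entirely in the kernel, INIT and STARTUP IPIs drive the target vcpu's
 * boot state machine, and anything else returns 1 to indicate that the
 * access must be completed in userland.
 */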
static int
lapic_process_icr(struct vlapic *vlapic, uint64_t icrval, bool *retu)
{
	int i;
	bool phys;
	cpuset_t dmask;
	uint32_t dest, vec, mode;
	struct vlapic *vlapic2;
	struct vm_exit *vmexit;

	if (x2apic(vlapic))
		dest = icrval >> 32;
	else
		dest = icrval >> (32 + 24);
	vec = icrval & APIC_VECTOR_MASK;
	mode = icrval & APIC_DELMODE_MASK;

	if (mode == APIC_DELMODE_FIXED && vec < 16) {
		vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR);
		return (0);
	}

	if (mode == APIC_DELMODE_FIXED || mode == APIC_DELMODE_NMI) {
		switch (icrval & APIC_DEST_MASK) {
		case APIC_DEST_DESTFLD:
			phys = ((icrval & APIC_DESTMODE_LOG) == 0);
			vlapic_calcdest(vlapic->vm, &dmask, dest, phys, false,
			    x2apic(vlapic));
			break;
		case APIC_DEST_SELF:
			CPU_SETOF(vlapic->vcpuid, &dmask);
			break;
		case APIC_DEST_ALLISELF:
			dmask = vm_active_cpus(vlapic->vm);
			break;
		case APIC_DEST_ALLESELF:
			dmask = vm_active_cpus(vlapic->vm);
			CPU_CLR(vlapic->vcpuid, &dmask);
			break;
		default:
			CPU_ZERO(&dmask);	/* satisfy gcc */
			break;
		}

		while ((i = CPU_FFS(&dmask)) != 0) {
			i--;
			CPU_CLR(i, &dmask);
			if (mode == APIC_DELMODE_FIXED) {
				lapic_intr_edge(vlapic->vm, i, vec);
				vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
						    IPIS_SENT, i, 1);
			} else
				vm_inject_nmi(vlapic->vm, i);
		}

		return (0);	/* handled completely in the kernel */
	}

	if (mode == APIC_DELMODE_INIT) {
		if ((icrval & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT)
			return (0);

		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
			vlapic2 = vm_lapic(vlapic->vm, dest);

			/* move from INIT to waiting-for-SIPI state */
			if (vlapic2->boot_state == BS_INIT) {
				vlapic2->boot_state = BS_SIPI;
			}

			return (0);
		}
	}

	if (mode == APIC_DELMODE_STARTUP) {
		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
			vlapic2 = vm_lapic(vlapic->vm, dest);

			/*
			 * Ignore SIPIs in any state other than wait-for-SIPI
			 */
			if (vlapic2->boot_state != BS_SIPI)
				return (0);

			/*
			 * XXX this assumes that the startup IPI always succeeds
			 */
			vlapic2->boot_state = BS_RUNNING;
			vm_activate_cpu(vlapic2->vm, dest);

			*retu = true;
			vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
			vmexit->exitcode = VM_EXITCODE_SPINUP_AP;
			vmexit->u.spinup_ap.vcpu = dest;
			vmexit->u.spinup_ap.rip = vec << PAGE_SHIFT;

			return (0);
		}
	}

	/*
	 * This will cause a return to userland.
	 */
	return (1);
}

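/*
 * Return the highest priority vector pending in the IRR if its priority
 * exceeds the PPR, or -1 if there is no interrupt that can be injected.
 */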
int
vlapic_pending_intr(struct vlapic *vlapic)
{
	struct LAPIC	*lapic = &vlapic->apic;
	int	  	 idx, i, bitpos, vector;
	uint32_t	*irrptr, val;

	irrptr = &lapic->irr0;

	/*
	 * The x86 architecture reserves the first 32 vectors for use
	 * by the processor.
	 */
	for (i = 7; i > 0; i--) {
		idx = i * 4;
		val = atomic_load_acq_int(&irrptr[idx]);
		bitpos = fls(val);
		if (bitpos != 0) {
			vector = i * 32 + (bitpos - 1);
			if (PRIO(vector) > PRIO(lapic->ppr)) {
				VLAPIC_CTR1(vlapic, "pending intr %d", vector);
				return (vector);
			} else
				break;
		}
	}
	return (-1);
}

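/*
 * Called when the vcpu accepts 'vector' for injection: move the vector
 * from the IRR to the ISR, push it on the isrvec stack and recompute
 * the PPR.
 */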
void
vlapic_intr_accepted(struct vlapic *vlapic, int vector)
{
	struct LAPIC	*lapic = &vlapic->apic;
	uint32_t	*irrptr, *isrptr;
	int		idx, stk_top;

	/*
	 * clear the ready bit for vector being accepted in irr
	 * and set the vector as in service in isr.
	 */
	idx = (vector / 32) * 4;

	irrptr = &lapic->irr0;
	atomic_clear_int(&irrptr[idx], 1 << (vector % 32));
	VLAPIC_CTR_IRR(vlapic, "vlapic_intr_accepted");

	isrptr = &lapic->isr0;
	isrptr[idx] |= 1 << (vector % 32);
	VLAPIC_CTR_ISR(vlapic, "vlapic_intr_accepted");

	/*
	 * Update the PPR
	 */
	vlapic->isrvec_stk_top++;

	stk_top = vlapic->isrvec_stk_top;
	if (stk_top >= ISRVEC_STK_SIZE)
		panic("isrvec_stk_top overflow %d", stk_top);

	vlapic->isrvec_stk[stk_top] = vector;
	vlapic_update_ppr(vlapic);
}

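/*
 * A write to the SVR may software-enable or software-disable the APIC.
 * The timer is stopped on a disable and restarted on an enable if it is
 * configured in periodic mode.
 */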
static void
lapic_set_svr(struct vlapic *vlapic, uint32_t new)
{
	struct LAPIC *lapic;
	uint32_t old, changed;

	lapic = &vlapic->apic;
	old = lapic->svr;
	changed = old ^ new;
	if ((changed & APIC_SVR_ENABLE) != 0) {
		if ((new & APIC_SVR_ENABLE) == 0) {
			/*
			 * The apic is now disabled so stop the apic timer.
			 */
			VLAPIC_CTR0(vlapic, "vlapic is software-disabled");
			VLAPIC_TIMER_LOCK(vlapic);
			callout_stop(&vlapic->callout);
			VLAPIC_TIMER_UNLOCK(vlapic);
		} else {
			/*
			 * The apic is now enabled so restart the apic timer
			 * if it is configured in periodic mode.
			 */
			VLAPIC_CTR0(vlapic, "vlapic is software-enabled");
			if (vlapic_periodic_timer(vlapic))
				vlapic_set_icr_timer(vlapic, lapic->icr_timer);
		}
	}
	lapic->svr = new;
}

int
vlapic_read(struct vlapic *vlapic, uint64_t offset, uint64_t *data, bool *retu)
{
	struct LAPIC	*lapic = &vlapic->apic;
	uint32_t	*reg;
	int		 i;

	if (offset > sizeof(*lapic)) {
		*data = 0;
		goto done;
	}

	offset &= ~3;
	switch(offset)
	{
		case APIC_OFFSET_ID:
			*data = vlapic_get_id(vlapic);
			break;
		case APIC_OFFSET_VER:
			*data = lapic->version;
			break;
		case APIC_OFFSET_TPR:
			*data = lapic->tpr;
			break;
		case APIC_OFFSET_APR:
			*data = lapic->apr;
			break;
		case APIC_OFFSET_PPR:
			*data = lapic->ppr;
			break;
		case APIC_OFFSET_EOI:
			*data = lapic->eoi;
			break;
		case APIC_OFFSET_LDR:
			*data = vlapic_get_ldr(vlapic);
			break;
		case APIC_OFFSET_DFR:
			*data = vlapic_get_dfr(vlapic);
			break;
		case APIC_OFFSET_SVR:
			*data = lapic->svr;
			break;
		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
			i = (offset - APIC_OFFSET_ISR0) >> 2;
			reg = &lapic->isr0;
			*data = *(reg + i);
			break;
		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
			i = (offset - APIC_OFFSET_TMR0) >> 2;
			reg = &lapic->tmr0;
			*data = *(reg + i);
			break;
		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
			i = (offset - APIC_OFFSET_IRR0) >> 2;
			reg = &lapic->irr0;
			*data = atomic_load_acq_int(reg + i);
			break;
		case APIC_OFFSET_ESR:
			*data = lapic->esr;
			break;
		case APIC_OFFSET_ICR_LOW:
			*data = lapic->icr_lo;
			break;
		case APIC_OFFSET_ICR_HI:
			*data = lapic->icr_hi;
			break;
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
			*data = vlapic_get_lvt(vlapic, offset);
			break;
		case APIC_OFFSET_ICR:
			*data = lapic->icr_timer;
			break;
		case APIC_OFFSET_CCR:
			*data = vlapic_get_ccr(vlapic);
			break;
		case APIC_OFFSET_DCR:
			*data = lapic->dcr_timer;
			break;
		case APIC_OFFSET_RRR:
		default:
			*data = 0;
			break;
	}
done:
	VLAPIC_CTR2(vlapic, "vlapic read offset %#x, data %#lx", offset, *data);
	return (0);
}

int
vlapic_write(struct vlapic *vlapic, uint64_t offset, uint64_t data, bool *retu)
{
	struct LAPIC	*lapic = &vlapic->apic;
	int		retval;

	VLAPIC_CTR2(vlapic, "vlapic write offset %#x, data %#lx", offset, data);

	if (offset > sizeof(*lapic)) {
		return (0);
	}

	retval = 0;
	offset &= ~3;
	switch(offset)
	{
		case APIC_OFFSET_ID:
			break;
		case APIC_OFFSET_TPR:
			lapic->tpr = data & 0xff;
			vlapic_update_ppr(vlapic);
			break;
		case APIC_OFFSET_EOI:
			vlapic_process_eoi(vlapic);
			break;
		case APIC_OFFSET_LDR:
			vlapic_set_ldr(vlapic, data);
			break;
		case APIC_OFFSET_DFR:
			vlapic_set_dfr(vlapic, data);
			break;
		case APIC_OFFSET_SVR:
			lapic_set_svr(vlapic, data);
			break;
		case APIC_OFFSET_ICR_LOW:
			if (!x2apic(vlapic)) {
				data &= 0xffffffff;
				data |= (uint64_t)lapic->icr_hi << 32;
			}
			retval = lapic_process_icr(vlapic, data, retu);
			break;
		case APIC_OFFSET_ICR_HI:
			if (!x2apic(vlapic)) {
				retval = 0;
				lapic->icr_hi = data;
			}
			break;
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
			vlapic_set_lvt(vlapic, offset, data);
			break;
		case APIC_OFFSET_ICR:
			vlapic_set_icr_timer(vlapic, data);
			break;

		case APIC_OFFSET_DCR:
			vlapic_set_dcr(vlapic, data);
			break;

		case APIC_OFFSET_ESR:
			vlapic_update_errors(vlapic);
			break;
		case APIC_OFFSET_VER:
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
		case APIC_OFFSET_CCR:
		default:
			// Read only.
			break;
	}

	return (retval);
}

struct vlapic *
vlapic_init(struct vm *vm, int vcpuid)
{
	struct vlapic 		*vlapic;

	vlapic = malloc(sizeof(struct vlapic), M_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = vm;
	vlapic->vcpuid = vcpuid;

	/*
	 * If the vlapic is configured in x2apic mode then it will be
	 * accessed in the critical section via the MSR emulation code.
	 *
	 * Therefore the timer mutex must be a spinlock because blockable
	 * mutexes cannot be acquired in a critical section.
	 */
	mtx_init(&vlapic->timer_mtx, "vlapic timer mtx", NULL, MTX_SPIN);
	callout_init(&vlapic->callout, 1);

	vlapic->msr_apicbase = DEFAULT_APIC_BASE | APICBASE_ENABLED;

	if (vcpuid == 0)
		vlapic->msr_apicbase |= APICBASE_BSP;

	vlapic_reset(vlapic);

	return (vlapic);
}

void
vlapic_cleanup(struct vlapic *vlapic)
{

	callout_drain(&vlapic->callout);
	free(vlapic, M_VLAPIC);
}

uint64_t
vlapic_get_apicbase(struct vlapic *vlapic)
{

	return (vlapic->msr_apicbase);
}

void
vlapic_set_apicbase(struct vlapic *vlapic, uint64_t val)
{
	int err;
	enum x2apic_state state;

	err = vm_get_x2apic_state(vlapic->vm, vlapic->vcpuid, &state);
	if (err)
		panic("vlapic_set_apicbase: err %d fetching x2apic state", err);

	if (state == X2APIC_DISABLED)
		val &= ~APICBASE_X2APIC;

	vlapic->msr_apicbase = val;
}

void
vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	struct vlapic *vlapic;

	vlapic = vm_lapic(vm, vcpuid);

	if (state == X2APIC_DISABLED)
		vlapic->msr_apicbase &= ~APICBASE_X2APIC;
}

void
vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
    int delmode, int vec)
{
	bool lowprio;
	int vcpuid;
	cpuset_t dmask;

	if (delmode != APIC_DELMODE_FIXED && delmode != APIC_DELMODE_LOWPRIO) {
		VM_CTR1(vm, "vlapic intr invalid delmode %#x", delmode);
		return;
	}
	lowprio = (delmode == APIC_DELMODE_LOWPRIO);

	/*
	 * We don't provide any virtual interrupt redirection hardware so
	 * all interrupts originating from the ioapic or MSI specify the
	 * 'dest' in the legacy xAPIC format.
	 */
	vlapic_calcdest(vm, &dmask, dest, phys, lowprio, false);

	while ((vcpuid = CPU_FFS(&dmask)) != 0) {
		vcpuid--;
		CPU_CLR(vcpuid, &dmask);
		lapic_set_intr(vm, vcpuid, vec, level);
	}
}

bool
vlapic_enabled(struct vlapic *vlapic)
{
	struct LAPIC *lapic = &vlapic->apic;

	if ((vlapic->msr_apicbase & APICBASE_ENABLED) != 0 &&
	    (lapic->svr & APIC_SVR_ENABLE) != 0)
		return (true);
	else
		return (false);
}