/* hvm.c revision 267068 */
1/*
2 * Copyright (c) 2008, 2013 Citrix Systems, Inc.
3 * Copyright (c) 2012 Spectra Logic Corporation
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: stable/10/sys/x86/xen/hvm.c 267068 2014-06-04 17:50:47Z jhb $");
30
31#include <sys/param.h>
32#include <sys/bus.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/proc.h>
36#include <sys/smp.h>
37#include <sys/systm.h>
38
39#include <vm/vm.h>
40#include <vm/pmap.h>
41
42#include <dev/pci/pcivar.h>
43
44#include <machine/cpufunc.h>
45#include <machine/cpu.h>
46#include <machine/smp.h>
47
48#include <x86/apicreg.h>
49
50#include <xen/xen-os.h>
51#include <xen/features.h>
52#include <xen/gnttab.h>
53#include <xen/hypervisor.h>
54#include <xen/hvm.h>
55#include <xen/xen_intr.h>
56
57#include <xen/interface/hvm/params.h>
58#include <xen/interface/vcpu.h>
59
60/*--------------------------- Forward Declarations ---------------------------*/
61#ifdef SMP
62static driver_filter_t xen_smp_rendezvous_action;
63static driver_filter_t xen_invltlb;
64static driver_filter_t xen_invlpg;
65static driver_filter_t xen_invlrng;
66static driver_filter_t xen_invlcache;
67#ifdef __i386__
68static driver_filter_t xen_lazypmap;
69#endif
70static driver_filter_t xen_ipi_bitmap_handler;
71static driver_filter_t xen_cpustop_handler;
72static driver_filter_t xen_cpususpend_handler;
73static driver_filter_t xen_cpustophard_handler;
74static void xen_ipi_vectored(u_int vector, int dest);
75static void xen_hvm_cpu_resume(void);
76#endif
77static void xen_hvm_cpu_init(void);
78
79/*---------------------------- Extern Declarations ---------------------------*/
80#ifdef __i386__
81extern void pmap_lazyfix_action(void);
82#endif
83#ifdef __amd64__
84extern int pmap_pcid_enabled;
85#endif
86
87/* Variables used by mp_machdep to perform the bitmap IPI */
88extern volatile u_int cpu_ipi_pending[MAXCPU];
89
90/*---------------------------------- Macros ----------------------------------*/
91#define	IPI_TO_IDX(ipi) ((ipi) - APIC_IPI_INTS)
92
93/*-------------------------------- Local Types -------------------------------*/
/*
 * Reason the Xen HVM support is being (re)initialized; selects how much
 * state xen_hvm_init() must rebuild.
 */
enum xen_hvm_init_type {
	XEN_HVM_INIT_COLD,		/* First, boot-time initialization. */
	XEN_HVM_INIT_CANCELLED_SUSPEND,	/* Suspend aborted; prior state intact. */
	XEN_HVM_INIT_RESUME		/* Resume/migration; rebuild shared state. */
};
99
/*
 * Pairs an IPI filter routine with the short name used to describe its
 * bound event channel (see xen_cpu_ipi_init()).
 */
struct xen_ipi_handler
{
	driver_filter_t	*filter;	/* Filter routine servicing the IPI. */
	const char	*description;	/* Short name for xen_intr_describe(). */
};
105
/*-------------------------------- Global Data -------------------------------*/
/* Set to XEN_HVM_DOMAIN by xen_hvm_init() when running under Xen. */
enum xen_domain_type xen_domain_type = XEN_NATIVE;

#ifdef SMP
/*
 * Xen-flavoured cpu_ops, installed at cold boot by xen_hvm_init().
 * ipi_vectored starts as the native lapic sender and is re-pointed at
 * xen_ipi_vectored by xen_setup_cpus() once event channels are bound.
 */
struct cpu_ops xen_hvm_cpu_ops = {
	.ipi_vectored	= lapic_ipi_vectored,
	.cpu_init	= xen_hvm_cpu_init,
	.cpu_resume	= xen_hvm_cpu_resume
};
#endif

static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support");
118
#ifdef SMP
/*
 * PV IPI dispatch table, indexed by IPI_TO_IDX(ipi).  Sparse entries
 * (NULL filter) get no event channel bound; see xen_cpu_ipi_init().
 */
static struct xen_ipi_handler xen_ipis[] =
{
	[IPI_TO_IDX(IPI_RENDEZVOUS)]	= { xen_smp_rendezvous_action,	"r"   },
	[IPI_TO_IDX(IPI_INVLTLB)]	= { xen_invltlb,		"itlb"},
	[IPI_TO_IDX(IPI_INVLPG)]	= { xen_invlpg,			"ipg" },
	[IPI_TO_IDX(IPI_INVLRNG)]	= { xen_invlrng,		"irg" },
	[IPI_TO_IDX(IPI_INVLCACHE)]	= { xen_invlcache,		"ic"  },
#ifdef __i386__
	[IPI_TO_IDX(IPI_LAZYPMAP)]	= { xen_lazypmap,		"lp"  },
#endif
	[IPI_TO_IDX(IPI_BITMAP_VECTOR)] = { xen_ipi_bitmap_handler,	"b"   },
	[IPI_TO_IDX(IPI_STOP)]		= { xen_cpustop_handler,	"st"  },
	[IPI_TO_IDX(IPI_SUSPEND)]	= { xen_cpususpend_handler,	"sp"  },
	[IPI_TO_IDX(IPI_STOP_HARD)]	= { xen_cpustophard_handler,	"sth" },
};
#endif
136
/**
 * If non-zero, the hypervisor has been configured to use a direct
 * IDT event callback for interrupt injection.
 */
int xen_vector_callback_enabled;

/*------------------------------- Per-CPU Data -------------------------------*/
/* Per-CPU backing storage for this CPU's vcpu_info structure. */
DPCPU_DEFINE(struct vcpu_info, vcpu_local_info);
/* Active vcpu_info pointer: local copy or legacy shared-info slot. */
DPCPU_DEFINE(struct vcpu_info *, vcpu_info);
#ifdef SMP
/* Event channel handles for this CPU's PV IPIs, parallel to xen_ipis[]. */
DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
#endif

/*------------------ Hypervisor Access Shared Memory Regions -----------------*/
/** Hypercall table accessed via HYPERVISOR_*_op() methods. */
char *hypercall_stubs;
shared_info_t *HYPERVISOR_shared_info;
154
155#ifdef SMP
156/*---------------------------- XEN PV IPI Handlers ---------------------------*/
/*
 * These are C clones of the ASM functions found in apic_vector.s.
 */
160static int
161xen_ipi_bitmap_handler(void *arg)
162{
163	struct trapframe *frame;
164
165	frame = arg;
166	ipi_bitmap_handler(*frame);
167	return (FILTER_HANDLED);
168}
169
/* Service an IPI_RENDEZVOUS request on this CPU. */
static int
xen_smp_rendezvous_action(void *arg)
{
#ifdef COUNT_IPIS
	(*ipi_rendezvous_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	smp_rendezvous_action();
	return (FILTER_HANDLED);
}
180
/* Service an IPI_INVLTLB (full TLB shootdown) request. */
static int
xen_invltlb(void *arg)
{

	invltlb_handler();
	return (FILTER_HANDLED);
}
188
#ifdef __amd64__
/* PCID-aware variant of xen_invltlb(); installed by xen_setup_cpus(). */
static int
xen_invltlb_pcid(void *arg)
{

	invltlb_pcid_handler();
	return (FILTER_HANDLED);
}
#endif
198
/* Service an IPI_INVLPG (single-page invalidation) request. */
static int
xen_invlpg(void *arg)
{

	invlpg_handler();
	return (FILTER_HANDLED);
}
206
#ifdef __amd64__
/* PCID-aware variant of xen_invlpg(); installed by xen_setup_cpus(). */
static int
xen_invlpg_pcid(void *arg)
{

	invlpg_pcid_handler();
	return (FILTER_HANDLED);
}
#endif
216
/* Service an IPI_INVLRNG (page-range invalidation) request. */
static int
xen_invlrng(void *arg)
{

	invlrng_handler();
	return (FILTER_HANDLED);
}
224
/* Service an IPI_INVLCACHE (cache invalidation) request. */
static int
xen_invlcache(void *arg)
{

	invlcache_handler();
	return (FILTER_HANDLED);
}
232
#ifdef __i386__
/* Service an IPI_LAZYPMAP request (i386 lazy pmap deactivation). */
static int
xen_lazypmap(void *arg)
{

	pmap_lazyfix_action();
	return (FILTER_HANDLED);
}
#endif
242
/* Service an IPI_STOP request. */
static int
xen_cpustop_handler(void *arg)
{

	cpustop_handler();
	return (FILTER_HANDLED);
}
250
/* Service an IPI_SUSPEND request. */
static int
xen_cpususpend_handler(void *arg)
{

	cpususpend_handler();
	return (FILTER_HANDLED);
}
258
/* Service an IPI_STOP_HARD request (NMI-style stop). */
static int
xen_cpustophard_handler(void *arg)
{

	ipi_nmi_handler();
	return (FILTER_HANDLED);
}
266
267/* Xen PV IPI sender */
268static void
269xen_ipi_vectored(u_int vector, int dest)
270{
271	xen_intr_handle_t *ipi_handle;
272	int ipi_idx, to_cpu, self;
273
274	ipi_idx = IPI_TO_IDX(vector);
275	if (ipi_idx > nitems(xen_ipis))
276		panic("IPI out of range");
277
278	switch(dest) {
279	case APIC_IPI_DEST_SELF:
280		ipi_handle = DPCPU_GET(ipi_handle);
281		xen_intr_signal(ipi_handle[ipi_idx]);
282		break;
283	case APIC_IPI_DEST_ALL:
284		CPU_FOREACH(to_cpu) {
285			ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
286			xen_intr_signal(ipi_handle[ipi_idx]);
287		}
288		break;
289	case APIC_IPI_DEST_OTHERS:
290		self = PCPU_GET(cpuid);
291		CPU_FOREACH(to_cpu) {
292			if (to_cpu != self) {
293				ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
294				xen_intr_signal(ipi_handle[ipi_idx]);
295			}
296		}
297		break;
298	default:
299		to_cpu = apic_cpuid(dest);
300		ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
301		xen_intr_signal(ipi_handle[ipi_idx]);
302		break;
303	}
304}
305
306/* XEN diverged cpu operations */
307static void
308xen_hvm_cpu_resume(void)
309{
310	u_int cpuid = PCPU_GET(cpuid);
311
312	/*
313	 * Reset pending bitmap IPIs, because Xen doesn't preserve pending
314	 * event channels on migration.
315	 */
316	cpu_ipi_pending[cpuid] = 0;
317
318	/* register vcpu_info area */
319	xen_hvm_cpu_init();
320}
321
322static void
323xen_cpu_ipi_init(int cpu)
324{
325	xen_intr_handle_t *ipi_handle;
326	const struct xen_ipi_handler *ipi;
327	device_t dev;
328	int idx, rc;
329
330	ipi_handle = DPCPU_ID_GET(cpu, ipi_handle);
331	dev = pcpu_find(cpu)->pc_device;
332	KASSERT((dev != NULL), ("NULL pcpu device_t"));
333
334	for (ipi = xen_ipis, idx = 0; idx < nitems(xen_ipis); ipi++, idx++) {
335
336		if (ipi->filter == NULL) {
337			ipi_handle[idx] = NULL;
338			continue;
339		}
340
341		rc = xen_intr_alloc_and_bind_ipi(dev, cpu, ipi->filter,
342		    INTR_TYPE_TTY, &ipi_handle[idx]);
343		if (rc != 0)
344			panic("Unable to allocate a XEN IPI port");
345		xen_intr_describe(ipi_handle[idx], "%s", ipi->description);
346	}
347}
348
349static void
350xen_setup_cpus(void)
351{
352	int i;
353
354	if (!xen_hvm_domain() || !xen_vector_callback_enabled)
355		return;
356
357#ifdef __amd64__
358	if (pmap_pcid_enabled) {
359		xen_ipis[IPI_TO_IDX(IPI_INVLTLB)].filter = xen_invltlb_pcid;
360		xen_ipis[IPI_TO_IDX(IPI_INVLPG)].filter = xen_invlpg_pcid;
361	}
362#endif
363	CPU_FOREACH(i)
364		xen_cpu_ipi_init(i);
365
366	/* Set the xen pv ipi ops to replace the native ones */
367	cpu_ops.ipi_vectored = xen_ipi_vectored;
368}
369#endif
370
371/*---------------------- XEN Hypervisor Probe and Setup ----------------------*/
372static uint32_t
373xen_hvm_cpuid_base(void)
374{
375	uint32_t base, regs[4];
376
377	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
378		do_cpuid(base, regs);
379		if (!memcmp("XenVMMXenVMM", &regs[1], 12)
380		    && (regs[0] - base) >= 2)
381			return (base);
382	}
383	return (0);
384}
385
/*
 * Allocate and fill in the hypcall page.
 *
 * Detects Xen via CPUID, allocates the hypercall trampoline region on
 * first use, and asks the hypervisor to populate each page by writing
 * the MSR reported in CPUID leaf base+2.  Returns 0 on success or
 * ENXIO when Xen is not present.
 */
static int
xen_hvm_init_hypercall_stubs(void)
{
	uint32_t base, regs[4];
	int i;

	base = xen_hvm_cpuid_base();
	if (base == 0)
		return (ENXIO);

	/* Announce the hypervisor version only once, not again on resume. */
	if (hypercall_stubs == NULL) {
		do_cpuid(base + 1, regs);
		printf("XEN: Hypervisor version %d.%d detected.\n",
		    regs[0] >> 16, regs[0] & 0xffff);
	}

	/*
	 * Find the hypercall pages.
	 * regs[0] = number of pages, regs[1] = MSR used to install them.
	 */
	do_cpuid(base + 2, regs);

	if (hypercall_stubs == NULL) {
		size_t call_region_size;

		/*
		 * NOTE(review): assumes kernel malloc() returns
		 * page-aligned memory for page-multiple requests —
		 * confirm for this allocator.
		 */
		call_region_size = regs[0] * PAGE_SIZE;
		hypercall_stubs = malloc(call_region_size, M_XENHVM, M_NOWAIT);
		if (hypercall_stubs == NULL)
			panic("Unable to allocate Xen hypercall region");
	}

	/* (Re)install every hypercall page; redone on resume/migration. */
	for (i = 0; i < regs[0]; i++)
		wrmsr(regs[1], vtophys(hypercall_stubs + i * PAGE_SIZE) + i);

	return (0);
}
424
/*
 * Back the hypervisor's shared info page with kernel memory and ask
 * Xen to map it there.  Panics on failure; the shared info page is
 * required for all further PV operation.
 */
static void
xen_hvm_init_shared_info_page(void)
{
	struct xen_add_to_physmap xatp;

	/* Allocated once; reused across suspend/resume cycles. */
	if (HYPERVISOR_shared_info == NULL) {
		HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT);
		if (HYPERVISOR_shared_info == NULL)
			panic("Unable to allocate Xen shared info page");
	}

	/* Request the shared info page be placed at this guest frame. */
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		panic("HYPERVISOR_memory_op failed");
}
443
/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 *
 * Prefers the direct HVM vector callback (IDT_EVTCHN); on failure,
 * falls back to routing the callback through the interrupt of 'dev'
 * (the emulated PCI platform device).  May be called with dev == NULL
 * from early boot or resume, in which case only the vector method is
 * attempted and xenpci retries later with a device.
 */
void
xen_hvm_set_callback(device_t dev)
{
	struct xen_hvm_param xhp;
	int irq;

	/* Already using the vector callback; nothing to change. */
	if (xen_vector_callback_enabled)
		return;

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
		int error;

		xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
		error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
		if (error == 0) {
			xen_vector_callback_enabled = 1;
			return;
		}
		printf("Xen HVM callback vector registration failed (%d). "
		    "Falling back to emulated device interrupt\n", error);
	}
	xen_vector_callback_enabled = 0;
	if (dev == NULL) {
		/*
		 * Called from early boot or resume.
		 * xenpci will invoke us again later.
		 */
		return;
	}

	/* Legacy GSI for low IRQs, PCI INTx routing otherwise. */
	irq = pci_get_irq(dev);
	if (irq < 16) {
		xhp.value = HVM_CALLBACK_GSI(irq);
	} else {
		u_int slot;
		u_int pin;

		slot = pci_get_slot(dev);
		pin = pci_get_intpin(dev) - 1;
		xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
	}

	if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
		panic("Can't set evtchn callback");
}
494
/*
 * Magic I/O port used to request unplug of emulated devices; see
 * xen_hvm_disable_emulated_devices().
 */
#define	XEN_MAGIC_IOPORT 0x10
enum {
	XMI_MAGIC			 = 0x49d2,	/* Read back when the port exists. */
	XMI_UNPLUG_IDE_DISKS		 = 0x01,
	XMI_UNPLUG_NICS			 = 0x02,
	XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
};
502
503static void
504xen_hvm_disable_emulated_devices(void)
505{
506	if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC)
507		return;
508
509	if (bootverbose)
510		printf("XEN: Disabling emulated block and network devices\n");
511	outw(XEN_MAGIC_IOPORT, XMI_UNPLUG_IDE_DISKS|XMI_UNPLUG_NICS);
512}
513
/*
 * Core Xen HVM setup, shared by cold boot and resume paths.
 *
 * init_type selects the amount of work: nothing for a cancelled
 * suspend (pre-suspend state is still valid), full setup on cold boot,
 * and re-establishment of hypervisor-shared state (hypercall pages,
 * shared info, callback) on a real resume/migration.
 */
static void
xen_hvm_init(enum xen_hvm_init_type init_type)
{
	int error;
	int i;

	if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND)
		return;

	error = xen_hvm_init_hypercall_stubs();

	switch (init_type) {
	case XEN_HVM_INIT_COLD:
		/* Not running under Xen: quietly stay native. */
		if (error != 0)
			return;

		setup_xen_features();
#ifdef SMP
		cpu_ops = xen_hvm_cpu_ops;
#endif
		vm_guest = VM_GUEST_XEN;
		break;
	case XEN_HVM_INIT_RESUME:
		if (error != 0)
			panic("Unable to init Xen hypercall stubs on resume");

		/* Clear stale vcpu_info. */
		CPU_FOREACH(i)
			DPCPU_ID_SET(i, vcpu_info, NULL);
		break;
	default:
		panic("Unsupported HVM initialization type");
	}

	xen_vector_callback_enabled = 0;
	xen_domain_type = XEN_HVM_DOMAIN;
	xen_hvm_init_shared_info_page();
	/* Early registration; xenpci will invoke us again with a device. */
	xen_hvm_set_callback(NULL);
	xen_hvm_disable_emulated_devices();
}
554
/*
 * Xen HVM suspend hook.  Intentionally empty: all state that must be
 * re-established lives in xen_hvm_resume().
 */
void
xen_hvm_suspend(void)
{
}
559
560void
561xen_hvm_resume(bool suspend_cancelled)
562{
563
564	xen_hvm_init(suspend_cancelled ?
565	    XEN_HVM_INIT_CANCELLED_SUSPEND : XEN_HVM_INIT_RESUME);
566
567	/* Register vcpu_info area for CPU#0. */
568	xen_hvm_cpu_init();
569}
570
571static void
572xen_hvm_sysinit(void *arg __unused)
573{
574	xen_hvm_init(XEN_HVM_INIT_COLD);
575}
576
577static void
578xen_set_vcpu_id(void)
579{
580	struct pcpu *pc;
581	int i;
582
583	/* Set vcpu_id to acpi_id */
584	CPU_FOREACH(i) {
585		pc = pcpu_find(i);
586		pc->pc_vcpu_id = pc->pc_acpi_id;
587		if (bootverbose)
588			printf("XEN: CPU %u has VCPU ID %u\n",
589			       i, pc->pc_vcpu_id);
590	}
591}
592
/*
 * Register this CPU's vcpu_info area with the hypervisor.
 *
 * Tries VCPUOP_register_vcpu_info so vcpu_info lives in our own
 * per-CPU storage; if the hypercall fails, falls back to the legacy
 * fixed slot in the shared info page.  A no-op if vcpu_info is already
 * set (e.g. after a cancelled migration).
 */
static void
xen_hvm_cpu_init(void)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpu_info;
	int cpu, rc;

	if (!xen_domain())
		return;

	if (DPCPU_GET(vcpu_info) != NULL) {
		/*
		 * vcpu_info is already set.  We're resuming
		 * from a failed migration and our pre-suspend
		 * configuration is still valid.
		 */
		return;
	}

	/* Describe our per-CPU storage by machine frame and offset. */
	vcpu_info = DPCPU_PTR(vcpu_local_info);
	cpu = PCPU_GET(vcpu_id);
	info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT;
	info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info));

	rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	if (rc != 0)
		/* Hypercall refused: use the legacy shared-info slot. */
		DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]);
	else
		DPCPU_SET(vcpu_info, vcpu_info);
}
623
/* Cold-boot Xen detection/initialization. */
SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_sysinit, NULL);
#ifdef SMP
/* Bind PV IPI event channels once all CPUs are running. */
SYSINIT(xen_setup_cpus, SI_SUB_SMP, SI_ORDER_FIRST, xen_setup_cpus, NULL);
#endif
/* Register the boot CPU's vcpu_info area. */
SYSINIT(xen_hvm_cpu_init, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_cpu_init, NULL);
/* Copy ACPI IDs into pc_vcpu_id early in CPU bring-up. */
SYSINIT(xen_set_vcpu_id, SI_SUB_CPU, SI_ORDER_ANY, xen_set_vcpu_id, NULL);
630