hvm.c revision 255726
/*
 * Copyright (c) 2008, 2013 Citrix Systems, Inc.
 * Copyright (c) 2012 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/xen/hvm.c 255726 2013-09-20 05:06:03Z gibbs $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcivar.h>

#include <machine/cpufunc.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <x86/apicreg.h>

#include <xen/xen-os.h>
#include <xen/features.h>
#include <xen/gnttab.h>
#include <xen/hypervisor.h>
#include <xen/hvm.h>
#include <xen/xen_intr.h>

#include <xen/interface/hvm/params.h>
#include <xen/interface/vcpu.h>

/*--------------------------- Forward Declarations ---------------------------*/
#ifdef SMP
static driver_filter_t xen_smp_rendezvous_action;
static driver_filter_t xen_invltlb;
static driver_filter_t xen_invlpg;
static driver_filter_t xen_invlrng;
static driver_filter_t xen_invlcache;
#ifdef __i386__
static driver_filter_t xen_lazypmap;
#endif
static driver_filter_t xen_ipi_bitmap_handler;
static driver_filter_t xen_cpustop_handler;
static driver_filter_t xen_cpususpend_handler;
static driver_filter_t xen_cpustophard_handler;
#endif

/*---------------------------- Extern Declarations ---------------------------*/
/* Variables used by mp_machdep to perform the MMU related IPIs */
extern volatile int smp_tlb_wait;
extern vm_offset_t smp_tlb_addr2;
#ifdef __i386__
extern vm_offset_t smp_tlb_addr1;
#else
extern struct invpcid_descr smp_tlb_invpcid;
extern uint64_t pcid_cr3;
extern int invpcid_works;
extern int pmap_pcid_enabled;
extern pmap_t smp_tlb_pmap;
#endif

#ifdef __i386__
extern void pmap_lazyfix_action(void);
#endif

/*---------------------------------- Macros ----------------------------------*/
#define	IPI_TO_IDX(ipi) ((ipi) - APIC_IPI_INTS)

/*-------------------------------- Local Types -------------------------------*/
enum xen_hvm_init_type {
	XEN_HVM_INIT_COLD,
	XEN_HVM_INIT_CANCELLED_SUSPEND,
	XEN_HVM_INIT_RESUME
};

struct xen_ipi_handler
{
	driver_filter_t	*filter;
	const char	*description;
};

/*-------------------------------- Global Data -------------------------------*/
enum xen_domain_type xen_domain_type = XEN_NATIVE;

static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support");

#ifdef SMP
static struct xen_ipi_handler xen_ipis[] =
{
	[IPI_TO_IDX(IPI_RENDEZVOUS)]	= { xen_smp_rendezvous_action,	"r"   },
	[IPI_TO_IDX(IPI_INVLTLB)]	= { xen_invltlb,		"itlb"},
	[IPI_TO_IDX(IPI_INVLPG)]	= { xen_invlpg,			"ipg" },
	[IPI_TO_IDX(IPI_INVLRNG)]	= { xen_invlrng,		"irg" },
	[IPI_TO_IDX(IPI_INVLCACHE)]	= { xen_invlcache,		"ic"  },
#ifdef __i386__
	[IPI_TO_IDX(IPI_LAZYPMAP)]	= { xen_lazypmap,		"lp"  },
#endif
	[IPI_TO_IDX(IPI_BITMAP_VECTOR)] = { xen_ipi_bitmap_handler,	"b"   },
	[IPI_TO_IDX(IPI_STOP)]		= { xen_cpustop_handler,	"st"  },
	[IPI_TO_IDX(IPI_SUSPEND)]	= { xen_cpususpend_handler,	"sp"  },
	[IPI_TO_IDX(IPI_STOP_HARD)]	= { xen_cpustophard_handler,	"sth" },
};
#endif

/**
 * If non-zero, the hypervisor has been configured to use a direct
 * IDT event callback for interrupt injection.
 */
int xen_vector_callback_enabled;

/*------------------------------- Per-CPU Data -------------------------------*/
DPCPU_DEFINE(struct vcpu_info, vcpu_local_info);
DPCPU_DEFINE(struct vcpu_info *, vcpu_info);
#ifdef SMP
DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
#endif

/*------------------ Hypervisor Access Shared Memory Regions -----------------*/
/** Hypercall table accessed via HYPERVISOR_*_op() methods. */
char *hypercall_stubs;
shared_info_t *HYPERVISOR_shared_info;

#ifdef SMP
/*---------------------------- XEN PV IPI Handlers ---------------------------*/
/*
 * These are C clones of the assembly functions found in apic_vector.s.
 */
static int
xen_ipi_bitmap_handler(void *arg)
{
	struct trapframe *frame;

	frame = arg;
	ipi_bitmap_handler(*frame);
	return (FILTER_HANDLED);
}

static int
xen_smp_rendezvous_action(void *arg)
{
#ifdef COUNT_IPIS
	int cpu;

	cpu = PCPU_GET(cpuid);
	(*ipi_rendezvous_counts[cpu])++;
#endif /* COUNT_IPIS */

	smp_rendezvous_action();
	return (FILTER_HANDLED);
}

static int
xen_invltlb(void *arg)
{
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

	invltlb();
	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}

#ifdef __amd64__
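/*
 * Global TLB flush for amd64 with PCID enabled.  Uses INVPCID when the
 * CPU supports it; otherwise the target context is flushed by reloading
 * %cr3, setting CR3_PCID_SAVE on the reload back so the current PCID's
 * cached translations are preserved.
 */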
static int
xen_invltlb_pcid(void *arg)
{
	uint64_t cr3;
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

	cr3 = rcr3();
	if (smp_tlb_invpcid.pcid != (uint64_t)-1 &&
	    smp_tlb_invpcid.pcid != 0) {
		if (invpcid_works) {
			invpcid(&smp_tlb_invpcid, INVPCID_CTX);
		} else {
			/* Otherwise reload %cr3 twice. */
			if (cr3 != pcid_cr3) {
				load_cr3(pcid_cr3);
				cr3 |= CR3_PCID_SAVE;
			}
			load_cr3(cr3);
		}
	} else {
		invltlb_globpcid();
	}
	if (smp_tlb_pmap != NULL)
		CPU_CLR_ATOMIC(PCPU_GET(cpuid), &smp_tlb_pmap->pm_save);

	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}
#endif

static int
xen_invlpg(void *arg)
{
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

#ifdef __i386__
	invlpg(smp_tlb_addr1);
#else
	invlpg(smp_tlb_invpcid.addr);
#endif
	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}

#ifdef __amd64__
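/*
 * Single page invalidation for amd64 with PCID enabled.  INVLPG only
 * affects the current address space, so without INVPCID the handler
 * must temporarily switch to the target PCID's %cr3 to do the INVLPG.
 */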
static int
xen_invlpg_pcid(void *arg)
{
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

	if (invpcid_works) {
		invpcid(&smp_tlb_invpcid, INVPCID_ADDR);
	} else if (smp_tlb_invpcid.pcid == 0) {
		invlpg(smp_tlb_invpcid.addr);
	} else if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
		invltlb_globpcid();
	} else {
		uint64_t cr3;

		/*
		 * PCID supported, but INVPCID is not.
		 * Temporarily switch to the target address
		 * space and do INVLPG.
		 */
		cr3 = rcr3();
		if (cr3 != pcid_cr3)
			load_cr3(pcid_cr3 | CR3_PCID_SAVE);
		invlpg(smp_tlb_invpcid.addr);
		load_cr3(cr3 | CR3_PCID_SAVE);
	}

	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}
#endif

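/*
 * Invalidate the page range [start, end) of the current address space,
 * one INVLPG per page.
 */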
static inline void
invlpg_range(vm_offset_t start, vm_offset_t end)
{
	do {
		invlpg(start);
		start += PAGE_SIZE;
	} while (start < end);
}

static int
xen_invlrng(void *arg)
{
	vm_offset_t addr;
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

#ifdef __i386__
	addr = smp_tlb_addr1;
	invlpg_range(addr, smp_tlb_addr2);
#else
	addr = smp_tlb_invpcid.addr;
	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			struct invpcid_descr d;

			d = smp_tlb_invpcid;
			do {
				invpcid(&d, INVPCID_ADDR);
				d.addr += PAGE_SIZE;
			} while (d.addr < smp_tlb_addr2);
		} else if (smp_tlb_invpcid.pcid == 0) {
			/*
			 * kernel pmap - use invlpg to invalidate
			 * global mapping.
			 */
			invlpg_range(addr, smp_tlb_addr2);
		} else if (smp_tlb_invpcid.pcid != (uint64_t)-1) {
			invltlb_globpcid();
			if (smp_tlb_pmap != NULL) {
				CPU_CLR_ATOMIC(PCPU_GET(cpuid),
				    &smp_tlb_pmap->pm_save);
			}
		} else {
			uint64_t cr3;

			cr3 = rcr3();
			if (cr3 != pcid_cr3)
				load_cr3(pcid_cr3 | CR3_PCID_SAVE);
			invlpg_range(addr, smp_tlb_addr2);
			load_cr3(cr3 | CR3_PCID_SAVE);
		}
	} else {
		invlpg_range(addr, smp_tlb_addr2);
	}
#endif

	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}

static int
xen_invlcache(void *arg)
{
#ifdef COUNT_IPIS
	int cpu;

	cpu = PCPU_GET(cpuid);
	(*ipi_invlcache_counts[cpu])++;
#endif /* COUNT_IPIS */

	wbinvd();
	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}

#ifdef __i386__
static int
xen_lazypmap(void *arg)
{

	pmap_lazyfix_action();
	return (FILTER_HANDLED);
}
#endif

static int
xen_cpustop_handler(void *arg)
{

	cpustop_handler();
	return (FILTER_HANDLED);
}

static int
xen_cpususpend_handler(void *arg)
{

	cpususpend_handler();
	return (FILTER_HANDLED);
}

static int
xen_cpustophard_handler(void *arg)
{

	ipi_nmi_handler();
	return (FILTER_HANDLED);
}

/* Xen PV IPI sender */
static void
xen_ipi_vectored(u_int vector, int dest)
{
	xen_intr_handle_t *ipi_handle;
	int ipi_idx, to_cpu, self;

	ipi_idx = IPI_TO_IDX(vector);
	if (ipi_idx >= nitems(xen_ipis))
		panic("IPI out of range");

	switch (dest) {
	case APIC_IPI_DEST_SELF:
		ipi_handle = DPCPU_GET(ipi_handle);
		xen_intr_signal(ipi_handle[ipi_idx]);
		break;
	case APIC_IPI_DEST_ALL:
		CPU_FOREACH(to_cpu) {
			ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
			xen_intr_signal(ipi_handle[ipi_idx]);
		}
		break;
	case APIC_IPI_DEST_OTHERS:
		self = PCPU_GET(cpuid);
		CPU_FOREACH(to_cpu) {
			if (to_cpu != self) {
				ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
				xen_intr_signal(ipi_handle[ipi_idx]);
			}
		}
		break;
	default:
		to_cpu = apic_cpuid(dest);
		ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
		xen_intr_signal(ipi_handle[ipi_idx]);
		break;
	}
}

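/*
 * Allocate and bind an event channel port for each IPI in xen_ipis on
 * the given CPU, recording the handles in that CPU's ipi_handle[]
 * array.  Table slots without a filter are left unbound.
 */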
static void
xen_cpu_ipi_init(int cpu)
{
	xen_intr_handle_t *ipi_handle;
	const struct xen_ipi_handler *ipi;
	device_t dev;
	int idx, rc;

	ipi_handle = DPCPU_ID_GET(cpu, ipi_handle);
	dev = pcpu_find(cpu)->pc_device;
	KASSERT((dev != NULL), ("NULL pcpu device_t"));

	for (ipi = xen_ipis, idx = 0; idx < nitems(xen_ipis); ipi++, idx++) {
		if (ipi->filter == NULL) {
			ipi_handle[idx] = NULL;
			continue;
		}

		rc = xen_intr_alloc_and_bind_ipi(dev, cpu, ipi->filter,
		    INTR_TYPE_TTY, &ipi_handle[idx]);
		if (rc != 0)
			panic("Unable to allocate a XEN IPI port");
		xen_intr_describe(ipi_handle[idx], "%s", ipi->description);
	}
}

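/*
 * Switch IPI delivery from the emulated local APIC to Xen event
 * channels.  On amd64 the PCID-aware TLB handlers are installed first
 * when the pmap layer has PCID support enabled.
 */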
static void
xen_init_ipis(void)
{
	int i;

	if (!xen_hvm_domain() || !xen_vector_callback_enabled)
		return;

#ifdef __amd64__
	if (pmap_pcid_enabled) {
		xen_ipis[IPI_TO_IDX(IPI_INVLTLB)].filter = xen_invltlb_pcid;
		xen_ipis[IPI_TO_IDX(IPI_INVLPG)].filter = xen_invlpg_pcid;
	}
#endif
	CPU_FOREACH(i)
		xen_cpu_ipi_init(i);

	/* Set the xen pv ipi ops to replace the native ones */
	cpu_ops.ipi_vectored = xen_ipi_vectored;
}
#endif

/*---------------------- XEN Hypervisor Probe and Setup ----------------------*/
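/*
 * Locate the base of the hypervisor CPUID leaves by scanning the
 * 0x40000000-0x4000ff00 region in 0x100 increments for the Xen
 * signature ("XenVMMXenVMM" in EBX:ECX:EDX).  A usable base must
 * advertise at least two further leaves (version and hypercall info).
 */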
static uint32_t
xen_hvm_cpuid_base(void)
{
	uint32_t base, regs[4];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		do_cpuid(base, regs);
		if (!memcmp("XenVMMXenVMM", &regs[1], 12)
		    && (regs[0] - base) >= 2)
			return (base);
	}
	return (0);
}

/*
 * Allocate and fill in the hypercall pages.
 */
static int
xen_hvm_init_hypercall_stubs(void)
{
	uint32_t base, regs[4];
	int i;

	base = xen_hvm_cpuid_base();
	if (base == 0)
		return (ENXIO);

	if (hypercall_stubs == NULL) {
		do_cpuid(base + 1, regs);
		printf("XEN: Hypervisor version %d.%d detected.\n",
		    regs[0] >> 16, regs[0] & 0xffff);
	}

	/*
	 * Find the hypercall pages.
	 */
	do_cpuid(base + 2, regs);

	if (hypercall_stubs == NULL) {
		size_t call_region_size;

		call_region_size = regs[0] * PAGE_SIZE;
		hypercall_stubs = malloc(call_region_size, M_XENHVM, M_NOWAIT);
		if (hypercall_stubs == NULL)
			panic("Unable to allocate Xen hypercall region");
	}

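	/*
	 * CPUID leaf base+2 reports the number of hypercall pages in
	 * EAX and the MSR used to install them in EBX.  Writing the
	 * physical address of a page OR'd with its page index to that
	 * MSR causes the hypervisor to fill the page with hypercall
	 * trampolines.
	 */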
	for (i = 0; i < regs[0]; i++)
		wrmsr(regs[1], vtophys(hypercall_stubs + i * PAGE_SIZE) + i);

	return (0);
}

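/*
 * Map the hypervisor's shared info page (vcpu state and event channel
 * bitmaps) over a page of our own memory using XENMEM_add_to_physmap.
 */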
static void
xen_hvm_init_shared_info_page(void)
{
	struct xen_add_to_physmap xatp;

	if (HYPERVISOR_shared_info == NULL) {
		HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT);
		if (HYPERVISOR_shared_info == NULL)
			panic("Unable to allocate Xen shared info page");
	}

	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		panic("HYPERVISOR_memory_op failed");
}

/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 */
void
xen_hvm_set_callback(device_t dev)
{
	struct xen_hvm_param xhp;
	int irq;

	if (xen_vector_callback_enabled)
		return;

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
		int error;

		xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
		error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
		if (error == 0) {
			xen_vector_callback_enabled = 1;
			return;
		}
		printf("Xen HVM callback vector registration failed (%d). "
		    "Falling back to emulated device interrupt\n", error);
	}
	xen_vector_callback_enabled = 0;
	if (dev == NULL) {
		/*
		 * Called from early boot or resume.
		 * xenpci will invoke us again later.
		 */
		return;
	}

	irq = pci_get_irq(dev);
	if (irq < 16) {
		xhp.value = HVM_CALLBACK_GSI(irq);
	} else {
		u_int slot;
		u_int pin;

		slot = pci_get_slot(dev);
		pin = pci_get_intpin(dev) - 1;
		xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
	}

	if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
		panic("Can't set evtchn callback");
}

#define	XEN_MAGIC_IOPORT 0x10
enum {
	XMI_MAGIC			 = 0x49d2,
	XMI_UNPLUG_IDE_DISKS		 = 0x01,
	XMI_UNPLUG_NICS			 = 0x02,
	XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
};

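/*
 * Xen/QEMU unplug protocol: if reading the magic I/O port returns
 * XMI_MAGIC, the device model honors unplug requests, and writing the
 * unplug mask removes the emulated disks and NICs so that only the
 * paravirtualized front-ends attach.
 */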
static void
xen_hvm_disable_emulated_devices(void)
{
	if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC)
		return;

	if (bootverbose)
		printf("XEN: Disabling emulated block and network devices\n");
	outw(XEN_MAGIC_IOPORT, XMI_UNPLUG_IDE_DISKS|XMI_UNPLUG_NICS);
}

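/*
 * Common HVM setup for cold boot and resume.  A cancelled suspend
 * leaves the pre-suspend configuration intact, so there is nothing to
 * redo in that case.
 */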
static void
xen_hvm_init(enum xen_hvm_init_type init_type)
{
	int error;
	int i;

	if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND)
		return;

	error = xen_hvm_init_hypercall_stubs();

	switch (init_type) {
	case XEN_HVM_INIT_COLD:
		if (error != 0)
			return;

		setup_xen_features();
		break;
	case XEN_HVM_INIT_RESUME:
		if (error != 0)
			panic("Unable to init Xen hypercall stubs on resume");
		break;
	default:
		panic("Unsupported HVM initialization type");
	}

	/* Clear any stale vcpu_info. */
	CPU_FOREACH(i)
		DPCPU_ID_SET(i, vcpu_info, NULL);

	xen_vector_callback_enabled = 0;
	xen_domain_type = XEN_HVM_DOMAIN;
	xen_hvm_init_shared_info_page();
	xen_hvm_set_callback(NULL);
	xen_hvm_disable_emulated_devices();
}

void
xen_hvm_suspend(void)
{
}

void
xen_hvm_resume(bool suspend_cancelled)
{

	xen_hvm_init(suspend_cancelled ?
	    XEN_HVM_INIT_CANCELLED_SUSPEND : XEN_HVM_INIT_RESUME);

	/* Register vcpu_info area for CPU#0. */
	xen_hvm_init_cpu();
}

static void
xen_hvm_sysinit(void *arg __unused)
{
	xen_hvm_init(XEN_HVM_INIT_COLD);
}

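/*
 * Register this CPU's vcpu_info area with the hypervisor, identifying
 * the vcpu by its ACPI ID.  If VCPUOP_register_vcpu_info is not
 * available, fall back to the fixed vcpu_info slot in the shared info
 * page.
 */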
void
xen_hvm_init_cpu(void)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpu_info;
	int cpu, rc;

	if (DPCPU_GET(vcpu_info) != NULL) {
		/*
		 * vcpu_info is already set.  We're resuming
		 * from a failed migration and our pre-suspend
		 * configuration is still valid.
		 */
		return;
	}

	vcpu_info = DPCPU_PTR(vcpu_local_info);
	cpu = PCPU_GET(acpi_id);
	info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT;
	info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info));

	rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	if (rc != 0)
		DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]);
	else
		DPCPU_SET(vcpu_info, vcpu_info);
}

SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_sysinit, NULL);
#ifdef SMP
SYSINIT(xen_init_ipis, SI_SUB_SMP, SI_ORDER_FIRST, xen_init_ipis, NULL);
#endif
SYSINIT(xen_hvm_init_cpu, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_init_cpu, NULL);