bhyverun.c revision 245020
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>

#include <machine/segments.h>

#include <stdio.h>
#include <stdlib.h>
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include <pthread_np.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "inout.h"
#include "dbgport.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "xmsr.h"
#include "ioapic.h"
#include "spinup_ap.h"

#define	DEFAULT_GUEST_HZ	100
#define	DEFAULT_GUEST_TSLICE	200

#define	GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */

#define	VMEXIT_SWITCH		0	/* force vcpu switch in mux mode */
#define	VMEXIT_CONTINUE		1	/* continue from next instruction */
#define	VMEXIT_RESTART		2	/* restart current instruction */
#define	VMEXIT_ABORT		3	/* abort the vm run loop */
#define	VMEXIT_RESET		4	/* guest machine has reset */

#define	MB		(1024UL * 1024)
#define	GB		(1024UL * MB)

typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);

int guest_tslice = DEFAULT_GUEST_TSLICE;
int guest_hz = DEFAULT_GUEST_HZ;
char *vmname;

u_long lomem_sz;
u_long himem_sz;

int guest_ncpus;

static int pincpu = -1;
static int guest_vcpu_mux;
static int guest_vmexit_on_hlt, guest_vmexit_on_pause, disable_x2apic;

static int foundcpus;

static int strictio;

static int acpi;

static char *lomem_addr;
static char *himem_addr;

static char *progname;
static const int BSP = 0;

static int cpumask;

static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);

struct vm_exit vmexit[VM_MAXCPU];

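/*
 * Run-time statistics for this guest: counts of the various VM exits
 * handled below, vcpu switches performed by the run loop, and guest
 * initiated resets.
 */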
struct fbsdstats {
	uint64_t	vmexit_bogus;
	uint64_t	vmexit_bogus_switch;
	uint64_t	vmexit_hlt;
	uint64_t	vmexit_pause;
	uint64_t	vmexit_mtrap;
	uint64_t	vmexit_paging;
	uint64_t	cpu_switch_rotate;
	uint64_t	cpu_switch_direct;
	int		io_reset;
} stats;

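/*
 * Per-vcpu thread bookkeeping: the pthread running the vcpu, the VM
 * context it belongs to, and the vcpu number it services.
 */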
struct mt_vmm_info {
	pthread_t	mt_thr;
	struct vmctx	*mt_ctx;
	int		mt_vcpu;
} mt_vmm_info[VM_MAXCPU];

static void
usage(int code)
{

	fprintf(stderr,
		"Usage: %s [-abehABHIPx] [-g <gdb port>] [-c vcpus] [-z <hz>]"
		" [-t <tslice>] [-s <pci>] [-S <pci>] [-p pincpu] [-n <pci>]"
		" [-m lowmem] [-M highmem] <vm>\n"
		"       -a: local apic is in XAPIC mode (default is X2APIC)\n"
		"       -A: create an ACPI table\n"
		"       -b: enable the bvmcons console device\n"
		"       -g: gdb port (default is %d and 0 means don't open)\n"
		"       -c: # cpus (default 1)\n"
		"       -p: pin vcpu 'n' to host cpu 'pincpu + n'\n"
		"       -B: inject breakpoint exception on vm entry\n"
		"       -H: vmexit from the guest on hlt\n"
		"       -I: present an ioapic to the guest\n"
		"       -P: vmexit from the guest on pause\n"
		"       -e: exit on unhandled i/o access\n"
		"       -h: help\n"
		"       -z: guest hz (default is %d)\n"
		"       -s: <slot,driver,configinfo> PCI slot config\n"
		"       -S: <slot,driver,configinfo> legacy PCI slot config\n"
		"       -m: lowmem in MB\n"
		"       -M: highmem in MB\n"
		"       -x: mux vcpus to 1 hcpu\n"
		"       -t: mux vcpu timeslice hz (default %d)\n",
		progname, DEFAULT_GDB_PORT, DEFAULT_GUEST_HZ,
		DEFAULT_GUEST_TSLICE);
	exit(code);
}

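/*
 * Translate a guest physical address to a host virtual address.  Addresses
 * below 'lomem_sz' fall into the low segment; addresses at or above 4GB map
 * into the high segment.  Anything else, including the unbacked region
 * between 'lomem_sz' and 4GB, returns NULL.
 */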
void *
paddr_guest2host(uintptr_t gaddr)
{
	if (lomem_sz == 0)
		return (NULL);

	if (gaddr < lomem_sz) {
		return ((void *)(lomem_addr + gaddr));
	} else if (gaddr >= 4*GB && gaddr < (4*GB + himem_sz)) {
		return ((void *)(himem_addr + gaddr - 4*GB));
	} else
		return (NULL);
}

int
fbsdrun_disable_x2apic(void)
{

	return (disable_x2apic);
}

int
fbsdrun_vmexit_on_pause(void)
{

	return (guest_vmexit_on_pause);
}

int
fbsdrun_vmexit_on_hlt(void)
{

	return (guest_vmexit_on_hlt);
}

int
fbsdrun_muxed(void)
{

	return (guest_vcpu_mux);
}

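/*
 * Entry point for a vcpu thread: name the thread after the vm and vcpu,
 * then enter the vcpu run loop.  vm_loop() does not return.
 */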
static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct mt_vmm_info *mtp;
	int vcpu;

	mtp = param;
	vcpu = mtp->mt_vcpu;

	snprintf(tname, sizeof(tname), "%s vcpu %d", vmname, vcpu);
	pthread_set_name_np(mtp->mt_thr, tname);

	vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);

	/* not reached */
	exit(1);
	return (NULL);
}

void
fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	int error;

	if (cpumask & (1 << vcpu)) {
		fprintf(stderr, "addcpu: attempting to add existing cpu %d\n",
		    vcpu);
		exit(1);
	}

	cpumask |= 1 << vcpu;
	foundcpus++;

	/*
	 * Set up the vmexit struct to allow execution to start
	 * at the given RIP
	 */
	vmexit[vcpu].rip = rip;
	vmexit[vcpu].inst_length = 0;

	if (vcpu == BSP || !guest_vcpu_mux) {
		mt_vmm_info[vcpu].mt_ctx = ctx;
		mt_vmm_info[vcpu].mt_vcpu = vcpu;

		error = pthread_create(&mt_vmm_info[vcpu].mt_thr, NULL,
				fbsdrun_start_thread, &mt_vmm_info[vcpu]);
		assert(error == 0);
	}
}

static int
fbsdrun_get_next_cpu(int curcpu)
{

	/*
	 * Get the next available CPU. Assumes they arrive
	 * in ascending order with no gaps.
	 */
	return ((curcpu + 1) % foundcpus);
}

static int
vmexit_catch_reset(void)
{
	stats.io_reset++;
	return (VMEXIT_RESET);
}

static int
vmexit_catch_inout(void)
{
	return (VMEXIT_ABORT);
}

static int
vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
		     uint32_t eax)
{
#if PG_DEBUG /* put all types of debug here */
	if (eax == 0) {
		pause_noswitch = 1;
	} else if (eax == 1) {
		pause_noswitch = 0;
	} else {
		pause_noswitch = 0;
		if (eax == 5) {
			vm_set_capability(ctx, *pvcpu, VM_CAP_MTRAP_EXIT, 1);
		}
	}
#endif
	return (VMEXIT_CONTINUE);
}

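/*
 * Handle an in/out exit: reject string/rep variants, catch the 0x64/0xFE
 * keyboard-controller reset and writes to GUEST_NIO_PORT, and hand
 * everything else to the registered port handlers via emulate_inout().
 */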
static int
vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;
	int bytes, port, in, out;
	uint32_t eax;
	int vcpu;

	vcpu = *pvcpu;

	port = vme->u.inout.port;
	bytes = vme->u.inout.bytes;
	eax = vme->u.inout.eax;
	in = vme->u.inout.in;
	out = !in;

	/* We don't deal with these */
	if (vme->u.inout.string || vme->u.inout.rep)
		return (VMEXIT_ABORT);

	/* Special case of guest reset */
	if (out && port == 0x64 && (uint8_t)eax == 0xFE)
		return (vmexit_catch_reset());

	/* Extra-special case of host notifications */
	if (out && port == GUEST_NIO_PORT)
		return (vmexit_handle_notify(ctx, vme, pvcpu, eax));

	error = emulate_inout(ctx, vcpu, in, port, bytes, &eax, strictio);
	if (error == 0 && in)
		error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX, eax);

	if (error == 0)
		return (VMEXIT_CONTINUE);
	else {
		fprintf(stderr, "Unhandled %s%c 0x%04x\n",
			in ? "in" : "out",
			bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), port);
		return (vmexit_catch_inout());
	}
}

static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	fprintf(stderr, "vm exit rdmsr 0x%x, cpu %d\n", vme->u.msr.code,
	    *pvcpu);
	return (VMEXIT_ABORT);
}

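/*
 * WRMSR exits are handled by emulate_wrmsr().  In mux mode the emulation
 * may select a different vcpu to run next, in which case a vcpu switch
 * is requested.
 */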
static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int newcpu;
	int retval = VMEXIT_CONTINUE;

	newcpu = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);

	if (guest_vcpu_mux && *pvcpu != newcpu) {
		retval = VMEXIT_SWITCH;
		*pvcpu = newcpu;
	}

	return (retval);
}

static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int newcpu;
	int retval = VMEXIT_CONTINUE;

	newcpu = spinup_ap(ctx, *pvcpu,
			   vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);

	if (guest_vcpu_mux && *pvcpu != newcpu) {
		retval = VMEXIT_SWITCH;
		*pvcpu = newcpu;
	}

	return (retval);
}

static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tVMX\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
	fprintf(stderr, "\terror\t\t%d\n", vmexit->u.vmx.error);
	fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
	fprintf(stderr, "\tqualification\t0x%016lx\n",
	    vmexit->u.vmx.exit_qualification);

	return (VMEXIT_ABORT);
}

static int bogus_noswitch = 1;

static int
vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	stats.vmexit_bogus++;

	if (!guest_vcpu_mux || guest_ncpus == 1 || bogus_noswitch) {
		return (VMEXIT_RESTART);
	} else {
		stats.vmexit_bogus_switch++;
		vmexit->inst_length = 0;
		*pvcpu = -1;
		return (VMEXIT_SWITCH);
	}
}

static int
vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	stats.vmexit_hlt++;
	if (fbsdrun_muxed()) {
		*pvcpu = -1;
		return (VMEXIT_SWITCH);
	} else {
		/*
		 * Just continue execution with the next instruction. We use
		 * the HLT VM exit as a way to be friendly with the host
		 * scheduler.
		 */
		return (VMEXIT_CONTINUE);
	}
}

static int pause_noswitch;

static int
vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	stats.vmexit_pause++;

	if (fbsdrun_muxed() && !pause_noswitch) {
		*pvcpu = -1;
		return (VMEXIT_SWITCH);
	} else {
		return (VMEXIT_CONTINUE);
	}
}

static int
vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	stats.vmexit_mtrap++;

	return (VMEXIT_RESTART);
}

static int
vmexit_paging(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	int err;
	stats.vmexit_paging++;

	err = emulate_mem(ctx, *pvcpu, vmexit->u.paging.gpa,
			  &vmexit->u.paging.vie);

	if (err) {
		if (err == EINVAL) {
			fprintf(stderr,
			    "Failed to emulate instruction at 0x%lx\n",
			    vmexit->rip);
		} else if (err == ESRCH) {
			fprintf(stderr, "Unhandled memory access to 0x%lx\n",
			    vmexit->u.paging.gpa);
		}

		return (VMEXIT_ABORT);
	}

	return (VMEXIT_CONTINUE);
}

static void
sigalrm(int sig)
{
	return;
}

static void
setup_timeslice(void)
{
	struct sigaction sa;
	struct itimerval itv;
	int error;

	/*
	 * Set up a realtime timer to generate a SIGALRM at a
	 * frequency of 'guest_tslice' ticks per second.
	 */
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sa.sa_handler = sigalrm;

	error = sigaction(SIGALRM, &sa, NULL);
	assert(error == 0);

	itv.it_interval.tv_sec = 0;
	itv.it_interval.tv_usec = 1000000 / guest_tslice;
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 1000000 / guest_tslice;

	error = setitimer(ITIMER_REAL, &itv, NULL);
	assert(error == 0);
}

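/*
 * VM exit dispatch table, indexed by exit code.  The HLT and PAUSE
 * entries are filled in at startup only if the corresponding exit
 * capabilities are enabled.
 */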
static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT]  = vmexit_inout,
	[VM_EXITCODE_VMX]    = vmexit_vmx,
	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
	[VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
	[VM_EXITCODE_PAGING] = vmexit_paging,
	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
};

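/*
 * Main vcpu run loop.  Each pass calls vm_run() and dispatches the exit
 * through the handler table.  The handler's return value decides what
 * happens next: VMEXIT_CONTINUE resumes at the next instruction,
 * VMEXIT_RESTART re-executes the current one, VMEXIT_SWITCH (mux mode
 * only) picks another vcpu to run, VMEXIT_RESET exits cleanly, and
 * anything else (e.g. VMEXIT_ABORT) terminates with an error.
 */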
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	int error, rc, prevcpu;

	if (guest_vcpu_mux)
		setup_timeslice();

	if (pincpu >= 0) {
		error = vm_set_pinning(ctx, vcpu, pincpu + vcpu);
		assert(error == 0);
	}

	while (1) {
		error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
		if (error != 0) {
			/*
			 * It is possible that 'vmmctl' or some other process
			 * has transitioned the vcpu to CANNOT_RUN state right
			 * before we tried to transition it to RUNNING.
			 *
			 * This is expected to be temporary so just retry.
			 */
			if (errno == EBUSY)
				continue;
			else
				break;
		}

		prevcpu = vcpu;
		rc = (*handler[vmexit[vcpu].exitcode])(ctx, &vmexit[vcpu],
		    &vcpu);
		switch (rc) {
		case VMEXIT_SWITCH:
			assert(guest_vcpu_mux);
			if (vcpu == -1) {
				stats.cpu_switch_rotate++;
				vcpu = fbsdrun_get_next_cpu(prevcpu);
			} else {
				stats.cpu_switch_direct++;
			}
			/* fall through */
		case VMEXIT_CONTINUE:
			rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
			break;
		case VMEXIT_RESTART:
			rip = vmexit[vcpu].rip;
			break;
		case VMEXIT_RESET:
			exit(0);
		default:
			exit(1);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx)
{
	int tmp, error;

	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);

	/*
	 * The guest is allowed to spin up more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	if (error == 0)
		return (VM_MAXCPU);
	else
		return (1);
}

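/*
 * Startup sequence: parse options, open the VM, enable optional exit
 * capabilities, map guest memory, initialize device emulation, build the
 * MP (and optionally ACPI) tables, start the BSP and enter the event loop.
 */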
int
main(int argc, char *argv[])
{
	int c, error, gdb_port, inject_bkpt, tmp, err, ioapic, bvmcons;
	int max_vcpus;
	struct vmctx *ctx;
	uint64_t rip;

	bvmcons = 0;
	inject_bkpt = 0;
	progname = basename(argv[0]);
	gdb_port = DEFAULT_GDB_PORT;
	guest_ncpus = 1;
	ioapic = 0;

	while ((c = getopt(argc, argv, "abehABHIPxp:g:c:z:t:s:S:n:m:M:")) != -1) {
		switch (c) {
		case 'a':
			disable_x2apic = 1;
			break;
		case 'A':
			acpi = 1;
			break;
		case 'b':
			bvmcons = 1;
			break;
		case 'B':
			inject_bkpt = 1;
			break;
		case 'x':
			guest_vcpu_mux = 1;
			break;
		case 'p':
			pincpu = atoi(optarg);
			break;
		case 'c':
			guest_ncpus = atoi(optarg);
			break;
		case 'g':
			gdb_port = atoi(optarg);
			break;
		case 'z':
			guest_hz = atoi(optarg);
			break;
		case 't':
			guest_tslice = atoi(optarg);
			break;
		case 's':
			pci_parse_slot(optarg, 0);
			break;
		case 'S':
			pci_parse_slot(optarg, 1);
			break;
		case 'm':
			lomem_sz = strtoul(optarg, NULL, 0) * MB;
			break;
		case 'M':
			himem_sz = strtoul(optarg, NULL, 0) * MB;
			break;
		case 'H':
			guest_vmexit_on_hlt = 1;
			break;
		case 'I':
			ioapic = 1;
			break;
		case 'P':
			guest_vmexit_on_pause = 1;
			break;
		case 'e':
			strictio = 1;
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 1)
		usage(1);

	/* No need to mux if guest is uni-processor */
	if (guest_ncpus <= 1)
		guest_vcpu_mux = 0;

	/* vmexit on hlt if guest is muxed */
	if (guest_vcpu_mux) {
		guest_vmexit_on_hlt = 1;
		guest_vmexit_on_pause = 1;
	}

	vmname = argv[0];

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(1);
	}

	max_vcpus = num_vcpus_allowed(ctx);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
			guest_ncpus, max_vcpus);
		exit(1);
	}

	if (fbsdrun_vmexit_on_hlt()) {
		err = vm_get_capability(ctx, BSP, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(1);
		}
		vm_set_capability(ctx, BSP, VM_CAP_HALT_EXIT, 1);
		handler[VM_EXITCODE_HLT] = vmexit_hlt;
	}

	if (fbsdrun_vmexit_on_pause()) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(ctx, BSP, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(1);
		}
		vm_set_capability(ctx, BSP, VM_CAP_PAUSE_EXIT, 1);
		handler[VM_EXITCODE_PAUSE] = vmexit_pause;
	}

	if (fbsdrun_disable_x2apic())
		err = vm_set_x2apic_state(ctx, BSP, X2APIC_DISABLED);
	else
		err = vm_set_x2apic_state(ctx, BSP, X2APIC_ENABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(1);
	}

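	/*
	 * Map guest memory into this process: low memory at guest physical
	 * address 0 and, if requested, high memory starting at 4GB.  The
	 * region in between is left unmapped.
	 */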
	if (lomem_sz != 0) {
		lomem_addr = vm_map_memory(ctx, 0, lomem_sz);
		if (lomem_addr == (char *) MAP_FAILED) {
			lomem_sz = 0;
		} else if (himem_sz != 0) {
			himem_addr = vm_map_memory(ctx, 4*GB, himem_sz);
			if (himem_addr == (char *) MAP_FAILED) {
				lomem_sz = 0;
				himem_sz = 0;
			}
		}
	}

	init_inout();
	init_pci(ctx);
	if (ioapic)
		ioapic_init(0);

	if (gdb_port != 0)
		init_dbgport(gdb_port);

	if (bvmcons)
		init_bvmcons();

	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	if (inject_bkpt) {
		error = vm_inject_event(ctx, BSP, VM_HW_EXCEPTION, IDT_BP);
		assert(error == 0);
	}

	/*
	 * build the guest tables, MP etc.
	 */
	mptable_build(ctx, guest_ncpus, ioapic);

	if (acpi) {
		error = acpi_build(ctx, guest_ncpus, ioapic);
		assert(error == 0);
	}

	/*
	 * Add CPU 0
	 */
	fbsdrun_addcpu(ctx, BSP, rip);

	/*
	 * Head off to the main event dispatch loop
	 */
	mevent_dispatch();

	exit(1);
}