bhyverun.c revision 253452
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/usr.sbin/bhyve/bhyverun.c 253452 2013-07-18 18:40:54Z grehan $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/usr.sbin/bhyve/bhyverun.c 253452 2013-07-18 18:40:54Z grehan $");

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>

#include <machine/segments.h>

#include <stdio.h>
#include <stdlib.h>
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include <pthread_np.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "inout.h"
#include "dbgport.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "xmsr.h"
#include "ioapic.h"
#include "spinup_ap.h"
#include "rtc.h"

#define	DEFAULT_GUEST_HZ	100
#define	DEFAULT_GUEST_TSLICE	200

#define GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */

#define	VMEXIT_SWITCH		0	/* force vcpu switch in mux mode */
#define	VMEXIT_CONTINUE		1	/* continue from next instruction */
#define	VMEXIT_RESTART		2	/* restart current instruction */
#define	VMEXIT_ABORT		3	/* abort the vm run loop */
#define	VMEXIT_RESET		4	/* guest machine has reset */

#define MB		(1024UL * 1024)
#define GB		(1024UL * MB)

typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);

int guest_tslice = DEFAULT_GUEST_TSLICE;
int guest_hz = DEFAULT_GUEST_HZ;
char *vmname;

int guest_ncpus;

static int pincpu = -1;
static int guest_vcpu_mux;
static int guest_vmexit_on_hlt, guest_vmexit_on_pause, disable_x2apic;

static int foundcpus;

static int strictio;

static int acpi;

static char *progname;
static const int BSP = 0;

static int cpumask;

static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);

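/*
 * Per-vcpu exit state: seeded with the initial %rip by fbsdrun_addcpu() and
 * filled in by vm_run() on each VM exit.
 */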
struct vm_exit vmexit[VM_MAXCPU];

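/*
 * Counters for the exit types and vcpu switches seen by the run loop.
 */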
struct fbsdstats {
	uint64_t	vmexit_bogus;
	uint64_t	vmexit_bogus_switch;
	uint64_t	vmexit_hlt;
	uint64_t	vmexit_pause;
	uint64_t	vmexit_mtrap;
	uint64_t	vmexit_paging;
	uint64_t	cpu_switch_rotate;
	uint64_t	cpu_switch_direct;
	int		io_reset;
} stats;

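/*
 * Per-vcpu thread context: one entry (and one pthread) per active vcpu,
 * unless vcpus are muxed onto the BSP's thread.
 */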
struct mt_vmm_info {
	pthread_t	mt_thr;
	struct vmctx	*mt_ctx;
	int		mt_vcpu;
} mt_vmm_info[VM_MAXCPU];

static void
usage(int code)
{

	fprintf(stderr,
		"Usage: %s [-abehxABHIP][-g <gdb port>][-c vcpus][-z <hz>]"
		"[-t <tslice>][-s <pci>][-S <pci>][-p pincpu][-n <pci>]"
		"[-m mem] <vmname>\n"
		"       -a: local apic is in XAPIC mode (default is X2APIC)\n"
		"       -A: create an ACPI table\n"
		"       -b: enable the bvmcons console device\n"
		"       -g: gdb port (default is %d and 0 means don't open)\n"
		"       -c: # cpus (default 1)\n"
		"       -p: pin vcpu 'n' to host cpu 'pincpu + n'\n"
		"       -B: inject breakpoint exception on vm entry\n"
		"       -H: vmexit from the guest on hlt\n"
		"       -I: present an ioapic to the guest\n"
		"       -P: vmexit from the guest on pause\n"
		"       -e: exit on unhandled i/o access\n"
		"       -h: help\n"
		"       -z: guest hz (default is %d)\n"
		"       -s: <slot,driver,configinfo> PCI slot config\n"
		"       -S: <slot,driver,configinfo> legacy PCI slot config\n"
		"       -m: memory size in MB\n"
		"       -x: mux vcpus to 1 hcpu\n"
		"       -t: mux vcpu timeslice hz (default %d)\n",
		progname, DEFAULT_GDB_PORT, DEFAULT_GUEST_HZ,
		DEFAULT_GUEST_TSLICE);
	exit(code);
}

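/*
 * Convert a guest physical address into a pointer within the bhyve process,
 * using the guest memory mapping established by vm_setup_memory().
 */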
void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}

int
fbsdrun_disable_x2apic(void)
{

	return (disable_x2apic);
}

int
fbsdrun_vmexit_on_pause(void)
{

	return (guest_vmexit_on_pause);
}

int
fbsdrun_vmexit_on_hlt(void)
{

	return (guest_vmexit_on_hlt);
}

int
fbsdrun_muxed(void)
{

	return (guest_vcpu_mux);
}

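/*
 * Entry point for a vcpu thread: give the thread a descriptive name and
 * enter the vcpu run loop.  This function does not return.
 */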
static void *
fbsdrun_start_thread(void *param)
{
	char tname[MAXCOMLEN + 1];
	struct mt_vmm_info *mtp;
	int vcpu;

	mtp = param;
	vcpu = mtp->mt_vcpu;

	snprintf(tname, sizeof(tname), "%s vcpu %d", vmname, vcpu);
	pthread_set_name_np(mtp->mt_thr, tname);

	vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);

	/* not reached */
	exit(1);
	return (NULL);
}

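/*
 * Mark a vcpu as active and record its initial %rip.  Each vcpu gets its own
 * thread, except in mux mode where only the BSP's thread is created and the
 * remaining vcpus are run by rotating on it.
 */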
void
fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	int error;

	if (cpumask & (1 << vcpu)) {
		fprintf(stderr, "addcpu: attempting to add existing cpu %d\n",
		    vcpu);
		exit(1);
	}

	cpumask |= 1 << vcpu;
	foundcpus++;

	/*
	 * Set up the vmexit struct to allow execution to start
	 * at the given RIP
	 */
	vmexit[vcpu].rip = rip;
	vmexit[vcpu].inst_length = 0;

	if (vcpu == BSP || !guest_vcpu_mux) {
		mt_vmm_info[vcpu].mt_ctx = ctx;
		mt_vmm_info[vcpu].mt_vcpu = vcpu;

		error = pthread_create(&mt_vmm_info[vcpu].mt_thr, NULL,
				fbsdrun_start_thread, &mt_vmm_info[vcpu]);
		assert(error == 0);
	}
}

static int
fbsdrun_get_next_cpu(int curcpu)
{

	/*
	 * Get the next available CPU. Assumes they arrive
	 * in ascending order with no gaps.
	 */
	return ((curcpu + 1) % foundcpus);
}

static int
vmexit_catch_reset(void)
{
	stats.io_reset++;
	return (VMEXIT_RESET);
}

static int
vmexit_catch_inout(void)
{
	return (VMEXIT_ABORT);
}

static int
vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
		     uint32_t eax)
{
#if PG_DEBUG /* put all types of debug here */
	if (eax == 0) {
		pause_noswitch = 1;
	} else if (eax == 1) {
		pause_noswitch = 0;
	} else {
		pause_noswitch = 0;
		if (eax == 5) {
			vm_set_capability(ctx, *pvcpu, VM_CAP_MTRAP_EXIT, 1);
		}
	}
#endif
	return (VMEXIT_CONTINUE);
}

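/*
 * Handle an I/O port access exit.  A guest-initiated reset via the keyboard
 * controller (port 0x64) and writes to the host notification port are
 * special-cased; everything else goes to the registered in/out handlers.
 */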
static int
vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int error;
	int bytes, port, in, out;
	uint32_t eax;
	int vcpu;

	vcpu = *pvcpu;

	port = vme->u.inout.port;
	bytes = vme->u.inout.bytes;
	eax = vme->u.inout.eax;
	in = vme->u.inout.in;
	out = !in;

	/* We don't deal with these */
	if (vme->u.inout.string || vme->u.inout.rep)
		return (VMEXIT_ABORT);

	/* Special case of guest reset */
	if (out && port == 0x64 && (uint8_t)eax == 0xFE)
		return (vmexit_catch_reset());

	/* Extra-special case of host notifications */
	if (out && port == GUEST_NIO_PORT)
		return (vmexit_handle_notify(ctx, vme, pvcpu, eax));

	error = emulate_inout(ctx, vcpu, in, port, bytes, &eax, strictio);
	if (error == 0 && in)
		error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX, eax);

	if (error == 0)
		return (VMEXIT_CONTINUE);
	else {
		fprintf(stderr, "Unhandled %s%c 0x%04x\n",
			in ? "in" : "out",
			bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), port);
		return (vmexit_catch_inout());
	}
}

static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	fprintf(stderr, "vm exit rdmsr 0x%x, cpu %d\n", vme->u.msr.code,
	    *pvcpu);
	return (VMEXIT_ABORT);
}

static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int newcpu;
	int retval = VMEXIT_CONTINUE;

	newcpu = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);

	if (guest_vcpu_mux && *pvcpu != newcpu) {
		retval = VMEXIT_SWITCH;
		*pvcpu = newcpu;
	}

	return (retval);
}

static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
	int newcpu;
	int retval = VMEXIT_CONTINUE;

	newcpu = spinup_ap(ctx, *pvcpu,
			   vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);

	if (guest_vcpu_mux && *pvcpu != newcpu) {
		retval = VMEXIT_SWITCH;
		*pvcpu = newcpu;
	}

	return (retval);
}

static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

	fprintf(stderr, "vm exit[%d]\n", *pvcpu);
	fprintf(stderr, "\treason\t\tVMX\n");
	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
	fprintf(stderr, "\terror\t\t%d\n", vmexit->u.vmx.error);
	fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
	fprintf(stderr, "\tqualification\t0x%016lx\n",
	    vmexit->u.vmx.exit_qualification);

	return (VMEXIT_ABORT);
}

static int bogus_noswitch = 1;

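/*
 * A 'bogus' exit is a spurious return from the guest with no work to do,
 * e.g. when the vcpu thread is interrupted by a pending signal.  Normally
 * the current instruction is simply restarted; in mux mode this is also an
 * opportunity to rotate to the next vcpu.
 */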
static int
vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	stats.vmexit_bogus++;

	if (!guest_vcpu_mux || guest_ncpus == 1 || bogus_noswitch) {
		return (VMEXIT_RESTART);
	} else {
		stats.vmexit_bogus_switch++;
		vmexit->inst_length = 0;
		*pvcpu = -1;
		return (VMEXIT_SWITCH);
	}
}

static int
vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	stats.vmexit_hlt++;
	if (fbsdrun_muxed()) {
		*pvcpu = -1;
		return (VMEXIT_SWITCH);
	} else {
		/*
		 * Just continue execution with the next instruction. We use
		 * the HLT VM exit as a way to be friendly with the host
		 * scheduler.
		 */
		return (VMEXIT_CONTINUE);
	}
}

static int pause_noswitch;

static int
vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	stats.vmexit_pause++;

	if (fbsdrun_muxed() && !pause_noswitch) {
		*pvcpu = -1;
		return (VMEXIT_SWITCH);
	} else {
		return (VMEXIT_CONTINUE);
	}
}

static int
vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	stats.vmexit_mtrap++;

	return (VMEXIT_RESTART);
}

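/*
 * Handle a page fault on an emulated device region by decoding and emulating
 * the faulting access against the registered memory ranges.
 */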
static int
vmexit_paging(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
	int err;
	stats.vmexit_paging++;

	err = emulate_mem(ctx, *pvcpu, vmexit->u.paging.gpa,
			  &vmexit->u.paging.vie);

	if (err) {
		if (err == EINVAL) {
			fprintf(stderr,
			    "Failed to emulate instruction at 0x%lx\n",
			    vmexit->rip);
		} else if (err == ESRCH) {
			fprintf(stderr, "Unhandled memory access to 0x%lx\n",
			    vmexit->u.paging.gpa);
		}

		return (VMEXIT_ABORT);
	}

	return (VMEXIT_CONTINUE);
}

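/*
 * Empty SIGALRM handler.  In mux mode the periodic signal kicks the running
 * vcpu out of the guest so the run loop can hand the host cpu to another
 * vcpu.
 */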
static void
sigalrm(int sig)
{
	return;
}

static void
setup_timeslice(void)
{
	struct sigaction sa;
	struct itimerval itv;
	int error;

	/*
	 * Set up a realtime timer to generate a SIGALRM at a
	 * frequency of 'guest_tslice' ticks per second.
	 */
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sa.sa_handler = sigalrm;

	error = sigaction(SIGALRM, &sa, NULL);
	assert(error == 0);

	itv.it_interval.tv_sec = 0;
	itv.it_interval.tv_usec = 1000000 / guest_tslice;
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 1000000 / guest_tslice;

	error = setitimer(ITIMER_REAL, &itv, NULL);
	assert(error == 0);
}

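/*
 * Dispatch table of VM exit handlers, indexed by exit code.  The HLT and
 * PAUSE entries are filled in at startup only when those exits are enabled.
 */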
static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT]  = vmexit_inout,
	[VM_EXITCODE_VMX]    = vmexit_vmx,
	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
	[VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
	[VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
	[VM_EXITCODE_PAGING] = vmexit_paging,
	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
};

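/*
 * Main vcpu execution loop: run the vcpu, dispatch each VM exit to its
 * handler, and decide whether to continue, restart the instruction, switch
 * to another vcpu (mux mode) or tear the guest down.
 */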
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
{
	cpuset_t mask;
	int error, rc, prevcpu;
	enum vm_exitcode exitcode;

	if (guest_vcpu_mux)
		setup_timeslice();

	if (pincpu >= 0) {
		CPU_ZERO(&mask);
		CPU_SET(pincpu + vcpu, &mask);
		error = pthread_setaffinity_np(pthread_self(),
					       sizeof(mask), &mask);
		assert(error == 0);
	}

	while (1) {
		error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
		if (error != 0) {
			/*
			 * It is possible that 'vmmctl' or some other process
			 * has transitioned the vcpu to CANNOT_RUN state right
			 * before we tried to transition it to RUNNING.
			 *
			 * This is expected to be temporary so just retry.
			 */
			if (errno == EBUSY)
				continue;
			else
				break;
		}

		prevcpu = vcpu;

		exitcode = vmexit[vcpu].exitcode;
		if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
			fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
			    exitcode);
			exit(1);
		}

		rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

		switch (rc) {
		case VMEXIT_SWITCH:
			assert(guest_vcpu_mux);
			if (vcpu == -1) {
				stats.cpu_switch_rotate++;
				vcpu = fbsdrun_get_next_cpu(prevcpu);
			} else {
				stats.cpu_switch_direct++;
			}
			/* fall through */
		case VMEXIT_CONTINUE:
			rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
			break;
		case VMEXIT_RESTART:
			rip = vmexit[vcpu].rip;
			break;
		case VMEXIT_RESET:
			exit(0);
		default:
			exit(1);
		}
	}
	fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

static int
num_vcpus_allowed(struct vmctx *ctx)
{
	int tmp, error;

	error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);

	/*
	 * The guest is allowed to spinup more than one processor only if the
	 * UNRESTRICTED_GUEST capability is available.
	 */
	if (error == 0)
		return (VM_MAXCPU);
	else
		return (1);
}

int
main(int argc, char *argv[])
{
	int c, error, gdb_port, inject_bkpt, tmp, err, ioapic, bvmcons;
	int max_vcpus;
	struct vmctx *ctx;
	uint64_t rip;
	size_t memsize;

	bvmcons = 0;
	inject_bkpt = 0;
	progname = basename(argv[0]);
	gdb_port = DEFAULT_GDB_PORT;
	guest_ncpus = 1;
	ioapic = 0;
	memsize = 256 * MB;

	while ((c = getopt(argc, argv, "abehABHIPxp:g:c:z:t:s:S:n:m:")) != -1) {
		switch (c) {
		case 'a':
			disable_x2apic = 1;
			break;
		case 'A':
			acpi = 1;
			break;
		case 'b':
			bvmcons = 1;
			break;
		case 'B':
			inject_bkpt = 1;
			break;
		case 'x':
			guest_vcpu_mux = 1;
			break;
		case 'p':
			pincpu = atoi(optarg);
			break;
		case 'c':
			guest_ncpus = atoi(optarg);
			break;
		case 'g':
			gdb_port = atoi(optarg);
			break;
		case 'z':
			guest_hz = atoi(optarg);
			break;
		case 't':
			guest_tslice = atoi(optarg);
			break;
		case 's':
			if (pci_parse_slot(optarg, 0) != 0)
				exit(1);
			else
				break;
		case 'S':
			if (pci_parse_slot(optarg, 1) != 0)
				exit(1);
			else
				break;
		case 'm':
			memsize = strtoul(optarg, NULL, 0) * MB;
			break;
		case 'H':
			guest_vmexit_on_hlt = 1;
			break;
		case 'I':
			ioapic = 1;
			break;
		case 'P':
			guest_vmexit_on_pause = 1;
			break;
		case 'e':
			strictio = 1;
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 1)
		usage(1);

	/* No need to mux if guest is uni-processor */
	if (guest_ncpus <= 1)
		guest_vcpu_mux = 0;

	/* vmexit on hlt if guest is muxed */
	if (guest_vcpu_mux) {
		guest_vmexit_on_hlt = 1;
		guest_vmexit_on_pause = 1;
	}

	vmname = argv[0];

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(1);
	}

	max_vcpus = num_vcpus_allowed(ctx);
	if (guest_ncpus > max_vcpus) {
		fprintf(stderr, "%d vCPUs requested but only %d available\n",
			guest_ncpus, max_vcpus);
		exit(1);
	}

	if (fbsdrun_vmexit_on_hlt()) {
		err = vm_get_capability(ctx, BSP, VM_CAP_HALT_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr, "VM exit on HLT not supported\n");
			exit(1);
		}
		vm_set_capability(ctx, BSP, VM_CAP_HALT_EXIT, 1);
		handler[VM_EXITCODE_HLT] = vmexit_hlt;
	}

	if (fbsdrun_vmexit_on_pause()) {
		/*
		 * pause exit support required for this mode
		 */
		err = vm_get_capability(ctx, BSP, VM_CAP_PAUSE_EXIT, &tmp);
		if (err < 0) {
			fprintf(stderr,
			    "SMP mux requested, no pause support\n");
			exit(1);
		}
		vm_set_capability(ctx, BSP, VM_CAP_PAUSE_EXIT, 1);
		handler[VM_EXITCODE_PAUSE] = vmexit_pause;
	}

	if (fbsdrun_disable_x2apic())
		err = vm_set_x2apic_state(ctx, BSP, X2APIC_DISABLED);
	else
		err = vm_set_x2apic_state(ctx, BSP, X2APIC_ENABLED);

	if (err) {
		fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
		exit(1);
	}

	err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
	if (err) {
		fprintf(stderr, "Unable to setup memory (%d)\n", err);
		exit(1);
	}

	init_mem();
	init_inout();

	rtc_init(ctx);

	/*
	 * Exit if a device emulation finds an error in its initialization
	 */
	if (init_pci(ctx) != 0)
		exit(1);

	if (ioapic)
		ioapic_init(0);

	if (gdb_port != 0)
		init_dbgport(gdb_port);

	if (bvmcons)
		init_bvmcons();

	error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
	assert(error == 0);

	if (inject_bkpt) {
		error = vm_inject_event(ctx, BSP, VM_HW_EXCEPTION, IDT_BP);
		assert(error == 0);
	}

	/*
	 * build the guest tables, MP etc.
	 */
	mptable_build(ctx, guest_ncpus, ioapic);

	if (acpi) {
		error = acpi_build(ctx, guest_ncpus, ioapic);
		assert(error == 0);
	}

	/*
	 * Add CPU 0
	 */
	fbsdrun_addcpu(ctx, BSP, rip);

	/*
	 * Head off to the main event dispatch loop
	 */
	mevent_dispatch();

	exit(1);
}