/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/usr.sbin/bhyve/bhyverun.c 260167 2014-01-01 21:17:08Z neel $
 */

#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/usr.sbin/bhyve/bhyverun.c 259826 2013-12-24 16:14:19Z jhb $");
30__FBSDID("$FreeBSD: head/usr.sbin/bhyve/bhyverun.c 260167 2014-01-01 21:17:08Z neel $");
31
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>

#include <machine/atomic.h>
#include <machine/segments.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "acpi.h"
#include "inout.h"
#include "dbgport.h"
#include "legacy_irq.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "pci_lpc.h"
#include "xmsr.h"
#include "spinup_ap.h"
#include "rtc.h"

#define GUEST_NIO_PORT          0x488   /* guest upcalls via i/o port */

#define VMEXIT_SWITCH           0       /* force vcpu switch in mux mode */
#define VMEXIT_CONTINUE         1       /* continue from next instruction */
#define VMEXIT_RESTART          2       /* restart current instruction */
#define VMEXIT_ABORT            3       /* abort the vm run loop */
#define VMEXIT_RESET            4       /* guest machine has reset */
#define VMEXIT_POWEROFF         5       /* guest machine has powered off */

#define MB              (1024UL * 1024)
#define GB              (1024UL * MB)

typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);

char *vmname;

int guest_ncpus;

static int pincpu = -1;
static int guest_vmexit_on_hlt, guest_vmexit_on_pause, disable_x2apic;
static int virtio_msix = 1;

static int strictio;
static int strictmsr = 1;

static int acpi;

static char *progname;
static const int BSP = 0;

static int cpumask;

static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);

struct vm_exit vmexit[VM_MAXCPU];

struct bhyvestats {
        uint64_t        vmexit_bogus;
        uint64_t        vmexit_bogus_switch;
        uint64_t        vmexit_hlt;
        uint64_t        vmexit_pause;
        uint64_t        vmexit_mtrap;
        uint64_t        vmexit_inst_emul;
        uint64_t        cpu_switch_rotate;
        uint64_t        cpu_switch_direct;
        int             io_reset;
} stats;

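/*
 * Per-vcpu thread state: one entry per vcpu, holding the pthread handle,
 * the VM context, and the vcpu id handed to that vcpu's run loop.
 */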
struct mt_vmm_info {
        pthread_t       mt_thr;
        struct vmctx    *mt_ctx;
        int             mt_vcpu;
} mt_vmm_info[VM_MAXCPU];

static void
usage(int code)
{

        fprintf(stderr,
                "Usage: %s [-aehwAHIPW] [-g <gdb port>] [-s <pci>] [-S <pci>]\n"
                "       %*s [-c vcpus] [-p pincpu] [-m mem] [-l <lpc>] <vm>\n"
                "       -a: local apic is in XAPIC mode (default is X2APIC)\n"
                "       -A: create an ACPI table\n"
                "       -g: gdb port\n"
                "       -c: # cpus (default 1)\n"
                "       -p: pin vcpu 'n' to host cpu 'pincpu + n'\n"
                "       -H: vmexit from the guest on hlt\n"
                "       -P: vmexit from the guest on pause\n"
                "       -W: force virtio to use single-vector MSI\n"
                "       -e: exit on unhandled I/O access\n"
                "       -h: help\n"
                "       -s: <slot,driver,configinfo> PCI slot config\n"
                "       -S: <slot,driver,configinfo> legacy PCI slot config\n"
                "       -l: LPC device configuration\n"
                "       -m: memory size in MB\n"
                "       -w: ignore unimplemented MSRs\n",
                progname, (int)strlen(progname), "");

        exit(code);
}

void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

        return (vm_map_gpa(ctx, gaddr, len));
}

int
fbsdrun_disable_x2apic(void)
{

        return (disable_x2apic);
}

int
fbsdrun_vmexit_on_pause(void)
{

        return (guest_vmexit_on_pause);
}

int
fbsdrun_vmexit_on_hlt(void)
{

        return (guest_vmexit_on_hlt);
}

int
fbsdrun_virtio_msix(void)
{

        return (virtio_msix);
}

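/*
 * Entry point for each vcpu thread: name the thread after its vcpu and
 * drop into the vcpu run loop. vm_loop() only returns on error, so falling
 * out of it terminates the process.
 */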
static void *
fbsdrun_start_thread(void *param)
{
        char tname[MAXCOMLEN + 1];
        struct mt_vmm_info *mtp;
        int vcpu;

        mtp = param;
        vcpu = mtp->mt_vcpu;

        snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
        pthread_set_name_np(mtp->mt_thr, tname);

        vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);

        /* not reached */
        exit(1);
        return (NULL);
}

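/*
 * Mark a vcpu as active and spin up a dedicated thread that starts
 * executing guest code at the given %rip.
 */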
void
fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
{
        int error;

        if (cpumask & (1 << vcpu)) {
                fprintf(stderr, "addcpu: attempting to add existing cpu %d\n",
                    vcpu);
                exit(1);
        }

        atomic_set_int(&cpumask, 1 << vcpu);

        /*
         * Set up the vmexit struct to allow execution to start
         * at the given RIP
         */
        vmexit[vcpu].rip = rip;
        vmexit[vcpu].inst_length = 0;

        mt_vmm_info[vcpu].mt_ctx = ctx;
        mt_vmm_info[vcpu].mt_vcpu = vcpu;

        error = pthread_create(&mt_vmm_info[vcpu].mt_thr, NULL,
            fbsdrun_start_thread, &mt_vmm_info[vcpu]);
        assert(error == 0);
}

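/*
 * Remove a vcpu from the active set. Returns non-zero when the last vcpu
 * has been deleted, telling the caller the guest has fully spun down.
 */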
static int
fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
{

        if ((cpumask & (1 << vcpu)) == 0) {
                fprintf(stderr, "deletecpu: attempting to delete unknown cpu %d\n",
                    vcpu);
                exit(1);
        }

        atomic_clear_int(&cpumask, 1 << vcpu);
        return (cpumask == 0);
}

static int
vmexit_catch_reset(void)
{
        stats.io_reset++;
        return (VMEXIT_RESET);
}

static int
vmexit_catch_inout(void)
{
        return (VMEXIT_ABORT);
}

static int
vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
    uint32_t eax)
{
#if BHYVE_DEBUG
        /*
         * put guest-driven debug here
         */
#endif
        return (VMEXIT_CONTINUE);
}

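/*
 * Handle an I/O port access exit. String and REP accesses are not emulated.
 * Writing 0xFE to port 0x64 (the traditional keyboard-controller reset) is
 * treated as a guest reset request, and writes to GUEST_NIO_PORT are guest
 * upcalls; everything else goes to the registered in/out port handlers.
 */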
static int
vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
        int error;
        int bytes, port, in, out;
        uint32_t eax;
        int vcpu;

        vcpu = *pvcpu;

        port = vme->u.inout.port;
        bytes = vme->u.inout.bytes;
        eax = vme->u.inout.eax;
        in = vme->u.inout.in;
        out = !in;

        /* We don't deal with these */
        if (vme->u.inout.string || vme->u.inout.rep)
                return (VMEXIT_ABORT);

        /* Special case of guest reset */
        if (out && port == 0x64 && (uint8_t)eax == 0xFE)
                return (vmexit_catch_reset());

        /* Extra-special case of host notifications */
        if (out && port == GUEST_NIO_PORT)
                return (vmexit_handle_notify(ctx, vme, pvcpu, eax));

        error = emulate_inout(ctx, vcpu, in, port, bytes, &eax, strictio);
        if (error == INOUT_OK && in)
                error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX, eax);

        switch (error) {
        case INOUT_OK:
                return (VMEXIT_CONTINUE);
        case INOUT_RESET:
                return (VMEXIT_RESET);
        case INOUT_POWEROFF:
                return (VMEXIT_POWEROFF);
        default:
                fprintf(stderr, "Unhandled %s%c 0x%04x\n",
                    in ? "in" : "out",
                    bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), port);
                return (vmexit_catch_inout());
        }
}

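/*
 * RDMSR/WRMSR exits: forward the access to the MSR emulation code. Unknown
 * MSRs are reported on stderr and abort the guest unless '-w' was given
 * (strictmsr off), in which case reads return 0 and writes are ignored.
 */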
static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
        uint64_t val;
        uint32_t eax, edx;
        int error;

        val = 0;
        error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
        if (error != 0) {
                fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
                    vme->u.msr.code, *pvcpu);
                if (strictmsr)
                        return (VMEXIT_ABORT);
        }

        eax = val;
        error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
        assert(error == 0);

        edx = val >> 32;
        error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
        assert(error == 0);

        return (VMEXIT_CONTINUE);
}

static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
        int error;

        error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
        if (error != 0) {
                fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
                    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
                if (strictmsr)
                        return (VMEXIT_ABORT);
        }
        return (VMEXIT_CONTINUE);
}

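/*
 * SPINUP_AP exit: the guest has requested that an AP be started; hand off
 * to spinup_ap() to initialize and launch the target vcpu.
 */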
static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
        int newcpu;
        int retval = VMEXIT_CONTINUE;

        newcpu = spinup_ap(ctx, *pvcpu,
            vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);

        return (retval);
}

static int
vmexit_spindown_cpu(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
        int lastcpu;

        lastcpu = fbsdrun_deletecpu(ctx, *pvcpu);
        if (!lastcpu)
                pthread_exit(NULL);
        return (vmexit_catch_reset());
}

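/*
 * Unhandled VMX exit: dump the raw exit state to stderr and abort the run
 * loop so the problem can be diagnosed.
 */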
static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

        fprintf(stderr, "vm exit[%d]\n", *pvcpu);
        fprintf(stderr, "\treason\t\tVMX\n");
        fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
        fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
391 fprintf(stderr, "\terror\t\t%d\n", vmexit->u.vmx.error);
391 fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
392 fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
393 fprintf(stderr, "\tqualification\t0x%016lx\n",
394 vmexit->u.vmx.exit_qualification);
395 fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
396 fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
397
398 return (VMEXIT_ABORT);
399}
400
static int
vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

        stats.vmexit_bogus++;

        return (VMEXIT_RESTART);
}

static int
vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

        stats.vmexit_hlt++;

        /*
         * Just continue execution with the next instruction. We use
         * the HLT VM exit as a way to be friendly with the host
         * scheduler.
         */
        return (VMEXIT_CONTINUE);
}

static int
vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

        stats.vmexit_pause++;

        return (VMEXIT_CONTINUE);
}

static int
vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{

        stats.vmexit_mtrap++;

        return (VMEXIT_RESTART);
}

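/*
 * Instruction emulation exit: the guest touched emulated MMIO, so decode
 * and emulate the access via emulate_mem(). EINVAL means the instruction
 * could not be emulated; ESRCH means no device claims the guest address.
 */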
static int
vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
        int err;
        stats.vmexit_inst_emul++;

        err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
            &vmexit->u.inst_emul.vie);

        if (err) {
                if (err == EINVAL) {
                        fprintf(stderr,
                            "Failed to emulate instruction at 0x%lx\n",
                            vmexit->rip);
                } else if (err == ESRCH) {
                        fprintf(stderr, "Unhandled memory access to 0x%lx\n",
                            vmexit->u.inst_emul.gpa);
                }

                return (VMEXIT_ABORT);
        }

        return (VMEXIT_CONTINUE);
}

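/*
 * Dispatch table mapping VM exit codes to their handlers. The HLT and PAUSE
 * handlers are filled in at runtime when the corresponding capabilities are
 * enabled (see fbsdrun_set_capabilities()).
 */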
static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
        [VM_EXITCODE_INOUT]  = vmexit_inout,
        [VM_EXITCODE_VMX]    = vmexit_vmx,
        [VM_EXITCODE_BOGUS]  = vmexit_bogus,
        [VM_EXITCODE_RDMSR]  = vmexit_rdmsr,
        [VM_EXITCODE_WRMSR]  = vmexit_wrmsr,
        [VM_EXITCODE_MTRAP]  = vmexit_mtrap,
        [VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
        [VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
        [VM_EXITCODE_SPINDOWN_CPU] = vmexit_spindown_cpu,
};

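/*
 * Per-vcpu run loop: optionally pin the thread to a host cpu, then
 * repeatedly enter the guest with vm_run() and dispatch each exit to the
 * handler table. The handler's return value decides whether to continue
 * after the instruction, restart it, or exit the process.
 */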
static void
vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
{
        cpuset_t mask;
        int error, rc, prevcpu;
        enum vm_exitcode exitcode;

        if (pincpu >= 0) {
                CPU_ZERO(&mask);
                CPU_SET(pincpu + vcpu, &mask);
                error = pthread_setaffinity_np(pthread_self(),
                    sizeof(mask), &mask);
                assert(error == 0);
        }

        while (1) {
                error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
                if (error != 0)
                        break;

                prevcpu = vcpu;

                exitcode = vmexit[vcpu].exitcode;
                if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
                        fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
                            exitcode);
                        exit(1);
                }

                rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);

                switch (rc) {
                case VMEXIT_CONTINUE:
                        rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
                        break;
                case VMEXIT_RESTART:
                        rip = vmexit[vcpu].rip;
                        break;
                case VMEXIT_RESET:
                        exit(0);
                default:
                        exit(1);
                }
        }
        fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
}

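/*
 * Upper bound on vcpus for this VM: VM_MAXCPU when the vmm supports
 * unrestricted guests, otherwise only the BSP may run.
 */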
static int
num_vcpus_allowed(struct vmctx *ctx)
{
        int tmp, error;

        error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);

        /*
         * The guest is allowed to spinup more than one processor only if the
         * UNRESTRICTED_GUEST capability is available.
         */
        if (error == 0)
                return (VM_MAXCPU);
        else
                return (1);
}

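/*
 * Enable the per-vcpu capabilities requested on the command line (HLT and
 * PAUSE exits), select XAPIC vs. x2APIC mode, and allow the guest to use
 * INVPCID. The exit handlers are hooked up only once, when called for the BSP.
 */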
void
fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
{
        int err, tmp;

        if (fbsdrun_vmexit_on_hlt()) {
                err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
                if (err < 0) {
                        fprintf(stderr, "VM exit on HLT not supported\n");
                        exit(1);
                }
                vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
                if (cpu == BSP)
                        handler[VM_EXITCODE_HLT] = vmexit_hlt;
        }

        if (fbsdrun_vmexit_on_pause()) {
                /*
                 * pause exit support required for this mode
                 */
                err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
                if (err < 0) {
                        fprintf(stderr,
                            "SMP mux requested, no pause support\n");
                        exit(1);
                }
                vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
                if (cpu == BSP)
                        handler[VM_EXITCODE_PAUSE] = vmexit_pause;
        }

        if (fbsdrun_disable_x2apic())
                err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
        else
                err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);

        if (err) {
                fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
                exit(1);
        }

        vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
}

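/*
 * bhyve startup: parse options, open the named VM, set up guest memory and
 * the device models, build the MP (and optionally ACPI) tables, start the
 * BSP at the current %rip, and hand control to the mevent dispatch loop.
 */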
int
main(int argc, char *argv[])
{
        int c, error, gdb_port, err, bvmcons;
        int max_vcpus;
        struct vmctx *ctx;
        uint64_t rip;
        size_t memsize;

        bvmcons = 0;
        progname = basename(argv[0]);
        gdb_port = 0;
        guest_ncpus = 1;
        memsize = 256 * MB;

        while ((c = getopt(argc, argv, "abehwAHIPWp:g:c:s:S:m:l:")) != -1) {
                switch (c) {
                case 'a':
                        disable_x2apic = 1;
                        break;
                case 'A':
                        acpi = 1;
                        break;
                case 'b':
                        bvmcons = 1;
                        break;
                case 'p':
                        pincpu = atoi(optarg);
                        break;
                case 'c':
                        guest_ncpus = atoi(optarg);
                        break;
                case 'g':
                        gdb_port = atoi(optarg);
                        break;
                case 'l':
                        if (lpc_device_parse(optarg) != 0) {
                                errx(EX_USAGE, "invalid lpc device "
                                    "configuration '%s'", optarg);
                        }
                        break;
                case 's':
                        if (pci_parse_slot(optarg, 0) != 0)
                                exit(1);
                        else
                                break;
                case 'S':
                        if (pci_parse_slot(optarg, 1) != 0)
                                exit(1);
                        else
                                break;
                case 'm':
                        error = vm_parse_memsize(optarg, &memsize);
                        if (error)
                                errx(EX_USAGE, "invalid memsize '%s'", optarg);
                        break;
                case 'H':
                        guest_vmexit_on_hlt = 1;
                        break;
                case 'I':
                        /*
                         * The "-I" option was used to add an ioapic to the
                         * virtual machine.
                         *
                         * An ioapic is now provided unconditionally for each
                         * virtual machine and this option is now deprecated.
                         */
                        break;
                case 'P':
                        guest_vmexit_on_pause = 1;
                        break;
                case 'e':
                        strictio = 1;
                        break;
                case 'w':
                        strictmsr = 0;
                        break;
                case 'W':
                        virtio_msix = 0;
                        break;
                case 'h':
                        usage(0);
                default:
                        usage(1);
                }
        }
        argc -= optind;
        argv += optind;

        if (argc != 1)
                usage(1);

        vmname = argv[0];

        ctx = vm_open(vmname);
        if (ctx == NULL) {
                perror("vm_open");
                exit(1);
        }

        max_vcpus = num_vcpus_allowed(ctx);
        if (guest_ncpus > max_vcpus) {
                fprintf(stderr, "%d vCPUs requested but only %d available\n",
                    guest_ncpus, max_vcpus);
                exit(1);
        }

        fbsdrun_set_capabilities(ctx, BSP);

        err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
        if (err) {
                fprintf(stderr, "Unable to setup memory (%d)\n", err);
                exit(1);
        }

        init_mem();
        init_inout();
        legacy_irq_init();

        rtc_init(ctx);

        /*
         * Exit if a device emulation finds an error in its initialization
         */
        if (init_pci(ctx) != 0)
                exit(1);

        if (gdb_port != 0)
                init_dbgport(gdb_port);

        if (bvmcons)
                init_bvmcons();

        error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
        assert(error == 0);

        /*
         * build the guest tables, MP etc.
         */
        mptable_build(ctx, guest_ncpus);

        if (acpi) {
                error = acpi_build(ctx, guest_ncpus);
                assert(error == 0);
        }

        /*
         * Change the proc title to include the VM name.
         */
        setproctitle("%s", vmname);

        /*
         * Add CPU 0
         */
        fbsdrun_addcpu(ctx, BSP, rip);

        /*
         * Head off to the main event dispatch loop
         */
        mevent_dispatch();

        exit(1);
}