vmmapi.c revision 241178
1/*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD$");
31
32#include <sys/types.h>
33#include <sys/sysctl.h>
34#include <sys/ioctl.h>
35#include <sys/mman.h>
36
37#include <machine/specialreg.h>
38
39#include <stdio.h>
40#include <stdlib.h>
41#include <assert.h>
42#include <string.h>
43#include <fcntl.h>
44#include <unistd.h>
45
46#include <machine/vmm.h>
47#include <machine/vmm_dev.h>
48
49#include "vmmapi.h"
50#include "mptable.h"
51
#define BIOS_ROM_BASE		(0xf0000)	/* guest-physical base of the BIOS ROM region */
#define BIOS_ROM_SIZE		(0x10000)	/* 64KB BIOS ROM window */

/*
 * Handle for an open virtual machine.  Allocated by vm_open(); the VM
 * name is stored immediately after the structure in the same allocation.
 */
struct vmctx {
	int	fd;	/* descriptor for the /dev/vmm/<name> device */
	char	*name;	/* VM name; points just past this struct */
};

/* Create/destroy a VM in the kernel via the hw.vmm sysctl interface. */
#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
62
/*
 * Open the vmm device node for VM 'name'.
 *
 * Returns the open descriptor on success, or -1 on failure (allocation
 * failure or open(2) failure).  Previously an allocation failure hit an
 * assert(); returning -1 lets callers use their existing error path.
 */
static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	if (vmfile == NULL)
		return (-1);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}
80
/*
 * Ask the kernel to instantiate a new VM named 'name' via the
 * hw.vmm.create sysctl.  Returns 0 on success, -1 on failure.
 */
int
vm_create(const char *name)
{

	return (sysctlbyname("hw.vmm.create", NULL, NULL, name, strlen(name)));
}
87
88struct vmctx *
89vm_open(const char *name)
90{
91	struct vmctx *vm;
92
93	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
94	assert(vm != NULL);
95
96	vm->fd = -1;
97	vm->name = (char *)(vm + 1);
98	strcpy(vm->name, name);
99
100	if ((vm->fd = vm_device_open(vm->name)) < 0)
101		goto err;
102
103	return (vm);
104err:
105	vm_destroy(vm);
106	return (NULL);
107}
108
109void
110vm_destroy(struct vmctx *vm)
111{
112	assert(vm != NULL);
113
114	if (vm->fd >= 0)
115		close(vm->fd);
116	DESTROY(vm->name);
117
118	free(vm);
119}
120
121size_t
122vmm_get_mem_total(void)
123{
124	size_t mem_total = 0;
125	size_t oldlen = sizeof(mem_total);
126	int error;
127	error = sysctlbyname("hw.vmm.mem_total", &mem_total, &oldlen, NULL, 0);
128	if (error)
129		return -1;
130	return mem_total;
131}
132
133size_t
134vmm_get_mem_free(void)
135{
136	size_t mem_free = 0;
137	size_t oldlen = sizeof(mem_free);
138	int error;
139	error = sysctlbyname("hw.vmm.mem_free", &mem_free, &oldlen, NULL, 0);
140	if (error)
141		return -1;
142	return mem_free;
143}
144
145int
146vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa,
147		  vm_paddr_t *ret_hpa, size_t *ret_len)
148{
149	int error;
150	struct vm_memory_segment seg;
151
152	bzero(&seg, sizeof(seg));
153	seg.gpa = gpa;
154	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
155	*ret_len = seg.len;
156	return (error);
157}
158
159int
160vm_setup_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **mapaddr)
161{
162	int error;
163	struct vm_memory_segment seg;
164
165	/*
166	 * Create and optionally map 'len' bytes of memory at guest
167	 * physical address 'gpa'
168	 */
169	bzero(&seg, sizeof(seg));
170	seg.gpa = gpa;
171	seg.len = len;
172	error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
173	if (error == 0 && mapaddr != NULL) {
174		*mapaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
175				ctx->fd, gpa);
176	}
177	return (error);
178}
179
180char *
181vm_map_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
182{
183
184	/* Map 'len' bytes of memory at guest physical address 'gpa' */
185	return ((char *)mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
186		     ctx->fd, gpa));
187}
188
189int
190vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
191	    uint64_t base, uint32_t limit, uint32_t access)
192{
193	int error;
194	struct vm_seg_desc vmsegdesc;
195
196	bzero(&vmsegdesc, sizeof(vmsegdesc));
197	vmsegdesc.cpuid = vcpu;
198	vmsegdesc.regnum = reg;
199	vmsegdesc.desc.base = base;
200	vmsegdesc.desc.limit = limit;
201	vmsegdesc.desc.access = access;
202
203	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
204	return (error);
205}
206
207int
208vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
209	    uint64_t *base, uint32_t *limit, uint32_t *access)
210{
211	int error;
212	struct vm_seg_desc vmsegdesc;
213
214	bzero(&vmsegdesc, sizeof(vmsegdesc));
215	vmsegdesc.cpuid = vcpu;
216	vmsegdesc.regnum = reg;
217
218	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
219	if (error == 0) {
220		*base = vmsegdesc.desc.base;
221		*limit = vmsegdesc.desc.limit;
222		*access = vmsegdesc.desc.access;
223	}
224	return (error);
225}
226
227int
228vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
229{
230	int error;
231	struct vm_register vmreg;
232
233	bzero(&vmreg, sizeof(vmreg));
234	vmreg.cpuid = vcpu;
235	vmreg.regnum = reg;
236	vmreg.regval = val;
237
238	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
239	return (error);
240}
241
242int
243vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
244{
245	int error;
246	struct vm_register vmreg;
247
248	bzero(&vmreg, sizeof(vmreg));
249	vmreg.cpuid = vcpu;
250	vmreg.regnum = reg;
251
252	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
253	*ret_val = vmreg.regval;
254	return (error);
255}
256
257int
258vm_get_pinning(struct vmctx *ctx, int vcpu, int *host_cpuid)
259{
260	int error;
261	struct vm_pin vmpin;
262
263	bzero(&vmpin, sizeof(vmpin));
264	vmpin.vm_cpuid = vcpu;
265
266	error = ioctl(ctx->fd, VM_GET_PINNING, &vmpin);
267	*host_cpuid = vmpin.host_cpuid;
268	return (error);
269}
270
271int
272vm_set_pinning(struct vmctx *ctx, int vcpu, int host_cpuid)
273{
274	int error;
275	struct vm_pin vmpin;
276
277	bzero(&vmpin, sizeof(vmpin));
278	vmpin.vm_cpuid = vcpu;
279	vmpin.host_cpuid = host_cpuid;
280
281	error = ioctl(ctx->fd, VM_SET_PINNING, &vmpin);
282	return (error);
283}
284
285int
286vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, struct vm_exit *vmexit)
287{
288	int error;
289	struct vm_run vmrun;
290
291	bzero(&vmrun, sizeof(vmrun));
292	vmrun.cpuid = vcpu;
293	vmrun.rip = rip;
294
295	error = ioctl(ctx->fd, VM_RUN, &vmrun);
296	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
297	return (error);
298}
299
300static int
301vm_inject_event_real(struct vmctx *ctx, int vcpu, enum vm_event_type type,
302		     int vector, int error_code, int error_code_valid)
303{
304	struct vm_event ev;
305
306	bzero(&ev, sizeof(ev));
307	ev.cpuid = vcpu;
308	ev.type = type;
309	ev.vector = vector;
310	ev.error_code = error_code;
311	ev.error_code_valid = error_code_valid;
312
313	return (ioctl(ctx->fd, VM_INJECT_EVENT, &ev));
314}
315
/*
 * Inject an event of 'type' with interrupt/exception 'vector' into the
 * vcpu, without an error code.
 */
int
vm_inject_event(struct vmctx *ctx, int vcpu, enum vm_event_type type,
		int vector)
{

	return (vm_inject_event_real(ctx, vcpu, type, vector, 0, 0));
}
323
/*
 * Inject an event of 'type' with interrupt/exception 'vector' into the
 * vcpu, supplying 'error_code' (the error-code-valid flag is set).
 */
int
vm_inject_event2(struct vmctx *ctx, int vcpu, enum vm_event_type type,
		 int vector, int error_code)
{

	return (vm_inject_event_real(ctx, vcpu, type, vector, error_code, 1));
}
331
/*
 * Build the guest's MP table in the BIOS ROM region for 'ncpu' vcpus.
 * 'ioapic' and the optional OEM table ('oemtbl'/'oemtblsz') are passed
 * through to vm_build_mptable() unchanged.
 */
int
vm_build_tables(struct vmctx *ctxt, int ncpu, int ioapic,
		void *oemtbl, int oemtblsz)
{

	return (vm_build_mptable(ctxt, BIOS_ROM_BASE, BIOS_ROM_SIZE, ncpu,
				 ioapic, oemtbl, oemtblsz));
}
340
/*
 * Translate a local APIC id to a vcpu id.  There is a 1:1 identity
 * mapping between the two, so the translation is trivial.
 */
int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{

	return (apicid);
}
350
351int
352vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
353{
354	struct vm_lapic_irq vmirq;
355
356	bzero(&vmirq, sizeof(vmirq));
357	vmirq.cpuid = vcpu;
358	vmirq.vector = vector;
359
360	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
361}
362
363int
364vm_inject_nmi(struct vmctx *ctx, int vcpu)
365{
366	struct vm_nmi vmnmi;
367
368	bzero(&vmnmi, sizeof(vmnmi));
369	vmnmi.cpuid = vcpu;
370
371	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
372}
373
374int
375vm_capability_name2type(const char *capname)
376{
377	int i;
378
379	static struct {
380		const char	*name;
381		int		type;
382	} capstrmap[] = {
383		{ "hlt_exit",		VM_CAP_HALT_EXIT },
384		{ "mtrap_exit",		VM_CAP_MTRAP_EXIT },
385		{ "pause_exit",		VM_CAP_PAUSE_EXIT },
386		{ "unrestricted_guest",	VM_CAP_UNRESTRICTED_GUEST },
387		{ 0 }
388	};
389
390	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
391		if (strcmp(capstrmap[i].name, capname) == 0)
392			return (capstrmap[i].type);
393	}
394
395	return (-1);
396}
397
398int
399vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
400		  int *retval)
401{
402	int error;
403	struct vm_capability vmcap;
404
405	bzero(&vmcap, sizeof(vmcap));
406	vmcap.cpuid = vcpu;
407	vmcap.captype = cap;
408
409	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
410	*retval = vmcap.capval;
411	return (error);
412}
413
414int
415vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
416{
417	struct vm_capability vmcap;
418
419	bzero(&vmcap, sizeof(vmcap));
420	vmcap.cpuid = vcpu;
421	vmcap.captype = cap;
422	vmcap.capval = val;
423
424	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
425}
426
427int
428vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
429{
430	struct vm_pptdev pptdev;
431
432	bzero(&pptdev, sizeof(pptdev));
433	pptdev.bus = bus;
434	pptdev.slot = slot;
435	pptdev.func = func;
436
437	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
438}
439
440int
441vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
442{
443	struct vm_pptdev pptdev;
444
445	bzero(&pptdev, sizeof(pptdev));
446	pptdev.bus = bus;
447	pptdev.slot = slot;
448	pptdev.func = func;
449
450	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
451}
452
453int
454vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
455		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
456{
457	struct vm_pptdev_mmio pptmmio;
458
459	bzero(&pptmmio, sizeof(pptmmio));
460	pptmmio.bus = bus;
461	pptmmio.slot = slot;
462	pptmmio.func = func;
463	pptmmio.gpa = gpa;
464	pptmmio.len = len;
465	pptmmio.hpa = hpa;
466
467	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
468}
469
470int
471vm_setup_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
472	     int destcpu, int vector, int numvec)
473{
474	struct vm_pptdev_msi pptmsi;
475
476	bzero(&pptmsi, sizeof(pptmsi));
477	pptmsi.vcpu = vcpu;
478	pptmsi.bus = bus;
479	pptmsi.slot = slot;
480	pptmsi.func = func;
481	pptmsi.destcpu = destcpu;
482	pptmsi.vector = vector;
483	pptmsi.numvec = numvec;
484
485	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
486}
487
488int
489vm_setup_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
490	      int idx, uint32_t msg, uint32_t vector_control, uint64_t addr)
491{
492	struct vm_pptdev_msix pptmsix;
493
494	bzero(&pptmsix, sizeof(pptmsix));
495	pptmsix.vcpu = vcpu;
496	pptmsix.bus = bus;
497	pptmsix.slot = slot;
498	pptmsix.func = func;
499	pptmsix.idx = idx;
500	pptmsix.msg = msg;
501	pptmsix.addr = addr;
502	pptmsix.vector_control = vector_control;
503
504	return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
505}
506
507uint64_t *
508vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
509	     int *ret_entries)
510{
511	int error;
512
513	static struct vm_stats vmstats;
514
515	vmstats.cpuid = vcpu;
516
517	error = ioctl(ctx->fd, VM_STATS, &vmstats);
518	if (error == 0) {
519		if (ret_entries)
520			*ret_entries = vmstats.num_entries;
521		if (ret_tv)
522			*ret_tv = vmstats.tv;
523		return (vmstats.statbuf);
524	} else
525		return (NULL);
526}
527
528const char *
529vm_get_stat_desc(struct vmctx *ctx, int index)
530{
531	static struct vm_stat_desc statdesc;
532
533	statdesc.index = index;
534	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
535		return (statdesc.desc);
536	else
537		return (NULL);
538}
539
540int
541vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
542{
543	int error;
544	struct vm_x2apic x2apic;
545
546	bzero(&x2apic, sizeof(x2apic));
547	x2apic.cpuid = vcpu;
548
549	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
550	*state = x2apic.state;
551	return (error);
552}
553
554int
555vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
556{
557	int error;
558	struct vm_x2apic x2apic;
559
560	bzero(&x2apic, sizeof(x2apic));
561	x2apic.cpuid = vcpu;
562	x2apic.state = state;
563
564	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);
565
566	return (error);
567}
568
569/*
570 * From Intel Vol 3a:
571 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
572 */
573int
574vcpu_reset(struct vmctx *vmctx, int vcpu)
575{
576	int error;
577	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
578	uint32_t desc_access, desc_limit;
579	uint16_t sel;
580
581	zero = 0;
582
583	rflags = 0x2;
584	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
585	if (error)
586		goto done;
587
588	rip = 0xfff0;
589	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
590		goto done;
591
592	cr0 = CR0_NE;
593	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
594		goto done;
595
596	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
597		goto done;
598
599	cr4 = 0;
600	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
601		goto done;
602
603	/*
604	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
605	 */
606	desc_base = 0xffff0000;
607	desc_limit = 0xffff;
608	desc_access = 0x0093;
609	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
610			    desc_base, desc_limit, desc_access);
611	if (error)
612		goto done;
613
614	sel = 0xf000;
615	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
616		goto done;
617
618	/*
619	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
620	 */
621	desc_base = 0;
622	desc_limit = 0xffff;
623	desc_access = 0x0093;
624	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
625			    desc_base, desc_limit, desc_access);
626	if (error)
627		goto done;
628
629	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
630			    desc_base, desc_limit, desc_access);
631	if (error)
632		goto done;
633
634	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
635			    desc_base, desc_limit, desc_access);
636	if (error)
637		goto done;
638
639	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
640			    desc_base, desc_limit, desc_access);
641	if (error)
642		goto done;
643
644	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
645			    desc_base, desc_limit, desc_access);
646	if (error)
647		goto done;
648
649	sel = 0;
650	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
651		goto done;
652	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
653		goto done;
654	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
655		goto done;
656	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
657		goto done;
658	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
659		goto done;
660
661	/* General purpose registers */
662	rdx = 0xf00;
663	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
664		goto done;
665	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
666		goto done;
667	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
668		goto done;
669	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
670		goto done;
671	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
672		goto done;
673	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
674		goto done;
675	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
676		goto done;
677	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
678		goto done;
679
680	/* GDTR, IDTR */
681	desc_base = 0;
682	desc_limit = 0xffff;
683	desc_access = 0;
684	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
685			    desc_base, desc_limit, desc_access);
686	if (error != 0)
687		goto done;
688
689	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
690			    desc_base, desc_limit, desc_access);
691	if (error != 0)
692		goto done;
693
694	/* TR */
695	desc_base = 0;
696	desc_limit = 0xffff;
697	desc_access = 0x0000008b;
698	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
699	if (error)
700		goto done;
701
702	sel = 0;
703	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
704		goto done;
705
706	/* LDTR */
707	desc_base = 0;
708	desc_limit = 0xffff;
709	desc_access = 0x00000082;
710	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
711			    desc_limit, desc_access);
712	if (error)
713		goto done;
714
715	sel = 0;
716	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
717		goto done;
718
719	/* XXX cr2, debug registers */
720
721	error = 0;
722done:
723	return (error);
724}
725